diff --git a/README.md b/README.md index 808565f518..9fb8bf22e0 100644 --- a/README.md +++ b/README.md @@ -60,13 +60,41 @@ The documentation is organized into two main directories - `docs`: Contains the documentation files for latest RavenDB version. - `versioned_docs`: Contains the documentation files for all other RavenDB versions. -## Adding new version - -```bash -npm run docusaurus docs:version version_label -``` - -This command creates a new version of the documentation by adding `version-version_label` subdirectory to `versioned_docs` directory, which contains a snapshot of `docs` directory. +## Adding a new version + +To add a new documentation version: + +1. Create a snapshot of the current version in the `versioned_docs` directory. + Use: + ```bash + npm run docusaurus docs:version version_label + ``` +2. Make the new version selectable in the sidebar by updating the current version number in the `docs` property of the `docusaurus.config.ts` file. + +E.g., to add version `7.2`: +* Create a version for `7.1` + ```bash + npm run docusaurus docs:version 7.1 + ``` +* Set `7.2` as the current docs version. + `docusaurus.config.ts` file: + ```ts + docs: { + sidebarPath: "sidebars.ts", + routeBasePath: "/", + includeCurrentVersion: true, + lastVersion: 'current', + versions: { + current: { + label: "7.2", + path: "7.2" + } + }, + onlyIncludeVersions: getOnlyIncludeVersions(), + //editUrl: + // 'https://github.com/ravendb/docs/tree/main/' + }, + ``` ## Modifying latest version diff --git a/docusaurus.config.ts b/docusaurus.config.ts index 3d1cba7b01..fcac613712 100644 --- a/docusaurus.config.ts +++ b/docusaurus.config.ts @@ -49,8 +49,8 @@ const config: Config = { lastVersion: 'current', versions: { current: { - label: "7.1", - path: "7.1" + label: "7.2", + path: "7.2" } }, onlyIncludeVersions: getOnlyIncludeVersions(), diff --git a/package-lock.json b/package-lock.json index 2682a41d98..7b3936fead 100644 --- a/package-lock.json +++ b/package-lock.json @@ -247,6 +247,7 @@ "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.41.0.tgz", "integrity": "sha512-G9I2atg1ShtFp0t7zwleP6aPS4DcZvsV4uoQOripp16aR6VJzbEnKFPLW4OFXzX7avgZSpYeBAS+Zx4FOgmpPw==", "license": "MIT", + "peer": true, "dependencies": { "@algolia/client-common": "5.41.0", "@algolia/requester-browser-xhr": "5.41.0", @@ -397,6 +398,7 @@ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz", "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", @@ -2184,6 +2186,7 @@ } ], "license": "MIT", + "peer": true, "engines": { "node": ">=18" }, @@ -2206,6 +2209,7 @@ } ], "license": "MIT", + "peer": true, "engines": { "node": ">=18" } @@ -2315,6 +2319,7 @@ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", "license": "MIT", + "peer": true, "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } @@ -2736,6 +2741,7 @@ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", "license": "MIT", + "peer": true, "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -3493,6 +3499,7 @@ "resolved":
"https://registry.npmjs.org/@docusaurus/faster/-/faster-3.9.2.tgz", "integrity": "sha512-DEVIwhbrZZ4ir31X+qQNEQqDWkgCJUV6kiPPAd2MGTY8n5/n0c4B8qA5k1ipF2izwH00JEf0h6Daaut71zzkyw==", "license": "MIT", + "peer": true, "dependencies": { "@docusaurus/types": "3.9.2", "@rspack/core": "^1.5.0", @@ -3645,6 +3652,7 @@ "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.9.2.tgz", "integrity": "sha512-C5wZsGuKTY8jEYsqdxhhFOe1ZDjH0uIYJ9T/jebHwkyxqnr4wW0jTkB72OMqNjsoQRcb0JN3PcSeTwFlVgzCZg==", "license": "MIT", + "peer": true, "dependencies": { "@docusaurus/core": "3.9.2", "@docusaurus/logger": "3.9.2", @@ -4594,6 +4602,7 @@ "version": "3.1.0", "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.1.0.tgz", "integrity": "sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ==", + "peer": true, "dependencies": { "@types/mdx": "^2.0.0" }, @@ -5148,6 +5157,7 @@ "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", "license": "MIT", + "peer": true, "dependencies": { "@babel/core": "^7.21.3", "@svgr/babel-preset": "8.1.0", @@ -5252,6 +5262,7 @@ "integrity": "sha512-oExhY90bes5pDTVrei0xlMVosTxwd/NMafIpqsC4dMbRYZ5KB981l/CX8tMnGsagTplj/RcG9BeRYmV6/J5m3w==", "hasInstallScript": true, "license": "Apache-2.0", + "peer": true, "dependencies": { "@swc/counter": "^0.1.3", "@swc/types": "^0.1.25" @@ -6184,6 +6195,7 @@ "version": "19.1.8", "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.8.tgz", "integrity": "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g==", + "peer": true, "dependencies": { "csstype": "^3.0.2" } @@ -6353,6 +6365,7 @@ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.39.0.tgz", "integrity": "sha512-g3WpVQHngx0aLXn6kfIYCZxM6rRJlWzEkVpqEFLT3SgEDsp9cpCbxxgwnE504q4H+ruSDh/VGS6nqZIDynP+vg==", "dev": true, + "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.39.0", "@typescript-eslint/types": "8.39.0", @@ -6770,6 +6783,7 @@ "version": "8.15.0", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -6849,6 +6863,7 @@ "version": "8.17.1", "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", @@ -6892,6 +6907,7 @@ "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-5.41.0.tgz", "integrity": "sha512-9E4b3rJmYbBkn7e3aAPt1as+VVnRhsR4qwRRgOzpeyz4PAOuwKh0HI4AN6mTrqK0S0M9fCCSTOUnuJ8gPY/tvA==", "license": "MIT", + "peer": true, "dependencies": { "@algolia/abtesting": "1.7.0", "@algolia/client-abtesting": "5.41.0", @@ -7498,6 +7514,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.8.19", "caniuse-lite": "^1.0.30001751", @@ -8446,6 +8463,7 @@ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", "license": "MIT", + "peer": true, "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ 
-9518,6 +9536,7 @@ "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.32.0.tgz", "integrity": "sha512-LSehfdpgMeWcTZkWZVIJl+tkZ2nuSkyyB9C27MZqFWXuph7DvaowgcTvKqxvpLW1JZIk8PN7hFY3Rj9LQ7m7lg==", "dev": true, + "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.12.1", @@ -10365,6 +10384,7 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "license": "MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -15546,6 +15566,7 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "license": "MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -16136,6 +16157,7 @@ "url": "https://github.com/sponsors/ai" } ], + "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -17039,6 +17061,7 @@ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", "license": "MIT", + "peer": true, "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -17840,6 +17863,7 @@ "version": "19.1.0", "resolved": "https://registry.npmjs.org/react/-/react-19.1.0.tgz", "integrity": "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -17848,6 +17872,7 @@ "version": "19.1.0", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.0.tgz", "integrity": "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==", + "peer": true, "dependencies": { "scheduler": "^0.26.0" }, @@ -17899,6 +17924,7 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", + "peer": true, "dependencies": { "@types/react": "*" }, @@ -17925,6 +17951,7 @@ "version": "5.3.4", "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "peer": true, "dependencies": { "@babel/runtime": "^7.12.13", "history": "^4.9.0", @@ -19929,7 +19956,8 @@ "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", - "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==" + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "peer": true }, "node_modules/type-check": { "version": "0.4.0", @@ -20075,6 +20103,7 @@ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz", "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", "devOptional": true, + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -20426,6 +20455,7 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": 
"sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "license": "MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -20629,6 +20659,7 @@ "version": "5.100.0", "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.100.0.tgz", "integrity": "sha512-H8yBSBTk+BqxrINJnnRzaxU94SVP2bjd7WmA+PfCphoIdDpeQMJ77pq9/4I7xjLq38cB1bNKfzYPZu8pB3zKtg==", + "peer": true, "dependencies": { "@types/eslint-scope": "^3.7.7", "@types/estree": "^1.0.8", @@ -21321,6 +21352,7 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-4.1.12.tgz", "integrity": "sha512-JInaHOamG8pt5+Ey8kGmdcAcg3OL9reK8ltczgHTAwNhMys/6ThXHityHxVV2p3fkw/c+MAvBHFVYHFZDmjMCQ==", "license": "MIT", + "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } diff --git a/versioned_docs/version-7.1/ai-integration/_category_.json b/versioned_docs/version-7.1/ai-integration/_category_.json new file mode 100644 index 0000000000..dffbdf0935 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 10, + "label": "AI Integration" +} diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/_category_.json b/versioned_docs/version-7.1/ai-integration/ai-agents/_category_.json new file mode 100644 index 0000000000..cf3a568a10 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/ai-agents/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 3, + "label": "AI Agents" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_configuration.mdx b/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_configuration.mdx new file mode 100644 index 0000000000..d4084b323f --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_configuration.mdx @@ -0,0 +1,84 @@ +--- +title: "AI agents: Configuration" +hide_table_of_contents: true +sidebar_label: Configuration +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# AI agents: Configuration + + +* Configuration keys listed below are used to change AI agents behavior. + +* Configuration scope: + + * **Server-wide** configuration keys apply to agents on all databases. + + * **Per-database** configuration keys apply to all agents on a specific database, + overriding server-wide settings for these agents. + + * To change the settings for a specific agent, edit its configuration using the [API](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#defining-an-agent-configuration) or [Studio](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#configure-basic-settings). + +* [Learn about configuring RavenDB](../../server/configuration/configuration-options). 
+ +* On this page: + * [Ai.Agent.Tools.TokenUsageThreshold](../../ai-integration/ai-agents/ai-agents_configuration#aiagenttoolstokenusagethreshold) + * [Ai.Agent.Trimming.Summarization.SummarizationResultPrefix](../../ai-integration/ai-agents/ai-agents_configuration#aiagenttrimmingsummarizationsummarizationresultprefix) + * [Ai.Agent.Trimming.Summarization.SummarizationTaskBeginningPrompt](../../ai-integration/ai-agents/ai-agents_configuration#aiagenttrimmingsummarizationsummarizationtaskbeginningprompt) + * [Ai.Agent.Trimming.Summarization.SummarizationTaskEndPrompt](../../ai-integration/ai-agents/ai-agents_configuration#aiagenttrimmingsummarizationsummarizationtaskendprompt) + + + +## Ai.Agent.Tools.TokenUsageThreshold + 
+ +The recommended token threshold for a tool response to the LLM. +If the response exceeds this threshold, a notification will be raised. + +- **Type**: `int` +- **Default**: 10000 +- **Scope**: Server-wide or per database + +
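+As a minimal sketch, a server-wide override of this key could be added to the server's `settings.json` (assuming the standard RavenDB configuration mechanism linked above; the surrounding `ServerUrl` key and the value `20000` are illustrative assumptions only):
+
+```json
+{
+    "ServerUrl": "http://localhost:8080",
+    "Ai.Agent.Tools.TokenUsageThreshold": 20000
+}
+```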
+ +## Ai.Agent.Trimming.Summarization.SummarizationResultPrefix + +The text prefix that precedes the summary of the previous conversation. + +- **Type**: `string` +- **Default**: "Summary of previous conversation: " +- **Scope**: Server-wide or per database + +
+ +## Ai.Agent.Trimming.Summarization.SummarizationTaskBeginningPrompt + +The instruction text that precedes the serialized conversation when requesting a summary. + +- **Type**: `string` +- **Default**: @"Summarize the following AI conversation into a concise, linear narrative that + retains all critical information. Ensure the summary: + - Includes key identifiers, usernames, timestamps, and any reference codes + - Preserves the original intent of both the user and the assistant in each exchange + - Reflects decisions made, suggestions given, preferences expressed, and any changes in direction + - Captures tone when relevant (e.g., sarcastic, formal, humorous, concerned) + - Omits general filler or small talk unless it contributes to context or tone Format the output in a structured manner (such as bullet points or labeled sections) suitable for fitting into a limited context window. Do not discard any information that contributes to understanding the conversation's flow and outcome." +- **Scope**: Server-wide or per database + +
+ +## Ai.Agent.Trimming.Summarization.SummarizationTaskEndPrompt + +The user-role message that triggers the conversation summarization process. + +- **Type**: `string` +- **Default**: "Reminder - go over the entire previous conversation and summarize that according to the original instructions" +- **Scope**: Server-wide or per database diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_overview.mdx b/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_overview.mdx new file mode 100644 index 0000000000..fa189617c1 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_overview.mdx @@ -0,0 +1,272 @@ +--- +title: "AI agents: Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# AI agents: Overview + + +* An AI agent is a highly customizable [mediation component](../../ai-integration/ai-agents/ai-agents_overview#ai-agent-usage-flowchart) that an authorized client can tailor to its needs and install on the server. The agent serves the client by facilitating communication between the client, an LLM, and a RavenDB database. + +* Clients can use AI agents to automate complex workflows by leveraging LLM capabilities such as data analysis, decision-making, and natural language processing. + +* The LLM can use an AI agent to query the database and request the client to perform actions. + +* Granting an LLM access to a credible data source such as a company database can significantly enhance its ability to provide the client with accurate and context-aware responses. Such access can also mitigate LLM behaviors that harm its usability, such as 'hallucinations' and user-pleasing bias. + +* Delegating the communication with the LLM to an AI agent can significantly reduce client code complexity and development overhead. + +* In this article: + * [Defining and running AI agents](../../ai-integration/ai-agents/ai-agents_overview#defining-and-running-an-ai-agent) + * [The main stages in defining an AI agent](../../ai-integration/ai-agents/ai-agents_overview#the-main-stages-in-defining-an-ai-agent) + * [What is a conversation](../../ai-integration/ai-agents/ai-agents_overview#what-is-a-conversation) + * [Initiating a conversation](../../ai-integration/ai-agents/ai-agents_overview#initiating-a-conversation) + * [AI agent usage flowchart](../../ai-integration/ai-agents/ai-agents_overview#ai-agent-usage-flowchart) + * [Streaming LLM responses](../../ai-integration/ai-agents/ai-agents_overview#streaming-llm-responses) + * [Reducing throughput and expediting LLM response](../../ai-integration/ai-agents/ai-agents_overview#reducing-throughput-and-expediting-llm-response) + * [Common use cases](../../ai-integration/ai-agents/ai-agents_overview#common-use-cases) + + + +## Defining and running an AI agent + +AI agents can be created by RavenDB clients (provided they have database administration permissions). +They reside on a RavenDB server, and can be invoked by clients to, for example, handle user requests and respond to events tracked by the client. + + +An agent can serve multiple clients concurrently. 
+* The agent's **layout**, including its configuration, logic, and tools, is shared by all the clients that use the agent. +* **Conversations** that clients conduct with the agent are isolated from one another. + Each client maintains its own conversation instance with the agent with complete privacy, including - + * Parameter values that the client may pass to the agent + * All conversation content and history + * Results received when the conversation ends + + + +* [Learn to create an AI agent using the client API](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api) +* [Learn to create an AI agent using Studio](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio) + + +### The main stages in defining an AI agent: +To define an AI agent, the client needs to specify - + +* A **connection string** to the AI model. + [Create a connection string using the API](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#creating-a-connection-string) + [Create a connection string using Studio](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#configure-basic-settings) + +* An **agent configuration** that defines the agent. + [Define an agent configuration using the API](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#defining-an-agent-configuration) + [Define an agent configuration using Studio](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#configure-basic-settings) + + An agent configuration includes - + * **Basic agent settings**, like the unique ID by which the system recognizes the agent. + * A **system prompt** by which the agent instructs the AI model what its characteristics are, e.g. its role. + * Optional **agent parameters**. + Agent parameters' values are provided by the client when it starts a conversation with the agent, and can be used in queries initiated by the LLM (see **query tools** below). + * Optional **query tools**. + The LLM will be able to invoke query tools freely to retrieve data from the database. + * **Read-only operations** + Query tools can apply **read operations** only. + To make changes in the database, use [action tools](../../ai-integration/ai-agents/ai-agents_overview#action-tools). + + Note that actions can be performed only by the client. The LLM can just request the client to perform actions on its behalf. + + * **Database access** + The LLM has no direct access to the database. To use a query tool, it must send a query request to the agent, which will send the RQL query defined by the tool to the database and pass its results to the LLM. + * **Query parameters** + The RQL query defined by a query tool may optionally include parameters, identified by a `$` prefix. + Both the user and the LLM can pass values to these parameters. + **Users** can pass values to query parameters through **agent parameters**, + when the client starts a conversation with the agent. + **The LLM** can pass values to queries through a **parameters schema**, + outlined as part of the query tool, when requesting the agent to run the query. + * **Initial-context queries** + You can optionally set a query tool as an **initial-context query**. + Queries that are **not** set this way are invoked when the LLM requests the agent to run them. 
+ Queries that **are** set as initial-context queries are executed by the agent as soon as it starts a conversation with the LLM, without waiting for the LLM to invoke them, so that data relevant to the conversation is included in the initial context sent to the LLM. + E.g., an initial-context query can provide the LLM with the last 5 orders placed by a customer before the actual conversation starts, as context for an answer that the LLM is requested to provide about the customer's order history. + + * Optional **action tools** that the LLM will be able to invoke freely. + The LLM will be able to use these tools to request the client to perform actions. + +### What is a conversation: +A conversation is a communication session between the client, the agent, and the LLM that maintains the history of messages exchanged between these participants since the conversation began. +* The conversation starts when the client invokes the agent and provides it with an [initial context](../../ai-integration/ai-agents/ai-agents_overview#initiating-a-conversation). +* The conversation may include multiple "turns" of message exchanges between the client and the LLM, mediated by the agent. + * Each turn starts with a new **user prompt** from the client. + * During the turn, the LLM can trigger the agent to run queries or request the client to perform actions, using [defined query and action tools](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#adding-agent-tools). + * The turn ends with an [LLM response](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#conversation-response) to the user prompt. + The response may trigger a new turn (e.g., by requesting more information), + or be the final LLM response and end the conversation. +* The agent maintains the continuity of the conversation by [storing all messages](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#setting-a-conversation) exchanged since the conversation began in a dedicated document in the `@conversations` collection and providing all stored messages to the LLM with each new agent message. +* The conversation ends when the LLM provides the agent with its final response. + +[Initiate a conversation using the API](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#managing-conversations) +[Initiate a conversation using Studio](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#start-new-chat) + +### Initiating a conversation: +To start a conversation with the LLM, the agent will send it an **initial context** that includes - + +* The pre-defined [agent configuration](../../ai-integration/ai-agents/ai-agents_overview#the-main-stages-in-defining-an-ai-agent) (automatically sent by the agent) with: + * The system prompt + * A response object that defines the layout for the LLM response + * Optional agent parameters + * Optional query tools + (and if any query tool is configured as an [initial-context query](../../ai-integration/ai-agents/ai-agents_overview#initial-context-queries) - results for this query) + * Optional action tools + +* **Values for agent parameters** + If agent parameters were defined in the agent configuration, the client is required to provide their values to the agent when starting a conversation. + + E.g., + The agent configuration may include an agent parameter called `employeeId`. + A query tool may include an RQL query like `from Employees as E where id() == $employeeId`, using this agent parameter. 
When the client starts a conversation with the agent, it will be required to provide the value for `employeeId`, e.g. `employees/8-A`. + When the LLM requests the agent to invoke this query tool, the agent will replace `$employeeId` with `employees/8-A` before running the query. + [See an example that utilizes this agent parameter](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#query-tools), and see the query sketch at the end of this section. + + + Providing query values when starting a conversation gives the client the ability to shape and limit the scope of LLM queries according to its objectives. + + +* **Stored conversation messages** + Since the LLM keeps no record of previous messages, the agent is responsible for enabling a continuous conversation. + It achieves this by automatically recording all messages of each conversation in a dedicated document in the `@conversations` collection. + When the agent needs to continue a conversation, it will pull all previous messages from the `@conversations` collection document, and send them to the LLM. + The conversation will remain available in the `@conversations` collection even after it ends, so it can be resumed at any future time. + +* A **user prompt**, set by the client, that defines, for example, a question or a request for particular information. + 
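+As a hedged illustration of how these pieces combine, a query tool built around the `employeeId` example above might define RQL like the following (a sketch assuming the Northwind sample schema; `$employeeId` is the agent parameter supplied by the client, while `$minDate` is a hypothetical LLM parameter declared in the tool's parameters schema, not taken from the examples above):
+
+```rql
+// Last 5 orders handled by the given employee, no earlier than $minDate
+from Orders
+where Employee == $employeeId and OrderedAt >= $minDate
+order by OrderedAt desc
+limit 5
+```
+
+Before running the query, the agent substitutes the client-provided value (e.g. `employees/8-A`) for `$employeeId` and the LLM-provided value for `$minDate`; only the query results are passed back to the LLM.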
+ +## AI agent usage flowchart + +The flowchart below illustrates interactions between the User, RavenDB client, AI agent, AI model, and RavenDB database. + +![AI agent usage flowchart](./assets/ai-agents_flowchart.png) + +1. **User`<->`Client** flow + Users interact with the AI agent through clients. + The user can provide agent parameter values through the client, and get responses from the agent. + +2. **Client`<->`Database** flow + The client can interact with the database directly, either on its own initiative or as a result of AI agent action requests (query requests are handled by the agent). + +3. **Client`<->`Agent** flow + * To invoke an agent, the client needs to provide it with an [initial context](../../ai-integration/ai-agents/ai-agents_overview#initiating-a-conversation). + * During the conversation, the agent may send action requests to the client on behalf of the LLM. + * When the LLM provides the agent with its final response, the agent will provide it to the client. + The client does not need to reply to this message. + * E.g., the client can pass the agent a research topic, a user prompt that guides the AI model to act as a research assistant, and all the messages that were included in the conversation so far. + The agent can respond with a summary of the research topic, and a request for the client to save it in the database. + +4. **Agent`<->`Database** flow + * The agent can query the database on behalf of the AI model. + When the query ends, the agent will return its results to the AI model. + * When the agent is requested to run a query that includes _agent parameters_, it will replace these parameters with values provided by the client before running the query. + * When the agent is requested to run a query that includes _LLM parameters_, it will replace these parameters with values provided by the LLM before running the query. + +5. **Agent`<->`Model** flow + * **When a conversation is started**, the agent needs to provide the AI model with an [initial context](../../ai-integration/ai-agents/ai-agents_overview#initiating-a-conversation), partly defined by the agent configuration and partly by the client. + * **During the conversation**, the AI model can respond to the agent with - + * Requests for queries. + If a query includes LLM parameters, the LLM will include values for them, and the agent will replace the parameters with these values, run the query, and return its results to the LLM. + If a query includes agent parameters, the agent will replace them with values provided by the client, run the query, and return its results to the LLM. + * Requests for actions. + The agent will pass such requests to the client and return their results to the LLM. + * The final response to the user prompt, in the layout defined by the response object. + The agent will pass the response to the client (which doesn't need to reply to it). + 
+ +## Streaming LLM responses + +Rather than wait for the LLM to finish generating a response and then pass it to the client in its entirety, the agent can stream response chunks (determined by the LLM, e.g. words or symbols) to the client one by one, as soon as each chunk is returned by the LLM. This allows the client to process and display the response gradually. + +Streaming can ease the processing of lengthy LLM responses for clients, and create a better user experience by keeping users from waiting and providing them with a continuous, fluent interaction. + +Streaming is supported by most AI models, including OpenAI models such as GPT-4, as well as Ollama models. + +[Streaming LLM responses using the API](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#stream-llm-responses) + 
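+The fragment below is a conceptual C# sketch of consuming a streamed response. The `IStreamedReply` type and its `StreamAsync` method are hypothetical placeholders, not the actual RavenDB client API - see the API link above for the real signatures:
+
+```csharp
+using System;
+using System.Collections.Generic;
+using System.Threading.Tasks;
+
+// Hypothetical shape only - stands in for whatever the conversation object exposes.
+public interface IStreamedReply
+{
+    IAsyncEnumerable<string> StreamAsync();
+}
+
+public static class StreamingSketch
+{
+    public static async Task RenderAsync(IStreamedReply reply)
+    {
+        // Display each chunk (e.g. a word or symbol) as soon as it arrives,
+        // instead of waiting for the complete response.
+        await foreach (var chunk in reply.StreamAsync())
+            Console.Write(chunk);
+        Console.WriteLine();
+    }
+}
+```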
+ +## Reducing throughput and expediting LLM response + +If throughput and LLM response time are considerations, the following suggestions can help optimize performance: + +### Define a chat trimming configuration: + +The LLM doesn't keep conversation history. To allow a continuous conversation, the agent precedes each new message it sends to the LLM with all the messages that were exchanged in the conversation since it started. + +To save traffic and tokens, you can summarize conversations using **chat trimming**. This can be helpful when transfer rate and cost are a concern or the context becomes too large to handle efficiently. + +[Configuring chat trimming using the API](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#set-chat-trimming-configuration) +[Configuring chat trimming using Studio](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#configure-chat-trimming) + +### Optimize query tools: + +When creating query tools - +* Provide the LLM with clear instructions on how to use each query tool effectively. +* Narrow your queries: + * Design queries to return only the data that is relevant to the agent's role and the user's prompt. + * You can limit the scope of a query both in the RQL statement itself and by using agent parameters to filter results. + * Avoid overly broad queries that return large datasets, as they can overwhelm the LLM and lead to slower response times. + * Consider projecting only relevant properties and setting a limit on the number of results returned by each query to prevent excessive data transfer and processing, e.g. - + + + ```rql + from Orders as O where O.ShipTo.Country == $country + ``` + + + ```rql + from Orders as O where O.ShipTo.Country == $country select O.Employee, O.Lines.Quantity limit 4 + ``` + + + +* Supervise querying: + * Test query tools with various prompts and scenarios to identify and address any performance bottlenecks. + * Monitor the performance of query tools in production to identify and address any issues that arise over time. + * Regularly review and update query tools to ensure they remain relevant and efficient as the database evolves. + +[Creating query tools using the API](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#query-tools) +[Creating query tools using Studio](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#add-query-tools) + +### Set maximum number of querying iterations: + +You can limit the number of times that the LLM is allowed to trigger database queries in response to a single user prompt. + +[Setting iterations limit using the API](../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#set-maximum-number-of-iterations) + +
+ +## Common use cases + +AI agents are designed to easily integrate AI capabilities into applications and workflows. They can interact with users, intelligently retrieve and process data from proprietary databases, and apply actions based on the roles they are assigned and the data they have access to. Some of the tasks and applications they can be tailored to perform include - + +#### Customer support chatbot agents +Agents can answer customer queries based on information stored in databases and internal knowledge bases, provide troubleshooting steps, and guide users through processes in real time. + +#### Data analysis and reporting agents +Agents can analyze large datasets to extract relevant data and present it in a user-friendly format, escalate customer issues and application output, create reports and highlight points of interest, and help businesses make informed decisions. + +#### Content generation agents +Agents can generate summaries, add automated comments to articles and application-generated content, refer readers to related material, and create marketing content based on user input and stored information. + +#### Workflow automation agents +Agents can automate repetitive tasks like email sorting, spam filtering, form filling, or file organization. + +#### Intelligent recommendation agents +Agents can provide personalized recommendations based on user preferences and available data, e.g. a _library assistant_ suggesting books and other resources, an _HR office assistant_ recommending rewards for employees based on their performance and available facilities near their residence, or an _e-commerce assistant_ recommending products. \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_security-concerns.mdx b/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_security-concerns.mdx new file mode 100644 index 0000000000..295909a052 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_security-concerns.mdx @@ -0,0 +1,95 @@ +--- +title: "AI agents: Security concerns" +hide_table_of_contents: true +sidebar_label: "Security concerns" +sidebar_position: 4 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# AI agents: Security concerns + + +This page covers potential security concerns related to AI agents and the mitigation strategies for these concerns. + +* On this page: + * [Unauthorized database access](../../ai-integration/ai-agents/ai-agents_security-concerns#unauthorized-database-access) + * [Data compromise during transit](../../ai-integration/ai-agents/ai-agents_security-concerns#data-compromise-during-transit) + * [Untraceable malicious or unexpected actions](../../ai-integration/ai-agents/ai-agents_security-concerns#untraceable-malicious-or-unexpected-actions) + * [AI model data memorization](../../ai-integration/ai-agents/ai-agents_security-concerns#ai-model-data-memorization) + * [Validation or injection attacks via user input](../../ai-integration/ai-agents/ai-agents_security-concerns#validation-or-injection-attacks-via-user-input) + + + +## Unauthorized database access + +Concern: Unauthorized access to databases can lead to data breaches. 
+ +* **Mitigation: Read-only access** + The LLM has no direct access to the database. It can only request the agent, via query tools, to query the database on its behalf, and the agent can only apply read-only operations. + +* **Mitigation: DBA control** + Control over the database is determined using certificates. Only users whose certificates grant them a database administrator or a higher role can create and manage agents. + The DBA retains full control over connections to the AI model (through connection strings), the agent configuration, and the queries that the agent is allowed to run. + +* **Mitigation: Agent scope** + An AI agent is created for a specific database and has no access to other databases on the server, ensuring database-level isolation. + +
+ +## Data compromise during transit + +Concern: Data may be compromised during transit. + +* **Mitigation: Secure TLS (Transport Layer Security) communication** + All data is transferred over HTTPS between the client, the agent, the database, and the AI model, to ensure its encryption during transit. + +
+ +## Untraceable malicious or unexpected actions + +Concern: Inability to trace malicious or unexpected actions related to agents. + +* **Mitigation: Audit logging** + RavenDB [admin logs](../../studio/server/debug/admin-logs/) track the creation, modification, and deletion of AI agents, as well as agent interactions with the database. + + Example of an audit log entry recorded when an agent was deleted: + ``` + Starting to process record 16 (current 15) for aiAgent_useHandleToRunChat_1. + Type: DeleteAiAgentCommand. + Cluster database change type: RecordChanged + Date 2025-09-23 22:29:45.0391 + Level DEBUG + Thread ID 58 + Resource aiAgent_useHandleToRunChat_1 + Logger Raven.Server.Documents.DocumentDatabase + ``` +
+ +## AI model data memorization + +Concern: Sensitive data might inadvertently be memorized and reproduced by the AI model. + +* **Mitigation: Free selection of AI model** + RavenDB doesn't enforce the use of specific providers or AI models, but lets you freely choose the services that best suit your needs and security requirements. + When using the service of your choice, it is your responsibility to define safe queries and expose only the data that you actually intend to share with the AI model. + +* **Mitigation: Agent parameters** + You can use [agent parameters](../../ai-integration/ai-agents/ai-agents_overview#query-parameters) to limit the scope of the defined query and the dataset subsequently transferred to the AI model. + 
+ +## Validation or injection attacks via user input + +Concern: Validation or injection attacks crafted through malicious user input. + +* **Mitigation: Query scope** + The agent queries a limited subset of the stored data, restricting an attacker's access to the rest of the data and to data belonging to other users. + +* **Mitigation: Read-only access** + Query tools can apply read-only RQL queries, preventing attackers from modifying any data. diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_start.mdx b/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_start.mdx new file mode 100644 index 0000000000..a5bfd34959 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/ai-agents/ai-agents_start.mdx @@ -0,0 +1,53 @@ +--- +title: "AI Agents: Start" +hide_table_of_contents: true +sidebar_label: Start +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; + +import CardWithImage from "@site/src/components/Common/CardWithImage"; +import CardWithImageHorizontal from "@site/src/components/Common/CardWithImageHorizontal"; +import ColGrid from "@site/src/components/ColGrid"; +import practicalLookAiAgentsImage from "../assets/practical-look-ai-agents-article-image.webp"; + +import webinarThumbnailPlaceholder from "@site/static/img/webinar.webp"; + +# AI Agents + +### Create conversational AI proxies for your applications. +AI agents are server-side components that act as secure proxies between RavenDB clients and AI models. They can be easily customized to handle specific client needs, tasks, or workflows, such as answering questions, performing data analysis, or automating processes. +- Using AI agents frees developers from the need to manage the communication with the AI model in their code, and enables rapid integration of AI capabilities into their applications. +- An agent receives requests from clients and maintains continuous conversations with AI models to fulfill them. During the conversation, the agent can enable the model to securely query a RavenDB database (e.g., fetch recent orders or run vector searches on products) and request the client to perform actions (like sending emails or creating new orders). +- You can use AI agents to quickly create an intelligent, actionable, conversational interface for your applications, in a way that abstracts much of the complexity of AI integration. + +### Use cases +Creating an AI agent and assigning it a role can be done in minutes using Studio or the API, making it easy to address a wide variety of use cases like - +* Customer support chatbot agents +* Data analysis and reporting agents +* Content generation agents +* Workflow automation agents +* Intelligent recommendation agents + +### Technical documentation +Use the technical documentation to learn how to create and manage AI agents, configure secure database access, enable agents to trigger client actions, and more. + + + + + + +#### Learn more: In-depth AI agents articles + + + + +### Related live sessions & videos +Watch our webinars to see AI agents in action and learn practical implementation techniques. 
+ + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_flowchart.png b/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_flowchart.png new file mode 100644 index 0000000000..0b295c196d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_flowchart.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_start_apiImage.png b/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_start_apiImage.png new file mode 100644 index 0000000000..834870f37d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_start_apiImage.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_start_ovImage.png b/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_start_ovImage.png new file mode 100644 index 0000000000..94cf289840 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_start_ovImage.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_start_studioImage.png b/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_start_studioImage.png new file mode 100644 index 0000000000..93c97251cc Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/assets/ai-agents_start_studioImage.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/_category_.json b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/_category_.json new file mode 100644 index 0000000000..55cd0dacc8 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": "Creating AI Agents" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_ai-agents-view.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_ai-agents-view.png new file mode 100644 index 0000000000..8e82023b9c Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_ai-agents-view.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_config-basic-settings.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_config-basic-settings.png new file mode 100644 index 0000000000..9f0fe06795 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_config-basic-settings.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_config-basic-settings_schema.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_config-basic-settings_schema.png new file mode 100644 index 0000000000..16386152a1 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_config-basic-settings_schema.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_config-chat-trimming.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_config-chat-trimming.png new file mode 100644 index 0000000000..6cc2d1a996 Binary files /dev/null and 
b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_config-chat-trimming.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_connection-string.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_connection-string.png new file mode 100644 index 0000000000..feb4231e1f Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_connection-string.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_connection-string_select-or-create.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_connection-string_select-or-create.png new file mode 100644 index 0000000000..dcdc557b30 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_connection-string_select-or-create.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_create-ai-agent.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_create-ai-agent.png new file mode 100644 index 0000000000..f6239d076e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_create-ai-agent.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_define-agent-tools.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_define-agent-tools.png new file mode 100644 index 0000000000..e736aa4321 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_define-agent-tools.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_define-agent-tools_add-action-tool.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_define-agent-tools_add-action-tool.png new file mode 100644 index 0000000000..33e8823a4c Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_define-agent-tools_add-action-tool.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_define-agent-tools_add-query-tool.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_define-agent-tools_add-query-tool.png new file mode 100644 index 0000000000..c6dc7f1161 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_define-agent-tools_add-query-tool.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_run-agent.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_run-agent.png new file mode 100644 index 0000000000..30cab35916 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_run-agent.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_action-tool.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_action-tool.png new file mode 100644 index 0000000000..fbac6d49b7 
Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_action-tool.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_llm-response-minimized.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_llm-response-minimized.png new file mode 100644 index 0000000000..8806bdd6b7 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_llm-response-minimized.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_params.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_params.png new file mode 100644 index 0000000000..52e545294d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_params.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_prompts.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_prompts.png new file mode 100644 index 0000000000..74e865db76 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_prompts.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_query-tool.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_query-tool.png new file mode 100644 index 0000000000..2b050f7db5 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_query-tool.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_raw-data.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_raw-data.png new file mode 100644 index 0000000000..52db6317b8 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_running_raw-data.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_runtime-view.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_runtime-view.png new file mode 100644 index 0000000000..91ac663fff Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_runtime-view.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_save-agent.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_save-agent.png new file mode 100644 index 0000000000..aee0b44e1e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_save-agent.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_set-agent-params.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_set-agent-params.png new file mode 100644 index 0000000000..a93a512867 Binary files /dev/null and 
b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_set-agent-params.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_test-agent_run-test.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_test-agent_run-test.png new file mode 100644 index 0000000000..cf36ea16ae Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_test-agent_run-test.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_test-agent_test-button.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_test-agent_test-button.png new file mode 100644 index 0000000000..bd97ab4ee2 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_test-agent_test-button.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_test-results_minimized.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_test-results_minimized.png new file mode 100644 index 0000000000..964d7638fe Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_test-results_minimized.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_your-agent.png b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_your-agent.png new file mode 100644 index 0000000000..f7e97944da Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/ai-agents_your-agent.png differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_ai-agents-view.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_ai-agents-view.snagx new file mode 100644 index 0000000000..05acef667d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_ai-agents-view.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-basic-settings.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-basic-settings.snagx new file mode 100644 index 0000000000..b28b2d8d88 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-basic-settings.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-basic-settings_schema.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-basic-settings_schema.snagx new file mode 100644 index 0000000000..c024726034 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-basic-settings_schema.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming.snagx new file mode 100644 index 
0000000000..83a260100a Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming_summarization-settings.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming_summarization-settings.snagx new file mode 100644 index 0000000000..f6e4effc8f Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming_summarization-settings.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming_summarization-settings_history-expiration.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming_summarization-settings_history-expiration.snagx new file mode 100644 index 0000000000..88d7b36b03 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming_summarization-settings_history-expiration.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming_truncation-settings.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming_truncation-settings.snagx new file mode 100644 index 0000000000..30d85702da Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_config-chat-trimming_truncation-settings.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_connection-string.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_connection-string.snagx new file mode 100644 index 0000000000..c027fe57d3 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_connection-string.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_connection-string_select-or-create.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_connection-string_select-or-create.snagx new file mode 100644 index 0000000000..37e999d0b5 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_connection-string_select-or-create.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_conversations.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_conversations.snagx new file mode 100644 index 0000000000..1dde92fb1e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_conversations.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_create-ai-agent.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_create-ai-agent.snagx new file mode 100644 index 0000000000..d04ca31692 
Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_create-ai-agent.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_define-agent-tools.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_define-agent-tools.snagx new file mode 100644 index 0000000000..d67f29aa6f Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_define-agent-tools.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_define-agent-tools_add-action-tool.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_define-agent-tools_add-action-tool.snagx new file mode 100644 index 0000000000..15c97f67f1 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_define-agent-tools_add-action-tool.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_define-agent-tools_add-query-tool.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_define-agent-tools_add-query-tool.snagx new file mode 100644 index 0000000000..cd4d353b5e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_define-agent-tools_add-query-tool.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_run-agent.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_run-agent.snagx new file mode 100644 index 0000000000..8ca1cca7ac Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_run-agent.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_action-tool.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_action-tool.snagx new file mode 100644 index 0000000000..96ca8ea820 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_action-tool.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_llm-response-minimized.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_llm-response-minimized.snagx new file mode 100644 index 0000000000..6f1812864e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_llm-response-minimized.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_params.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_params.snagx new file mode 100644 index 0000000000..99abc1458b Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_params.snagx differ diff --git 
a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_prompts.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_prompts.snagx new file mode 100644 index 0000000000..499952c701 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_prompts.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_query-tool.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_query-tool.snagx new file mode 100644 index 0000000000..1773184308 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_query-tool.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_raw-data.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_raw-data.snagx new file mode 100644 index 0000000000..f9fc29b12c Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_running_raw-data.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_runtime-view.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_runtime-view.snagx new file mode 100644 index 0000000000..7782b9b36b Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_runtime-view.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_save-agent.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_save-agent.snagx new file mode 100644 index 0000000000..09c543c7fc Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_save-agent.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-agent-params.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-agent-params.snagx new file mode 100644 index 0000000000..fb02342cca Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-agent-params.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-agent-params_params-list.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-agent-params_params-list.snagx new file mode 100644 index 0000000000..77458f246f Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-agent-params_params-list.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-chat-persistence.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-chat-persistence.snagx new file mode 100644 index 0000000000..0c878d48da Binary files /dev/null and 
b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-chat-persistence.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-chat-persistence_set-expiration.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-chat-persistence_set-expiration.snagx new file mode 100644 index 0000000000..839ea33cf7 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_set-chat-persistence_set-expiration.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_test-agent_run-test.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_test-agent_run-test.snagx new file mode 100644 index 0000000000..a7b62e98dc Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_test-agent_run-test.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_test-agent_test-button.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_test-agent_test-button.snagx new file mode 100644 index 0000000000..38e2604d64 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_test-agent_test-button.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_test-results_minimized.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_test-results_minimized.snagx new file mode 100644 index 0000000000..4e6f0ef18c Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_test-results_minimized.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_your-agent.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_your-agent.snagx new file mode 100644 index 0000000000..541e41132e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/ai-agents_your-agent.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/gen-ai_hash-flow.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/gen-ai_hash-flow.snagx new file mode 100644 index 0000000000..7e6eff9d12 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/gen-ai_hash-flow.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/gen-ai_licensing.snagx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/gen-ai_licensing.snagx new file mode 100644 index 0000000000..cb3a8439a6 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/gen-ai_licensing.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/gen-ai_metadata-identifier-and-hash-codes.snagx 
b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/gen-ai_metadata-identifier-and-hash-codes.snagx new file mode 100644 index 0000000000..f9b564b644 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/assets/snagit/gen-ai_metadata-identifier-and-hash-codes.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api.mdx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api.mdx new file mode 100644 index 0000000000..8c420895db --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api.mdx @@ -0,0 +1,1289 @@ +--- +title: "Creating AI agents: API" +hide_table_of_contents: true +sidebar_label: Client API +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Creating AI agents: API + + +* To create an AI agent, a client defines its configuration, provides it with settings and tools, and registers the agent with the server. + +* Once the agent is created, the client can initiate or resume conversations, get LLM responses, and perform actions based on LLM insights. + +* This page provides a step-by-step guide to creating an AI agent and interacting with it using the Client API. + +* In this article: + * [Creating a connection string](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#creating-a-connection-string) + * [Defining an agent configuration](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#defining-an-agent-configuration) + * [Set the agent ID](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#set-the-agent-id) + * [Define a response object](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#define-a-response-object) + * [Add agent parameters](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#add-agent-parameters) + * [Set maximum number of iterations](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#set-maximum-number-of-iterations) + * [Set chat trimming configuration](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#set-chat-trimming-configuration) + * [Adding agent tools](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#adding-agent-tools) + * [Query tools](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#query-tools) + * [Initial-context queries](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#initial-context-queries) + * [Action tools](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#action-tools) + * [Creating the Agent](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#creating-the-agent) + * [Retrieving existing agent configurations](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#retrieving-existing-agent-configurations) + * [Managing conversations](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#managing-conversations) + * [Setting a 
conversation](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#setting-a-conversation)
+    * [Processing action-tool requests](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#processing-action-tool-requests)
+      * [Action-tool Handlers](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#action-tool-handlers)
+      * [Action-tool Receivers](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#action-tool-receivers)
+    * [Conversation response](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#conversation-response)
+    * [Setting user prompt and running the conversation](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#setting-user-prompt-and-running-the-conversation)
+  * [Stream LLM responses](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#stream-llm-responses)
+  * [Full Example](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#full-example)
+
+
+
+## Creating a connection string
+
+Your agent will need a connection string to connect with the LLM. Create a connection string using an `AiConnectionString` instance and the `PutConnectionStringOperation` operation.
+(You can also create a connection string using Studio, see [here](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#configure-basic-settings).)
+
+Use a local `Ollama` model when speed, cost, open source, or security are your main considerations,
+or a remote `OpenAI` service for its additional resources and capabilities.
+
+* **Example**
+
+  ```csharp
+  using (var store = new DocumentStore())
+  {
+      // Define the connection string to OpenAI
+      var connectionString = new AiConnectionString
+      {
+          // Connection string name & identifier
+          Name = "open-ai-cs",
+
+          // Connection type
+          ModelType = AiModelType.Chat,
+
+          // OpenAI connection settings
+          OpenAiSettings = new OpenAiSettings(
+              apiKey: "your-api-key",
+              endpoint: "https://api.openai.com/v1",
+              // LLM model for text generation
+              model: "gpt-4.1")
+      };
+
+      // Deploy the connection string to the server
+      var operation = new PutConnectionStringOperation<AiConnectionString>(connectionString);
+      var putConnectionStringResult = store.Maintenance.Send(operation);
+  }
+  ```
+
+  ```csharp
+  using (var store = new DocumentStore())
+  {
+      // Define the connection string to Ollama
+      var connectionString = new AiConnectionString
+      {
+          // Connection string name & identifier
+          Name = "ollama-cs",
+
+          // Connection type
+          ModelType = AiModelType.Chat,
+
+          // Ollama connection settings
+          OllamaSettings = new OllamaSettings(
+              // LLM Ollama model for text generation
+              model: "llama3.2",
+              // local URL
+              uri: "http://localhost:11434/")
+      };
+
+      // Deploy the connection string to the server
+      var operation = new PutConnectionStringOperation<AiConnectionString>(connectionString);
+      var putConnectionStringResult = store.Maintenance.Send(operation);
+  }
+  ```
+
+* **Syntax**
+
+  ```csharp
+  public class AiConnectionString
+  {
+      public string Name { get; set; }
+      public AiModelType ModelType { get; set; }
+      public string Identifier { get; set; }
+      public OpenAiSettings OpenAiSettings { get; set; }
+      ...
+  }
+
+  public class OpenAiSettings : AbstractAiSettings
+  {
+      public string ApiKey { get; set; }
+      public string Endpoint { get; set; }
+      public string Model { get; set; }
+      public int? Dimensions { get; set; }
+      public string OrganizationId { get; set; }
+      public string ProjectId { get; set; }
+  }
+  ```
+
+  ```csharp
+  public class AiConnectionString
+  {
+      public string Name { get; set; }
+      public AiModelType ModelType { get; set; }
+      public string Identifier { get; set; }
+      public OllamaSettings OllamaSettings { get; set; }
+      ...
+  }
+
+  public class OllamaSettings : AbstractAiSettings
+  {
+      public string Model { get; set; }
+      public string Uri { get; set; }
+  }
+  ```
+
+## Defining an agent configuration
+
+To create an AI agent, you need to prepare an **agent configuration** and populate it with
+your settings and tools.
+
+Start by creating a new `AiAgentConfiguration` instance.
+While creating the instance, pass its constructor:
+
+- The agent's name
+- The [connection string](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#creating-a-connection-string) you created
+- A system prompt
+
+The agent will send the system prompt you define here to the LLM to establish its basic characteristics, including its role, purpose, behavior, and the tools it can use.
+
+* **Example**
+  ```csharp
+  // Start setting an agent configuration
+  var agent = new AiAgentConfiguration("reward-productive-employee", connectionString.Name,
+      @"You work for a human experience manager.
+      The manager uses your services to find which employee has made the largest profit and to suggest
+      a reward.
+      The manager provides you with the name of a country, or with the word ""everywhere"" to indicate
+      all countries.
+      Then you:
+      1. use a query tool to load all the orders sent to the selected country,
+         or a query tool to load all orders sent to all countries.
+      2. calculate which employee made the largest profit.
+      3. use a query tool to learn in what general area this employee lives.
+      4. find suitable vacation sites or other rewards based on the employee's residence area.
+      5. use an action tool to store in the database the employee's ID, profit, and your reward suggestions.
+      When you're done, return these details in your answer to the user as well.");
+  ```
+
+* `AiAgentConfiguration` constructor
+  ```csharp
+  public AiAgentConfiguration(string name, string connectionStringName, string systemPrompt);
+  ```
+
+* `AiAgentConfiguration` class
+  ```csharp
+  public class AiAgentConfiguration
+  {
+      // A unique identifier given to the AI agent configuration
+      public string Identifier { get; set; }
+
+      // The name of the AI agent configuration
+      public string Name { get; set; }
+
+      // Connection string name
+      public string ConnectionStringName { get; set; }
+
+      // The system prompt that defines the role and purpose of the agent and the LLM
+      public string SystemPrompt { get; set; }
+
+      // An example object that sets the layout for the LLM's response to the user.
+      // The object is translated to a schema before it is sent to the LLM.
+      public string SampleObject { get; set; }
+
+      // A schema that sets the layout for the LLM's response to the user.
+      // If both a sample object and a schema are defined, only the schema is used.
+      public string OutputSchema { get; set; }
+
+      // A list of Query tools that the LLM can use (through the agent) to access the database
+      public List<AiAgentToolQuery> Queries { get; set; } = new List<AiAgentToolQuery>();
+
+      // A list of Action tools that the LLM can use to trigger the user to action
+      public List<AiAgentToolAction> Actions { get; set; } = new List<AiAgentToolAction>();
+
+      // Agent parameters whose values the client passes to the LLM each time a chat is started,
+      // for stricter control over queries initiated by the LLM and as a means for interaction
+      // between the client and the LLM.
+      public List<AiAgentParameter> Parameters { get; set; } = new List<AiAgentParameter>();
+
+      // The trimming configuration defines if and how the conversation is summarized,
+      // to minimize the amount of data passed to the LLM when a conversation is started.
+      public AiAgentChatTrimmingConfiguration ChatTrimming { get; set; } = new
+          AiAgentChatTrimmingConfiguration(new AiAgentSummarizationByTokens());
+
+      // Control over the number of times that the LLM is allowed to use agent tools to handle
+      // a user prompt.
+      public int? MaxModelIterationsPerCall { get; set; }
+  }
+  ```
+
+Once the initial agent configuration is created, we need to add a few additional elements to it.
+
+### Set the agent ID:
+Use the `Identifier` property to provide the agent with a unique ID by which the
+system will recognize it.
+
+```csharp
+// Set agent ID
+agent.Identifier = "reward-productive-employee";
+```
+
+### Define a response object:
+Define a [structured output](https://platform.openai.com/docs/guides/structured-outputs) response object that the LLM will populate with its response to the user.
+
+To define the response object, you can use the `SampleObject` and/or the `OutputSchema` property.
+* `SampleObject` is a straightforward sample of the response object that you expect the LLM to return.
+  It is usually simpler to define the response object this way.
+* `OutputSchema` is a formal JSON schema that the LLM can understand.
+  Even when you define the response object as a `SampleObject`, RavenDB translates the object to a JSON schema before sending it to the LLM. If you prefer, however, you can define the schema explicitly yourself.
+* If you define both a sample object and a schema, the agent will send only the schema to the LLM.
+
+```csharp
+// Set sample object
+agent.SampleObject = "{" +
+                         "\"suggestedReward\": \"your suggestions for a reward\", " +
+                         "\"employeeId\": \"the ID of the employee that made the largest profit\", " +
+                         "\"profit\": \"the profit the employee made\"" +
+                     "}";
+```
+
+```csharp
+// Set output schema
+agent.OutputSchema = "{" +
+    "\"name\": \"RHkxaWo5ZHhMM1RuVnIzZHhxZm9vM0c0UnYrL0JWbkhyRDVMd0tJa1g4Yz0\", " +
+    "\"strict\": true, " +
+    "\"schema\": {" +
+        "\"type\": \"object\", " +
+        "\"properties\": {" +
+            "\"employeeId\": {" +
+                "\"type\": \"string\", " +
+                "\"description\": \"the ID of the employee that made the largest profit\"" +
+            "}, " +
+            "\"profit\": {" +
+                "\"type\": \"string\", " +
+                "\"description\": \"the profit the employee made\"" +
+            "}, " +
+            "\"suggestedReward\": {" +
+                "\"type\": \"string\", " +
+                "\"description\": \"your suggestions for a reward\"" +
+            "}" +
+        "}, " +
+        "\"required\": [" +
+            "\"employeeId\", " +
+            "\"profit\", " +
+            "\"suggestedReward\"" +
+        "], " +
+        "\"additionalProperties\": false" +
+    "}" +
+"}";
+```
+
+### Add agent parameters:
+Agent parameters can be used by [query tools](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#query-tools) when the agent queries the database on behalf of the LLM.
+Values for agent parameters are provided by the client, or by a user through the client,
+when a chat is started.
+When the agent is requested to use a query tool that uses agent parameters, it replaces these parameters with the values provided by the user before running the query.
+Using agent parameters allows the client to focus the queries, and the entire interaction, on its current needs.
+
+In the example below, an agent parameter is used to determine what area
+of the world a query will handle.
+
+To add an agent parameter, create an `AiAgentParameter` instance, initialize it with
+the parameter's **name** and **description** (explaining to the LLM what the parameter
+is for), and pass this instance to the `agent.Parameters.Add` method.
+
+* **Example**
+  ```csharp
+  // Set agent parameters
+  agent.Parameters.Add(new AiAgentParameter(
+      "country", "A specific country that orders were shipped to, " +
+                 "or \"everywhere\" to look for orders shipped to all countries"));
+  ```
+
+* `AiAgentParameter` definition
+  ```csharp
+  public AiAgentParameter(string name, string description);
+  ```
+
+### Set maximum number of iterations:
+You can limit the number of times that the LLM is allowed to request the usage of
+agent tools in response to a single user prompt. Use `MaxModelIterationsPerCall` to change this limit.
+
+* **Example**
+  ```csharp
+  // Limit the number of times the LLM can request tools in response to a single user prompt
+  agent.MaxModelIterationsPerCall = 3;
+  ```
+
+* `MaxModelIterationsPerCall` definition
+  ```csharp
+  public int? MaxModelIterationsPerCall
+  ```
+
+Note that you can improve the TTFB (Time To First Byte) by getting the LLM's response in chunks using streaming.
+Find more about streaming in the [overview](../../../ai-integration/ai-agents/ai-agents_overview#streaming-llm-responses) and [below](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#stream-llm-responses).
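+
+Taken together, the settings above are applied directly to the configuration instance. As a quick checkpoint, here is a minimal sketch that combines them (all values are illustrative, not required):
+
+```csharp
+// A configuration checkpoint - a sketch only, using the calls shown above
+agent.Identifier = "reward-productive-employee";   // unique agent ID
+agent.SampleObject = "{ \"suggestedReward\": \"...\", \"employeeId\": \"...\", \"profit\": \"...\" }";
+agent.Parameters.Add(new AiAgentParameter(
+    "country", "A specific country, or \"everywhere\" for all countries"));
+agent.MaxModelIterationsPerCall = 3;               // cap tool-use iterations per prompt
+```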
+
+
+### Set chat trimming configuration:
+
+To [summarize the conversation](../../../ai-integration/ai-agents/ai-agents_overview#define-a-chat-trimming-configuration), create an `AiAgentChatTrimmingConfiguration` instance,
+use it to configure your trimming strategy, and set the agent's `ChatTrimming` property
+with the instance.
+
+When creating the instance, pass its constructor a summarization strategy using
+an `AiAgentSummarizationByTokens` instance.
+
+The original conversation, before it was summarized, can optionally be
+kept in the `@conversations-history` collection.
+To determine whether to keep the original messages and for how long, also pass the
+`AiAgentChatTrimmingConfiguration` constructor an `AiAgentHistoryConfiguration` instance
+with your settings.
+
+* **Example**
+  ```csharp
+  // Set chat trimming configuration
+  AiAgentSummarizationByTokens summarization = new AiAgentSummarizationByTokens()
+  {
+      // When the number of tokens stored in the conversation exceeds this limit,
+      // summarization of old messages will be triggered.
+      MaxTokensBeforeSummarization = 32768,
+      // The maximum number of tokens that the conversation is allowed to contain
+      // after summarization.
+      MaxTokensAfterSummarization = 1024
+  };
+  agent.ChatTrimming = new AiAgentChatTrimmingConfiguration(summarization);
+  ```
+
+* **Syntax**
+  ```csharp
+  public class AiAgentSummarizationByTokens
+  {
+      // The maximum number of tokens allowed before summarization is triggered.
+      public long? MaxTokensBeforeSummarization { get; set; }
+
+      // The maximum number of tokens allowed in the generated summary.
+      public long? MaxTokensAfterSummarization { get; set; }
+  }
+
+  public class AiAgentHistoryConfiguration
+  {
+      // Enables history for AI agent conversations.
+      public AiAgentHistoryConfiguration();
+
+      // Enables history for AI agent conversations,
+      // with `expiration` determining the timespan after which history documents expire.
+      public AiAgentHistoryConfiguration(TimeSpan expiration);
+
+      // The time (in seconds) after which history documents expire.
+      public int? HistoryExpirationInSec { get; set; }
+  }
+  ```
+
+## Adding agent tools
+
+You can enhance your agent with Query and Action tools, which allow the LLM to query your database and trigger client actions.
+After defining agent tools and submitting them to the LLM, it is up to the LLM to decide if and when to use them.
+
+### Query tools:
+
+[Query tools](../../../ai-integration/ai-agents/ai-agents_overview#query-tools) provide the LLM with the ability to retrieve data from the database.
+A query tool includes a natural-language **description** that explains to the LLM what the tool is for, and an **RQL query**.
+
+* **Passing values to query tools**
+  * Query tools optionally include [parameters](../../../ai-integration/ai-agents/ai-agents_overview#query-parameters), identified by a `$` prefix.
+    Both the user and the LLM can pass values to these parameters.
+  * **Passing values from the user**
+    Users can pass values to queries through **agent parameters**.
+    If agent parameters are defined in the agent configuration -
+    * The client has to provide values for them when initiating a conversation with the agent.
+    * The parameters can be included in query tools' RQL queries.
+      Before running a query, the agent will replace any agent parameter included in it with its value.
+  * **Passing values from the LLM**
+    The LLM can pass values to queries through a **parameters schema**.
+    * The parameters schema layout is defined as part of the query tool.
+    * When the LLM requests the agent to run a query, it will add parameter values to the request.
+    * You can define a parameters schema either as a **sample object** or a **formal JSON schema**.
+      If you define both, the LLM will pass parameter values only through the JSON schema.
+    * Before running a query, the agent will replace any parameter included in it with its value.
+
+* **Example**
+  * The first query tool will be used by the LLM when it needs to retrieve all the
+    orders sent to any place in the world. (The system prompt instructs it to use this
+    tool when the user enters "everywhere" at the start of the conversation.)
+  * The second query tool will be used by the LLM when it needs to retrieve all the
+    orders that were sent to a particular country, using the `$country` agent parameter.
+  * The third tool retrieves from the database the general location of an employee.
+    To do this it uses a `$employeeId` parameter, whose value is set by the LLM in its
+    request to run this tool.
+
+  ```csharp
+  agent.Queries =
+  [
+      // Set a query tool that triggers the agent to retrieve all the orders sent everywhere
+      new AiAgentToolQuery
+      {
+          // Query tool name
+          Name = "retrieve-orders-sent-to-all-countries",
+
+          // Query tool description
+          Description = "a query tool that allows you to retrieve all orders sent to all countries.",
+
+          // Query tool RQL query
+          Query = "from Orders as O select O.Employee, O.Lines.Quantity",
+
+          // Sample parameters object for the query tool
+          // The LLM can use this object to pass parameters to the query tool
+          ParametersSampleObject = "{}"
+      },
+
+      // Set a query tool that triggers the agent to retrieve all the orders sent to a
+      // specific country
+      new AiAgentToolQuery
+      {
+          Name = "retrieve-orders-sent-to-a-specific-country",
+          Description = "a query tool that allows you to retrieve all orders sent " +
+                        "to a specific country",
+          Query = "from Orders as O where O.ShipTo.Country == $country select O.Employee, " +
+                  "O.Lines.Quantity",
+          ParametersSampleObject = "{}"
+      },
+
+      // Set a query tool that triggers the agent to retrieve the performer's
+      // residence region details (country, city, and region) from the database
+      new AiAgentToolQuery
+      {
+          Name = "retrieve-performer-living-region",
+          Description = "a query tool that allows you to retrieve an employee's country, " +
+                        "city, and region, by the employee's ID",
+          Query = "from Employees as E where id() == $employeeId select E.Address.Country, " +
+                  "E.Address.City, E.Address.Region",
+          ParametersSampleObject = "{" +
+                                       "\"employeeId\": \"embed the employee's ID here\"" +
+                                   "}"
+      }
+  ];
+  ```
+
+* **Syntax**
+  Query tools are defined in a list of `AiAgentToolQuery` classes.
+  ```csharp
+  public class AiAgentToolQuery
+  {
+      public string Name { get; set; }
+      public string Description { get; set; }
+      public string Query { get; set; }
+      public string ParametersSampleObject { get; set; }
+      public string ParametersSchema { get; set; }
+  }
+  ```
+
+#### Initial-context queries
+
+* You can set a query tool as an [initial-context query](../../../ai-integration/ai-agents/ai-agents_overview#initial-context-queries) using its `Options.AddToInitialContext` property, to execute the query and provide the LLM with its results immediately when the agent is started.
+  * An initial-context query is **not allowed** to use LLM parameters, since the query
+    runs before the conversation starts, earlier than the first communication with the LLM, and the LLM will have no opportunity to fill the parameters with values.
+  * An initial-context query **is** allowed to use agent parameters, whose values are provided by the user even before the query is executed.
+
+* You can use the `Options.AllowModelQueries` property to enable or disable a query tool.
+  * When a query tool is enabled, the LLM can freely trigger its execution.
+  * When a query tool is disabled, the LLM cannot trigger its execution.
+  * If a query tool is set as an initial-context query, it will be executed when the conversation
+    starts even if disabled using `AllowModelQueries`.
+
+* **Example**
+  Set a query tool that runs when the agent is started and retrieves all the orders sent everywhere.
+  ```csharp
+  new AiAgentToolQuery
+  {
+      Name = "retrieve-orders-sent-to-all-countries",
+      Description = "a query tool that allows you to retrieve all orders sent to all countries.",
+      Query = "from Orders as O select O.Employee, O.Lines.Quantity",
+      ParametersSampleObject = "{}",
+
+      Options = new AiAgentToolQueryOptions
+      {
+          // The LLM is allowed to trigger the execution of this query during the conversation
+          AllowModelQueries = true,
+
+          // The query will be executed when the conversation starts
+          // and its results will be added to the initial context
+          AddToInitialContext = true
+      }
+  }
+  ```
+
+* **Syntax**
+  ```csharp
+  public class AiAgentToolQueryOptions : IDynamicJson
+  {
+      public bool? AllowModelQueries { get; set; }
+      public bool? AddToInitialContext { get; set; }
+  }
+  ```
+
+  |Property|Type|Description|
+  |--------|----|-----------|
+  |`AllowModelQueries`|`bool`| `true`: the LLM can trigger the execution of this query tool.<br/>`false`: the LLM cannot trigger the execution of this query tool.<br/>`null`: server-side defaults apply.|
+  |`AddToInitialContext`|`bool`| `true`: the query will be executed when the conversation starts and its results added to the initial context.<br/>`false`: the query will not be executed when the conversation starts.<br/>`null`: server-side defaults apply.|
+
+  Note: the two flags can be set independently of each other.
+  * Setting `AddToInitialContext` to `true` and `AllowModelQueries` to `false`
+    will cause the query to be executed when the conversation starts,
+    but the LLM will not be able to trigger its execution later in the conversation.
+  * Setting `AddToInitialContext` to `true` and `AllowModelQueries` to `true`
+    will cause the query to be executed when the conversation starts,
+    and the LLM will also be able to trigger its execution later in the conversation.
+
+### Action tools:
+
+Action tools allow the LLM to trigger the client to act (e.g., to modify or add a document).
+An action tool includes a natural-language **description** that explains to the LLM what the tool can do, and a **schema** that the LLM will fill with details related to the requested action before sending it to the agent.
+
+In the example below, the action tool requests the client to store an employee's details
+in the database. The LLM will provide the employee's ID and other details whenever it requests the agent
+to apply the tool.
+
+When the client finishes performing the action, it is required to send the LLM
+a response indicating how the action went, e.g. `done`.
+
+* **Example**
+  The following action tool sends to the client the employee details that the client needs to store in the database.
+  ```csharp
+  agent.Actions =
+  [
+      // Set an action tool that triggers the client to store the performer's details
+      new AiAgentToolAction
+      {
+          Name = "store-performer-details",
+          Description = "an action tool that allows you to store the ID of the employee that made " +
+                        "the largest profit, the profit, and your suggestions for a reward, in the " +
+                        "database.",
+          ParametersSampleObject = "{" +
+                                       "\"suggestedReward\": \"embed your suggestions for a reward here\", " +
+                                       "\"employeeId\": \"embed the employee's ID here\", " +
+                                       "\"profit\": \"embed the employee's profit here\"" +
+                                   "}"
+      }
+  ];
+  ```
+
+* **Syntax**
+  Action tools are defined in a list of `AiAgentToolAction` classes.
+  ```csharp
+  public class AiAgentToolAction
+  {
+      public string Name { get; set; }
+      public string Description { get; set; }
+      public string ParametersSampleObject { get; set; }
+      public string ParametersSchema { get; set; }
+  }
+  ```
+
+## Creating the Agent
+
+The agent configuration is ready, and we can now register the agent on the server
+using the `CreateAgent` method.
+
+* Create a response object class that matches the response schema defined in your agent configuration.
+* Call `CreateAgent` and pass it -
+  * The agent configuration
+  * A new instance of the response object class
+
+* **Example**
+  ```csharp
+  // Create the agent
+  // Pass it an object for its response
+  var createResult = await store.AI.CreateAgentAsync(agent, new Performer
+  {
+      suggestedReward = "your suggestions for a reward",
+      employeeId = "the ID of the employee that made the largest profit",
+      profit = "the profit the employee made"
+  });
+
+  // An object for the LLM response
+  public class Performer
+  {
+      public string suggestedReward;
+      public string employeeId;
+      public string profit;
+  }
+  ```
+
+* `CreateAgent` overloads
+  ```csharp
+  // Asynchronously creates or updates an AI agent configuration on the database,
+  // with the given schema as an example for a response object
+  Task<AiAgentConfigurationResult> CreateAgentAsync<TSchema>(AiAgentConfiguration configuration, TSchema sampleObject, CancellationToken token = default)
+
+  // Creates or updates (synchronously) an AI agent configuration on the database
+  AiAgentConfigurationResult CreateAgent(AiAgentConfiguration configuration)
+
+  // Asynchronously creates or updates an AI agent configuration on the database
+  Task<AiAgentConfigurationResult> CreateAgentAsync(AiAgentConfiguration configuration, CancellationToken token = default)
+
+  // Creates or updates (synchronously) an AI agent configuration on the database,
+  // with the given schema as an example for a response object
+  AiAgentConfigurationResult CreateAgent<TSchema>(AiAgentConfiguration configuration, TSchema sampleObject) where TSchema : new()
+  ```
+
+  | Property | Type | Description |
+  |----------|------|-------------|
+  | configuration | `AiAgentConfiguration` | The agent configuration |
+  | sampleObject | `TSchema` | Example response object |
+
+  | Return value | Description |
+  |--------------|-------------|
+  | `AiAgentConfigurationResult` | The result of the agent configuration creation or update, including the agent's ID. |
+
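+Because `CreateAgent` and `CreateAgentAsync` create **or update** a configuration, re-sending a modified configuration under the same identifier is one way to change an existing agent. A sketch, assuming the `agent` instance and `Performer` class from the example above:
+
+```csharp
+// Modify the configuration and re-register it under the same identifier - a sketch
+agent.MaxModelIterationsPerCall = 5;
+
+var updateResult = await store.AI.CreateAgentAsync(agent, new Performer
+{
+    suggestedReward = "your suggestions for a reward",
+    employeeId = "the ID of the employee that made the largest profit",
+    profit = "the profit the employee made"
+});
+```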
+
+## Retrieving existing agent configurations
+
+You can retrieve the configuration of **an existing agent** using `GetAgent`.
+
+* **Example**
+  ```csharp
+  // Retrieve an existing agent configuration by its ID
+  var existingAgent = store.AI.GetAgent("reward-productive-employee");
+  ```
+
+You can also retrieve the configurations of **all existing agents** using `GetAgents`.
+
+* **Example**
+  ```csharp
+  // Extract the agent configurations from the response into a new list
+  var existingAgentsList = store.AI.GetAgents();
+  var agents = existingAgentsList.AiAgents;
+  ```
+
+* `GetAgent` and `GetAgents` overloads
+  ```csharp
+  // Synchronously retrieves the configuration of an AI agent by its ID
+  AiAgentConfiguration GetAgent(string agentId)
+
+  // Asynchronously retrieves the configuration of an AI agent by its ID
+  Task<AiAgentConfiguration> GetAgentAsync(string agentId, CancellationToken token = default)
+
+  // Synchronously retrieves the configurations of all AI agents
+  GetAiAgentsResponse GetAgents()
+
+  // Asynchronously retrieves the configurations of all AI agents
+  Task<GetAiAgentsResponse> GetAgentsAsync(CancellationToken token = default)
+  ```
+
+  | Property | Type | Description |
+  |----------|------|-------------|
+  | agentId | `string` | The unique ID of the agent you want to retrieve |
+
+  | Return value | Description |
+  |--------------|-------------|
+  | `AiAgentConfiguration` | The agent configuration |
+  | `GetAiAgentsResponse` | The response containing a list of all agent configurations |
+
+* `GetAiAgentsResponse` class
+  ```csharp
+  public class GetAiAgentsResponse
+  {
+      public List<AiAgentConfiguration> AiAgents { get; set; }
+  }
+  ```
+
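+For example, a short sketch that lists the name and identifier of every agent defined on the database (assuming the `store` instance from the examples above):
+
+```csharp
+// List all agents defined on the database - a sketch
+var response = store.AI.GetAgents();
+foreach (var agentConfig in response.AiAgents)
+{
+    Console.WriteLine($"{agentConfig.Name} ({agentConfig.Identifier})");
+}
+```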
+
+## Managing conversations
+
+### Setting a conversation:
+
+* Set a conversation using the `store.AI.Conversation` method.
+  Pass the `Conversation` method:
+  * The **agent ID**
+  * The **conversation ID**
+    The conversation ID that you provide when starting a conversation determines whether a new conversation will start, or an existing conversation will be continued.
+    * Conversations are kept in the `@conversations` collection.
+      A conversation document's name starts with a prefix (such as `Chats/`) that can be
+      set when the conversation is initiated.
+    * You can -
+      **Provide a full ID**, including a prefix and the ID that follows it.
+      **Provide a prefix that ends with `/` or `|`** to trigger automatic ID creation,
+      similarly to the creation of automatic IDs for documents.
+    * If you pass the method the ID of an existing conversation (e.g. `Chats/0000000000000008883-A`),
+      the conversation will be retrieved from storage and continued where you left off.
+    * If you provide only a prefix (e.g. `Chats/`), a new conversation will start.
+  * Values for **agent parameters**, if defined, in an `AiConversationCreationOptions` instance.
+* Set the user prompt using the `SetUserPrompt` method.
+  The user prompt informs the agent of the user's requests and expectations for this chat.
+* Use the value returned by the `Conversation` method to run the chat.
+
+* **Example**
+  ```csharp
+  // Create a conversation instance
+  // Initialize it with -
+  // The agent's ID,
+  // A prefix (Performers/) for conversations stored in the @conversations collection,
+  // Agent parameters' values
+  var chat = store.AI.Conversation(
+      createResult.Identifier,
+      "Performers/",
+      new AiConversationCreationOptions().AddParameter("country", "France"));
+  ```
+
+* `Conversation` Definition
+  ```csharp
+  public IAiConversationOperations Conversation(string agentId, string conversationId, AiConversationCreationOptions creationOptions, string changeVector = null)
+  ```
+
+  | Property | Type | Description |
+  |----------|------|-------------|
+  | agentId | `string` | The agent unique ID |
+  | conversationId | `string` | The [conversation ID](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#setting-a-conversation) |
+  | creationOptions | `AiConversationCreationOptions` | Conversation creation options (see class definition below) |
+  | changeVector | `string` | Optional change vector for concurrency control |
+
+  | Return value | Description |
+  |--------------|-------------|
+  | `IAiConversationOperations` | The conversation operations interface for conversation management.<br/>Methods of this interface, like `Run`, `StreamAsync`, `Handle`, and others, allow you to send messages, receive responses, handle action tools, and manage various other aspects of the conversation lifecycle. |
+
+* `SetUserPrompt` Definition
+  ```csharp
+  void SetUserPrompt(string userPrompt);
+  ```
+* `AiConversationCreationOptions` class
+  Use this class to set conversation creation options, including values for agent parameters and the conversation's expiration time if it remains idle.
+  ```csharp
+  // Conversation creation options, including agent parameters and idle expiration configuration
+  public class AiConversationCreationOptions
+  {
+      // Values for agent parameters defined in the agent configuration
+      // Used to provide context or input values at the start of the conversation
+      public Dictionary<string, object> Parameters { get; set; }
+
+      // Optional expiration time (in seconds)
+      // If the conversation is idle for longer than this, it will be automatically deleted
+      public int? ExpirationInSec { get; set; }
+
+      // Initializes a new conversation instance with no parameters
+      // Use when you want to configure conversation options incrementally
+      public AiConversationCreationOptions();
+
+      // Initializes a new conversation instance and passes it a set of parameter values
+      public AiConversationCreationOptions(Dictionary<string, object> parameters);
+
+      // Adds an agent parameter value for this conversation
+      // Returns the current instance to allow method chaining
+      public AiConversationCreationOptions AddParameter(string name, object value);
+  }
+  ```
+
+### Processing action-tool requests:
+During the conversation, the LLM can request the agent to trigger action tools.
+The agent will pass a requested action tool's name and parameters to the client,
+and it is then up to the client to process the request.
+
+The client can process an action-tool request using a [handler](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#action-tool-handlers) or a [receiver](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#action-tool-receivers).
+
+#### Action-tool Handlers
+A **handler** is created for a specific action tool and registered with the conversation using the `Handle` method.
+When the LLM triggers this action tool with an action request, the handler is invoked to process the request, returns a response to the LLM, and ends automatically.
+
+Handlers are typically used for simple, immediate operations: storing a document in the database and returning a confirmation, performing a quick calculation and sending its results, and other scenarios where the response can be generated and returned in a single step.
+
+* To **create a handler**,
+  pass the `Handle` method -
+  * The action tool's name.
+  * An object to populate with the data sent with the action request.
+    Make sure that the object has the same structure as defined in the action tool's parameters schema.
+
+* When an **action request for this tool is received**,
+  the handler will be given -
+  * The populated object with the data sent with the action request.
+
+* When you **finish handling the requested action**,
+  `return` a response that will be sent by the agent back to the LLM.
+
+* **Example**
+  In this example, the action tool is requested to store an employee's details in the database.
+  ```csharp
+  // "store-performer-details" action tool handler
+  chat.Handle("store-performer-details", (Performer performer) =>
+  {
+      using (var session = store.OpenSession())
+      {
+          // Store the values in the Performers collection in the database
+          session.Store(performer);
+          session.SaveChanges();
+      }
+
+      // Return to the agent an indication that the action went well
+      return "done";
+  });
+
+  // An object that represents the arguments provided by the LLM for this tool call
+  public class Performer
+  {
+      public string suggestedReward;
+      public string employeeId;
+      public string profit;
+  }
+  ```
+* `Handle` overloads
+  ```csharp
+  void Handle(string actionName, Func<Task<string>> action, AiHandleErrorStrategy aiHandleError = AiHandleErrorStrategy.SendErrorsToModel)
+
+  void Handle<TArgs>(string actionName, Func<TArgs, string> action, AiHandleErrorStrategy aiHandleError = AiHandleErrorStrategy.SendErrorsToModel) where TArgs : class;
+
+  void Handle(string actionName, Func<string> action, AiHandleErrorStrategy aiHandleError = AiHandleErrorStrategy.SendErrorsToModel)
+
+  void Handle<TArgs>(string actionName, Func<TArgs, Task<string>> action, AiHandleErrorStrategy aiHandleError = AiHandleErrorStrategy.SendErrorsToModel) where TArgs : class;
+  ```
+
+  | Property | Type | Description |
+  |----------|------|-------------|
+  | actionName | `string` | The action tool name |
+  | action | `Func<Task<string>>`, `Func<TArgs, string>`, `Func<string>`, or `Func<TArgs, Task<string>>` | The handler function that processes the action request and returns a response to the LLM |
+  | aiHandleError | `AiHandleErrorStrategy` | Errors handling strategy.<br/>`SendErrorsToModel` - Send errors to the model for handling.<br/>`RaiseImmediately` - Throw error exceptions.|
+
+#### Action-tool Receivers
+A **receiver** is created for a specific action tool and registered with the conversation using the `Receive` method.
+When the LLM triggers this action tool with an action request, the receiver is invoked to process the request, but unlike a handler, the receiver remains active until `AddActionResponse` is explicitly called to close the pending request and send a response to the LLM.
+
+Receivers are typically used asynchronously, for multi-step or delayed operations: waiting for an external event or for user input before responding, performing long-running operations like batch processing or integration with an external system, and other use cases where the response cannot be generated immediately.
+
+* To **create a receiver**,
+  pass the `Receive` method -
+  * The action tool's name.
+  * An object to populate with the data sent with the action request.
+    Make sure that this object has the same structure as defined in the action tool's parameters schema.
+
+* When an **action request for this tool is received**,
+  the receiver will be given -
+  * An `AiAgentActionRequest` object containing the details of the action request.
+  * The populated object with the data sent with the action request.
+
+* When you **finish handling the requested action**,
+  call `AddActionResponse`. Pass it -
+  * The action tool's ID.
+  * The response to send back to the LLM.
+
+  Note that the response can be sent at any time, even after the receiver has finished executing,
+  and from any context, not necessarily from within the receiver callback.
+
+* **Example**
+  In this example, a receiver gets a recommendation for rewards that can be given to a productive employee and processes it.
+
+  ```csharp
+  chat.Receive("store-performer-details", async (AiAgentActionRequest request, Performer performer) =>
+  {
+      // Perform asynchronous work
+      using (var session = store.OpenAsyncSession())
+      {
+          await session.StoreAsync(performer);
+          await session.SaveChangesAsync();
+      }
+
+      // Example: Send a notification email asynchronously
+      await EmailService.SendNotificationAsync("manager@company.com", performer);
+
+      // Manually send the response to close the action
+      chat.AddActionResponse(request.ToolId, "done");
+  });
+  ```
+
+  ```csharp
+  chat.Receive("store-performer-details", (AiAgentActionRequest request, Performer performer) =>
+  {
+      // Perform synchronous work
+      using (var session = store.OpenSession())
+      {
+          session.Store(performer);
+          session.SaveChanges();
+      }
+
+      // Add any processing logic here
+
+      // Manually send the response and close the action
+      chat.AddActionResponse(request.ToolId, "done");
+  });
+  ```
+
+* `Receive` overloads
+  ```csharp
+  // Registers an asynchronous receiver for an action tool
+  void Receive<TArgs>(string actionName, Func<AiAgentActionRequest, TArgs, Task> action, AiHandleErrorStrategy aiHandleError = AiHandleErrorStrategy.SendErrorsToModel)
+
+  // Registers a synchronous receiver for an action tool
+  void Receive<TArgs>(string actionName, Action<AiAgentActionRequest, TArgs> action, AiHandleErrorStrategy aiHandleError = AiHandleErrorStrategy.SendErrorsToModel)
+  ```
+
+  | Property | Type | Description |
+  |----------|------|-------------|
+  | actionName | `string` | The action tool name |
+  | action | `Func<AiAgentActionRequest, TArgs, Task>` or `Action<AiAgentActionRequest, TArgs>` | The receiver function that processes the action request |
+  | aiHandleError | `AiHandleErrorStrategy` | Errors handling strategy.<br/>`SendErrorsToModel` - Send errors to the model for handling.<br/>`RaiseImmediately` - Throw error exceptions.|
+
+* `AddActionResponse` Definition
+  ```csharp
+  // Closes the action request and sends the response back to the LLM
+  void AddActionResponse(string toolId, string actionResponse)
+  ```
+
+  | Property | Type | Description |
+  |----------|------|-------------|
+  | toolId | `string` | The action request unique ID |
+  | actionResponse | `string` | The response to send back to the LLM through the agent |
+
+* `AiAgentActionRequest` class
+  Contains the action request details, sent by the LLM to the agent and passed to the receiver when invoked.
+  ```csharp
+  public class AiAgentActionRequest
+  {
+      // Action tool name
+      public string Name;
+
+      // Action tool unique ID
+      public string ToolId;
+
+      // Request arguments provided by the LLM
+      public string Arguments;
+  }
+  ```
+
+### Conversation response:
+
+The LLM response is returned by the agent to the client in an `AiAnswer` object, with an answer to the user prompt and the conversation status, indicating whether the conversation is complete or a further "turn" is required.
+
+* `AiAnswer` syntax
+  ```csharp
+  public class AiAnswer<TAnswer>
+  {
+      // The answer content produced by the AI
+      public TAnswer Answer;
+
+      // The status of the conversation
+      public AiConversationResult Status;
+  }
+
+  public enum AiConversationResult
+  {
+      // The conversation has completed and a final answer is available
+      Done,
+      // Further interaction is required, such as responding to tool requests
+      ActionRequired
+  }
+  ```
+
+### Setting user prompt and running the conversation:
+
+Set the user prompt using the `SetUserPrompt` method, and run the conversation using the
+`RunAsync` method.
+
+You can also use `StreamAsync` to **stream** the LLM's response as it is generated.
+Learn how to do this in the [Stream LLM responses](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#stream-llm-responses) section.
+
+```csharp
+// Set the user prompt and run the conversation
+chat.SetUserPrompt("send a few suggestions to reward the employee that made the largest profit");
+
+var LLMResponse = await chat.RunAsync(CancellationToken.None);
+
+if (LLMResponse.Status == AiConversationResult.Done)
+{
+    // The LLM successfully processed the user prompt and returned its response.
+    // The performer's ID, profit, and suggested rewards were stored in the Performers
+    // collection by the action tool, and are also returned in the final LLM response.
+}
+```
+
+See the full example [below](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#full-example).
+
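+When the status is `Done`, the `Answer` property of the returned `AiAnswer` object holds the structured response. A sketch of reading it, assuming the conversation above yields an answer typed as the `Performer` response object:
+
+```csharp
+if (LLMResponse.Status == AiConversationResult.Done)
+{
+    // Read the structured answer - assumes the response is typed as Performer
+    var answer = LLMResponse.Answer;
+    Console.WriteLine($"Employee {answer.employeeId} made {answer.profit}; " +
+                      $"suggested reward: {answer.suggestedReward}");
+}
+```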
+
+## Stream LLM responses
+
+You can set the agent to [stream the LLM's response to the client](../../../ai-integration/ai-agents/ai-agents_overview#streaming-llm-responses) in real time as the LLM generates it, using the `StreamAsync` method, instead of using [RunAsync](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api#setting-user-prompt-and-running-the-conversation), which sends the whole response to the client only when it is fully prepared.
+
+Streaming the response allows the client to start processing it before it is complete, which can improve the application's responsiveness.
+
+* **Example**
+  ```csharp
+  // A StringBuilder, used in this example to collect the streamed response
+  var reward = new StringBuilder();
+
+  // Using StreamAsync to collect the streamed response
+  // The response property to stream in this case is `suggestedReward`
+  var LLMResponse = await chat.StreamAsync(responseObj => responseObj.suggestedReward, str =>
+  {
+      // Callback invoked with the arrival of each incoming chunk of the processed property
+
+      reward.Append(str);          // Add the incoming chunk to the StringBuilder instance
+      return Task.CompletedTask;   // Return with an indication that the chunk was processed
+
+  }, CancellationToken.None);
+
+  if (LLMResponse.Status == AiConversationResult.Done)
+  {
+      // Handle the full response when ready
+
+      // The streamed property was fully loaded and handled by the callback above;
+      // the remaining parts of the response (including other properties, if any)
+      // will arrive when the whole response is ready and can be handled here.
+  }
+  ```
+
+* `StreamAsync` overloads (a usage sketch of the string-based overload follows the tables below):
+
+  ```csharp
+  // The property to stream is indicated using a lambda expression
+  Task<AiAnswer<TAnswer>> StreamAsync
+      (Expression<Func<TAnswer, string>> streamPropertyPath,
+       Func<string, Task> streamedChunksCallback, CancellationToken token = default);
+  ```
+
+  ```csharp
+  // The property to stream is indicated as a string, using its name
+  Task<AiAnswer<TAnswer>> StreamAsync
+      (string streamPropertyPath,
+       Func<string, Task> streamedChunksCallback, CancellationToken token = default);
+  ```
+
+  | Property | Type | Description |
+  |----------|------|-------------|
+  | streamPropertyPath | `Expression<Func<TAnswer, string>>` | A lambda expression that selects the property to stream from the response object.
  • **The selected property must be a simple string** (and not a JSON object or an array, for example).
  • It is recommended that this be the first property defined in the response schema. The LLM processes the properties in the order they are defined; streaming the first property ensures that streaming to the user starts immediately, even if the LLM takes time to process later properties.
| + | streamPropertyPath | `string` | The name of the property in the response object to stream.
  • **The selected property must be a simple string** (and not a JSON object or an array, for example).
  • It is recommended that this be the first property defined in the response schema. The LLM processes the properties in the order they are defined; streaming the first property ensures that streaming to the user starts immediately, even if the LLM takes time to process later properties.
|
+  | streamedChunksCallback | `Func<string, Task>` | A callback function that is invoked with each incoming chunk of the streamed property |
+  | token | `CancellationToken` | An optional token that can be used to cancel the streaming operation |
+
+  | Return value | Description |
+  |--------------|-------------|
+  | `Task<AiAnswer<TAnswer>>` | After streaming the specified property, the return value contains the final conversation result and status (e.g. "Done" or "ActionRequired"). |
+
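+* **Example** - using the string-based overload
+  A minimal sketch of the second overload, selecting the property by name
+  (it assumes the response schema defines a `suggestedReward` property, as in the example above):
+
+  ```csharp
+  // Select the property to stream by its name instead of by a lambda expression
+  var LLMResponse = await chat.StreamAsync("suggestedReward", str =>
+  {
+      Console.Write(str);        // Process each incoming chunk as it arrives
+      return Task.CompletedTask; // Indicate that the chunk was processed
+  }, CancellationToken.None);
+  ```
+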
+
+## Full example
+
+The agent's user in this example is a human experience manager.
+The agent helps its user reward employees: using query tools, it searches for orders
+sent to a certain country (or, if the user prompts it with "everywhere", to all countries)
+and finds the employee that made the largest profit.
+The agent then runs another query tool that uses the employee's ID (fetched from the
+retrieved orders) to find the employee's residence region, and finds rewards
+suitable for the employee based on this region.
+Finally, it uses an action tool to store the employee's ID, profit, and reward
+suggestions in the `Performers` collection in the database, and returns the same
+details in its final response as well.
+
+```csharp
+public async Task createAndRunAiAgent_full()
+{
+    var store = new DocumentStore();
+
+    // Define connection string to OpenAI
+    var connectionString = new AiConnectionString
+    {
+        Name = "open-ai-cs",
+        ModelType = AiModelType.Chat,
+        OpenAiSettings = new OpenAiSettings(
+            apiKey: "your-api-key",
+            endpoint: "https://api.openai.com/v1",
+            // LLM model for text generation
+            model: "gpt-4.1")
+    };
+
+    // Deploy connection string to server
+    var operation = new PutConnectionStringOperation(connectionString);
+    var putConnectionStringResult = store.Maintenance.Send(operation);
+
+    using var session = store.OpenAsyncSession();
+
+    // Start setting an agent configuration
+    var agent = new AiAgentConfiguration("reward-productive-employee", connectionString.Name,
+        @"You work for a human experience manager.
+        The manager uses your services to find which employee has made the largest profit and to suggest
+        a reward.
+        The manager provides you with the name of a country, or with the word ""everywhere"" to indicate
+        all countries.
+        Then you:
+        1. use a query tool to load all the orders sent to the selected country,
+           or a query tool to load all orders sent to all countries.
+        2. calculate which employee made the largest profit.
+        3. use a query tool to learn in what general area this employee lives.
+        4. find suitable vacation sites or other rewards based on the employee's residence area.
+        5. use an action tool to store in the database the employee's ID, profit, and your reward suggestions.
+        When you're done, return these details in your answer to the user as well.");
+
+    // Set agent ID
+    agent.Identifier = "reward-productive-employee";
+
+    // Define LLM response object
+    agent.SampleObject = "{" +
+        "\"EmployeeID\": \"embed the employee’s ID here\"," +
+        "\"Profit\": \"embed the profit made by the employee here\"," +
+        "\"SuggestedReward\": \"embed suggested rewards here\"" +
+        "}";
+
+    // Set agent parameters
+    agent.Parameters.Add(new AiAgentParameter(
+        "country", "A specific country that orders were shipped to, " +
+        "or \"everywhere\" to look for orders shipped to all countries"));
+
+    agent.Queries =
+    [
+        // Set a query tool to retrieve all orders sent everywhere
+        new AiAgentToolQuery
+        {
+            // Query tool name
+            Name = "retrieve-orders-sent-to-all-countries",
+
+            // Query tool description
+            Description = "a query tool that allows you to retrieve all orders sent to all countries.",
+
+            // Query tool RQL query
+            Query = "from Orders as O select O.Employee, O.Lines.Quantity",
+
+            // Sample parameters object
+            ParametersSampleObject = "{}"
+        },
+
+        // Set a query tool to retrieve all orders sent to a specific country
+        new AiAgentToolQuery
+        {
+            Name = "retrieve-orders-sent-to-a-specific-country",
+            Description =
+                "a query tool that allows you to retrieve all orders sent to a specific country",
+            Query =
+                "from Orders as O where O.ShipTo.Country == " +
+                "$country select O.Employee, O.Lines.Quantity",
+            ParametersSampleObject = "{}"
+        },
+
+        // Set a query tool to retrieve the performer's residence region details from the database
+        new AiAgentToolQuery
+        {
+            Name = "retrieve-performer-living-region",
+            Description =
+                "a query tool that allows you to retrieve an employee's country, city, and " +
+                "region, by the employee's ID",
+            Query = "from Employees as E where id() == $employeeId select E.Address.Country, " +
+                "E.Address.City, E.Address.Region",
+            ParametersSampleObject = "{" +
+                "\"employeeId\": \"embed the employee's ID here\"" +
+                "}"
+        }
+    ];
+
+    agent.Actions =
+    [
+        // Set an action tool to store the performer's details
+        new AiAgentToolAction
+        {
+            Name = "store-performer-details",
+            Description =
+                "an action tool that allows you to store the ID of the employee that made " +
+                "the largest profit, the profit, and your suggestions for a reward, in the database.",
+            ParametersSampleObject = "{" +
+                "\"suggestedReward\": \"embed your suggestions for a reward here\", " +
+                "\"employeeId\": \"embed the employee’s ID here\", " +
+                "\"profit\": \"embed the employee’s profit here\"" +
+                "}"
+        }
+    ];
+
+    // Set chat trimming configuration
+    AiAgentSummarizationByTokens summarization = new AiAgentSummarizationByTokens()
+    {
+        // Summarize old messages when the number of tokens stored in the conversation exceeds this limit
+        MaxTokensBeforeSummarization = 32768,
+        // Max number of tokens that the conversation is allowed to contain after summarization
+        MaxTokensAfterSummarization = 1024
+    };
+
+    agent.ChatTrimming = new AiAgentChatTrimmingConfiguration(summarization);
+
+    // Limit the number of times the LLM can request tools in response to a single user prompt
+    agent.MaxModelIterationsPerCall = 3;
+
+    var createResult = await store.AI.CreateAgentAsync(agent, new Performer
+    {
+        suggestedReward = "your suggestions for a reward",
+        employeeId = "the ID of the employee that made the largest profit",
+        profit = "the profit the employee made"
+    });
+
+    // Set chat ID, prefix, agent parameters.
+    // (specific country activates one query tool, "everywhere" activates another)
+    var chat = store.AI.Conversation(
+        createResult.Identifier,
+        "Performers/",
+        new AiConversationCreationOptions().AddParameter("country", "France"));
+
+    // Handle the action tool that the LLM uses to store the performer's details in the database
+    chat.Handle("store-performer-details", (Performer performer) =>
+    {
+        using (var session1 = store.OpenSession())
+        {
+            // Store values in the Performers collection in the database
+            session1.Store(performer);
+            session1.SaveChanges();
+        }
+        return "done";
+    });
+
+    // Set user prompt and run chat
+    chat.SetUserPrompt("send a few suggestions to reward the employee that made the largest profit");
+
+    var LLMResponse = await chat.RunAsync(CancellationToken.None);
+
+    if (LLMResponse.Status == AiConversationResult.Done)
+    {
+        // The LLM successfully processed the user prompt and returned its response.
+        // The performer's ID, profit, and suggested rewards were stored in the Performers
+        // collection by the action tool, and are also returned in the final LLM response.
+    }
+}
+```
+
+
diff --git a/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio.mdx b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio.mdx
new file mode 100644
index 0000000000..f98cd48758
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio.mdx
@@ -0,0 +1,377 @@
+---
+title: "Creating AI agents: Studio"
+hide_table_of_contents: true
+sidebar_label: Studio
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# AI Agents Integration: Studio
+
+* In this article:
+  * [Create AI Agent](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#create-ai-agent)
+  * [Configure basic settings](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#configure-basic-settings)
+  * [Set agent parameters](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#set-agent-parameters)
+  * [Define agent tools](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#define-agent-tools)
+    * [Add query tools](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#add-query-tools)
+    * [Add action tools](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#add-action-tools)
+  * [Configure chat trimming](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#configure-chat-trimming)
+  * [Save and Run your agent](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#save-and-run-your-agent)
+    * [Start new chat](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#start-new-chat)
+    * [Agent interaction](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#agent-interaction)
+    * [Action tool dialog](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#action-tool-dialog)
+    * [Agent results](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#agent-results)
+  * [Test your
agent](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#test-your-agent) + * [Runtime view and Test results](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_studio#runtime-view-and-test-results) + + + +
+
+## Create AI Agent
+
+To create an AI agent, open **AI Hub > AI Agents** and click **Add new agent**.
+
+![AI Agents View](./assets/ai-agents_ai-agents-view.png)
+
+1. **AI Hub**
+   Click to open the [AI Hub view](../../../ai-integration/ai-tasks-list-view.mdx).
+   Use this view to handle AI connection strings and tasks, and to view task statistics.
+2. **AI Agents**
+   Click to open the AI Agents view.
+   Use this view to list, configure, or remove your agents.
+3. **Add new agent**
+   Click to add an AI agent.
+
+   The **Create AI Agent** dialog will open, allowing you to define and test your agent.
+
+   ![Create AI Agent](./assets/ai-agents_create-ai-agent.png)
+
+   Use the buttons in the bottom bar to Cancel, Save, or Test your changes.
+
+4. **Filter by name**
+   When multiple agents are created, you can filter them by a string you enter here.
+
+5. **Defined agent**
+   After defining an agent, it is listed in this view, allowing you to run, edit, or remove the agent.
+
+
+## Configure basic settings
+
+![Configure basic settings](./assets/ai-agents_config-basic-settings.png)
+
+1. **Agent name**
+   Enter a name for the agent.
+   E.g., **CustomerSupportAgent**
+
+2. **Identifier**
+   Enter a unique identifier for the agent,
+   or click **Regenerate** to create it automatically.
+
+3. **Connection String**
+
+   ![Select or Create a Connection String](./assets/ai-agents_connection-string_select-or-create.png)
+
+   **Select** an existing [connection string](../../../ai-integration/connection-strings/connection-strings-overview.mdx)
+   that the agent will use to connect to your LLM of choice,
+   or click **Create a new AI connection string** to define a new string.
+   Your agent can use a local LLM like Ollama, or an external model like OpenAI.
+
+   ![Connection String](./assets/ai-agents_connection-string.png)
+
+4. **System prompt**
+   Enter a prompt that determines LLM characteristics like its role and purpose.
+
+5. **Sample response object** and **Response JSON schema**
+   Define a response JSON object for the LLM reply, either as a sample object or as a formal schema
+   (an example appears at the end of this section).
+   - The response object guides the LLM in composing its replies, and can make them easier for the client to parse.
+   - Defining a sample object is normally simpler.
+   - Behind the scenes, RavenDB will translate a sample object to a JSON schema format
+     before sending it to the LLM, but if you prefer, you can define the schema yourself.
+   - After defining a sample object, you can open the schema tab and click the "View schema"
+     button to see the generated schema.
+     ![Configure basic settings](./assets/ai-agents_config-basic-settings_schema.png)
+
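+For illustration, a sample response object for an agent that suggests rewards could look like this
+(the field names are examples only; RavenDB generates the corresponding JSON schema behind the scenes):
+
+```json
+{
+    "EmployeeID": "embed the employee's ID here",
+    "Profit": "embed the profit made by the employee here",
+    "SuggestedReward": "embed suggested rewards here"
+}
+```
+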
+
+## Set agent parameters
+
+Define **agent parameters**.
+Once defined, an agent parameter can be included in query tools' RQL queries.
+Values for agent parameters are provided by the client when a conversation is started.
+A client-API sketch of this flow appears at the end of this section.
+[Read more about parameters](../../../ai-integration/ai-agents/ai-agents_overview#query-parameters).
+
+![Set agent parameters](./assets/ai-agents_set-agent-params.png)
+
+1. **Add new parameter**
+   Click to add an agent parameter.
+
+2. **Name**
+   Enter the agent parameter's name.
+
+3. **Description**
+   Describe the parameter in plain language so the LLM can understand its purpose.
+
+4. **Remove parameter**
+   Remove a defined parameter from the list.
+
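+For reference, the same parameter flow expressed with the client API (using the names shown in the
+[API article](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api); the `country` parameter is an example) is roughly:
+
+```csharp
+// Define the parameter in the agent configuration
+agent.Parameters.Add(new AiAgentParameter(
+    "country", "A specific country that orders were shipped to"));
+
+// Reference it in a query tool's RQL query:  from Orders where ShipTo.Country == $country
+
+// Provide its value when starting a conversation
+var chat = store.AI.Conversation(
+    agent.Identifier, "Chats/",
+    new AiConversationCreationOptions().AddParameter("country", "France"));
+```
+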
+
+## Define agent tools
+
+Define **Query** and **Action** agent tools.
+
+* The query and action tools you define here can be freely used by the LLM.
+   * Query tools can trigger the agent to retrieve data from the database and return it to
+     the LLM.
+   * Action tools can trigger the client to perform actions such as removing a spam entry from
+     a comments section or adding a comment to an article.
+* The LLM has no direct access to the database or any other server property; all queries and
+  actions are performed through the agent.
+* [Find an AI agent usage flow chart here](../../../ai-integration/ai-agents/ai-agents_overview#ai-agent-usage-flow-chart)
+
+![Define agent tools](./assets/ai-agents_define-agent-tools.png)
+
+1. **Query tools**
+   Click to add a new query tool.
+
+2. **Action tools**
+   Click to add a new action tool.
+
+### Add query tools:
+
+![Add new query tool](./assets/ai-agents_define-agent-tools_add-query-tool.png)
+
+1. **Add new query tool**
+   Click to add a new query tool.
+
+2. **Remove**
+   Click to remove this tool.
+
+3. **Expand/Collapse tool**
+   Click to expand or collapse the tool's details.
+
+4. **Tool name**
+   Enter a name for the query tool.
+
+5. **Description**
+   Write a description that explains to the LLM in natural language what the attached query can be used for.
+   E.g., `apply this query when you need to retrieve the details of all the companies that reside in a certain country`
+
+6. **Allow model queries**
+   Enable to allow the LLM to trigger the execution of this query tool.
+   Disable to prevent the LLM from using this tool.
+
+   When disabled, the LLM will not be able to trigger this tool - but if the tool is set as an [initial-context query](../../../ai-integration/ai-agents/ai-agents_overview#initial-context-queries), the agent will still be able to execute it when it is started.
+
+7. **Add to initial context**
+   Enable to set the query tool as an [initial-context query](../../../ai-integration/ai-agents/ai-agents_overview#initial-context-queries).
+   When enabled, the agent will execute the query immediately when it starts a conversation with the LLM, without waiting for the LLM to invoke the tool, so that data relevant to the conversation is included in the initial context sent to the LLM.
+   Disable to prevent the agent from executing the query on startup.
+
+   An initial-context query is **not allowed** to use LLM parameters, since the LLM will not have the opportunity to fill the parameters with values before the query is executed.
+   The query **can** use agent parameters, whose values are provided by the user when the conversation is started.
+
+8. **Query**
+   Enter the query that the agent will run when the LLM requests it to use this tool.
+
+9. **Sample parameters object** and **Parameters JSON schema**
+   Set a schema (as a sample object or a formal JSON schema) that allows the LLM to fill query parameters with values.
+   [Read more about query parameters](../../../ai-integration/ai-agents/ai-agents_overview#query-parameters)
+
+### Add action tools:
+
+![Add new action tool](./assets/ai-agents_define-agent-tools_add-action-tool.png)
+
+1. **Add new action tool**
+   Click to add a new action tool.
+
+2. **Remove**
+   Click to remove this tool.
+
+3. **Expand/Collapse tool**
+   Click to expand or collapse the tool's details.
+
+4. **Tool name**
+   Enter a name for the action tool.
+
+5. **Description**
+   Enter a description that explains to the LLM in natural language when this action tool should be applied.
+   E.g., `apply this action tool when you need to create a new summary document`
+
+6. **Sample parameters object** and **Parameters JSON schema**
+   Set a sample object or a JSON schema that the LLM can populate when it invokes the action tool
+   (an example appears below).
+   The agent will pass this information to the client to guide it through the action it is requested to perform.
+
+   If you define both a sample parameters object and a schema, only the schema will be used.
+
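+For illustration, a sample parameters object for an action tool that stores reward suggestions
+could look like this (the field names are examples only):
+
+```json
+{
+    "suggestedReward": "embed your suggestions for a reward here",
+    "employeeId": "embed the employee's ID here",
+    "profit": "embed the employee's profit here"
+}
+```
+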
+
+## Configure chat trimming
+
+LLMs have no memory of prior interactions.
+To allow a continuous conversation, each time the agent sends the LLM a new prompt or request, it also sends the whole conversation up to that point.
+To minimize the size of such messages, you can set the agent to summarize conversations.
+A client-API sketch of this configuration appears at the end of this section.
+
+![Configure chat trimming](./assets/ai-agents_config-chat-trimming.png)
+
+1. **Summarize chat**
+   Use this option to limit the size of the conversation history. If the history exceeds the limits set below, it will be summarized before it is sent to the LLM.
+
+2. **Max tokens Before summarization**
+   If the conversation contains a total number of tokens larger than the limit you set here, the conversation will be summarized.
+
+3. **Max tokens After summarization**
+   Set the maximum number of tokens that will be left in the conversation after it is summarized.
+   Messages exceeding this limit will be removed, starting with the oldest.
+
+4. **History**
+   * **Enable history**
+     When history is enabled, the conversation sent to the LLM will be summarized, but a copy of the original conversation will be kept in a dedicated document in the `@conversations-history` collection.
+   * **Set history expiration**
+     When this option is enabled, conversations will be deleted from the
+     `@conversations-history` collection once their age exceeds the period
+     you set.
+
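+For reference, the equivalent chat-trimming configuration via the client API
+(as shown in the [API article](../../../ai-integration/ai-agents/creating-ai-agents/creating-ai-agents_api)) looks like this:
+
+```csharp
+// Summarize the conversation once it holds more than 32K tokens,
+// and keep at most 1K tokens after summarization
+var summarization = new AiAgentSummarizationByTokens()
+{
+    MaxTokensBeforeSummarization = 32768,
+    MaxTokensAfterSummarization = 1024
+};
+
+agent.ChatTrimming = new AiAgentChatTrimmingConfiguration(summarization);
+```
+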
+
+## Save and Run your agent
+
+When you're done configuring your agent, save it using the **Save** button at the bottom.
+
+![Save your agent](./assets/ai-agents_save-agent.png)
+
+You will find your agent in the main **AI Agents** view, where you can run or edit it.
+
+![Your agent](./assets/ai-agents_your-agent.png)
+
+1. **Start new chat**
+   Click to start your agent.
+
+2. **Edit agent**
+   Click to edit the agent.
+
+### Start new chat:
+
+Starting a new chat will open the chat window, where you can provide values
+for the parameters you defined for this agent and enter a user prompt that explains
+to the agent what you expect from this session.
+
+![Run agent](./assets/ai-agents_run-agent.png)
+
+1. **Conversation ID or prefix**
+   - Entering **a prefix** (e.g. `Chats/`) will start a new conversation, with the prefix preceding an automatically-created conversation ID.
+   - Entering **the ID of a conversation that doesn't exist yet** will also start a new conversation.
+   - Entering **the ID of an existing conversation** will send the entire conversation to the LLM and allow you to continue where you left off.
+
+2. **Set expiration**
+   Enable this option and set an expiration period to automatically delete conversations
+   from the `@conversations` collection when their age exceeds the set period.
+
+3. **Agent parameters**
+   Enter a value for each parameter defined in the agent configuration.
+   These values will be embedded in the query tools' RQL queries where you
+   included agent parameters.
+   E.g., if you enter `France` here as the value for `Country`,
+   a query tool's `from "Orders" where ShipTo.Country == $country` RQL query
+   will be executed as `from "Orders" where ShipTo.Country == "France"`.
+
+4. **User prompt**
+   Use the user prompt to explain to the agent, in natural language, what
+   this session is about.
+
+### Agent interaction:
+
+Running the agent displays its components and their interactions.
+
+Agent parameters and their values:
+![Parameters](./assets/ai-agents_running_params.png)
+
+The system prompt set for the LLM and the user prompt:
+![System and User prompts](./assets/ai-agents_running_prompts.png)
+
+The query tools and their activity:
+![Query tool](./assets/ai-agents_running_query-tool.png)
+
+You can view the raw data of the agent's activity in JSON form as well:
+![Raw data](./assets/ai-agents_running_raw-data.png)
+
+### Action tool dialog:
+
+If the agent runs action tools, you will be given a dialog that shows the
+information provided by the LLM when it requests the action, and a dialog inviting
+you to enter the results once you have performed it.
+
+![Action tool waiting for client response](./assets/ai-agents_running_action-tool.png)
+
+### Agent results:
+
+Finally, when the AI model finishes its work, you will be able to see its response.
+As with all other dialog boxes, you can expand the view to see the content or minimize it to see it in its context.
+
+![LLM answer: minimized](./assets/ai-agents_running_llm-response-minimized.png)
+
+
+## Test your agent
+
+You can test your agent while creating or editing it, to examine its configuration and operability before you deploy it. The test interface resembles the one you see when you run your agent normally via Studio, but conversations are not kept in the `@conversations` or `@conversations-history` collections.
+
+To test your agent, click **Test** at the bottom of the agent configuration view.
+
+![Test Agent](./assets/ai-agents_test-agent_test-button.png)
+
+![Run test](./assets/ai-agents_test-agent_run-test.png)
+
+1. **New Chat**
+   Click to start a new chat.
+2. **Close**
+   Click to return to the AI Agents configuration view.
+3. **Enter parameter value**
+   Enter a value for each parameter defined in the agent configuration.
+   The LLM will be able to replace these parameters with fixed values
+   when it uses query or action tools in which these parameters are embedded.
+4. **Agent prompt**
+   Explain to the agent in natural language what this session is about.
+5. **Send prompt**
+   Click to send your parameter values and user prompt to the agent and run the test.
+   You can keep sending prompts to the agent and receiving its replies in
+   a continuous conversation.
+
+### Runtime view and Test results:
+
+You will see the components that take part in the agent's run and be able
+to enter and send requested information for action tools. Each tool can be
+minimized to see it in context or expanded to view the data it carries.
+
+![Runtime view](./assets/ai-agents_runtime-view.png)
+
+When the LLM finishes processing, you will see its response.
+
+![Test results: minimized](./assets/ai-agents_test-results_minimized.png)
+
+You can expand the dialog or copy the content to see the response in detail.
+
+{`\{
+  "EmployeeID": "employees/1-A",
+  "EmployeeProfit": "1760",
+  "SuggestedRewards": "The employee lives in Redmond, WA, USA. For a special reward, consider a weekend getaway to the Pacific Northwest's scenic sites such as a stay at a luxury resort in Seattle or a relaxing wine tasting tour in Woodinville. Alternatively, you could offer gift cards for outdoor excursions in the Cascade Mountains or tickets to major cultural events in the Seattle area."
+\}
+`}
+
+
diff --git a/versioned_docs/version-7.1/ai-integration/ai-integration_start.mdx b/versioned_docs/version-7.1/ai-integration/ai-integration_start.mdx
new file mode 100644
index 0000000000..9b249152b1
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/ai-integration_start.mdx
@@ -0,0 +1,93 @@
+---
+title: "AI Integration"
+hide_table_of_contents: true
+sidebar_label: "Start"
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import CardWithImage from "@site/src/components/Common/CardWithImage";
+import CardWithImageHorizontal from "@site/src/components/Common/CardWithImageHorizontal";
+import ColGrid from "@site/src/components/ColGrid";
+
+import buildVsBuyStartImage from "./assets/ai-start_ai-agents_build-vs-buy.png";
+import vectorSearchIntroImage from "./assets/ai-start_vector-search_intro.png";
+import practicalLookAiAgentsImage from "./assets/practical-look-ai-agents-article-image.webp";
+
+import ayendeBlogImage from "@site/static/img/from-ayende-com.webp";
+import webinarThumbnailPlaceholder from "@site/static/img/webinar.webp";
+import discordThumbnailPlaceholder from "@site/static/img/discord.webp";
+
+# AI Integration
+Ship AI-powered features faster with RavenDB’s native tools.
+ +### Native AI features that create intelligent applications +RavenDB is equipped with a set of powerful native AI features that can +be used independently or in conjunction with each other, allowing you to easily integrate advanced AI capabilities into your applications. +These features include [AI agents](../ai-integration/ai-integration_start#ai-agents), [GenAI tasks](../ai-integration/ai-integration_start#genai-tasks), [Embeddings generation](../ai-integration/ai-integration_start#embeddings-generation), and [Vector search](../ai-integration/ai-integration_start#vector-search). + +### Use cases +RavenDB AI features help you ship any AI-related scenario quickly, including: +* **Conversational intelligence** - Natural-language chatbots, assistants, and interactive workflows. +* **Automated content enrichment** - Summarization, translation, classification, and document enhancement. +* **Semantic representation** - Creating vector representations for text, images, and other data types. +* **Similarity-based discovery** - Finding related items, aggregation, and context-aware retrieval. +* **Personalization & recommendations** - Tailoring suggestions, feeds, and user experiences. +* **Content moderation & compliance** - Automatically handling sensitive, inappropriate, or non-compliant content. +* **Knowledge management & Q&A** - Asking questions over policies, wikis, and documents; retrieving answers and citations. + +#### Learn more: In-depth AI features articles + + + + + +### AI agents +AI agents are conversational proxy components that reside on the server and autonomously handle client requests using an AI model. Instead of spending your time on integrating AI capabilities into your application, you can rapidly configure AI agents using Studio or the client API. Agents can securely read from the database and request the client for actions on behalf of the AI model, infusing intelligence into the workflow. Whether you need chatbots, automated reporting, or intelligent data processing, you get immediate production-ready AI features without the integration overhead. + + + + + + +### GenAI tasks +GenAI tasks are configurable [ongoing operations](../studio/database/tasks/ongoing-tasks/general-info) that process your documents systematically in the background using an AI model. Instead of building custom AI integration pipelines yourself, you can easily create tasks that weave AI capabilities into your data flow. They can enrich documents with AI-generated content, validate and categorize data, translate documents, or execute countless other automated workflows that leverage AI capabilities. + + + + + + +### Embeddings generation +Embeddings generation tasks transform your content into semantic vectors that enable intelligent similarity-based searches. Instead of building complex search infrastructure, you can utilize native tasks that seamlessly embed vector capabilities into your data, enabling intelligent search by meaning and context. + + + + + + +### Vector search +Vector search enables intelligent similarity-based discovery using embeddings rather than exact matching. Instead of developing custom similarity algorithms yourself, you can employ native vector operations for diverse applications. Whether you need to categorize content, find similar items, or automate recommendations, vector search delivers intelligent matching capabilities that understand meaning and context. 
+
+
+
+### Related live sessions & videos
+Watch our broadcasts to see RavenDB's AI features in action and learn practical implementation techniques.
+
+
+
+### Deep dives, content & resources
+Find additional resources to enhance your knowledge and skills.
+
+
+
diff --git a/versioned_docs/version-7.1/ai-integration/ai-tasks-list-view.mdx b/versioned_docs/version-7.1/ai-integration/ai-tasks-list-view.mdx
new file mode 100644
index 0000000000..63603c7664
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/ai-tasks-list-view.mdx
@@ -0,0 +1,59 @@
+---
+title: "AI Tasks - List View"
+hide_table_of_contents: true
+sidebar_label: AI Tasks - List View
+sidebar_position: 5
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# AI Tasks - List View
+
+* RavenDB supports the following AI tasks:
+  * [Embeddings generation task](../ai-integration/generating-embeddings/overview.mdx)
+  * [Gen AI task](../ai-integration/gen-ai-integration/gen-ai-overview.mdx)
+
+* AI tasks are part of RavenDB's ongoing tasks.
+  Learn more in [Ongoing Tasks - Overview](../studio/database/tasks/ongoing-tasks/general-info.mdx).
+
+* In the **AI Tasks - List view**, you can manage RavenDB's AI tasks -
+  create new tasks, edit existing ones, or delete them as needed.
+
+* In this article:
+  * [AI Tasks - list view](../ai-integration/ai-tasks-list-view.mdx#ai-tasks---list-view)
+
+## AI Tasks - list view
+
+![AI tasks - list view](./assets/ai-tasks-list-view.png)
+
+1. Go to **AI Hub > AI Tasks**.
+
+2. **Add AI Task**: Click to create a new AI task.
+
+3. **Task name**: The name of an existing AI task.
+
+4. **Task type**: The type of task: _Embeddings Generation_ or _Gen AI_.
+
+5. **Assigned node**: The node in the database group that is responsible for running the task.
+
+6. **Enable/Disable**: Click to enable or disable the task.
+
+7. **Details**: Click to view detailed information about the task.
+
+8. **Edit**: Click to modify the task.
+
+9. **Delete**: Click to remove the task.
+
+10. **Identifier**: The string identifier defined for the task.
+    **Connection string**: The name of the connection string used by the task.
+
+11. **Task status**: Displays the task’s current state and progress.
diff --git a/versioned_docs/version-7.1/ai-integration/assets/ai-start_ai-agents_build-vs-buy.png b/versioned_docs/version-7.1/ai-integration/assets/ai-start_ai-agents_build-vs-buy.png new file mode 100644 index 0000000000..990075161e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/assets/ai-start_ai-agents_build-vs-buy.png differ diff --git a/versioned_docs/version-7.1/ai-integration/assets/ai-start_vector-search_intro.png b/versioned_docs/version-7.1/ai-integration/assets/ai-start_vector-search_intro.png new file mode 100644 index 0000000000..55a9e11828 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/assets/ai-start_vector-search_intro.png differ diff --git a/versioned_docs/version-7.1/ai-integration/assets/ai-tasks-list-view.png b/versioned_docs/version-7.1/ai-integration/assets/ai-tasks-list-view.png new file mode 100644 index 0000000000..23356aa2b3 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/assets/ai-tasks-list-view.png differ diff --git a/versioned_docs/version-7.1/ai-integration/assets/practical-look-ai-agents-article-image.webp b/versioned_docs/version-7.1/ai-integration/assets/practical-look-ai-agents-article-image.webp new file mode 100644 index 0000000000..905ca8f6f3 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/assets/practical-look-ai-agents-article-image.webp differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/_category_.json b/versioned_docs/version-7.1/ai-integration/connection-strings/_category_.json new file mode 100644 index 0000000000..2fd9012301 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 4, + "label": "Connection Strings" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/azure-open-ai-1.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/azure-open-ai-1.png new file mode 100644 index 0000000000..3d8836044c Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/azure-open-ai-1.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/azure-open-ai-2.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/azure-open-ai-2.png new file mode 100644 index 0000000000..bc603828aa Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/azure-open-ai-2.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/connection-strings-view.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/connection-strings-view.png new file mode 100644 index 0000000000..a71909ddca Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/connection-strings-view.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/create-connection-string.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/create-connection-string.png new file mode 100644 index 0000000000..0aafce2d08 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/create-connection-string.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/embedded.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/embedded.png new file mode 100644 index 0000000000..c060cfbf1d 
Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/embedded.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/google-ai.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/google-ai.png new file mode 100644 index 0000000000..3bd83f2e63 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/google-ai.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/hugging-face.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/hugging-face.png new file mode 100644 index 0000000000..ac263c8ad6 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/hugging-face.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/mistral-ai.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/mistral-ai.png new file mode 100644 index 0000000000..cd7f4b6d95 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/mistral-ai.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/ollama-1.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/ollama-1.png new file mode 100644 index 0000000000..9dd0dcddab Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/ollama-1.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/ollama-2.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/ollama-2.png new file mode 100644 index 0000000000..04f0524aac Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/ollama-2.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/open-ai-1.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/open-ai-1.png new file mode 100644 index 0000000000..6b0f1d1a4d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/open-ai-1.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/open-ai-2.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/open-ai-2.png new file mode 100644 index 0000000000..182c61ca8f Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/open-ai-2.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/assets/vertex-ai.png b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/vertex-ai.png new file mode 100644 index 0000000000..28b5c515dc Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/connection-strings/assets/vertex-ai.png differ diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/azure-open-ai.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/azure-open-ai.mdx new file mode 100644 index 0000000000..a620c7b91d --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/azure-open-ai.mdx @@ -0,0 +1,33 @@ +--- +title: "Connection String to Azure OpenAI" +hide_table_of_contents: true +sidebar_label: Azure OpenAI +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import AzureOpenAiCsharp from 
'./content/_azure-open-ai-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/connection-strings-overview.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/connection-strings-overview.mdx new file mode 100644 index 0000000000..48104f4d14 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/connection-strings-overview.mdx @@ -0,0 +1,121 @@ +--- +title: "AI Connection Strings - Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# AI Connection Strings - Overview + + + +* AI connection strings define how RavenDB connects to external AI models. + Each connection string specifies the provider and the model to use. + The model can be either a chat model or a text embedding model. + +* These connection strings are then used by AI-powered features in RavenDB, such as: + * [Embeddings Generation Tasks](../../ai-integration/generating-embeddings/overview.mdx) - + use **text embedding models** to generate vector embeddings from document content for vector search. + * [Gen AI Tasks](../../ai-integration/gen-ai-integration/gen-ai-overview) and [AI Agents](../../ai-integration/ai-agents/ai-agents_overview.mdx) - + interact with **chat models** for reasoning, summarization, or conversational workflows. + +* RavenDB supports connecting to the following external providers: + [OpenAI & OpenAI compatible providers](../../ai-integration/connection-strings/open-ai.mdx), + [Azure OpenAI](../../ai-integration/connection-strings/azure-open-ai.mdx), + [Google AI](../../ai-integration/connection-strings/google-ai.mdx), + [Vertex AI](../../ai-integration/connection-strings/vertex-ai.mdx), + [Ollama](../../ai-integration/connection-strings/ollama.mdx), + [Hugging Face](../../ai-integration/connection-strings/hugging-face.mdx), + and [Mistral AI](../../ai-integration/connection-strings/mistral-ai.mdx), + or to RavenDB’s [embedded model (_bge-micro-v2_)](../../ai-integration/connection-strings/embedded.mdx). + +* While each task can have only one connection string, + you can define multiple connection strings in your database to support different providers or configurations. + A single connection string can also be reused across multiple tasks in the database. + +* The AI connection strings can be created from: + * The **AI Connection Strings view in the Studio** - + where you can create, edit, and delete connection strings that are not in use. + * The **Client API** - + examples are available in the dedicated articles for each provider. + +--- + +* In this article: + * [The AI Connection Strings view](../../ai-integration/connection-strings/connection-strings-overview.mdx#the-ai-connection-strings-view) + * [Creating an AI connection string (from the Studio)](../../ai-integration/connection-strings/connection-strings-overview.mdx#creating-an-ai-connection-string-from-the-studio) + + + +## The AI Connection Strings view + +![connection strings view](./assets/connection-strings-view.png) + +1. Go to the **AI Hub** menu. + +2. Open the **AI Connection Strings** view. + +3. Click **"Add new"** to create a new connection string. + +4. 
View the list of all AI connection strings that have been defined. + +5. Edit or delete a connection string. + Only connection strings that are not in use by a task can be deleted. + +## Creating an AI connection string (from the Studio) + +![create connection string](./assets/create-connection-string.png) + +
+
+1. **Name**
+   Enter a unique name for the connection string.
+
+2. **Identifier**
+   Enter a unique identifier for the connection string.
+   Each AI connection string in the database must have a distinct identifier.
+
+   If not specified, or when clicking the "Regenerate" button,
+   RavenDB automatically generates the identifier based on the connection string name. For example:
+   * If the connection string name is: _"My connection string to Google AI"_
+   * The generated identifier will be: _"my-connection-string-to-google-ai"_
+
+   Allowed characters: only lowercase letters (a-z), numbers (0-9), and hyphens (-).
+   For example, see how this identifier is used in the [embeddings cache collection](../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-cache-collection).
+
+3. **Regenerate**
+   Click "Regenerate" to automatically create an identifier based on the connection string name.
+
+4. **Model type**
+   Select the type of model you want to interact with:
+   * **Chat model**
+     Select this type to use a conversational model for content generation and dialogue.
+   * **Text embedding model**
+     Select this type to generate vector embeddings from your document content for vector search.
+
+5. **Connector**
+   Select an AI provider from the dropdown menu.
+   This opens a dialog where you can configure the connection details for the selected provider.
+
+   The list of available providers is filtered based on the selected model type.
+   (Some providers are currently supported in RavenDB only for text embedding models).
+
+   Configuration details for each provider are explained in the following articles:
+   * [Azure Open AI](../../ai-integration/connection-strings/azure-open-ai.mdx)
+   * [Google AI](../../ai-integration/connection-strings/google-ai.mdx) (_embeddings only_)
+   * [Hugging Face](../../ai-integration/connection-strings/hugging-face.mdx) (_embeddings only_)
+   * [Ollama](../../ai-integration/connection-strings/ollama.mdx)
+   * [OpenAI](../../ai-integration/connection-strings/open-ai.mdx)
+   * [Mistral AI](../../ai-integration/connection-strings/mistral-ai.mdx) (_embeddings only_)
+   * [Vertex AI](../../ai-integration/connection-strings/vertex-ai.mdx) (_embeddings only_)
+   * [Embedded model (bge-micro-v2)](../../ai-integration/connection-strings/embedded.mdx) (_embeddings only_)
+
+6. Once you complete all configurations for the selected provider in the dialog,
+   save the connection string definition.
diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/content/_azure-open-ai-csharp.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_azure-open-ai-csharp.mdx
new file mode 100644
index 0000000000..5f3ff1627b
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_azure-open-ai-csharp.mdx
@@ -0,0 +1,206 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* This article explains how to define a connection string to the [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/ai-services/openai-service),
+  enabling RavenDB to use Azure OpenAI models for [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx),
+  [Gen AI tasks](../../../ai-integration/gen-ai-integration/gen-ai-overview.mdx), and [AI agents](../../../ai-integration/ai-agents/ai-agents_overview.mdx).
+
+* In this article:
+  * [Define the connection string - from the Studio](../../../ai-integration/connection-strings/azure-open-ai.mdx#define-the-connection-string---from-the-studio)
+    * [Configuring a text embedding model](../../../ai-integration/connection-strings/azure-open-ai.mdx#configuring-a-text-embedding-model)
+    * [Configuring a chat model](../../../ai-integration/connection-strings/azure-open-ai.mdx#configuring-a-chat-model)
+  * [Define the connection string - from the Client API](../../../ai-integration/connection-strings/azure-open-ai.mdx#define-the-connection-string---from-the-client-api)
+  * [Syntax](../../../ai-integration/connection-strings/azure-open-ai.mdx#syntax)
+
+## Define the connection string - from the Studio
+
+### Configuring a text embedding model
+
+![connection string to azure open ai](../assets/azure-open-ai-1.png)
+
+1. **Name**
+   Enter a name for this connection string.
+
+2. **Identifier** (optional)
+   Enter an identifier for this connection string.
+   Learn more about the identifier in the [connection string identifier](../../../ai-integration/connection-strings/connection-strings-overview.mdx#identifier) section.
+
+3. **Model Type**
+   Select "Text Embeddings".
+
+4. **Connector**
+   Select **Azure OpenAI** from the dropdown menu.
+
+5. **API key**
+   Enter the API key used to authenticate requests to the Azure OpenAI service.
+
+6. **Endpoint**
+   Enter the base URL of your Azure OpenAI resource.
+
+7. **Model**
+   Select an Azure OpenAI text embedding model from the dropdown list, or enter a new one.
+
+8. **Deployment name**
+   Specify the unique identifier assigned to your model deployment in your Azure environment.
+
+9. **Dimensions** (optional)
+   * Specify the number of dimensions for the output embeddings.
+     Supported only by _text-embedding-3_ and later models.
+   * If not specified, the model's default dimensionality is used.
+
+10. **Max concurrent query batches**: (optional)
+    * When making vector search queries, the content of the search terms must also be converted to embeddings to compare them against the stored vectors.
+      Requests to generate such query embeddings via the AI provider are sent in batches.
+    * This parameter defines the maximum number of these batches that can be processed concurrently.
+      You can set a default value using the [Ai.Embeddings.MaxConcurrentBatches](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxconcurrentbatches) configuration key.
+
+11. Click **Test Connection** to confirm the connection string is set up correctly.
+
+12. Click **Save** to store the connection string or **Cancel** to discard changes.
+
+### Configuring a chat model
+
+* When configuring a chat model, the UI displays the same base fields as those used for [text embedding models](../../../ai-integration/connection-strings/azure-open-ai.mdx#configuring-a-text-embedding-model),
+  including the connection string _Name_, optional _Identifier_, _API Key_, _Endpoint_, _Deployment Name_, and _Model_ name.
+
+* One additional setting is specific to chat models: _Temperature_.
+
+![connection string to azure open ai](../assets/azure-open-ai-2.png)
+
+1. **Model Type**
+   Select "Chat".
+
+2. **Model**
+   Enter the name of the Azure OpenAI model to use for chat completions.
+
+3. **Temperature** (optional)
+   The temperature setting controls the randomness and creativity of the model’s output.
+ Valid values typically range from `0.0` to `2.0`: + * Higher values (e.g., `1.0` or above) produce more diverse and creative responses. + * Lower values (e.g., `0.2`) result in more focused, consistent, and deterministic output. + * If not explicitly set, Azure OpenAI uses a default temperature of `1.0`. + See [Azure OpenAI chat completions parameters](https://learn.microsoft.com/en-us/azure/ai-foundry/openai/reference#request-body-2). + +--- + +## Define the connection string - from the Client API + + + +```csharp +using (var store = new DocumentStore()) +{ + // Define the connection string to Azure OpenAI + var connectionString = new AiConnectionString + { + // Connection string Name & Identifier + Name = "ConnectionStringToAzureOpenAI", + Identifier = "identifier-to-the-connection-string", // optional + + // Model type + ModelType = AiModelType.TextEmbeddings, + + // Azure OpenAI connection settings + AzureOpenAiSettings = new AzureOpenAiSettings + { + ApiKey = "your-api-key", + Endpoint = "https://your-resource-name.openai.azure.com", + + // Name of text embedding model to use + Model = "text-embedding-3-small", + + DeploymentName = "your-deployment-name", + + // Optionally, override the default maximum number of query embedding batches + // that can be processed concurrently + EmbeddingsMaxConcurrentBatches = 10 + } + }; + + // Deploy the connection string to the server + var putConnectionStringOp = new PutConnectionStringOperation(connectionString); + var putConnectionStringResult = store.Maintenance.Send(putConnectionStringOp); +} +``` + + +```csharp +using (var store = new DocumentStore()) +{ + // Define the connection string to Azure OpenAI + var connectionString = new AiConnectionString + { + // Connection string Name & Identifier + Name = "ConnectionStringToAzureOpenAI", + Identifier = "identifier-to-the-connection-string", // optional + + // Model type + ModelType = AiModelType.Chat, + + // Azure OpenAI connection settings + AzureOpenAiSettings = new AzureOpenAiSettings + { + ApiKey = "your-api-key", + Endpoint = "https://your-resource-name.openai.azure.com", + + // Name of chat model to use + Model = "gpt-4o-mini", + + DeploymentName = "your-deployment-name", + + // Optionally, set the model's temperature + Temperature = 0.4 + } + }; + + // Deploy the connection string to the server + var putConnectionStringOp = new PutConnectionStringOperation(connectionString); + var putConnectionStringResult = store.Maintenance.Send(putConnectionStringOp); +} +``` + + + +## Syntax + + +```csharp +public class AiConnectionString +{ + public string Name { get; set; } + public string Identifier { get; set; } + public AiModelType ModelType { get; set; } + public AzureOpenAiSettings AzureOpenAiSettings { get; set; } +} + +public class AzureOpenAiSettings : AbstractAiSettings +{ + public string ApiKey { get; set; } + public string Endpoint { get; set; } + public string Model { get; set; } + public string DeploymentName { get; set; } + + // Relevant only for text embedding models: + // Specifies the number of dimensions in the generated embedding vectors. + public int? Dimensions { get; set; } + + // Relevant only for chat models: + // Controls the randomness and creativity of the model’s output. + // Higher values (e.g., 1.0 or above) produce more diverse and creative responses. + // Lower values (e.g., 0.2) result in more focused and deterministic output. + // If set to 'null', the temperature is not sent and the model's default will be used. + public double? 
Temperature { get; set; } +} + +public class AbstractAiSettings +{ + public int? EmbeddingsMaxConcurrentBatches { get; set; } +} +``` + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/content/_embedded-csharp.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_embedded-csharp.mdx new file mode 100644 index 0000000000..41a358f906 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_embedded-csharp.mdx @@ -0,0 +1,100 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article explains how to define a connection string to the [bge-micro-v2](https://huggingface.co/TaylorAI/bge-micro-v2) model. + This model, designed exclusively for embeddings generation, is embedded within RavenDB, enabling RavenDB to seamlessly handle its + [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx) without requiring an external AI service. + +* Running the model locally consumes processor resources and will impact RavenDB's overall performance, + depending on your workload and usage patterns. + +* In this article: + * [Define the connection string - from the Studio](../../../ai-integration/connection-strings/embedded.mdx#define-the-connection-string---from-the-studio) + * [Define the connection string - from the Client API](../../../ai-integration/connection-strings/embedded.mdx#define-the-connection-string---from-the-client-api) + * [Syntax](../../../ai-integration/connection-strings/embedded.mdx#syntax) + + + +## Define the connection string - from the Studio + +![connection string to the embedded model](../assets/embedded.png) + +1. **Name** + Enter a name for this connection string. + +2. **Identifier** (optional) + Learn more about the identifier in the [connection string identifier](../../../ai-integration/connection-strings/connection-strings-overview.mdx#the-connection-string-identifier) section. + +3. **Model Type** + Select "Text Embeddings". + +4. **Connector** + Select **Embedded (bge-micro-v2)** from the dropdown menu. + +5. **Max concurrent query batches**: (optional) + * When making vector search queries, the content of the search terms must also be converted to embeddings to compare them against the stored vectors. + Requests to generate such query embeddings via the AI provider are sent in batches. + * This parameter defines the maximum number of these batches that can be processed concurrently. + You can set a default value using the [Ai.Embeddings.MaxConcurrentBatches](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxconcurrentbatches) configuration key. + +6. Click **Save** to store the connection string or **Cancel** to discard changes. + +## Define the connection string - from the Client API + + +```csharp +using (var store = new DocumentStore()) +{ + // Define the connection string to the embedded model + var connectionString = new AiConnectionString + { + // Connection string name & identifier + Name = "ConnectionStringToEmbedded", + Identifier = "identifier-to-the-connection-string", // optional + + // Model type + ModelType = AiModelType.TextEmbeddings, + + // Embedded model settings + // No user configuration is required for the embedded model, + // as it uses predefined values managed internally by RavenDB. 
+ EmbeddedSettings = new EmbeddedSettings() + }; + + // Optionally, override the default maximum number of query embedding batches + // that can be processed concurrently + connectionString.EmbeddedSettings.EmbeddingsMaxConcurrentBatches = 10; + + // Deploy the connection string to the server + var operation = new PutConnectionStringOperation(connectionString); + var putConnectionStringResult = store.Maintenance.Send(operation); +} +``` + + +## Syntax + + +```csharp +public class AiConnectionString +{ + public string Name { get; set; } + public string Identifier { get; set; } + public AiModelType ModelType { get; set; } + public EmbeddedSettings EmbeddedSettings { get; set; } +} + +public class EmbeddedSettings : AbstractAiSettings +{ +} + +public class AbstractAiSettings +{ + public int? EmbeddingsMaxConcurrentBatches { get; set; } +} +``` + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/content/_google-ai-csharp.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_google-ai-csharp.mdx new file mode 100644 index 0000000000..4c7ea24f46 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_google-ai-csharp.mdx @@ -0,0 +1,130 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article explains how to define a connection string to [Google AI](https://ai.google.dev/gemini-api/docs/embeddings), + enabling RavenDB to seamlessly integrate its [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx) with Google's AI services. + +* This configuration supports **Google AI embeddings** only. + It is not compatible with Vertex AI endpoints or credentials. + +* RavenDB currently supports only text embeddings with Google AI. + Chat models are not supported through this integration. + +* In this article: + * [Define the connection string - from the Studio](../../../ai-integration/connection-strings/google-ai.mdx#define-the-connection-string---from-the-studio) + * [Define the connection string - from the Client API](../../../ai-integration/connection-strings/google-ai.mdx#define-the-connection-string---from-the-client-api) + * [Syntax](../../../ai-integration/connection-strings/google-ai.mdx#syntax) + + + +## Define the connection string - from the Studio + +![connection string to google ai](../assets/google-ai.png) + +1. **Name** + Enter a name for this connection string. + +2. **Identifier** (optional) + Enter an identifier for this connection string. + Learn more about the identifier in the [connection string identifier](../../../ai-integration/connection-strings/connection-strings-overview.mdx#the-connection-string-identifier) section. + +3. **Model Type** + Select "Text Embeddings". + +4. **Connector** + Select **Google AI** from the dropdown menu. + +5. **AI Version** (optional) + * Select the Google AI API version to use. + * If not specified, `V1_Beta` is used. Learn more in [API versions explained](https://ai.google.dev/gemini-api/docs/api-versions). + +6. **API key** + Enter the API key used to authenticate requests to Google's AI services. + +7. **Model** + Select or enter the Google AI text embedding model to use. + +8. **Dimensions** (optional) + * Specify the number of dimensions for the output embeddings. + * If not specified, the model's default dimensionality is used. + +9. 
**Max concurrent query batches**: (optional)
+   * When making vector search queries, the content of the search terms must also be converted to embeddings to compare them against the stored vectors.
+     Requests to generate such query embeddings via the AI provider are sent in batches.
+   * This parameter defines the maximum number of these batches that can be processed concurrently.
+     You can set a default value using the [Ai.Embeddings.MaxConcurrentBatches](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxconcurrentbatches) configuration key.
+
+10. Click **Test Connection** to confirm the connection string is set up correctly.
+
+11. Click **Save** to store the connection string or **Cancel** to discard changes.
+
+## Define the connection string - from the Client API
+
+
+```csharp
+using (var store = new DocumentStore())
+{
+    // Define the connection string to Google AI
+    var connectionString = new AiConnectionString
+    {
+        // Connection string name & identifier
+        Name = "ConnectionStringToGoogleAI",
+        Identifier = "identifier-to-the-connection-string", // optional
+
+        // Model type
+        ModelType = AiModelType.TextEmbeddings,
+
+        // Google AI connection settings
+        GoogleSettings = new GoogleSettings(
+            apiKey: "your-api-key",
+            model: "text-embedding-004",
+            aiVersion: GoogleAIVersion.V1)
+    };
+
+    // Optionally, override the default maximum number of query embedding batches
+    // that can be processed concurrently
+    connectionString.GoogleSettings.EmbeddingsMaxConcurrentBatches = 10;
+
+    // Deploy the connection string to the server
+    var operation = new PutConnectionStringOperation(connectionString);
+    var putConnectionStringResult = store.Maintenance.Send(operation);
+}
+```
+
+
+## Syntax
+
+
+```csharp
+public class AiConnectionString
+{
+    public string Name { get; set; }
+    public string Identifier { get; set; }
+    public AiModelType ModelType { get; set; }
+    public GoogleSettings GoogleSettings { get; set; }
+}
+
+public class GoogleSettings : AbstractAiSettings
+{
+    public string ApiKey { get; set; }
+    public string Model { get; set; }
+    public GoogleAIVersion? AiVersion { get; set; }
+    public int? Dimensions { get; set; }
+}
+
+public enum GoogleAIVersion
+{
+    V1,      // Represents the "v1" version of the Google AI API
+    V1_Beta  // Represents the "v1beta" version of the Google AI API
+}
+
+public class AbstractAiSettings
+{
+    public int? EmbeddingsMaxConcurrentBatches { get; set; }
+}
+```
+
diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/content/_hugging-face-csharp.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_hugging-face-csharp.mdx
new file mode 100644
index 0000000000..7a9db2cd46
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_hugging-face-csharp.mdx
@@ -0,0 +1,116 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article explains how to define a connection string to [Hugging Face's text embedding services](https://huggingface.co/docs/text-embeddings-inference/en/index),
+  enabling RavenDB to seamlessly integrate its [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx) within your environment.
+
+* Note: RavenDB currently supports only text embeddings with Hugging Face.
+  Chat models are not supported through this integration.
+
+* In this article:
+  * [Define the connection string - from the Studio](../../../ai-integration/connection-strings/hugging-face.mdx#define-the-connection-string---from-the-studio)
+  * [Define the connection string - from the Client API](../../../ai-integration/connection-strings/hugging-face.mdx#define-the-connection-string---from-the-client-api)
+  * [Syntax](../../../ai-integration/connection-strings/hugging-face.mdx#syntax)
+
+
+
+## Define the connection string - from the Studio
+
+![connection string to hugging face](../assets/hugging-face.png)
+
+1. **Name**
+   Enter a name for this connection string.
+
+2. **Identifier** (optional)
+   Learn more about the identifier in the [connection string identifier](../../../ai-integration/connection-strings/connection-strings-overview.mdx#the-connection-string-identifier) section.
+
+3. **Model Type**
+   Select "Text Embeddings".
+
+4. **Connector**
+   Select **Hugging Face** from the dropdown menu.
+
+5. **API key**
+   Enter the API key used to authenticate requests to Hugging Face's text embedding services.
+
+6. **Endpoint** (optional)
+   Select or enter the Hugging Face endpoint for generating embeddings from text.
+   If not specified, the default endpoint (`https://api-inference.huggingface.co/`) is used.
+
+7. **Model**
+   Specify the Hugging Face text embedding model to use.
+
+8. **Max concurrent query batches**: (optional)
+   * When making vector search queries, the content of the search terms must also be converted to embeddings to compare them against the stored vectors.
+     Requests to generate such query embeddings via the AI provider are sent in batches.
+   * This parameter defines the maximum number of these batches that can be processed concurrently.
+     You can set a default value using the [Ai.Embeddings.MaxConcurrentBatches](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxconcurrentbatches) configuration key, as shown in the sketch after this list.
+
+9. Click **Test Connection** to confirm the connection string is set up correctly.
+
+10. Click **Save** to store the connection string or **Cancel** to discard changes.
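+
+As noted in step 8, a server-wide default for the maximum number of concurrent query embedding batches
+can be set with the `Ai.Embeddings.MaxConcurrentBatches` configuration key. The entry below is a minimal
+sketch of such a setting in the server's `settings.json` configuration file; the value `4` and the
+surrounding file contents are illustrative assumptions, not recommended values.
+
+```json
+{
+    "Ai.Embeddings.MaxConcurrentBatches": 4
+}
+```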
+ +## Define the connection string - from the Client API + + +```csharp +using (var store = new DocumentStore()) +{ + // Define the connection string to Hugging Face + var connectionString = new AiConnectionString + { + // Connection string name & identifier + Name = "ConnectionStringToHuggingFace", + Identifier = "identifier-to-the-connection-string", // optional + + // Model type + ModelType = AiModelType.TextEmbeddings, + + // Hugging Face connection settings + HuggingFaceSettings = new HuggingFaceSettings( + apiKey: "your-api-key", + endpoint: "https://api-inference.huggingface.co/", + model: "sentence-transformers/all-MiniLM-L6-v2") + }; + + // Optionally, override the default maximum number of query embedding batches + // that can be processed concurrently + connectionString.HuggingFaceSettings.EmbeddingsMaxConcurrentBatches = 10; + + // Deploy the connection string to the server + var operation = new PutConnectionStringOperation(connectionString); + var putConnectionStringResult = store.Maintenance.Send(operation); +} +``` + + +## Syntax + + +```csharp +public class AiConnectionString +{ + public string Name { get; set; } + public string Identifier { get; set; } + public AiModelType ModelType { get; set; } + public HuggingFaceSettings HuggingFaceSettings { get; set; } +} + +public class HuggingFaceSettings : AbstractAiSettings +{ + public string ApiKey { get; set; } + public string Endpoint { get; set; } + public string Model { get; set; } +} + +public class AbstractAiSettings +{ + public int? EmbeddingsMaxConcurrentBatches { get; set; } +} +``` + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/content/_mistral-ai-csharp.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_mistral-ai-csharp.mdx new file mode 100644 index 0000000000..22a32befca --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_mistral-ai-csharp.mdx @@ -0,0 +1,114 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article explains how to define a connection string to [Mistral AI](https://docs.mistral.ai/capabilities/embeddings/), + enabling RavenDB to seamlessly integrate its [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx) with Mistral's API. + +* Note: RavenDB currently supports only text embeddings with Mistral AI. + Chat models are not supported through this integration. + +* In this article: + * [Define the connection string - from the Studio](../../../ai-integration/connection-strings/mistral-ai.mdx#define-the-connection-string---from-the-studio) + * [Define the connection string - from the Client API](../../../ai-integration/connection-strings/mistral-ai.mdx#define-the-connection-string---from-the-client-api) + * [Syntax](../../../ai-integration/connection-strings/mistral-ai.mdx#syntax) + + + +## Define the connection string - from the Studio + +![connection string to mistral ai](../assets/mistral-ai.png) + +1. **Name** + Enter a name for this connection string. + +2. **Identifier** (optional) + Learn more about the identifier in the [connection string identifier](../../../ai-integration/connection-strings/connection-strings-overview.mdx#the-connection-string-identifier) section. + +3. **Model Type** + Select "Text Embeddings". + +4. **Connector** + Select **Mistral AI** from the dropdown menu. + +5. 
**API key** + Enter the API key used to authenticate requests to Mistral AI's API. + +6. **Endpoint** + Select or enter the Mistral AI endpoint for generating embeddings from text. + +7. **Model** + Select or enter the Mistral AI text embedding model to use. + +8. **Max concurrent query batches**: (optional) + * When making vector search queries, the content of the search terms must also be converted to embeddings to compare them against the stored vectors. + Requests to generate such query embeddings via the AI provider are sent in batches. + * This parameter defines the maximum number of these batches that can be processed concurrently. + You can set a default value using the [Ai.Embeddings.MaxConcurrentBatches](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxconcurrentbatches) configuration key. + +9. Click **Test Connection** to confirm the connection string is set up correctly. + +10. Click **Save** to store the connection string or **Cancel** to discard changes. + +## Define the connection string - from the Client API + + +```csharp +using (var store = new DocumentStore()) +{ + // Define the connection string to Mistral AI + var connectionString = new AiConnectionString + { + // Connection string name & identifier + Name = "ConnectionStringToMistralAI", + Identifier = "identifier-to-the-connection-string", // optional + + // Model type + ModelType = AiModelType.TextEmbeddings, + + // Mistral AI connection settings + MistralAiSettings = new MistralAiSettings( + apiKey: "your-api-key", + endpoint: "https://api.mistral.ai/v1", + model: "mistral-embed") + }; + + // Optionally, override the default maximum number of query embedding batches + // that can be processed concurrently + connectionString.MistralAiSettings.EmbeddingsMaxConcurrentBatches = 10; + + // Deploy the connection string to the server + var operation = new PutConnectionStringOperation(connectionString); + var putConnectionStringResult = store.Maintenance.Send(operation); +} +``` + + +## Syntax + + +```csharp +public class AiConnectionString +{ + public string Name { get; set; } + public string Identifier { get; set; } + public AiModelType ModelType { get; set; } + public MistralAiSettings MistralAiSettings { get; set; } +} + +public class MistralAiSettings : AbstractAiSettings +{ + public string ApiKey { get; set; } + public string Endpoint { get; set; } + public string Model { get; set; } +} + +public class AbstractAiSettings +{ + public int? EmbeddingsMaxConcurrentBatches { get; set; } +} +``` + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/content/_ollama-csharp.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_ollama-csharp.mdx new file mode 100644 index 0000000000..6e19637fdc --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_ollama-csharp.mdx @@ -0,0 +1,210 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article explains how to define a connection string to [Ollama](https://ollama.com/blog/embedding-models), + enabling RavenDB to use Ollama models for [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx), + [Gen AI tasks](../../../ai-integration/gen-ai-integration/gen-ai-overview.mdx), and [AI agents](../../../ai-integration/ai-agents/ai-agents_overview.mdx). 
+
+* In this article:
+  * [Define the connection string - from the Studio](../../../ai-integration/connection-strings/ollama.mdx#define-the-connection-string---from-the-studio)
+      * [Configuring a text embedding model](../../../ai-integration/connection-strings/ollama.mdx#configuring-a-text-embedding-model)
+      * [Configuring a chat model](../../../ai-integration/connection-strings/ollama.mdx#configuring-a-chat-model)
+  * [Define the connection string - from the Client API](../../../ai-integration/connection-strings/ollama.mdx#define-the-connection-string---from-the-client-api)
+  * [Syntax](../../../ai-integration/connection-strings/ollama.mdx#syntax)
+
+
+
+## Define the connection string - from the Studio
+
+### Configuring a text embedding model
+
+![connection string to ollama](../assets/ollama-1.png)
+
+1. **Name**
+   Enter a name for this connection string.
+
+2. **Identifier** (optional)
+   Learn more about the identifier in the [connection string identifier](../../../ai-integration/connection-strings/connection-strings-overview.mdx#the-connection-string-identifier) section.
+
+3. **Model Type**
+   Select "Text Embeddings".
+
+4. **Connector**
+   Select **Ollama** from the dropdown menu.
+
+5. **URI**
+   Enter the Ollama API URI.
+
+6. **Model**
+   Specify the Ollama text embedding model to use.
+
+7. **Max concurrent query batches**: (optional)
+   * When making vector search queries, the content of the search terms must also be converted to embeddings to compare them against the stored vectors.
+     Requests to generate such query embeddings via the AI provider are sent in batches.
+   * This parameter defines the maximum number of these batches that can be processed concurrently.
+     You can set a default value using the [Ai.Embeddings.MaxConcurrentBatches](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxconcurrentbatches) configuration key.
+
+8. Click **Test Connection** to confirm the connection string is set up correctly.
+
+9. Click **Save** to store the connection string or **Cancel** to discard changes.
+
+### Configuring a chat model
+
+* When configuring a chat model, the UI displays the same base fields as those used for [text embedding models](../../../ai-integration/connection-strings/ollama.mdx#configuring-a-text-embedding-model),
+  including the connection string _Name_, optional _Identifier_, _URI_, and _Model_ name.
+
+* In addition, two fields are specific to chat models: _Temperature_ and _Thinking mode_.
+
+![connection string to ollama](../assets/ollama-2.png)
+
+1. **Model Type**
+   Select "Chat".
+
+2. **Model**
+   Enter the name of the Ollama model to use for chat completions.
+
+3. **Thinking mode** (optional)
+   The thinking mode setting controls whether the model outputs its internal reasoning steps before returning the final answer.
+   * When set to `Enabled`:
+     the model outputs a series of intermediate reasoning steps (chain of thought) before the final answer.
+     This may improve output quality for complex tasks, but increases response time and token usage.
+   * When set to `Disabled`:
+     the model returns only the final answer, without exposing intermediate steps.
+     This is typically faster and more cost-effective (uses fewer tokens),
+     but may reduce quality on complex reasoning tasks.
+   * When set to `Default`:
+     the model’s built-in default will be used.
+     This value may vary depending on the selected model.
+   Set this parameter based on the trade-off between task complexity and speed/cost requirements.
+
+4. **Temperature** (optional)
+   The temperature setting controls the randomness and creativity of the model’s output.
+   Valid values typically range from `0.0` to `2.0`:
+   * Higher values (e.g., `1.0` or above) produce more diverse and creative responses.
+   * Lower values (e.g., `0.2`) result in more focused, consistent, and deterministic output.
+   * If not explicitly set, Ollama defaults to a temperature of `0.8`.
+   See [Ollama's parameters reference](https://ollama.readthedocs.io/en/modelfile/#valid-parameters-and-values).
+
+---
+
+## Define the connection string - from the Client API
+
+
+
+```csharp
+using (var store = new DocumentStore())
+{
+    // Define the connection string to Ollama
+    var connectionString = new AiConnectionString
+    {
+        // Connection string Name & Identifier
+        Name = "ConnectionStringToOllama",
+        Identifier = "identifier-to-the-connection-string", // optional
+
+        // Model type
+        ModelType = AiModelType.TextEmbeddings,
+
+        // Ollama connection settings
+        OllamaSettings = new OllamaSettings
+        {
+            Uri = "http://localhost:11434",
+
+            // Name of text embedding model to use
+            Model = "mxbai-embed-large",
+
+            // Optionally, override the default maximum number of query embedding batches
+            // that can be processed concurrently
+            EmbeddingsMaxConcurrentBatches = 10
+        }
+    };
+
+    // Deploy the connection string to the server
+    var putConnectionStringOp = new PutConnectionStringOperation(connectionString);
+    var putConnectionStringResult = store.Maintenance.Send(putConnectionStringOp);
+}
+```
+
+```csharp
+using (var store = new DocumentStore())
+{
+    // Define the connection string to Ollama
+    var connectionString = new AiConnectionString
+    {
+        // Connection string Name & Identifier
+        Name = "ConnectionStringToOllama",
+        Identifier = "identifier-to-the-connection-string", // optional
+
+        // Model type
+        ModelType = AiModelType.Chat,
+
+        // Ollama connection settings
+        OllamaSettings = new OllamaSettings
+        {
+            Uri = "http://localhost:11434",
+
+            // Name of chat model to use
+            Model = "llama3:8b-instruct",
+
+            // Optionally, set the model's temperature
+            Temperature = 0.4,
+
+            // Optionally, set the model's thinking behavior
+            Think = true
+        }
+    };
+
+    // Deploy the connection string to the server
+    var putConnectionStringOp = new PutConnectionStringOperation(connectionString);
+    var putConnectionStringResult = store.Maintenance.Send(putConnectionStringOp);
+}
+```
+
+
+## Syntax
+
+
+```csharp
+public class AiConnectionString
+{
+    public string Name { get; set; }
+    public string Identifier { get; set; }
+    public AiModelType ModelType { get; set; }
+    public OllamaSettings OllamaSettings { get; set; }
+}
+
+public class OllamaSettings : AbstractAiSettings
+{
+    // The base URI of your Ollama server
+    // For a local setup, use: "http://localhost:11434"
+    public string Uri { get; set; }
+
+    // The name of the model to use
+    public string Model { get; set; }
+
+    // Relevant only for chat models:
+    // Controls whether the model outputs its internal reasoning steps before returning the final answer.
+    // 'true' - the model outputs intermediate reasoning steps (chain of thought) before the final answer.
+    // 'false' - the model returns only the final answer, without exposing intermediate steps.
+    // 'null' - the model’s default behavior is used.
+    public bool? Think { get; set; }
+
+    // Relevant only for chat models:
+    // Controls the randomness and creativity of the model’s output.
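+    // Valid values typically range from 0.0 to 2.0.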
+ // Higher values (e.g., 1.0 or above) produce more diverse and creative responses. + // Lower values (e.g., 0.2) result in more focused and deterministic output. + // If set to 'null', the temperature is not sent and the model's default will be used. + public double? Temperature { get; set; } +} + +public class AbstractAiSettings +{ + public int? EmbeddingsMaxConcurrentBatches { get; set; } +} +``` + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/content/_open-ai-csharp.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_open-ai-csharp.mdx new file mode 100644 index 0000000000..08a595c6ce --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_open-ai-csharp.mdx @@ -0,0 +1,217 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article explains how to define a connection string to the [OpenAI Service](https://platform.openai.com/docs/guides/embeddings), + enabling RavenDB to use OpenAI models for [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx), + [Gen AI tasks](../../../ai-integration/gen-ai-integration/gen-ai-overview.mdx), and [AI agents](../../../ai-integration/ai-agents/ai-agents_overview.mdx). + +* Use this connection string format to connect RavenDB to **any OpenAI-compatible provider** that offers a compatible API. + As long as the provider follows the OpenAI API format, RavenDB will be able to use it for Embeddings generation, Gen AI tasks, and chat-based agent interactions. + +* In this article: + * [Define the connection string - from the Studio](../../../ai-integration/connection-strings/open-ai.mdx#define-the-connection-string---from-the-studio) + * [Configuring a text embedding model](../../../ai-integration/connection-strings/open-ai.mdx#configuring-a-text-embedding-model) + * [Configuring a chat model](../../../ai-integration/connection-strings/open-ai.mdx#configuring-a-chat-model) + * [Define the connection string - from the Client API](../../../ai-integration/connection-strings/open-ai.mdx#define-the-connection-string---from-the-client-api) + * [Syntax](../../../ai-integration/connection-strings/open-ai.mdx#syntax) + + + +## Define the connection string - from the Studio + +### Configuring a text embedding model + +![connection string to open ai](../assets/open-ai-1.png) + +1. **Name** + Enter a name for this connection string. + +2. **Identifier** (optional) + Learn more about the identifier in the [connection string identifier](../../../ai-integration/connection-strings/connection-strings-overview.mdx#the-connection-string-identifier) section. + +3. **Model Type** + Select "Text Embeddings". + +4. **Connector** + Select **OpenAI** from the dropdown menu. + +5. **API key** + Enter the API key used to authenticate requests to OpenAI or any OpenAI-compatible provider. + +6. **Endpoint** + Enter the base URL of the OpenAI API. + This can be the standard OpenAI endpoint or a URL provided by any OpenAI-compatible provider. + +7. **Model** + Select or enter the text embedding model to use, as provided by OpenAI or any OpenAI-compatible provider. + +8. **Organization ID** (optional) + * Set the organization ID to use for the `OpenAI-Organization` request header. + * Users belonging to multiple organizations can set this value to specify which organization is used for an API request. 
+     Usage from these API requests will count against the specified organization's quota.
+   * If not specified, the header will be omitted, and the default organization will be billed.
+     You can change your default organization in your user settings.
+   * Learn more in [Setting up your organization](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization#setting-up-your-organization).
+
+9. **Project ID** (optional)
+   * Set the project ID to use for the `OpenAI-Project` request header.
+   * Users who are accessing their projects through their legacy user API key can set this value to specify which project is used for an API request.
+     Usage from these API requests will count as usage for the specified project.
+   * If not specified, the header will be omitted, and the default project will be accessed.
+
+10. **Dimensions** (optional)
+    * Specify the number of dimensions for the output embeddings.
+      Supported only by _text-embedding-3_ and later models.
+    * If not specified, the model's default dimensionality is used.
+
+11. **Max concurrent query batches**: (optional)
+    * When making vector search queries, the content of the search terms must also be converted to embeddings to compare them against the stored vectors.
+      Requests to generate such query embeddings via the AI provider are sent in batches.
+    * This parameter defines the maximum number of these batches that can be processed concurrently.
+      You can set a default value using the [Ai.Embeddings.MaxConcurrentBatches](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxconcurrentbatches) configuration key.
+
+12. Click **Test Connection** to confirm the connection string is set up correctly.
+
+13. Click **Save** to store the connection string or **Cancel** to discard changes.
+
+### Configuring a chat model
+
+* When configuring a chat model, the UI displays the same base fields as those used for [text embedding models](../../../ai-integration/connection-strings/open-ai.mdx#configuring-a-text-embedding-model),
+  including the connection string _Name_, optional _Identifier_, _API Key_, _Endpoint_, _Model_ name, _Organization ID_, and _Project ID_.
+
+* One additional setting is specific to chat models: _Temperature_.
+
+![connection string to open ai](../assets/open-ai-2.png)
+
+1. **Model Type**
+   Select "Chat".
+
+2. **Model**
+   Enter the name of the OpenAI model to use for chat completions.
+
+3. **Temperature** (optional)
+   The temperature setting controls the randomness and creativity of the model’s output.
+   Valid values typically range from `0.0` to `2.0`:
+   * Higher values (e.g., `1.0` or above) produce more diverse and creative responses.
+   * Lower values (e.g., `0.2`) result in more focused, consistent, and deterministic output.
+   * If not explicitly set, OpenAI uses a default temperature of `1.0`.
+   See [OpenAI chat completions parameters](https://platform.openai.com/docs/api-reference/chat/create#chat_create-temperature).
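+
+Because this connection string format works with any OpenAI-compatible provider, the `Endpoint` used in
+the examples in the next section does not have to be `https://api.openai.com/v1`. The snippet below is a
+minimal sketch of pointing the same settings object at a self-hosted, OpenAI-compatible server; the
+endpoint URL and model name are hypothetical placeholders - substitute the values your provider supplies.
+
+```csharp
+using (var store = new DocumentStore())
+{
+    var connectionString = new AiConnectionString
+    {
+        Name = "ConnectionStringToCompatibleProvider",
+        ModelType = AiModelType.Chat,
+
+        OpenAiSettings = new OpenAiSettings
+        {
+            ApiKey = "your-api-key",
+            Endpoint = "http://localhost:8000/v1", // hypothetical self-hosted endpoint
+            Model = "my-local-chat-model"          // hypothetical model name
+        }
+    };
+
+    store.Maintenance.Send(new PutConnectionStringOperation(connectionString));
+}
+```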
+
+---
+
+## Define the connection string - from the Client API
+
+
+
+```csharp
+using (var store = new DocumentStore())
+{
+    // Define the connection string to OpenAI
+    var connectionString = new AiConnectionString
+    {
+        // Connection string Name & Identifier
+        Name = "ConnectionStringToOpenAI",
+        Identifier = "identifier-to-the-connection-string", // optional
+
+        // Model type
+        ModelType = AiModelType.TextEmbeddings,
+
+        // OpenAI connection settings
+        OpenAiSettings = new OpenAiSettings
+        {
+            ApiKey = "your-api-key",
+            Endpoint = "https://api.openai.com/v1",
+
+            // Name of text embedding model to use
+            Model = "text-embedding-3-small",
+
+            // Optionally, override the default maximum number of query embedding batches
+            // that can be processed concurrently
+            EmbeddingsMaxConcurrentBatches = 10
+        }
+    };
+
+    // Deploy the connection string to the server
+    var putConnectionStringOp = new PutConnectionStringOperation(connectionString);
+    var putConnectionStringResult = store.Maintenance.Send(putConnectionStringOp);
+}
+```
+
+```csharp
+using (var store = new DocumentStore())
+{
+    // Define the connection string to OpenAI
+    var connectionString = new AiConnectionString
+    {
+        // Connection string Name & Identifier
+        Name = "ConnectionStringToOpenAI",
+        Identifier = "identifier-to-the-connection-string", // optional
+
+        // Model type
+        ModelType = AiModelType.Chat,
+
+        // OpenAI connection settings
+        OpenAiSettings = new OpenAiSettings
+        {
+            ApiKey = "your-api-key",
+            Endpoint = "https://api.openai.com/v1",
+
+            // Name of chat model to use
+            Model = "gpt-4o",
+
+            // Optionally, set the model's temperature
+            Temperature = 0.4
+        }
+    };
+
+    // Deploy the connection string to the server
+    var putConnectionStringOp = new PutConnectionStringOperation(connectionString);
+    var putConnectionStringResult = store.Maintenance.Send(putConnectionStringOp);
+}
+```
+
+
+
+## Syntax
+
+
+```csharp
+public class AiConnectionString
+{
+    public string Name { get; set; }
+    public string Identifier { get; set; }
+    public AiModelType ModelType { get; set; }
+    public OpenAiSettings OpenAiSettings { get; set; }
+}
+
+public class OpenAiSettings : AbstractAiSettings
+{
+    public string ApiKey { get; set; }
+    public string Endpoint { get; set; }
+    public string Model { get; set; }
+    public string OrganizationId { get; set; }
+    public string ProjectId { get; set; }
+
+    // Relevant only for text embedding models:
+    // Specifies the number of dimensions in the generated embedding vectors.
+    public int? Dimensions { get; set; }
+
+    // Relevant only for chat models:
+    // Controls the randomness and creativity of the model’s output.
+    // Higher values (e.g., 1.0 or above) produce more diverse and creative responses.
+    // Lower values (e.g., 0.2) result in more focused and deterministic output.
+    // If set to 'null', the temperature is not sent and the model's default will be used.
+    public double? Temperature { get; set; }
+}
+
+public class AbstractAiSettings
+{
+    public int? EmbeddingsMaxConcurrentBatches { get; set; }
+}
+```
+
diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/content/_vertex-ai-csharp.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_vertex-ai-csharp.mdx
new file mode 100644
index 0000000000..b32a13e501
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/connection-strings/content/_vertex-ai-csharp.mdx
@@ -0,0 +1,152 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article explains how to define a connection string to [Vertex AI](https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings),
+  enabling RavenDB to seamlessly integrate its [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx) with Google Cloud’s Vertex AI services.
+
+* This configuration supports **Vertex AI embeddings** only.
+  It is not compatible with Google AI (Gemini API) endpoints or API key authentication.
+
+* RavenDB currently supports only text embeddings with Vertex AI.
+  Chat models are not supported through this integration.
+
+* In this article:
+  * [Define the connection string - from the Studio](../../../ai-integration/connection-strings/vertex-ai.mdx#define-the-connection-string---from-the-studio)
+  * [Define the connection string - from the Client API](../../../ai-integration/connection-strings/vertex-ai.mdx#define-the-connection-string---from-the-client-api)
+  * [Syntax](../../../ai-integration/connection-strings/vertex-ai.mdx#syntax)
+
+
+
+## Define the connection string - from the Studio
+
+![connection string to vertex ai](../assets/vertex-ai.png)
+
+1. **Name**
+   Enter a name for this connection string.
+
+2. **Identifier** (optional)
+   Enter an identifier for this connection string.
+   Learn more about the identifier in the [connection string identifier](../../../ai-integration/connection-strings/connection-strings-overview.mdx#the-connection-string-identifier) section.
+
+3. **Model Type**
+   Select "Text Embeddings".
+
+4. **Connector**
+   Select **Vertex AI** from the dropdown menu.
+
+5. **AI Version** (optional)
+   * Select the Vertex AI version to use.
+   * If not specified, `V1_Beta` is used.
+     Learn more in the [Vertex AI REST API reference](https://cloud.google.com/vertex-ai/docs/reference/rest).
+
+6. **Google Credentials Json**
+   Click "Show credentials" to enter your Google Cloud credentials in JSON format.
+   These credentials are used to authenticate requests to Vertex AI services.
+   To generate this JSON, follow the steps in [Google's guide to creating service account credentials](https://developers.google.com/workspace/guides/create-credentials#service-account).
+
+   Example:
+
+   ```json
+   {
+       "type": "service_account",
+       "project_id": "test-raven-237012",
+       "private_key_id": "12345678123412341234123456789101",
+       "private_key": "-----BEGIN PRIVATE KEY-----\\abCse=-----END PRIVATE KEY-----",
+       "client_email": "raven@test-raven-237012-237012.iam.gserviceaccount.com",
+       "client_id": "111390682349634407434",
+       "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+       "token_uri": "https://oauth2.googleapis.com/token",
+       "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+       "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/viewonly%40test-raven-237012.iam.gserviceaccount.com"
+   }
+   ```
+
+7. **Model**
+   Select or enter the Vertex AI text embedding model to use.
+
+8. **Location**
+   The Google Cloud region where the Vertex AI model is hosted (e.g., _us-central1_).
+
+9. **Max concurrent query batches**: (optional)
+   * When making vector search queries, the content of the search terms must also be converted to embeddings to compare them against the stored vectors.
+     Requests to generate such query embeddings via the AI provider are sent in batches.
+   * This parameter defines the maximum number of these batches that can be processed concurrently.
+     You can set a default value using the [Ai.Embeddings.MaxConcurrentBatches](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxconcurrentbatches) configuration key.
+
+10. Click **Test Connection** to confirm the connection string is set up correctly.
+
+11. Click **Save** to store the connection string or **Cancel** to discard changes.
+
+## Define the connection string - from the Client API
+
+
+```csharp
+using (var store = new DocumentStore())
+{
+    // Define the connection string to Vertex AI
+    var connectionString = new AiConnectionString
+    {
+        // Connection string name & identifier
+        Name = "ConnectionStringToVertexAI",
+        Identifier = "identifier-to-the-connection-string", // optional
+
+        // Model type
+        ModelType = AiModelType.TextEmbeddings,
+
+        // Vertex AI connection settings
+        VertexSettings = new VertexSettings(
+            model: "text-embedding-005",     // Name of the Vertex AI model to use
+            googleCredentialsJson: "{...}",  // Contents of your service account JSON file
+            location: "us-central1",         // Region where the model is hosted
+            aiVersion: VertexAIVersion.V1)   // Optional: specify V1 or V1_Beta
+    };
+
+    // Optionally, override the default maximum number of query embedding batches
+    // that can be processed concurrently
+    connectionString.VertexSettings.EmbeddingsMaxConcurrentBatches = 10;
+
+    // Deploy the connection string to the server
+    var operation = new PutConnectionStringOperation(connectionString);
+    var putConnectionStringResult = store.Maintenance.Send(operation);
+}
+```
+
+
+## Syntax
+
+
+```csharp
+public class AiConnectionString
+{
+    public string Name { get; set; }
+    public string Identifier { get; set; }
+    public AiModelType ModelType { get; set; }
+    public VertexSettings VertexSettings { get; set; }
+}
+
+public class VertexSettings : AbstractAiSettings
+{
+    public string Model { get; set; }
+    public string GoogleCredentialsJson { get; set; }
+    public string Location { get; set; }
+    public VertexAIVersion? AiVersion { get; set; }
+}
+
+public enum VertexAIVersion
+{
+    V1,      // Represents the "V1" version of the Vertex AI API.
+    V1_Beta  // Represents the "V1 beta" version of the Vertex AI API.
+}
+
+public class AbstractAiSettings
+{
+    public int?
EmbeddingsMaxConcurrentBatches { get; set; } +} +``` + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/embedded.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/embedded.mdx new file mode 100644 index 0000000000..6c64d1df75 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/embedded.mdx @@ -0,0 +1,33 @@ +--- +title: "Connection String to bge-micro-v2 (Embedded)" +hide_table_of_contents: true +sidebar_label: bge-micro-v2 (Embedded) +sidebar_position: 8 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import EmbeddedCsharp from './content/_embedded-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/google-ai.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/google-ai.mdx new file mode 100644 index 0000000000..5812dd87c7 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/google-ai.mdx @@ -0,0 +1,33 @@ +--- +title: "Connection String to Google AI" +hide_table_of_contents: true +sidebar_label: Google AI +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GoogleAiCsharp from './content/_google-ai-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/hugging-face.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/hugging-face.mdx new file mode 100644 index 0000000000..3c8dd6bed7 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/hugging-face.mdx @@ -0,0 +1,33 @@ +--- +title: "Connection String to Hugging Face" +hide_table_of_contents: true +sidebar_label: Hugging Face +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HuggingFaceCsharp from './content/_hugging-face-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/mistral-ai.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/mistral-ai.mdx new file mode 100644 index 0000000000..fbd5d11c65 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/mistral-ai.mdx @@ -0,0 +1,33 @@ +--- +title: "Connection String to Mistral AI" +hide_table_of_contents: true +sidebar_label: Mistral AI +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import MistralAiCsharp from './content/_mistral-ai-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/ollama.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/ollama.mdx new file mode 100644 index 0000000000..ce6ed2238d --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/ollama.mdx @@ -0,0 +1,33 @@ +--- +title: "Connection String to Ollama" +hide_table_of_contents: true +sidebar_label: Ollama +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent 
from "@site/src/components/LanguageContent"; + +import OllamaCsharp from './content/_ollama-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/open-ai.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/open-ai.mdx new file mode 100644 index 0000000000..4acddfe3e8 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/open-ai.mdx @@ -0,0 +1,33 @@ +--- +title: "Connection String to OpenAI and OpenAI-Compatible Providers" +hide_table_of_contents: true +sidebar_label: OpenAI & Compatible Providers +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OpenAiCsharp from './content/_open-ai-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/connection-strings/vertex-ai.mdx b/versioned_docs/version-7.1/ai-integration/connection-strings/vertex-ai.mdx new file mode 100644 index 0000000000..540e396a4b --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/connection-strings/vertex-ai.mdx @@ -0,0 +1,33 @@ +--- +title: "Connection String to Vertex AI" +hide_table_of_contents: true +sidebar_label: Vertex AI +sidebar_position: 7 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GoogleAiCsharp from './content/_vertex-ai-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/_category_.json b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/_category_.json new file mode 100644 index 0000000000..6970b8e6ed --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": "GenAI Integration" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/article-cover-genai.webp b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/article-cover-genai.webp new file mode 100644 index 0000000000..3711eca3d2 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/article-cover-genai.webp differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_overview_hash-flow.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_overview_hash-flow.png new file mode 100644 index 0000000000..f606f7a99c Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_overview_hash-flow.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_overview_licensing.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_overview_licensing.png new file mode 100644 index 0000000000..044434cf2a Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_overview_licensing.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_overview_metadata.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_overview_metadata.png new file mode 100644 index 0000000000..a1f485b5c0 Binary files /dev/null and 
b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_overview_metadata.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_start_api-image.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_start_api-image.png new file mode 100644 index 0000000000..b43a888381 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_start_api-image.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_start_ov-image.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_start_ov-image.png new file mode 100644 index 0000000000..0f240ad3b5 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_start_ov-image.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_start_studio-image.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_start_studio-image.png new file mode 100644 index 0000000000..612fb0c120 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/gen-ai_start_studio-image.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/snagit/gen-ai_overview_hash-flow.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/snagit/gen-ai_overview_hash-flow.snagx new file mode 100644 index 0000000000..847b6d2e54 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/snagit/gen-ai_overview_hash-flow.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/snagit/gen-ai_overview_metadata.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/snagit/gen-ai_overview_metadata.snagx new file mode 100644 index 0000000000..91ad8c67f5 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/snagit/gen-ai_overview_metadata.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/unlock-genai-potential-article-image.webp b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/unlock-genai-potential-article-image.webp new file mode 100644 index 0000000000..7abb8a04e8 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/assets/unlock-genai-potential-article-image.webp differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/_category_.json b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/_category_.json new file mode 100644 index 0000000000..6f210621b6 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": "Create GenAI Task" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_add-GenAI-task.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_add-GenAI-task.png new file mode 100644 index 0000000000..6ca3125eb3 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_add-GenAI-task.png differ diff --git 
a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_configure-basic-settings.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_configure-basic-settings.png new file mode 100644 index 0000000000..073efd9537 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_configure-basic-settings.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_define-prompt-and-json-schema.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_define-prompt-and-json-schema.png new file mode 100644 index 0000000000..44c925f415 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_define-prompt-and-json-schema.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_generate-context-objects.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_generate-context-objects.png new file mode 100644 index 0000000000..07dba7cc9d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_generate-context-objects.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_hash-flow.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_hash-flow.png new file mode 100644 index 0000000000..b78423a9ec Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_hash-flow.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_licensing.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_licensing.png new file mode 100644 index 0000000000..044434cf2a Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_licensing.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_metadata-identifier-and-hash-codes.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_metadata-identifier-and-hash-codes.png new file mode 100644 index 0000000000..9784820ac8 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_metadata-identifier-and-hash-codes.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_ollama-connection-string.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_ollama-connection-string.png new file mode 100644 index 0000000000..9d18cccc93 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_ollama-connection-string.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_playground-generated-context-objects.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_playground-generated-context-objects.png new file mode 100644 index 
0000000000..bd71b2dac0 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_playground-generated-context-objects.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_playground-prompt-and-json-schema.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_playground-prompt-and-json-schema.png new file mode 100644 index 0000000000..8da50d9727 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_playground-prompt-and-json-schema.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_playground-provide-update-script.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_playground-provide-update-script.png new file mode 100644 index 0000000000..8c2a6ad150 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_playground-provide-update-script.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_provide-update-script.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_provide-update-script.png new file mode 100644 index 0000000000..efda6e361b Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_provide-update-script.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_review-task-configuration.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_review-task-configuration.png new file mode 100644 index 0000000000..4c396decf8 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_review-task-configuration.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_select-ai-task-type.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_select-ai-task-type.png new file mode 100644 index 0000000000..4dbd80ccc5 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/gen-ai_select-ai-task-type.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_add-GenAI-task.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_add-GenAI-task.snagx new file mode 100644 index 0000000000..663a378158 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_add-GenAI-task.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_configure-basic-settings.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_configure-basic-settings.snagx new file mode 100644 index 0000000000..495f5903fc Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_configure-basic-settings.snagx differ diff --git 
a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_define-prompt-and-json-schema.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_define-prompt-and-json-schema.snagx new file mode 100644 index 0000000000..8a81211ba3 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_define-prompt-and-json-schema.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_generate-context-objects.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_generate-context-objects.snagx new file mode 100644 index 0000000000..288e06443b Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_generate-context-objects.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_hash-flow.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_hash-flow.snagx new file mode 100644 index 0000000000..7e6eff9d12 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_hash-flow.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_licensing.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_licensing.snagx new file mode 100644 index 0000000000..cb3a8439a6 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_licensing.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_metadata-identifier-and-hash-codes.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_metadata-identifier-and-hash-codes.snagx new file mode 100644 index 0000000000..f9b564b644 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_metadata-identifier-and-hash-codes.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_ollama-connection-string.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_ollama-connection-string.snagx new file mode 100644 index 0000000000..fbd6c6f6b4 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_ollama-connection-string.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_playground-generated-context-objects.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_playground-generated-context-objects.snagx new file mode 100644 index 0000000000..c7c56e0b80 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_playground-generated-context-objects.snagx differ diff --git 
a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_playground-prompt-and-json-schema.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_playground-prompt-and-json-schema.snagx new file mode 100644 index 0000000000..1bb5dee5ca Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_playground-prompt-and-json-schema.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_playground-provide-update-script.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_playground-provide-update-script.snagx new file mode 100644 index 0000000000..43e2c0fbfe Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_playground-provide-update-script.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_provide-update-script.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_provide-update-script.snagx new file mode 100644 index 0000000000..ba6b0ab7c1 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_provide-update-script.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_review-task-configuration.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_review-task-configuration.snagx new file mode 100644 index 0000000000..4a18f8389d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_review-task-configuration.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_select-ai-task-type.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_select-ai-task-type.snagx new file mode 100644 index 0000000000..986b78d129 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/assets/snagit/gen-ai_select-ai-task-type.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api.mdx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api.mdx new file mode 100644 index 0000000000..3b1738efff --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api.mdx @@ -0,0 +1,485 @@ +--- +title: "Create GenAI Task: API" +hide_table_of_contents: true +sidebar_label: Client API +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Create GenAI Task: API + + + +* A GenAI task leverages an AI model to enable intelligent processing of documents at runtime.
+ * The task is associated with a document collection and with an AI model. + * It is an **ongoing task** that: + 1. Continuously monitors the collection; + 2. Whenever needed, e.g. when a document is added to the collection, generates + user-defined context objects based on the source document data; + 3. Passes each context object to the AI model for further processing; + 4. Receives the AI model's JSON-based results; + 5. Finally, runs a user-defined script that potentially acts upon the results. + +* The main steps in defining a GenAI task are: + * Defining a [Connection string](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api#defining-a-connection-string) + to the AI model + * Defining a [Context generation script](../../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_context-objects) + * Defining a [Prompt](../../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_prompt) + * Defining a [JSON schema](../../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_json-schema) + * Defining an [Update script](../../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_update-script) + +* In this article: + * [Defining a Connection string](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api#defining-a-connection-string) + * [Defining the GenAI task](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api#defining-the-genai-task) + * [Full example](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api#full-example) + + + +
+ +## Defining a Connection string + +* Choose the model to connect to based on what you need from your GenAI task. + E.g., if you require security and speed above all during a rapid + development phase, you may prefer a local AI service like [Ollama](../../../ai-integration/connection-strings/ollama). +* Make sure you define the correct service: both Ollama and OpenAI are supported, + but you need to pick a model that supports text generation, + like Ollama `llama3.2` or OpenAI `gpt-4o-mini`. +* Learn more about connection strings [here](../../../ai-integration/connection-strings/connection-strings-overview). + +### Example: + + + + +```csharp +using (var store = new DocumentStore()) +{ + // Define the connection string to OpenAI + var connectionString = new AiConnectionString + { + // Connection string name & identifier + Name = "open-ai-cs", + + // Connection type + ModelType = AiModelType.Chat, + + // OpenAI connection settings + OpenAiSettings = new OpenAiSettings( + apiKey: "your-api-key", + endpoint: "https://api.openai.com/v1", + // text generation model + model: "gpt-4o-mini") + }; + + // Deploy the connection string to the server + var operation = new PutConnectionStringOperation(connectionString); + var putConnectionStringResult = store.Maintenance.Send(operation); +} +``` + + + +```csharp +using (var store = new DocumentStore()) +{ + // Define the connection string to Ollama + var connectionString = new AiConnectionString + { + // Connection string name & identifier + Name = "ollama-cs", + + // Connection type + ModelType = AiModelType.Chat, + + // Ollama connection settings + OllamaSettings = new OllamaSettings( + // LLM model for text generation + model: "llama3.2", + // local URL + uri: "http://localhost:11434/") + }; + + // Deploy the connection string to the server + var operation = new PutConnectionStringOperation(connectionString); + var putConnectionStringResult = store.Maintenance.Send(operation); +} +``` + + + +### Syntax: + + + +```csharp +public class AiConnectionString +{ + public string Name { get; set; } + public AiModelType ModelType { get; set; } + public string Identifier { get; set; } + public OpenAiSettings OpenAiSettings { get; set; } + ... +} + +public class OpenAiSettings : AbstractAiSettings +{ + public string ApiKey { get; set; } + public string Endpoint { get; set; } + public string Model { get; set; } + public int? Dimensions { get; set; } + public string OrganizationId { get; set; } + public string ProjectId { get; set; } +} +``` + + + +```csharp +public class AiConnectionString +{ + public string Name { get; set; } + public AiModelType ModelType { get; set; } + public string Identifier { get; set; } + public OllamaSettings OllamaSettings { get; set; } + ... +} + +public class OllamaSettings : AbstractAiSettings +{ + public string Model { get; set; } + public string Uri { get; set; } +} +``` + + + +
+ +## Defining the GenAI task + +* Define a GenAI task using a `GenAiConfiguration` object. +* Run the task using `AddGenAiOperation`. + + + + +```csharp +// Define a GenAI task configuration +GenAiConfiguration config = new GenAiConfiguration +{ + // Task name + Name = "spam-filter", + + // Unique user-defined task identifier + Identifier = "spam-filter", + + // Connection string to AI model + ConnectionStringName = "open-ai-cs", + + // Task is enabled + Disabled = false, + + // Collection associated with the task + Collection = "Posts", + + // Context generation script - format for objects to be sent to the AI model + GenAiTransformation = new GenAiTransformation + { + Script = @" + for(const comment of this.Comments) + { + ai.genContext({Text: comment.Text, Author: comment.Author, Id: comment.Id}); + }" + }, + + // AI model Prompt - the instructions sent to the AI model + Prompt = @" + Check if the following blog post comment is spam or not. + A spam comment typically includes irrelevant or promotional content, + excessive links, misleading information, or is written with the intent + to manipulate search engines or advertise products/services. + Consider the language, intent, and relevance of the comment for + the blog post content.", + + // Sample object - the layout for the AI model's response + SampleObject = @" + { + ""Blocked"": true, + ""Reason"": ""Concise reason for why this comment was marked as spam or ham"" + }", + + // Update script - specifies what to do with AI model replies. + // Use `$input` to access the context object that was sent to the AI model. + // Use `$output` to access the results object returned from the AI model. + // Use `this` to access and modify the currently processed document. + UpdateScript = @" + // Find the comment + const idx = this.Comments.findIndex(c => c.Id == $input.Id); + // Was detected as spam + if($output.Blocked) + { + // Remove this comment + this.Comments.splice(idx, 1); + }", + + // Max concurrent connections to AI model + MaxConcurrency = 4 +}; + +// Run the task +var GenAiOperation = new AddGenAiOperation(config); +var addAiIntegrationTaskResult = store.Maintenance.Send(GenAiOperation); +``` + + + +```csharp +// Define a GenAI task configuration +GenAiConfiguration config = new GenAiConfiguration +{ + // Task name + Name = "spam-filter", + + // Unique user-defined task identifier + Identifier = "spam-filter", + + // Connection string to AI model + ConnectionStringName = "open-ai-cs", + + // Task is enabled + Disabled = false, + + // Collection associated with the task + Collection = "Posts", + + // Context generation script - format for objects to be sent to the AI model + GenAiTransformation = new GenAiTransformation + { + Script = @" + for(const comment of this.Comments) + { + ai.genContext({Text: comment.Text, Author: comment.Author, Id: comment.Id}); + }" + }, + + // AI model Prompt - the instructions sent to the AI model + Prompt = @" + Check if the following blog post comment is spam or not. + A spam comment typically includes irrelevant or promotional content, + excessive links, misleading information, or is written with the intent + to manipulate search engines or advertise products/services. 
+ Consider the language, intent, and relevance of the comment for + the blog post content.", + + // JSON schema - a schema to format the AI model's replies by + JsonSchema = @"{ + ""name"": """ + "some-name" + @""", + ""strict"": true, + ""schema"": { + ""type"": ""object"", + ""properties"": { + ""Blocked"": { + ""type"": ""boolean"" + }, + ""Reason"": { + ""type"": ""string"", + ""description"": ""Concise reason for why this comment was marked as spam or ham"" + } + }, + ""required"": [ + ""Blocked"", + ""Reason"" + ], + ""additionalProperties"": false + } + }", + + // Update script - specifies what to do with AI model replies. + // Use `$input` to access the context object that was sent to the AI model. + // Use `$output` to access the results object returned from the AI model. + // Use `this` to access and modify the currently processed document. + UpdateScript = @" + // Find the comment + const idx = this.Comments.findIndex(c => c.Id == $input.Id); + // Was detected as spam + if($output.Blocked) + { + // Remove this comment + this.Comments.splice(idx, 1); + }", + + // Max concurrent connections to AI model + MaxConcurrency = 4 +}; + +// Run the task +var GenAiOperation = new AddGenAiOperation(config); +var addAiIntegrationTaskResult = store.Maintenance.Send(GenAiOperation); +``` + + + +### `GenAiConfiguration` + +| Parameter | Type | Description | +| ------------- | ------------- | ----- | +| **Name** | `string` | Task name | +| **Identifier** | `string` | Unique user-defined task identifier
Use only lowercase letters, numbers, and hyphens | +| **ConnectionStringName** | `string` | Connection string name | +| **Disabled** | `bool` | Determines whether the task is enabled or disabled | +| **Collection** | `string` | Name of the document collection associated with the task | +| **GenAiTransformation** | `GenAiTransformation` | Context generation script - format for objects to be sent to the AI model | +| **Prompt** | `string` | AI model Prompt - the instructions sent to the AI model | +| **SampleObject** | `string` | A [sample response object](../../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_json-schema) to format the AI model's replies by
If both a `SampleObject` and a `JsonSchema` are provided, the schema takes precedence | + | **JsonSchema** | `string` | A [JSON schema](../../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_json-schema) to format the AI model's replies by
If both a `SampleObject` and a `JsonSchema` are provided, the schema takes precedence | + | **UpdateScript** | `string` | Update script - specifies what to do with AI model replies | + | **MaxConcurrency** | `int` | Max concurrent connections to the AI model (each connection serving a single context object) | + +
+ +## Full example + +The following example demonstrates how to define a GenAI task that removes spam comments from blog posts. + +After creating a connection string to the AI model, we define a GenAI task that: +1. Monitors the `Posts` collection. +2. For each document, generates a context object for each comment in the `Comments` array. +3. Sends each context object to the AI model with a prompt to check if the comment is spam. +4. Receives an AI model response per context object that determines whether the comment is spam or not and specifies the reasoning for the decision. +5. If the comment is marked as spam, the task's update script removes the comment from the `Comments` array in the document. + +After running the task, we demonstrate its functionality by adding a blog post that includes a spammy comment to the `Posts` collection. Adding the post triggers the task, which will scan the post's comments and remove the one that contains spam. + +```csharp +// Define a connection string to OpenAI +var connectionString = new AiConnectionString +{ + // Connection string name & identifier + Name = "open-ai-cs", + + ModelType = AiModelType.Chat, + + // OpenAI connection settings + + OpenAiSettings = new OpenAiSettings( + apiKey: "your-api-key", + endpoint: "https://api.openai.com/v1", + // LLM model for text generation + model: "gpt-4.1") +}; + +// Deploy the connection string to the server +var operation = new PutConnectionStringOperation(connectionString); +var putConnectionStringResult = store.Maintenance.Send(operation); + +// Define a GenAI task configuration +GenAiConfiguration config = new GenAiConfiguration +{ + // Task name + Name = "spam-filter", + + // Unique user-defined task identifier + Identifier = "spam-filter", + + // Connection string to AI model + ConnectionStringName = "open-ai-cs", + + // Task is enabled + Disabled = false, + + // Collection associated with the task + Collection = "Posts", + + // Context generation script - format for objects to be sent to the AI model + GenAiTransformation = new GenAiTransformation + { + Script = @" + for(const comment of this.Comments) + { + ai.genContext({Text: comment.Text, Author: comment.Author, Id: comment.Id}); + }" + }, + + // AI model Prompt - the instructions sent to the AI model + Prompt = @" + Check if the following blog post comment is spam or not. + A spam comment typically includes irrelevant or promotional content, + excessive links, misleading information, or is written with the intent + to manipulate search engines or advertise products/services. + Consider the language, intent, and relevance of the comment for + the blog post content.", + + // Sample object - the layout for the AI model's response + SampleObject = JsonConvert.SerializeObject( + new + { + Blocked = true, + Reason = "Concise reason for why this comment was marked as spam or ham" + }), + + // Update script - specifies what to do with AI model replies. + // Use `$input` to access the context object that was sent to the AI model. + // Use `$output` to access the results object returned from the AI model. + // Use `this` to access and modify the currently processed document. 
+ UpdateScript = @" + // Find the comment + const idx = this.Comments.findIndex(c => c.Id == $input.Id); + // Was detected as spam + if($output.Blocked) + { + // Remove this comment + this.Comments.splice(idx, 1); + }", + + // Max concurrent connections to AI model + MaxConcurrency = 4 +}; + +// Run the task +var GenAiOperation = new AddGenAiOperation(config); +var addAiIntegrationTaskResult = store.Maintenance.Send(GenAiOperation); + +// Add a blog post document that includes a spam comment to the Posts collection. +// Adding the post will trigger the GenAI task to process it. +using (var session = store.OpenSession()) +{ + var post = new + { + Name = "first post", + Body = "This is my first post", + Comments = new[] + { + new + { + Id = "comment/1", + Text = "This article really helped me understand how indexes work in RavenDB. Great write-up!", + Author = "John" + }, + new + { + Id = "comment/2", + Text = "Learn how to make $5000/month from home! Visit click4cash.biz.example now!!!", + Author = "shady_marketer" + }, + new + { + Id = "comment/3", + Text = "I tried this approach with IO_Uring in the past, but I run into problems " + + "with security around the IO systems and the CISO didn't let us deploy that to " + + "production. It is more mature at this point?", + Author = "dave" + } + } + }; + + session.Store(post, "posts/1"); + session.Advanced.GetMetadataFor(post)["@collection"] = "Posts"; + session.SaveChanges(); +} +``` + diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio.mdx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio.mdx new file mode 100644 index 0000000000..20d949242a --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio.mdx @@ -0,0 +1,383 @@ +--- +title: "Create GenAI Task: Studio" +hide_table_of_contents: true +sidebar_label: Studio +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Create GenAI Task: Studio + + +* In this article: + * [The GenAI Task wizard](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#the-genai-task-wizard) + * [Add a GenAI Task](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#add-a-genai-task) + * [Configure basic settings](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#configure-basic-settings) + * [Generate context objects](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#generate-context-objects) + * [Define Prompt and JSON schema](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#define-prompt-and-json-schema) + * [Provide update script](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#provide-update-script) + * [Review configuration and Save task](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#review-configuration-and-save-task) + + + +
+ +## The GenAI Task wizard +Studio's [AI Tasks](../../../ai-integration/ai-tasks-list-view) view includes a GenAI **wizard**. +Using this wizard, you can easily **create and configure** your task, as well as **test each step +of its creation** in a dedicated "playground". +We will go through the task creation and testing sequence below, using the wizard. + +### Sample data: +While demonstrating the creation and testing of a GenAI task, we will use the following +sample document, illustrating a blog post with an array of comments, of which one is spam. +We will use our GenAI task to go through the comments and identify spam entries so we can +remove them. +To use this sample throughout this guide, simply create a document named `posts/1` with the +following content. + + +```json +{ + "Name": "first post", + "Body": "This is my first post", + "Comments": [ + { + "Id": "comment/1", + "Text": "This article really helped me understand how indexes work in RavenDB. Great write-up!", + "Author": "John" + }, + { + "Id": "comment/2", + "Text": "Learn how to make $5000/month from home! Visit click4cash.biz.example now!!!", + "Author": "shady_marketer" + }, + { + "Id": "comment/3", + "Text": "I tried this approach with IO_Uring in the past, but I run into problems with security around the IO systems and the CISO didn't let us deploy that to production. It is more mature at this point?", + "Author": "dave" + } + ], + "@metadata": { + "@collection": "Posts" + } +} +``` + +
+ +## Add a GenAI Task +To add a new GenAI task, open: **AI Hub** > **AI Tasks** > **Add AI Task** > **GenAI** + +![Add a GenAI Task](./assets/gen-ai_add-GenAI-task.png) + +1. **AI Hub** + Click to open the [AI Hub view](../../../ai-integration/ai-tasks-list-view). + Use this view to handle AI connection strings and tasks, and to view task statistics. +2. **AI Tasks** + Click to open the AI Tasks view. + Use this view to list, configure, or remove AI tasks. +3. **Add AI Task** + Click to add an AI task. + ![Add a GenAI Task](./assets/gen-ai_select-ai-task-type.png) + Click the GenAI option to open a wizard that will guide you through the creation and testing of your GenAI task. + The steps of this wizard are explained below, starting with basic GenAI task settings. + +
+ +## Configure basic settings + +![Configure basic settings](./assets/gen-ai_configure-basic-settings.png) + +1. **Task name** + Give your task a meaningful name. + +2.
**Unique user-defined task identifier** + Give your task a unique identifier. + * Use only lowercase letters, numbers, and hyphens. + * You can provide the identifier yourself, or click **Regenerate** to create it automatically. + * When you complete and save your task and it starts running, it will add a metadata property to documents it processes, named after the identifier you define here. + The task will use this property to keep track of document parts it has already processed. + See an example [here](../../../ai-integration/gen-ai-integration/gen-ai-overview#gen-ai-metadata), and a hypothetical sketch after this list. + +3. **Task state** + Use this switch to enable or disable the task. + +4. **Set responsible node** + Toggle ON to choose the cluster node that will be responsible for this task. + Toggle OFF for the cluster to pick a responsible node for you. + +5. **Connection string** + The GenAI task will use an AI model to process your data. + It can be a local AI model like Ollama, or an external model like OpenAI. + Use this bar to select or create the connection string that the GenAI task + will use to connect to the AI model. + * You can create the connection string either here or in the dedicated + [AI Connection Strings](../../../ai-integration/connection-strings/connection-strings-overview) view. + * Here is an example of a connection string to a local [Ollama](../../../ai-integration/connection-strings/ollama) + AI model capable of filtering spam entries from a blog. + + ![Ollama connection string](./assets/gen-ai_ollama-connection-string.png) + +6. **Steps completed** + You can use this interactive board as you advance through the wizard to see which steps you have completed and what is still left to define. Click a listed configuration option to modify its settings. + +
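+
+A hypothetical sketch of the metadata property mentioned in step 2 above - the task identifier serves as the property name, and the hash values are illustrative placeholders only (the exact layout may differ; see the linked example in step 2):
+
+```json
+{
+    "@metadata": {
+        "@collection": "Posts",
+        "spam-filter": [
+            "<hash of first processed part>",
+            "<hash of second processed part>"
+        ]
+    }
+}
+```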
+ +## Generate context objects + +![Generate context objects](./assets/gen-ai_generate-context-objects.png) + +1. **Source collection** + Select the collection whose documents this GenAI task will monitor and process. + E.g., `Posts` + +2.
**Context generation script** + Provide a JavaScript that your GenAI task will run over each document it retrieves + from the selected collection. + The purpose of this script is to form a `Context object` containing data extracted from the document, + which the AI model will be able to process effectively. + E.g., + + ```javascript + // Go through all the comments that were left for this blog + for(const comment of this.Comments) + { + // Use the `ai.genContext` method to generate a context object for each comment + // that includes the comment text, author, and id. + ai.genContext({Text: comment.Text, Author: comment.Author, Id: comment.Id}); + } + ``` + + +3. **Playground** + Each of the steps from now on is equipped with its own playground, allowing you + to test what actually happens when you apply your configuration. + + The playground is a secluded environment; using it will **not** modify your documents. + + * **Collapse/Expand** + Toggle to hide or show the playground area. + * **Edit mode** + * Toggle OFF to use the selected document as the source for the generated context. + * Toggle ON to edit the document freely before running the test. + * **Select a document from the source collection** + Select a document to test your context generation script on. + * To use the same sample document we're using to demonstrate the process, + add [posts/1](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#sample-data) and select it here. + * Or, if you prefer, click `enter a document manually` and enter the sample document content yourself. + * To run the test, click the **Test context** button. + If all works well, you will see a list of context objects created by your script, one for each comment. + + ![Playground: Generated context objects](./assets/gen-ai_playground-generated-context-objects.png) + +4. **Controls** + * **Cancel** + Click to cancel any changes made in the task. + * **Back** + Click to return to the previous step, [Configure basic settings](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#configure-basic-settings). + * **Test Context** + Click to test your context generation script on the document selected/entered in the playground area. + * You do not have to use the playground; you'll be able to define and save your task without testing + it first. + * However, running the test here will allow you to use the generated result set in the playground of + the next wizard step. + * **Next** + Click to advance to the next step, [Define prompt & JSON schema](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#define-prompt-and-json-schema). + +
+ +## Define Prompt and JSON schema + +* The GenAI task will send the AI model each context object (configured in the previous step) + on its own connection, along with the prompt and JSON schema you provide in this view. +* The context provides the data for the model to process. + The prompt determines what the model should do with the data. + The JSON schema formats the returned results, so the GenAI task can use them effectively. + +![Define Prompt and JSON schema](./assets/gen-ai_define-prompt-and-json-schema.png) + +1. **Prompt** + These are the instructions for the AI model. + For our spam filtering GenAI task, we can specify, for example: + + + ```plain + Check if the following blog post comment is spam or not. + A spam comment typically includes irrelevant or promotional content, + excessive links, misleading information, or is written with the intent to + manipulate search engines or advertise products/services. + Consider the language, intent, and relevance of the comment for + the blog post content. + ``` + + +2. **JSON schema** + The AI model will return a results JSON object for each context object sent to it. + The JSON schema we set here defines the layout of this results object. + * **Use sample object** + * Select this option to provide an object that the AI model will use as an example. + The results object will be formatted as the sample object you provide. + * Textual fields in the sample object can be written in natural language, + guiding the AI model on what to write in the results. + + E.g., if you select this option and provide this object: + + + ```json + { + "Blocked": true, + "Reason": "Concise reason for why this comment was marked as spam or ham" + } + ``` + + + Then result objects returned by the AI model may look like: + + + ```json + { + "Blocked": false, + "Reason": "Relevant and genuine" + } + ``` + + + + ```json + { + "Blocked": true, + "Reason": "Spam" + } + ``` + + + * **Provide JSON schema** + Instead of a sample object, you can provide a formal JSON schema. + Providing a sample object (rather than a formal schema) is normally more convenient. + Behind the scenes, RavenDB will send a formal schema in any case, since this is the + format that the LLM expects to receive. If you provide a schema, RavenDB will send it + as is; if you provide a sample object, RavenDB will translate it into a schema for you + before sending it to the LLM. + +3. **Playground** + Use this playground to send the AI model context objects with their prompts and schemas, + and see the results returned by the AI model. + * **Collapse/Expand** + Toggle to hide or show the playground area. + * **Edit mode** + * Toggle OFF to use the results generated using the playground of the previous step. + * Toggle ON to edit the context objects freely before trying out your prompt and schema on them. + This option gives you the freedom to test any context objects you like, regardless of the results + generated by the playground of the previous step. + * To run the test, click the **Test model** button. + The GenAI task will send the model each context object on its own connection, accompanied + by the prompt and JSON schema defined above. + The AI model will process the context objects and return results in the format set by your schema. + E.g. - + + ![Playground: prompt & JSON schema](./assets/gen-ai_playground-prompt-and-json-schema.png) + +4. **Controls** + * **Cancel** + Click to cancel any changes made in the task. 
+ * **Back** + Click to return to the previous step, [Generate context objects](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#generate-context-objects). + * **Test Model** + Click to test the prompt and JSON schema you defined above on the context objects generated from the + document you provided. + * **Next** + Click to advance to the next step, [Provide update script](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#provide-update-script). + +
+ +## Provide update script + +Now that the AI model has returned its output, the GenAI task needs to know what to do with it. +The update script set in this step determines what actions are taken when the results arrive. + +![Provide update script](./assets/gen-ai_provide-update-script.png) + +1. **Update script** + Provide a JavaScript that processes each results object returned from the AI model and takes the needed actions. + In our case, as the results determine whether each blog comment is spam or not, the script can react to results indicating that a comment is spam by removing that comment. + In the script, we can use the `$input` variable to access the context object that was sent to the AI model (the sample below uses it to locate the comment by its `Id`), the `$output` variable to access the results object returned from the AI model, and `this` to access and modify the currently processed document. + + + ```javascript + // Find the comment + const idx = this.Comments.findIndex(c => c.Id == $input.Id); + // Was detected as spam + if($output.Blocked) + { + // Remove this comment + this.Comments.splice(idx, 1); + } + ``` + + +2.
**Playground** + Use this playground to verify that your update script does what you want it to do. + In the case of our spam filtering task, we can check whether the comment that was + detected as spam was removed from the blog post. + + ![Playground: Provide update script](./assets/gen-ai_playground-provide-update-script.png) + + * **Edit mode** + * Toggle OFF to use the results generated using the playground of the previous step. + * Toggle ON to edit the model output freely before testing your update script on it. + This option gives you the freedom to test any content you like, regardless of the results + generated by the playground of the previous step. + +3. **Controls** + * **Cancel** + Click to cancel any changes made in the task. + * **Back** + Click to return to the previous step, [Define Prompt and JSON schema](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#define-prompt-and-json-schema). + * **Test Context** + Click to test the update script you define above. + Note that even though in our case we remove comments from existing documents, + the update script can leave the original document unchanged, create new documents, + and so on - as you choose. + * **Next** + Click to advance to the next step, [Review configuration and Save task](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#review-configuration-and-save-task). + +
+ +## Review configuration and Save task + +Use this final step to review your GenAI task configuration before saving and executing it. +If your task is enabled, it will start running when you save it. + +![Review configuration and Save task](./assets/gen-ai_review-task-configuration.png) + +1. **Review Configuration** + Click a step's **Edit** button to view and modify its current configuration. + Click a script/object **Show** button to view its current content. + +2. **Reprocess all documents** + * Enable this option to have the task reprocess all documents in the source collection. + + Note that documents whose metadata hash code is identical to the hash code of the current task configuration (meaning neither their content nor the configuration has changed since they were processed) will be skipped even if this option is enabled. + + * Disable this option to have the task process only documents that it has not processed before. + +3. **Controls** + * **Cancel** + Click to cancel any changes made in the task. + * **Back** + Click to return to the previous step, [Provide update script](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#provide-update-script). + * **Save** + Click to save your task. + If enabled, saving the task will start its execution. + + * Test your task and make sure you understand how it might change your documents before saving. + * Take every precaution to protect your data, including ensuring it is backed up. + diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/gen-ai-overview.mdx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/gen-ai-overview.mdx new file mode 100644 index 0000000000..4df728057a --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/gen-ai-overview.mdx @@ -0,0 +1,288 @@ +--- +title: "GenAI Integration: Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# GenAI Integration: Overview + + +* **Ongoing GenAI tasks** allow RavenDB to connect and interact with Generative AI models, introducing intelligent, autonomous data processing in production. + +* Tasks can be easily defined, tested, and deployed using [the client API](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api) or [Studio](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio). + + When you create a GenAI task via Studio, a smart interactive **test environment** is provided: each phase of the task can be tested freely in a secluded playground, without changing your data, while producing a result set that the next phase can try out. + +* A task can be built in minutes, e.g. to generate automated responses to frequently asked questions, escalate support tickets, summarize lengthy documents, or enhance data security by detecting anomalies, among numerous other applications. + See a few additional examples in the [common use cases](../../ai-integration/gen-ai-integration/gen-ai-overview#common-use-cases) section below. + +* You can use local and remote AI models, e.g. 
a local `Ollama llama3.2` service during a development phase that requires speed and no additional costs, and a remote `OpenAI gpt-4o-mini` when you need a live service with advanced capabilities. + +* In this article: + * [RavenDB GenAI tasks](../../ai-integration/gen-ai-integration/gen-ai-overview#ravendb-genai-tasks) + * [The flow](../../ai-integration/gen-ai-integration/gen-ai-overview#the-flow) + * [The elements](../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements) + * [How to create and run a GenAI task](../../ai-integration/gen-ai-integration/gen-ai-overview#how-to-create-and-run-a-genai-task) + * [Runtime](../../ai-integration/gen-ai-integration/gen-ai-overview#runtime) + * [Tracking of processed document parts](../../ai-integration/gen-ai-integration/gen-ai-overview#tracking-of-processed-document-parts) + * [Licensing](../../ai-integration/gen-ai-integration/gen-ai-overview#licensing) + * [Supported services](../../ai-integration/gen-ai-integration/gen-ai-overview#supported-services) + * [Common use cases](../../ai-integration/gen-ai-integration/gen-ai-overview#common-use-cases) + + + +
+ +## RavenDB GenAI tasks + +RavenDB offers an integration of generative AI capabilities through user-defined **GenAI tasks**. +A GenAI task is an ongoing process that continuously monitors a document collection associated with it, and reacts when a document is added or modified by retrieving the document, generating "context objects" based on its data, sending these objects to a generative AI model along with instructions regarding what to do with the data and how to format the reply, and potentially acting upon the model's response. + +### The flow: +Let's put the stages described above in order. + +1. A GenAI task continuously monitors the collection it is associated with. +2. When a document is added or modified, the task retrieves it. +3. The task generates context objects based on the source document data. + To generate these objects, the task applies a user-defined [context generation script](../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_context-objects) + that runs through the source document. +4. The task sends each context object to a GenAI model for processing. + * The task is associated with a [Connection string](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#studio_connection-string) + that defines how to connect to the AI model. + * Each context object is sent via a separate connection to the AI model. + (Note that the number of concurrent connections to the AI model is configurable via the [MaxConcurrency](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api#genaiconfiguration) setting.) + * Each context object is sent along with a user-defined [Prompt](../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_prompt) + that instructs the AI model what to do with the data, and + a user-defined [JSON schema](../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_json-schema) + that instructs the AI model how to shape its response. +5. When the AI model returns its response, a user-defined [Update script](../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements_update-script) + is applied to handle the results. + +### The elements: +These are the elements that need to be defined for a GenAI task. + +* [Connection string](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#studio_connection-string) + The connection string defines the connection to the GenAI model. + +*
**Context generation script** + The context generation script goes through the source document + and applies the `ai.genContext` method to create **context objects** based on the source document's data. + E.g. - + + + ```javascript + for(const comment of this.Comments) { + // Use the `ai.genContext` method to generate a context object for each comment. + ai.genContext({Text: comment.Text, Author: comment.Author, Id: comment.Id}); + } + ``` + + + * RavenDB will pass the AI model **not** the source document, but the generated context objects. + * Producing a series of context objects that share a clear common format gives the communication + with the AI model a methodical, reliable aspect that is under our full control. + * This is also an important security layer added between the database and the AI model, which + you can use to ensure that only data you actually want to share with the AI model is passed on. + +* **JSON schema** + This is a JSON-based object that defines the layout of the AI model's response. + This object can be either an **explicit JSON schema**, or a **sample response object** + that RavenDB will turn into a JSON schema for us. + + It is normally easier to provide a sample response object and let RavenDB create + the schema behind the scenes. E.g. - + + + + ```json + { + "Blocked": true, + "Reason": "Concise reason for why this comment was marked as spam or ham" + } + ``` + + + + ```json + { + "name": "some-name", + "strict": true, + "schema": { + "type": "object", + "properties": { + "Blocked": { + "type": "boolean" + }, + "Reason": { + "type": "string", + "description": "Concise reason for why this comment was marked as spam or ham" + } + }, + "required": [ + "Blocked", + "Reason" + ], + "additionalProperties": false + } + } + ``` + + + +* **Prompt** + The prompt tells the AI model what we need it to do. + * It can be phrased in natural language. + * Since the JSON schema already specifies the response layout, including what fields we'd + like the AI model to fill and with what content, the prompt can be used simply to explain + what we want the model to do. + E.g. - + + + ```plain + Check if the following blog post comment is spam or not. + A spam comment typically includes irrelevant or promotional content, + excessive links, misleading information, or is written with the intent to + manipulate search engines or advertise products/services. + Consider the language, intent, and relevance of the comment for + the blog post content. + ``` + + +* **Update Script** + The update script is executed when the AI model responds to a context object we've sent it. + * The update script can take any action, based on the information included in the model's response. + It can, for example, modify the source document, create new documents populated by AI-generated text, + remove existing documents, and so on. + E.g., the following script removes a comment from a blog post if the AI model has concluded that the comment is spam. + + + ```javascript + const idx = this.Comments.findIndex(c => c.Id == $input.Id); + if($output.Blocked) + { + this.Comments.splice(idx, 1); + } + ``` + + + * The update script can also be used as an additional security measure, applying only actions + that we trust not to inflict any damage. + +### How to create and run a GenAI task: + +* You can use [Studio's intuitive wizard](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#add-a-genai-task) + to create GenAI tasks. 
The wizard will guide you through the task creation phases, + provide examples where needed, and offer convenient, interactive, secluded "playgrounds" + for free experimentation. +* Or, you can create GenAI tasks using the [Client API](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api). + +
+ +## Runtime + +Once you complete the configuration and save the task, it will start running (if enabled). +The task will monitor the collection associated with it and process documents as they are +added or modified. + +### Tracking of processed document parts: + +* After creating a [context object](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#generate-context-objects) + for a document part and processing it, the GenAI task will create a hash code and log it in the document's metadata, under a property named after the user-defined task identifier. + + The hash code is computed based on these elements: + * The context object + * The prompt + * The GenAI provider and model (e.g. OpenAI gpt-4o-mini) + * The JSON schema + * The update script + +* If the task is requested to process this document part again, it will compute a new hash code based on these elements and compare it with the existing hash logged in the document metadata. + * If the new hash differs from the existing one, it will indicate that the content and/or the configuration changed, and the task will reprocess this document part. + * If the new hash is identical to the existing one, the task will conclude that the context object was already processed with the exact same content and task configuration, and skip reprocessing it. + + **Tracking processed document parts**: + ![Tracking processed document parts](./assets/gen-ai_overview_hash-flow.png) + + **Hash codes in document metadata**: +
![Metadata Identifier and Hash codes](./assets/gen-ai_overview_metadata.png) + + 1. **Identifier** + This is the user-defined task identifier (defined as part of the configuration). + 2. **Hash codes** + These hash codes were created after processing the document. + The codes were computed per comment, based on the comment's content and the current task configuration. + When the document is processed again, the task will generate a new hash code for each comment. If the comment or the task configuration has changed, the new hash will differ from the existing one and trigger reprocessing. If none of them changed, the identical hash will indicate that no reprocessing is needed. + +
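+
+Conceptually, the skip-or-reprocess decision can be sketched as follows. This is a simplified illustration only - RavenDB computes and stores these hashes for you, and none of the names below are actual API members:
+
+```csharp
+// Conceptual sketch of the change tracking described above (not RavenDB's actual code).
+string ComputeTaskHash(string contextObjectJson, string prompt,
+                       string providerAndModel, string jsonSchema, string updateScript)
+{
+    // Hash all the elements that participate in the comparison
+    var payload = string.Join("\n", contextObjectJson, prompt,
+                              providerAndModel, jsonSchema, updateScript);
+    var bytes = System.Security.Cryptography.SHA256.HashData(
+        System.Text.Encoding.UTF8.GetBytes(payload));
+    return Convert.ToHexString(bytes);
+}
+
+// Reprocess only when no hash was stored yet, or the new hash differs from the stored one
+bool NeedsReprocessing(string newHash, string storedHash) =>
+    storedHash == null || newHash != storedHash;
+```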
+ +## Licensing + +For RavenDB to support the GenAI Integration feature, you need a `RavenDB AI` license type. +A `Developer` license will also enable the feature for experimentation and development. + +![Licensing: RavenDB AI license](./assets/gen-ai_overview_licensing.png) + +
+ +## Supported services + +Supported services include: + +* `OpenAI` and `OpenAI-compatible` services +* `Ollama` + +
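+
+Because the connection string's endpoint is configurable, an OpenAI-compatible service can be targeted by pointing the OpenAI settings at its URL. A minimal sketch, assuming a hypothetical provider - the endpoint and model name below are placeholders, not a real service:
+
+```csharp
+// Sketch: reusing the OpenAI connection string type for an OpenAI-compatible service.
+// 'store' is your DocumentStore instance; endpoint and model are illustrative placeholders.
+var connectionString = new AiConnectionString
+{
+    Name = "compatible-cs",
+    ModelType = AiModelType.Chat,
+    OpenAiSettings = new OpenAiSettings(
+        apiKey: "your-api-key",
+        endpoint: "https://llm.your-provider.example/v1", // any OpenAI-compatible endpoint
+        model: "your-model-name")
+};
+store.Maintenance.Send(new PutConnectionStringOperation(connectionString));
+```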
+ +## Common use cases + +GenAI tasks can be used to address numerous scenarios through intelligent content processing; +here are a few key use case categories. + +#### Data enrichment & enhancement use cases +* **Document summarization** + Generate concise summaries of lengthy reports, articles, or legal documents. +* **Data extraction** + Extract key details like dates, names, amounts, or entities from unstructured text. +* **Content translation** + Automatically translate documents or user-generated content. + +#### Smart automation & workflows use cases +* **Support ticket routing** + Analyze incoming tickets and automatically assign priority levels or route to appropriate teams. +* **Compliance checking** + Scan documents for regulatory compliance issues or policy violations. +* **Data quality improvement** + Standardize formats, correct inconsistencies, or enrich incomplete records. + +#### Enhanced search & discovery use cases +* **Intelligent tagging** + Generate relevant keywords and metadata for better document searchability. +* **Content recommendations** + Suggest related articles, products, or resources based on document analysis. +* **Knowledge extraction** + Build searchable knowledge bases from unstructured document collections. + +#### Business intelligence & insights use cases +* **Trend detection** + Identify patterns and emerging themes in customer communications or market data. +* **Competitive analysis** + Monitor and analyze competitor mentions, pricing, or product information. +* **Risk assessment** + Flag potentially problematic contracts, transactions, or communications. + +#### Content analysis & moderation use cases +* **Content categorization** + Automatically tag and organize articles, documents, or media files. +* **Spam and content filtering** + Automatically detect and flag spam, offensive, or inappropriate comments, reviews, or posts. +* **Sentiment analysis** + Classify customer feedback, support tickets, or social media mentions by emotional tone. + diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/gen-ai-security-concerns.mdx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/gen-ai-security-concerns.mdx new file mode 100644 index 0000000000..b50811b281 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/gen-ai-security-concerns.mdx @@ -0,0 +1,76 @@ +--- +title: "GenAI Integration: Security Concerns" +hide_table_of_contents: true +sidebar_label: Security Concerns +sidebar_position: 5 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# GenAI Integration: Security Concerns + + +This page addresses concerns that potential users of GenAI tasks may have +regarding the safety of data sent to an AI model by the task, and the +security of the database while such tasks run. + +* In this article: + * [Security measures](../../ai-integration/gen-ai-integration/gen-ai-security-concerns#security-measures) + + +## Security measures + +Our approach toward data safety while using RavenDB AI tasks is that we need +to take care of security on our end, rather than expect the AI model to protect +our data. 
+ +You can take these security measures: + +* **Use a local model when possible** + Use a local AI model like Ollama whenever you don't have to transmit your data + to an external model, to keep the data, as much as possible, within the safe + boundaries of your own network. + +* **Pick the right model** + RavenDB does not dictate what model to use, giving you full freedom to pick + the services that you want to connect to. + Choose the AI model you connect to wisely; some seem to be in better hands than others. + +* **Send only the data you want to send** + You are in full control of the data that is sent from your server to the AI model. + Your choices while defining the task, including the collection you associate the + task with and the [context generation script](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#generate-context-objects) + you define, determine the only data that will be exposed to the AI model. + Take your time, when preparing this script, to make sure you send only the + data you actually want to send (see the sketch after this list). + +* **Use the playgrounds** + While defining your AI task, take the time to use Studio's + [playgrounds](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#generate-context-objects-playground) + to double-check what is actually sent. + There are separate playgrounds for the different stages; using them, you can test + your configuration on various documents + and see exactly what you send and what you receive. + +* **Use a secure server** + The AI model is **not** given access to your database. The data that you send it + voluntarily is all it gets. However, as always, if you care about your privacy + and safety, you'd want to use a [secure server](../../start/installation/setup-wizard#select-setup-mode). + This will ensure that you have full control over visitors to your database and + their permissions. + +* **Use your update script wisely** + When considering threats to our data we often focus on external risks, + but often we ourselves endanger it the most. + The [update script](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#provide-update-script) + is the JavaScript that the GenAI task runs after receiving a reply from + the AI model. Here too, take your time to check this powerful script + using the built-in Studio [playground](../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#provide-update-script-playground). 
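+
+As a sketch of the "send only the data you want to send" advice above - a hypothetical context generation script whose field names are illustrative:
+
+```javascript
+// Share only the comment text and id with the model -
+// not author emails, IP addresses, or any other field the document may hold.
+for (const comment of this.Comments)
+{
+    ai.genContext({ Text: comment.Text, Id: comment.Id });
+}
+```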
+ + + diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/gen-ai_start.mdx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/gen-ai_start.mdx new file mode 100644 index 0000000000..1c0e070e19 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/gen-ai_start.mdx @@ -0,0 +1,60 @@ +--- +title: "GenAI tasks: Start" +hide_table_of_contents: true +sidebar_label: Start +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; + +import CardWithImage from "@site/src/components/Common/CardWithImage"; +import CardWithImageHorizontal from "@site/src/components/Common/CardWithImageHorizontal"; +import ColGrid from "@site/src/components/ColGrid"; +import genAiStartOvImage from "./assets/gen-ai_start_ov-image.png"; +import genAiStartApiImage from "./assets/gen-ai_start_api-image.png"; +import genAiStartStudioImage from "./assets/gen-ai_start_studio-image.png"; +import unlockGenAiPotentialArticleImage from "./assets/unlock-genai-potential-article-image.webp"; +import articleGenAiImage from "./assets/article-cover-genai.webp"; + +import ayendeBlogImage from "@site/static/img/from-ayende-com.webp"; +import webinarThumbnailPlaceholder from "@site/static/img/webinar.webp"; + +# GenAI tasks + +### Build intelligent workflows with GenAI tasks. +GenAI tasks are [ongoing operations](../../studio/database/tasks/ongoing-tasks/general-info) that continuously monitor specified collections and process documents as they are added or modified. +- Similar to [ETL tasks](../../studio/database/tasks/ongoing-tasks/ravendb-etl-task), a GenAI task extracts content from documents. But instead of sending the content to another database, the task sends it to an AI model (like OpenAI) along with a guiding **prompt** and a **JSON schema** that defines the layout for the model's response. +- When the LLM responds, the GenAI task can use its response to, for example, update the source document with LLM-generated content, or create new documents in the database. +- GenAI tasks can infuse intelligence into a wide variety of content handling scenarios. + E.g., they can enrich documents with AI-generated summaries or classifications, translate text into different languages, or generate new content based on existing data. +- You can easily create GenAI tasks using Studio or the client API. + When created via Studio, each step of their creation can be easily tested and validated before deployment. + +### Use cases +Here are some of the categories in which GenAI tasks can help. +* Data enrichment & enhancement +* Smart automation & workflows +* Enhanced search & discovery +* Business intelligence & insights +* Content analysis & moderation + +### Technical documentation +Learn how to create and manage tasks that intelligently process your data and transform your content. + + + + + + +#### Learn more: In-depth GenAI task articles + + + + + + +### Related live sessions & videos +Learn how GenAI tasks help create reliable and effective AI-powered workflows. 
+ + + diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/_category_.json b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/_category_.json new file mode 100644 index 0000000000..23fad24d6f --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 3, + "label": "Modify GenAI Task" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/gen-ai_review-task-configuration.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/gen-ai_review-task-configuration.png new file mode 100644 index 0000000000..9e0a0c7866 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/gen-ai_review-task-configuration.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/gen-ai_task-view_edit.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/gen-ai_task-view_edit.png new file mode 100644 index 0000000000..ad2c6bd83b Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/gen-ai_task-view_edit.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/snagit/gen-ai_review-task-configuration.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/snagit/gen-ai_review-task-configuration.snagx new file mode 100644 index 0000000000..867fd18bcc Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/snagit/gen-ai_review-task-configuration.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/snagit/gen-ai_task-view_edit.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/snagit/gen-ai_task-view_edit.snagx new file mode 100644 index 0000000000..4f8e13bfb1 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/assets/snagit/gen-ai_task-view_edit.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/modify-gen-ai-task_api.mdx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/modify-gen-ai-task_api.mdx new file mode 100644 index 0000000000..100c502950 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/modify-gen-ai-task_api.mdx @@ -0,0 +1,166 @@ +--- +title: "Modify GenAI Task: API" +hide_table_of_contents: true +sidebar_label: "Client API" +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Modify GenAI Task: API + + + +* To modify an existing GenAI task, register a modified task configuration object with the server using the existing `TaskID`, via the `UpdateGenAiOperation` store operation. 
+* Note that this `TaskID` is **not** the [user-defined task identifier](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#configure-basic-settings) that we define as part of the task configuration, but an identifier that RavenDB uses internally to manage the task (as it does for other ongoing tasks, such as ETL and backup tasks).
+    * The **user-defined task identifier** is a `string` variable that is mainly used as a property name for a list of hashes that identify [processed document parts](../../../ai-integration/gen-ai-integration/gen-ai-overview#tracking-of-processed-document-parts) in the document metadata.
+    * The `TaskID` is a `long` variable that RavenDB uses to identify and manage the task.
+      See the examples below to learn how to extract the `TaskID` and use it to register the modified task configuration.
+
+* In this article:
+    * [Modify task configuration](../../../ai-integration/gen-ai-integration/modify-gen-ai-task/modify-gen-ai-task_api#modify-task-configuration)
+    * [Syntax](../../../ai-integration/gen-ai-integration/modify-gen-ai-task/modify-gen-ai-task_api#syntax)
+
+
+
+
+## Modify task configuration
+
+To modify the configuration of an existing GenAI task:
+* Retrieve the ongoing task information using `GetOngoingTaskInfoOperation`, passing it:
+   * The existing task's user-defined task identifier (a `string` variable).
+   * The task type (`OngoingTaskType.GenAi` for GenAI tasks).
+* Extract the `TaskID` (a `long` variable) from the returned `OngoingTaskInfo` object.
+* Then, either:
+   * **Modify the existing task configuration**, changing only selected sections of it
+     (this approach is often easier, since you change only the relevant details),
+   * Or **create a new configuration object** and populate it from scratch with new settings for your task
+     (this approach may be preferable if you want to redefine the whole configuration).
+* Register the new or modified configuration with the server using `UpdateGenAiOperation`, passing it:
+   * The extracted `TaskID`.
+   * The configuration object.
+
+### Examples:
+The examples below modify the spam filter demonstrated in the [create GenAI task](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api#full-example) article, which removes spam comments from documents in the `Posts` collection.
+ * The first example, **modify-selected-configuration-details**, demonstrates how to retrieve the existing configuration, modify selected sections of it, and register it with the server again.
+ * The second example, **create-configuration-from-scratch**, demonstrates how to create a new configuration object, populate it with all the necessary configuration details, and register it with the server.
+ * Both examples leave all details as configured in the original example except for the task **name**, the user-defined task **identifier**, and the **update script** - which no longer removes spammy comments, but instead adds a `Warning` property to each comment suspected as spam, explaining why the comment might be spam.
+
+
+
+
+```csharp
+// Provide the existing user-defined task identifier to retrieve the ongoing task info
+var getTaskInfo = new GetOngoingTaskInfoOperation("spam-filter", OngoingTaskType.GenAi);
+var ongoingTask = store.Maintenance.Send(getTaskInfo); // returns existing task info
+
+// Extract the internal TaskID that RavenDB uses to manage the task
+long taskId = ongoingTask.TaskId;
+
+// Use the existing task configuration as a base for modifications
+var modifiedConfig = ((GenAi)ongoingTask).Configuration;
+
+// Modify selected details
+modifiedConfig.Identifier = "spam-warning-filter";
+modifiedConfig.Name = "spam-warning-filter";
+modifiedConfig.UpdateScript = @"
+    // Find the comment
+    const idx = this.Comments.findIndex(c => c.Id == $input.Id);
+    // If the comment was detected as spam
+    if($output.Blocked)
+    {
+        // Add a warning to the comment instead of removing it
+        this.Comments[idx].Warning = 'This comment may be spam: ' + $output.Reason;
+    }";
+
+// Update the GenAI task using the existing TaskID and the modified configuration
+store.Maintenance.Send(new UpdateGenAiOperation(taskId, modifiedConfig));
+```
+
+
+
+```csharp
+// Provide the existing user-defined task identifier to retrieve the ongoing task info
+var getTaskInfo = new GetOngoingTaskInfoOperation("spam-filter", OngoingTaskType.GenAi);
+var ongoingTask = store.Maintenance.Send(getTaskInfo);
+
+// Extract the internal TaskID that RavenDB uses to manage the task
+long taskId = ongoingTask.TaskId;
+
+// Create and populate a new task configuration object
+GenAiConfiguration newConfig = new GenAiConfiguration
+{
+    // New user-defined task identifier
+    Identifier = "spam-warning-filter",
+
+    // New task name
+    Name = "spam-warning-filter",
+
+    // Connection string to AI model
+    ConnectionStringName = "open-ai-cs",
+
+    // Task is enabled
+    Disabled = false,
+
+    // Collection associated with the task
+    Collection = "Posts",
+
+    // Context generation script - format for objects to be sent to the AI model
+    GenAiTransformation = new GenAiTransformation
+    {
+        Script = @"
+            for(const comment of this.Comments)
+            {
+                ai.genContext({Text: comment.Text, Author: comment.Author, Id: comment.Id});
+            }"
+    },
+
+    // AI model Prompt - the instructions sent to the AI model
+    Prompt = "Check if the following blog post comment is spam or not",
+
+    // Sample object - the layout for the AI model's response
+    SampleObject = @"
+        {
+            ""Blocked"": true,
+            ""Reason"": ""Concise reason for why this comment was marked as spam or ham""
+        }",
+
+    // New Update script - specifies what to do with AI model replies
+    UpdateScript = @"
+        // Find the comment
+        const idx = this.Comments.findIndex(c => c.Id == $input.Id);
+        // If the comment was detected as spam
+        if($output.Blocked)
+        {
+            // Add a warning to the comment instead of removing it
+            this.Comments[idx].Warning = 'This comment may be spam: ' + $output.Reason;
+        }",
+
+    // Max concurrent connections to AI model
+    MaxConcurrency = 4
+};
+
+// Update the GenAI task using the existing TaskID and the new configuration
+store.Maintenance.Send(new UpdateGenAiOperation(taskId, newConfig));
+```
+
+
+
+### Syntax:
+
+* `UpdateGenAiOperation` definition:
+   ```csharp
+   public class UpdateGenAiOperation(long taskId, GenAiConfiguration configuration, StartingPointChangeVector startingPoint = null) : IMaintenanceOperation
+   ```
+
+   | Parameter | Type | Description |
+   | ------------- | ------------- | ----- |
+   | `taskId` | `long` | The internal RavenDB `TaskID` of the task to update.
| + | `configuration` | `GenAiConfiguration` | The new or modified configuration for the GenAI task. | + | `startingPoint` | `StartingPointChangeVector` | Optional starting point for the update operation. | + diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/modify-gen-ai-task_studio.mdx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/modify-gen-ai-task_studio.mdx new file mode 100644 index 0000000000..399293cda1 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/modify-gen-ai-task/modify-gen-ai-task_studio.mdx @@ -0,0 +1,44 @@ +--- +title: "Modify GenAI Task: Studio" +hide_table_of_contents: true +sidebar_label: Studio +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Modify GenAI Task: Studio + +Saved tasks are listed in the AI Tasks view. +Selecting a task from the list will take you to the task's [Review task configuration](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#review-configuration-and-save-task) page, which provides an overall view of the task configuration and allows you to review different sections, edit them, and save the modified configuration when you're done. + +![Modify GenAI task](./assets/gen-ai_task-view_edit.png) + +1. **AI Hub** + Click to open the [AI Hub view](../../../ai-integration/ai-tasks-list-view). +2. **AI Tasks** + Click to open the AI Tasks view. +3. **Tasks list** + Pick the task that you want to modify by clicking its name or edit (pencil) icon. + This will take you to the task's **Review task configuration** page. + +
+ +## Review and edit task configuration + +![Review and edit task configuration](./assets/gen-ai_review-task-configuration.png) + +* Use this view to review, edit, and save the task configuration. + +* Click **Show** to view the current settings of a configuration section, + or **Edit** to modify a configuration section using the same [task creation wizard](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio) used to initially define the task. + + * If the task is enabled, your modifications will take effect as soon as you save the configuration. + Test your task and make sure you understand how it might change your documents before saving. + * Take every precaution to protect your data, including ensuring it is backed up. + \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/_category_.json b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/_category_.json new file mode 100644 index 0000000000..fec2fde5f5 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 4, + "label": "Process attachments" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_attachment-example.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_attachment-example.png new file mode 100644 index 0000000000..7f4dc8455d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_attachment-example.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_context-generation-script_attachments-list.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_context-generation-script_attachments-list.png new file mode 100644 index 0000000000..6c1d531555 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_context-generation-script_attachments-list.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_context-generation-script_include-attachment.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_context-generation-script_include-attachment.png new file mode 100644 index 0000000000..153dd82fd8 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_context-generation-script_include-attachment.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_context-generation-script_test-context.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_context-generation-script_test-context.png new file mode 100644 index 0000000000..e27e75cfdb Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_context-generation-script_test-context.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_electric-toys-collection-after-processing.png 
b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_electric-toys-collection-after-processing.png new file mode 100644 index 0000000000..2b090c30c3 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_electric-toys-collection-after-processing.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_prompt-and-json-schema_include-attachment-analysis.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_prompt-and-json-schema_include-attachment-analysis.png new file mode 100644 index 0000000000..35dcdf812a Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_prompt-and-json-schema_include-attachment-analysis.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_prompt-and-json-schema_test-prompt-and-schema.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_prompt-and-json-schema_test-prompt-and-schema.png new file mode 100644 index 0000000000..eaaed9f547 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_prompt-and-json-schema_test-prompt-and-schema.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_update-script_test-update-script.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_update-script_test-update-script.png new file mode 100644 index 0000000000..c3e08b4112 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_update-script_test-update-script.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_update-script_update-document-with-llm-response.png b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_update-script_update-document-with-llm-response.png new file mode 100644 index 0000000000..9b59b6417c Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/gen-ai_update-script_update-document-with-llm-response.png differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_attachment-example.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_attachment-example.snagx new file mode 100644 index 0000000000..d5c2007211 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_attachment-example.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_context-generation-script_attachments-list.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_context-generation-script_attachments-list.snagx new file mode 100644 index 0000000000..0a1fde5ece Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_context-generation-script_attachments-list.snagx differ diff --git 
a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_context-generation-script_include-attachment.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_context-generation-script_include-attachment.snagx new file mode 100644 index 0000000000..c5f3b1599e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_context-generation-script_include-attachment.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_context-generation-script_test-context.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_context-generation-script_test-context.snagx new file mode 100644 index 0000000000..79ae86b1d7 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_context-generation-script_test-context.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_electric-toys-collection-after-processing.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_electric-toys-collection-after-processing.snagx new file mode 100644 index 0000000000..fb465a1cab Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_electric-toys-collection-after-processing.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_prompt-and-json-schema_include-attachment-analysis.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_prompt-and-json-schema_include-attachment-analysis.snagx new file mode 100644 index 0000000000..84d168a4fd Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_prompt-and-json-schema_include-attachment-analysis.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_prompt-and-json-schema_test-prompt-and-schema.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_prompt-and-json-schema_test-prompt-and-schema.snagx new file mode 100644 index 0000000000..745b23f086 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_prompt-and-json-schema_test-prompt-and-schema.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_update-script_test-update-script.snagx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_update-script_test-update-script.snagx new file mode 100644 index 0000000000..3724462ada Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_update-script_test-update-script.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_update-script_update-document-with-llm-response.snagx 
b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_update-script_update-document-with-llm-response.snagx new file mode 100644 index 0000000000..246a678a81 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/assets/snagit/gen-ai_update-script_update-document-with-llm-response.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/processing-attachments_api.mdx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/processing-attachments_api.mdx new file mode 100644 index 0000000000..220c127eb4 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/processing-attachments_api.mdx @@ -0,0 +1,162 @@ +--- +title: "Process attachments: API" +hide_table_of_contents: true +sidebar_label: Client API +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Process attachments: API + + + +* A GenAI task can send the LLM not only documents, but also files attached to the documents. + +* Supported file types are: + * **Plain text files** + Text files are sent to the LLM as is, without any additional encoding. + * **Image files: `jpeg`, `png`, `webp`, `gif`** + Image files are sent to the LLM in base64-encoded strings. + * **PDF files** + PDF files are sent to the LLM in base64-encoded strings. + +* In this article: + * [Sending attachments to the LLM](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_api#sending-attachments-to-the-llm) + + + +
+
+## Sending attachments to the LLM
+
+
+Find a complete example of defining and running a GenAI task that processes attachments in [Processing attachments: Studio](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio).
+
+
+To send documents to the LLM along with their attachments using the API, define and run your GenAI task just as you would without attachments, with the following differences:
+
+* When [defining the connection string](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_api#defining-a-connection-string) to the LLM, make sure the AI model you're using is capable of processing the files attached to your documents.
+  E.g., use OpenAI `gpt-4.1-mini` to process attached image files.
+
+* When [creating the context object](../../../ai-integration/gen-ai-integration/gen-ai-overview#the-elements) that will be sent to the LLM, include document attachments by specifying them in the context generation script. The LLM will receive and process the attachments along with the main document content.
+
+   * Use the `with<Type>` method of the `ai.genContext` object to include document attachments.
+
+   * Replace `<Type>` with the type of the attachment you want to include:
+     `withText` - for plain text files
+     `withPng` - for PNG image files
+     `withJpeg` - for JPEG image files
+     `withWebp` - for WEBP image files
+     `withGif` - for GIF image files
+     `withPdf` - for PDF files
+
+   * Pass `with<Type>` the attached file using `loadAttachment` with the file name as an argument.
+     E.g., to include a PNG attachment named `electric-circuit.png`, use:
+     ```javascript
+     ai.genContext({ ToyName: this.Name, ToyId: id(this) })
+       .withPng(loadAttachment("electric-circuit.png"));
+     ```
+
+
+   Additional options include:
+
+   * [Conditional attachment](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#conditional-attachment)
+   * [Multiple attachments](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#multiple-attachments)
+   * [Embedding base64-encoded images in the context object](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#embedding-base64-encoded-images-in-the-context-object)
+   * [Embedding text in the context object](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#embedding-text-in-the-context-object)
+
+
+
+
+* When [defining the task Prompt and JSON schema](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#set-task-prompt-and-json-schema), make sure the prompt includes instructions for how the LLM should handle the attachments, and that the schema includes fields for any information you expect the LLM to return about them.
+
+* When [defining the task Update script](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#set-task-update-script), make sure it includes logic that handles the LLM's responses related to the attachments.
+
+## Example
+
+```csharp
+using (var store = new DocumentStore())
+{
+    // Define the connection string to OpenAI
+    var connectionString = new AiConnectionString
+    {
+        // Connection string name & identifier
+        Name = "open-ai-cs",
+
+        // Connection type
+        ModelType = AiModelType.Chat,
+
+        // OpenAI connection settings
+        OpenAiSettings = new OpenAiSettings(
+            apiKey: "your-api-key",
+            endpoint: "https://api.openai.com/v1",
+            model: "gpt-4.1-mini") // Model capable of handling image processing
+    };
+
+    // Deploy the connection string to the server
+    var operation = new PutConnectionStringOperation<AiConnectionString>(connectionString);
+    var putConnectionStringResult = store.Maintenance.Send(operation);
+
+    // Define the GenAI task configuration
+    GenAiConfiguration config = new GenAiConfiguration
+    {
+        // Task name
+        Name = "electric-toy-circuit-description",
+
+        // Unique task identifier
+        Identifier = "electric-toy-circuit-description",
+
+        // Connection string to AI model
+        ConnectionStringName = "open-ai-cs",
+
+        // Task is enabled
+        Disabled = false,
+
+        // Collection associated with the task
+        Collection = "ElectricToys",
+
+        // Context generation script - format for objects to be sent to the AI model
+        // Include document attachments in the context object using `with<Type>` methods
+        GenAiTransformation = new GenAiTransformation {
+            Script = @"
+                ai.genContext({ ToyName: this.Name, ToyId: id(this) })
+                  .withPng(loadAttachment(`electric-circuit.png`));"
+        },
+
+        // AI model Prompt - the instructions sent to the AI model
+        Prompt = "You get documents from an `ElectricToys` document collection. " +
+                 "These are toys for young people who want to learn simple electronics. " +
+                 "Each document includes a toy's ID and name, and an attached " +
+                 "image with the scheme of a circuit that operates the toy. 
" + + "Your job is to provide a simple description of up to 20 words " + + "for the circuit, that will be added to the toy's document to " + + "describe how it is operated.", + + // Sample object - a sample response object to format the AI model's replies by + SampleObject = JsonConvert.SerializeObject( new { + ToyName = "Toy name as provided by the GenAI task", + ToyId = "Toy ID as provided by the GenAI task", + CircuitDescription = "LLM's description of the electric circuit" + }), + + // Update script - specifies what to do with AI model replies + UpdateScript = @" + // Embed LLM response in source document + this.CircuitDescription = $output.CircuitDescription;", + + // Max concurrent connections to AI model + MaxConcurrency = 4 + }; + + // Run the task + var GenAiOperation = new AddGenAiOperation(config); + var addAiIntegrationTaskResult = store.Maintenance.Send(GenAiOperation); +} + +``` \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio.mdx b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio.mdx new file mode 100644 index 0000000000..2b327464ac --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio.mdx @@ -0,0 +1,223 @@ +--- +title: "Process attachments: Studio" +hide_table_of_contents: true +sidebar_label: Studio +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Process attachments: Studio + + +* When a GenAI task sends to the LLM a document that files are attached to, the task + can send the attached files along with the document so the LLM would process them as well. + This way you can, for example, make the LLM analyze technical schemes attached to product documents, review reports attached to user profiles, and so on. + +* Supported file types are: + * Plain text files + * Image files: `jpeg`, `png`, `webp`, `gif` + * PDF files + +* Attached text files are sent to the LLM as plain text. + Attached PDF and image files are sent to the LLM in a base64 format. + +* Make sure the LLM model you use is capable of handling the attachments you send it. + E.g., to process image files you can use OpenAI's `gpt-4.1-mini` model. 
+ +* In this article: + * [Include attachments in the Context generation script](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#include-attachments-in-the-context-generation-script) + * [Conditional attachment](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#conditional-attachment) + * [Multiple attachments](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#multiple-attachments) + * [Embedding base64-encoded images in the context object](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#embedding-base64-encoded-images-in-the-context-object) + * [Embedding text in the context object](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#embedding-text-in-the-context-object) + * [Set task Prompt and JSON schema](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#set-task-prompt-and-json-schema) + * [Set task Update script](../../../ai-integration/gen-ai-integration/process-attachments/processing-attachments_studio#set-task-update-script) + + + +
+
+## Include attachments in the Context generation script
+
+
+
+Our example GenAI task sends the LLM documents from the `ElectricToys` collection.
+Each document in this collection has an image file named `electric-circuit.png` attached to it, illustrating the simple electric circuit that operates the toy. For example:
+
+![An example of a scheme attached to one of the documents](./assets/gen-ai_attachment-example.png)
+
+We want the LLM to analyze each circuit scheme and return a short explanation of how the circuit operates.
+We will then embed this explanation in the original document.
+
+
+* Learn about the GenAI task's context generation script and how to set it [here](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#generate-context-objects).
+
+* When creating the context object that will be sent to the LLM, include document attachments by specifying them in the context generation script. The LLM will receive and process the attachments along with the main document content.
+
+* Use the `with<Type>` method of the `ai.genContext` object to include document attachments.
+  Replace `<Type>` with the type of the attachment you want to include:
+   * `withText` - for plain text files
+   * `withPng` - for PNG image files
+   * `withJpeg` - for JPEG image files
+   * `withWebp` - for WEBP image files
+   * `withGif` - for GIF image files
+   * `withPdf` - for PDF files
+
+* Pass `with<Type>` the attached file using `loadAttachment` with the file name as an argument.
+
+* The context generation script below includes in the context object the source document's Name and ID, and uses `withPng` to also include the `electric-circuit.png` file attached to the document.
+  ```javascript
+  ai.genContext({ ToyName: this.Name, ToyId: id(this) })
+    .withPng(loadAttachment("electric-circuit.png"));
+  ```
+
+![Context generation script: Include an attachment](./assets/gen-ai_context-generation-script_include-attachment.png)
+
+1. **Context generation script**
+   Provide a JavaScript that generates the context object sent to the AI model for each document processed by the GenAI task.
+   In the script, include any attachment you want the LLM to process.
+
+2. **Test context**
+   Click to test your context generation script to ensure it works as expected.
+   The test result shows the generated context object, including the attached files.
+
+   ![Test context](./assets/gen-ai_context-generation-script_test-context.png)
+
+   * **See attachments**: Click for a list of attachments included in the context:
+   ![Attachments list](./assets/gen-ai_context-generation-script_attachments-list.png)
+
+### Conditional attachment:
+
+When `loadAttachment` fails to load an attachment, it returns `null`.
+You can use this to make the delivery of an attachment conditional on `loadAttachment`'s success in loading it.
+Including a non-existing attachment in the context object will not generate an error, but the LLM will receive a "not found" message instead of the attachment.
+
+```javascript
+// Verify that the attachment exists before sending it to the LLM
+const img = loadAttachment("electric-circuit.png");
+if (img != null) {
+    ai.genContext({ ToyName: this.Name, ToyId: id(this) })
+      .withPng(img);
+} else {
+    ai.genContext({ ToyName: this.Name, ToyId: id(this) });
+}
+```
+
+### Multiple attachments:
+
+You can include multiple attachments in the context object.
+E.g., to include both a PNG and a PDF attachment, use: + +```javascript +const img = loadAttachment("electric-circuit.png"); +const pdf = loadAttachment("circuit-diagram.pdf"); +ai.genContext({ ToyName: this.Name, ToyId: id(this) }) + .withPng(img) + .withPdf(pdf); +``` + +### Embedding base64-encoded images in the context object: + +You can embed base64-encoded images in the context object instead of sending them as separate attachments. +E.g., + +```javascript +// Base64-encoded image string +const starImage = "iVBORw0KGgoAAAANSUhEUgAAACUAAAAkCAYAAAAOwvOmAAAACXBIWXMAABYlAAAWJQFJUiTwAAAAB3RJTUUH6QkOABYvpxl9JgAAA6VJREFUWIXNmL9vG2UYxz/Pe45TJ45BdIlAQgilTdK0YihCsCAW1IHCDv8BAjYGRsQfgMRC5w4MbDAgxIL4IaHCQJe0glYkRaFVQ5M0iY3PPt+9X4a7Om5qn12fCf1Kr3X2Pffc55679/s+PpMkHjG5SSVStEEyocubDJSHxrWnseRPJsFVHEqg9hbOQ3jtWUT8CEAZ1NfmsQTAY+ENKFivwlBRYw+nBJXAJ1BfW0xJ/y8oIaKbT4IHZRxOHt9ZQwWqVQBK+PYqLm52s1gGFv6+gLCxsQpAGa3rL+D7nFkI3/x57MxjQgkfXsZoDYyIbryIP0ooyWhcf568tUAxJPVfx7qFI0DdS+tBSkf4I84NqYNB5/ZZPKCHrJk9uPYpnUrW8xUwton3f6JV/w6/+wlmCZZTBlN2PXOvk0y/QvWx13DHFpA5zOVbhqmXym+h8BLNO1/Q3P+F6WAV50SQpCfwxeynq8AgSkoElVexx89SeeJNnC2BOVAG1bg6j2mT3irf8x31gORVpqhkYAlUTl5Jobwg/O04+B3kKWrIY8kb1FZ2kZ/NHnQTleU7+KmXsYk1M6NLBtWTTWAWM5dCOQyTqJ74lqR8/shgLBvVE01cqUxqBnZgCWYBJqO28CXMvH8kUDKYPRVhpQpYkEKZHbIEZR+CcPMjku0P/xsay9xiJQKm7nnOwe5BPbrkiTYvEG+/m+vcDyuld4i55Q5Q6s88GEr4RHT2Pye+9dZEgExAMM3McivtIaz/NB8IlYIlmIl472vCv94o7FPSFDOnIlzgs7vVf6r3r18mswDwWPBcMZpuwhgz3zu/+moEV/K0tz6eDJMJG8EIR4AKUHiRYU3BKJLo2xSOAQXEuwVxetT+YWjIcCgZKJgEDk6Q7FwYHjeUyUfIJTkBB5vG/V3FA6EG4c7w3n0oVNT4Pj/AA1NnmDndovzUN2mXMUAmmLJ1IM79v5rvU0B9/SVc49KhZQBw4Eo1Kot/A9NpfJaptfkVfut82hRmv1nPsZUzwh1aWnqVW6kkAYU3DwizRB33DJWlOtOLe12g3qDy/DmOnRbl+U+7y0o3wsCpTl6pcqFcAIE2ukljq1FdalNb+QMXVAf2gkaAIcrH32ZuWQS1D+7b3777WX4jqRz5xOufK+ju5VnF7W15L3nv8w7JSSa1br2nxirauXpOUjIwNBdKuq2ktaGOvJKcJCMydUdj/R15Hw+MzX3Q6b6msOzNwISad5HNlv75/gUYA5gJ5sXALwAAAABJRU5ErkJggg=="; +ai.genContext({ ToyName: this.Name, ToyId: id(this) }) + .withPng(starImage); +``` + +### Embedding text in the context object: + +You can embed plain text in the context object instead of sending it as a separate attachment. +E.g., + +```javascript +const imageDescription = "Simple switch turning the toy on and off"; +ai.genContext({ ToyName: this.Name, ToyId: id(this) }) + .withText(imageDescription) +``` + +or - + +```javascript +ai.genContext({ ToyName: this.Name, ToyId: id(this) }) + .withText("Simple switch turning the toy on and off") +``` + +
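+
+The `with<Type>` methods can also be combined with one another. The sketch below (assuming the methods can be chained freely, as shown for multiple attachments above, and reusing this article's `ElectricToys` example) chains `withText` and `withPng` to send the model an embedded text note together with an attached image:
+
+```javascript
+// Combine an embedded text note with an attached image file;
+// if the image is missing, loadAttachment returns null and only the note is sent
+const img = loadAttachment("electric-circuit.png");
+const note = "The attached scheme shows the circuit that operates this toy.";
+if (img != null) {
+    ai.genContext({ ToyName: this.Name, ToyId: id(this) })
+      .withText(note)
+      .withPng(img);
+} else {
+    ai.genContext({ ToyName: this.Name, ToyId: id(this) })
+      .withText(note);
+}
+```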
+
+## Set task Prompt and JSON schema
+
+* Learn about the GenAI task's Prompt and JSON schema and how to set them [here](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#define-prompt-and-json-schema).
+
+* While defining the task prompt and JSON schema, consider the attachments you include in the context object.
+  Make sure your prompt instructs the LLM to analyze the attached files, and that your Sample response object (or JSON schema) is designed to capture LLM output derived from these files.
+
+* In the examples below, the prompt instructs the LLM to analyze the electric circuit scheme attached to each toy document, and the Sample response object is designed to capture a description of the circuit based on the analysis of the attached image.
+
+   * **Prompt example**:
+     ```text
+     You get documents from an "ElectricToys" document collection.
+     These are toys for young people who want to learn how to operate simple circuits.
+     Each document includes a toy's ID and name, and an attached image with the scheme of a circuit that operates the toy.
+     Your job is to provide a simple description of up to 20 words for the circuit, which will be added to the toy's document to describe how it is operated.
+     ```
+   * **Sample response object example**:
+     ```json
+     {
+         "ToyName": "Toy name as provided by the GenAI task",
+         "ToyId": "Toy ID as provided by the GenAI task",
+         "CircuitDescription": "LLM's description of the electric circuit"
+     }
+     ```
+![Prompt and JSON schema: Include attachment analysis](./assets/gen-ai_prompt-and-json-schema_include-attachment-analysis.png)
+
+1. **Prompt**
+   Provide a prompt that instructs the AI model how to process each document, including its attachments.
+   In the prompt, specify what information you expect the AI model to derive from the attached files (in this case - a description of the electric circuit depicted by the attached image).
+
+2. **Sample response object / JSON schema**
+   Define a response object or a schema that outlines the structure of the response you expect from the AI model.
+   Ensure the schema includes fields that will capture the AI model's analysis of the attached files (in this case - a field for the circuit description).
+
+3. **Test model**
+   Click to test your prompt and JSON schema to ensure they work as expected.
+   The test result shows a sample response from the AI model, formatted according to your JSON schema, including information about the attached file.
+
+   ![Test prompt and JSON schema](./assets/gen-ai_prompt-and-json-schema_test-prompt-and-schema.png)
+ +## Set task Update script + +* Learn about the GenAI task's Update script and how to set it [here](../../../ai-integration/gen-ai-integration/create-gen-ai-task/create-gen-ai-task_studio#provide-update-script). + +* Provide an update script that processes the AI model's responses, including any response derived from the attached files, and updates the source documents in your database accordingly. + +* In the example below, the update script takes the circuit description provided by the LLM, and simply updates the source document's `CircuitDescription` field with it. + + ```javascript + this.CircuitDescription = $output.CircuitDescription; + ``` + +![Update script: Update document with LLM response](./assets/gen-ai_update-script_update-document-with-llm-response.png) + +1. **Update script** + Provide a JavaScript that processes the results object returned from the AI model and takes needed actions. +2. **Test update script** + Click to test your update script to ensure it works as expected. + The test result shows how the currently processed document will be updated based on the AI model's response, including any information derived from the attached files. + + ![Test update script](./assets/gen-ai_update-script_test-update-script.png) + + Here is a list of all documents in the ElectricToys collection, after they were sent by the GenAI task to the LLM along with their attached images and updated according to its analysis: + + ![ElectricToys collection after processing](./assets/gen-ai_electric-toys-collection-after-processing.png) \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/_category_.json b/versioned_docs/version-7.1/ai-integration/generating-embeddings/_category_.json new file mode 100644 index 0000000000..1ba1b3a8d8 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/generating-embeddings/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 1, + "label": "Generating Embeddings" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-1.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-1.png new file mode 100644 index 0000000000..bedad14b23 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-1.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-2.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-2.png new file mode 100644 index 0000000000..23b77a447b Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-2.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-3.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-3.png new file mode 100644 index 0000000000..0b16de4606 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-3.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-4-script.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-4-script.png new file mode 100644 index 0000000000..67e94d45df Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-4-script.png differ diff --git 
a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-4.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-4.png new file mode 100644 index 0000000000..fa47cfdf5e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-4.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-5.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-5.png new file mode 100644 index 0000000000..6a4b828c96 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-5.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-6.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-6.png new file mode 100644 index 0000000000..42d20a25c5 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/add-ai-task-6.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/ai-search-article-cover.webp b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/ai-search-article-cover.webp new file mode 100644 index 0000000000..9696394161 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/ai-search-article-cover.webp differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-cache-1.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-cache-1.png new file mode 100644 index 0000000000..1c21c4585f Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-cache-1.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-cache-2.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-cache-2.png new file mode 100644 index 0000000000..8fc4b3af2d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-cache-2.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-cache-3.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-cache-3.png new file mode 100644 index 0000000000..b67df995d3 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-cache-3.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-collection-1.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-collection-1.png new file mode 100644 index 0000000000..eee180fc7f Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-collection-1.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-collection-2.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-collection-2.png new file mode 100644 index 0000000000..f72721196e Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-collection-2.png differ diff --git 
a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation-task-flow.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation-task-flow.png new file mode 100644 index 0000000000..c3e65b2f8c Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation-task-flow.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation_start_api-image.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation_start_api-image.png new file mode 100644 index 0000000000..a43afaf78d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation_start_api-image.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation_start_ov-image.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation_start_ov-image.png new file mode 100644 index 0000000000..b90ffb74a8 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation_start_ov-image.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation_start_studio-image.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation_start_studio-image.png new file mode 100644 index 0000000000..19a72d98dc Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/embeddings-generation_start_studio-image.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/vector-search-flow.png b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/vector-search-flow.png new file mode 100644 index 0000000000..e7b09525a8 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/generating-embeddings/assets/vector-search-flow.png differ diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/content/_embeddings-generation-task-csharp.mdx b/versioned_docs/version-7.1/ai-integration/generating-embeddings/content/_embeddings-generation-task-csharp.mdx new file mode 100644 index 0000000000..68198c5af1 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/generating-embeddings/content/_embeddings-generation-task-csharp.mdx @@ -0,0 +1,520 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In RavenDB, you can define AI tasks to automatically generate embeddings from your document content. + These embeddings are then stored in [dedicated collections](../../../ai-integration/generating-embeddings/embedding-collections.mdx) within the database, + enabling [Vector search](../../../ai-integration/vector-search/ravendb-as-vector-database.mdx) on your documents. + +* This article explains how to configure such a task. + It is recommended to first refer to this [Overview](../../../ai-integration/generating-embeddings/overview.mdx#embeddings-generation---overview) + to understand the embeddings generation process flow. 
+ +* In this article: + * [Configuring an embeddings generation task - from the Studio](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#configuring-an-embeddings-generation-task---from-the-studio) + * [Configuring an embeddings generation task - from the Client API](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#configuring-an-embeddings-generation-task---from-the-client-api) + * [Define source using PATHS](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#configure-an-embeddings-generation-task---define-source-using-paths) + * [Define source using SCRIPT](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#configure-an-embeddings-generation-task---define-source-using-script) + * [Chunking methods and tokens](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#chunking-methods-and-tokens) + * [Syntax](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#syntax) + + + +## Configuring an embeddings generation task - from the Studio + +* **Define the general task settings**: + + ![Create embeddings generation task - general](../assets/add-ai-task-3.png) + + 1. **Name** + Enter a name for the task. + 2. **Identifier** + Enter a unique identifier for the task. + Each AI task in the database must have a distinct identifier. + + If not specified, or when clicking the "Regenerate" button, + RavenDB automatically generates the identifier based on the task name. For example: + * If the task name is: _"Generate embeddings from OpenAI"_ + * The generated identifier will be: _"generate-embeddings-from-openai"_ + + Allowed characters: only lowercase letters (a-z), numbers (0-9), and hyphens (-). + + **This identifier is used:** + * When querying embeddings generated by the task via a dynamic query. + An example is available in [Querying pre-made embeddings](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#querying-pre-made-embeddings-generated-by-tasks). + * When indexing the embeddings generated by the task. + An example is available in [Indexing pre-made text-embeddings](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-pre-made-text-embeddings). + * In documents in the [Embeddings collection](../../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-collection), + where the task identifier is used to identify the origin of each embedding. + + See how this identifier is used in the [Embeddings collection](../../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-collection) + documents that reference the generated embeddings. + + 3. **Regenerate** + Click "Regenerate" to automatically create an identifier based on the task name. + 4. **Task state** + Enable/Disable the task. + 5. **Responsible node** + Select a node from the [Database group](../../../studio/database/settings/manage-database-group.mdx) to be the responsible node for this task. + 6. **Connection string** + Select a previously defined [AI connection string](../../../ai-integration/connection-strings/connection-strings-overview.mdx) or create a new one. + 7. **Enable document expiration** + This toggle appears only if the [Document expiration feature](../../../studio/database/settings/document-expiration.mdx) is Not enabled in the database. 
+     Enabling document expiration ensures that embeddings in the `@embeddings-cache` collection are automatically deleted when they expire.
+  8. **Save**
+     Click _Save_ to store the task definition or _Cancel_.
+
+* **Define the embeddings source - using PATHS**:
+
+  ![Create embeddings generation task - source by paths](../assets/add-ai-task-4.png)
+
+  1. **Collection**
+     Enter or select the source document collection from the dropdown.
+  2. **Embeddings source**
+     Select `Paths` to define the source content by document properties.
+  3. **Path configuration**
+     Specify which document properties to extract text from, and how the text should be chunked into embeddings.
+
+     * **Source text path**
+       Enter the property name from the document that contains the text for embedding generation.
+     * **Chunking method**
+       Select the method for splitting the source text into chunks.
+       Learn more in [Chunking methods and tokens](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#chunking-methods-and-tokens).
+     * **Max tokens per chunk**
+       Enter the maximum number of tokens allowed per chunk (this depends on the service provider).
+     * **Overlap tokens**
+       Enter the number of tokens to repeat at the start of each chunk from the end of the previous one.
+       This helps preserve context between chunks by carrying over some tokens from one to the next.
+       Applies only to the _"Plain Text: Split Paragraphs"_ and _"Markdown: Split Paragraphs"_ chunking methods.
+
+  4. **Add path configuration**
+     Click to add the specified path configuration to the list.
+  5. **List of paths**
+     Displays the document properties you added for embedding generation.
+
+* **Define the embeddings source - using SCRIPT**:
+
+  ![Create embeddings generation task - source by script](../assets/add-ai-task-4-script.png)
+
+  1. **Embeddings source**
+     Select `Script` to define the source content and chunking methods using a JavaScript script.
+  2. **Script**
+     Refer to section [Chunking methods and tokens](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#chunking-methods-and-tokens) for available JavaScript methods.
+  3. **Default chunking method**
+     The selected chunking method will be used by default when no method is specified in the script,
+     e.g., when the script contains: `Name: this.Name`.
+  4. **Default max tokens per chunk**
+     Enter the default value to use when no specific value is set for the chunking method in the script.
+     This is the maximum number of tokens allowed per chunk (depends on the service provider).
+  5. **Default overlap tokens**
+     Enter the default value to use when no specific value is set for the chunking method in the script.
+     This is the number of tokens to repeat at the start of each chunk from the end of the previous one.
+     Applies only to the _"Plain Text: Split Paragraphs"_ and _"Markdown: Split Paragraphs"_ chunking methods.
+
+* **Define quantization and expiration -
+  for the embeddings generated from the source documents**:
+
+  ![Create embeddings generation task - quantization and expiration](../assets/add-ai-task-5.png)
+
+  1. **Quantization**
+     Select the quantization method that RavenDB will apply to embeddings received from the service provider.
+     Available options:
+     * Single (no quantization)
+     * Int8
+     * Binary
+  2. **Embeddings cache expiration**
+     Set the expiration period for documents stored in the `@embeddings-cache` collection.
+     These documents contain embeddings generated from the source documents, serving as a cache for these embeddings.
+ The default initial period is `90` days. This period may be extended when the source documents change. + Learn more in [The embeddings cache collection](../../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-cache-collection). + 3. **Regenerate embeddings** + This toggle is visible only when editing an existing task. + Toggle ON to regenerate embeddings for all documents in the collection, as specified by the _Paths_ or _Script_. + +* **Define chunking method & expiration - + for the embedding generated from a search term in a vector search query**: + + ![Create embeddings generation task - for the query](../assets/add-ai-task-6.png) + + 1. **Querying** + This label indicates that this section configures parameters only for embeddings + generated by the task for **search terms** in vector search queries. + 2. **Chunking method** + Select the method for splitting the search term into chunks. + Learn more in [Chunking methods and tokens](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#chunking-methods-and-tokens). + 3. **Max tokens per chunk** + Enter the maximum number of tokens allowed per chunk (this depends on the service provider). + 4. **Embeddings cache expiration** + Set the expiration period for documents stored in the `@embeddings-cache` collection. + These documents contain embeddings generated from the search terms, serving as a cache for these embeddings. + The default period is `14` days. Learn more in [The embeddings cache collection](../../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-cache-collection). + +## Configuring an embeddings generation task - from the Client API + + + +#### Configure an embeddings generation task - define source using PATHS: + + +```csharp +// Define a connection string that will be used in the task definition: +// ==================================================================== + +var connectionString = new AiConnectionString +{ + // Connection string name & identifier + Name = "ConnectionStringToOpenAI", + Identifier = "id-for-open-ai-connection-string", + + // OpenAI connection settings + OpenAiSettings = new OpenAiSettings( + apiKey: "your-api-key", + endpoint: "https://api.openai.com/v1", + model: "text-embedding-3-small") +}; + +// Deploy the connection string to the server: +// =========================================== +var putConnectionStringOp = + new PutConnectionStringOperation(connectionString); +var putConnectionStringResult = store.Maintenance.Send(putConnectionStringOp); + +// Define the embeddings generation task: +// ====================================== +var embeddingsTaskConfiguration = new EmbeddingsGenerationConfiguration +{ + // General info: + Name = "GetEmbeddingsFromOpenAI", + Identifier = "id-for-task-open-ai", + ConnectionStringName = "ConnectionStringToOpenAI", + Disabled = false, + + // Embeddings source & chunking methods - using PATHS configuration: + Collection = "Categories", + EmbeddingsPathConfigurations = [ + new EmbeddingPathConfiguration() { + Path = "Name", + ChunkingOptions = new() + { + ChunkingMethod = ChunkingMethod.PlainTextSplit, + MaxTokensPerChunk = 2048 + } + }, + new EmbeddingPathConfiguration() + { + Path = "Description", + ChunkingOptions = new() + { + ChunkingMethod = ChunkingMethod.PlainTextSplitParagraphs, + MaxTokensPerChunk = 2048, + + // 'OverlapTokens' is only applicable when ChunkingMethod is + // 'PlainTextSplitParagraphs' or 'MarkDownSplitParagraphs' + OverlapTokens = 128 + } + }, + ], 
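+
+    // Note: 'EmbeddingsPathConfigurations' configures the PATHS source approach.
+    // For the SCRIPT approach, set the 'EmbeddingsTransformation' property instead
+    // (see the SCRIPT example further below).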
+ + // Quantization & expiration - + // for embeddings generated from source documents: + Quantization = VectorEmbeddingType.Single, + EmbeddingsCacheExpiration = TimeSpan.FromDays(90), + + // Chunking method and expiration - + // for the embeddings generated from search term in vector search query: + ChunkingOptionsForQuerying = new() + { + ChunkingMethod = ChunkingMethod.PlainTextSplit, + MaxTokensPerChunk = 2048 + }, + + EmbeddingsCacheForQueryingExpiration = TimeSpan.FromDays(14) +}; + +// Deploy the embeddings generation task to the server: +// ==================================================== +var addEmbeddingsGenerationTaskOp = + new AddEmbeddingsGenerationOperation(embeddingsTaskConfiguration); +var addAiIntegrationTaskResult = store.Maintenance.Send(addEmbeddingsGenerationTaskOp); +``` + + + + + + +#### Configure an embeddings generation task - define source using SCRIPT: + +* To configure the source content using a script - + use the `EmbeddingsTransformation` object instead of the `EmbeddingsPathConfigurations` object. + +* The rest of the configuration properties are the same as in the example above. + +* Call `embeddings.generate(object)` within the script and apply the appropriate text-splitting methods to each field inside the object. + Each KEY in the object represents a document field, and the VALUE is a text-splitting function that processes the field's content before generating embeddings. + +* These methods ensure that the text chunks derived from document fields stay within the token limits required by the provider, preventing request rejection. + Learn more in [Chunking methods and tokens](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#chunking-methods-and-tokens). + +* For example: + + +```csharp +// Source collection: +Collection = "Categories", + +// Use 'EmbeddingsTransformation': +EmbeddingsTransformation = new EmbeddingsTransformation() +{ + // Define the script: + Script = + @"embeddings.generate({ + + // Process the document 'Name' field using method text.split(). + // The text content will be split into chunks of up to 2048 tokens. + Name: text.split(this.Name, 2048), + + // Process the document 'Description' field using method text.splitParagraphs(). + // The text content will be split into chunks of up to 2048 tokens. + // 128 overlapping tokens will be repeated at the start of each chunk + // from the end of the previous one. + Description: text.splitParagraphs(this.Description, 2048, 128) + });" +}, +``` + + +* If no chunking method is provided in the script, you can set default values as follows: + + +```csharp +Collection = "Categories", +EmbeddingsTransformation = new EmbeddingsTransformation() +{ + Script = + @"embeddings.generate({ + + // No chunking method is specified here + Name: this.Name, + Description: this.Description + });", + + // Specify the default chunking options to use in the script + ChunkingOptions = new ChunkingOptions() + { + ChunkingMethod = ChunkingMethod.PlainTextSplit, + MaxTokensPerChunk = 2048 + } +}, +``` + + + + +## Chunking methods and tokens + +**Tokens and processing limits**: + +* A token is the fundamental unit that Large Language Models (LLMs) use to process text. + AI service providers that generate embeddings from text enforce token limits for each processed text part. + If a text exceeds the provider’s limit, it may be truncated or rejected. 
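+
+* As a rough illustration of the length-based token estimate RavenDB uses for chunking
+  (described under "Using chunking methods" below) - a sketch, not a real tokenizer:
+
+```csharp
+// RavenDB estimates token counts by dividing the text length by 4;
+// actual providers count tokens with their own tokenizers.
+string text = "Supplies soft drinks, fruit juices, and flavored syrups.";
+int estimatedTokens = text.Length / 4; // 56 characters -> ~14 estimated tokens
+```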
+ +**Using chunking methods**: + +* To handle lengthy text, you can define chunking strategies in the task definition and specify the desired number of tokens per chunk. + Chunking splits large input texts into smaller, manageable chunks, each containing no more than the specified maximum number of tokens. + +* The maximum number of tokens per chunk depends on the AI service provider and the specific model defined in the [connection string](../../../ai-integration/connection-strings/connection-strings-overview.mdx). + While RavenDB does not tokenize text, it estimates the number of tokens for chunking purposes by dividing the text length by 4. + +* The AI provider generates a single embedding for each chunk. + Depending on the maximum tokens per chunk setting, a single input text may result in multiple embeddings. + +**Available chunking methods**: + +RavenDB offers several chunking methods that can be applied per source type. +These methods determine how input text is split before being sent to the provider. + + + +* `PlainText: Split` + Splits a plain text string into multiple chunks based on the specified maximum token count. + Estimates token lengths based on an average of 4 characters per token and applies a 0.75 ratio to determine chunk sizes. + Ensures that words are not split mid-way when forming chunks. + + **Applies to**: + Fields containing plain text strings. + **Return Value**: + A list of text chunks (strings), where each chunk approximates the specified maximum token count without breaking words. + +* `PlainText: Split Lines` + Uses the Semantic Kernel _SplitPlainTextLines_ method. + Splits a plain text string into individual lines based on line breaks and whitespace while ensuring that each line does not exceed the specified maximum token limit. + + **Applies to**: + Fields containing an array of plain text strings. + **Return value**: + A list of text segments (lines) derived from the original input, preserving line structure while ensuring token constraints. + +* `PlainText: Split Paragraphs` + Uses the Semantic Kernel _SplitPlainTextParagraphs_ method. + Combines consecutive lines to form paragraphs while ensuring each paragraph is as complete as possible without exceeding the specified token limit. + Optionally, set an overlap between chunks using the _overlapTokens_ parameter, which repeats the last _n_ tokens from one chunk at the start of the next. + This helps preserve context continuity across paragraph boundaries. + + **Applies to**: + Fields containing an array of plain text strings. + **Return value**: + A list of paragraphs, where each paragraph consists of grouped lines that preserve readability without exceeding the token limit. + +* `Markdown: Split Lines` + Uses the Semantic Kernel _SplitMarkDownLines_ method. + Splits markdown content into individual lines at line breaks while ensuring that each line remains within the specified token limit. + Preserves markdown syntax, ensuring each line remains an independent, valid segment. + + **Applies to**: + Fields containing strings with markdown content. + **Return value**: + A list of markdown lines, each respecting the token limit while maintaining the original formatting. + +* `Markdown: Split Paragraphs` + Uses the Semantic Kernel _SplitMarkdownParagraphs_ method. + Groups lines into coherent paragraphs at designated paragraph breaks while ensuring each paragraph remains within the specified token limit. + Markdown formatting is preserved. 
+ Optionally, set an overlap between chunks using the _overlapTokens_ parameter, which repeats the last _n_ tokens from one chunk at the start of the next. + This helps preserve context continuity across paragraph boundaries. + + + **Applies to**: + Fields containing an array of strings with markdown content. + **Return value**: + A list of markdown paragraphs, each respecting the token limit and maintaining structural integrity. + +* `HTML: Strip` + Removes HTML tags from the content and splits the resulting plain text into chunks based on a specified token limit. + + **Applies to**: + Fields containing strings with HTML. + **Return value**: + A list of text chunks derived from the stripped content, ensuring each chunk remains within the token limit. + + +**Chunking method syntax for the JavaScript scripts**: + + +```javascript +// Available text-splitting methods: +// ================================= + +// Plain text methods: +text.split(text | [text], maxTokensPerLine); +text.splitLines(text | [text], maxTokensPerLine); +text.splitParagraphs(line | [line], maxTokensPerLine, overlapTokens?); + +// Markdown methods: +markdown.splitLines(text | [text], maxTokensPerLine); +markdown.splitParagraphs(line | [line], maxTokensPerLine, overlapTokens?); + +// HTML processing: +html.strip(htmlText | [htmlText], maxTokensPerChunk); +``` + + +| Parameter | Type | Description | +|------------------------------------------|-----------|------------------------------------------------------------------ | +| **text** | `string` | A plain text or markdown string to split. | +| **line** | `string` | A single line or paragraph of text. | +| **[text] / [line]** | `string[]`| An array of text or lines to split into chunks. | +| **htmlText** | `string` | A string containing HTML content to process. | +| **maxTokensPerChunk / maxTokensPerLine** | `number` | The maximum number of tokens allowed per chunk.
Default is `512`. | +| **overlapTokens** | `number` (optional) | The number of tokens to overlap between consecutive chunks. Helps preserve context continuity across chunks (e.g., between paragraphs).
Default is `0`. |
+
+## Syntax
+
+#### The embeddings generation task configuration:
+
+
+```csharp
+// The 'EmbeddingsGenerationConfiguration' class inherits from 'EtlConfiguration'
+// and provides the following specialized configurations for the embeddings generation task:
+// =========================================================================================
+
+public class EmbeddingsGenerationConfiguration : EtlConfiguration
+{
+    public string Identifier { get; set; }
+    public string Collection { get; set; }
+    public List<EmbeddingPathConfiguration> EmbeddingsPathConfigurations { get; set; }
+    public EmbeddingsTransformation EmbeddingsTransformation { get; set; }
+    public VectorEmbeddingType Quantization { get; set; }
+    public ChunkingOptions ChunkingOptionsForQuerying { get; set; }
+    public TimeSpan EmbeddingsCacheExpiration { get; set; } = TimeSpan.FromDays(90);
+    public TimeSpan EmbeddingsCacheForQueryingExpiration { get; set; } = TimeSpan.FromDays(14);
+}
+```
+
+
+| Parameter | Type | Description |
+|------------------------------------------|------------------------------------|-------------|
+| **Identifier** | `string` | The identifier of the embeddings generation task. |
+| **Collection** | `string` | The name of the source collection from which documents are processed for embeddings generation. |
+| **EmbeddingsPathConfigurations** | `List<EmbeddingPathConfiguration>` | A list of properties inside documents that contain text to be embedded, along with their chunking settings. |
+| **EmbeddingsTransformation** | `EmbeddingsTransformation` | An object that contains a script defining the transformations and processing applied to the source text before generating embeddings. |
+| **Quantization** | `VectorEmbeddingType` | The quantization type for the generated embeddings. |
+| **ChunkingOptionsForQuerying** | `ChunkingOptions` | The chunking method and maximum token limit used when processing search terms in vector search queries. |
+| **EmbeddingsCacheExpiration** | `TimeSpan` | The expiration period for documents in the [Embedding cache collection](../../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-cache-collection) that contain embeddings generated from source documents. |
+| **EmbeddingsCacheForQueryingExpiration** | `TimeSpan` | The expiration period for documents in the embedding cache collection that contain embeddings generated from search terms in vector search queries.
| + + +```csharp +public class EmbeddingPathConfiguration +{ + public string Path { get; set; } + public ChunkingOptions ChunkingOptions { get; set; } +} + +public class ChunkingOptions +{ + public ChunkingMethod ChunkingMethod { get; set; } // Default is PlainTextSplit + public int MaxTokensPerChunk { get; set; } = 512; + + // 'OverlapTokens' is only applicable when ChunkingMethod is + // 'PlainTextSplitParagraphs' or 'MarkDownSplitParagraphs' + public int OverlapTokens { get; set; } = 0; +} + +public enum ChunkingMethod +{ + PlainTextSplit, + PlainTextSplitLines, + PlainTextSplitParagraphs, + MarkDownSplitLines, + MarkDownSplitParagraphs, + HtmlStrip +} + +public class EmbeddingsTransformation +{ + public string Script { get; set; } + public ChunkingOptions ChunkingOptions {get; set;} +} + +public enum VectorEmbeddingType +{ + Single, + Int8, + Binary, + Text +} +``` + + +#### Deploying the embeddings generation task: + + +```csharp +public AddEmbeddingsGenerationOperation(EmbeddingsGenerationConfiguration configuration); +``` + diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/content/_overview-csharp.mdx b/versioned_docs/version-7.1/ai-integration/generating-embeddings/content/_overview-csharp.mdx new file mode 100644 index 0000000000..51906d1ad7 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/generating-embeddings/content/_overview-csharp.mdx @@ -0,0 +1,185 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB can serve as a vector database, see [Why choose RavenDB as your vector database](../../../ai-integration/vector-search/ravendb-as-vector-database.mdx#why-choose-ravendb-as-your-vector-database). + +* Vector search can be performed on: + * Raw text stored in your documents. + * Pre-made embeddings that you created yourself and stored using these [Data types](../../../ai-integration/vector-search/data-types-for-vector-search.mdx#numerical-data). + * Pre-made embeddings that are automatically generated from your document content by RavenDB's + **embeddings generation tasks** using external service providers, as explained below. +* In this article: + * [Embeddings generation - overview](../../../ai-integration/generating-embeddings/overview.mdx#embeddings-generation---overview) + * [Embeddings generation - process flow](../../../ai-integration/generating-embeddings/overview.mdx#embeddings-generation---process-flow) + * [Supported providers](../../../ai-integration/generating-embeddings/overview.mdx#supported-providers) + * [Creating an embeddings generation task](../../../ai-integration/generating-embeddings/overview.mdx#creating-an-embeddings-generation-task) + * [Monitoring the tasks](../../../ai-integration/generating-embeddings/overview.mdx#monitoring-the-tasks) + * [Get embeddings generation task details](../../../ai-integration/generating-embeddings/overview.mdx#get-embeddings-generation-task-details) + + + +## Embeddings generation - overview + + + +#### Embeddings generation - process flow + +* **Define an Embeddings Generation Task**: + Specify a [connection string](../../../ai-integration/connection-strings/connection-strings-overview.mdx) that defines the AI provider and model for generating embeddings. + Define the source content - what parts of the documents will be used to create the embeddings. + +* **Source content is processed**: + 1. The task extracts the specified content from the documents. + 2. 
If a processing script is defined, it transforms the content before further processing. + 3. The text is split according to the defined chunking method; a separate embedding will be created for each chunk. + 4. Before contacting the provider, RavenDB checks the [embeddings cache](../../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-cache-collection) + to determine whether an embedding already exists for the given content from that provider. + 5. If a matching embedding is found, it is reused, avoiding unnecessary requests. + If no cached embedding is found, the transformed and chunked content is sent to the configured AI provider. + +* **Embeddings are generated by the AI provider**: + The provider generates embeddings and sends them back to RavenDB. + If quantization was defined in the task, RavenDB applies it to the embeddings before storing them. + +* **Embeddings are stored in your database**: + * Each embedding is stored as an attachment in a [dedicated collection](../../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-collection). + * RavenDB maintains an [embeddings cache](../../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-cache-collection), + allowing reuse of embeddings for the same source content and reducing provider calls. + Cached embeddings expire after a configurable duration. + +* **Perform vector search:** + Once the embeddings are stored, you can perform vector searches on your document content by: + * Running a [dynamic query](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#querying-pre-made-embeddings-generated-by-tasks), which automatically creates an auto-index for the search. + * Defining a [static index](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-pre-made-text-embeddings) to store and query embeddings efficiently. + + The query search term is split into chunks, and each chunk is looked up in the cache. + If not found, RavenDB requests an embedding from the provider and caches it. + The embedding (cached or newly created) is then used to compare against stored vectors. + +* **Continuous processing**: + * Embeddings generation tasks are [Ongoing Tasks](../../../studio/database/tasks/ongoing-tasks/general-info.mdx) that process documents as they change. + Before contacting the provider after a document change, the task first checks the cache to see if a matching embedding already exists, avoiding unnecessary requests. + * The requests to generate embeddings from the source text are sent to the provider in batches. + The batch size is configurable, see the [Ai.Embeddings.MaxBatchSize](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxbatchsize) configuration key. + * A failed embeddings generation task will retry after the duration set in the + [Ai.Embeddings.MaxFallbackTimeInSec](../../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxfallbacktimeinsec) configuration key. 
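+
+    For example, these limits could be tuned in the server's `settings.json`
+    (a sketch with illustrative values - see the linked configuration articles for the actual defaults):
+
+    ```json
+    {
+        "Ai.Embeddings.MaxBatchSize": 128,
+        "Ai.Embeddings.MaxFallbackTimeInSec": 60
+    }
+    ```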
+
+
+
+
+
+#### Supported providers
+
+* The following service providers are supported for auto-generating embeddings using tasks:
+
+  * [OpenAI & OpenAI-compatible providers](../../../ai-integration/connection-strings/open-ai.mdx)
+  * [Azure Open AI](../../../ai-integration/connection-strings/azure-open-ai.mdx)
+  * [Google AI](../../../ai-integration/connection-strings/google-ai.mdx)
+  * [Vertex AI](../../../ai-integration/connection-strings/vertex-ai.mdx)
+  * [Hugging Face](../../../ai-integration/connection-strings/hugging-face.mdx)
+  * [Ollama](../../../ai-integration/connection-strings/ollama.mdx)
+  * [Mistral AI](../../../ai-integration/connection-strings/mistral-ai.mdx)
+  * [bge-micro-v2](../../../ai-integration/connection-strings/embedded.mdx) (a local embedded model within RavenDB)
+
+
+
+![Embeddings generation task flow](../assets/embeddings-generation-task-flow.png)
+
+![Vector search flow](../assets/vector-search-flow.png)
+
+## Creating an embeddings generation task
+
+* An embeddings generation task can be created from:
+  * The **AI Tasks view in the Studio**, where you can create, edit, and delete tasks. Learn more in [AI Tasks - list view](../../../ai-integration/ai-tasks-list-view.mdx).
+  * The **Client API** - see [Configuring an embeddings generation task - from the Client API](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#configuring-an-embeddings-generation-task---from-the-client-api).
+* From the Studio:
+
+  ![Add ai task 1](../assets/add-ai-task-1.png)
+
+  1. Go to the **AI Hub** menu.
+  2. Open the **AI Tasks** view.
+  3. Click **Add AI Task** to add a new task.
+
+  ![Add ai task 2](../assets/add-ai-task-2.png)
+
+* See the complete details of the task configuration in the [Embeddings generation task](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx) article.
+
+## Monitoring the tasks
+
+* The status and state of each embeddings generation task are visible in the [AI Tasks - list view](../../../ai-integration/ai-tasks-list-view.mdx).
+
+* Task performance and activity over time can be analyzed in the _AI Tasks Stats_ view,
+  where you can track processing duration, batch sizes, and overall progress.
+  Learn more about the functionality of the stats view in the [Ongoing Tasks Stats](../../../studio/database/stats/ongoing-tasks-stats/overview.mdx) article.
+
+* The number of embeddings generation tasks across all databases can also be monitored using [SNMP](../../../server/administration/snmp/snmp-overview.mdx).
+  The following SNMP OIDs provide relevant metrics:
+  * [5.1.11.25](../../../server/administration/snmp/snmp-overview.mdx#511125) – Total number of enabled embeddings generation tasks.
+  * [5.1.11.26](../../../server/administration/snmp/snmp-overview.mdx#511126) – Total number of active embeddings generation tasks.
+
+## Get embeddings generation task details
+
+* Besides viewing the list of tasks in the [AI Tasks - list view](../../../ai-integration/ai-tasks-list-view.mdx) in the Studio,
+  you can also retrieve embeddings generation task details programmatically.
+
+* This is useful when issuing a vector search query that references an embeddings generation task,
+  where it's important to verify that the task exists beforehand.
For example:
+  * when [Querying pre-made embeddings generated by tasks](../../../ai-integration/vector-search/vector-search-using-dynamic-query#querying-pre-made-embeddings-generated-by-tasks)
+  * or when [Indexing numerical data and querying using text input](../../../ai-integration/vector-search/vector-search-using-static-index#indexing-numerical-data-and-querying-using-text-input)
+
+* There are two ways to check if an embeddings generation task exists:
+  * Using `GetOngoingTaskInfoOperation`.
+  * Accessing the full list of embeddings generation tasks from the database record.
+
+
+
+```csharp
+// Define the get task operation, pass the task NAME
+var getOngoingTaskOp =
+    new GetOngoingTaskInfoOperation("theEmbeddingsGenerationTaskName", OngoingTaskType.EmbeddingsGeneration);
+
+// Execute the operation by passing it to Maintenance.Send
+// Explicitly cast the result to the "EmbeddingsGeneration" type
+var task = (EmbeddingsGeneration)store.Maintenance.Send(getOngoingTaskOp);
+
+// Verify the task exists
+if (task != null)
+{
+    // Access any of the task details
+    var taskStatus = task.TaskState;
+
+    // Access the task identifier
+    var taskIdentifier = task.Configuration.Identifier;
+}
+```
+
+
+```csharp
+// Define the get database record operation, pass your database name
+var getDatabaseRecordOp = new GetDatabaseRecordOperation("yourDatabaseName");
+
+// Execute the operation by passing it to Maintenance.Server.Send
+var dbRecord = store.Maintenance.Server.Send(getDatabaseRecordOp);
+
+// Access the list of embeddings generation tasks
+var tasks = dbRecord.EmbeddingsGenerations;
+
+if (tasks.Count > 0)
+{
+    // Access the first task
+    var task = tasks[0];
+
+    // Access any of the task details
+    var isTaskDisabled = task.Disabled;
+
+    // Access the task identifier
+    var taskIdentifier = task.Identifier;
+}
+```
+
+
diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/embedding-collections.mdx b/versioned_docs/version-7.1/ai-integration/generating-embeddings/embedding-collections.mdx
new file mode 100644
index 0000000000..cb25679e5a
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/generating-embeddings/embedding-collections.mdx
@@ -0,0 +1,214 @@
+---
+title: "The Embedding Collections"
+hide_table_of_contents: true
+sidebar_label: The Embedding Collections
+sidebar_position: 3
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# The Embedding Collections
+
+
+* The embeddings generated by the providers are stored as **attachments** in your database.
+  Each attachment contains a single embedding.
+
+* The server creates the following dedicated collections,
+  which contain documents that reference the embedding attachments:
+  * **Embeddings Collections**
+  * **Embeddings Cache Collection**
+
+* This article describes these custom-designed collections.
+  It is recommended to first refer to this [Overview](../../ai-integration/generating-embeddings/overview.mdx#embeddings-generation---overview)
+  to understand the embeddings generation process flow.
+* In this article:
+  * [The embeddings collections](../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-collections)
+  * [The embeddings cache collection](../../ai-integration/generating-embeddings/embedding-collections.mdx#the-embeddings-cache-collection)
+
+
+## The embeddings collections
+
+* RavenDB creates a separate embeddings collection for each source collection from which embeddings are generated.
+  The naming format for these collections is: `@embeddings/<collection-name>`.
+
+* Each document in the embeddings collection references ALL embeddings generated from
+  the content of the corresponding document in the source collection by any defined embeddings generation task.
+
+* The document structure in the embeddings collection is:
+
+
+
+{`\{
+    "identifier-of-task-1": \{
+        "@quantization": "<quantization-type>",
+        "Property1": [
+            "Hash of the embedding vector generated for 1st text chunk of Property1's content",
+            "Hash of the embedding vector generated for 2nd text chunk of Property1's content",
+            "Hash of the embedding vector generated for 3rd text chunk of Property1's content",
+            "..."
+        ],
+        "Property2": [
+            "Hash of the embedding vector generated for 1st text chunk of Property2's content",
+            "..."
+        ]
+    \},
+    "identifier-of-task-2": \{
+        "Property3": [
+            "Hash of the embedding vector generated for 1st text chunk of Property3's content",
+            "..."
+        ]
+    \},
+    "Other-tasks...": \{
+        ...
+    \},
+    "@metadata": \{
+        "@collection": "@embeddings/<collection-name>",
+        "@flags": "HasAttachments"
+    \}
+\}
+`}
+
+
+* For example:
+  In this [task definition](../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#configuring-an-embeddings-generation-task---from-the-studio),
+  an embeddings generation task is defined on the `Categories` collection.
+  This creates the `@embeddings/Categories` collection, where a document will look as follows:
+
+  ![The embeddings document](./assets/embeddings-collection-1.png)
+
+  1. **Collection name**
+     The unique name of the embeddings collection: `@embeddings/Categories`.
+  2. **Document ID**
+     Each document ID in this collection follows the format: `embeddings/<source-document-id>`
+  3. **Task identifier**
+     The identifier of the task that generated the embeddings for the listed properties.
+  4. **Quantization type**
+     The quantization method applied by the task when generating the embeddings.
+  5. **Source properties & their hashes**
+     This section contains properties from the source document whose content was converted into embeddings.
+     Each property holds an array of Base64 hashes.
+     Each hash is derived from the content of an embedding vector generated for a text chunk from the property's content:
+     `<property-name>: [`
+     &nbsp;&nbsp;&nbsp;`<hash-of-embedding-for-chunk-1>,`
+     &nbsp;&nbsp;&nbsp;`<hash-of-embedding-for-chunk-2>,`
+     &nbsp;&nbsp;&nbsp;`...`
+     `]`
+  6. **Attachment flag**
+     Indicates that the document includes attachments, which store the embeddings.
+     The next image shows the embedding attachments in the document's properties pane.
+
+  ![The embeddings document - attachments](./assets/embeddings-collection-2.png)
+
+  * Each attachment contains a **single embedding**.
+
+  * The **attachment name** is the Base64 hash derived from the content of the embedding vector stored in the attachment:
+    `<hash>`
+
+
+
+## The embeddings cache collection
+
+
+
+#### Cache contents
+* In addition to creating embeddings collections for each source collection,
+  RavenDB creates and maintains a single **embeddings cache collection** named: `@embeddings-cache`.
+ +* This cache collection contains embeddings generated by all providers, + both from source documents and from search terms used in vector search queries. + +* Each document in the `@embeddings-cache` collection references a **single attachment** that contains a single embedding vector. + **The document ID includes**: + * The [connection string identifier](../../ai-integration/connection-strings/connection-strings-overview.mdx#creating-an-ai-connection-string), + which specifies the provider and model that generated the embedding. + * A Base64 hash generated from a text chunk value - either from a source document property or from a search term. + * If the embedding was quantized by the task, the document ID also includes the quantization type. + + + + +#### Cache lookup +* Before making a request to a text embedding provider, + RavenDB first checks the `@embeddings-cache` collection to determine whether an embedding for the given input already exists from the same provider. + +* This applies both when generating embeddings for source document content and when performing a vector search that requires an embedding for the search term. + +* To find a matching embedding, RavenDB: + 1. **Generates a hash** from the chunked text content that requires embedding. + 2. **Identifies the provider** the user is working with, based on the specified connection string. + 3. **Compares these values** (the connection string identifier and the hash) with those stored in the cache collection. + (Each document in `@embeddings-cache` has an ID that includes these two components). + 4. If a document with a matching ID exists in the cache, + RavenDB **retrieves the corresponding embedding** instead of generating a new one. + + + + +#### Cache performance benefits +* **Reduced latency**: + Reusing cached embeddings eliminates the need for additional provider requests, improving response time. + +* **Lower provider costs**: + Since embedding providers often charge per request, caching prevents redundant calls and reduces expenses. + +* **Optimized vector search**: + If a cached embedding exists for the search term in the query, the search runs faster by skipping unnecessary processing. + + + + +#### Expiration policy +* **The expiration date**: + Each document in this cache collection is created with an expiration date, which is set according to the expiration period defined in the embeddings generation task. + Once the expiration date is reached, the document is automatically deleted (provided that [document expiration](../../studio/database/settings/document-expiration.mdx) is enabled). + +* **Extending the expiration period**: + * When a source document (from which embeddings were generated) is modified - even if the change is not to a property used for embeddings - + RavenDB checks the expiration of the matching document in the cache collection. + If the remaining time is less than half of the original duration, RavenDB extends the expiration by the period defined in the task. + * When you make a vector search query and an embedding generated from a chunk of the search term already exists in the cache, + RavenDB also extends the expiration of the matching document by the period defined in the query settings of the embeddings generation task. + + +* **The @embeddings-cache collection**: + + ![The embeddings cache - list](./assets/embeddings-cache-1.png) + + 1. **Collection name** + The name of the embeddings cache collection: `@embeddings-cache`. + + 2. 
**Connection string identifier**
+     The document ID includes the connection string identifier, which specifies the provider that generated the embedding.
+
+  3. **Hash**
+     The document ID includes a Base64 hash created from a text chunk -
+     either from a source document property or from a search term in a vector search query.
+* **A document in the @embeddings-cache collection**:
+
+  ![The embeddings cache - document](./assets/embeddings-cache-2.png)
+
+  1. **Document ID**
+     The document ID follows this format:
+     `embeddings-cache/<connection-string-identifier>/<hash>`
+
+     If the embedding was [quantized](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#what-is-quantization) by the task
+     using a type other than _Single_ (e.g., _Int8_ or _Binary_),
+     the ID format includes the quantization type:
+     `embeddings-cache/<connection-string-identifier>/<hash>/<quantization-type>`
+
+  2. **Expiration time**
+     The document is removed when the expiration time is reached.
+* **The embedding attachment**:
+
+  ![The embeddings cache - attachments](./assets/embeddings-cache-3.png)
+
+  * The name of the attachment is the hash string:
+    `<hash>`
+
+
+
diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/embeddings-generation-task.mdx b/versioned_docs/version-7.1/ai-integration/generating-embeddings/embeddings-generation-task.mdx
new file mode 100644
index 0000000000..6c8d44a7d4
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/generating-embeddings/embeddings-generation-task.mdx
@@ -0,0 +1,40 @@
+---
+title: "The Embeddings Generation Task"
+hide_table_of_contents: true
+sidebar_label: The Embeddings Generation Task
+sidebar_position: 2
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import EmbeddingsGenerationTaskCsharp from './content/_embeddings-generation-task-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/embeddings-generation_start.mdx b/versioned_docs/version-7.1/ai-integration/generating-embeddings/embeddings-generation_start.mdx
new file mode 100644
index 0000000000..43462027a4
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/generating-embeddings/embeddings-generation_start.mdx
@@ -0,0 +1,60 @@
+---
+title: "Generating embeddings: Start"
+hide_table_of_contents: true
+sidebar_label: Start
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+
+import CardWithImage from "@site/src/components/Common/CardWithImage";
+import CardWithImageHorizontal from "@site/src/components/Common/CardWithImageHorizontal";
+import ColGrid from "@site/src/components/ColGrid";
+import embedGenStartApiImage from "./assets/embeddings-generation_start_api-image.png";
+import embedGenStartStudioImage from "./assets/embeddings-generation_start_studio-image.png";
+import aiSearchArticleImage from "./assets/ai-search-article-cover.webp";
+
+import ayendeBlogImage from "@site/static/img/from-ayende-com.webp";
+import webinarThumbnailPlaceholder from "@site/static/img/webinar.webp";
+import discordLargeThumbnailPlaceholder from "@site/static/img/discord-lg.webp";
+
+# Generating embeddings
+
+### Create embeddings to enable AI-powered similarity search.
+[Embeddings](https://en.wikipedia.org/wiki/Embedding_(machine_learning)) are numeric vectors that you can create for data (like a text or an image) to capture meanings, contexts, or relationships related to the data.
You can then search the data by running intelligent queries over its embeddings using [vector search](../../ai-integration/vector-search/vector-search_start) to find content by similarity rather than exact match.
+- RavenDB allows you to create embeddings using native [ongoing embeddings-generation tasks](../../ai-integration/generating-embeddings/embeddings-generation-task) that systematically process document collections and convert document fields (like texts or arrays) into embeddings. To create the embeddings, the tasks can use either an external AI model (such as OpenAI) or RavenDB's default embedding model.
+- You can also create embeddings using external embeddings providers and store them in your database (e.g., to handle other content types such as images).
+- You can also skip pre-generating embeddings altogether and let vector search operations generate embeddings on the fly while searching.
+- Embeddings can be used by other RavenDB AI features. E.g., [AI agents](../../ai-integration/ai-agents/ai-agents_start) can use vector search to retrieve relevant data requested by the LLM.
+
+### Use cases
+Embeddings generation tasks can be used to prepare your data for AI-powered search, analysis, and usage. For example:
+* **Enterprise knowledge bases**
+  Generate embeddings for thousands of documents, policies, and procedures to enable instant semantic search.
+* **Legal document libraries**
+  Process case law, contracts, and regulations to build searchable legal repositories.
+* **Product catalogs**
+  Convert product descriptions, specifications, and reviews into embeddings for enhanced e-commerce search.
+* **Content management systems**
+  Transform blog posts, articles, and marketing materials into searchable vector representations.
+
+### Technical documentation
+Learn about generating, storing, and using embeddings in RavenDB.
+
+
+
+
+
+#### Learn more: In-depth embeddings generation articles
+
+
+
+
+### Related live sessions & videos
+Learn more about enhancing your applications using vector search operations.
+ + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/generating-embeddings/overview.mdx b/versioned_docs/version-7.1/ai-integration/generating-embeddings/overview.mdx new file mode 100644 index 0000000000..5dd8ef569f --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/generating-embeddings/overview.mdx @@ -0,0 +1,40 @@ +--- +title: "Generating Embeddings - Overview" +hide_table_of_contents: true +sidebar_label: "Overview" +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OverviewCsharp from './content/_overview-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/_category_.json b/versioned_docs/version-7.1/ai-integration/vector-search/_category_.json new file mode 100644 index 0000000000..d0ca16e140 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 0, + "label": "Vector Search" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/add-vector-field-1.png b/versioned_docs/version-7.1/ai-integration/vector-search/assets/add-vector-field-1.png new file mode 100644 index 0000000000..ebccdbb278 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/add-vector-field-1.png differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/add-vector-field-2.png b/versioned_docs/version-7.1/ai-integration/vector-search/assets/add-vector-field-2.png new file mode 100644 index 0000000000..4d99d382a4 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/add-vector-field-2.png differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/ai-image-search-with-ravendb.webp b/versioned_docs/version-7.1/ai-integration/vector-search/assets/ai-image-search-with-ravendb.webp new file mode 100644 index 0000000000..f4ed44169a Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/ai-image-search-with-ravendb.webp differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/json-document.png b/versioned_docs/version-7.1/ai-integration/vector-search/assets/json-document.png new file mode 100644 index 0000000000..8634b1b803 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/json-document.png differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/snagit/view-auto-index-entries-1.snagx b/versioned_docs/version-7.1/ai-integration/vector-search/assets/snagit/view-auto-index-entries-1.snagx new file mode 100644 index 0000000000..b11fc7784c Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/snagit/view-auto-index-entries-1.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/snagit/view-auto-index-entries-2.snagx b/versioned_docs/version-7.1/ai-integration/vector-search/assets/snagit/view-auto-index-entries-2.snagx new file mode 100644 index 0000000000..cc88043a28 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/snagit/view-auto-index-entries-2.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/snagit/view-auto-index-entries-3.snagx 
b/versioned_docs/version-7.1/ai-integration/vector-search/assets/snagit/view-auto-index-entries-3.snagx new file mode 100644 index 0000000000..b3cf8930fa Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/snagit/view-auto-index-entries-3.snagx differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/vector-search-1.png b/versioned_docs/version-7.1/ai-integration/vector-search/assets/vector-search-1.png new file mode 100644 index 0000000000..85ffaf211d Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/vector-search-1.png differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/vector-search-2.png b/versioned_docs/version-7.1/ai-integration/vector-search/assets/vector-search-2.png new file mode 100644 index 0000000000..fa114f14a5 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/vector-search-2.png differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/view-auto-index-entries-1.png b/versioned_docs/version-7.1/ai-integration/vector-search/assets/view-auto-index-entries-1.png new file mode 100644 index 0000000000..383867ae2a Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/view-auto-index-entries-1.png differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/view-auto-index-entries-2.png b/versioned_docs/version-7.1/ai-integration/vector-search/assets/view-auto-index-entries-2.png new file mode 100644 index 0000000000..25c880ebbd Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/view-auto-index-entries-2.png differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/assets/view-auto-index-entries-3.png b/versioned_docs/version-7.1/ai-integration/vector-search/assets/view-auto-index-entries-3.png new file mode 100644 index 0000000000..bda8528061 Binary files /dev/null and b/versioned_docs/version-7.1/ai-integration/vector-search/assets/view-auto-index-entries-3.png differ diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/content/_data-types-for-vector-search-csharp.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/content/_data-types-for-vector-search-csharp.mdx new file mode 100644 index 0000000000..6334e4665e --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/content/_data-types-for-vector-search-csharp.mdx @@ -0,0 +1,128 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Data for vector search can be stored in **raw** or **pre-quantized** formats using several data types, + as outlined below. + +* Text and numerical data that is not pre-quantized can be further quantized in the generated embeddings. + Learn more in [Quantization options](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options). 
+
+* In this article:
+  * [Supported data types for vector search](../../../ai-integration/vector-search/data-types-for-vector-search.mdx#supported-data-types-for-vector-search)
+  * [Textual data](../../../ai-integration/vector-search/data-types-for-vector-search.mdx#textual-data)
+  * [Numerical data](../../../ai-integration/vector-search/data-types-for-vector-search.mdx#numerical-data)
+  * [RavenVector](../../../ai-integration/vector-search/data-types-for-vector-search.mdx#ravenvector)
+
+
+
+## Supported data types for vector search
+
+### Textual data
+
+
+
+`string` - A single text entry.
+`string[]` - An array of text entries.
+
+
+
+### Numerical data
+
+* You can store **pre-generated** embedding vectors in your documents,
+  typically created by machine-learning models from text, images, or other sources.
+
+* When storing numerical embeddings in a document field:
+  * Ensure that all vectors within this field across all documents in the collection are generated by the **same model** and model version and have the **same dimensions**.
+  * Consistency in both dimensionality and model source is crucial for meaningful comparisons in the vector space.
+
+* In addition to the native types described below, we highly recommend using [RavenVector](../../../ai-integration/vector-search/data-types-for-vector-search.mdx#ravenvector)
+  for efficient storage and fast queries when working with numerical embeddings.
+
+
+
+**Raw embedding data**:
+Use when precision is critical.
+
+`float[]` - A single vector of numerical values representing raw embedding data.
+`float[][]` - An array of vectors, where each entry is a separate embedding vector.
+
+
+
+
+
+**Pre-quantized data**:
+Use when you prioritize storage efficiency and query speed.
+
+`byte[] / sbyte[]` - A single pre-quantized embedding vector in the _Int8_ or _Binary_ quantization format.
+`byte[][] / sbyte[][]` - An array of pre-quantized embedding vectors.
+
+When storing data in these formats in your documents, you should use [RavenDB's vector quantizer methods](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#section-1).
+
+
+
+
+
+**Base64-encoded data**:
+Use when embedding data needs to be represented as a compact and easily serializable string format.
+
+`string` - A single vector encoded as a Base64 string.
+`string[]` - An array of Base64-encoded vectors.
+
+
+
+
+
+**Using lists**:
+
+While arrays (`float[]`) are the most direct representation of numerical embeddings,
+you can also use lists (for example, `List<float>` or `List<byte>`) for dynamic sizing in your application code.
+
+
+
+## RavenVector
+
+RavenVector is RavenDB's dedicated data type for storing and querying **numerical embeddings**.
+It is highly optimized to minimize storage space and improve the speed of reading arrays from disk,
+making it ideal for vector search.
+
+For example, you can define:
+
+
+
+{`RavenVector<float>; // A single vector of floating-point values.
+List<RavenVector<float>>; // A collection of float-based vectors.
+RavenVector<sbyte>; // A single pre-quantized vector in Int8 format (8-bit signed integer).
+List<RavenVector<sbyte>>; // A collection of sbyte-based vectors.
+RavenVector<byte>; // A single pre-quantized vector in Binary format (8-bit unsigned integer).
+List<RavenVector<byte>>; // A collection of byte-based vectors.
+`}
+
+
+
+When a class property is stored as a `RavenVector`, the vector's content will appear under the `@vector` field in the JSON document stored in the database.
+For example:
+
+
+
+{`public class SampleClass
+{
+    public string Id { get; set; }
+    public string Title { get; set; }
+
+    // Storing data in a RavenVector property for optimized storage and performance
+    public RavenVector<float> EmbeddingRavenVector { get; set; }
+
+    // Storing data in a regular array property
+    public float[] EmbeddingVector { get; set; }
+}
+`}
+
+
+
+
+![json document](../assets/json-document.png)
diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/content/_indexing-attachments-for-vector-search-csharp.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/content/_indexing-attachments-for-vector-search-csharp.mdx
new file mode 100644
index 0000000000..86b063916a
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/vector-search/content/_indexing-attachments-for-vector-search-csharp.mdx
@@ -0,0 +1,960 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article explains how to index attachments using a **static-index** to enable vector search on their content.
+  Note: Vector search on attachment content is not available when making a [dynamic query](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx).
+
+* **Prior to this article**, refer to the [Vector search using a static index](../../../ai-integration/vector-search/vector-search-using-static-index.mdx) article for general knowledge about
+  indexing a vector field.
+
+* In this article:
+  * [Overview](../../../ai-integration/vector-search/indexing-attachments-for-vector-search.mdx#overview)
+  * [Indexing TEXT attachments](../../../ai-integration/vector-search/indexing-attachments-for-vector-search.mdx#indexing-text-attachments)
+  * [Indexing NUMERICAL attachments](../../../ai-integration/vector-search/indexing-attachments-for-vector-search.mdx#indexing-numerical-attachments)
+    * [LINQ index](../../../ai-integration/vector-search/indexing-attachments-for-vector-search.mdx#linq-index)
+    * [JS index](../../../ai-integration/vector-search/indexing-attachments-for-vector-search.mdx#js-index)
+  * [Indexing ALL attachments](../../../ai-integration/vector-search/indexing-attachments-for-vector-search.mdx#indexing-all-attachments)
+
+
+
+## Overview
+
+
+
+#### Attachments in RavenDB
+
+* Attachments in RavenDB allow you to associate binary files with your JSON documents.
+  You can use attachments to store images, PDFs, videos, text files, or any other format.
+
+* Attachments are stored separately from documents, reducing document size and avoiding unnecessary duplication.
+  They are stored as **binary data**, regardless of content type.
+
+* Attachments are handled as streams, allowing efficient upload and retrieval.
+  Learn more in: [What are attachments](../../../document-extensions/attachments/what-are-attachments.mdx).
+
+
+
+#### Indexing attachment content for vector search
+
+You can index attachment content in a vector field within a static-index,
+enabling vector search on text or numerical data that is stored in the attachments:
+
+* **Attachments with TEXT**:
+  * During indexing, RavenDB processes the text into a single embedding per attachment using the built-in
+    [bge-micro-v2](https://huggingface.co/TaylorAI/bge-micro-v2) model.
+
+* **Attachments with NUMERICAL data**:
+  * While attachments can store any file type, RavenDB does Not generate embeddings from images, videos, or other non-textual content.
+    Each attachment must contain a **single** precomputed embedding vector, generated externally.
+  * RavenDB indexes the embedding vector from the attachment and can apply [quantization](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options)
+    (e.g., index it in _Int8_ format) if this is configured.
+  * All embeddings indexed within the same vector-field in the static-index must be vectors of the **same dimension** to ensure consistency in indexing and search.
+    They must also be created using the **same model**.
+
+
+
+## Indexing TEXT attachments
+
+* The following index defines a **vector field** named `VectorFromAttachment`.
+
+* It indexes embeddings generated from the text content of the `description.txt` attachment.
+  This applies to all _Company_ documents that contain an attachment with that name.
+
+
+
+
+{`public class Companies_ByVector_FromTextAttachment :
+    AbstractIndexCreationTask<Company, Companies_ByVector_FromTextAttachment.IndexEntry>
+{
+    public class IndexEntry()
+    {
+        // This index-field will hold embeddings
+        // generated from the TEXT in the attachments.
+        public object VectorFromAttachment { get; set; }
+    }
+
+    public Companies_ByVector_FromTextAttachment()
+    {
+        Map = companies => from company in companies
+
+            // Load the attachment from the document (ensure it is not null)
+            let attachment = LoadAttachment(company, "description.txt")
+            where attachment != null
+
+            select new IndexEntry()
+            {
+                // Index the text content from the attachment in the vector field
+                VectorFromAttachment =
+                    CreateVector(attachment.GetContentAsString(Encoding.UTF8))
+            };
+
+        // Configure the vector field:
+        VectorIndexes.Add(x => x.VectorFromAttachment,
+            new VectorOptions()
+            {
+                // Specify 'Text' as the source format
+                SourceEmbeddingType = VectorEmbeddingType.Text,
+                // Specify the desired destination format within the index
+                DestinationEmbeddingType = VectorEmbeddingType.Single
+            });
+
+        SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax;
+    }
+}
+`}
+
+
+
+{`public class Companies_ByVector_FromTextAttachment_JS :
+    AbstractJavaScriptIndexCreationTask
+{
+    public Companies_ByVector_FromTextAttachment_JS()
+    {
+        Maps = new HashSet<string>
+        {
+            @"map('Companies', function (company) {
+
+                var attachment = loadAttachment(company, 'description.txt');
+                if (!attachment) return null;
+
+                return {
+                    VectorFromAttachment: createVector(attachment.getContentAsString('utf8'))
+                };
+            })"
+        };
+
+        Fields = new Dictionary<string, IndexFieldOptions>()
+        {
+            {
+                "VectorFromAttachment",
+                new IndexFieldOptions()
+                {
+                    Vector = new()
+                    {
+                        SourceEmbeddingType = VectorEmbeddingType.Text,
+                        DestinationEmbeddingType = VectorEmbeddingType.Single
+                    }
+                }
+            }
+        };
+
+        SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax;
+    }
+}
+`}
+
+
+
+{`var indexDefinition = new IndexDefinition
+{
+    Name = "Companies/ByVector/FromTextAttachment",
+
+    Maps = new HashSet<string>
+    {
+        @"from company in docs.Companies
+
+          let attachment = LoadAttachment(company, ""description.txt"")
+          where attachment != null
+
+          select new
+          {
+              VectorFromAttachment =
+                  CreateVector(attachment.GetContentAsString(Encoding.UTF8))
+          }"
+    },
+
+    Fields = new Dictionary<string, IndexFieldOptions>()
+    {
+        {
+            "VectorFromAttachment",
+            new IndexFieldOptions()
+            {
+                Vector = new VectorOptions()
+                {
+                    SourceEmbeddingType = VectorEmbeddingType.Text,
+                    DestinationEmbeddingType = VectorEmbeddingType.Single
+                }
+            }
+        }
+    },
+
+    Configuration = new IndexConfiguration()
+    {
+        ["Indexing.Static.SearchEngineType"] = "Corax"
+    }
+};
+
+store.Maintenance.Send(new
PutIndexesOperation(indexDefinition)); +`} + + + + +{`// Prepare text as \`byte[]\` to be stored as attachments: +// ===================================================== +var byteArray1 = Encoding.UTF8.GetBytes( + "Supplies soft drinks, fruit juices, and flavored syrups to restaurants and retailers."); +var byteArray2 = Encoding.UTF8.GetBytes( + "Supplies fine dining restaurants with premium meats, cheeses, and wines across France."); +var byteArray3 = Encoding.UTF8.GetBytes( + "An American grocery chain known for its fresh produce, organic foods, and local meats."); +var byteArray4 = Encoding.UTF8.GetBytes( + "An Asian grocery store specializing in ingredients for Japanese and Thai cuisine."); +var byteArray5 = Encoding.UTF8.GetBytes( + "A rural general store offering homemade jams, fresh-baked bread, and locally crafted gifts."); + +using (var session = store.OpenSession()) +{ + // Load existing Company documents from RavenDB's sample data: + // =========================================================== + var company1 = session.Load("companies/11-A"); + var company2 = session.Load("companies/26-A"); + var company3 = session.Load("companies/32-A"); + var company4 = session.Load("companies/41-A"); + var company5 = session.Load("companies/43-A"); + + // Store the attachments in the documents (using MemoryStream): + // ============================================================ + session.Advanced.Attachments.Store(company1, "description.txt", + new MemoryStream(byteArray1), "text/plain"); + session.Advanced.Attachments.Store(company2, "description.txt", + new MemoryStream(byteArray2), "text/plain"); + session.Advanced.Attachments.Store(company3, "description.txt", + new MemoryStream(byteArray3), "text/plain"); + session.Advanced.Attachments.Store(company4, "description.txt", + new MemoryStream(byteArray4), "text/plain"); + session.Advanced.Attachments.Store(company5, "description.txt", + new MemoryStream(byteArray5), "text/plain"); + + session.SaveChanges(); +} +`} + + + + +Execute a vector search using the index: +Results will include _Company_ documents whose attachment contains text similar to `"chinese food"`. 
+ + + + +{`var relevantCompanies = session + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + searchTerm => searchTerm + .ByText("chinese food"), 0.8f) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToList(); +`} + + + + +{`var relevantCompanies = await asyncSession + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + searchTerm => searchTerm + .ByText("chinese food"), 0.8f) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToListAsync(); +`} + + + + +{`var relevantCompanies = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + searchTerm => searchTerm + .ByText("chinese food"), 0.8f) + .WaitForNonStaleResults() + .OfType() + .ToList(); +`} + + + + +{`var relevantCompanies = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + searchTerm => searchTerm + .ByText("chinese food"), 0.8f) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +`} + + + + +{`var relevantCompanies = session.Advanced + .RawQuery(@" + from index 'Companies/ByVector/FromTextAttachment' + where vector.search(VectorFromAttachment, $searchTerm, 0.8)") + .AddParameter("searchTerm", "chinese food") + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var relevantCompanies = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Companies/ByVector/FromTextAttachment' + where vector.search(VectorFromAttachment, $searchTerm, 0.8)") + .AddParameter("searchTerm", "chinese food") + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Companies/ByVector/FromTextAttachment" +where vector.search(VectorFromAttachment, $searchTerm, 0.8) +{ "searchTerm" : "chinese food" } +`} + + + + +You can now extract the text from the attachments of the resulting documents: + + + +{`// Extract text from the attachment of the first resulting document +// ================================================================ + +// Retrieve the attachment stream +var company = relevantCompanies[0]; +var attachmentResult = session.Advanced.Attachments.Get(company, "description.txt"); +var attStream = attachmentResult.Stream; + +// Read the attachment content into memory and decode it as a UTF-8 string +var ms = new MemoryStream(); +attStream.CopyTo(ms); +string attachmentText = Encoding.UTF8.GetString(ms.ToArray()); +`} + + + +## Indexing NUMERICAL attachments + +### LINQ index + +* The following index defines a **vector field** named `VectorFromAttachment`. + +* It indexes embeddings generated from the numerical data stored in the `vector.raw` attachment. + This applies to all _Company_ documents that contain an attachment with that name. + +* Each attachment contains raw numerical data in 32-bit floating-point format. + + + + +{`public class Companies_ByVector_FromNumericalAttachment : + AbstractIndexCreationTask +{ + public class IndexEntry() + { + // This index-field will hold embeddings + // generated from the NUMERICAL content in the attachments. 
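+        // (Each 'vector.raw' attachment is expected to contain a single
+        // precomputed embedding, stored as raw 32-bit floating-point values)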
+ public object VectorFromAttachment { get; set; } + } + + public Companies_ByVector_FromNumericalAttachment() + { + Map = companies => from company in companies + + // Load the attachment from the document (ensure it is not null) + let attachment = LoadAttachment(company, "vector.raw") + where attachment != null + + select new IndexEntry + { + // Index the attachment's content in the vector field + VectorFromAttachment = CreateVector(attachment.GetContentAsStream()) + }; + + // Configure the vector field: + VectorIndexes.Add(x => x.VectorFromAttachment, + new VectorOptions() + { + // Define the source embedding type + SourceEmbeddingType = VectorEmbeddingType.Single, + // Define the desired destination format within the index + DestinationEmbeddingType = VectorEmbeddingType.Single + }); + + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`var indexDefinition = new IndexDefinition +{ + Name = "Companies/ByVector/FromNumericalAttachment", + + Maps = new HashSet + { + @"from company in docs.Companies + + let attachment = LoadAttachment(company, ""vector.raw"") + where attachment != null + + select new + { + VectorFromAttachment = CreateVector(attachment.GetContentAsStream()) + }" + }, + + Fields = new Dictionary() + { + { + "VectorFromAttachment", + new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Single, + DestinationEmbeddingType = VectorEmbeddingType.Single + } + } + } + }, + + Configuration = new IndexConfiguration() + { + ["Indexing.Static.SearchEngineType"] = "Corax" + } +}; + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +`} + + + + +{`// These vectors are simple pre-computed embedding vectors with 32-bit floating-point values. +// Note: In a real scenario, embeddings would be generated by a model. +// ========================================================================================== +var v1 = new float[] { 0.1f, 0.2f, 0.3f, 0.4f }; +var v2 = new float[] { 0.1f, 0.7f, 0.8f, 0.9f }; +var v3 = new float[] { 0.5f, 0.6f, 0.7f, 0.8f }; + +// Prepare the embedding vectors as \`byte[]\` to be stored as attachments: +// ===================================================================== +var byteArray1 = MemoryMarshal.Cast(v1).ToArray(); +var byteArray2 = MemoryMarshal.Cast(v2).ToArray(); +var byteArray3 = MemoryMarshal.Cast(v3).ToArray(); + +using (var session = store.OpenSession()) +{ + // Load existing Company documents from RavenDB's sample data: + // =========================================================== + var company1 = session.Load("companies/50-A"); + var company2 = session.Load("companies/51-A"); + var company3 = session.Load("companies/52-A"); + + // Store the attachments in the documents (using MemoryStream): + // ============================================================ + session.Advanced.Attachments.Store(company1, "vector.raw", new MemoryStream(byteArray1)); + session.Advanced.Attachments.Store(company2, "vector.raw", new MemoryStream(byteArray2)); + session.Advanced.Attachments.Store(company3, "vector.raw", new MemoryStream(byteArray3)); + + session.SaveChanges(); +} +`} + + + + +Execute a vector search using the index: +Results will include _Company_ documents whose attachment contains vectors similar to the query vector. 
+ + + + +{`var similarCompanies = session + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + queryVector => queryVector + .ByEmbedding(new float[] { 0.1f, 0.2f, 0.3f, 0.4f })) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToList(); +`} + + + + +{`var similarCompanies = await asyncSession + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + queryVector => queryVector + .ByEmbedding(new float[] { 0.1f, 0.2f, 0.3f, 0.4f })) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToListAsync(); +`} + + + + +{`var similarCompanies = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + queryVector => queryVector + .ByEmbedding(new float[] { 0.1f, 0.2f, 0.3f, 0.4f })) + .WaitForNonStaleResults() + .OfType() + .ToList(); +`} + + + + +{`var similarCompanies = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + queryVector => queryVector + .ByEmbedding(new float[] { 0.1f, 0.2f, 0.3f, 0.4f })) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +`} + + + + +{`var similarCompanies = session.Advanced + .RawQuery(@" + from index 'Companies/ByVector/FromNumericalAttachment' + where vector.search(VectorFromAttachment, $queryVector)") + .AddParameter("queryVector", new float[] { 0.1f, 0.2f, 0.3f, 0.4f }) + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var similarCompanies = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Companies/ByVector/FromNumericalAttachment' + where vector.search(VectorFromAttachment, $queryVector)") + .AddParameter("queryVector", new float[] { 0.1f, 0.2f, 0.3f, 0.4f }) + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Companies/ByVector/FromNumericalAttachment" +where vector.search(VectorFromAttachment, $queryVector) +{ "queryVector" : [0.1, 0.2, 0.3, 0.4] } +`} + + + + +### JS index + +* The following is the JavaScript index format equivalent to the [LINQ index](../../../ai-integration/vector-search/indexing-attachments-for-vector-search.mdx#linq-index) shown above. + +* The main difference is that JavaScript indexes do Not support `getContentAsStream()` on attachment objects: + * Because of this, embedding vectors must be stored in attachments as **Base64-encoded strings**. + * Use `getContentAsString()` to retrieve the attachment content as a string, as shown in this example. + + + + +{`public class Companies_ByVector_FromNumericalAttachment_JS : + AbstractJavaScriptIndexCreationTask +{ + public Companies_ByVector_FromNumericalAttachment_JS() + { + Maps = new HashSet() + { + @"map('Companies', function (company) { + + var attachment = loadAttachment(company, 'vector_base64.raw'); + if (!attachment) return null; + + return { + VectorFromAttachment: createVector(attachment.getContentAsString('utf8')) + }; + })" + }; + + Fields = new(); + Fields.Add("VectorFromAttachment", new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Single, + DestinationEmbeddingType = VectorEmbeddingType.Single + } + }); + + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`// These vectors are simple pre-computed embedding vectors with 32-bit floating-point values. +// Note: In a real scenario, embeddings would be generated by a model. 
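+// (All vectors indexed under the same vector field must share the same dimensionality
+// and be produced by the same model - here, each vector has 4 dimensions)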
+// ========================================================================================== +var v1 = new float[] { 0.1f, 0.2f, 0.3f, 0.4f }; +var v2 = new float[] { 0.1f, 0.7f, 0.8f, 0.9f }; +var v3 = new float[] { 0.5f, 0.6f, 0.7f, 0.8f }; + +// Prepare the embedding vectors as a BASE64 string to be stored as attachments: +// ============================================================================= +var base64ForV1 = Convert.ToBase64String(MemoryMarshal.Cast(v1)); +var base64ForV2 = Convert.ToBase64String(MemoryMarshal.Cast(v2)); +var base64ForV3 = Convert.ToBase64String(MemoryMarshal.Cast(v3)); + +// Convert to byte[] for streaming: +// ================================ +var byteArray1 = Encoding.UTF8.GetBytes(base64ForV1); +var byteArray2 = Encoding.UTF8.GetBytes(base64ForV2); +var byteArray3 = Encoding.UTF8.GetBytes(base64ForV3); + +using (var session = store.OpenSession()) +{ + // Load existing Company documents from RavenDB's sample data: + // =========================================================== + var company1 = session.Load("companies/60-A"); + var company2 = session.Load("companies/61-A"); + var company3 = session.Load("companies/62-A"); + + // Store the attachments in the documents (using MemoryStream): + // ============================================================ + session.Advanced.Attachments.Store(company1, "vector_base64.raw", new MemoryStream(byteArray1)); + session.Advanced.Attachments.Store(company2, "vector_base64.raw", new MemoryStream(byteArray2)); + session.Advanced.Attachments.Store(company3, "vector_base64.raw", new MemoryStream(byteArray3)); + + session.SaveChanges(); +} +`} + + + + +Execute a vector search using the index: +Results will include _Company_ documents whose attachment contains vectors similar to the query vector. + + + + +{`var similarCompanies = session.Advanced + .RawQuery(@" + from index 'Companies/ByVector/FromNumericalAttachment/JS' + where vector.search(VectorFromAttachment, $queryVector)") + .AddParameter("queryVector", new float[] { 0.1f, 0.2f, 0.3f, 0.4f }) + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var similarCompanies = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Companies/ByVector/FromNumericalAttachment/JS' + where vector.search(VectorFromAttachment, $queryVector)") + .AddParameter("queryVector", new float[] { 0.1f, 0.2f, 0.3f, 0.4f }) + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Companies/ByVector/FromNumericalAttachment/JS" +where vector.search(VectorFromAttachment, $queryVector) +{ "queryVector" : [0.1, 0.2, 0.3, 0.4] } +`} + + + + +## Indexing ALL attachments + +* The following index defines a vector field named `VectorFromAttachment`. + +* It indexes embeddings generated from the numerical data stored in ALL attachments of all _Company_ documents. + + + + +{`public class Companies_ByVector_AllAttachments : + AbstractIndexCreationTask +{ + public class IndexEntry() + { + // This index-field will hold embeddings + // generated from the NUMERICAL content of ALL attachments. 
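+        // (A document with several attachments contributes several vectors,
+        // all indexed under this single field)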
+        public object VectorFromAttachment { get; set; }
+    }
+
+    public Companies_ByVector_AllAttachments()
+    {
+        Map = companies => from company in companies
+
+            // Load ALL attachments from the document
+            let attachments = LoadAttachments(company)
+
+            select new IndexEntry
+            {
+                // Index the attachments' content in the vector field
+                VectorFromAttachment = CreateVector(
+                    attachments.Select(e => e.GetContentAsStream()))
+            };
+
+        // Configure the vector field:
+        VectorIndexes.Add(x => x.VectorFromAttachment,
+            new VectorOptions()
+            {
+                SourceEmbeddingType = VectorEmbeddingType.Single,
+                DestinationEmbeddingType = VectorEmbeddingType.Single
+            });
+
+        SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax;
+    }
+}
+`}
+
+
+
+
+{`var indexDefinition = new IndexDefinition
+{
+    Name = "Companies/ByVector/AllAttachments",
+
+    Maps = new HashSet<string>
+    {
+        @"from company in docs.Companies
+
+        let attachments = LoadAttachments(company)
+
+        select new
+        {
+            VectorFromAttachment =
+                CreateVector(attachments.Select(e => e.GetContentAsStream()))
+        }"
+    },
+
+    Fields = new Dictionary<string, IndexFieldOptions>()
+    {
+        {
+            "VectorFromAttachment",
+            new IndexFieldOptions()
+            {
+                Vector = new VectorOptions()
+                {
+                    SourceEmbeddingType = VectorEmbeddingType.Single,
+                    DestinationEmbeddingType = VectorEmbeddingType.Single,
+                }
+            }
+        }
+    },
+
+    Configuration = new IndexConfiguration()
+    {
+        ["Indexing.Static.SearchEngineType"] = "Corax"
+    }
+};
+
+store.Maintenance.Send(new PutIndexesOperation(indexDefinition));
+`}
+
+
+
+
+{`// These vectors are simple pre-computed embedding vectors with 32-bit floating-point values.
+// Note: In a real scenario, embeddings would be generated by a model.
+// ==========================================================================================
+var v1 = new float[] { 0.1f, 0.2f, 0.3f, 0.4f };
+var v2 = new float[] { 0.5f, 0.6f, 0.7f, 0.8f };
+
+var v3 = new float[] { -0.1f, 0.2f, -0.7f, -0.8f };
+var v4 = new float[] { 0.3f, -0.6f, 0.9f, -0.9f };
+
+// Prepare the embedding vectors as \`byte[]\` to be stored as attachments:
+// =====================================================================
+var byteArray1 = MemoryMarshal.Cast<float, byte>(v1).ToArray();
+var byteArray2 = MemoryMarshal.Cast<float, byte>(v2).ToArray();
+
+var byteArray3 = MemoryMarshal.Cast<float, byte>(v3).ToArray();
+var byteArray4 = MemoryMarshal.Cast<float, byte>(v4).ToArray();
+
+using (var session = store.OpenSession())
+{
+    // Load existing Company documents from RavenDB's sample data:
+    // ===========================================================
+    var company1 = session.Load<Company>("companies/70-A");
+    var company2 = session.Load<Company>("companies/71-A");
+
+    // Store multiple attachments in the documents (using MemoryStream):
+    // =================================================================
+
+    session.Advanced.Attachments.Store(company1, "vector1.raw", new MemoryStream(byteArray1));
+    session.Advanced.Attachments.Store(company1, "vector2.raw", new MemoryStream(byteArray2));
+
+    session.Advanced.Attachments.Store(company2, "vector1.raw", new MemoryStream(byteArray3));
+    session.Advanced.Attachments.Store(company2, "vector2.raw", new MemoryStream(byteArray4));
+
+    session.SaveChanges();
+}
+`}
+
+
+
+
+Execute a vector search using the index:
+Results will include _Company_ documents whose attachments contain vectors similar to the query vector.
+ + + + +{`var similarCompanies = session + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + queryVector => queryVector + .ByEmbedding(new float[] { -0.1f, 0.2f, -0.7f, -0.8f })) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToList(); +`} + + + + +{`var similarCompanies = await asyncSession + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + queryVector => queryVector + .ByEmbedding(new float[] { -0.1f, 0.2f, -0.7f, -0.8f })) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToListAsync(); +`} + + + + +{`var similarCompanies = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + queryVector => queryVector + .ByEmbedding(new float[] { -0.1f, 0.2f, -0.7f, -0.8f })) + .WaitForNonStaleResults() + .OfType() + .ToList(); +`} + + + + +{`var similarCompanies = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromAttachment), + queryVector => queryVector + .ByEmbedding(new float[] { -0.1f, 0.2f, -0.7f, -0.8f })) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +`} + + + + +{`var similarCompanies = session.Advanced + .RawQuery(@" + from index 'Companies/ByVector/AllAttachments' + where vector.search(VectorFromAttachment, $queryVector)") + .AddParameter("queryVector", new float[] { 0.1f, 0.2f, -0.7f, -0.8f }) + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var similarCompanies = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Companies/ByVector/AllAttachments' + where vector.search(VectorFromAttachment, $queryVector)") + .AddParameter("queryVector", new float[] { 0.1f, 0.2f, -0.7f, -0.8f }) + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Companies/ByVector/AllAttachments" +where vector.search(VectorFromAttachment, $queryVector) +{ "queryVector" : [0.1, 0.2, -0.7, -0.8] } +`} + + + diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/content/_vector-search-using-dynamic-query-csharp.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/content/_vector-search-using-dynamic-query-csharp.mdx new file mode 100644 index 0000000000..4001ed9b44 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/content/_vector-search-using-dynamic-query-csharp.mdx @@ -0,0 +1,1806 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article explains how to run a vector search using a **dynamic query**. + To learn how to run a vector search using a static-index, see [vector search using a static-index](../../../ai-integration/vector-search/vector-search-using-static-index.mdx). 
+ +* In this article: + * [What is a vector search](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#what-is-a-vector-search) + * [Dynamic vector search - query overview](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---query-overview) + * [Creating embeddings for the auto-index](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#creating-embeddings-for-the-auto-index) + * [Retrieving results](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#retrieving-results) + * [The dynamic query parameters](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#the-dynamic-query-parameters) + * [Corax auto-indexes](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#corax-auto-indexes) + * [Dynamic vector search - querying TEXT](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---querying-text) + * [Querying raw text](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#querying-raw-text) + * [Querying pre-made embeddings generated by tasks](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#querying-pre-made-embeddings-generated-by-tasks) + * [Dynamic vector search - querying NUMERICAL content](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---querying-numerical-content) + * [Dynamic vector search - querying for similar documents](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---querying-for-similar-documents) + * [Dynamic vector search - exact search](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---exact-search) + * [Quantization options](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options) + * [Querying vector fields and regular data in the same query](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#querying-vector-fields-and-regular-data-in-the-same-query) + * [Combining multiple vector searches in the same query](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#combining-multiple-vector-searches-in-the-same-query) + * [Syntax](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#syntax) + + + +## What is a vector search + +* Vector search is a method for finding documents based on their **contextual similarity** to the search item provided in a given query. + +* Your data is converted into vectors, known as **embeddings**, and stored in a multidimensional space. + Unlike traditional keyword-based searches, which rely on exact matches, + vector search identifies vectors closest to your query vector and retrieves the corresponding documents. + +## Dynamic vector search - query overview + + + +#### Overview + +* A dynamic vector search query can be performed on: + * Raw text stored in your documents. + * Pre-made embeddings that you created yourself and stored using these [Data types](../../../ai-integration/vector-search/data-types-for-vector-search.mdx). + * Pre-made embeddings that are automatically generated from your document content + by RavenDB's [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx) using external service providers. 
+ +* Note: Vector search queries cannot be used with [Subscription queries](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-query). + +* When executing a dynamic vector search query, RavenDB creates a [Corax Auto-Index](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#corax-auto-indexes) to process the query, + and the results are retrieved from that index. + +* To make a **dynamic vector search query**: + * From the Client API - use method `VectorSearch()` + * In RQL - use method `vector.search()` + * Examples are provided below + + + + + +#### Creating embeddings for the Auto-index + +* **Creating embeddings from TEXTUAL content**: + + * **Pre-made embeddings via tasks**: + Embeddings can be created from textual content in your documents by defining [Tasks that generate embeddings](../../../ai-integration/generating-embeddings/overview.mdx). + When performing a dynamic vector search query over textual data and explicitly specifying the task, + results will be retrieved by comparing your search term against the embeddings previously generated by that task. + A query example is available in: [Querying pre-made embeddings generated by tasks](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#querying-pre-made-embeddings-generated-by-tasks). + + * **Default embeddings generation**: + When querying textual data without specifying a task, RavenDB generates an embedding vector for the specified document field in each document of the queried collection, + using the built-in [bge-micro-v2](https://huggingface.co/TaylorAI/bge-micro-v2) sentence-transformer model. + A query example is available in: [Querying raw text](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#querying-raw-text). + +* **Creating embeddings from NUMERICAL arrays**: + When querying over pre-made numerical arrays that are already in vector format, + RavenDB will index them without transformation (unless further quantization is applied). + A query example is available in: [Vector search on numerical content](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---querying-numerical-content). + + To avoid index errors, ensure that the dimensionality of these numerical arrays (i.e., their length) + is consistent across all your source documents for the field you are querying. + If you wish to enforce such consistency - + perform a vector search using a [Static-index](../../../ai-integration/vector-search/vector-search-using-static-index.mdx) instead of a dynamic query. + + +* **Quantizing the embeddings**: + The embeddings are quantized based on the parameters specified in the query. + Learn more about quantization in [Quantization options](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options). + +* **Indexing the embeddings**: + RavenDB indexes the embeddings on the server using the [HNSW algorithm](https://en.wikipedia.org/wiki/Hierarchical_navigable_small_world). + This algorithm organizes embeddings into a high-dimensional graph structure, + enabling efficient retrieval of Approximate Nearest Neighbors (ANN) during queries. + + + + + +#### Retrieving results + +* **Processing the query**: + To ensure consistent comparisons, the **search term** is transformed into an embedding vector using the same method as the document fields. 
+  The server will search for the most similar vectors in the indexed vector space,
+  taking into account all the [query parameters](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#the-dynamic-query-parameters) described below.
+  The documents that correspond to the resulting vectors are then returned to the client.
+
+* **Search results**:
+  By default, the resulting documents will be ordered by their score.
+  You can modify this behavior using the [Indexing.Corax.VectorSearch.OrderByScoreAutomatically](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchorderbyscoreautomatically) configuration key.
+  In addition, you can apply any of the 'order by' methods to your query, as explained in [sort query results](../../../client-api/session/querying/sort-query-results.mdx).
+
+
+
+
+
+#### The dynamic query parameters
+
+* **Source data format**
+  RavenDB supports performing vector search on TEXTUAL values or NUMERICAL arrays.
+  The source data can be formatted as `Text`, `Single`, `Int8`, or `Binary`.
+
+* **Target quantization**
+  You can specify the quantization encoding for the embeddings that will be created from the source data.
+  Learn more about quantization in [Quantization options](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options).
+
+* **Minimum similarity**
+  You can specify the minimum similarity to use when searching for related vectors.
+  The value can be between `0.0f` and `1.0f`.
+  * A value closer to `1.0f` requires higher similarity between vectors,
+    while a value closer to `0.0f` allows for less similarity.
+  * **Important**: To filter out less relevant results when performing vector search queries,
+    it is recommended to explicitly specify the minimum similarity level at query time.
+
+  If not specified, the default value is taken from the following configuration key:
+  [Indexing.Corax.VectorSearch.DefaultMinimumSimilarity](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultminimumsimilarity).
+
+* **Number of candidates**
+  You can specify the maximum number of vectors that RavenDB will return from a graph search.
+  The number of resulting documents that correspond to these vectors may be:
+  * lower than the number of candidates - when multiple vectors originate from the same document.
+  * higher than the number of candidates - when the same vector is shared between multiple documents.
+
+  If not specified, the default value is taken from the following configuration key:
+  [Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForQuerying](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofcandidatesforquerying).
+
+* **Search method**
+  * _Approximate Nearest-Neighbor search_ (Default):
+    Search for related vectors in an approximate manner, providing faster results.
+  * _Exact search_:
+    Perform a thorough scan of the vectors to find the actual closest vectors,
+    offering better accuracy but at a higher computational cost.
+    Learn more in [Exact search](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---exact-search).
+
+
+
+
+
+#### Corax auto-indexes
+
+* Only [Corax indexes](../../../indexes/search-engine/corax.mdx) support vector search.
+
+* Even if your **default auto-index engine** is set to Lucene (via [Indexing.Auto.SearchEngineType](../../../server/configuration/indexing-configuration.mdx#indexingautosearchenginetype)),
+  performing a vector search using a dynamic query will create a new auto-index based on Corax.
+
+* Normally, new dynamic queries extend existing [auto-indexes](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index) if they require additional fields.
+  However, a dynamic query with a vector search will not extend an existing Lucene-based auto-index.
+
+
+  For example, suppose you have an existing **Lucene**-based auto-index on the Employees collection, e.g.
+  `Auto/Employees/ByFirstName`.
+
+  Now, you run a query that:
+
+  * searches for Employees by _LastName_ (a regular text search)
+  * and performs a vector search over the _Notes_ field.
+
+  The following new **Corax**-based auto-index will be created:
+  `Auto/Employees/ByLastNameAndVector.search(embedding.text(Notes))`,
+  and the existing **Lucene** index on Employees will not be deleted or extended.
+
+
+
+
+## Dynamic vector search - querying TEXT
+
+### Querying raw text
+
+* The following example searches for Product documents where the _'Name'_ field is similar to the search term `"italian food"`.
+
+* Since the query does Not specify an [Embeddings generation task](../../../ai-integration/generating-embeddings/overview.mdx),
+  RavenDB dynamically generates embedding vectors for the _'Name'_ field of each document in the queried collection using the built-in
+  [bge-micro-v2](https://huggingface.co/TaylorAI/bge-micro-v2) text-embedding model.
+  The generated embeddings are indexed within the auto-index.
+  Unlike embeddings pre-made by tasks, this process does not create dedicated collections for storing embeddings.
+
+* Since this query does not specify a target quantization format,
+  the generated embedding vectors will be encoded in the default _Single_ format (single-precision floating-point).
+  Refer to [Quantization options](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options) for examples that specify the destination quantization.
+ + + +```csharp +var similarProducts = session.Query() + // Perform a vector search + // Call the 'VectorSearch' method + .VectorSearch( + // Call 'WithText' + // Specify the document field in which to search for similar values + field => field.WithText(x => x.Name), + // Call 'ByText' + // Provide the term for the similarity comparison + searchTerm => searchTerm.ByText("italian food"), + // It is recommended to specify the minimum similarity level + 0.82f, + // Optionally, specify the number of candidates for the search + 20) + // Waiting for not-stale results is not mandatory + // but will assure results are not stale + .Customize(x => x.WaitForNonStaleResults()) + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Query() + .VectorSearch( + field => field.WithText(x => x.Name), + searchTerm => searchTerm.ByText("italian food"), + 0.82f, + 20) + .Customize(x => x.WaitForNonStaleResults()) + .ToListAsync(); +``` + + +```csharp +var similarProducts = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field.WithText(x => x.Name), + searchTerm => searchTerm.ByText("italian food"), + 0.82f, + 20) + .WaitForNonStaleResults() + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field.WithText(x => x.Name), + searchTerm => searchTerm.ByText("italian food"), + 0.82f, + 20) + .WaitForNonStaleResults() + .ToListAsync(); +``` + + +```csharp +var similarProducts = session.Advanced + .RawQuery(@" + from 'Products' + // Wrap the document field 'Name' with 'embedding.text' to indicate the source data type + where vector.search(embedding.text(Name), $searchTerm, 0.82, 20)") + .AddParameter("searchTerm", "italian food") + .WaitForNonStaleResults() + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Advanced + .AsyncRawQuery(@" + from 'Products' + // Wrap the document field 'Name' with 'embedding.text' to indicate the source data type + where vector.search(embedding.text(Name), $searchTerm, 0.82, 20)") + .AddParameter("searchTerm", "italian food") + .WaitForNonStaleResults() + .ToListAsync(); +``` + + +```sql +// Query the Products collection +from "Products" +// Call 'vector.search' +// Wrap the document field 'Name' with 'embedding.text' to indicate the source data type +where vector.search(embedding.text(Name), "italian food", 0.82, 20) +``` + + + +* Executing the above query on the RavenDB sample data will create the following **auto-index**: + `Auto/Products/ByVector.search(embedding.text(Name))` + + ![Search for italian food 1](../assets/vector-search-1.png) + +* Running the same query at a lower similarity level will return more results related to _"Italian food"_ but they may be less similar: + + ![Search for italian food 2](../assets/vector-search-2.png) + +### Querying pre-made embeddings generated by tasks + +* The following example searches for Category documents where the _'Name'_ field is similar to the search term `"candy"`. + +* The query explicitly specifies the **identifier** of the embeddings generation task that was defined in + [this example](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#configuring-an-embeddings-generation-task---from-the-studio). + An `InvalidQueryException` will be thrown if no embeddings generation task with the specified identifier exists. + + To avoid this error, you can verify that the specified embeddings generation task exists before issuing the query. 
+  Refer to [Get embeddings generation task details](../../../ai-integration/generating-embeddings/overview.mdx#get-embeddings-generation-task-details)
+  to learn how to programmatically check which tasks are defined
+  and what their identifiers are.
+
+* Results are retrieved by comparing the search term against the pre-made embeddings generated by the specified task,
+  which are stored in the [Embedding collections](../../../ai-integration/generating-embeddings/embedding-collections.mdx).
+  To ensure consistent comparisons, the search term is transformed into an embedding using the same embeddings generation task.
+
+
+
+```csharp
+var similarCategories = session.Query<Category>()
+    .VectorSearch(
+        field => field
+            // Call 'WithText'
+            // Specify the document field in which to search for similar values
+            .WithText(x => x.Name)
+            // Call 'UsingTask'
+            // Specify the identifier of the task that generated
+            // the embeddings for the Name field
+            .UsingTask("id-for-task-open-ai"),
+        // Call 'ByText'
+        // Provide the search term for the similarity comparison
+        searchTerm => searchTerm.ByText("candy"),
+        // It is recommended to specify the minimum similarity level
+        0.75f)
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToList();
+```
+
+
+```csharp
+var similarCategories = await asyncSession.Query<Category>()
+    .VectorSearch(
+        field => field
+            .WithText(x => x.Name)
+            .UsingTask("id-for-task-open-ai"),
+        searchTerm => searchTerm.ByText("candy"),
+        0.75f)
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToListAsync();
+```
+
+
+```csharp
+var similarCategories = session.Advanced
+    .DocumentQuery<Category>()
+    .VectorSearch(
+        field => field
+            .WithText(x => x.Name)
+            .UsingTask("id-for-task-open-ai"),
+        searchTerm => searchTerm.ByText("candy"),
+        0.75f)
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var similarCategories = await asyncSession.Advanced
+    .AsyncDocumentQuery<Category>()
+    .VectorSearch(
+        field => field
+            .WithText(x => x.Name)
+            .UsingTask("id-for-task-open-ai"),
+        searchTerm => searchTerm.ByText("candy"),
+        0.75f)
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```csharp
+var similarCategories = session.Advanced
+    .RawQuery<Category>(@"
+        from 'Categories'
+        // Specify the identifier of the task that generated the embeddings inside 'ai.task'
+        where vector.search(embedding.text(Name, ai.task('id-for-task-open-ai')), $searchTerm, 0.75)")
+    .AddParameter("searchTerm", "candy")
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var similarCategories = await asyncSession.Advanced
+    .AsyncRawQuery<Category>(@"
+        from 'Categories'
+        // Specify the identifier of the task that generated the embeddings inside 'ai.task'
+        where vector.search(embedding.text(Name, ai.task('id-for-task-open-ai')), $searchTerm, 0.75)")
+    .AddParameter("searchTerm", "candy")
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```sql
+// Query the Categories collection
+from "Categories"
+// Call 'vector.search'
+// Specify the identifier of the task that generated the embeddings inside the 'ai.task' method
+where vector.search(embedding.text(Name, ai.task('id-for-task-open-ai')), $searchTerm, 0.75)
+{"searchTerm": "candy"}
+```
+
+
+
+* Executing the above query on the RavenDB sample data will create the following **auto-index**:
+  `Auto/Categories/ByVector.search(embedding.text(Name|ai.task('id-for-task-open-ai')))`
+
+## Dynamic vector search - querying NUMERICAL content
+
+* The following examples will use the sample data shown below.
+  The _Movie_ class includes various formats of numerical vector data.
+  Note: This sample data is minimal to keep the examples simple.
+
+* Note the usage of RavenDB's dedicated data type, [RavenVector](../../../ai-integration/vector-search/data-types-for-vector-search.mdx#ravenvector), which is highly optimized for reading and writing arrays to disk.
+  Learn more about the source data types suitable for vector search in [Data types for vector search](../../../ai-integration/vector-search/data-types-for-vector-search.mdx).
+
+* Unlike vector searches on text, where RavenDB transforms the raw text into an embedding vector,
+  numerical vector searches require your source data to already be in an embedding vector format.
+
+* If your raw data is in a _float_ format, you can request further quantization of the embeddings that will be indexed in the auto-index.
+  See an example of this in: [Quantization options](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options).
+
+* Raw data that is already formatted as _Int8_ or _Binary_ **cannot** be quantized to a lower format (e.g., _Int8_ -> _Binary_).
+  When storing data in these formats in your documents, you should use [RavenDB's `VectorQuantizer` methods](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#vectorquantizer).
+
+#### Sample data:
+
+
+
+```csharp
+// Sample class representing a document with various formats of numerical vectors
+// The embedding vectors for these fields are generated externally by you (not by RavenDB).
+public class Movie
+{
+    public string Id { get; set; }
+    public string Title { get; set; }
+
+    // This field will hold numerical vector data - Not quantized
+    public RavenVector<float> TagsEmbeddedAsSingle { get; set; }
+
+    // This field will hold numerical vector data - Quantized to Int8
+    public sbyte[][] TagsEmbeddedAsInt8 { get; set; }
+
+    // This field will hold numerical vector data - Encoded in Base64 format
+    public List<string> TagsEmbeddedAsBase64 { get; set; }
+
+    // A field for holding numerical vector data produced by a multimodal model
+    // that converts an image into an embedding
+    public RavenVector<float> MoviePhotoEmbedding { get; set; }
+}
+```
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    var movie1 = new Movie()
+    {
+        Title = "Hidden Figures",
+        Id = "movies/1",
+
+        // Embedded vector represented as float values
+        TagsEmbeddedAsSingle = new RavenVector<float>(new float[]
+        {
+            6.599999904632568f, 7.699999809265137f
+        }),
+
+        // Embedded vectors encoded in Base64 format
+        TagsEmbeddedAsBase64 = new List<string>()
+        {
+            "zczMPc3MTD6amZk+", "mpmZPs3MzD4AAAA/"
+        },
+
+        // Array of embedded vectors quantized to Int8
+        TagsEmbeddedAsInt8 = new sbyte[][]
+        {
+            // Use RavenDB's quantization methods to convert float vectors to Int8
+            VectorQuantizer.ToInt8(new float[] { 0.1f, 0.2f }),
+            VectorQuantizer.ToInt8(new float[] { 0.3f, 0.4f })
+        },
+
+        // Example of an image embedding
+        // In a real scenario, this vector would come from a multimodal model
+        // such as CLIP, OpenCLIP, or similar
+        MoviePhotoEmbedding = new RavenVector<float>(new float[]
+        {
+            0.123f, -0.045f, 0.987f, 0.564f, -0.321f, 0.220f
+        })
+    };
+
+    var movie2 = new Movie()
+    {
+        Title = "The Shawshank Redemption",
+        Id = "movies/2",
+
+        TagsEmbeddedAsSingle = new RavenVector<float>(new float[]
+        {
+            8.800000190734863f, 9.899999618530273f
+        }),
+        TagsEmbeddedAsBase64 = new List<string>() { "zcxMPs3MTD9mZmY/", "zcxMPpqZmT4zMzM/" },
+        TagsEmbeddedAsInt8 = new sbyte[][]
+        {
+            VectorQuantizer.ToInt8(new float[] { 0.5f, 0.6f }),
+            VectorQuantizer.ToInt8(new float[] { 0.7f, 0.8f })
+        },
+
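+        // Image embedding for the second movie
+        // (illustrative values; in practice produced by a multimodal model)
+        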
MoviePhotoEmbedding = new RavenVector(new float[] + { + 0.456f, -0.056f, 0.123f, 0.899f, -0.765f, 0.881f + }) + }; + + session.Store(movie1); + session.Store(movie2); + session.SaveChanges(); +} +``` + + +```csharp +{ + "Title": "Hidden Figures", + + "TagsEmbeddedAsSingle": { + "@vector": [ + 6.599999904632568, + 7.699999809265137 + ] + }, + + "TagsEmbeddedAsInt8": [ + [ + 64, + 127, + -51, + -52, + 76, + 62 + ], + [ + 95, + 127, + -51, + -52, + -52, + 62 + ] + ], + + "TagsEmbeddedAsBase64": [ + "zczMPc3MTD6amZk+", + "mpmZPs3MzD4AAAA/" + ], + + "MoviePhotoEmbedding": { + "@vector": [0.123, -0.045, 0.987, 0.564, -0.321, 0.220] + } + + "@metadata": { + "@collection": "Movies" + } +} +``` + + + +#### Examples: + +These examples search for Movie documents with vectors similar to the one provided in the query. + + + +* Search on the `TagsEmbeddedAsSingle` field, + which contains numerical data in **floating-point format**. + + + +```csharp +var similarMovies = session.Query() + // Perform a vector search + // Call the 'VectorSearch' method + .VectorSearch( + // Call 'WithEmbedding', specify: + // * The source field that contains the embedding in the document + // * The source embedding type + field => field.WithEmbedding( + x => x.TagsEmbeddedAsSingle, VectorEmbeddingType.Single), + // Call 'ByEmbedding' + // Provide the vector for the similarity comparison + queryVector => queryVector.ByEmbedding( + new RavenVector(new float[] { 6.599999904632568f, 7.699999809265137f })), + // It is recommended to specify the minimum similarity level + 0.85f, + // Optionally, specify the number of candidates for the search + 10) + .Customize(x => x.WaitForNonStaleResults()) + .ToList(); +``` + + +```csharp +var similarMovies = await asyncSession.Query() + .VectorSearch( + field => field.WithEmbedding( + x => x.TagsEmbeddedAsSingle, VectorEmbeddingType.Single), + queryVector => queryVector.ByEmbedding( + new RavenVector(new float[] { 6.599999904632568f, 7.699999809265137f })), + 0.85f, + 10) + .Customize(x => x.WaitForNonStaleResults()) + .ToListAsync(); +``` + + +```csharp +var similarMovies = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field.WithEmbedding( + x => x.TagsEmbeddedAsSingle, VectorEmbeddingType.Single), + queryVector => queryVector.ByEmbedding( + new RavenVector(new float[] { 6.599999904632568f, 7.699999809265137f })), + 0.85f, + 10) + .WaitForNonStaleResults() + .ToList(); +``` + + +```csharp +var similarMovies = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field.WithEmbedding( + x => x.TagsEmbeddedAsSingle, VectorEmbeddingType.Single), + queryVector => queryVector.ByEmbedding( + new RavenVector(new float[] { 6.599999904632568f, 7.699999809265137f })), + 0.85f, + 10) + .WaitForNonStaleResults() + .ToListAsync(); +``` + + +```csharp +var similarProducts = session.Advanced + .RawQuery(@" + from 'Movies' + where vector.search(TagsEmbeddedAsSingle, $queryVector, 0.85, 10)") + .AddParameter("queryVector", new RavenVector(new float[] + { + 6.599999904632568f, 7.699999809265137f + })) + .WaitForNonStaleResults() + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Advanced + .AsyncRawQuery(@" + from 'Movies' + where vector.search(TagsEmbeddedAsSingle, $queryVector, 0.85, 10)") + .AddParameter("queryVector", new RavenVector(new float[] + { + 6.599999904632568f, 7.699999809265137f + })) + .WaitForNonStaleResults() + .ToListAsync(); +``` + + +```sql +from "Movies" +// The source document field type is interpreted as 'Single' by 
default +where vector.search(TagsEmbeddedAsSingle, $queryVector, 0.85, 10) +{ "queryVector" : { "@vector" : [6.599999904632568, 7.699999809265137] }} +``` + + + + + + +* Search on the `TagsEmbeddedAsInt8` field, + which contains numerical data that is already quantized in **_Int8_ format**. + + + +```csharp +var similarMovies = session.Query() + .VectorSearch( + // Call 'WithEmbedding', specify: + // * The source field that contains the embeddings in the document + // * The source embedding type + field => field.WithEmbedding( + x => x.TagsEmbeddedAsInt8, VectorEmbeddingType.Int8), + // Call 'ByEmbedding' + // Provide the vector for the similarity comparison + // (provide a single vector from the vector list in the TagsEmbeddedAsInt8 field) + queryVector => queryVector.ByEmbedding( + // The provided vector MUST be in the same format as was stored in your document + // Call 'VectorQuantizer.ToInt8' to transform the raw data to the Int8 format + VectorQuantizer.ToInt8(new float[] { 0.1f, 0.2f }))) + .Customize(x => x.WaitForNonStaleResults()) + .ToList(); +``` + + +```sql +from "Movies" +// Wrap the source document field name with 'embedding.i8' to indicate the source data type +where vector.search(embedding.i8(TagsEmbeddedAsInt8), $queryVector) +{ "queryVector" : [64, 127, -51, -52, 76, 62] } +``` + + + + + + +* Search on the `TagsEmbeddedAsBase64` field, + which contains numerical data represented in **_Base64_ format**. + + + +```csharp +var similarMovies = session.Query() + .VectorSearch( + // Call 'WithBase64', specify: + // * The source field that contains the embeddings in the document + // * The source embedding type + // (the type from which the Base64 string was constructed) + field => field.WithBase64(x => x.TagsEmbeddedAsBase64, VectorEmbeddingType.Single), + // Call 'ByBase64' + // Provide the Base64 string that represents the vector to query against + queryVector => queryVector.ByBase64("zczMPc3MTD6amZk+")) + .Customize(x => x.WaitForNonStaleResults()) + .ToList(); +``` + + +```sql +from "Movies" +// * Wrap the source document field name using 'embedding.' to specify +// the source data type from which the Base64 string was generated. +// * If the document field is Not wrapped, 'single' is assumed as the default source type. +where vector.search(TagsEmbeddedAsBase64, $queryVectorBase64) +{ "queryVectorBase64" : "zczMPc3MTD6amZk+" } +``` + + + + + +## Dynamic vector search - querying for similar documents + +* In the above examples, to find documents with similar content, the query was given an arbitrary input - + either a [raw textual search term](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---querying-text) + or a [numerical query vector](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---querying-numerical-content). + +* RavenDB also allows you to search for documents whose content is similar to an **existing document**: + + * To do so, use the `ForDocument` method and specify the existing document ID. See the example below. + + * When performing a dynamic vector query over a field, index-entries are generated in the auto-index, + one per document in the collection. Each index-entry contains the document ID and the embedding vector for the queried field. + + * RavenDB retrieves the embedding that was indexed for the queried field in the specified document and uses it as the query vector for the similarity comparison. 
+ + * The results will include documents whose indexed embeddings are most similar to the one stored in the referenced document’s index-entry. + + + +```csharp +var similarProducts = session.Query() + // Perform a vector search + // Call the 'VectorSearch' method + .VectorSearch( + // Call 'WithText' + // Specify the document field in which to search for similar values + field => field.WithText(x => x.Name), + // Call 'ForDocument' + // Provide the document ID for which you want to find similar documents. + // The embedding stored in the auto-index for the specified document + // will be used as the "query vector". + embedding => embedding.ForDocument("Products/7-A"), + 0.82f) + .Customize(x => x.WaitForNonStaleResults()) + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Query() + .VectorSearch( + field => field.WithText(x => x.Name), + embedding => embedding.ForDocument("Products/7-A"), + 0.82f) + .Customize(x => x.WaitForNonStaleResults()) + .ToListAsync(); +``` + + +```csharp +var similarProducts = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field.WithText(x => x.Name), + embedding => embedding.ForDocument("Products/7-A"), + 0.82f) + .WaitForNonStaleResults() + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field.WithText(x => x.Name), + embedding => embedding.ForDocument("Products/7-A"), + 0.82f) + .WaitForNonStaleResults() + .ToListAsync(); +``` + + +```csharp +var similarProducts = session.Advanced + .RawQuery(@" + from 'Products' + // Pass a document ID to the 'forDoc' method to find similar documents + where vector.search(embedding.text(Name), embedding.forDoc($documentID), 0.82)") + .AddParameter("$documentID", "Products/7-A") + .WaitForNonStaleResults() + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Advanced + .AsyncRawQuery(@" + from 'Products' + // Pass a document ID to the 'forDoc' method to find similar documents + where vector.search(embedding.text(Name), embedding.forDoc($documentID), 0.82)") + .AddParameter("$documentID", "Products/7-A") + .WaitForNonStaleResults() + .ToListAsync(); +``` + + +```sql +from "Products" +// Pass a document ID to the 'forDoc' method to find similar documents +where vector.search(embedding.text(Name), embedding.forDoc($documentID), 0.82) +{"documentID" : "Products/7-A"} +``` + + + +Running the above example on RavenDB’s sample data returns the following documents that have similar content in their _Name_ field: +(Note: the results include the referenced document itself, _Products/7-A_) + + +```csharp +// ID: products/7-A ... Name: "Uncle Bob's Organic Dried Pears" +// ID: products/51-A ... Name: "Manjimup Dried Apples" +// ID: products/6-A ... Name: "Grandma's Boysenberry Spread" +``` + + +The auto-index generated by running the above dynamic query is: +`Auto/Products/ByVector.search(embedding.text(Name))` + +You can **view the index-entries** of this auto-index in the Studio's query view: + +![Query the auto index](../assets/view-auto-index-entries-1.png) + +1. Go to the Query view in the Studio +2. Query the index +3. 
Open the settings dialog: + +![Open the settings dialog](../assets/view-auto-index-entries-2.png) + +![The index entries](../assets/view-auto-index-entries-3.png) + +## Dynamic vector search - exact search + +* When performing a dynamic vector search query, you can specify whether to perform an **exact search** to find the closest similar vectors in the vector space: + * A thorough scan will be performed to find the actual closest vectors. + * This ensures better accuracy but comes at a higher computational cost. + +* If exact is Not specified, the search defaults to the **Approximate Nearest-Neighbor** (ANN) method, + which finds related vectors in an approximate manner, offering faster results. + +* The following example demonstrates how to specify the exact method in the query. + Setting the param is similar for both text and numerical content searches. + + + +```csharp +var similarProducts = session.Query() + .VectorSearch( + field => field.WithText(x => x.Name), + searchTerm => searchTerm.ByText("italian food"), + // Optionally, set the 'isExact' param to true to perform an Exact search + isExact: true) + .Customize(x => x.WaitForNonStaleResults()) + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Query() + .VectorSearch( + field => field.WithText(x => x.Name), + searchTerm => searchTerm.ByText("italian food"), + isExact: true) + .Customize(x => x.WaitForNonStaleResults()) + .ToListAsync(); +``` + + +```csharp +var similarProducts = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field.WithText(x => x.Name), + searchTerm => searchTerm.ByText("italian food"), + isExact: true) + .WaitForNonStaleResults() + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field.WithText(x => x.Name), + searchTerm => searchTerm.ByText("italian food"), + isExact: true) + .WaitForNonStaleResults() + .ToListAsync(); +``` + + +```csharp +var similarProducts = session.Advanced + .RawQuery(@" + from 'Products' + // Wrap the query with the 'exact()' method + where exact(vector.search(embedding.text(Name), $searchTerm))") + .AddParameter("searchTerm", "italian food") + .WaitForNonStaleResults() + .ToList(); +``` + + +```csharp +var similarProducts = await asyncSession.Advanced + .AsyncRawQuery(@" + from 'Products' + // Wrap the query with the 'exact()' method + where exact(vector.search(embedding.text(Name), $searchTerm))") + .AddParameter("searchTerm", "italian food") + .WaitForNonStaleResults() + .ToListAsync(); +``` + + +```sql +from "Products" +// Wrap the vector.search query with the 'exact()' method +where exact(vector.search(embedding.text(Name), "italian food")) +``` + + + +## Quantization options + +#### What is quantization: + +Quantization is a technique that reduces the precision of numerical data. +It converts high-precision values, such as 32-bit floating-point numbers, into lower-precision formats like 8-bit integers or binary representations. + +The quantization process, applied to each dimension (or item) in the numerical array, +serves as a form of compression by reducing the number of bits used to represent each value in the vector. +For example, transitioning from 32-bit floats to 8-bit integers significantly reduces data size while preserving the vector's essential structure. + +Although it introduces some precision loss, quantization minimizes storage requirements and optimizes memory usage. 
+
+#### Quantization in RavenDB:
+
+For non-quantized raw 32-bit data or text stored in your documents,
+RavenDB allows you to choose the quantization format for the generated embeddings stored in the index.
+The selected quantization type determines the similarity search technique that will be applied.
+
+If no target quantization format is specified, the `Single` option is used as the default.
+
+The available quantization options are:
+
+  * `Single` (a 32-bit floating-point value per dimension):
+    Provides precise vector representations.
+    The [Cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) method will be used for searching and matching.
+
+  * `Int8` (an 8-bit integer value per dimension):
+    Reduces storage requirements while maintaining good performance.
+    Saves up to 75% storage compared to 32-bit floating-point values.
+    The Cosine similarity method will be used for searching and matching.
+
+  * `Binary` (1 bit per dimension):
+    Minimizes storage usage, suitable for use cases where a binary representation suffices.
+    Saves approximately 96% storage compared to 32-bit floating-point values.
+    The [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) method will be used for searching and matching.
+
+
+  If your documents contain data that is already quantized,
+  it cannot be re-quantized to a lower-precision format (e.g., Int8 cannot be converted to Binary).
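+
+The storage savings quoted above follow directly from the bit widths. As a sketch, for a hypothetical 768-dimension embedding:
+
+```csharp
+// Approximate storage per vector for a hypothetical 768-dimension embedding:
+const int dimensions = 768;
+
+int singleBytes = dimensions * sizeof(float); // 3,072 bytes (32 bits per dimension)
+int int8Bytes   = dimensions * sizeof(sbyte); //   768 bytes (75% smaller)
+int binaryBytes = dimensions / 8;             //    96 bytes (~96% smaller)
+```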
+
+#### Examples
+
+
+
+* In this example:
+  * The source data consists of text.
+  * The generated embeddings will use the _Int8_ format.
+
+
+
+```csharp
+var similarProducts = session.Query<Product>()
+    .VectorSearch(
+        field => field
+            // Specify the source text field for the embeddings
+            .WithText(x => x.Name)
+            // Set the quantization type for the generated embeddings
+            .TargetQuantization(VectorEmbeddingType.Int8),
+        searchTerm => searchTerm
+            // Provide the search term for comparison
+            .ByText("italian food"))
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToList();
+```
+
+
+```csharp
+var similarProducts = await asyncSession.Query<Product>()
+    .VectorSearch(
+        field => field
+            .WithText(x => x.Name)
+            .TargetQuantization(VectorEmbeddingType.Int8),
+        searchTerm => searchTerm
+            .ByText("italian food"))
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToListAsync();
+```
+
+
+```csharp
+var similarProducts = session.Advanced
+    .DocumentQuery<Product>()
+    .VectorSearch(
+        field => field
+            .WithText(x => x.Name)
+            .TargetQuantization(VectorEmbeddingType.Int8),
+        searchTerm => searchTerm
+            .ByText("italian food"))
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var similarProducts = await asyncSession.Advanced
+    .AsyncDocumentQuery<Product>()
+    .VectorSearch(
+        field => field
+            .WithText(x => x.Name)
+            .TargetQuantization(VectorEmbeddingType.Int8),
+        searchTerm => searchTerm
+            .ByText("italian food"))
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```csharp
+var similarProducts = session.Advanced
+    .RawQuery<Product>(@"
+        from 'Products'
+        // Wrap the 'Name' field with 'embedding.text_i8'
+        where vector.search(embedding.text_i8(Name), $searchTerm)")
+    .AddParameter("searchTerm", "italian food")
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var similarProducts = await asyncSession.Advanced
+    .AsyncRawQuery<Product>(@"
+        from 'Products'
+        // Wrap the 'Name' field with 'embedding.text_i8'
+        where vector.search(embedding.text_i8(Name), $searchTerm)")
+    .AddParameter("searchTerm", "italian food")
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```sql
+from "Products"
+// Wrap the 'Name' field with 'embedding.text_i8'
+where vector.search(embedding.text_i8(Name), $searchTerm)
+{ "searchTerm" : "italian food" }
+```
+
+
+
+
+
+
+* In this example:
+  * The source data is an array of 32-bit floats.
+  * The generated embeddings will use the _Binary_ format.
+
+
+
+```csharp
+var similarMovies = session.Query<Movie>()
+    .VectorSearch(
+        field => field
+            // Specify the source field and its type
+            .WithEmbedding(x => x.TagsEmbeddedAsSingle, VectorEmbeddingType.Single)
+            // Set the quantization type for the generated embeddings
+            .TargetQuantization(VectorEmbeddingType.Binary),
+        queryVector => queryVector
+            // Provide the vector to use for comparison
+            .ByEmbedding(new RavenVector<float>(new float[]
+            {
+                6.599999904632568f, 7.699999809265137f
+            })))
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToList();
+```
+
+
+```csharp
+var similarMovies = await asyncSession.Query<Movie>()
+    .VectorSearch(
+        field => field
+            .WithEmbedding(x => x.TagsEmbeddedAsSingle, VectorEmbeddingType.Single)
+            .TargetQuantization(VectorEmbeddingType.Binary),
+        queryVector => queryVector
+            .ByEmbedding(new RavenVector<float>(new float[]
+            {
+                6.599999904632568f, 7.699999809265137f
+            })))
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToListAsync();
+```
+
+
+```csharp
+var similarMovies = session.Advanced
+    .DocumentQuery<Movie>()
+    .VectorSearch(
+        field => field
+            .WithEmbedding(x => x.TagsEmbeddedAsSingle, VectorEmbeddingType.Single)
+            .TargetQuantization(VectorEmbeddingType.Binary),
+        queryVector => queryVector
+            .ByEmbedding(new RavenVector<float>(new float[]
+            {
+                6.599999904632568f, 7.699999809265137f
+            })))
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var similarMovies = await asyncSession.Advanced
+    .AsyncDocumentQuery<Movie>()
+    .VectorSearch(
+        field => field
+            .WithEmbedding(x => x.TagsEmbeddedAsSingle, VectorEmbeddingType.Single)
+            .TargetQuantization(VectorEmbeddingType.Binary),
+        queryVector => queryVector
+            .ByEmbedding(new RavenVector<float>(new float[]
+            {
+                6.599999904632568f, 7.699999809265137f
+            })))
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```csharp
+var similarMovies = session.Advanced
+    .RawQuery<Movie>(@"
+        from 'Movies'
+        // Wrap the 'TagsEmbeddedAsSingle' field with 'embedding.f32_i1'
+        where vector.search(embedding.f32_i1(TagsEmbeddedAsSingle), $queryVector)")
+    .AddParameter("queryVector", new RavenVector<float>(new float[]
+    {
+        6.599999904632568f, 7.699999809265137f
+    }))
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var similarMovies = await asyncSession.Advanced
+    .AsyncRawQuery<Movie>(@"
+        from 'Movies'
+        // Wrap the 'TagsEmbeddedAsSingle' field with 'embedding.f32_i1'
+        where vector.search(embedding.f32_i1(TagsEmbeddedAsSingle), $queryVector)")
+    .AddParameter("queryVector", new RavenVector<float>(new float[]
+    {
+        6.599999904632568f, 7.699999809265137f
+    }))
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```sql
+from "Movies"
+// Wrap the 'TagsEmbeddedAsSingle' field with 'embedding.f32_i1'
+where vector.search(embedding.f32_i1(TagsEmbeddedAsSingle), $queryVector)
+{ "queryVector" : { "@vector" : [6.599999904632568,7.699999809265137] }}
+```
+
+
+
+
+
+#### Field configuration methods in RQL:
+
+The following methods are available for performing a vector search via RQL:
+
+
+
+* `embedding.text`:
+  Generates embeddings from text as multi-dimensional vectors with 32-bit floating-point values,
+  without applying quantization.
+
+* `embedding.text_i8`:
+  Generates embeddings from text as multi-dimensional vectors with 8-bit integer values.
+
+* `embedding.text_i1`:
+  Generates embeddings from text as multi-dimensional vectors in a binary format.
+
+* `embedding.f32_i8`:
+  Converts multi-dimensional vectors with 32-bit floating-point values into vectors with 8-bit integer values.
+
+* `embedding.f32_i1`:
+  Converts multi-dimensional vectors with 32-bit floating-point values into vectors in a binary format.
+
+* `embedding.i8`:
+  Indicates that the source data is already quantized as Int8 (cannot be further quantized).
+
+* `embedding.i1`:
+  Indicates that the source data is already quantized as binary (cannot be further quantized).
+
+
+
+Wrap the field name using any of the relevant methods listed above, based on your requirements.
+For example, the following RQL encodes **text to Int8**:
+
+
+
+```sql
+from "Products"
+// Wrap the document field with 'embedding.text_i8'
+where vector.search(embedding.text_i8(Name), "italian food", 0.82, 20)
+```
+
+
+
+When the field name is Not wrapped in any method,
+the underlying values are treated as numerical values in **32-bit floating-point** (Single) precision.
+For example, the following RQL will use the floating-point values as they are, without applying further quantization:
+
+
+
+```sql
+from "Movies"
+// No wrapping
+where vector.search(TagsEmbeddedAsSingle, $queryVector, 0.85, 10)
+{"queryVector" : { "@vector" : [6.599999904632568, 7.699999809265137] }}
+```
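+
+For reference, the equivalent LINQ form of this unwrapped query uses `WithEmbedding` with the default `Single` type and no `TargetQuantization` call (a sketch, assuming the `Movie` sample class used throughout this article):
+
+```csharp
+var similarMovies = session.Query<Movie>()
+    .VectorSearch(
+        // No target quantization is set - the stored 32-bit values are used as-is:
+        field => field.WithEmbedding(x => x.TagsEmbeddedAsSingle),
+        queryVector => queryVector.ByEmbedding(
+            new RavenVector<float>(new float[] { 6.599999904632568f, 7.699999809265137f })),
+        0.85f, 10)
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToList();
+```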
+
+## Querying vector fields and regular data in the same query
+
+* You can perform a vector search and a regular search in the same query.
+  A single auto-index will be created for both search predicates.
+
+* In the following example, results will include Product documents with content similar to "Italian food" in their _Name_ field and a _PricePerUnit_ above 35.
+  The following auto-index will be generated:
+  `Auto/Products/ByPricePerUnitAndVector.search(embedding.text(Name))`.
+
+
+
+```csharp
+var similarProducts = session.Query<Product>()
+    // Apply a filtering condition:
+    .Where(x => x.PricePerUnit > 35)
+    // Perform a vector search:
+    .VectorSearch(
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("italian food"),
+        0.75f, 16)
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToList();
+```
+
+
+```csharp
+var similarProducts = await asyncSession.Query<Product>()
+    .Where(x => x.PricePerUnit > 35)
+    .VectorSearch(
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("italian food"),
+        0.75f, 16)
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToListAsync();
+```
+
+
+```csharp
+var similarProducts = session.Advanced
+    .DocumentQuery<Product>()
+    .VectorSearch(
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("italian food"),
+        0.75f, 16)
+    .WhereGreaterThan(x => x.PricePerUnit, 35)
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var similarProducts = await asyncSession.Advanced
+    .AsyncDocumentQuery<Product>()
+    .VectorSearch(
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("italian food"),
+        0.75f, 16)
+    .WhereGreaterThan(x => x.PricePerUnit, 35)
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```csharp
+var similarProducts = session.Advanced
+    .RawQuery<Product>(@"
+        from 'Products'
+        where (PricePerUnit > $minPrice) and (vector.search(embedding.text(Name), $searchTerm, 0.75, 16))")
+    .AddParameter("minPrice", 35.0)
+    .AddParameter("searchTerm", "italian food")
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var similarProducts = await asyncSession.Advanced
+    .AsyncRawQuery<Product>(@"
+        from 'Products'
+        where (PricePerUnit > $minPrice) and (vector.search(embedding.text(Name), $searchTerm, 0.75, 16))")
+    .AddParameter("minPrice", 35.0)
+    .AddParameter("searchTerm", "italian food")
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```sql
+from "Products"
+// The filtering condition:
+where (PricePerUnit > $minPrice)
+and (vector.search(embedding.text(Name), $searchTerm, 0.75, 16))
+{ "minPrice" : 35.0, "searchTerm" : "italian food" }
+```
+
+
+
+
+
+**Impact of _NumberOfCandidates_ on query results**:
+
+* When combining a vector search with a filtering condition, the filter applies only to the documents retrieved within the `NumberOfCandidates` param limit.
+  Increasing or decreasing _NumberOfCandidates_ can affect the query results.
+  A larger _NumberOfCandidates_ increases the pool of documents considered,
+  improving the chances of finding results that match both the vector search and the filter condition.
+
+* For example, in the above query, the vector search executes with similarity `0.75f` and _NumberOfCandidates_ `16`.
+  Running this query on RavenDB's sample data returns **2** documents.
+
+* However, if you increase _NumberOfCandidates_, the query will retrieve more candidate documents before applying the filtering condition.
+  If you run the following query:
+
+
+
+```sql
+from "Products"
+where (PricePerUnit > $minPrice)
+// Run vector search with similarity 0.75 and NumberOfCandidates 25
+and (vector.search(embedding.text(Name), $searchTerm, 0.75, 25))
+{ "minPrice" : 35.0, "searchTerm" : "italian food" }
+```
+
+
+
+now the query returns **4** documents instead of **2**.
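+
+The same increase can be applied through the LINQ API by passing `numberOfCandidates` explicitly (a sketch, using the same `Product` query as above):
+
+```csharp
+var similarProducts = session.Query<Product>()
+    .Where(x => x.PricePerUnit > 35)
+    .VectorSearch(
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("italian food"),
+        minimumSimilarity: 0.75f,
+        // Increase the candidate pool from the default 16 to 25:
+        numberOfCandidates: 25)
+    .Customize(x => x.WaitForNonStaleResults())
+    .ToList();
+```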
+
+
+
+## Combining multiple vector searches in the same query
+
+* You can combine multiple vector search statements in the same query using logical operators.
+  This is useful when you want to retrieve documents that match more than one vector-based criterion.
+
+* This can be done using [DocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery),
+  [RawQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery), or raw [RQL](../../../client-api/session/querying/what-is-rql.mdx).
+
+* In the example below, the results will include companies that match one of two vector search conditions:
+  * Companies from European countries with a _Name_ similar to "snack",
+  * Or companies with a _Name_ similar to "dairy".
+
+* Running the query example on the RavenDB sample data will generate the following auto-index:
+  `Auto/Companies/ByVector.search(embedding.text(Address.Country))AndVector.search(embedding.text(Name))`.
+  This index includes two vector fields: _Address.Country_ and _Name_.
+
+
+
+```csharp
+var companies = session.Advanced
+    .DocumentQuery<Company>()
+    // Use OpenSubclause & CloseSubclause to differentiate between clauses:
+    // ====================================================================
+
+    .OpenSubclause()
+    .VectorSearch(          // Search for companies that sell snacks or similar
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("snack"),
+        minimumSimilarity: 0.78f
+    )
+    // Use 'AndAlso' for an AND operation
+    .AndAlso()
+    .VectorSearch(          // Search for companies located in Europe
+        field => field.WithText(x => x.Address.Country),
+        searchTerm => searchTerm.ByText("europe"),
+        minimumSimilarity: 0.82f
+    )
+    .CloseSubclause()
+    // Use 'OrElse' for an OR operation
+    .OrElse()
+    .OpenSubclause()
+    .VectorSearch(          // Search for companies that sell dairy products or similar
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("dairy"),
+        minimumSimilarity: 0.80f
+    )
+    .CloseSubclause()
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var companies = await asyncSession.Advanced
+    .AsyncDocumentQuery<Company>()
+    .OpenSubclause()
+    .VectorSearch(
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("snack"),
+        minimumSimilarity: 0.78f
+    )
+    .AndAlso()
+    .VectorSearch(
+        field => field.WithText(x => x.Address.Country),
+        searchTerm => searchTerm.ByText("europe"),
+        minimumSimilarity: 0.82f
+    )
+    .CloseSubclause()
+    .OrElse()
+    .OpenSubclause()
+    .VectorSearch(
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("dairy"),
+        minimumSimilarity: 0.80f
+    )
+    .CloseSubclause()
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```csharp
+var companies = session.Advanced
+    .RawQuery<Company>(@"
+        from Companies
+        where
+        (
+            vector.search(embedding.text(Name), $searchTerm1, 0.78)
+            and
+            vector.search(embedding.text(Address.Country), $searchTerm2, 0.82)
+        )
+        or
+        (
+            vector.search(embedding.text(Name), $searchTerm3, 0.80)
+        )
+    ")
+    .AddParameter("searchTerm1", "snack")
+    .AddParameter("searchTerm2", "europe")
+    .AddParameter("searchTerm3", "dairy")
+    .WaitForNonStaleResults()
+    .ToList();
+```
+
+
+```csharp
+var companies = await asyncSession.Advanced
+    .AsyncRawQuery<Company>(@"
+        from Companies
+        where
+        (
+            vector.search(embedding.text(Name), $searchTerm1, 0.78)
+            and
+            vector.search(embedding.text(Address.Country), $searchTerm2, 0.82)
+        )
+        or
+        (
+            vector.search(embedding.text(Name), $searchTerm3, 0.80)
+        )
+    ")
+    .AddParameter("searchTerm1", "snack")
+    .AddParameter("searchTerm2", "europe")
+    .AddParameter("searchTerm3", "dairy")
+    .WaitForNonStaleResults()
+    .ToListAsync();
+```
+
+
+```sql
+from "Companies"
+where
+(
+    vector.search(embedding.text(Name), $searchTerm1, 0.78)
+    and
+    vector.search(embedding.text(Address.Country), $searchTerm2, 0.82)
+)
+or
+(
+    vector.search(embedding.text(Name), $searchTerm3, 0.80)
+)
+{"searchTerm1" : "snack", "searchTerm2" : "europe", "searchTerm3" : "dairy"}
+```
+
+
+
+
+
+**How multiple vector search clauses are evaluated**:
+
+* Each vector search clause is evaluated independently - the search algorithm runs separately for each vector field.
+
+* Each clause retrieves a limited number of candidates, determined by the _NumberOfCandidates_ parameter.
+  * You can explicitly set this value in the query clause, see [query parameters](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#the-dynamic-query-parameters).
+  * If not specified, it is taken from the [Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForQuerying](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofcandidatesforquerying) configuration key (default is 16).
+
+* **The final result set** is computed by applying the logical operators (and, or) between these independently retrieved sets.
+
+* To improve the chances of getting intersecting results, consider increasing the _NumberOfCandidates_ in each vector search clause, as shown in the sketch after this list.
+  This expands the pool of documents considered by each clause, raising the likelihood of finding matches that satisfy the combined logic.
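+
+For illustration, here is the combined query from above with an explicit `numberOfCandidates` in each clause (the value `32` is arbitrary; the parameter is described in the Syntax section below):
+
+```csharp
+var companies = session.Advanced
+    .DocumentQuery<Company>()
+    .OpenSubclause()
+    .VectorSearch(
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("snack"),
+        minimumSimilarity: 0.78f,
+        // Expand the candidate pool of this clause:
+        numberOfCandidates: 32)
+    .AndAlso()
+    .VectorSearch(
+        field => field.WithText(x => x.Address.Country),
+        searchTerm => searchTerm.ByText("europe"),
+        minimumSimilarity: 0.82f,
+        numberOfCandidates: 32)
+    .CloseSubclause()
+    .OrElse()
+    .OpenSubclause()
+    .VectorSearch(
+        field => field.WithText(x => x.Name),
+        searchTerm => searchTerm.ByText("dairy"),
+        minimumSimilarity: 0.80f,
+        numberOfCandidates: 32)
+    .CloseSubclause()
+    .WaitForNonStaleResults()
+    .ToList();
+```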
+
+
+
+## Syntax
+
+`VectorSearch`:
+
+
+```csharp
+public IRavenQueryable<T> VectorSearch(
+    Func<IVectorFieldFactory<T>, IVectorEmbeddingTextField> textFieldFactory,
+    Action<IVectorEmbeddingTextFieldValueFactory> textValueFactory,
+    float? minimumSimilarity = null,
+    int? numberOfCandidates = null,
+    bool isExact = false);
+
+public IRavenQueryable<T> VectorSearch(
+    Func<IVectorFieldFactory<T>, IVectorEmbeddingField> embeddingFieldFactory,
+    Action<IVectorEmbeddingFieldValueFactory> embeddingValueFactory,
+    float? minimumSimilarity = null,
+    int? numberOfCandidates = null,
+    bool isExact = false);
+
+public IRavenQueryable<T> VectorSearch(
+    Func<IVectorFieldFactory<T>, IVectorField> embeddingFieldFactory,
+    Action<IVectorFieldValueFactory> embeddingValueFactory,
+    float? minimumSimilarity = null,
+    int? numberOfCandidates = null,
+    bool isExact = false);
+```
+
+
+| Parameter                 | Type                                                      | Description                                                                                                                      |
+|---------------------------|-----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------|
+| **textFieldFactory**      | `Func<IVectorFieldFactory<T>, IVectorEmbeddingTextField>` | Factory creating a textual vector field for indexing purposes.                                                                    |
+| **textValueFactory**      | `Action<IVectorEmbeddingTextFieldValueFactory>`           | Factory preparing the queried data to be used in the vector search.                                                               |
+| **embeddingFieldFactory** | `Func<IVectorFieldFactory<T>, IVectorEmbeddingField>`     | Factory creating an embedding vector field for indexing purposes.                                                                 |
+| **embeddingValueFactory** | `Action<IVectorEmbeddingFieldValueFactory>`               | Factory preparing the queried data to be used in the vector search.                                                               |
+| **embeddingFieldFactory** | `Func<IVectorFieldFactory<T>, IVectorField>`              | Factory using an existing, already indexed vector field.                                                                          |
+| **embeddingValueFactory** | `Action<IVectorFieldValueFactory>`                        | Factory preparing the queried data to be used in the vector search.                                                               |
+| **minimumSimilarity**     | `float?`                                                  | Minimum similarity between the queried value and the indexed value for the vector search to match.                                |
+| **numberOfCandidates**    | `int?`                                                    | Number of candidate nodes for the HNSW algorithm. Higher values improve accuracy but require more computation.                    |
+| **isExact**               | `bool`                                                    | `false` - the vector search is performed in an approximate manner. `true` - the vector search is performed in an exact manner.    |
+
+The default value for `minimumSimilarity` is defined by this configuration key:
+[Indexing.Corax.VectorSearch.DefaultMinimumSimilarity](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultminimumsimilarity).
+
+The default value for `numberOfCandidates` is defined by this configuration key:
+[Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForQuerying](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofcandidatesforquerying).
+
+`IVectorFieldFactory`:
+
+
+```csharp
+public interface IVectorFieldFactory<T>
+{
+    // Methods for the dynamic query:
+    // ==============================
+
+    public IVectorEmbeddingTextField WithText(string documentFieldName);
+    public IVectorEmbeddingTextField WithText(Expression<Func<T, object>> propertySelector);
+
+    public IVectorEmbeddingField WithEmbedding(string documentFieldName,
+        VectorEmbeddingType storedEmbeddingQuantization = VectorEmbeddingType.Single);
+    public IVectorEmbeddingField WithEmbedding(Expression<Func<T, object>> propertySelector,
+        VectorEmbeddingType storedEmbeddingQuantization = VectorEmbeddingType.Single);
+
+    public IVectorEmbeddingField WithBase64(string documentFieldName,
+        VectorEmbeddingType storedEmbeddingQuantization = VectorEmbeddingType.Single);
+    public IVectorEmbeddingField WithBase64(Expression<Func<T, object>> propertySelector,
+        VectorEmbeddingType storedEmbeddingQuantization = VectorEmbeddingType.Single);
+
+    // Methods for querying a static index:
+    // ====================================
+
+    public IVectorField WithField(string indexFieldName);
+    public IVectorField WithField(Expression<Func<T, object>> indexPropertySelector);
+}
+```
+
+
+| Parameter                       | Type                          | Description                                                                  |
+|---------------------------------|-------------------------------|------------------------------------------------------------------------------|
+| **documentFieldName**           | `string`                      | The name of the document field containing text / embedding / base64 encoded data. |
+| **indexFieldName**              | `string`                      | The name of the index-field that vector search will be performed on.          |
+| **propertySelector**            | `Expression<Func<T, object>>` | Path to the document field containing text / embedding / base64 encoded data. |
+| **indexPropertySelector**       | `Expression<Func<T, object>>` | Path to the index-field containing the indexed data.                          |
+| **storedEmbeddingQuantization** | `VectorEmbeddingType`         | Quantization format of the stored embeddings. Default: `VectorEmbeddingType.Single` |
+
+`IVectorEmbeddingTextField` & `IVectorEmbeddingField`:
+
+
+```csharp
+public interface IVectorEmbeddingTextField
+{
+    public IVectorEmbeddingTextField TargetQuantization(
+        VectorEmbeddingType targetEmbeddingQuantization);
+
+    public IVectorEmbeddingTextField UsingTask(
+        string embeddingsGenerationTaskIdentifier);
+}
+
+public interface IVectorEmbeddingField
+{
+    public IVectorEmbeddingField TargetQuantization(
+        VectorEmbeddingType targetEmbeddingQuantization);
+}
+```
+
+
+| Parameter                              | Type                  | Description                                                                                                                                                                                     |
+|----------------------------------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **targetEmbeddingQuantization**        | `VectorEmbeddingType` | The desired target quantization format.                                                                                                                                                            |
+| **embeddingsGenerationTaskIdentifier** | `string`              | The identifier of an embeddings generation task. Used to locate the embeddings generated by the task in the [Embedding collections](../../../ai-integration/generating-embeddings/embedding-collections.mdx). |
+
+
+```csharp
+public enum VectorEmbeddingType
+{
+    Single,
+    Int8,
+    Binary,
+    Text
+}
+```
+
+
+`IVectorEmbeddingTextFieldValueFactory` & `IVectorEmbeddingFieldValueFactory`:
+
+
+```csharp
+public interface IVectorEmbeddingTextFieldValueFactory
+{
+    // Defines the queried text(s)
+    public void ByText(string text);
+    public void ByTexts(IEnumerable<string> texts);
+
+    // Defines the queried text(s) and the embeddings generation task to use.
+    // These overloads should be used only when querying a static-index where vector fields contain
+    // numerical embeddings that were not generated by RavenDB's built-in embedding tasks.
+    // The text is embedded at query time using the specified task ID and compared to the indexed vectors.
+    public void ByText(string text, string embeddingsGenerationTaskIdentifier);
+    public void ByTexts(IEnumerable<string> texts, string embeddingsGenerationTaskIdentifier);
+
+    // Query by the embedding(s) indexed from the specified document for the queried field
+    public void ForDocument(string documentId);
+}
+
+public interface IVectorEmbeddingFieldValueFactory
+{
+    // Define the queried embedding(s):
+    // ================================
+
+    // 'embedding' / 'embeddings' is an enumerable containing the embedding values
+    public void ByEmbedding<T>(IEnumerable<T> embedding) where T : unmanaged, INumber<T>;
+    public void ByEmbeddings<T>(IEnumerable<IEnumerable<T>> embeddings) where T : unmanaged, INumber<T>;
+
+    // 'embedding' / 'embeddings' is an array containing the embedding values
+    public void ByEmbedding<T>(T[] embedding) where T : unmanaged, INumber<T>;
+    public void ByEmbeddings<T>(T[][] embeddings) where T : unmanaged, INumber<T>;
+
+    // 'embedding' is a 'RavenVector' containing the embedding values
+    public void ByEmbedding<T>(RavenVector<T> embedding) where T : unmanaged, INumber<T>;
+
+    // 'base64Embedding' / 'base64Embeddings' is encoded as base64 string(s)
+    public void ByBase64(string base64Embedding);
+    public void ByBase64(IEnumerable<string> base64Embeddings);
+}
+```
+
+
+#### `RavenVector`:
+
+`RavenVector<T>` is RavenDB's dedicated data type for storing and querying numerical embeddings.
+Learn more in [RavenVector](../../../ai-integration/vector-search/data-types-for-vector-search.mdx#ravenvector).
+
+
+```csharp
+public class RavenVector<T> where T : unmanaged, INumber<T>
+{
+    public T[] Embedding { get; set; }
+}
+```
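+
+For example, a document can carry its embedding in a `RavenVector<float>` property (a sketch, assuming the `Movie` sample class referenced earlier in this article):
+
+```csharp
+var movie = new Movie
+{
+    // Store the raw embedding on the document:
+    TagsEmbeddedAsSingle = new RavenVector<float>(new float[] { 6.6f, 7.7f })
+};
+
+using (var session = store.OpenSession())
+{
+    session.Store(movie);
+    session.SaveChanges();
+}
+```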
+
+#### `VectorQuantizer`:
+
+RavenDB provides the following quantizer methods.
+Use them to transform your raw data to the desired format.
+Other quantizers may not be compatible.
+
+
+```csharp
+public static class VectorQuantizer
+{
+    public static sbyte[] ToInt8(float[] rawEmbedding);
+    public static byte[] ToInt1(ReadOnlySpan<float> rawEmbedding);
+}
+```
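+
+For example, quantizing a raw 32-bit array before storing or querying it (a minimal sketch; the input values are arbitrary):
+
+```csharp
+float[] rawEmbedding = { 0.1f, 0.2f };
+
+// Quantize the raw 32-bit values to Int8:
+sbyte[] asInt8 = VectorQuantizer.ToInt8(rawEmbedding);
+
+// Quantize the raw 32-bit values to a binary (1-bit) representation:
+byte[] asInt1 = VectorQuantizer.ToInt1(rawEmbedding);
+```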
+
diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/content/_vector-search-using-static-index-csharp.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/content/_vector-search-using-static-index-csharp.mdx
new file mode 100644
index 0000000000..b4edc0ca42
--- /dev/null
+++ b/versioned_docs/version-7.1/ai-integration/vector-search/content/_vector-search-using-static-index-csharp.mdx
@@ -0,0 +1,1822 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article explains how to perform a **vector search** using a **static index**.
+ **Prior to this article**, it is recommended to get familiar with the [Vector search using a dynamic query](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx) article. + +* A static index allows you to define a **vector index-field**, enabling you to execute vector searches + while leveraging the advantages of RavenDB's [indexes](../../../indexes/what-are-indexes.mdx). + +* The vector search feature is only supported by indexes that use the [Corax search engine](../../../indexes/search-engine/corax.mdx). + +* In this article: + * [Indexing a vector field - Overview](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-a-vector-field---overview) + * [Defining a vector field in a static index](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#defining-a-vector-field-in-a-static-index) + * [Parameters defined at index definition](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#parameters-defined-at-index-definition) + * [Behavior during indexing](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#behavior-during-indexing) + * [Parameters used at query time](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#parameters-used-at-query-time) + * [Behavior when documents are deleted](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#vector-behavior-when-documents-are-deleted) + * [Indexing vector data - TEXT](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-vector-data---text) + * [Indexing raw text](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-raw-text) + * [Indexing pre-made text-embeddings](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-pre-made-text-embeddings) + * [Indexing vector data - NUMERICAL](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-vector-data---numerical) + * [Indexing numerical data and querying using numeric input](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-numerical-data-and-querying-using-numeric-input) + * [Indexing numerical data and querying using text input](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-numerical-data-and-querying-using-text-input) + * [Indexing multiple field types](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-multiple-field-types) + * [Querying the static index for similar documents](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#querying-the-static-index-for-similar-documents) + * [Configure the vector field in the Studio](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#configure-the-vector-field-in-the-studio) + + + +## Indexing a vector field - Overview + + + +#### Defining a vector field in a static index + +To define a vector index-field in your static-index definition: + +* **From the Client API**: + + **`LoadVector()`**: + When indexing **pre-made text-embeddings** generated by RavenDB's [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx), + use the `LoadVector()` method in your index definition. + An example is available in [Indexing pre-made text-embeddings](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-pre-made-text-embeddings). 
+ + **`CreateVector()`**: + When indexing **your own data** (textual or numerical) that was not generated by these tasks, + use the `CreateVector()` method in your index definition. + An example is available in [Indexing raw text](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-raw-text). + +* **From the Studio**: + See [Define a vector field in the Studio](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#define-a-vector-field-in-the-studio). + +The **source data types** that can be used for vector search are detailed in [Data types for vector search](../../../ai-integration/vector-search/data-types-for-vector-search.mdx). + + + + + +#### Parameters defined at index definition + +The following params can be defined for the vector index-field in the index definition: + +**Source embedding type** - +RavenDB supports performing vector search on TEXTUAL values or NUMERICAL arrays. +This param specifies the embedding format of the source data to be indexed. +Options include `Text`, `Single`, `Int8`, or `Binary`. + +**Destination embedding type** - +Specify the quantization format for the embeddings that will be generated. +Read more about quantization in [Quantization options](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options). + +**Dimensions** - +For numerical input only - define the size of the array from your source document. + +* If this param is Not provided - + the size will be determined by the first document indexed and will apply to all subsequent documents. + +* Ensure the dimensionality of these numerical arrays (i.e., their length) is consistent across all source documents for the indexed field. + An index error will occur if a source document has a different dimension for the indexed field. + +**Number of edges** - +Specify the number of edges that will be created for a vector during indexing. +If not specified, the default value is taken from the following configuration key: [Indexing.Corax.VectorSearch.DefaultNumberOfEdges](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofedges). + +**Number of candidates for indexing time** - +The number of candidates (potential neighboring vectors) that RavenDB evaluates during vector indexing. +If not specified, the default value is taken from the following configuration key: [Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForIndexing](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofcandidatesforindexing). +(Note, this param differs from the number of candidates for query time). + + + + + +#### Behavior during indexing + +* **Raw textual input**: + When indexing raw textual input from your documents, RavenDB generates embedding vectors using the built-in + [bge-micro-v2](https://huggingface.co/TaylorAI/bge-micro-v2) sentence-transformer model, which are then indexed. + +* **Pre-made text-embeddings input**: + When indexing embeddings that are pre-generated from your documents' raw text by RavenDB's + [Embeddings generation tasks](../../../ai-integration/generating-embeddings/overview.mdx), + RavenDB indexes them without additional transformation, unless quantization is applied. 
+ +* **Raw numerical input**: + When indexing pre-made numerical arrays that are already in vector format but were Not generated by these tasks, + such as numerical arrays you created externally, RavenDB indexes them without additional transformation, + unless quantization is applied. + +The embeddings are indexed on the server using the [HNSW algorithm](https://en.wikipedia.org/wiki/Hierarchical_navigable_small_world). +This algorithm organizes embeddings into a high-dimensional graph structure, +enabling efficient retrieval of Approximate Nearest Neighbors (ANN) during queries. + + + + + +#### Parameters used at query time + +**Minimum similarity** - +You can specify the minimum similarity to use when searching for related vectors. Can be a value between `0.0f` and `1.0f`. +A value closer to `1.0f` requires higher similarity between vectors, while a value closer to `0.0f` allows for less similarity. +If not specified, the default value is taken from the following configuration key: [Indexing.Corax.VectorSearch.DefaultMinimumSimilarity](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultminimumsimilarity). + +**Number of candidates at query time** - +You can specify the maximum number of vectors that RavenDB will return from a graph search. +The number of the resulting documents that correspond to these vectors may be: + + * lower than the number of candidates - when multiple vectors originated from the same document. + + * higher than the number of candidates - when the same vector is shared between multiple documents. + +If not specified, the default value is taken from the following configuration key: [Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForQuerying](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofcandidatesforquerying). + +**Search method** - +You can specify the search method at query time: + + * _Approximate Nearest-Neighbor search_ (Default): + Search for related vectors in an approximate manner, providing faster results. + + * _Exact search_: + Perform a thorough scan of the vectors to find the actual closest vectors, + offering better accuracy but at a higher computational cost. + +**To ensure consistent comparisons** - +the search term is transformed into an embedding vector using the same method as the vector index-field. + +**Search results** - +The server will search for the most similar vectors in the indexed vector space, taking into account all the parameters described. +The documents that correspond to the resulting vectors are then returned to the client. + +By default, the resulting documents will be ordered by their score. +You can modify this behavior using the [Indexing.Corax.VectorSearch.OrderByScoreAutomatically](../../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchorderbyscoreautomatically) configuration key. +In addition, you can apply any of the 'order by' methods to your query, as explained in [sort query results](../../../client-api/session/querying/sort-query-results.mdx). + + + + + +#### Vector behavior when documents are deleted + +* RavenDB's implementation of the HNSW graph is append-only. + +* When all documents associated with a specific vector are deleted, the vector itself is Not physically removed but is soft-deleted. + This means the vector is marked as deleted and will no longer appear in query results. + Currently, compaction is not supported. 
+ + + +--- + +## Indexing vector data - TEXT + +### Indexing raw text + +The index in this example indexes data from raw text. +For an index that indexes pre-made text-embeddings see [this example below](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-pre-made-text-embeddings). + +The following index defines a **vector field** named `VectorfromText`. +It indexes embeddings generated from the raw textual data in the `Name` field of all _Product_ documents. + + + + +{`public class Products_ByVector_Text : + AbstractIndexCreationTask +{ + public class IndexEntry() + { + // This index-field will hold the embeddings that will be generated + // from the TEXT in the documents + public object VectorFromText { get; set; } + } + + public Products_ByVector_Text() + { + Map = products => from product in products + select new IndexEntry + { + // Call 'CreateVector' to create a VECTOR FIELD. + // Pass the document field containing the text + // from which the embeddings will be generated. + VectorFromText = CreateVector(product.Name) + }; + + // You can customize the vector field using EITHER of the following syntaxes: + // ========================================================================== + + // Customize using VectorOptions: + VectorIndexes.Add(x => x.VectorFromText, + new VectorOptions() + { + // Define the source embedding type + SourceEmbeddingType = VectorEmbeddingType.Text, + + // Define the quantization for the destination embedding + DestinationEmbeddingType = VectorEmbeddingType.Single, + + // Optionally, set the number of edges + NumberOfEdges = 20, + + // Optionally, set the number of candidates + NumberOfCandidatesForIndexing = 20 + }); + + // OR - Customize using builder: + Vector(x=>x.VectorFromText, + builder => builder + .SourceEmbedding(VectorEmbeddingType.Text) + .DestinationEmbedding(VectorEmbeddingType.Single) + .NumberOfEdges(20) + .NumberOfCandidates(20)); + + // The index MUST use the Corax search engine + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`public class Products_ByVector_Text_JS : AbstractJavaScriptIndexCreationTask +{ + public Products_ByVector_Text_JS() + { + Maps = new HashSet() + { + @"map('Products', function (product) { + return { + VectorFromText: createVector(product.Name) + }; + })" + }; + + Fields = new(); + Fields.Add("VectorFromText", new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Text, + DestinationEmbeddingType = VectorEmbeddingType.Single, + NumberOfEdges = 20, + NumberOfCandidatesForIndexing = 20 + } + }); + + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`var indexDefinition = new IndexDefinition +{ + Name = "Products/ByVector/Text", + + Maps = new HashSet + { + @" + from product in docs.Products + select new + { + VectorFromText = CreateVector(product.Name) + }" + }, + + Fields = new Dictionary() + { + { + "VectorFromText", + new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Text, + DestinationEmbeddingType = VectorEmbeddingType.Single, + NumberOfEdges = 20, + NumberOfCandidatesForIndexing = 20 + } + } + } + }, + + Configuration = new IndexConfiguration() + { + ["Indexing.Static.SearchEngineType"] = "Corax" + } +}; + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +`} + + + + +Execute a vector search using the index: +Results will include _Product_ documents where the `Name` 
field is similar to the search term `"italian food"`. + + + + +{`var similarProducts = session + .Query() + // Perform a vector search + // Call the 'VectorSearch' method + .VectorSearch( + field => field + // Call 'WithField' + // Specify the index-field in which to search for similar values + .WithField(x => x.VectorFromText), + searchTerm => searchTerm + // Call 'ByText' + // Provide the term for the similarity comparison + .ByText("italian food"), + // Optionally, specify the minimum similarity value + minimumSimilarity: 0.82f, + // Optionally, specify the number candidates for querying + numberOfCandidates: 20, + // Optionally, specify whether the vector search should use the 'exact search method' + isExact: true) + // Waiting for not-stale results is not mandatory + // but will assure results are not stale + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToList(); +`} + + + + +{`var similarProducts = await asyncSession + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromText), + searchTerm => searchTerm + .ByText("italian food"), 0.82f, 20, isExact: true) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToListAsync(); +`} + + + + +{`var similarProducts = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromText), + searchTerm => searchTerm + .ByText("italian food"), 0.82f, 20, isExact: true) + .WaitForNonStaleResults() + .OfType() + .ToList(); +`} + + + + +{`var similarProducts = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromText), + searchTerm => searchTerm + .ByText("italian food"), + 0.82f, 20, isExact: true) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +`} + + + + +{`var similarProducts = session.Advanced + .RawQuery(@" + from index 'Products/ByVector/Text' + // Optionally, wrap the 'vector.search' query with 'exact()' to perform an exact search + where exact(vector.search(VectorFromText, $searchTerm, 0.82, 20))") + .AddParameter("searchTerm", "italian food") + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var similarProducts = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Products/ByVector/Text' + // Optionally, wrap the 'vector.search' query with 'exact()' to perform an exact search + where exact(vector.search(VectorFromText, $searchTerm, 0.82, 20))") + .AddParameter("searchTerm", "italian food") + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Products/ByVector/Text" +// Optionally, wrap the 'vector.search' query with 'exact()' to perform an exact search +where exact(vector.search(VectorFromText, $searchTerm, 0.82, 20)) +{ "searchTerm" : "italian food" } +`} + + + + +### Indexing pre-made text-embeddings + +The index in this example defines a **vector field** named `VectorFromTextEmbeddings`. +It indexes pre-made text-embeddings that were generated by this +[embedding generation task](../../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#configuring-an-embeddings-generation-task---from-the-studio). 
+ + + + +{`public class Categories_ByPreMadeTextEmbeddings : + AbstractIndexCreationTask +{ + public class IndexEntry() + { + // This index-field will hold the text embeddings + // that were pre-made by the Embeddings Generation Task + public object VectorFromTextEmbeddings { get; set; } + } + + public Categories_ByPreMadeTextEmbeddings() + { + Map = categories => from category in categories + select new IndexEntry + { + // Call 'LoadVector' to create a VECTOR FIELD. Pass: + // * The document field name to be indexed (as a string) + // * The identifier of the task that generated the embeddings + // for the 'Name' field + VectorFromTextEmbeddings = LoadVector("Name", "id-for-task-open-ai") + }; + + VectorIndexes.Add(x => x.VectorFromTextEmbeddings, + new VectorOptions() + { + // Vector options can be customized + // in the same way as the above index example. + }); + + // The index MUST use the Corax search engine + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`public class Categories_ByPreMadeTextEmbeddings_JS : AbstractJavaScriptIndexCreationTask +{ + public Categories_ByPreMadeTextEmbeddings_JS() + { + Maps = new HashSet() + { + @"map('Categories', function (category) { + return { + VectorFromTextEmbeddings: + loadVector('Name', 'id-for-task-open-ai') + }; + })" + }; + + Fields = new(); + Fields.Add("VectorFromTextEmbeddings", new IndexFieldOptions() + { + Vector = new VectorOptions() + { + // Vector options can be customized + // in the same way as the above index example. + } + }); + + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`var indexDefinition = new IndexDefinition +{ + Name = "Categories/ByPreMadeTextEmbeddings", + Maps = new HashSet + { + @" + from category in docs.Categories + select new + { + VectorFromTextEmbeddings = LoadVector(""Name"", ""id-for-task-open-ai"") + }" + }, + + Fields = new Dictionary() + { + { + "VectorFromTextEmbeddings", + new IndexFieldOptions() + { + Vector = new VectorOptions() + { + // Vector options can be customized + // in the same way as the above index example. + } + } + } + }, + + Configuration = new IndexConfiguration() + { + ["Indexing.Static.SearchEngineType"] = "Corax" + } +}; + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +`} + + + + +Execute a vector search using the index: +Results will include _Category_ documents where the `Name` field is similar to the search term `"candy"`. 
+ + + + +{`var similarCategories = session + .Query() + // Perform a vector search + // Call the 'VectorSearch' method + .VectorSearch( + field => field + // Call 'WithField' + // Specify the index-field in which to search for similar values + .WithField(x => x.VectorFromTextEmbeddings), + searchTerm => searchTerm + // Call 'ByText' + // Provide the search term for the similarity comparison + .ByText("candy"), + // Optionally, specify the minimum similarity value + minimumSimilarity: 0.75f, + // Optionally, specify the number of candidates for querying + numberOfCandidates: 20, + // Optionally, specify whether the vector search should use the 'exact search method' + isExact: true) + // Waiting for not-stale results is not mandatory + // but will assure results are not stale + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToList(); +`} + + + + +{`var similarCategories = await asyncSession + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromTextEmbeddings), + searchTerm => searchTerm + .ByText("candy"), 0.75f, 20, isExact: true) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToListAsync(); +`} + + + + +{`var similarCategories = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromTextEmbeddings), + searchTerm => searchTerm + .ByText("candy"), 0.75f, 20, isExact: true) + .WaitForNonStaleResults() + .OfType() + .ToList(); +`} + + + + +{`var similarCategories = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromTextEmbeddings), + searchTerm => searchTerm + .ByText("candy"), + 0.75f, 20, isExact: true) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +`} + + + + +{`var similarCategories = session.Advanced + .RawQuery(@" + from index 'Categories/ByPreMadeTextEmbeddings' + // Optionally, wrap the 'vector.search' query with 'exact()' to perform an exact search + where exact(vector.search(VectorFromTextEmbeddings, $searchTerm, 0.75, 20))") + .AddParameter("searchTerm", "candy") + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var similarCategories = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Categories/ByPreMadeTextEmbeddings' + // Optionally, wrap the 'vector.search' query with 'exact()' to perform an exact search + where exact(vector.search(VectorFromTextEmbeddings, $searchTerm, 0.75, 20))") + .AddParameter("searchTerm", "candy") + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Categories/ByPreMadeTextEmbeddings" +// Optionally, wrap the 'vector.search' query with 'exact()' to perform an exact search +where exact(vector.search(VectorFromTextEmbeddings, $p0, 0.75, 20)) +{ "p0": "candy" } +`} + + + + +--- + +## Indexing vector data - NUMERICAL + + + +* RavenDB’s [Embedding generation tasks](../../../ai-integration/generating-embeddings/overview.mdx) are typically used to generate vector embeddings from TEXTUAL data stored in your documents. + These embeddings are then stored in [dedicated collections](../../../ai-integration/generating-embeddings/embedding-collections.mdx). + +* However, you are not limited to using these built-in tasks. + You can generate your own NUMERICAL embeddings - from any source (e.g., text, image, audio, etc.) - using a suitable multimodal model, and store them: + * as numerical arrays in your documents’ properties, or + * as attachments associated with your documents. + +* This numerical data can be indexed in a vector field in a static-index. 
+ Once indexed, you can query the vector field using either of the following: + + * **Query using a numerical embedding (direct vector)**: + You provide a numerical array as the search term, and RavenDB compares it directly against the indexed embeddings. + See [Indexing numerical data and querying using numeric input](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-numerical-data-and-querying-using-numeric-input). + + * **Query using a text input**: + You provide a text string as the search term and specify an existing [Embedding generation task](../../../ai-integration/generating-embeddings/overview.mdx) that will convert this text into a vector embedding. + This will work only if: + * the vector field you're querying contains numerical embeddings that were created using the **same model** as the one configured in the specified task, and + * that task exists in your database (i.e., its identifier is still available). + + In this case, RavenDB uses the task to transform the search term into an embedding, then compares it to the vector data that you had previously indexed yourself. + To improve performance, the generated embedding is cached, so repeated queries with the same search term don’t require re-computation. + + This hybrid approach allows you to index custom embeddings (e.g., externally generated image vectors) + while still benefiting from RavenDB’s ability to perform semantic text search, as long as the same model was used for both. + See [Indexing numerical data and querying using text input](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-numerical-data-and-querying-using-text-input). + +* + The examples in this section use the [sample data provided in the dynamic query article](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#sample-data). + + + + +### Indexing numerical data and querying using numeric input + +The following index defines a vector field named `VectorFromSingle`. +It indexes embeddings generated from the numerical data in the `TagsEmbeddedAsSingle` field of all _Movie_ documents. +The raw numerical data in the source documents is in **32-bit floating-point format**. + + + + +{`public class Movies_ByVector_Single : + AbstractIndexCreationTask +{ + public class IndexEntry() + { + // This index-field will hold the embeddings that will be generated + // from the NUMERICAL content in the documents. + public object VectorFromSingle { get; set; } + } + + public Movies_ByVector_Single() + { + Map = movies => from movie in movies + select new IndexEntry + { + // Call 'CreateVector' to create a VECTOR FIELD. + // Pass the document field containing the array (32-bit floating-point values) + // from which the embeddings will be generated. + VectorFromSingle = CreateVector(movie.TagsEmbeddedAsSingle) + }; + + // EITHER - Customize the vector field using VectorOptions: + VectorIndexes.Add(x => x.VectorFromSingle, + new VectorOptions() + { + // Define the source embedding type + SourceEmbeddingType = VectorEmbeddingType.Single, + + // Define the quantization for the destination embedding + DestinationEmbeddingType = VectorEmbeddingType.Single, + + // It is recommended to configure the number of dimensions + // which is the size of the arrays that will be indexed. 
+ Dimensions = 2, + + // Optionally, set the number of edges and candidates + NumberOfEdges = 20, + NumberOfCandidatesForIndexing = 20 + }); + + // OR - Customize the vector field using builder: + Vector(x => x.VectorFromSingle, + builder => builder + .SourceEmbedding(VectorEmbeddingType.Single) + .DestinationEmbedding(VectorEmbeddingType.Single) + .Dimensions(2) + .NumberOfEdges(20) + .NumberOfCandidates(20)); + + // The index MUST use the Corax search engine + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`public class Movies_ByVector_Single_JS : AbstractJavaScriptIndexCreationTask +{ + public Movies_ByVector_Single_JS() + { + Maps = new HashSet() + { + @"map('Movies', function (movie) { + return { + VectorFromSingle: createVector(movie.TagsEmbeddedAsSingle) + }; + })" + }; + + Fields = new(); + Fields.Add("VectorFromSingle", new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Single, + DestinationEmbeddingType = VectorEmbeddingType.Single, + Dimensions = 2, + NumberOfEdges = 20, + NumberOfCandidatesForIndexing = 20 + } + }); + + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`var indexDefinition = new IndexDefinition +{ + Name = "Movies/ByVector/Single", + + Maps = new HashSet + { + @" + from movie in docs.Movies + select new + { + VectorFromSingle = CreateVector(movie.TagsEmbeddedAsSingle) + }" + }, + + Fields = new Dictionary() + { + { + "VectorFromSingle", + new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Single, + DestinationEmbeddingType = VectorEmbeddingType.Single, + Dimensions = 2, + NumberOfEdges = 20, + NumberOfCandidatesForIndexing = 20 + } + } + } + }, + + Configuration = new IndexConfiguration() + { + ["Indexing.Static.SearchEngineType"] = "Corax" + } +}; + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +`} + + + + +Execute a vector search using the index: +(Provide a vector as the search term to the `ByEmbedding` method) + + + + +{`var similarMovies = session + .Query() + // Perform a vector search + // Call the 'VectorSearch' method + .VectorSearch( + field => field + // Call 'WithField' + // Specify the index-field in which to search for similar values + .WithField(x => x.VectorFromSingle), + queryVector => queryVector + // Call 'ByEmbedding' + // Provide the vector for the similarity comparison + .ByEmbedding( + new RavenVector(new float[] { 6.599999904632568f, 7.699999809265137f }))) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToList(); +`} + + + + +{`var similarMovies = await asyncSession + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromSingle), + queryVector => queryVector + .ByEmbedding( + new RavenVector(new float[] { 6.599999904632568f, 7.699999809265137f }))) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToListAsync(); +`} + + + + +{`var similarMovies = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromSingle), + queryVector => queryVector + .ByEmbedding( + new RavenVector(new float[] { 6.599999904632568f, 7.699999809265137f }))) + .WaitForNonStaleResults() + .OfType() + .ToList(); +`} + + + + +{`var similarMovies = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromSingle), + queryVector => queryVector + .ByEmbedding( + new RavenVector(new float[] { 
6.599999904632568f, 7.699999809265137f }))) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +`} + + + + +{`var similarMovies = session.Advanced + .RawQuery(@" + from index 'Movies/ByVector/Single' + where vector.search(VectorFromSingle, $queryVector)") + .AddParameter("queryVector", new RavenVector(new float[] + { + 6.599999904632568f, 7.699999809265137f + })) + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var similarMovies = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Movies/ByVector/Single' + where vector.search(VectorFromSingle, $queryVector)") + .AddParameter("queryVector", new RavenVector(new float[] + { + 6.599999904632568f, 7.699999809265137f + })) + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Movies/ByVector/Single" +where vector.search(VectorFromSingle, $queryVector) +{ "queryVector" : { "@vector" : [6.599999904632568, 7.699999809265137] }} +`} + + + + +The following index defines a vector field named `VectorFromInt8Arrays`. +It indexes embeddings generated from the numerical arrays in the `TagsEmbeddedAsInt8` field of all _Movie_ documents. +The raw numerical data in the source documents is in **Int8 (8-bit integers) format**. + + + + +{`public class Movies_ByVector_Int8 : + AbstractIndexCreationTask +{ + public class IndexEntry() + { + // This index-field will hold the embeddings that will be generated + // from the NUMERICAL content in the documents. + public object VectorFromInt8Arrays { get; set; } + } + + public Movies_ByVector_Int8() + { + Map = movies => from movie in movies + select new IndexEntry + { + // Call 'CreateVector' to create a VECTOR FIELD. + // Pass the document field containing the arrays (8-bit integer values) + // from which the embeddings will be generated. + VectorFromInt8Arrays = CreateVector(movie.TagsEmbeddedAsInt8) + }; + + // EITHER - Customize the vector field using VectorOptions: + VectorIndexes.Add(x => x.VectorFromInt8Arrays, + new VectorOptions() + { + // Define the source embedding type + SourceEmbeddingType = VectorEmbeddingType.Int8, + + // Define the quantization for the destination embedding + DestinationEmbeddingType = VectorEmbeddingType.Int8, + + // It is recommended to configure the number of dimensions + // which is the size of the arrays that will be indexed. 
+ Dimensions = 2, + + // Optionally, set the number of edges and candidates + NumberOfEdges = 20, + NumberOfCandidatesForIndexing = 20 + }); + + // OR - Customize the vector field using builder: + Vector(x => x.VectorFromInt8Arrays, + builder => builder + .SourceEmbedding(VectorEmbeddingType.Int8) + .DestinationEmbedding(VectorEmbeddingType.Int8) + .Dimensions(2) + .NumberOfEdges(20) + .NumberOfCandidates(20)); + + // The index MUST use the Corax search engine + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`public class Movies_ByVector_Int8_JS : AbstractJavaScriptIndexCreationTask +{ + public Movies_ByVector_Int8_JS() + { + Maps = new HashSet() + { + @"map('Movies', function (movie) { + return { + VectorFromInt8Arrays: createVector(movie.TagsEmbeddedAsInt8) + }; + })" + }; + + Fields = new(); + Fields.Add("VectorFromInt8Arrays", new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Int8, + DestinationEmbeddingType = VectorEmbeddingType.Int8, + Dimensions = 2, + NumberOfEdges = 20, + NumberOfCandidatesForIndexing = 20 + } + }); + + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`var indexDefinition = new IndexDefinition +{ + Name = "Movies/ByVector/Int8", + + Maps = new HashSet + { + @" + from movie in docs.Movies + select new + { + VectorFromInt8Arrays = CreateVector(movie.TagsEmbeddedAsInt8) + }" + }, + + Fields = new Dictionary() + { + { + "VectorFromInt8Arrays", + new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Int8, + DestinationEmbeddingType = VectorEmbeddingType.Int8, + Dimensions = 2, + NumberOfEdges = 20, + NumberOfCandidatesForIndexing = 20 + } + } + } + }, + + Configuration = new IndexConfiguration() + { + ["Indexing.Static.SearchEngineType"] = "Corax" + } +}; + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +`} + + + + +Execute a vector search using the index: +(Provide a vector as the search term to the `ByEmbedding` method) + + + + +{`var similarMovies = session + .Query() + // Perform a vector search + // Call the 'VectorSearch' method + .VectorSearch( + field => field + // Call 'WithField' + // Specify the index-field in which to search for similar values + .WithField(x => x.VectorFromInt8Arrays), + queryVector => queryVector + // Call 'ByEmbedding' + // Provide the vector for the similarity comparison + // (Note: provide a single vector) + .ByEmbedding( + // The provided vector MUST be in the same format as was stored in your document + // Call 'VectorQuantizer.ToInt8' to transform the rawData to the Int8 format + VectorQuantizer.ToInt8(new float[] { 0.1f, 0.2f }))) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToList(); +`} + + + + +{`var similarMovies = await asyncSession + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromInt8Arrays), + queryVector => queryVector + .ByEmbedding( + VectorQuantizer.ToInt8(new float[] { 0.1f, 0.2f }))) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToListAsync(); +`} + + + + +{`var similarMovies = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromInt8Arrays), + queryVector => queryVector + .ByEmbedding( + VectorQuantizer.ToInt8(new float[] { 0.1f, 0.2f }))) + .WaitForNonStaleResults() + .OfType() + .ToList(); +`} + + + + +{`var similarMovies = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + 
field => field + .WithField(x => x.VectorFromInt8Arrays), + queryVector => queryVector + .ByEmbedding( + VectorQuantizer.ToInt8(new float[] { 0.1f, 0.2f }))) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +`} + + + + +{`var similarMovies = session.Advanced + .RawQuery(@" + from index 'Movies/ByVector/Int8' + where vector.search(VectorFromInt8Arrays, $queryVector)") + .AddParameter("queryVector", VectorQuantizer.ToInt8(new float[] { 0.1f, 0.2f })) + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var similarMovies = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Movies/ByVector/Int8' + where vector.search(VectorFromInt8Arrays, $queryVector)") + .AddParameter("queryVector", VectorQuantizer.ToInt8(new float[] { 0.1f, 0.2f })) + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Movies/ByVector/Int8" +where vector.search(VectorFromInt8Arrays, $queryVector) +{ "queryVector" : [64, 127, -51, -52, 76, 62] } +`} + + + + +### Indexing numerical data and querying using text input + +The following index defines a vector field named `VectorFromPhoto`. +It indexes embeddings generated from the numerical data in the `MoviePhotoEmbedding` field of all _Movie_ documents. + + + +```csharp +public class Movies_ByVectorFromPhoto : + AbstractIndexCreationTask +{ + public class IndexEntry() + { + // This index-field will hold the embeddings that will be generated + // from the NUMERICAL content in the documents. + public object VectorFromPhoto { get; set; } + } + + public Movies_ByVectorFromPhoto() + { + Map = movies => from movie in movies + select new IndexEntry + { + // Call 'CreateVector' to create a VECTOR FIELD. + // Pass the document field containing the array + // from which the embeddings will be generated. + VectorFromPhoto = CreateVector(movie.MoviePhotoEmbedding) + }; + + // Customize the vector field: + Vector(x => x.VectorFromPhoto, + builder => builder + .SourceEmbedding(VectorEmbeddingType.Single) + .DestinationEmbedding(VectorEmbeddingType.Single) + // Dimensions should match the embedding size, 6 is only for our simple example... 
+ .Dimensions(6)); + + // The index MUST use the Corax search engine + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +``` + + +```csharp +public class Movies_ByVectorFromPhoto_JS : AbstractJavaScriptIndexCreationTask +{ + public Movies_ByVectorFromPhoto_JS() + { + Maps = new HashSet() + { + @"map('Movies', function (movie) { + return { + VectorFromPhoto: createVector(movie.MoviePhotoEmbedding) + }; + })" + }; + + Fields = new(); + Fields.Add("VectorFromPhoto", new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Single, + DestinationEmbeddingType = VectorEmbeddingType.Single, + Dimensions = 6, // using 6 only for this simple example + } + }); + + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +``` + + +```csharp +var indexDefinition = new IndexDefinition +{ + Name = "Movies/ByVectorFromPhoto", + + Maps = new HashSet + { + @" + from movie in docs.Movies + select new + { + VectorFromPhoto = CreateVector(movie.MoviePhotoEmbedding) + }" + }, + + Fields = new Dictionary() + { + { + "VectorFromPhoto", + new IndexFieldOptions() + { + Vector = new VectorOptions() + { + SourceEmbeddingType = VectorEmbeddingType.Single, + DestinationEmbeddingType = VectorEmbeddingType.Single, + Dimensions = 6, // using 6 only for this simple example + } + } + } + }, + + Configuration = new IndexConfiguration() + { + ["Indexing.Static.SearchEngineType"] = "Corax" + } +}; + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +``` + + + +Execute a vector search using the index: + + * Pass a textual search term to the `ByText` method, + along with the ID of the embedding generation task that will convert the text into an embedding. + + * The query is only meaningful if the vector field being searched contains numerical embeddings + generated using the same model as the one configured in the specified task. + + * If the specified task ID is not found, RavenDB will throw an `InvalidQueryException`. + To avoid this error, you can verify that the specified embeddings generation task exists before issuing the query. + See [Get embeddings generation task details](../ai-integration/generating-embeddings/overview#get-embeddings-generation-task-details) + to learn how to check which tasks are defined and what their identifiers are. 
+ + + +```csharp +// Query for movies with images related to 'NASA' +var similarMovies = session + .Query() + // Perform a vector search + // Call the 'VectorSearch' method + .VectorSearch( + field => field + // Call 'WithField' + // Specify the index field that stores the image embeddings + .WithField(x => x.VectorFromPhoto), + queryVector => queryVector + // Call 'ByText' + // Provide a textual description to be embedded by the same multimodal model + // used for the MoviePhotoEmbedding field + .ByText("NASA", "id-of-embedding-generation-task"), + // As with any other vector search query, you can optionally specify + // 'minimumSimilarity', 'numberOfCandidates', and 'isExact' + minimumSimilarity: 0.85f) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToList(); +``` + + +```csharp +var similarMovies = await asyncSession + .Query() + .VectorSearch( + field => field.WithField(x => x.VectorFromPhoto), + queryVector => queryVector.ByText("NASA", "id-of-embedding-generation-task"), + minimumSimilarity: 0.85f) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToListAsync(); +``` + + +```csharp +var similarMovies = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field.WithField(x => x.VectorFromPhoto), + queryVector => queryVector.ByText("NASA", "id-of-embedding-generation-task"), 0.85f) + .WaitForNonStaleResults() + .OfType() + .ToList(); +``` + + +```csharp +var similarMovies = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + "VectorFromPhoto", + queryVector => queryVector.ByText("NASA", "id-of-embedding-generation-task"), 0.85f) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +``` + + +```csharp +var similarMovies = session + .Advanced + .RawQuery(@" + from index 'Movies/ByVectorFromPhoto' + where vector.search(VectorFromPhoto, embedding.text($searchTerm, ai.task($embeddingTaskId)), 0.85, null) + ") + .AddParameter("searchTerm", "NASA") + .AddParameter("embeddingTaskId", "id-of-embedding-generation-task") + .ToList(); +``` + + +```csharp +var similarMovies = await asyncSession + .Advanced + .RawQuery(@" + from index 'Movies/ByVectorFromPhoto' + where vector.search(VectorFromPhoto, embedding.text($searchTerm, ai.task($embeddingTaskId)), 0.85, null) + ") + .AddParameter("searchTerm", "NASA") + .AddParameter("embeddingTaskId", "id-of-embedding-generation-task") + .ToListAsync(); +``` + + +```sql +from index 'Movies/ByVectorFromPhoto' +where vector.search(VectorFromPhoto, embedding.text($searchTerm, ai.task($embeddingTaskId)), 0.85, null) +{ "searchTerm" : "NASA", "embeddingTaskId" : "id-of-embedding-generation-task" } +``` + + + +--- + +## Indexing multiple field types + +An index can define multiple types of index-fields. In this example, the index includes: +A _'regular'_ field, a _'vector'_ field, and a field configured for [full-text search](../../../indexes/querying/searching.mdx). +This allows you to query across all fields using various predicates. 
+ + + + +{`public class Products_ByMultipleFields : + AbstractIndexCreationTask +{ + public class IndexEntry() + { + // An index-field for 'regular' data + public decimal PricePerUnit { get; set; } + + // An index-field for 'full-text' search + public string Name { get; set; } + + // An index-field for 'vector' search + public object VectorFromText { get; set; } + } + + public Products_ByMultipleFields() + { + Map = products => from product in products + select new IndexEntry + { + PricePerUnit = product.PricePerUnit, + Name = product.Name, + VectorFromText = CreateVector(product.Name) + }; + + // Configure the index-field 'Name' for FTS: + Index(x => x.Name, FieldIndexing.Search); + + // Note: + // Default values will be used for the VECTOR FIELD if not customized here. + + // The index MUST use the Corax search engine + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`public class Products_ByMultipleFields_JS : AbstractJavaScriptIndexCreationTask +{ + public Products_ByMultipleFields_JS() + { + Maps = new HashSet() + { + @"map('Products', function (product) { + return { + PricePerUnit: product.PricePerUnit, + Name: product.Name, + VectorFromText: createVector(product.Name) + }; + })" + }; + + Fields = new(); + Fields.Add("Name", new IndexFieldOptions() + { + Indexing = FieldIndexing.Search + }); + + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Corax; + } +} +`} + + + + +{`var indexDefinition = new IndexDefinition +{ + Name = "Products/ByMultipleFields", + Maps = new HashSet + { + @" + from product in docs.Products + select new + { + PricePerUnit = product.PricePerUnit, + Name = product.Name, + VectorFromText = CreateVector(product.Name) + }" + }, + + Fields = new Dictionary() + { + { + "Name", + new IndexFieldOptions() + { + Indexing = FieldIndexing.Search + } + } + }, + + Configuration = new IndexConfiguration() + { + ["Indexing.Static.SearchEngineType"] = "Corax" + } +}; + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +`} + + + + +Execute a query that combines predicates across all index-field types: + + + + +{`var results = session.Advanced + .DocumentQuery() + // Perform a regular search + .WhereGreaterThan(x => x.PricePerUnit, 200) + .OrElse() + // Perform a full-text search + .Search(x => x.Name, "Alice") + .OrElse() + // Perform a vector search + .VectorSearch( + field => field + .WithField(x => x.VectorFromText), + searchTerm => searchTerm + .ByText("italian food"), + minimumSimilarity: 0.8f) + .WaitForNonStaleResults() + .OfType() + .ToList(); +`} + + + + +{`var results = await asyncSession.Advanced + .AsyncDocumentQuery() + .WhereGreaterThan(x => x.PricePerUnit, 200) + .OrElse() + .Search(x => x.Name, "Alice") + .OrElse() + .VectorSearch( + field => field + .WithField(x => x.VectorFromText), + searchTerm => searchTerm + .ByText("italian food"), + minimumSimilarity: 0.8f) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +`} + + + + +{`var results = session.Advanced + .RawQuery(@" + from index 'Products/ByMultipleFields' + where PricePerUnit > $minPrice + or search(Name, $searchTerm1) + or vector.search(VectorFromText, $searchTerm2, 0.8)") + .AddParameter("minPrice", 200) + .AddParameter("searchTerm1", "Alice") + .AddParameter("searchTerm2", "italian") + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var results = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Products/ByMultipleFields' + where PricePerUnit > $minPrice + or search(Name, $searchTerm1) + or 
vector.search(VectorFromText, $searchTerm2, 0.8)") + .AddParameter("minPrice", 200) + .AddParameter("searchTerm1", "Alice") + .AddParameter("searchTerm2", "italian") + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Products/ByMultipleFields" +where PricePerUnit > $minPrice +or search(Name, $searchTerm1) +or vector.search(VectorFromText, $searchTerm2, 0.8) +{ "minPrice" : 200, "searchTerm1" : "Alice", "searchTerm2": "italian" } +`} + + + + +--- + +## Querying the static index for similar documents + +* Similar to [querying for similar documents using a dynamic query](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---querying-for-similar-documents), + you can **query a static-index for similar documents** by specifying a document ID in the vector search. + +* The following example queries the static-index defined in [this example](../../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-vector-data---text) above. + The document for which we want to find similar documents is specified by the document ID passed to the `ForDocument` method. + +* RavenDB retrieves the embedding that was indexed for the queried field in the specified document and uses it as the query vector for the similarity comparison. + +* The results will include documents whose indexed embeddings are most similar to the one stored in the referenced document’s index-entry. + + + + +{`var similarProducts = session + .Query() + // Perform a vector search + // Call the 'VectorSearch' method + .VectorSearch( + field => field + // Call 'WithField' + // Specify the index-field in which to search for similar values + .WithField(x => x.VectorFromText), + embedding => embedding + // Call 'ForDocument' + // Provide the document ID for which you want to find similar documents. + // The embedding stored in the index for the specified document + // will be used as the "query vector". 
+ .ForDocument("Products/7-A"), + // Optionally, specify the minimum similarity value + minimumSimilarity: 0.82f) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToList(); +`} + + + + +{`var similarCategories = await asyncSession + .Query() + .VectorSearch( + field => field + .WithField(x => x.VectorFromText), + embedding => embedding + .ForDocument("Products/7-A"), + minimumSimilarity: 0.82f) + .Customize(x => x.WaitForNonStaleResults()) + .OfType() + .ToListAsync(); +`} + + + + +{`var similarProducts = session.Advanced + .DocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromText), + embedding => embedding + .ForDocument("Products/7-A"), + minimumSimilarity: 0.82f) + .WaitForNonStaleResults() + .OfType() + .ToList(); +`} + + + + +{`var similarProducts = await asyncSession.Advanced + .AsyncDocumentQuery() + .VectorSearch( + field => field + .WithField(x => x.VectorFromText), + embedding => embedding + .ForDocument("Products/7-A"), + minimumSimilarity: 0.82f) + .WaitForNonStaleResults() + .OfType() + .ToListAsync(); +`} + + + + +{`var similarProducts = session.Advanced + .RawQuery(@" + from index 'Products/ByVector/Text' + // Pass a document ID to the 'forDoc' method to find similar documents + where vector.search(VectorFromText, embedding.forDoc($documentID), 0.82)") + .AddParameter("$documentID", "Products/7-A") + .WaitForNonStaleResults() + .ToList(); +`} + + + + +{`var similarProducts = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Products/ByVector/Text' + // Pass a document ID to the 'forDoc' method to find similar documents + where vector.search(VectorFromText, embedding.forDoc($documentID), 0.82)") + .AddParameter("$documentID", "Products/7-A") + .WaitForNonStaleResults() + .ToListAsync(); +`} + + + + +{`from index "Products/ByVector/Text" +// Pass a document ID to the 'forDoc' method to find similar documents +where vector.search(VectorFromText, embedding.forDoc($documentID), 0.82) +{"documentID" : "Products/7-A"} +`} + + + + +Running the above example on RavenDB’s sample data returns the following documents that have similar content in their _Name_ field: +(Note: the results include the referenced document itself, _Products/7-A_) + + + +{`// ID: products/7-A ... Name: "Uncle Bob's Organic Dried Pears" +// ID: products/51-A ... Name: "Manjimup Dried Apples" +// ID: products/6-A ... Name: "Grandma's Boysenberry Spread" +`} + + + +--- + +## Configure the vector field in the Studio + + ![Add vector field](../assets/add-vector-field-1.png) + + ![Customize vector field](../assets/add-vector-field-2.png) + +1. **Vector field name** + Enter the name of the vector field to customize. +2. **Configure Vector Field** + Click this button to customize the field. +3. **Dimensions** + For numerical input only - define the size of the array from your source document. +4. **Edges** + The number of edges that will be created for a vector during indexing. +5. **Source embedding type** + The format of the source embeddings (Text, Single, Int8, or Binary). +6. **Candidates for indexing** + The number of candidates (potential neighboring vectors) that RavenDB evaluates during vector indexing. +7. **Destination embedding type** + The quantization format for the embeddings that will be generated (Text, Single, Int8, or Binary). 
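+
+For reference, the Studio options listed above correspond to the programmatic `VectorOptions` properties used throughout this article.
+Below is a minimal sketch of that mapping; the values shown are arbitrary examples, not recommendations:
+
+```csharp
+var vectorOptions = new VectorOptions()
+{
+    // (5) Source embedding type - the format of the source embeddings
+    SourceEmbeddingType = VectorEmbeddingType.Single,
+    // (7) Destination embedding type - the quantization format for the generated embeddings
+    DestinationEmbeddingType = VectorEmbeddingType.Int8,
+    // (3) Dimensions - for numerical input only, the size of the arrays in the source documents
+    Dimensions = 2,
+    // (4) Edges - the number of edges created for a vector during indexing
+    NumberOfEdges = 20,
+    // (6) Candidates for indexing - potential neighboring vectors evaluated during indexing
+    NumberOfCandidatesForIndexing = 20
+};
+```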
diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/data-types-for-vector-search.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/data-types-for-vector-search.mdx new file mode 100644 index 0000000000..06caaac31b --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/data-types-for-vector-search.mdx @@ -0,0 +1,31 @@ +--- +title: "Data Types for Vector Search" +hide_table_of_contents: true +sidebar_label: Data Types for Vector Search +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DataTypesForVectorSearchCsharp from './content/_data-types-for-vector-search-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/indexing-attachments-for-vector-search.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/indexing-attachments-for-vector-search.mdx new file mode 100644 index 0000000000..0cf688186a --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/indexing-attachments-for-vector-search.mdx @@ -0,0 +1,31 @@ +--- +title: "Indexing Attachments for Vector Search" +hide_table_of_contents: true +sidebar_label: Indexing Attachments for Vector Search +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingAttachmentsForVectorSearchCsharp from './content/_indexing-attachments-for-vector-search-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/ravendb-as-vector-database.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/ravendb-as-vector-database.mdx new file mode 100644 index 0000000000..eb73ac1b65 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/ravendb-as-vector-database.mdx @@ -0,0 +1,115 @@ +--- +title: "RavenDB as a Vector Database" +hide_table_of_contents: true +sidebar_label: RavenDB as a Vector Database +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# RavenDB as a Vector Database + + + +* In this article: + * [What is a vector database](../../ai-integration/vector-search/ravendb-as-vector-database.mdx#what-is-a-vector-database) + * [Why choose RavenDB as your vector database](../../ai-integration/vector-search/ravendb-as-vector-database.mdx#why-choose-ravendb-as-your-vector-database) + + + +## What is a vector database + +* A vector database stores data as high-dimensional numerical representations (embedding vectors), + enabling searches based on contextual meaning and vector similarity rather than exact keyword matches. + Instead of relying on traditional indexing, it retrieves relevant results by measuring how close vectors are in a multi-dimensional space. + +* Vector databases are widely used in applications such as: + + * Semantic search – Finding documents based on meaning rather than exact words. + * Recommendation engines – Suggesting products, media, or content based on similarity. 
+ * AI and machine learning – Powering LLMs, multi-modal search, and object detection. + +**Embeddings**: + +* A vector database stores data as high-dimensional vectors in a high-dimensional space. + These vectors, known as **embeddings**, are mathematical representations of your data. + +* Each embedding is an array of numbers (e.g. [0.45, 3.6, 1.25, 0.7, ...]), where each dimension represents specific characteristics of the data, capturing its contextual meaning. + Words, phrases, entire documents, images, audio, and other types of data can all be vectorized. + +* The raw data is converted into embeddings using [transformers](https://huggingface.co/docs/transformers). + To optimize storage and computation, transformers can encode embeddings with lower-precision data types, such as 8-bit integers, through a technique called [quantization](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options). + +**Indexing embeddings and searching**: + +* The embedding vectors are indexed and stored in a vector space. + Their positions reflect relationships and characteristics of the data as determined by the model that generated them. + The distance between two embeddings in the vector space correlates with the similarity of their original inputs within that model's context. + +* Vectors representing similar data are positioned close to each other in the vector space. + This is achieved using algorithms such as [HNSW](https://en.wikipedia.org/wiki/Hierarchical_navigable_small_world), which is designed for indexing and querying embeddings. + HNSW constructs a graph-based structure that efficiently retrieves approximate nearest neighbors in high-dimensional spaces. + +* This architecture enables **similarity searches**. Instead of conventional keyword-based queries, + a vector database lets you find relevant data based on semantic and contextual meaning. + +## Why choose RavenDB as your vector database + +##### An integrated solution: + +* RavenDB provides an integrated solution that combines high-performance NoSQL capabilities with advanced vector indexing and querying features, + enabling efficient storage and management of high-dimensional vector data. + +##### Reduced infrastructure complexity: + +* RavenDB's built-in vector search eliminates the need for external vector databases, + simplifying your infrastructure and reducing maintenance overhead. + +##### AI integration: + +* You can use RavenDB as the **vector database** for your AI-powered applications, including large language models (LLMs). + This eliminates the need to transfer data to expensive external services for vector similarity search, + providing a cost-effective and efficient solution for vector-based operations. + +##### Multiple field types in indexes: + +* An index can consist of multiple index-fields, each having a distinct type, such as a standard field, a spatial field, a full-text search field, or a **vector field**. + This flexibility allows you to work with complex documents containing various data types and retrieve meaningful insights by querying the index across all these fields. + An example is available in [Indexing multiple field types](../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-multiple-field-types). 
+ +* Document [attachments](../../ai-integration/vector-search/indexing-attachments-for-vector-search.mdx) can also be indexed as vector fields, and Map-Reduce indexes can incorporate vector fields in their reduce phase, + further extending the versatility of your data processing and search capabilities. + +##### Built-in embedding support: + +* **Textual input**: + Embeddings can be automatically generated from textual content within your documents by defining + [Embeddings generation tasks](../../ai-integration/generating-embeddings/overview.mdx). + These tasks connect to external embedding providers such as **Azure OpenAI, OpenAI, Hugging Face, Google AI, Ollama, or Mistral AI**. + If no task is specified, embeddings will be generated using the built-in [bge-micro-v2](https://huggingface.co/TaylorAI/bge-micro-v2) model. + + When querying with a phrase, RavenDB generates an embedding for the search term using the same model applied to the document data + and compares it against the indexed embeddings. + +* **Numerical arrays input**: + Documents in RavenDB can also contain numerical arrays with **pre-made embeddings** created elsewhere. + Use RavenDB's dedicated data type, [RavenVector](../../ai-integration/vector-search/data-types-for-vector-search.mdx#ravenvector), to store these embeddings in your document entities. + This type is highly optimized to reduce storage space and enhance the speed of reading arrays from disk. + +* **HNSW algorithm usage**: + All embeddings, whether generated from textual input or pre-made numerical arrays, + are indexed and searched for using the [HNSW](https://en.wikipedia.org/wiki/Hierarchical_navigable_small_world) algorithm. + +* **Optimize storage via quantization**: + RavenDB allows you to select the quantization format for the generated embeddings when creating the index. + Learn more in [Quantization options](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#quantization-options). + +* **Perform vector search**: + Leverage RavenDB's [Auto-indexes](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx) + and [Static indexes](../../ai-integration/vector-search/vector-search-using-static-index.mdx) to perform a vector search, + retrieving documents based on contextual similarity rather than exact word matches. 
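+
+To give a first impression of the query syntax, here is a minimal sketch of a dynamic vector search over the textual
+_Name_ field of _Product_ documents. It assumes the `WithText` and `ByText` builder methods described in the
+[dynamic query](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx) article:
+
+```csharp
+// A minimal sketch of a dynamic vector search query:
+var similarProducts = session
+    .Query<Product>()
+    .VectorSearch(
+        // Embeddings are generated on the fly from the textual 'Name' field
+        field => field.WithText(x => x.Name),
+        // The search term is embedded with the same model and compared by similarity
+        searchTerm => searchTerm.ByText("italian food"))
+    .ToList();
+```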
diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/vector-search-using-dynamic-query.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/vector-search-using-dynamic-query.mdx new file mode 100644 index 0000000000..9df1df682d --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/vector-search-using-dynamic-query.mdx @@ -0,0 +1,31 @@ +--- +title: "Vector Search using a Dynamic Query" +hide_table_of_contents: true +sidebar_label: Vector Search using a Dynamic Query +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import VectorSearchUsingDynamicQueryCsharp from './content/_vector-search-using-dynamic-query-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/vector-search-using-static-index.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/vector-search-using-static-index.mdx new file mode 100644 index 0000000000..f288454d1a --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/vector-search-using-static-index.mdx @@ -0,0 +1,31 @@ +--- +title: "Vector Search using a Static Index" +hide_table_of_contents: true +sidebar_label: Vector Search using a Static Index +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import VectorSearchUsingStaticIndexCsharp from './content/_vector-search-using-static-index-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/vector-search_start.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/vector-search_start.mdx new file mode 100644 index 0000000000..c4a6ae42ac --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/vector-search_start.mdx @@ -0,0 +1,61 @@ +--- +title: "Vector search: Start" +hide_table_of_contents: true +sidebar_label: Start +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; + +import CardWithImage from "@site/src/components/Common/CardWithImage"; +import CardWithImageHorizontal from "@site/src/components/Common/CardWithImageHorizontal"; +import ColGrid from "@site/src/components/ColGrid"; +import aiImageSearchWithRavenDbImage from "./assets/ai-image-search-with-ravendb.webp"; + +import ayendeBlogImage from "@site/static/img/from-ayende-com.webp"; +import webinarThumbnailPlaceholder from "@site/static/img/webinar.webp"; + +# Vector search + +### Search by meaning and context using vector search operations. +Vector search operations allow you to compare [Embeddings](https://en.wikipedia.org/wiki/Embedding_(machine_learning)) to find content by similarity rather than by exact matches. E.g., to find text by meaning or image by context. +- You can search over embeddings that were generated by RavenDB [ongoing embeddings-generation tasks](../../ai-integration/generating-embeddings/embeddings-generation-task) or by an external embeddings provider. +- You can also generate the embeddings for your documents on-the-fly, while searching. +- When you run a vector search, your search query is converted into an embedding as well, and compared against document embeddings using either a dynamic query for ad-hoc or infrequent searches, or a static index for optimized performance. 
+- Vector search can be used by other RavenDB AI features. E.g., [AI agents](../../ai-integration/ai-agents/ai-agents_start) can use vector search to retrieve relevant data requested by the LLM. + +### Use cases +Vector search can help wherever you need to find similar items based on proximity rather than exact matches, e.g. - +* **Knowledge and document search** + Find relevant documentation, policies, legal texts, or enterprise reports using natural language queries. +* **Product and content recommendations** + Suggest similar products, articles, videos, or media based on descriptive queries and user preferences. +* **Customer support automation** + Route questions to the best help articles, retrieve guides, and power chatbot responses with relevant information. +* **Business intelligence and analysis** + Profile customers and uncover market trends by comparing behavioral and relationship-based similarities. +* **Media and content analysis** + Discover similar images, moderate content, and monitor social media for brand mentions and sentiment. + +### Technical documentation +Learn about vector search operations, how they use embeddings to find content by meaning or context, their ability to generate embeddings on the fly during searches, and other key aspects of this feature. + + + + + + +#### Learn more: In-depth vector search articles + + + + + + +### Related lives & Videos +Learn more about enhancing your applications using vector search operations. + + + + + diff --git a/versioned_docs/version-7.1/ai-integration/vector-search/what-affects-vector-search-results.mdx b/versioned_docs/version-7.1/ai-integration/vector-search/what-affects-vector-search-results.mdx new file mode 100644 index 0000000000..d8e8c4add4 --- /dev/null +++ b/versioned_docs/version-7.1/ai-integration/vector-search/what-affects-vector-search-results.mdx @@ -0,0 +1,172 @@ +--- +title: "What Affects Vector Search Results" +hide_table_of_contents: true +sidebar_label: What Affects Vector Search Results +sidebar_position: 6 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# What Affects Vector Search Results + + + +* This article explains why vector search results might not always return what you expect, even when relevant documents exist. + It applies to both [Dynamic vector search queries](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx) and + [Static-index vector search queries](../../ai-integration/vector-search/vector-search-using-static-index.mdx). + +* Vector search in RavenDB uses the [HNSW](https://en.wikipedia.org/wiki/Hierarchical_navigable_small_world) algorithm (Hierarchical Navigable Small World) + to index and search high-dimensional vector embeddings efficiently. + This algorithm prioritizes performance, speed, and scalability over exact precision. + Due to its approximate nature, results may occasionally exclude some relevant documents. + +* Several **indexing-time parameters** affect how the vector graph is built, and **query-time parameters** affect how the graph is searched. + These settings influence the trade-off between speed and accuracy. 
+ +* If full accuracy is required, RavenDB also provides [Exact vector search](../../ai-integration/vector-search/what-affects-vector-search-results.mdx#using-exact-search), + which performs a full scan of all indexed vectors to guarantee the closest matches. +* In this article: + * [The approximate nature of HNSW](../../ai-integration/vector-search/what-affects-vector-search-results.mdx#the-approximate-nature-of-hnsw) + * [Indexing-time parameters](../../ai-integration/vector-search/what-affects-vector-search-results.mdx#indexing-time-parameters) + * [Query-time parameters](../../ai-integration/vector-search/what-affects-vector-search-results.mdx#query-time-parameters) + * [Using exact search](../../ai-integration/vector-search/what-affects-vector-search-results.mdx#using-exact-search) + + + +## The approximate nature of HNSW + +* **Graph structure**: + + * HNSW builds a multi-layer graph, organizing vectors into a series of layers: + Top layers are sparse and support fast, broad navigation. + The bottom layer is dense and includes all indexed vectors for fine-grained matching. + * Each node (vector) is connected only to a limited number of neighbors, selected as the most relevant during indexing (graph build time). + This limitation is controlled by the [Indexing-time parameters](../../ai-integration/vector-search/what-affects-vector-search-results.mdx#indexing-time-parameters) described below. + * This structure speeds up search but increases the chance that a relevant document is not reachable - + especially if it's poorly connected. + +* **Insertion order effects**: + + * Because the HNSW graph is append-only and built incrementally, + the order in which documents are inserted can affect the final graph structure. + * Updates and deletes do not change the structure - deleted vectors are not physically removed, but marked as deleted (soft-deleted), + and updates typically replace a document by marking the old one as deleted and inserting a new one. + * This means that two databases containing the same documents may return different vector search results + if the documents were inserted in a different order. + +* **Greedy search**: + + * HNSW uses a greedy search strategy to perform approximate nearest-neighbor (ANN) searches: + The search starts at the top layer from an entry point. + The algorithm then descends through the layers, always choosing the neighbor closest to the query vector. + * The algorithm doesn't exhaustively explore all possible paths, so it can miss the true global nearest neighbors - + especially if they are not well-connected in the graph. + This design enables HNSW to find relevant results very quickly by focusing only on the most promising paths, making it highly efficient even for large datasets. + * The search is influenced by the [Query-time params](../../ai-integration/vector-search/what-affects-vector-search-results.mdx#query-time-parameters) described below. + Slight variations in graph structure or search parameters can lead to different results. + * While HNSW offers fast search performance at scale and quickly finds points that are likely to be among the nearest neighbors, + it does not guarantee exact results - only approximate matches are returned. + This behavior is expected in all ANN algorithms, not just HNSW. + If full accuracy is critical, consider using [Exact search](../../ai-integration/vector-search/what-affects-vector-search-results.mdx#using-exact-search) instead. 
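+
+As a quick illustration, the following hedged sketch requests exact matching instead of the approximate HNSW traversal.
+It assumes the optional `isExact` query parameter and an index class named `Products_ByVector_Text` that corresponds to
+the 'Products/ByVector/Text' index referenced in the static-index article:
+
+```csharp
+// Request exact (exhaustive) matching instead of the approximate HNSW search:
+var exactMatches = session.Advanced
+    .DocumentQuery<Products_ByVector_Text.IndexEntry, Products_ByVector_Text>()
+    .VectorSearch(
+        field => field.WithField(x => x.VectorFromText),
+        v => v.ByText("italian food"),
+        isExact: true) // omit this flag to keep the fast, approximate (default) search
+    .OfType<Product>()
+    .ToList();
+```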
+ +## Indexing-time parameters + +The structure of the HNSW graph is determined at indexing time. +RavenDB provides the following configuration parameters that control how the graph is built. +These parameters influence how vectors are connected and how effective the search will be. +They help keep memory usage and indexing time under control, but may also limit the graph’s ability to precisely represent all possible proximity relationships. + +* **Number of edges**: + + * This parameter, which corresponds to the _M_ parameter in the original [HNSW paper](https://arxiv.org/abs/1603.09320), + controls how many connections (edges) each vector maintains in the HNSW graph. + Each node (vector) is connected to a limited number of neighbors in each layer - up to the value specified by this param. + These edges define the structure of the graph and affect how vectors are reached during search. + * A **larger** number of edges increases the graph’s density, improving connectivity and typically resulting in more accurate search results, + but it may also increase memory usage, slow down index construction, and result in a larger index. + A **smaller** value reduces memory usage and speeds up indexing, + but can result in a sparser graph with weaker connectivity and reduced search accuracy. + * With **static-indexes** - + This param can be set directly in the index definition. For example, see this [index definition](../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-raw-text). + If not explicitly set, or when using **dynamic queries** - + the value is taken from the [Indexing.Corax.VectorSearch.DefaultNumberOfEdges](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofedges) configuration key. + +* **Number of candidates at indexing time**: + + * During index construction, HNSW searches for potential neighbors when inserting each new vector into the graph. + This parameter (commonly referred to as _efConstruction_) controls how many neighboring vectors are considered during this process. + It defines the size of the candidate pool - the number of potential links evaluated for each insertion. + From the candidate pool, HNSW selects up to the configured _number of edges_ for each node. + * A **larger** candidate pool increases the chance of finding better-connected neighbors, improving the overall accuracy of the graph. + However, it may increase indexing time and memory usage. + A **smaller** value speeds up indexing and reduces resource usage, + but can result in a sparser and less accurate graph structure. + * With **static-indexes** - + This param can be set directly in the index definition. For example, see this [index definition](../../ai-integration/vector-search/vector-search-using-static-index.mdx#indexing-raw-text). + If not explicitly set, or when using **dynamic queries** - + the value is taken from the [Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForIndexing](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofcandidatesforindexing) configuration key. + +For all parameters that can be defined at indexing time (including the ones above), +see [Parameters defined at index definition](../../ai-integration/vector-search/vector-search-using-static-index.mdx#parameters-defined-at-index-definition). 
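+
+As a reminder of where these two parameters are set in code, here is a minimal sketch of the relevant fragment of a
+static-index definition (the field name and values are illustrative only; the API mirrors the static-index article):
+
+```csharp
+// Indexing-time HNSW parameters inside a static-index definition:
+VectorIndexes.Add(x => x.VectorFromText,
+    new VectorOptions()
+    {
+        SourceEmbeddingType = VectorEmbeddingType.Text,
+        DestinationEmbeddingType = VectorEmbeddingType.Single,
+        NumberOfEdges = 20,                  // 'M' in the HNSW paper
+        NumberOfCandidatesForIndexing = 20   // 'efConstruction'
+    });
+```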
+ +## Query-time parameters + +Once the index is built, the following query-time parameters influence the vector search - controlling how the HNSW graph is traversed and how results are selected. +These parameters directly affect how many results are found, how similar they are to the input vector, and how they are ranked. + +* **Number of Candidates at query time**: + + * This parameter (commonly referred to as _efSearch_) controls how many nodes in the HNSW graph are evaluated during a vector search - + that is, how many candidates are considered before the search stops. + It defines the size of the priority queue used during the search: the number of best-so-far candidates that RavenDB will track and expand as it descends through the graph. + * A **larger** value increases the breadth of the search, allowing the algorithm to explore a wider set of possible neighbors + and typically improving accuracy and the chances of retrieving all relevant results - but this comes at the cost of slower query performance. + A **smaller** value speeds up queries and reduces resource usage, but increases the chance of missing relevant results due to the more limited exploration. + * This param can be set directly in the query. For example, see this [Query example](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#querying-raw-text). + If not explicitly set, the value is taken from the [Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForQuerying](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofcandidatesforquerying) configuration key. + +* **Minimum Similarity**: + + * This parameter defines a threshold between `0.0` and `1.0` that determines how similar a vector must be to the query in order to be included in the results. + * Vectors with a similarity score below this threshold are excluded from the results - + even if they would otherwise be among the top candidates. + Use this to filter out marginal matches, especially when minimum semantic relevance is important. + * This param can be set directly in the query. For example, see this [Query example](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#querying-raw-text). + If not explicitly set in the query, the value is taken from the [Indexing.Corax.VectorSearch.DefaultMinimumSimilarity](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultminimumsimilarity) configuration key. + The default value of this configuration key is `0.0`, which means no similarity filtering is applied - all candidates found during the search are eligible to be returned, + regardless of how dissimilar they are from the query vector. + +* **Search Method**: + + * You can choose between two vector search modes: + * **Approximate search** (default): + Uses the HNSW algorithm for fast, scalable search. While it doesn’t guarantee the absolute nearest vectors, + it is typically accurate and strongly recommended in most scenarios due to its performance. + * **Exact search**: + Performs a full comparison against all indexed vectors to guarantee the closest matches. + Learn more in [Using exact search](../../ai-integration/vector-search/what-affects-vector-search-results.mdx#using-exact-search) below. + +For all parameters that can be defined at query time, see: +Dynamic queries - [The dynamic query parameters](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#the-dynamic-query-parameters). 
+Static index queries - [Parameters used at query time](../../ai-integration/vector-search/vector-search-using-static-index.mdx#parameters-used-at-query-time).
+
+## Using exact search
+
+* If you need precise control over results and want to avoid the approximations of HNSW,
+  you can perform an exact search instead.
+
+* Exact search performs a full scan of the vector space, comparing the query vector to every indexed vector.
+  This guarantees that the true closest matches are returned.
+
+* While exact search provides guaranteed accuracy, it is more resource-intensive and may be slower - especially for large indexes.
+  However, if the index is small, exact search can still offer reasonable performance.
+  The approximate search remains strongly recommended in most scenarios due to its performance.
+  Use exact search only when maximum precision is critical and the performance trade-off is acceptable.
+
+* Exact search can be used with both static index queries and dynamic queries.
+  For example, see [Dynamic vector search - exact search](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx#dynamic-vector-search---exact-search).
diff --git a/versioned_docs/version-7.1/client-api/_category_.json b/versioned_docs/version-7.1/client-api/_category_.json
new file mode 100644
index 0000000000..b5b683de90
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 1,
+  "label": "Client API"
+}
diff --git a/versioned_docs/version-7.1/client-api/_creating-document-store-csharp.mdx b/versioned_docs/version-7.1/client-api/_creating-document-store-csharp.mdx
new file mode 100644
index 0000000000..9768e15cb9
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_creating-document-store-csharp.mdx
@@ -0,0 +1,125 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Creating a Document Store** is the _first step_ that a RavenDB client application needs to take when working with RavenDB.
+
+* We recommend that your Document Store implement the [Singleton Pattern](https://csharpindepth.com/articles/Singleton) as demonstrated in
+the example code [below](../client-api/creating-document-store.mdx#creating-a-document-store---example).
+Creating more than one Document Store may be resource-intensive, and one instance is sufficient for most use cases.
+
+* In this page:
+  * [Creating a Document Store - Configuration](../client-api/creating-document-store.mdx#creating-a-document-store---configuration)
+  * [Certificate Disposal](../client-api/creating-document-store.mdx#certificate-disposal)
+  * [Creating a Document Store - Example](../client-api/creating-document-store.mdx#creating-a-document-store---example)
+
+## Creating a Document Store - Configuration
+
+The following properties can be configured when creating a new Document Store:
+
+* **Urls** (required)
+
+  * An initial URLs list of your RavenDB cluster nodes that is used when the client accesses the database for the _first_ time.
+
+  * Upon the first database access, the client will fetch the [Database Group Topology](../studio/database/settings/manage-database-group.mdx)
+    from the first server on this list that it successfully connects to. An exception is thrown if the client fails to connect
+    to any of the servers specified in this list.
+    The URLs from the Database Group Topology will supersede this initial URLs list for any future
+    access to that database.
+
+  * **Note**: Do not create a Document Store with URLs that point to servers outside of your cluster.
+
+  * **Note**: This list is not binding. You can always modify your cluster later dynamically, add new nodes or remove existing ones as
+    necessary. Learn more in [Cluster View Operations](../studio/cluster/cluster-view.mdx#cluster-view-operations).
+
+* **[Database](../client-api/setting-up-default-database.mdx)** (optional)
+  The default database which the Client will work against.
+  A different database can be specified when creating a [Session](../client-api/session/opening-a-session.mdx) if needed.
+
+* **[Conventions](../client-api/configuration/conventions.mdx)** (optional)
+  Customize the Client behavior with a variety of options, overriding the default settings.
+
+* **[Certificate](../client-api/setting-up-authentication-and-authorization.mdx)** (optional)
+  The X.509 certificate used to authenticate the client to the RavenDB server.
+
+After setting the above configurations as necessary, call `.Initialize()` to begin using the Document Store.
+
+
+The Document Store is immutable - all of the above configurations are frozen upon calling `.Initialize()`.
+Create a new document store object if you need different default configuration values.
+
+
+## Certificate Disposal
+
+Starting with RavenDB `6.x`, disposing of a store automatically removes any X509Certificate2 certificate installed for
+it, to [prevent the accumulation of unneeded certificate files](https://snede.net/the-most-dangerous-constructor-in-net/).
+
+To **disable** the automatic disposal of certificates, please use the
+[DisposeCertificate](../client-api/configuration/conventions.mdx#disposecertificate) convention.
+
+
+
+{`// Set conventions as necessary (optional)
+Conventions =
+\{
+    // Disable the automatic disposal of certificates when the store is disposed of
+    DisposeCertificate = false
+\},
+`}
+
+
+
+
+
+## Creating a Document Store - Example
+
+This example demonstrates how to implement the singleton pattern in the initialization of a Document Store, as well as how to set initial
+default configurations.
+
+
+
+{`// The \`DocumentStoreHolder\` class holds a single Document Store instance.
+public class DocumentStoreHolder
+\{
+    // Use Lazy to initialize the document store lazily.
+    // This ensures that it is created only once - when first accessing the public \`Store\` property.
+    private static Lazy<IDocumentStore> store = new Lazy<IDocumentStore>(CreateStore);
+
+    public static IDocumentStore Store => store.Value;
+
+    private static IDocumentStore CreateStore()
+    \{
+        IDocumentStore store = new DocumentStore()
+        \{
+            // Define the cluster node URLs (required)
+            Urls = new[] \{ "http://your_RavenDB_cluster_node",
+                           /*some additional nodes of this cluster*/ \},
+
+            // Set conventions as necessary (optional)
+            Conventions =
+            \{
+                MaxNumberOfRequestsPerSession = 10,
+                UseOptimisticConcurrency = true
+            \},
+
+            // Define a default database (optional)
+            Database = "your_database_name",
+
+            // Define a client certificate (optional)
+            Certificate = new X509Certificate2("C:\\\\path_to_your_pfx_file\\\\cert.pfx"),
+
+            // Initialize the Document Store
+        \}.Initialize();
+
+        return store;
+    \}
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/_creating-document-store-java.mdx b/versioned_docs/version-7.1/client-api/_creating-document-store-java.mdx
new file mode 100644
index 0000000000..2eace8873c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_creating-document-store-java.mdx
@@ -0,0 +1,62 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To create an instance of the `DocumentStore` you need to specify a list of URL addresses that point to RavenDB server nodes.
+
+
+Do not open a `DocumentStore` using URL addresses that point to nodes outside your cluster.
+
+
+
+
+{`try (IDocumentStore store = new DocumentStore(new String[]\{ "http://localhost:8080" \}, "Northwind")) \{
+    store.initialize();
+
+
+\}
+`}
+
+
+
+This will instantiate a communication channel between your application and the local RavenDB server instance.
+
+## Initialization
+
+To be able to work on the `DocumentStore`, you will have to call the `initialize` method to get the fully initialized instance of `IDocumentStore`.
+
+
+
+The conventions are frozen after `DocumentStore` initialization, so they need to be set before `initialize` is called.
+
+
+
+## Singleton
+
+Because the document store is a heavyweight object, there should only be one instance created per application (singleton). The document store is a thread-safe object and its typical
+initialization looks like the following:
+
+
+
+{`public static class DocumentStoreHolder \{
+
+    private static IDocumentStore store;
+
+    static \{
+        store = new DocumentStore(new String[]\{ "http://localhost:8080" \}, "Northwind");
+    \}
+
+    public static IDocumentStore getStore() \{
+        return store;
+    \}
+\}
+`}
+
+
+
+
+If you use more than one instance of `DocumentStore`, you should dispose of it after use.
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/_creating-document-store-nodejs.mdx b/versioned_docs/version-7.1/client-api/_creating-document-store-nodejs.mdx
new file mode 100644
index 0000000000..53a6db4961
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_creating-document-store-nodejs.mdx
@@ -0,0 +1,57 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To create an instance of the `DocumentStore` you need to specify a list of URL addresses that point to RavenDB server nodes.
+
+
+
+{`new DocumentStore(urls, [database], [authOptions]);
+`}
+
+
+
+
+Do not open a `DocumentStore` using URL addresses that point to nodes outside your cluster.
+
+
+
+
+{`const store = new DocumentStore(["http://localhost:8080"], "Northwind");
+store.initialize();
+`}
+
+
+
+The above snippet is going to instantiate a communication channel between your application and the local RavenDB server instance.
+
+## Initialization
+
+A `DocumentStore` instance must be initialized before use by calling the `.initialize()` method.
+
+
+
+After `DocumentStore` initialization, the conventions are frozen - modification attempts will result in an error. Conventions need to be set *before* `.initialize()` is called.
+
+
+
+## Singleton
+
+Because the document store is a heavyweight object, there should only be one instance created per application (a singleton - simple to achieve in Node.js by wrapping it in a module). Typical initialization of a document store looks as follows:
+
+
+
+{`// documentStoreHolder.js
+const store = new DocumentStore("http://localhost:8080", "Northwind");
+store.initialize();
+export \{ store as documentStore \};
+`}
+
+
+
+
+If you use more than one instance of `DocumentStore`, you should dispose of it after use by calling its `.dispose()` method.
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/_net-client-versions-csharp.mdx b/versioned_docs/version-7.1/client-api/_net-client-versions-csharp.mdx
new file mode 100644
index 0000000000..50a112298e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_net-client-versions-csharp.mdx
@@ -0,0 +1,16 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The .NET client is released for the `netstandard2.0` and `netcoreapp2.1` targets and works on 32- and 64-bit platforms.
+
+## netstandard2.0
+
+This target allows you to create applications for **.NET Framework 4.6.1+, .NET Core 2.0+ and UWP (Universal Windows Platform) 10.1**.
+
+## netcoreapp2.1
+
+This target allows you to create applications for **.NET Core 2.1+**.
+
+
diff --git a/versioned_docs/version-7.1/client-api/_setting-up-authentication-and-authorization-csharp.mdx b/versioned_docs/version-7.1/client-api/_setting-up-authentication-and-authorization-csharp.mdx
new file mode 100644
index 0000000000..0bfa4f8be3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_setting-up-authentication-and-authorization-csharp.mdx
@@ -0,0 +1,47 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Authentication and authorization** are based on [Client X.509 Certificates](../server/security/authorization/security-clearance-and-permissions.mdx).
+
+* When your RavenDB instance runs on HTTPS, the server has a **Server Certificate** loaded.
+Your application must use a **Client Certificate** in order to access this secure server.
+
+* Obtain a Client Certificate from your cluster admin.
+The Client Certificate is generated by the admin from the [Studio](../server/security/authentication/certificate-management.mdx).
+
+* The security clearance (authorization level) for the generated Client Certificate is set during the process of generating the
+certificate.
+
+* Pass your Client Certificate to the Document Store before initialization, as shown in the example code
+[below](../client-api/setting-up-authentication-and-authorization.mdx#example---initializing-document-store-with-a-client-certificate).
+The server will use this certificate to authenticate the client when the connection is established.
+
+
+## Example - Initializing Document Store With a Client Certificate
+
+
+
+{`// Load an X.509 certificate
+X509Certificate2 clientCertificate = new X509Certificate2("C:\\\\path_to_your_pfx_file\\\\cert.pfx");
+
+using (IDocumentStore store = new DocumentStore()
+\{
+    // Pass your certificate to the \`Certificate\` property
+    Certificate = clientCertificate,
+    Database = "your_database_name",
+    Urls = new[] \{"https://your_RavenDB_server_URL"\}
+\}.Initialize())
+\{
+    // Do your work here
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/_setting-up-authentication-and-authorization-java.mdx b/versioned_docs/version-7.1/client-api/_setting-up-authentication-and-authorization-java.mdx
new file mode 100644
index 0000000000..6db07d9a8a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_setting-up-authentication-and-authorization-java.mdx
@@ -0,0 +1,35 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Authentication and authorization are based on [client X.509 certificates](../server/security/authorization/security-clearance-and-permissions.mdx).
+
+The `certificate` property allows you to pass a certificate which will be used by the RavenDB client to connect to a server.
+
+
+If your RavenDB instance is running on 'https', then your application has to use a client certificate in order to be able to access the server. You can find more information [here](../server/security/overview.mdx).
+
+
+## Example
+
+
+
+{`// load the certificate
+// the .pem file should contain both the public and private keys
+KeyStore clientStore = CertificateUtils.createKeystore("c:\\\\ravendb\\\\app.client.certificate.pem");
+
+try (DocumentStore store = new DocumentStore()) \{
+    store.setCertificate(clientStore);
+    store.setDatabase("Northwind");
+    store.setUrls(new String[]\{ "https://my_secured_raven" \});
+
+    store.initialize();
+
+    // do your work here
+\}
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/_setting-up-authentication-and-authorization-nodejs.mdx b/versioned_docs/version-7.1/client-api/_setting-up-authentication-and-authorization-nodejs.mdx
new file mode 100644
index 0000000000..f0adbc80b3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_setting-up-authentication-and-authorization-nodejs.mdx
@@ -0,0 +1,36 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Authentication and authorization are based on [client X.509 certificates](../server/security/authorization/security-clearance-and-permissions.mdx).
+
+The authentication options argument of the `DocumentStore` constructor allows you to pass a certificate which will be used by the RavenDB client to connect to a server.
+
+
+If your RavenDB server instance is served using `https`, then your application is required to use a client certificate in order to be able to access the server. You can find more information [here](../server/security/overview.mdx).
+
+
+## Example
+
+
+
+{`import \{ DocumentStore \} from "ravendb";
+import * as fs from "fs";
+
+// load certificate and prepare authentication options
+const authOptions = \{
+    certificate: fs.readFileSync("C:\\\\ravendb\\\\client-cert.pfx"),
+    type: "pfx", // or "pem"
+    password: "my passphrase"
+\};
+
+const store = new DocumentStore([ "https://my_secured_raven" ], "Northwind", authOptions);
+store.initialize();
+
+// proceed with your work here
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/_setting-up-default-database-csharp.mdx b/versioned_docs/version-7.1/client-api/_setting-up-default-database-csharp.mdx
new file mode 100644
index 0000000000..6cb187a222
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_setting-up-default-database-csharp.mdx
@@ -0,0 +1,83 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+
+* A **default database** can be set in the Document Store.
+The default database is used when accessing the Document Store methods without explicitly specifying a database.
+
+* You can pass a different database when accessing the Document Store methods.
+This database will override the default database for that method action only.
+The default database value itself will Not change.
+
+* When accessing the Document Store methods, an exception will be thrown if a default database is Not set and No other database was
+explicitly passed.
+
+* In this page:
+  * [Example - Without a Default Database](../client-api/setting-up-default-database.mdx#example---without-a-default-database)
+  * [Example - With a Default Database](../client-api/setting-up-default-database.mdx#example---with-a-default-database)
+
+## Example - Without a Default Database
+
+
+
+{`using (IDocumentStore store = new DocumentStore
+\{
+    Urls = new[] \{ "http://your_RavenDB_server_URL" \}
+    // Default database is not set
+\}.Initialize())
+\{
+    // Specify the 'Northwind' database when opening a Session
+    using (IDocumentSession session = store.OpenSession(database: "Northwind"))
+    \{
+        // Session will operate on the 'Northwind' database
+    \}
+
+    // Specify the 'Northwind' database when sending an Operation
+    store.Maintenance.ForDatabase("Northwind").Send(new DeleteIndexOperation("NorthWindIndex"));
+\}
+`}
+
+
+
+
+
+## Example - With a Default Database
+
+The default database is defined in the Document Store's `Database` property.
+
+
+{`using (IDocumentStore store = new DocumentStore
+\{
+    Urls = new[] \{ "http://your_RavenDB_server_URL" \},
+    // Default database is set to 'Northwind'
+    Database = "Northwind"
+\}.Initialize())
+\{
+    // Using the default database
+    using (IDocumentSession northwindSession = store.OpenSession())
+    \{
+        // Session will operate on the default 'Northwind' database
+    \}
+
+    // Operation for default database
+    store.Maintenance.Send(new DeleteIndexOperation("NorthWindIndex"));
+
+    // Specify the 'AdventureWorks' database when opening a Session
+    using (IDocumentSession adventureWorksSession = store.OpenSession(database: "AdventureWorks"))
+    \{
+        // Session will operate on the specified 'AdventureWorks' database
+    \}
+
+    // Specify the 'AdventureWorks' database when sending an Operation
+    store.Maintenance.ForDatabase("AdventureWorks").Send(new DeleteIndexOperation("AdventureWorksIndex"));
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/_setting-up-default-database-java.mdx b/versioned_docs/version-7.1/client-api/_setting-up-default-database-java.mdx
new file mode 100644
index 0000000000..5af6eeba84
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/_setting-up-default-database-java.mdx
@@ -0,0 +1,66 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The `database` property allows you to set up a default database for a `DocumentStore`. When a default database is set, it is assumed every time you access [operations](../client-api/operations/what-are-operations.mdx) or create a [session](../client-api/session/what-is-a-session-and-how-does-it-work.mdx) without explicitly passing the database they should operate on.
+
+## Example I
+
+
+
+{`// without specifying \`database\`
+// we will need to specify the database in each action
+// if no database is passed explicitly we will get an exception
+
+try (DocumentStore store = new DocumentStore()) \{
+    store.setUrls(new String[]\{ "http://localhost:8080" \});
+    store.initialize();
+
+    try (IDocumentSession session = store.openSession("Northwind")) \{
+        // ...
+    \}
+
+    CompactSettings compactSettings = new CompactSettings();
+    compactSettings.setDatabaseName("Northwind");
+    store.maintenance().server().send(new CompactDatabaseOperation(compactSettings));
+\}
+`}
+
+
+
+## Example II
+
+
+
+{`// when \`database\` is set to \`Northwind\`
+// created \`operations\` or opened \`sessions\`
+// will work on \`Northwind\` database by default
+// if no database is passed explicitly
+try (DocumentStore store = new DocumentStore(new String[]\{ "http://localhost:8080" \}, "Northwind")) \{
+    store.initialize();
+
+    try (IDocumentSession northwindSession = store.openSession()) \{
+        // ...
+    \}
+
+    store.maintenance().send(new DeleteIndexOperation("NorthwindIndex"));
+
+    try (IDocumentSession adventureWorksSession = store.openSession("AdventureWorks")) \{
+        // ...
+    \}
+
+    store.maintenance().forDatabase("AdventureWorks").send(new DeleteIndexOperation("AdventureWorksIndex"));
+\}
+`}
+
+
+
+## Remarks
+
+
+By default, the `database` property of `DocumentStore` is `null`, which means that any action that needs a database name will require you to specify the database explicitly.
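+
+A small sketch building on this remark, reusing the local URL and database names from the examples above: the default database can also be assigned with the `setDatabase` setter, as long as this happens before `initialize` is called:
+
+
+
+{`try (DocumentStore store = new DocumentStore()) \{
+    store.setUrls(new String[]\{ "http://localhost:8080" \});
+    // assign the default database before initialize() is called
+    store.setDatabase("Northwind");
+    store.initialize();
+
+    try (IDocumentSession session = store.openSession()) \{
+        // this session operates on the default 'Northwind' database
+    \}
+\}
+`}
+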
+ + + diff --git a/versioned_docs/version-7.1/client-api/_setting-up-default-database-nodejs.mdx b/versioned_docs/version-7.1/client-api/_setting-up-default-database-nodejs.mdx new file mode 100644 index 0000000000..8dc32d7ea5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/_setting-up-default-database-nodejs.mdx @@ -0,0 +1,67 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`database` property allows you to setup a default database for a `DocumentStore`. Implication of setting up a default database is that each time you access [operations](../client-api/operations/what-are-operations.mdx) or create a [session](../client-api/session/what-is-a-session-and-how-does-it-work.mdx) without explicitly passing database on which they should operate on then default database is assumed. + +## Example I + + + +{`// without specifying \`database\` +// we will need to specify the database in each action +// if no database is passed explicitly we will get an error + +const store = new DocumentStore([ "http://localhost:8080" ]); +store.initialize(); + +\{ + const session = store.openSession("Northwind"); + // ... +\} + +const compactSettings = \{ databaseName: "Northwind" \}; +await store.maintenance.server.send( + new CompactDatabaseOperation(compactSettings)); +`} + + + +## Example II + + + +{`// when \`database\` is set to \`Northwind\` +// created \`operations\` or opened \`sessions\` +// will work on \`Northwind\` database by default +// if no database is passed explicitly +const store = new DocumentStore("http://localhost:8080", "Northwind"); +store.initialize(); + +\{ + const northwindSession = store.openSession(); + // ... +\} + +await store.maintenance.send( + new DeleteIndexOperation("NorthwindIndex")); + +\{ + const adventureWorksSession = store.openSession("AdventureWorks"); + // ... +\} + +await store.maintenance.forDatabase("AdventureWorks") + .send(new DeleteIndexOperation("AdventureWorksIndex")); +`} + + + +## Remarks + + +By default value of `database` property in `DocumentStore` is `null` which means that in any action requiring a database name, we will have to specify the database. + + + diff --git a/versioned_docs/version-7.1/client-api/_what-is-a-document-store-csharp.mdx b/versioned_docs/version-7.1/client-api/_what-is-a-document-store-csharp.mdx new file mode 100644 index 0000000000..b3e54156d7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/_what-is-a-document-store-csharp.mdx @@ -0,0 +1,32 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The **Document Store** is the main Client API object which establishes and manages the communication between your client application and a [RavenDB cluster](../server/clustering/overview.mdx). +Communication is done via HTTP requests. + +* The Document Store holds the [Cluster Topology](../server/clustering/rachis/cluster-topology.mdx), the [Authentication Certificate](../client-api/setting-up-authentication-and-authorization.mdx), +and any configurations & customizations that you may have applied. + +* Caching is built in. All requests to the server(s) and their responses are cached within the Document Store. + +* A single instance of the Document Store ([Singleton Pattern](https://csharpindepth.com/articles/Singleton)) should be created per cluster per the lifetime of your application. 
+ +* The Document Store is thread safe - implemented in a thread safe manner. + +* The Document Store exposes methods to perform operations such as: + * [Session](../client-api/session/what-is-a-session-and-how-does-it-work.mdx) - Use the Session object to perform operations on a specific database + * [Operations](../client-api/operations/what-are-operations.mdx) - Manage the server with a set of low level operation commands + * [Bulk insert](../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx) - Useful when inserting a large amount of data + * [Conventions](../client-api/configuration/conventions.mdx) - Customize the Client API behavior + * [Changes API](../client-api/changes/what-is-changes-api.mdx) - Receive messages from the server + * [Aggressive caching](../client-api/how-to/setup-aggressive-caching.mdx) - Configure caching behavior + * [Events](../client-api/session/how-to/subscribe-to-events.mdx) - Perform custom actions in response to the Session's operations + * [Data Subscriptions](../client-api/data-subscriptions/what-are-data-subscriptions.mdx) - Define & manage data processing on the client side + + + + diff --git a/versioned_docs/version-7.1/client-api/_what-is-a-document-store-java.mdx b/versioned_docs/version-7.1/client-api/_what-is-a-document-store-java.mdx new file mode 100644 index 0000000000..476a85ea28 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/_what-is-a-document-store-java.mdx @@ -0,0 +1,23 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +A document store is our main client API object which establishes and manages the connection channel between an application and a database instance. +It acts as the connection manager and also exposes methods to perform all operations which you can run against an associated server instance. + +The document store object has a list of URL addresses that points to RavenDB server nodes. + +* `DocumentStore` acts against a remote server via HTTP requests, implementing a common `IDocumentStore` interface + +The document store ensures access to the following client API features: + +* [Session](../client-api/session/what-is-a-session-and-how-does-it-work.mdx) +* [Operations](../client-api/operations/what-are-operations.mdx) +* [Conventions](../client-api/configuration/conventions.mdx) +* [Events](../client-api/session/how-to/subscribe-to-events.mdx) +* [Bulk insert](../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx) +* [Changes API](../client-api/changes/what-is-changes-api.mdx) +* [Aggressive cache](../client-api/how-to/setup-aggressive-caching.mdx) + + diff --git a/versioned_docs/version-7.1/client-api/_what-is-a-document-store-nodejs.mdx b/versioned_docs/version-7.1/client-api/_what-is-a-document-store-nodejs.mdx new file mode 100644 index 0000000000..41fc04ec72 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/_what-is-a-document-store-nodejs.mdx @@ -0,0 +1,22 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +A document store is our main client API object which establishes and manages the connection channel between an application and a database instance. +It acts as the connection manager and also exposes methods to perform all operations which you can run against an associated server instance. 
+
+The document store object has a list of URL addresses that point to RavenDB server nodes.
+
+* `DocumentStore` acts against a remote server via HTTP requests
+
+The document store ensures access to the following client API features:
+
+* [Session](../client-api/session/what-is-a-session-and-how-does-it-work.mdx)
+* [Operations](../client-api/operations/what-are-operations.mdx)
+* [Conventions](../client-api/configuration/conventions.mdx)
+* [Events](../client-api/session/how-to/subscribe-to-events.mdx)
+* [Bulk insert](../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx)
+* [Changes API](../client-api/changes/what-is-changes-api.mdx)
+
+
diff --git a/versioned_docs/version-7.1/client-api/bulk-insert/_category_.json b/versioned_docs/version-7.1/client-api/bulk-insert/_category_.json
new file mode 100644
index 0000000000..2fea752c6b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/bulk-insert/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 8,
+    "label": "Bulk Insert"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/bulk-insert/_how-to-work-with-bulk-insert-operation-csharp.mdx b/versioned_docs/version-7.1/client-api/bulk-insert/_how-to-work-with-bulk-insert-operation-csharp.mdx
new file mode 100644
index 0000000000..d1b6becbca
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/bulk-insert/_how-to-work-with-bulk-insert-operation-csharp.mdx
@@ -0,0 +1,206 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* `BulkInsert` is useful when inserting a large quantity of data from the client to the server.
+* It is an optimized time-saving approach with a few
+  [limitations](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#limitations)
+  such as the possibility that interruptions will occur during the operation.
+
+In this page:
+
+* [Syntax](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#syntax)
+* [`BulkInsertOperation`](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#bulkinsertoperation)
+  * [Methods](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#methods)
+  * [Limitations](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#limitations)
+  * [Example](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#example)
+* [`BulkInsertOptions`](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#bulkinsertoptions)
+  * [`CompressionLevel`](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#section)
+  * [`SkipOverwriteIfUnchanged`](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#section-1)
+
+
+
+## Syntax
+
+
+
+{`BulkInsertOperation BulkInsert(string database = null, CancellationToken token = default);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **database** | `string` | The name of the database to perform the bulk operation on.
If `null`, the DocumentStore `Database` will be used. | +| **token** | `CancellationToken` | Cancellation token used to halt the worker operation. | + +| Return Value | | +| ------------- | ----- | +| `BulkInsertOperation`| Instance of `BulkInsertOperation` used for interaction. | + + +{`BulkInsertOperation BulkInsert(string database, BulkInsertOptions options, CancellationToken token = default); +`} + + + +| Parameters | Type | Description | +| ------------- | ------------- | ----- | +| **database** | `string` | The name of the database to perform the bulk operation on.
If `null`, the DocumentStore `Database` will be used. | +| **options** | `BulkInsertOptions` | [Options](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#bulkinsertoptions) to configure BulkInsert. | +| **token** | `CancellationToken` | Cancellation token used to halt the worker operation. | + +| Return Value | | +| ------------- | ----- | +| `BulkInsertOperation`| Instance of `BulkInsertOperation` used for interaction. | + + +{`BulkInsertOperation BulkInsert(BulkInsertOptions options, CancellationToken token = default); +`} + + + +| Parameters | Type | Description | +| ------------- | ------------- | ----- | +| **options** | `BulkInsertOptions` | [Options](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#bulkinsertoptions) to configure BulkInsert. | +| **token** | `CancellationToken` | Cancellation token used to halt the worker operation. | + +| Return Value | | +| ------------- | ----- | +| `BulkInsertOperation`| Instance of `BulkInsertOperation` used for interaction. | + + + +## `BulkInsertOperation` + +The following methods can be used when creating a bulk insert. + +### Methods + +| Signature | Description | +| ----------| ----- | +| **void Abort()** | Abort the operation | +| **void Store(object entity, IMetadataDictionary metadata = null)** | Store the entity, identifier will be generated automatically on client-side. Optional, metadata can be provided for the stored entity. | +| **void Store(object entity, string id, IMetadataDictionary metadata = null)** | Store the entity, with `id` parameter to explicitly declare the entity identifier. Optional, metadata can be provided for the stored entity.| +| **void StoreAsync(object entity, IMetadataDictionary metadata = null)** | Store the entity in an async manner, identifier will be generated automatically on client-side. Optional, metadata can be provided for the stored entity. | +| **void StoreAsync(object entity, string id, IMetadataDictionary metadata = null)** | Store the entity in an async manner, with `id` parameter to explicitly declare the entity identifier. Optional, metadata can be provided for the stored entity.| +| **void Dispose()** | Dispose of an object | +| **void DisposeAsync()** | Dispose of an object in an async manner | + +### Limitations + +* BulkInsert is designed to efficiently push large volumes of data. + Data is therefore streamed and **processed by the server in batches**. + Each batch is fully transactional, but there are no transaction guarantees between the batches + and the operation as a whole is non-transactional. + If the bulk insert operation is interrupted mid-way, some of your data might be persisted + on the server while some of it might not. + * Make sure that your logic accounts for the possibility of an interruption that would cause + some of your data not to persist on the server yet. + * If the operation was interrupted and you choose to re-insert the whole dataset in a new + operation, you can set + [SkipOverwriteIfUnchanged](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#section-1) + as `true` so the operation will overwrite existing documents only if they changed since + the last insertion. + * **If you need full transactionality**, using [session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) + may be a better option. 
+ Note that if `session` is used all of the data is processed in a single transaction, so the + server must have sufficient resources to handle the entire data set included in the transaction. +* Bulk insert is **not thread-safe**. + A single bulk insert should not be accessed concurrently. + * Using multiple bulk inserts concurrently on the same client is supported. + * Usage in an async context is also supported. + +### Example + +#### Create bulk insert + +Here we create a bulk insert operation and insert a million documents of type `Employee`: + + + +{`using (BulkInsertOperation bulkInsert = store.BulkInsert()) +{ + for (int i = 0; i < 1000 * 1000; i++) + { + bulkInsert.Store(new Employee + { + FirstName = "FirstName #" + i, + LastName = "LastName #" + i + }); + } +} +`} + + + + +{`BulkInsertOperation bulkInsert = null; +try +{ + bulkInsert = store.BulkInsert(); + for (int i = 0; i < 1000 * 1000; i++) + { + await bulkInsert.StoreAsync(new Employee + { + FirstName = "FirstName #" + i, + LastName = "LastName #" + i + }); + } +} +finally +{ + if (bulkInsert != null) + { + await bulkInsert.DisposeAsync().ConfigureAwait(false); + } +} +`} + + + + + + +## `BulkInsertOptions` + +The following options can be configured for BulkInsert. + +#### `CompressionLevel`: + +| Parameter | Type | Description | +| ------------- | ------------- | ----- | +| **Optimal** | `string` | Compression level to be used when compressing static files. | +| **Fastest**
(Default)| `string` | Compression level to be used when compressing HTTP responses with `GZip` or `Deflate`. |
+| **NoCompression** | `string` | Does not compress. |
+
+
+For RavenDB versions up to `6.2`, bulk-insert compression is Disabled (`NoCompression`) by default.
+For RavenDB versions from `7.0` on, bulk-insert compression is Enabled (set to `Fastest`) by default.
+
+
+#### `SkipOverwriteIfUnchanged`:
+
+Use this option to avoid overwriting documents when the inserted document is identical to the existing one.
+
+Enabling this flag can spare the server many operations that are triggered by document changes,
+such as re-indexing and updating subscriptions or ETL tasks.
+There is a slight potential cost in the additional comparison that has to be made between
+the existing documents and the ones that are being inserted.
+
+
+
+{`using (var bulk = store.BulkInsert(new BulkInsertOptions
+\{
+    SkipOverwriteIfUnchanged = true
+\}))
+\{
+    // insert your documents here
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/bulk-insert/_how-to-work-with-bulk-insert-operation-java.mdx b/versioned_docs/version-7.1/client-api/bulk-insert/_how-to-work-with-bulk-insert-operation-java.mdx
new file mode 100644
index 0000000000..112d1b54b9
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/bulk-insert/_how-to-work-with-bulk-insert-operation-java.mdx
@@ -0,0 +1,69 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+One of the features that is particularly useful when inserting a large amount of data is `bulk inserting`.
+This is an optimized, time-saving approach with a few drawbacks that will be described later.
+
+## Syntax
+
+
+
+{`BulkInsertOperation bulkInsert();
+
+BulkInsertOperation bulkInsert(String database);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **database** | `String` | The name of the database to perform the bulk operation on.
If `null`, the DocumentStore `Database` will be used. |
+
+| Return Value | |
+| ------------- | ----- |
+| `BulkInsertOperation`| Instance of BulkInsertOperation used for interaction. |
+
+# BulkInsertOperation
+
+### Methods
+
+| Signature | Description |
+| ----------| ----- |
+| **void abort()** | Abort the operation |
+| **void store(Object entity, IMetadataDictionary metadata)** | Store the entity; the identifier will be generated automatically on the client side. Optionally, metadata can be provided for the stored entity. |
+| **void store(Object entity, String id, IMetadataDictionary metadata)** | Store the entity, with the `id` parameter explicitly declaring the entity identifier. Optionally, metadata can be provided for the stored entity.|
+| **void close()** | Close the operation |
+
+## Limitations
+
+There are a couple of limitations to the API:
+
+* The bulk insert operation is broken into batches; each batch is handled in its own transaction,
+  so the operation as a whole is not covered by a single transaction.
+* Bulk insert is not thread-safe; a single bulk insert should not be accessed concurrently.
+  Using multiple bulk inserts concurrently on the same client is supported, as is
+  usage in an async context.
+
+## Example
+
+### Create bulk insert
+
+Here we create a bulk insert operation and insert a million documents of type `Employee`:
+
+
+
+{`try (BulkInsertOperation bulkInsert = store.bulkInsert()) \{
+    for (int i = 0; i < 1_000_000; i++) \{
+        Employee employee = new Employee();
+        employee.setFirstName("FirstName #" + i);
+        employee.setLastName("LastName #" + i);
+        bulkInsert.store(employee);
+    \}
+\}
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/bulk-insert/_how-to-work-with-bulk-insert-operation-nodejs.mdx b/versioned_docs/version-7.1/client-api/bulk-insert/_how-to-work-with-bulk-insert-operation-nodejs.mdx
new file mode 100644
index 0000000000..aed647b7e9
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/bulk-insert/_how-to-work-with-bulk-insert-operation-nodejs.mdx
@@ -0,0 +1,65 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+One of the features that is particularly useful when inserting a large amount of data is `bulk inserting`.
+This is an optimized, time-saving approach with a few drawbacks that will be described later.
+
+## Syntax
+
+
+
+{`documentStore.bulkInsert([database]);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **database** | `string` | The name of the database to perform the bulk operation on.
If `null`, the DocumentStore `Database` will be used. |
+
+| Return Value | |
+| ------------- | ----- |
+| `BulkInsertOperation` | Instance of `BulkInsertOperation` used for interaction. |
+
+# `BulkInsertOperation`
+
+### Methods
+
+| Signature | Description |
+| ----------| ----- |
+| **async abort()** | Aborts the bulk insert operation. Returns a `Promise`. |
+| **async store(entity, [metadata])** | Store the entity; the identifier will be generated automatically on the client side. Optionally, metadata can be provided for the stored entity. Returns a `Promise`. |
+| **async store(entity, id, [metadata])** | Store the entity, with the `id` parameter explicitly declaring the entity identifier. Optionally, metadata can be provided for the stored entity. Returns a `Promise`. |
+| **async finish()** | Finish the bulk insert and flush everything to the server. Returns a `Promise`. |
+
+## Limitations
+
+There are a couple of limitations to the API:
+
+* The bulk insert operation is broken into batches; each batch is handled in its own transaction,
+  so the operation as a whole is not covered by a single transaction.
+
+## Example
+
+### Create bulk insert
+
+Here we create a bulk insert operation and insert a million documents of type `Employee`:
+
+
+
+{`\{
+    const bulkInsert = documentStore.bulkInsert();
+    for (let i = 0; i < 1000000; i++) \{
+        const employee = new Employee("FirstName #" + i, "LastName #" + i);
+        await bulkInsert.store(employee);
+    \}
+
+    await bulkInsert.finish();
+\}
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx b/versioned_docs/version-7.1/client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx
new file mode 100644
index 0000000000..713efc57b9
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx
@@ -0,0 +1,44 @@
+---
+title: "Bulk Insert: How to Work With Bulk Insert Operation"
+hide_table_of_contents: true
+sidebar_label: How to Work With Bulk Insert Operation
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import HowToWorkWithBulkInsertOperationCsharp from './_how-to-work-with-bulk-insert-operation-csharp.mdx';
+import HowToWorkWithBulkInsertOperationJava from './_how-to-work-with-bulk-insert-operation-java.mdx';
+import HowToWorkWithBulkInsertOperationNodejs from './_how-to-work-with-bulk-insert-operation-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/changes/_category_.json b/versioned_docs/version-7.1/client-api/changes/_category_.json
new file mode 100644
index 0000000000..6519a53760
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 10,
+    "label": "Changes API"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-counter-changes-csharp.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-counter-changes-csharp.mdx
new file mode 100644
index 0000000000..8995f849e8
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-counter-changes-csharp.mdx
@@ -0,0 +1,216 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+ +Following methods allow you to subscribe to counter changes: + +- [ForCounter](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forcounter) +- [ForCounterOfDocument](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forcounterofdocument) +- [ForCountersOfDocument](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forcountersofdocument) +- [ForAllCounters](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forallcounters) + +## ForCounter + +Counter changes can be observed using `ForCounter` method. This will subscribe changes from all counters with a given name, no matter in what document counter was changed. + +### Syntax + + + +{`IChangesObservable ForCounter(string counterName); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **counterName** | string | Name of a counter to subscribe to. | + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[CounterChange](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchange)> | Observable that allows to add subscriptions to counter notifications. | + +### Example + + + +{`IDisposable subscription = store + .Changes() + .ForCounter("Likes") + .Subscribe( + change => + \{ + switch (change.Type) + \{ + case CounterChangeTypes.Increment: + // do something + break; + \} + \}); +`} + + + + + +## ForCounterOfDocument + +Specific counter changes of a given document can be observed using `ForCounterOfDocument` method. + +### Syntax + + + +{`IChangesObservable ForCounterOfDocument(string documentId, string counterName); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **documentId** | string | ID of a document to subscribe to. | +| **counterName** | string | Name of a counter to subscribe to. | + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[CounterChange](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchange)> | Observable that allows to add subscriptions to counter notifications. | + +### Example + + + +{`IDisposable subscription = store + .Changes() + .ForCounterOfDocument("companies/1-A", "Likes") + .Subscribe( + change => + \{ + switch (change.Type) + \{ + case CounterChangeTypes.Increment: + // do something + break; + \} + \}); +`} + + + + + +## ForCountersOfDocument + +Counter changes of a specified document can be observed using `ForCountersOfDocument` method. + +### Syntax + + + +{`IChangesObservable ForCountersOfDocument(string documentId); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **documentId** | string | ID of a document to subscribe to. | + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[CounterChange](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchange)> | Observable that allows to add subscriptions to counter notifications. | + +### Example + + + +{`IDisposable subscription = store + .Changes() + .ForCountersOfDocument("companies/1-A") + .Subscribe( + change => + \{ + switch (change.Type) + \{ + case CounterChangeTypes.Increment: + // do something + break; + \} + \}); +`} + + + + + +## ForAllCounters + +Changes for all counters can be observed using `ForAllCounters` method. 
+ +### Syntax + + + +{`IChangesObservable ForAllCounters(); +`} + + + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[CounterChange](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchange)> | Observable that allows to add subscriptions to counter notifications. | + +### Example + + + +{`IDisposable subscription = store + .Changes() + .ForAllCounters() + .Subscribe( + change => + \{ + switch (change.Type) + \{ + case CounterChangeTypes.Increment: + // do something + break; + \} + \}); +`} + + + + + +## CounterChange + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **Type** | [CounterChangeTypes](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchangetypes) | Counter change type enum | +| **Name** | string | Counter name | +| **Value** | long | Counter value after the change | +| **DocumentId** | string | Counter document identifier | +| **ChangeVector** | string | Counter's ChangeVector| + + + +## CounterChangeTypes + +| Name | Value | +| ---- | ----- | +| **None** | `0` | +| **Put** | `1` | +| **Delete** | `2` | +| **Increment** | `4` | + + + +## Remarks + + +To get more method overloads, especially ones supporting **delegates**, please add the +[System.Reactive.Core](https://www.nuget.org/packages/System.Reactive.Core/) package to your project. + + + diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-counter-changes-java.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-counter-changes-java.mdx new file mode 100644 index 0000000000..aa6cd872e6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-counter-changes-java.mdx @@ -0,0 +1,197 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Following methods allow you to subscribe to counter changes: + +- [ForCounter](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forcounter) +- [ForCounterOfDocument](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forcounterofdocument) +- [ForCountersOfDocument](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forcountersofdocument) +- [ForAllCounters](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forallcounters) + +## ForCounter + +Counter changes can be observed using `forCounter` method. This will subscribe changes from all counters with a given name, no matter in what document counter was changed. + +### Syntax + + + +{`IChangesObservable forCounter(String counterName); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **counterName** | String | Name of a counter to subscribe to. | + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[CounterChange](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchange)> | Observable that allows to add subscriptions to counter notifications. | + +### Example + + + +{`store + .changes() + .forCounter("likes") + .subscribe(Observers.create(change -> \{ + switch (change.getType()) \{ + case INCREMENT: + // do something ... + break; + \} + \})); +`} + + + + + +## ForCounterOfDocument + +Specific counter changes of a given document can be observed using `forCounterOfDocument` method. 
+
+### Syntax
+
+
+
+{`IChangesObservable forCounterOfDocument(String documentId, String counterName);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **documentId** | String | ID of a document to subscribe to. |
+| **counterName** | String | Name of a counter to subscribe to. |
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[CounterChange](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchange)> | Observable that allows to add subscriptions to counter notifications. |
+
+### Example
+
+
+
+{`store
+    .changes()
+    .forCounterOfDocument("companies/1-A", "likes")
+    .subscribe(Observers.create(change -> \{
+        switch (change.getType()) \{
+            case INCREMENT:
+                // do something
+                break;
+        \}
+    \}));
+`}
+
+
+
+
+
+## ForCountersOfDocument
+
+Counter changes of a specified document can be observed using the `forCountersOfDocument` method.
+
+### Syntax
+
+
+
+{`IChangesObservable forCountersOfDocument(String documentId);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **documentId** | String | ID of a document to subscribe to. |
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[CounterChange](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchange)> | Observable that allows to add subscriptions to counter notifications. |
+
+### Example
+
+
+
+{`store
+    .changes()
+    .forCountersOfDocument("companies/1-A")
+    .subscribe(Observers.create(change -> \{
+        switch (change.getType()) \{
+            case INCREMENT:
+                // do something ...
+                break;
+        \}
+    \}));
+`}
+
+
+
+
+
+## ForAllCounters
+
+Changes for all counters can be observed using the `forAllCounters` method.
+
+### Syntax
+
+
+
+{`IChangesObservable forAllCounters();
+`}
+
+
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[CounterChange](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchange)> | Observable that allows to add subscriptions to counter notifications. |
+
+### Example
+
+
+
+{`store
+    .changes()
+    .forAllCounters()
+    .subscribe(Observers.create(change -> \{
+        switch (change.getType()) \{
+            case INCREMENT:
+                // do something ...
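+                // (the CounterChange also carries the counter name, its new value, and the owning document id)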
+                break;
+        \}
+    \}));
+`}
+
+
+
+
+## CounterChange
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **Type** | [CounterChangeTypes](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#counterchangetypes) | Counter change type enum |
+| **Name** | String | Counter name |
+| **Value** | Long | Counter value after the change |
+| **DocumentId** | String | Counter document identifier |
+| **ChangeVector** | String | Counter's ChangeVector|
+
+
+
+## CounterChangeTypes
+
+| Name | Value |
+| ---- | ----- |
+| **NONE** | `0` |
+| **PUT** | `1` |
+| **DELETE** | `2` |
+| **INCREMENT** | `4` |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-document-changes-csharp.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-document-changes-csharp.mdx
new file mode 100644
index 0000000000..de87eb28f3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-document-changes-csharp.mdx
@@ -0,0 +1,215 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Following methods allow you to subscribe to document changes:
+
+- [ForDocument](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocument)
+- [ForDocumentsInCollection](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsincollection)
+- [ForDocumentsStartingWith](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsstartingwith)
+- [ForAllDocuments](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#foralldocuments)
+
+## ForDocument
+
+Single document changes can be observed using the `ForDocument` method.
+
+### Syntax
+
+
+
+{`IChangesObservable ForDocument(string docId);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **docId** | string | ID of a document for which notifications will be processed. |
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for given document. |
+
+### Example
+
+
+
+{`IDisposable subscription = store
+    .Changes()
+    .ForDocument("employees/1")
+    .Subscribe(
+        change =>
+        \{
+            switch (change.Type)
+            \{
+                case DocumentChangeTypes.Put:
+                    // do something
+                    break;
+                case DocumentChangeTypes.Delete:
+                    // do something
+                    break;
+            \}
+        \});
+`}
+
+
+
+
+
+## ForDocumentsInCollection
+
+To observe all document changes in a particular collection, use the `ForDocumentsInCollection` method. This method filters documents by the `@collection` metadata property value.
+
+### Syntax
+
+
+
+{`IChangesObservable ForDocumentsInCollection(string collectionName);
+
+IChangesObservable ForDocumentsInCollection();
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **collectionName** | string | Name of document collection for which notifications will be processed. |
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for given document collection name. |
+
+
+The overload with the `TEntity` type uses `Conventions.FindCollectionName` to get the collection name.
+ + +### Example + + + +{`IDisposable subscription = store + .Changes() + .ForDocumentsInCollection() + .Subscribe(change => Console.WriteLine("\{0\} on document \{1\}", change.Type, change.Id)); +`} + + + +or + + + +{`string collectionName = store.Conventions.FindCollectionName(typeof(Employee)); +IDisposable subscription = store + .Changes() + .ForDocumentsInCollection(collectionName) + .Subscribe(change => Console.WriteLine("\{0\} on document \{1\}", change.Type, change.Id)); +`} + + + + + +## ForDocumentsStartingWith + +To observe all document changes for documents with ID that contains given prefix use `ForDocumentsStartingWith` method. + +### Syntax + + + +{`IChangesObservable ForDocumentsStartingWith(string docIdPrefix); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **docIdPrefix** | string | Document ID prefix for which notifications will be processed. | + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for given document ID prefix. | + +### Example + + + +{`IDisposable subscription = store + .Changes() + .ForDocumentsStartingWith("employees/1") // employees/1, employees/10, employees/11, etc. + .Subscribe(change => Console.WriteLine("\{0\} on document \{1\}", change.Type, change.Id)); +`} + + + + + +## ForAllDocuments + +To observe all document changes use `ForAllDocuments` method. + +### Syntax + + + +{`IChangesObservable ForAllDocuments(); +`} + + + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for all documents. | + +### Example + + + +{`IDisposable subscription = store + .Changes() + .ForAllDocuments() // employees/1, orders/1, customers/1, etc. + .Subscribe(change => Console.WriteLine("\{0\} on document \{1\}", change.Type, change.Id)); +`} + + + + + +## DocumentChange + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **Type** | [DocumentChangeTypes](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchangetypes) | Document change type enum | +| **Id** | string | Document identifier | +| **CollectionName** | string | Document's collection name | +| **TypeName** | string | Type name | +| **ChangeVector** | string | Document's ChangeVector| + + + +## DocumentChangeTypes + +| Name | Value | +| ---- | ----- | +| **None** | `0` | +| **Put** | `1` | +| **Delete** | `2` | +| **BulkInsertStarted** | `4` | +| **BulkInsertEnded** | `8` | +| **BulkInsertError** | `16` | +| **DeleteOnTombstoneReplication** | `32` | +| **Conflict** | `64` | +| **Common** | `Put & Delete` | + + + +## Remarks + + +To get more method overloads, especially ones supporting **delegates**, please add the +[System.Reactive.Core](https://www.nuget.org/packages/System.Reactive.Core/) package to your project. 
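+
+For instance, here is a minimal sketch that assumes the System.Reactive package is installed; its `Where` and `Subscribe` extension methods apply because `IChangesObservable` implements `IObservable`. The handler below is hypothetical and reacts only to document deletions:
+
+
+
+{`// Sketch: requires the System.Reactive package (see the note above).
+// Filter the notifications client-side before subscribing.
+IDisposable subscription = store
+    .Changes()
+    .ForAllDocuments()
+    .Where(change => change.Type == DocumentChangeTypes.Delete)
+    .Subscribe(change => Console.WriteLine("\{0\} was deleted", change.Id));
+`}
+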
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-document-changes-java.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-document-changes-java.mdx
new file mode 100644
index 0000000000..891331315d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-document-changes-java.mdx
@@ -0,0 +1,212 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Following methods allow you to subscribe to document changes:
+
+- [forDocument](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocument)
+- [forDocumentsInCollection](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsincollection)
+- [forDocumentsStartingWith](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsstartingwith)
+- [forAllDocuments](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#foralldocuments)
+
+## forDocument
+
+Single document changes can be observed using the `forDocument` method.
+
+### Syntax
+
+
+
+{`IChangesObservable forDocument(String docId);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **docId** | String | ID of a document for which notifications will be processed. |
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for given document. |
+
+### Example
+
+
+
+{`CleanCloseable subscription = store.changes()
+    .forDocument("employees/1")
+    .subscribe(Observers.create(change -> \{
+        switch (change.getType()) \{
+            case PUT:
+                // do something
+                break;
+            case DELETE:
+                // do something
+                break;
+        \}
+    \}));
+`}
+
+
+
+
+
+## forDocumentsInCollection
+
+To observe all document changes in a particular collection, use the `forDocumentsInCollection` method. This method filters documents by the `@collection` metadata property value.
+
+### Syntax
+
+
+
+{`IChangesObservable forDocumentsInCollection(String collectionName);
+
+IChangesObservable forDocumentsInCollection(Class clazz);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **collectionName** | String | Name of document collection for which notifications will be processed. |
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for given document collection name. |
+
+
+The overload that takes a `Class` uses the conventions' `findCollectionName` function to get the collection name.
+ + +### Example + + + +{`CleanCloseable subscription = store + .changes() + .forDocumentsInCollection(Employee.class) + .subscribe(Observers.create(change -> \{ + System.out.println(change.getType() + " on document " + change.getId()); + \})); +`} + + + +or + + + +{`String collectionName = store.getConventions().getFindCollectionName().apply(Employee.class); +CleanCloseable subscription = store + .changes() + .forDocumentsInCollection(collectionName) + .subscribe(Observers.create(change -> \{ + System.out.println(change.getType() + " on document " + change.getId()); + \})); +`} + + + + + +## forDocumentsStartingWith + +To observe all document changes for documents with ID that contains given prefix use `forDocumentsStartingWith` method. + +### Syntax + + + +{`IChangesObservable forDocumentsStartingWith(String docIdPrefix); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **docIdPrefix** | String | Document ID prefix for which notifications will be processed. | + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for given document ID prefix. | + +### Example + + + +{`CleanCloseable subscription = store + .changes() + .forDocumentsStartingWith("employees/1") // employees/1, employees/10, employees/11, etc. + .subscribe(Observers.create(change -> \{ + System.out.println(change.getType() + " on document " + change.getId()); + \})); +`} + + + + + +## forAllDocuments + +To observe all document changes use `forAllDocuments` method. + +### Syntax + + + +{`IChangesObservable forAllDocuments(); +`} + + + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for all documents. 
|
+
+### Example
+
+
+
+{`CleanCloseable subscription = store
+    .changes()
+    .forAllDocuments()
+    .subscribe(Observers.create(change -> \{
+        System.out.println(change.getType() + " on document " + change.getId());
+    \}));
+`}
+
+
+
+
+
+## DocumentChange
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **Type** | [DocumentChangeTypes](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchangetypes) | Document change type enum |
+| **Id** | String | Document identifier |
+| **CollectionName** | String | Document's collection name |
+| **TypeName** | String | Type name |
+| **ChangeVector** | String | Document's ChangeVector|
+
+
+
+## DocumentChangeTypes
+
+| Name |
+| ---- |
+| **NONE** |
+| **PUT** |
+| **DELETE** |
+| **BULK_INSERT_STARTED** |
+| **BULK_INSERT_ENDED** |
+| **BULK_INSERT_ERROR** |
+| **DELETE_ON_TOMBSTONE_REPLICATION** |
+| **CONFLICT** |
+| **COMMON** |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-document-changes-nodejs.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-document-changes-nodejs.mdx
new file mode 100644
index 0000000000..27cb7f7883
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-document-changes-nodejs.mdx
@@ -0,0 +1,207 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Following methods allow you to subscribe to document changes:
+
+- [forDocument()](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocument)
+- [forDocumentsInCollection()](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsincollection)
+- [forDocumentsStartingWith()](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsstartingwith)
+- [forAllDocuments()](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#foralldocuments)
+
+## forDocument
+
+Single document changes can be observed using the `forDocument()` method.
+
+### Syntax
+
+
+
+{`store.changes().forDocument(docId);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **docId** | string | ID of a document for which notifications will be processed. |
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add listeners for events for given document. |
+
+### Example
+
+
+
+{`store.changes().forDocument("employees/1")
+    .on("error", err => \{
+        // handle error
+    \})
+    .on("data", change => \{
+        switch (change.type) \{
+            case "Put":
+                // do something
+                break;
+            case "Delete":
+                // do something
+                break;
+        \}
+    \});
+`}
+
+
+
+
+
+## forDocumentsInCollection
+
+To observe all document changes in a particular collection, use the `forDocumentsInCollection()` method. This method filters documents by the `@collection` metadata property value.
+
+### Syntax
+
+
+
+{`store.changes().forDocumentsInCollection(collectionName);
+store.changes().forDocumentsInCollection(clazz);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **collectionName** | string | Name of document collection for which notifications will be processed.
| + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for given document collection name. | + + +Overload with entity type uses `conventions.getCollectionNameForType()` to get collection name. + + +### Example + + + +{`store.changes().forDocumentsInCollection(Employee) + .on("data", change => \{ + console.log(change.type + " on document " + change.id); + \}); +`} + + + +or + + + +{`const collectionName = store.conventions.getCollectionNameForType(Employee); +store.changes() + .forDocumentsInCollection(collectionName) + .on("data", change => \{ + console.log(change.type + " on document " + change.id); + \}); +`} + + + + + +## forDocumentsStartingWith + +To observe all document changes for documents with ID that contains given prefix use `forDocumentsStartingWith()` method. + +### Syntax + + + +{`store.changes().forDocumentsStartingWith(docIdPrefix); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **docIdPrefix** | string | Document ID prefix for which notifications will be processed. | + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for given document ID prefix. | + +### Example + + + +{`store.changes() + .forDocumentsStartingWith("employees/1") // employees/1, employees/10, employees/11, etc. + .on("data", change => \{ + console.log(change.type + " on document " + change.id); + \}); +`} + + + + + +## forAllDocuments + +To observe all document changes use `forAllDocuments()` method. + +### Syntax + + + +{`store.changes().forAllDocuments(); +`} + + + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)> | Observable that allows to add subscriptions to notifications for all documents. 
|
+
+### Example
+
+
+
+{`store.changes().forAllDocuments()
+    .on("data", change => \{
+        console.log(change.type + " on document " + change.id);
+    \});
+`}
+
+
+
+
+
+## DocumentChange
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **type** | [DocumentChangeTypes](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchangetypes) | Document change type enum |
+| **id** | string | Document identifier |
+| **collectionName** | string | Document's collection name |
+| **typeName** | string | Type name |
+| **changeVector** | string | Document's ChangeVector |
+
+
+
+## DocumentChangeTypes
+
+| Name |
+| ---- |
+| **None** |
+| **Put** |
+| **Delete** |
+| **BulkInsertStarted** |
+| **BulkInsertEnded** |
+| **BulkInsertError** |
+| **DeleteOnTombstoneReplication** |
+| **Conflict** |
+| **Common** |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-index-changes-csharp.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-index-changes-csharp.mdx
new file mode 100644
index 0000000000..f7c65184ce
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-index-changes-csharp.mdx
@@ -0,0 +1,159 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The following methods allow you to subscribe to index changes:
+
+- [ForIndex](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forindex)
+- [ForAllIndexes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forallindexes)
+
+## ForIndex
+
+Changes to a single index can be observed using the `ForIndex` method.
+
+### Syntax
+
+
+
+{`IChangesObservable ForIndex(string indexName);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **indexName** | string | Name of an index for which notifications will be processed. |
+
+| Return value | |
+| ------------- | ----- |
+| IChangesObservable<[IndexChange](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#indexchange)> | Observable that allows you to add subscriptions to notifications for the index with the given name. |
+
+### Example
+
+
+
+{`IDisposable subscription = store
+    .Changes()
+    .ForIndex("Orders/All")
+    .Subscribe(
+        change =>
+        \{
+            switch (change.Type)
+            \{
+                case IndexChangeTypes.None:
+                    // Do something
+                    break;
+                case IndexChangeTypes.BatchCompleted:
+                    // Do something
+                    break;
+                case IndexChangeTypes.IndexAdded:
+                    // Do something
+                    break;
+                case IndexChangeTypes.IndexRemoved:
+                    // Do something
+                    break;
+                case IndexChangeTypes.IndexDemotedToIdle:
+                    // Do something
+                    break;
+                case IndexChangeTypes.IndexPromotedFromIdle:
+                    // Do something
+                    break;
+                case IndexChangeTypes.IndexDemotedToDisabled:
+                    // Do something
+                    break;
+                case IndexChangeTypes.IndexMarkedAsErrored:
+                    // Do something
+                    break;
+                case IndexChangeTypes.SideBySideReplace:
+                    // Do something
+                    break;
+                case IndexChangeTypes.IndexPaused:
+                    // Do something
+                    break;
+                case IndexChangeTypes.LockModeChanged:
+                    // Do something
+                    break;
+                case IndexChangeTypes.PriorityChanged:
+                    // Do something
+                    break;
+                default:
+                    throw new ArgumentOutOfRangeException();
+            \}
+        \});
+`}
+
+
+
+
+
+## ForAllIndexes
+
+Changes to all indexes can be observed using the `ForAllIndexes` method.
+
+| Return value | |
+| ------------- | ----- |
+| IChangesObservable<[IndexChange](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#indexchange)> | Observable that allows you to add subscriptions to notifications for all indexes. |
+
+### Syntax
+
+
+
+{`IChangesObservable ForAllIndexes();
+`}
+
+
+
+### Example
+
+
+
+{`IDisposable subscription = store
+    .Changes()
+    .ForAllIndexes()
+    .Subscribe(change => Console.WriteLine("\{0\} on index \{1\}", change.Type, change.Name));
+`}
+
+
+
+
+
+## IndexChange
+
+### Properties
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **Type** | [IndexChangeTypes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#indexchangetypes) | Change type |
+| **Name** | string | Index name |
+| **Etag** | long? | Index Etag |
+
+
+
+## IndexChangeTypes
+
+| Name | Value |
+| ---- | ----- |
+| **None** | `0` |
+| **BatchCompleted** | `1` |
+| **IndexAdded** | `8` |
+| **IndexRemoved** | `16` |
+| **IndexDemotedToIdle** | `32` |
+| **IndexPromotedFromIdle** | `64` |
+| **IndexDemotedToDisabled** | `256` |
+| **IndexMarkedAsErrored** | `512` |
+| **SideBySideReplace** | `1024` |
+| **IndexPaused** | `4096` |
+| **LockModeChanged** | `8192` |
+| **PriorityChanged** | `16384` |
+
+
+
+## Remarks
+
+
+To get more method overloads, especially ones supporting **delegates**, please add the
+[System.Reactive.Core](https://www.nuget.org/packages/System.Reactive.Core/) package to your project.
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-index-changes-java.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-index-changes-java.mdx
new file mode 100644
index 0000000000..caa4af0fb7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-index-changes-java.mdx
@@ -0,0 +1,155 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The following methods allow you to subscribe to index changes:
+
+- [forIndex](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forindex)
+- [forAllIndexes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forallindexes)
+
+## forIndex
+
+Changes to a single index can be observed using the `forIndex` method.
+
+### Syntax
+
+
+
+{`IChangesObservable forIndex(String indexName);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **indexName** | String | Name of an index for which notifications will be processed. |
+
+| Return value | |
+| ------------- | ----- |
+| IChangesObservable<[IndexChange](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#indexchange)> | Observable that allows you to add subscriptions to notifications for the index with the given name.
|
+
+### Example
+
+
+
+{`CleanCloseable subscription = store
+    .changes()
+    .forIndex("Orders/All")
+    .subscribe(Observers.create(change -> \{
+        switch (change.getType()) \{
+            case NONE:
+                // do something
+                break;
+            case BATCH_COMPLETED:
+                // do something
+                break;
+            case INDEX_ADDED:
+                // do something
+                break;
+            case INDEX_REMOVED:
+                // do something
+                break;
+            case INDEX_DEMOTED_TO_IDLE:
+                // do something
+                break;
+            case INDEX_PROMOTED_FROM_IDLE:
+                // do something
+                break;
+            case INDEX_DEMOTED_TO_DISABLED:
+                // do something
+                break;
+            case INDEX_MARKED_AS_ERRORED:
+                // do something
+                break;
+            case SIDE_BY_SIDE_REPLACE:
+                // do something
+                break;
+            case RENAMED:
+                // do something
+                break;
+            case INDEX_PAUSED:
+                // do something
+                break;
+            case LOCK_MODE_CHANGED:
+                // do something
+                break;
+            case PRIORITY_CHANGED:
+                // do something
+                break;
+            default:
+                throw new IllegalArgumentException();
+        \}
+    \}));
+`}
+
+
+
+
+
+## forAllIndexes
+
+Changes to all indexes can be observed using the `forAllIndexes` method.
+
+| Return value | |
+| ------------- | ----- |
+| IChangesObservable<[IndexChange](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#indexchange)> | Observable that allows you to add subscriptions to notifications for all indexes. |
+
+### Syntax
+
+
+
+{`IChangesObservable forAllIndexes();
+`}
+
+
+
+### Example
+
+
+
+{`CleanCloseable subscription = store
+    .changes()
+    .forAllIndexes()
+    .subscribe(Observers.create(change -> \{
+        System.out.println(change.getType() + " on index " + change.getName());
+    \}));
+`}
+
+
+
+
+
+## IndexChange
+
+### Properties
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **Type** | [IndexChangeTypes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#indexchangetypes) | Change type |
+| **Name** | String | Index name |
+| **Etag** | Long | Index Etag |
+
+
+
+## IndexChangeTypes
+
+| Name |
+| ---- |
+| **NONE** |
+| **BATCH_COMPLETED** |
+| **INDEX_ADDED** |
+| **INDEX_REMOVED** |
+| **INDEX_DEMOTED_TO_IDLE** |
+| **INDEX_PROMOTED_FROM_IDLE** |
+| **INDEX_DEMOTED_TO_DISABLED** |
+| **INDEX_MARKED_AS_ERRORED** |
+| **SIDE_BY_SIDE_REPLACE** |
+| **RENAMED** |
+| **INDEX_PAUSED** |
+| **LOCK_MODE_CHANGED** |
+| **PRIORITY_CHANGED** |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-index-changes-nodejs.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-index-changes-nodejs.mdx
new file mode 100644
index 0000000000..97601e4904
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-index-changes-nodejs.mdx
@@ -0,0 +1,151 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The following methods allow you to subscribe to index changes:
+
+- [forIndex()](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forindex)
+- [forAllIndexes()](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forallindexes)
+
+## forIndex
+
+Changes to a single index can be observed using the `forIndex()` method.
+
+### Syntax
+
+
+
+{`store.changes().forIndex(indexName);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **indexName** | string | Name of an index for which notifications will be processed.
|
+
+| Return value | |
+| ------------- | ----- |
+| IChangesObservable<[IndexChange](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#indexchange)> | Observable that allows you to add subscriptions to notifications for the index with the given name. |
+
+### Example
+
+
+
+{`store.changes().forIndex("Orders/All")
+    .on("data", change => \{
+        switch (change.type) \{
+            case "None":
+                // do something
+                break;
+            case "BatchCompleted":
+                // do something
+                break;
+            case "IndexAdded":
+                // do something
+                break;
+            case "IndexRemoved":
+                // do something
+                break;
+            case "IndexDemotedToIdle":
+                // do something
+                break;
+            case "IndexPromotedFromIdle":
+                // do something
+                break;
+            case "IndexDemotedToDisabled":
+                // do something
+                break;
+            case "IndexMarkedAsErrored":
+                // do something
+                break;
+            case "SideBySideReplace":
+                // do something
+                break;
+            case "Renamed":
+                // do something
+                break;
+            case "IndexPaused":
+                // do something
+                break;
+            case "LockModeChanged":
+                // do something
+                break;
+            case "PriorityChanged":
+                // do something
+                break;
+            default:
+                throw new Error("Not supported.");
+        \}
+    \});
+`}
+
+
+
+
+
+## forAllIndexes
+
+Changes to all indexes can be observed using the `forAllIndexes()` method.
+
+| Return value | |
+| ------------- | ----- |
+| IChangesObservable<[IndexChange](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#indexchange)> | Observable that allows you to add subscriptions to notifications for all indexes. |
+
+### Syntax
+
+
+
+{`store.changes().forAllIndexes();
+`}
+
+
+
+### Example
+
+
+
+{`store.changes().forAllIndexes()
+    .on("data", change => \{
+        console.log(change.type + " on index " + change.name);
+    \});
+`}
+
+
+
+
+
+## IndexChange
+
+### Properties
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **type** | [IndexChangeTypes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#indexchangetypes) | Change type |
+| **name** | string | Index name |
+| **etag** | number | Index Etag |
+
+
+
+## IndexChangeTypes
+
+| Name |
+| ---- |
+| **None** |
+| **BatchCompleted** |
+| **IndexAdded** |
+| **IndexRemoved** |
+| **IndexDemotedToIdle** |
+| **IndexPromotedFromIdle** |
+| **IndexDemotedToDisabled** |
+| **IndexMarkedAsErrored** |
+| **SideBySideReplace** |
+| **Renamed** |
+| **IndexPaused** |
+| **LockModeChanged** |
+| **PriorityChanged** |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-operation-changes-csharp.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-operation-changes-csharp.mdx
new file mode 100644
index 0000000000..97d0889015
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-operation-changes-csharp.mdx
@@ -0,0 +1,168 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The following methods allow you to subscribe to operation changes:
+
+- [ForOperationId](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foroperationid)
+- [ForAllOperations](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foralloperations)
+
+## ForOperationId
+
+Operation changes for one operation can be observed using the `ForOperationId` method.
+
+
+Please note that from RavenDB 6.2 on, operation changes can be tracked only on a **specific node**.
+The purpose of this change is to improve results consistency, as an operation may behave very differently
+on different nodes, and cross-cluster tracking of an operation may become confusing and ineffective if
+the operation fails over from one node to another.
+Tracking operations will therefore be possible only if the `Changes` API was
+[opened](../../client-api/changes/what-is-changes-api.mdx#accessing-changes-api) using a method that limits
+tracking to a single node: `store.Changes(dbName, nodeTag)`
+
+
+### Syntax
+
+
+
+{`IChangesObservable ForOperationId(long operationId);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **operationId** | long | ID of an operation for which notifications will be processed. |
+
+| Return value | |
+| ------------- | ----- |
+| IChangesObservable<[OperationStatusChange](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#operationchange)> | Observable that allows you to add subscriptions to notifications for an operation with a given ID. |
+
+### Example
+
+
+
+{`IDisposable subscription = store
+    .Changes(dbName, nodeTag)
+    .ForOperationId(operationId)
+    .Subscribe(
+        change =>
+        \{
+            switch (change.State.Status)
+            \{
+                case OperationStatus.InProgress:
+                    // Do something
+                    break;
+                case OperationStatus.Completed:
+                    // Do something
+                    break;
+                case OperationStatus.Faulted:
+                    // Do something
+                    break;
+                case OperationStatus.Canceled:
+                    // Do something
+                    break;
+                default:
+                    throw new ArgumentOutOfRangeException();
+            \}
+        \});
+`}
+
+
+
+
+
+## ForAllOperations
+
+Operation changes for all operations can be observed using the `ForAllOperations` method.
+
+
+Please note that from RavenDB 6.2 on, operation changes can be tracked only on a **specific node**.
+The purpose of this change is to improve results consistency, as an operation may behave very differently
+on different nodes, and cross-cluster tracking of an operation may become confusing and ineffective if
+the operation fails over from one node to another.
+Tracking operations will therefore be possible only if the `Changes` API was
+[opened](../../client-api/changes/what-is-changes-api.mdx#accessing-changes-api) using a method that limits
+tracking to a single node: `store.Changes(dbName, nodeTag)`
+
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[OperationStatusChange](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#operationchange)> | Observable that allows you to add subscriptions to notifications for all operations.
|
+
+### Syntax
+
+
+
+{`IChangesObservable ForAllOperations();
+`}
+
+
+
+### Example
+
+
+
+{`IDisposable subscription = store
+    .Changes(dbName, nodeTag)
+    .ForAllOperations()
+    .Subscribe(change => Console.WriteLine("Operation #\{1\} reports progress: \{0\}", change.State.Progress.ToJson(), change.OperationId));
+`}
+
+
+
+
+
+## OperationChange
+
+### Properties
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **State** | [OperationState](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#operationstate) | Operation state |
+| **OperationId** | long | Operation ID |
+
+
+
+## OperationState
+
+### Members
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **Result** | [IOperationResult](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#operationresult) | Operation result |
+| **Progress** | IOperationProgress | Instance of IOperationProgress (JSON representation of the progress) |
+| **Status** | [OperationStatus](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#operationstatus) | Operation status |
+
+
+## OperationResult
+
+### Members
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **Message** | string | Operation message |
+| **ShouldPersist** | bool | Determines whether the result should be persisted in storage |
+
+
+## OperationStatus
+
+| Name | Description |
+| ---- | ----- |
+| **InProgress** | `Indicates that the operation is in progress` |
+| **Completed** | `Indicates that the operation has completed` |
+| **Faulted** | `Indicates that the operation is faulted` |
+| **Canceled** | `Indicates that the operation has been canceled` |
+
+
+## Remarks
+
+
+To get more method overloads, especially ones supporting **delegates**, please add the
+[System.Reactive.Core](https://www.nuget.org/packages/System.Reactive.Core/) package to your project.
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-operation-changes-java.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-operation-changes-java.mdx
new file mode 100644
index 0000000000..afd9a0c2d2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-operation-changes-java.mdx
@@ -0,0 +1,95 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The following methods allow you to subscribe to operation changes:
+
+- [forOperationId](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foroperation)
+- [forAllOperations](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foralloperations)
+
+## forOperation
+
+Operation changes for one operation can be observed using the `forOperationId` method.
+
+### Syntax
+
+
+
+{`IChangesObservable forOperationId(long operationId);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **operationId** | long | ID of an operation for which notifications will be processed. |
+
+| Return value | |
+| ------------- | ----- |
+| IChangesObservable<[OperationStatusChange](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#operationchange)> | Observable that allows you to add subscriptions to notifications for an operation with a given ID.
|
+
+### Example
+
+
+
+{`CleanCloseable subscription = store
+    .changes()
+    .forOperationId(operationId)
+    .subscribe(Observers.create(change -> \{
+        ObjectNode operationState = change.getState();
+
+        // do something
+    \}));
+`}
+
+
+
+
+
+## forAllOperations
+
+Operation changes for all operations can be observed using the `forAllOperations` method.
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[OperationStatusChange](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#operationchange)> | Observable that allows you to add subscriptions to notifications for all operations. |
+
+### Syntax
+
+
+
+{`IChangesObservable forAllOperations();
+`}
+
+
+
+### Example
+
+
+
+{`CleanCloseable subscription = store
+    .changes()
+    .forAllOperations()
+    .subscribe(Observers.create(change -> \{
+        System.out.println("Operation #" + change.getOperationId());
+    \}));
+`}
+
+
+
+
+
+## OperationChange
+
+### Properties
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **State** | ObjectNode | Operation state |
+| **OperationId** | long | Operation ID |
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-operation-changes-nodejs.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-operation-changes-nodejs.mdx
new file mode 100644
index 0000000000..a7de35dedf
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-operation-changes-nodejs.mdx
@@ -0,0 +1,91 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The following methods allow you to subscribe to operation changes:
+
+- [forOperationId()](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foroperation)
+- [forAllOperations()](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foralloperations)
+
+## forOperation
+
+Operation changes for one operation can be observed using the `forOperationId()` method.
+
+### Syntax
+
+
+
+{`store.changes().forOperationId(operationId);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **operationId** | number | ID of an operation for which notifications will be processed. |
+
+| Return value | |
+| ------------- | ----- |
+| IChangesObservable<[OperationStatusChange](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#operationchange)> | Observable that allows you to add subscriptions to notifications for an operation with a given ID. |
+
+### Example
+
+
+
+{`store.changes().forOperationId(operationId)
+    .on("data", change => \{
+        const operationState = change.state;
+
+        // do something
+    \});
+`}
+
+
+
+
+
+## forAllOperations
+
+Operation changes for all operations can be observed using the `forAllOperations()` method.
+
+| Return Value | |
+| ------------- | ----- |
+| IChangesObservable<[OperationStatusChange](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#operationchange)> | Observable that allows you to add subscriptions to notifications for all operations.
| + +### Syntax + + + +{`store.changes().forAllOperations(); +`} + + + +### Example + + + +{`store.changes().forAllOperations() + .on("data", change => \{ + console.log("Operation #" + change.operationId); + \}); +`} + + + + + +## OperationChange + +### Properties + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **state** | object | Operation state | +| **operationId** | number | Operation ID | + + + + + diff --git a/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-time-series-changes-csharp.mdx b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-time-series-changes-csharp.mdx new file mode 100644 index 0000000000..b1cad23dbf --- /dev/null +++ b/versioned_docs/version-7.1/client-api/changes/_how-to-subscribe-to-time-series-changes-csharp.mdx @@ -0,0 +1,231 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the following methods to subscribe to Time Series Changes: + * `ForTimeSeries` + Track **all** time series with a given name + * `ForTimeSeriesOfDocument` + Overload #1: Track **a specific** time series of a chosen document + Overload #2: Track **any** time series of a chosen document + * `ForAllTimeSeries` + Track **all** time series + +* In this page: + * [ForTimeSeries](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#fortimeseries) + * [ForTimeSeriesOfDocument](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#fortimeseriesofdocument) + * [ForAllTimeSeries](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#foralltimeseries) + + +## ForTimeSeries + +Subscribe to changes in **all time-series with a given name**, no matter which document they belong to, +using the `ForTimeSeries` method. + +#### Syntax + + + +{`IChangesObservable ForTimeSeries(string timeSeriesName); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **timeSeriesName** | string | Name of a time series to subscribe to. | + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[TimeSeriesChange](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#timeserieschange)> | Observable that allows to add subscriptions to time series notifications. | + +#### Example + + + +{`IDisposable subscription = store + .Changes() + .ForTimeSeries("Likes") + .Subscribe + (change => + \{ + switch (change.Type) + \{ + case TimeSeriesChangeTypes.Delete: + // do something + break; + \} + \}); +`} + + + + + +## ForTimeSeriesOfDocument + +Use `ForTimeSeriesOfDocument` to subscribe to changes in **time series of a chosen document**. + +* Two overload methods allow you to + * Track **a specific** time series of the chosen document + * Track **any** time series of the chosen document +### Overload #1 +Use this `ForTimeSeriesOfDocument` overload to track changes in a **specific time** series of the chosen document. + +#### Syntax + + + +{`IChangesObservable ForTimeSeriesOfDocument(string documentId, string timeSeriesName); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **documentId** | string | ID of a document to subscribe to. | +| **timeSeriesName** | string | Name of a time series to subscribe to. 
| + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[TimeSeriesChange](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#timeserieschange)> | Observable that allows to add subscriptions to time series notifications. | + +#### Example + + + +{`IDisposable subscription = store + .Changes() + .ForTimeSeriesOfDocument("companies/1-A", "Likes") + .Subscribe + (change => + \{ + switch (change.Type) + \{ + case TimeSeriesChangeTypes.Delete: + // do something + break; + \} + \}); +`} + + +### Overload #2 +Use this `ForTimeSeriesOfDocument` overload to track changes in **any time series** of the chosen document. + +#### Syntax + + + +{`IChangesObservable ForTimeSeriesOfDocument(string documentId); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **documentId** | string | ID of a document to subscribe to. | + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[TimeSeriesChange](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#timeserieschange)> | Observable that allows to add subscriptions to time series notifications. | + +#### Example + + + +{`IDisposable subscription = store + .Changes() + .ForTimeSeriesOfDocument("companies/1-A") + .Subscribe + (change => + \{ + switch (change.Type) + \{ + case TimeSeriesChangeTypes.Delete: + // do something + break; + \} + \}); +`} + + + + + +## ForAllTimeSeries + +Subscribe to changes in **all time-series** using the `ForAllTimeSeries` method. + +#### Syntax + + + +{`IChangesObservable ForAllTimeSeries(); +`} + + + +| Return Value | | +| ------------- | ----- | +| IChangesObservable<[TimeSeriesChange](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#timeserieschange)> | Observable that allows to add subscriptions to time series notifications. | + +#### Example + + + +{`IDisposable subscription = store + .Changes() + .ForAllTimeSeries() + .Subscribe + (change => + \{ + switch (change.Type) + \{ + case TimeSeriesChangeTypes.Delete: + // do something + break; + \} + \}); +`} + + + + + +## TimeSeriesChange + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **Type** | [TimeSeriesChangeTypes](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#timeserieschangetypes) | Time series change type enum | +| **Name** | string | Time Series Name | +| **DocumentId** | string | Time series Document Identifier | +| **CollectionName** | string | Time series document Collection Name | +| **From** | DateTime | Time series values From date | +| **To** | DateTime | Time series values To date | +| **ChangeVector** | string | Time series Change Vector | + + + +## TimeSeriesChangeTypes + +| Name | Value | +| ---- | ----- | +| **None** | `0` | +| **Put** | `1` | +| **Delete** | `2` | +| **Mixed** | `3` | + + + +## Remarks + + +To get more method overloads, especially ones supporting **delegates**, please add the +[System.Reactive.Core](https://www.nuget.org/packages/System.Reactive.Core/) package to your project. 
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/changes/_what-is-changes-api-csharp.mdx b/versioned_docs/version-7.1/client-api/changes/_what-is-changes-api-csharp.mdx
new file mode 100644
index 0000000000..057621ea30
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_what-is-changes-api-csharp.mdx
@@ -0,0 +1,210 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Changes API is a Push Notifications service that allows a RavenDB Client to
+  receive messages from a RavenDB Server regarding events that occurred on the server.
+* A client can subscribe to events related to documents, indexes, operations, counters, and time series.
+* Using the Changes API allows you to notify users of various changes without requiring
+  any expensive polling.
+
+* In this page:
+   * [Accessing Changes API](../../client-api/changes/what-is-changes-api.mdx#accessing-changes-api)
+   * [Connection interface](../../client-api/changes/what-is-changes-api.mdx#connection-interface)
+   * [Subscribing](../../client-api/changes/what-is-changes-api.mdx#subscribing)
+   * [Unsubscribing](../../client-api/changes/what-is-changes-api.mdx#unsubscribing)
+   * [FAQ](../../client-api/changes/what-is-changes-api.mdx#faq)
+      * [Changes API and Database Timeout](../../client-api/changes/what-is-changes-api.mdx#changes-api-and-database-timeout)
+      * [Changes API and Method Overloads](../../client-api/changes/what-is-changes-api.mdx#changes-api-and-method-overloads)
+   * [Changes API -vs- Data Subscriptions](../../client-api/changes/what-is-changes-api.mdx#changes-api--vs--data-subscriptions)
+
+## Accessing Changes API
+
+The changes subscription is accessible by a document store through its `IDatabaseChanges`
+or `ISingleNodeDatabaseChanges` interfaces.
+
+
+
+{`IDatabaseChanges Changes(string database = null);
+`}
+
+
+
+
+{`ISingleNodeDatabaseChanges Changes(string database, string nodeTag);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **database** | `string` | Name of the database to open the changes API for. If `null`, the `Database` configured in the DocumentStore will be used. |
+| **nodeTag** | `string` | Tag of the cluster node to open the changes API for. |
+
+| Return value | |
+| ------------- | ----- |
+| `IDatabaseChanges` | Instance implementing the `IDatabaseChanges` interface. |
+| `ISingleNodeDatabaseChanges` | Instance implementing the `ISingleNodeDatabaseChanges` interface. |
+
+* Use `IDatabaseChanges` to subscribe to database changes.
+* Use `ISingleNodeDatabaseChanges` to limit tracking to a specific node.
+
+  Note that from RavenDB 6.2 on, some changes can be tracked not cross-cluster but only
+  **on a specific node**. In these cases, it is required that you open the Changes API using
+  the second overload, passing both a database name and a node tag: `store.Changes(dbName, nodeTag)`
+
+
+
+
+## Connection interface
+
+`IDatabaseChanges` inherits from the `IConnectableChanges` interface that represents the connection.
+
+
+
+{`public interface IConnectableChanges : IDisposable
+    where TChanges : IDatabaseChanges
+\{
+    // Returns the state of the connection
+    bool Connected \{ get; \}
+
+    // A task that ensures that the connection to the server was established.
+    Task EnsureConnectedNow();
+
+    // An event handler to detect changes to the connection status
+    event EventHandler ConnectionStatusChanged;
+
+    // An action to take if an error occurred in the connection to the server
+    event Action OnError;
+\}
+`}
+
+
+
+
+
+## Subscribing
+
+To receive notifications regarding server-side events, subscribe using one of the following methods.
+
+* **For Document Changes:**
+    - [ForAllDocuments](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#foralldocuments)
+      Track changes for all documents
+    - [ForDocument](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocument)
+      Track changes for a given document (by Doc ID)
+    - [ForDocumentsInCollection](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsincollection)
+      Track changes for all documents in a given collection
+    - [ForDocumentsStartingWith](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsstartingwith)
+      Track changes for documents whose ID starts with a given prefix
+
+* **For Index Changes:**
+    - [ForAllIndexes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forallindexes)
+      Track changes for all indexes
+    - [ForIndex](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forindex)
+      Track changes for a given index (by Index Name)
+
+* **For Operation Changes:**
+  Operation changes can be tracked only [on a specific node](../../client-api/changes/what-is-changes-api.mdx#accessing-changes-api).
+    - [ForAllOperations](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foralloperations)
+      Track changes for all operations
+    - [ForOperationId](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foroperationid)
+      Track changes for a given operation (by Operation ID)
+
+* **For Counter Changes:**
+    - [ForAllCounters](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forallcounters)
+      Track changes for all counters
+    - [ForCounter](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forcounter)
+      Track changes for a given counter (by Counter Name)
+    - [ForCounterOfDocument](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forcounterofdocument)
+      Track changes for a specific counter of a chosen document (by Doc ID and Counter Name)
+    - [ForCountersOfDocument](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#forcountersofdocument)
+      Track changes for all counters of a chosen document (by Doc ID)
+
+* **For Time Series Changes:**
+    - [ForAllTimeSeries](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#foralltimeseries)
+      Track changes for all time series
+    - [ForTimeSeries](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#fortimeseries)
+      Track changes for all time series with a given name
+    - [ForTimeSeriesOfDocument](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx#fortimeseriesofdocument)
+      Track changes for -
+       * a **specific time series** of a given document (by Doc ID and Time Series Name)
+       * **any time series** of a given document (by Doc ID)
+
+
+
+## Unsubscribing
+
+To end a subscription (stop listening for particular notifications) you must
+`Dispose` of the subscription.
+
+
+
+{`IDatabaseChanges changes = store.Changes();
+await changes.EnsureConnectedNow();
+var subscription = changes
+    .ForAllDocuments()
+    .Subscribe(change => Console.WriteLine("\{0\} on document \{1\}", change.Type, change.Id));
+try
+\{
+    // application code here
+\}
+finally
+\{
+    if (subscription != null)
+        subscription.Dispose();
+\}
+`}
+
+
+
+
+
+## FAQ
+
+#### Changes API and Database Timeout
+
+One or more open Changes API connections will prevent a database from becoming
+idle and unloaded, regardless of [the configuration value for database idle timeout](../../server/configuration/database-configuration.mdx#databasesmaxidletimeinsec).
+#### Changes API and Method Overloads
+
+
+To get more method overloads, especially ones supporting **delegates**, please add the
+[System.Reactive.Core](https://www.nuget.org/packages/System.Reactive.Core/) package to your project.
+
+
+
+
+## Changes API -vs- Data Subscriptions
+
+**Changes API** and [Data Subscription](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx)
+are services that a RavenDB Server provides to subscribing clients.
+Both services respond to events that take place on the server by sending updates
+to their subscribers.
+
+* **Changes API is a Push Notifications Service**.
+   * Changes API subscribers receive **notifications** regarding events that
+     took place on the server, without receiving the actual data entities
+     affected by these events.
+     For the modification of a document, for example, the client will receive
+     a [DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)
+     object with details like the document's ID and collection name.
+
+   * The server does **not** keep track of sent notifications or
+     check clients' usage of them. It is a client's responsibility
+     to manage its reactions to such notifications.
+
+* **Data Subscription is a Data Consumption Service**.
+   * A Data Subscription task keeps track of document modifications in the
+     database and delivers the documents in an orderly fashion when subscribers
+     indicate they are ready to receive them.
+   * The process is fully managed by the server, leaving very little for
+     the subscribers to do besides consuming the delivered documents.
+
+| | Data Subscriptions | Changes API |
+|------------------------------|--------------------|-------------|
+| What can the server Track | [Documents](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#documents-processing)
[Revisions](../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx)
[Counters](../../client-api/data-subscriptions/creation/examples.mdx#including-counters)
Time Series | [Documents](../../client-api/changes/how-to-subscribe-to-document-changes.mdx)
[Indexes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx)
[Operations](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx)
[Counters](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx)
[Time Series](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx) | +| What can the server Deliver | Documents
Revisions
Counters
Time Series | Notifications |
| Management | Managed by the Server | Managed by the Client |
diff --git a/versioned_docs/version-7.1/client-api/changes/_what-is-changes-api-java.mdx b/versioned_docs/version-7.1/client-api/changes/_what-is-changes-api-java.mdx
new file mode 100644
index 0000000000..a2049a09a9
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_what-is-changes-api-java.mdx
@@ -0,0 +1,153 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Changes API is a Push Notifications service that allows a RavenDB Client to
+  receive messages from a RavenDB Server regarding events that occurred on the server.
+* A client can subscribe to events related to documents, indexes, operations, counters, and time series.
+* Using the Changes API allows you to notify users of various changes without requiring
+  any expensive polling.
+
+* In this page:
+   * [Accessing Changes API](../../client-api/changes/what-is-changes-api.mdx#accessing-changes-api)
+   * [Connection interface](../../client-api/changes/what-is-changes-api.mdx#connection-interface)
+   * [Subscribing](../../client-api/changes/what-is-changes-api.mdx#subscribing)
+   * [Unsubscribing](../../client-api/changes/what-is-changes-api.mdx#unsubscribing)
+   * [Note](../../client-api/changes/what-is-changes-api.mdx#note)
+   * [Changes API -vs- Data Subscriptions](../../client-api/changes/what-is-changes-api.mdx#changes-api--vs--data-subscriptions)
+
+## Accessing Changes API
+
+The changes subscription is accessible by a document store through its `IDatabaseChanges` interface.
+
+
+
+{`IDatabaseChanges changes();
+
+IDatabaseChanges changes(String database);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **database** | `String` | Name of the database to open the changes API for. If `null`, the `Database` configured in the DocumentStore will be used. |
+
+| Return value | |
+| ------------- | ----- |
+| IDatabaseChanges | Instance implementing the IDatabaseChanges interface. |
+
+
+
+## Connection interface
+
+`IDatabaseChanges` inherits from the `IConnectableChanges` interface that represents the connection.
+
+
+
+{`public interface IConnectableChanges extends CleanCloseable \{
+
+    boolean isConnected();
+
+    void ensureConnectedNow();
+
+    void addConnectionStatusChanged(EventHandler handler);
+
+    void removeConnectionStatusChanged(EventHandler handler);
+
+    void addOnError(Consumer handler);
+
+    void removeOnError(Consumer handler);
+\}
+`}
+
+
+
+
+
+## Subscribing
+
+To receive notifications regarding server-side events, subscribe using one of the following methods.
+
+- [forAllDocuments](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#foralldocuments)
+- [forAllIndexes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forallindexes)
+- [forAllOperations](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foralloperations)
+- [forDocument](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocument)
+- [forDocumentsInCollection](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsincollection)
+- [forDocumentsStartingWith](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsstartingwith)
+- [forIndex](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forindex)
+- [forOperationId](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foroperation)
+
+
+
+## Unsubscribing
+
+To end a subscription (stop listening for particular notifications) you must
+`close` the subscription.
+
+
+
+{`IDatabaseChanges subscription = store.changes();
+
+subscription.ensureConnectedNow();
+
+subscription.forAllDocuments().subscribe(Observers.create(change -> \{
+    System.out.println(change.getType() + " on document " + change.getId());
+\}));
+
+try \{
+    // application code here
+\} finally \{
+    if (subscription != null) \{
+        subscription.close();
+    \}
+\}
+`}
+
+
+
+
+
+## Note
+
+
+One or more open Changes API connections will prevent a database from becoming
+idle and unloaded, regardless of [the configuration value for database idle timeout](../../server/configuration/database-configuration.mdx#databasesmaxidletimeinsec).
+
+
+
+
+## Changes API -vs- Data Subscriptions
+
+**Changes API** and [Data Subscription](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx)
+are services that a RavenDB Server provides to subscribing clients.
+Both services respond to events that take place on the server by sending updates
+to their subscribers.
+
+* **Changes API is a Push Notifications Service**.
+   * Changes API subscribers receive **notifications** regarding events that
+     took place on the server, without receiving the actual data entities
+     affected by these events.
+     For the modification of a document, for example, the client will receive
+     a [DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)
+     object with details like the document's ID and collection name.
+
+   * The server does **not** keep track of sent notifications or
+     check clients' usage of them. It is a client's responsibility
+     to manage its reactions to such notifications.
+
+* **Data Subscription is a Data Consumption Service**.
+   * A Data Subscription task keeps track of document modifications in the
+     database and delivers the documents in an orderly fashion when subscribers
+     indicate they are ready to receive them.
+   * The process is fully managed by the server, leaving very little for
+     the subscribers to do besides consuming the delivered documents.
+ +| | Data Subscriptions | Changes API | +|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| What can the server Track | [Documents](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#documents-processing)
[Revisions](../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx)
[Counters](../../client-api/data-subscriptions/creation/examples.mdx#including-counters)
Time Series | [Documents](../../client-api/changes/how-to-subscribe-to-document-changes.mdx)
[Indexes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx)
[Operations](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx)
[Counters](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx)
[Time Series](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx) | +| What can the server Deliver | Documents
Revisions
Counters
Time Series | Notifications |
| Management | Managed by the Server | Managed by the Client |
diff --git a/versioned_docs/version-7.1/client-api/changes/_what-is-changes-api-nodejs.mdx b/versioned_docs/version-7.1/client-api/changes/_what-is-changes-api-nodejs.mdx
new file mode 100644
index 0000000000..62d76f1a71
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/changes/_what-is-changes-api-nodejs.mdx
@@ -0,0 +1,152 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Changes API is a Push Notifications service that allows a RavenDB Client to
+  receive messages from a RavenDB Server regarding events that occurred on the server.
+* A client can subscribe to events related to documents, indexes, operations, counters, and time series.
+* Using the Changes API allows you to notify users of various changes without requiring
+  any expensive polling.
+
+* In this page:
+   * [Accessing Changes API](../../client-api/changes/what-is-changes-api.mdx#accessing-changes-api)
+   * [Connection interface](../../client-api/changes/what-is-changes-api.mdx#connection-interface)
+   * [Subscribing](../../client-api/changes/what-is-changes-api.mdx#subscribing)
+   * [Unsubscribing](../../client-api/changes/what-is-changes-api.mdx#unsubscribing)
+   * [FAQ](../../client-api/changes/what-is-changes-api.mdx#faq)
+      * [Changes API and Database Timeout](../../client-api/changes/what-is-changes-api.mdx#changes-api-and-database-timeout)
+      * [Changes API and Method Overloads](../../client-api/changes/what-is-changes-api.mdx#changes-api-and-method-overloads)
+   * [Changes API -vs- Data Subscriptions](../../client-api/changes/what-is-changes-api.mdx#changes-api--vs--data-subscriptions)
+
+## Accessing Changes API
+
+The changes subscription is accessible by a document store through its `IDatabaseChanges` interface.
+
+
+
+{`store.changes([database]);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **database** | `string` | Name of the database to open the changes API for. If `null`, the `database` configured in the DocumentStore will be used. |
+
+| Return value | |
+| ------------- | ----- |
+| `IDatabaseChanges` object | Instance implementing the IDatabaseChanges interface. |
+
+
+
+## Connection interface
+
+The changes object interface extends the `IConnectableChanges` interface that represents the connection. It exposes the following properties, methods, and events.
+
+| Properties and methods | | |
+| ------------- | ------------- | ----- |
+| **connected** | boolean | Indicates whether it's connected or not |
+| **on("connectionStatus")** | method | Adds a listener for the 'connectionStatus' event |
+| **on("error")** | method | Adds a listener for the 'error' event |
+| **ensureConnectedNow()** | method | Returns a `Promise` resolved once the connection to the server is established. |
+
+
+
+## Subscribing
+
+To receive notifications regarding server-side events, subscribe using one of the following methods.
+
+- [forAllDocuments()](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#foralldocuments)
+- [forAllIndexes()](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forallindexes)
+- [forAllOperations()](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foralloperations)
+- [forDocument()](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocument)
+- [forDocumentsInCollection()](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsincollection)
+- [forDocumentsStartingWith()](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#fordocumentsstartingwith)
+- [forIndex()](../../client-api/changes/how-to-subscribe-to-index-changes.mdx#forindex)
+- [forOperationId()](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx#foroperation)
+
+
+
+## Unsubscribing
+
+To end a subscription (stop listening for particular notifications) call `dispose()`.
+
+
+
+{`const changes = store.changes();
+
+await changes.ensureConnectedNow();
+
+const allDocsChanges = changes.forAllDocuments()
+    .on("data", change => \{
+        console.log(change.type + " on document " + change.id);
+    \})
+    .on("error", err => \{
+        // handle error
+    \});
+
+// ...
+
+try \{
+    // application code here
+\} finally \{
+    // dispose changes after use
+    if (changes != null) \{
+        changes.dispose();
+    \}
+\}
+`}
+
+
+
+
+
+## FAQ
+
+#### Changes API and Database Timeout
+
+One or more open Changes API connections will prevent a database from becoming
+idle and unloaded, regardless of [the configuration value for database idle timeout](../../server/configuration/database-configuration.mdx#databasesmaxidletimeinsec).
+#### Changes API and Method Overloads
+
+
+To get more method overloads, especially ones supporting **delegates**, please add the
+[System.Reactive.Core](https://www.nuget.org/packages/System.Reactive.Core/) package to your project.
+
+
+
+
+## Changes API -vs- Data Subscriptions
+
+**Changes API** and [Data Subscription](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx)
+are services that a RavenDB Server provides to subscribing clients.
+Both services respond to events that take place on the server by sending updates
+to their subscribers.
+
+* **Changes API is a Push Notifications Service**.
+   * Changes API subscribers receive **notifications** regarding events that
+     took place on the server, without receiving the actual data entities
+     affected by these events.
+     For the modification of a document, for example, the client will receive
+     a [DocumentChange](../../client-api/changes/how-to-subscribe-to-document-changes.mdx#documentchange)
+     object with details like the document's ID and collection name.
+
+   * The server does **not** keep track of sent notifications or
+     check clients' usage of them. It is a client's responsibility
+     to manage its reactions to such notifications.
+
+* **Data Subscription is a Data Consumption Service**.
+   * A Data Subscription task keeps track of document modifications in the
+     database and delivers the documents in an orderly fashion when subscribers
+     indicate they are ready to receive them.
+   * The process is fully managed by the server, leaving very little for
+     the subscribers to do besides consuming the delivered documents.
+ +| | Data Subscriptions | Changes API | +|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| What can the server Track | [Documents](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#documents-processing)
[Revisions](../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx)
[Counters](../../client-api/data-subscriptions/creation/examples.mdx#including-counters)
Time Series | [Documents](../../client-api/changes/how-to-subscribe-to-document-changes.mdx)
[Indexes](../../client-api/changes/how-to-subscribe-to-index-changes.mdx)
[Operations](../../client-api/changes/how-to-subscribe-to-operation-changes.mdx)
[Counters](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx)
[Time Series](../../client-api/changes/how-to-subscribe-to-time-series-changes.mdx) | +| What can the server Deliver | Documents
Revisions
Counters
Time Series | Notifications | +| Management | Managed by the Server | Managed by the Client | diff --git a/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-counter-changes.mdx b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-counter-changes.mdx new file mode 100644 index 0000000000..eab5ff976d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-counter-changes.mdx @@ -0,0 +1,34 @@ +--- +title: "Changes API: How to Subscribe to Counter Changes" +hide_table_of_contents: true +sidebar_label: How to Subscribe to Counter Changes +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToSubscribeToCounterChangesCsharp from './_how-to-subscribe-to-counter-changes-csharp.mdx'; +import HowToSubscribeToCounterChangesJava from './_how-to-subscribe-to-counter-changes-java.mdx'; + +export const supportedLanguages = ["csharp", "java"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-document-changes.mdx b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-document-changes.mdx new file mode 100644 index 0000000000..6cedd947fc --- /dev/null +++ b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-document-changes.mdx @@ -0,0 +1,39 @@ +--- +title: "Changes API: How to Subscribe to Document Changes" +hide_table_of_contents: true +sidebar_label: How to Subscribe to Document Changes +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToSubscribeToDocumentChangesCsharp from './_how-to-subscribe-to-document-changes-csharp.mdx'; +import HowToSubscribeToDocumentChangesJava from './_how-to-subscribe-to-document-changes-java.mdx'; +import HowToSubscribeToDocumentChangesNodejs from './_how-to-subscribe-to-document-changes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-index-changes.mdx b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-index-changes.mdx new file mode 100644 index 0000000000..a0936f7f7f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-index-changes.mdx @@ -0,0 +1,38 @@ +--- +title: "Changes API: How to Subscribe to Index Changes" +hide_table_of_contents: true +sidebar_label: How to Subscribe to Index Changes +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToSubscribeToIndexChangesCsharp from './_how-to-subscribe-to-index-changes-csharp.mdx'; +import HowToSubscribeToIndexChangesJava from './_how-to-subscribe-to-index-changes-java.mdx'; +import HowToSubscribeToIndexChangesNodejs from './_how-to-subscribe-to-index-changes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-operation-changes.mdx b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-operation-changes.mdx new file mode 100644 index 0000000000..6eb458757a --- /dev/null +++ 
b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-operation-changes.mdx @@ -0,0 +1,38 @@ +--- +title: "Changes API: How to Subscribe to Operation Changes" +hide_table_of_contents: true +sidebar_label: How to Subscribe to Operation Changes +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToSubscribeToOperationChangesCsharp from './_how-to-subscribe-to-operation-changes-csharp.mdx'; +import HowToSubscribeToOperationChangesJava from './_how-to-subscribe-to-operation-changes-java.mdx'; +import HowToSubscribeToOperationChangesNodejs from './_how-to-subscribe-to-operation-changes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-time-series-changes.mdx b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-time-series-changes.mdx new file mode 100644 index 0000000000..df37e449d0 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/changes/how-to-subscribe-to-time-series-changes.mdx @@ -0,0 +1,29 @@ +--- +title: "Changes API: How to Subscribe to Time Series Changes" +hide_table_of_contents: true +sidebar_label: How to Subscribe to Time Series Changes +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToSubscribeToTimeSeriesChangesCsharp from './_how-to-subscribe-to-time-series-changes-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/changes/what-is-changes-api.mdx b/versioned_docs/version-7.1/client-api/changes/what-is-changes-api.mdx new file mode 100644 index 0000000000..b4d4dfbb0c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/changes/what-is-changes-api.mdx @@ -0,0 +1,45 @@ +--- +title: "What Is the Changes API" +hide_table_of_contents: true +sidebar_label: What is Changes API +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import WhatIsChangesApiCsharp from './_what-is-changes-api-csharp.mdx'; +import WhatIsChangesApiJava from './_what-is-changes-api-java.mdx'; +import WhatIsChangesApiNodejs from './_what-is-changes-api-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/cluster/_category_.json b/versioned_docs/version-7.1/client-api/cluster/_category_.json new file mode 100644 index 0000000000..6cd44859ac --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 16, + "label": "Cluster Related" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/cluster/_document-conflicts-in-client-side-csharp.mdx b/versioned_docs/version-7.1/client-api/cluster/_document-conflicts-in-client-side-csharp.mdx new file mode 100644 index 0000000000..884d7efbfa --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/_document-conflicts-in-client-side-csharp.mdx @@ -0,0 +1,105 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from
'@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +## What are conflicts? +When two or more changes to a single document are made concurrently on two separate nodes, +RavenDB cannot know which of the changes is the correct one. This is called a document conflict. +For more information about conflicts and their resolution, see the [article about conflicts](../../server/clustering/replication/replication-conflicts.mdx). + + +By default, RavenDB resolves conflicts using the "resolve to latest" strategy, i.e. the conflict is resolved to the document variant with the latest 'modified date'. + + +## When is a conflict exception thrown? +`DocumentConflictException` will be thrown on any access to a conflicted document. +Fetching attachments of a conflicted document will throw `InvalidOperationException` on the server. + +## How can a conflict be resolved from the client side? + * A PUT of a document whose ID belongs to a conflicted document will resolve the conflict. + + + +{`using (var session = store.OpenSession()) +\{ + session.Store(new User \{Name = "John Doe"\}, "users/123"); // users/123 is a conflicted document + session.SaveChanges(); //when this request is finished, the conflict for users/123 is resolved. +\} +`} + + + * A DELETE of a conflicted document will resolve its conflict. + + + +{`using (var session = store.OpenSession()) +\{ + session.Delete("users/123"); // users/123 is a conflicted document + session.SaveChanges(); //when this request is finished, the conflict for users/123 is resolved. +\} +`} + + + * Incoming replication will resolve the conflict if the incoming document has a larger [change vector](../../server/clustering/replication/change-vector.mdx). + +## Modifying conflict resolution from the client-side +In RavenDB, conflicts can be resolved either by resolving to the latest version or by using a conflict resolution script that decides which of the conflicted document variants should be kept. The following is an example of how to set a conflict resolution script from the client side.
+ + +{`using (var documentStore = new DocumentStore +\{ + Urls = new []\{ "http://" \}, + Database = "" +\}) +\{ + var resolveByCollection = new Dictionary<string, ScriptResolver> + \{ + \{ + "ShoppingCarts", new ScriptResolver //specify conflict resolution for collection + \{ + // conflict resolution script is written in javascript + Script = @" + var final = docs[0]; + for(var i = 1; i < docs.length; i++) + \{ + var currentCart = docs[i]; + for(var j = 0; j < currentCart.Items.length; j++) + \{ + var item = currentCart.Items[j]; + var match = final.Items + .find( i => i.ProductId == item.ProductId); + if(!match) + \{ + // not in cart, add + final.Items.push(item); + \} + else + \{ + match.Quantity = Math.max( + item.Quantity , + match.Quantity); + \} + \} + \} + return final; // the conflict will be resolved to this variant + " + \} + \} + \}; + + var op = new ModifyConflictSolverOperation( + documentStore.Database, + resolveByCollection, //we specify conflict resolution scripts by document collection + resolveToLatest: true); // if true, RavenDB will resolve conflict to the latest + // if there is no resolver defined for a given collection or + // the script returns null + + await documentStore.Maintenance.Server.SendAsync(op); +\} +`} + + + diff --git a/versioned_docs/version-7.1/client-api/cluster/_document-conflicts-in-client-side-java.mdx b/versioned_docs/version-7.1/client-api/cluster/_document-conflicts-in-client-side-java.mdx new file mode 100644 index 0000000000..3ae2563dab --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/_document-conflicts-in-client-side-java.mdx @@ -0,0 +1,97 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +## What are conflicts? +When two or more changes to a single document are made concurrently on two separate nodes, +RavenDB cannot know which of the changes is the correct one. This is called a document conflict. +For more information about conflicts and their resolution, see the [article about conflicts](../../server/clustering/replication/replication-conflicts.mdx). + + +By default, RavenDB resolves conflicts using the "resolve to latest" strategy, i.e. the conflict is resolved to the document variant with the latest 'modified date'. + + +## When is a conflict exception thrown? +`DocumentConflictException` will be thrown on any access to a conflicted document. +Fetching attachments of a conflicted document will throw `InvalidOperationException` on the server. + +## How can a conflict be resolved from the client side? + * A PUT of a document whose ID belongs to a conflicted document will resolve the conflict. + + + +{`try (IDocumentSession session = store.openSession()) \{ + User user = new User(); + user.setName("John Doe"); + + session.store(user, "users/123"); + // users/123 is a conflicted document + session.saveChanges(); + // when this request is finished, the conflict for users/123 is resolved. +\} +`} + + + + * A DELETE of a conflicted document will resolve its conflict. + + + +{`try (IDocumentSession session = store.openSession()) \{ + session.delete("users/123"); // users/123 is a conflicted document + session.saveChanges(); //when this request is finished, the conflict for users/123 is resolved. +\} +`} + + + + * Incoming replication will resolve the conflict if the incoming document has a larger [change vector](../../server/clustering/replication/change-vector.mdx).
+ +## Modifying conflict resolution from the client-side +In RavenDB, conflicts can be resolved either by resolving to the latest version or by using a conflict resolution script that decides which of the conflicted document variants should be kept. The following is an example of how to set a conflict resolution script from the client side. + + +{`try (IDocumentStore documentStore = new DocumentStore( + new String[] \{ "http://" \}, "")) \{ + + Map<String, ScriptResolver> resolveByCollection = new HashMap<>(); + ScriptResolver scriptResolver = new ScriptResolver(); + scriptResolver.setScript( + " var final = docs[0];" + + " for(var i = 1; i < docs.length; i++)" + + " \{" + + " var currentCart = docs[i];" + + " for(var j = 0; j < currentCart.Items.length; j++)" + + " \{" + + " var item = currentCart.Items[j];" + + " var match = final.Items" + + " .find( i => i.ProductId == item.ProductId);" + + " if (!match)" + + " \{" + + " // not in cart, add" + + " final.Items.push(item);" + + " \} else \{ " + + " match.Quantity = Math.max(" + + " item.Quantity ," + + " match.Quantity);" + + " \}" + + " \}" + + " \}" + + " return final; // the conflict will be resolved to this variant"); + resolveByCollection.put("ShoppingCarts", scriptResolver); + + ModifyConflictSolverOperation op = new ModifyConflictSolverOperation( + documentStore.getDatabase(), + resolveByCollection, //we specify conflict resolution scripts by document collection + true // if true, RavenDB will resolve conflict to the latest + // if there is no resolver defined for a given collection or + // the script returns null + ); + + documentStore.maintenance().server().send(op); +\} +`} + + + diff --git a/versioned_docs/version-7.1/client-api/cluster/_how-client-integrates-with-replication-and-cluster-csharp.mdx b/versioned_docs/version-7.1/client-api/cluster/_how-client-integrates-with-replication-and-cluster-csharp.mdx new file mode 100644 index 0000000000..b27fc3cdf6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/_how-client-integrates-with-replication-and-cluster-csharp.mdx @@ -0,0 +1,131 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In this page: + * [Failover behavior](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#failover-behavior) + * [Cluster topology in the client](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#cluster-topology-in-the-client) + * [Topology discovery](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#topology-discovery) + * [Configuring topology nodes](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#configuring-topology-nodes) + * [Write assurance and database groups](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#write-assurance-and-database-groups) + + +## Failover behavior + +* In RavenDB, replication is _not_ a bundle; it is always enabled when there are two or more nodes in the cluster. + This means that the failover mechanism is always turned on by default. + +* The client contains a list of cluster nodes per database group. + Each time the client needs to send a request to a database, it will choose a node that contains this database from this list and send the request to it. + If the node is down and the request fails, it will select another node from this list.
+ +* The choice of which node to select depends on the `ReadBalanceBehavior` and `LoadBalanceBehavior` configuration values. + For more information about the different values and the node selection process, see [Load balancing client requests](../../client-api/configuration/load-balance/overview.mdx). + + + Each failure to connect to a node spawns a health check for that node. + For more information see [Cluster Node Health Check](health-check). + + + + +## Cluster topology in the client + +When the client is initialized, it fetches the topologies and populates the nodes list for the load-balancing and failover functionality. +During the lifetime of a RavenDB Client object, it periodically receives the cluster and the database topologies from the server. +The **topology** is updated with the following logic: + +* Each topology has an etag, which is a number +* Each time the topology changes, the etag is incremented +* With each request, the client sends the latest topology etag it has in the header +* If the current topology etag at the server is higher than the one in the client, the server adds `"Refresh-Topology:true"` to the response header +* If a client detects the `"Refresh-Topology:true"` header in the response, the client will fetch the updated topology from the server. + Note: if `ReadBalanceBehavior.FastestNode` is selected, the client will schedule a speed test to determine the fastest node. +* In addition, every 5 minutes, the client fetches the current topology from the server if no requests are made within that time frame. + +The **client configuration** is handled in a similar way: + +* Each client configuration has an etag attached +* Each time the configuration changes on the server side, the server adds `"Refresh-Client-Configuration"` to the response +* When the client detects the aforementioned header in the response, it schedules fetching the new configuration + + + +## Topology discovery + +In RavenDB, the cluster topology has an etag that increments with each topology change. + +#### How and when the topology is updated: + +* The first time any request is sent to a RavenDB server, the client fetches the cluster topology +* Each subsequent request carries the fetched topology etag in the HTTP headers, under the key `Topology-Etag` +* If the response contains the `Refresh-Topology: true` header, then a thread responsible for updating the topology will be spawned + + + +## Configuring topology nodes + +Listing any node in the initialization of the cluster in the client is enough to be able to properly connect to the specified database. +Each node in the cluster contains the full topology of all databases and all nodes that are in the cluster. +Nevertheless, it is possible to specify multiple node URLs at initialization. But why list multiple nodes in the cluster, if the URL of any cluster node will do? + +By listing multiple nodes in the cluster, we can ensure that if a single node is down and we bring a new client up, we'll still be able to get the initial topology. +If the cluster is small (three to five nodes), we'll typically list all the nodes in the cluster. +But for larger clusters, we'll usually just list enough nodes that having them all go down at once means you have more pressing concerns than a new client coming up.
+ + +{`using (var store = new DocumentStore +\{ + Database = "TestDB", + Urls = new [] \{ + "http://[node A url]", + "http://[node B url]", + "http://[node C url]" + \} +\}) +\{ + store.Initialize(); + + // the rest of ClientAPI code +\} +`} + + + + + +## Write assurance and database groups + +In RavenDB clusters, the databases are hosted in [database groups](../../glossary/database-group.mdx). +Since master-master replication is configured between database group members, a write to one of the nodes will be replicated to all other instances of the group. +If certain writes are important, it is possible to make the client wait until the transaction data gets replicated to multiple nodes. +This is called 'write assurance', and it is available with the `WaitForReplicationAfterSaveChanges()` method. + + + +{`using (var session = store.OpenSession()) +\{ + var user = new User + \{ + Name = "John Doe" + \}; + + session.Store(user); + + //make sure that the committed data is replicated to 2 nodes + //before returning from the SaveChanges() call. + session.Advanced + .WaitForReplicationAfterSaveChanges(replicas: 2); + + session.SaveChanges(); +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/cluster/_how-client-integrates-with-replication-and-cluster-java.mdx b/versioned_docs/version-7.1/client-api/cluster/_how-client-integrates-with-replication-and-cluster-java.mdx new file mode 100644 index 0000000000..a9b44f2816 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/_how-client-integrates-with-replication-and-cluster-java.mdx @@ -0,0 +1,125 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In this page: + * [Failover behavior](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#failover-behavior) + * [Cluster topology in the client](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#cluster-topology-in-the-client) + * [Topology discovery](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#topology-discovery) + * [Configuring topology nodes](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#configuring-topology-nodes) + * [Write assurance and database groups](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#write-assurance-and-database-groups) + + +## Failover behavior + +* In RavenDB, replication is _not_ a bundle; it is always enabled when there are two or more nodes in the cluster. + This means that the failover mechanism is always turned on by default. + +* The client contains a list of cluster nodes per database group. + Each time the client needs to send a request to a database, it will choose a node that contains this database from this list and send the request to it. + If the node is down and the request fails, it will select another node from this list. + +* The choice of which node to select depends on the `ReadBalanceBehavior` and `LoadBalanceBehavior` configuration values. + For more information about the different values and the node selection process, see [Load balancing client requests](../../client-api/configuration/load-balance/overview.mdx). + + + Each failure to connect to a node spawns a health check for that node. + For more information see [Cluster Node Health Check](health-check).
+ + + + +## Cluster topology in the client + +When the client is initialized, it fetches the topologies and populates the nodes list for the load-balancing and failover functionality. +During the lifetime of a RavenDB Client object, it periodically receives the cluster and the database topologies from the server. +The **topology** is updated with the following logic: + +* Each topology has an etag, which is a number +* Each time the topology changes, the etag is incremented +* With each request, the client sends the latest topology etag it has in the header +* If the current topology etag at the server is higher than the one in the client, the server adds `"Refresh-Topology:true"` to the response header +* If a client detects the `"Refresh-Topology:true"` header in the response, the client will fetch the updated topology from the server. + Note: if `ReadBalanceBehavior.FASTEST_NODE` is selected, the client will schedule a speed test to determine the fastest node. +* In addition, every 5 minutes, the client fetches the current topology from the server if no requests are made within that time frame. + +The **client configuration** is handled in a similar way: + +* Each client configuration has an etag attached +* Each time the configuration changes on the server side, the server adds `"Refresh-Client-Configuration"` to the response +* When the client detects the aforementioned header in the response, it schedules fetching the new configuration + + + +## Topology discovery + +In RavenDB, the cluster topology has an etag that increments with each topology change. + +#### How and when the topology is updated: + +* The first time any request is sent to a RavenDB server, the client fetches the cluster topology +* Each subsequent request carries the fetched topology etag in the HTTP headers, under the key `Topology-Etag` +* If the response contains the `Refresh-Topology: true` header, then a thread responsible for updating the topology will be spawned + + + +## Configuring topology nodes + +Listing any node in the initialization of the cluster in the client is enough to be able to properly connect to the specified database. +Each node in the cluster contains the full topology of all databases and all nodes that are in the cluster. +Nevertheless, it is possible to specify multiple node URLs at initialization. But why list multiple nodes in the cluster, if the URL of any cluster node will do? + +By listing multiple nodes in the cluster, we can ensure that if a single node is down and we bring a new client up, we'll still be able to get the initial topology. +If the cluster is small (three to five nodes), we'll typically list all the nodes in the cluster. +But for larger clusters, we'll usually just list enough nodes that having them all go down at once means you have more pressing concerns than a new client coming up. + + + +{`try (IDocumentStore store = new DocumentStore(new String[]\{ + "http://[node A url]", + "http://[node B url]", + "http://[node C url]" +\}, "TestDB")) \{ + + + store.initialize(); + + // the rest of ClientAPI code +\} +`} + + + + + +## Write assurance and database groups + +In RavenDB clusters, the databases are hosted in database groups. +Since master-master replication is configured between database group members, a write to one of the nodes will be replicated to all other instances of the group. +If certain writes are important, it is possible to make the client wait until the transaction data gets replicated to multiple nodes.
+This is called 'write assurance', and it is available with the `waitForReplicationAfterSaveChanges()` method. + + + +{`try (IDocumentSession session = store.openSession()) \{ + User user = new User(); + user.setName("John Doe"); + + session.store(user); + + //make sure that the committed data is replicated + //before returning from the saveChanges() call. + session + .advanced() + .waitForReplicationAfterSaveChanges(); + + session.saveChanges(); +\} +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/cluster/_how-client-integrates-with-replication-and-cluster-nodejs.mdx b/versioned_docs/version-7.1/client-api/cluster/_how-client-integrates-with-replication-and-cluster-nodejs.mdx new file mode 100644 index 0000000000..c844bea78e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/_how-client-integrates-with-replication-and-cluster-nodejs.mdx @@ -0,0 +1,120 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In this page: + * [Failover behavior](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#failover-behavior) + * [Cluster topology in the client](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#cluster-topology-in-the-client) + * [Topology discovery](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#topology-discovery) + * [Configuring topology nodes](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#configuring-topology-nodes) + * [Write assurance and database groups](../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#write-assurance-and-database-groups) + + +## Failover behavior + +* In RavenDB, replication is _not_ a bundle; it is always enabled when there are two or more nodes in the cluster. + This means that the failover mechanism is always turned on by default. + +* The client contains a list of cluster nodes per database group. + Each time the client needs to send a request to a database, it will choose a node that contains this database from this list and send the request to it. + If the node is down and the request fails, it will select another node from this list. + +* The choice of which node to select depends on the `ReadBalanceBehavior` and `LoadBalanceBehavior` configuration values. + For more information about the different values and the node selection process, see [Load balancing client requests](../../client-api/configuration/load-balance/overview.mdx). + + + Each failure to connect to a node spawns a health check for that node. + For more information see [Cluster Node Health Check](health-check). + + + + +## Cluster topology in the client + +When the client is initialized, it fetches the topologies and populates the nodes list for the load-balancing and failover functionality. +During the lifetime of a RavenDB Client object, it periodically receives the cluster and the database topologies from the server.
+The **topology** is updated with the following logic: + +* Each topology has an etag, which is a number +* Each time the topology changes, the etag is incremented +* With each request, the client sends the latest topology etag it has in the header +* If the current topology etag at the server is higher than the one in the client, the server adds `"Refresh-Topology: true"` to the response header +* If a client detects the `"Refresh-Topology: true"` header in the response, the client will fetch the updated topology from the server. + Note: if the `ReadBalanceBehavior` option `FastestNode` is selected, the client will schedule a speed test to determine the fastest node. +* In addition, every 5 minutes, the client fetches the current topology from the server if no requests are made within that time frame. + +The **client configuration** is handled in a similar way: + +* Each client configuration has an etag attached +* Each time the configuration changes on the server side, the server adds `"Refresh-Client-Configuration"` to the response +* When the client detects the aforementioned header in the response, it schedules fetching the new configuration + + + +## Topology discovery + +In RavenDB, the cluster topology has an etag that increments with each topology change. + +#### How and when the topology is updated: + +* The first time any request is sent to a RavenDB server, the client fetches the cluster topology +* Each subsequent request carries the fetched topology etag in the HTTP headers, under the key `Topology-Etag` +* If the response contains the `Refresh-Topology: true` header, then a thread responsible for updating the topology will be spawned + + + +## Configuring topology nodes + +Listing any node in the initialization of the cluster in the client is enough to be able to properly connect to the specified database. +Each node in the cluster contains the full topology of all databases and all nodes that are in the cluster. +Nevertheless, it is possible to specify multiple node URLs at initialization. But why list multiple nodes in the cluster, if the URL of any cluster node will do? + +By listing multiple nodes in the cluster, we can ensure that if a single node is down and we bring a new client up, we'll still be able to get the initial topology. +If the cluster is small (three to five nodes), we'll typically list all the nodes in the cluster. +But for larger clusters, we'll usually just list enough nodes that having them all go down at once means you have more pressing concerns than a new client coming up. + + + +{`const store = new DocumentStore([ + "http://[node A url]", + "http://[node B url]", + "http://[node C url]" +], "TestDB"); + +store.initialize(); + +// the rest of ClientAPI code +`} + + + + + +## Write assurance and database groups + +In RavenDB clusters, the databases are hosted in database groups. +Since master-master replication is configured between database group members, a write to one of the nodes will be replicated to all other instances of the group. +If certain writes are important, it is possible to make the client wait until the transaction data gets replicated to multiple nodes. +This is called 'write assurance', and it is available with the `waitForReplicationAfterSaveChanges()` method. + + + +{`const session = store.openSession(); +const user = new User("John Doe"); + +await session.store(user); + +//make sure that the committed data is replicated +//before returning from the saveChanges() call.
+session + .advanced + .waitForReplicationAfterSaveChanges(); + +await session.saveChanges(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/cluster/document-conflicts-in-client-side.mdx b/versioned_docs/version-7.1/client-api/cluster/document-conflicts-in-client-side.mdx new file mode 100644 index 0000000000..5b64f9d83e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/document-conflicts-in-client-side.mdx @@ -0,0 +1,29 @@ +--- +title: "Cluster: Document Conflicts in Client-side" +hide_table_of_contents: true +sidebar_label: Document Conflict Exceptions at Client-Side +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DocumentConflictsInClientSideCsharp from './_document-conflicts-in-client-side-csharp.mdx'; +import DocumentConflictsInClientSideJava from './_document-conflicts-in-client-side-java.mdx'; + +export const supportedLanguages = ["csharp", "java"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/cluster/health-check.mdx b/versioned_docs/version-7.1/client-api/cluster/health-check.mdx new file mode 100644 index 0000000000..eb7431bc90 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/health-check.mdx @@ -0,0 +1,25 @@ +--- +title: "Cluster: Cluster Node Health Check" +hide_table_of_contents: true +sidebar_label: Cluster Node Health Check +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Cluster: Cluster Node Health Check + +A health check sends an HTTP request to the `/databases/[Database Name]/stats` endpoint. +If the request is successful, it resets the node's failure counters, which causes the client to try sending operations to that specific node again. + +### When Does it Trigger? + +Any time a low-level [operation](../operations/what-are-operations.mdx) fails to connect to a node, the client spawns a health check thread for that particular node. +The thread periodically pings the unresponsive server until it gets a proper response. +The ping interval starts at 100 ms and gradually grows until it reaches 5-second intervals.
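The ramp-up described above can be pictured with a short sketch. This is illustrative only and not the actual client internals; the `PingNodeAsync` helper is a hypothetical stand-in for the client's request to the `stats` endpoint, and the doubling schedule is an assumption used to demonstrate the 100 ms to 5-second progression.

```csharp
using System;
using System.Threading.Tasks;

public static class HealthCheckBackoffSketch
{
    // Hypothetical stand-in for the client's internal ping
    // (an HTTP GET to /databases/[Database Name]/stats).
    private static Task<bool> PingNodeAsync(string nodeUrl) =>
        Task.FromResult(false); // placeholder result

    // Ping the failed node repeatedly, starting at 100 ms between attempts
    // and growing the interval until it is capped at 5 seconds.
    public static async Task WaitForNodeRecoveryAsync(string nodeUrl)
    {
        var interval = TimeSpan.FromMilliseconds(100);
        while (!await PingNodeAsync(nodeUrl))
        {
            await Task.Delay(interval);
            var nextMs = Math.Min(interval.TotalMilliseconds * 2, 5000);
            interval = TimeSpan.FromMilliseconds(nextMs);
        }
        // On a successful response, the client resets the node's
        // failure counters and resumes sending operations to it.
    }
}
```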
+ diff --git a/versioned_docs/version-7.1/client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx b/versioned_docs/version-7.1/client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx new file mode 100644 index 0000000000..385bb13c7c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx @@ -0,0 +1,42 @@ +--- +title: "Client Integration with the Cluster" +hide_table_of_contents: true +sidebar_label: Client Integration with the Cluster +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowClientIntegratesWithReplicationAndClusterCsharp from './_how-client-integrates-with-replication-and-cluster-csharp.mdx'; +import HowClientIntegratesWithReplicationAndClusterJava from './_how-client-integrates-with-replication-and-cluster-java.mdx'; +import HowClientIntegratesWithReplicationAndClusterNodejs from './_how-client-integrates-with-replication-and-cluster-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/cluster/speed-test.mdx b/versioned_docs/version-7.1/client-api/cluster/speed-test.mdx new file mode 100644 index 0000000000..c505a18751 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/cluster/speed-test.mdx @@ -0,0 +1,40 @@ +--- +title: "Cluster: Speed Test" +hide_table_of_contents: true +sidebar_label: Client Speed Test +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Cluster: Speed Test + + +* In the RavenDB Client API, if the [Read Balance Behavior](../../client-api/configuration/load-balance/read-balance-behavior.mdx) is configured for the _Fastest Node_, + then under certain conditions, the client executes a `Speed Test` for each node in the cluster so that the fastest node can be accessed for ***Read*** requests. + +* When doing a `Speed Test`, the client checks the response time from all the nodes in the topology. + This is done per 'Read' request that is executed. + +* Once the Speed Test is finished, the client stores the fastest node found. + After that, the speed test will be repeated every minute. + +## When does the Speed Test Trigger? + +The Speed Test is triggered in the following cases: + +* When the client configuration has changed to `FastestNode` + Once the client configuration is updated on the server, the next response from the server to the client will include the following header: `Refresh-Client-Configuration`. + When the client sees such a header for the first time, it will start the Speed Test - if the configuration is indeed set to _FastestNode_. + +* Every 5 minutes the client checks the server for the current nodes' topology. + At this periodic check, the Speed Test will be triggered if _FastestNode_ is set. + +* Any time the nodes' topology changes - again, only if _FastestNode_ is set.
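Since the speed test only runs when the fastest-node behavior is active, here is a minimal sketch of enabling it from the client side. The URL and database name are placeholders, and setting the convention directly on the store is just one way to configure it (it can also be delivered server-side via the client configuration):

```csharp
using Raven.Client.Documents;
using Raven.Client.Documents.Conventions;
using Raven.Client.Http;

// A minimal sketch, assuming a placeholder URL and database name.
// With FastestNode, 'Read' requests are routed to the node that
// won the most recent speed test.
var store = new DocumentStore
{
    Urls = new[] { "http://localhost:8080" }, // placeholder URL
    Database = "TestDB",                      // placeholder database name
    Conventions = new DocumentConventions
    {
        ReadBalanceBehavior = ReadBalanceBehavior.FastestNode
    }
};
store.Initialize();
```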
+ + diff --git a/versioned_docs/version-7.1/client-api/commands/_category_.json b/versioned_docs/version-7.1/client-api/commands/_category_.json new file mode 100644 index 0000000000..053c45fac9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 11, + "label": "Commands" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/commands/_overview-csharp.mdx b/versioned_docs/version-7.1/client-api/commands/_overview-csharp.mdx new file mode 100644 index 0000000000..c6e3277d64 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/_overview-csharp.mdx @@ -0,0 +1,227 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB's Client API is structured in layers. + At the highest layer, you interact with the [document store](../../client-api/what-is-a-document-store.mdx) and the [document session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx), + which handle most common database tasks like loading, saving, and querying documents. + +* Beneath this high-level interface are Operations and Commands: + + * **Operations**: + + * Operations provide management functionality outside the session's context, + like creating a database, performing bulk actions, or managing server-wide configurations. + + * Learn more about Operations in [what are Operations](../../client-api/operations/what-are-operations.mdx). + + * **Commands**: + + * All high-level methods and Operations are built on top of Commands. + Commands form the lowest-level operations that directly communicate with the server. + + * For example, a session’s _Load_ method translates internally to a _LoadOperation_, + which ultimately relies on a _GetDocumentsCommand_ to fetch data from the server. + + * Commands are responsible for sending the appropriate request to the server using a `Request Executor`, + and parsing the server's response. + + * All commands can be executed using either the [Store's _Request Executor_](../../client-api/commands/overview.mdx#execute-command---using-the-store-request-executor) + or the [Session's _Request Executor_](../../client-api/commands/overview.mdx#execute-command---using-the-session-request-executor), + regardless of whether the command is session-related or not. + +* This layered structure lets you work at any level, depending on your needs. + +* In this page: + * [Execute command - using the Store Request Executor](../../client-api/commands/overview.mdx#execute-command---using-the-store-request-executor) + * [Execute command - using the Session Request Executor](../../client-api/commands/overview.mdx#execute-command---using-the-session-request-executor) + * [Available commands](../../client-api/commands/overview.mdx#available-commands) + * [Syntax](../../client-api/commands/overview.mdx#syntax) + + +## Execute command - using the Store Request Executor + +This example shows how to execute the low-level `CreateSubscriptionCommand` via the **Store**. +(For examples of creating a subscription using higher-level methods, see [subscription creation examples](../../client-api/data-subscriptions/creation/examples.mdx)).
+ + + +{`// Using the store object +using (var store = new DocumentStore()) +// Allocate a context from the store's context pool for executing the command +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +{ + // Define a command + var cmd = new CreateSubscriptionCommand(store.Conventions, + new SubscriptionCreationOptions() + { + Name = "Orders subscription", + Query = "from Orders" + }); + + // Call 'Execute' on the store's Request Executor to send the command to the server, + // pass the command and the store context. + store.GetRequestExecutor().Execute(cmd, context); +} +`} + + + + +{`// Using the store object +using (var store = new DocumentStore()) +// Allocate a context from the store's context pool for executing the command +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +{ + // Define a command + var cmd = new CreateSubscriptionCommand(store.Conventions, + new SubscriptionCreationOptions() + { + Name = "Orders subscription", + Query = "from Orders" + }); + + // Call 'ExecuteAsync' on the store's Request Executor to send the command to the server, + // pass the command and the store context. + await store.GetRequestExecutor().ExecuteAsync(cmd, context); +} +`} + + + + + + +## Execute command - using the Session Request Executor + +This example shows how to execute the low-level `GetDocumentsCommand` via the **Session**. +(For loading a document using higher-level methods, see [loading entities](../../client-api/session/loading-entities.mdx)). + + + + +{`// Using the session +using (var session = store.OpenSession()) +{ + // Define a command + var cmd = new GetDocumentsCommand(store.Conventions, "orders/1-A", null, false); + + // Call 'Execute' on the session's Request Executor to send the command to the server + // Pass the command and the 'Session.Advanced.Context' + session.Advanced.RequestExecutor.Execute(cmd, session.Advanced.Context); + + // Access the results + var blittable = (BlittableJsonReaderObject)cmd.Result.Results[0]; + + // Deserialize the blittable JSON into your typed object + var order = session.Advanced.JsonConverter.FromBlittable<Order>(ref blittable, + "orders/1-A", false); + + // Now you have a strongly-typed Order object that can be accessed + var orderedAt = order.OrderedAt; +} +`} + + + + +{`// Using the session +using (var asyncSession = store.OpenAsyncSession()) +{ + // Define a command + var cmd = new GetDocumentsCommand(store.Conventions, "orders/1-A", null, false); + + // Call 'ExecuteAsync' on the session's Request Executor to send the command to the server + // Pass the command and the 'Session.Advanced.Context' + await asyncSession.Advanced.RequestExecutor.ExecuteAsync(cmd, + asyncSession.Advanced.Context); + + // Access the results + var blittable = (BlittableJsonReaderObject)cmd.Result.Results[0]; + + // Deserialize the blittable JSON into your typed object + var order = asyncSession.Advanced.JsonConverter.FromBlittable<Order>(ref blittable, + "orders/1-A", true); + + // Now you have a strongly-typed Order object that can be accessed + var orderedAt = order.OrderedAt; +} +`} + + + + +* Note that the transaction created for the HTTP request when executing the command + is separate from the transaction initiated by the session's [SaveChanges](../../client-api/session/saving-changes.mdx) method, + even if both are called within the same code block. + +* Learn more about transactions in RavenDB in [Transaction support](../../client-api/faq/transaction-support.mdx).
+ + +## Available commands + +* **The following low-level commands, which inherit from `RavenCommand`, are available**: + + * ConditionalGetDocumentsCommand + * CreateSubscriptionCommand + * [DeleteDocumentCommand](../../client-api/commands/documents/delete.mdx) + * DeleteSubscriptionCommand + * DropSubscriptionConnectionCommand + * ExplainQueryCommand + * GetClusterTopologyCommand + * GetConflictsCommand + * GetDatabaseTopologyCommand + * [GetDocumentsCommand](../../client-api/commands/documents/get.mdx) + * GetIdentitiesCommand + * GetNextOperationIdCommand + * GetNodeInfoCommand + * GetOperationStateCommand + * GetRawStreamResultCommand + * GetRevisionsBinEntryCommand + * GetRevisionsCommand + * GetSubscriptionsCommand + * GetSubscriptionStateCommand + * GetTcpInfoCommand + * GetTrafficWatchConfigurationCommand + * HeadAttachmentCommand + * HeadDocumentCommand + * HiLoReturnCommand + * IsDatabaseLoadedCommand + * KillOperationCommand + * MultiGetCommand + * NextHiLoCommand + * NextIdentityForCommand + * [PutDocumentCommand](../../client-api/commands/documents/put.mdx) + * PutSecretKeyCommand + * QueryCommand + * QueryStreamCommand + * SeedIdentityForCommand + * [SingleNodeBatchCommand](../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx) + * WaitForRaftIndexCommand + + + +## Syntax + + + +{`void Execute<TResult>(RavenCommand<TResult> command, + JsonOperationContext context, + SessionInfo sessionInfo = null); + +Task ExecuteAsync<TResult>(RavenCommand<TResult> command, + JsonOperationContext context, + SessionInfo sessionInfo = null, + CancellationToken token = default(CancellationToken)); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/_overview-java.mdx b/versioned_docs/version-7.1/client-api/commands/_overview-java.mdx new file mode 100644 index 0000000000..5fac60b00a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/_overview-java.mdx @@ -0,0 +1,127 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB's Client API is structured in layers. + At the highest layer, you interact with the [document store](../../client-api/what-is-a-document-store.mdx) and the [document session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx), + which handle most common database tasks like loading, saving, and querying documents. + +* Beneath this high-level interface are Operations and Commands: + + * **Operations**: + + * Operations provide management functionality outside the session's context, + like creating a database, performing bulk actions, or managing server-wide configurations. + + * Learn more about Operations in [what are Operations](../../client-api/operations/what-are-operations.mdx). + + * **Commands**: + + * All high-level methods and Operations are built on top of Commands. + Commands form the lowest-level operations that directly communicate with the server. + + * For example, a session’s _Load_ method translates internally to a _LoadOperation_, + which ultimately relies on a _GetDocumentsCommand_ to fetch data from the server. + + * Commands are responsible for sending the appropriate request to the server using a `Request Executor`, + and parsing the server's response. + + * All commands can be executed using either the Store's _Request Executor_ or the Session's _Request Executor_, + regardless of whether the command is session-related or not.
+ +* This layered structure lets you work at any level, depending on your needs. + +* In this page: + * [Examples](../../client-api/commands/overview.mdx#examples) + * [Available commands](../../client-api/commands/overview.mdx#available-commands) + * [Syntax](../../client-api/commands/overview.mdx#syntax) + + +## Examples + +#### GetDocumentsCommand + + + +{`try (IDocumentSession session = store.openSession()) \{ + GetDocumentsCommand command = new GetDocumentsCommand("orders/1-A", null, false); + session.advanced().getRequestExecutor().execute(command); + ObjectNode order = (ObjectNode) command.getResult().getResults().get(0); +\} +`} + + + +#### DeleteDocumentCommand + + + +{`try (IDocumentSession session = store.openSession()) \{ + DeleteDocumentCommand command = new DeleteDocumentCommand("employees/1-A", null); + session.advanced().getRequestExecutor().execute(command); +\} +`} + + + + + +## Available commands + +* **The following low-level commands are available**: + * ConditionalGetDocumentsCommand + * CreateSubscriptionCommand + * [DeleteDocumentCommand](../../client-api/commands/documents/delete.mdx) + * DeleteSubscriptionCommand + * DropSubscriptionConnectionCommand + * ExplainQueryCommand + * GetClusterTopologyCommand + * GetConflictsCommand + * GetDatabaseTopologyCommand + * [GetDocumentsCommand](../../client-api/commands/documents/get.mdx) + * GetIdentitiesCommand + * GetNextOperationIdCommand + * GetNodeInfoCommand + * GetOperationStateCommand + * GetRawStreamResultCommand + * GetRevisionsBinEntryCommand + * GetRevisionsCommand + * GetSubscriptionsCommand + * GetSubscriptionStateCommand + * GetTcpInfoCommand + * GetTrafficWatchConfigurationCommand + * HeadAttachmentCommand + * HeadDocumentCommand + * HiLoReturnCommand + * IsDatabaseLoadedCommand + * KillOperationCommand + * MultiGetCommand + * NextHiLoCommand + * NextIdentityForCommand + * [PutDocumentCommand](../../client-api/commands/documents/put.mdx) + * PutSecretKeyCommand + * QueryCommand + * QueryStreamCommand + * SeedIdentityForCommand + * [SingleNodeBatchCommand](../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx) + * WaitForRaftIndexCommand + + + +## Syntax + + + +{`public <TResult> void execute(RavenCommand<TResult> command); + +public <TResult> void execute(RavenCommand<TResult> command, SessionInfo sessionInfo); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/_overview-nodejs.mdx b/versioned_docs/version-7.1/client-api/commands/_overview-nodejs.mdx new file mode 100644 index 0000000000..9dc3adc380 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/_overview-nodejs.mdx @@ -0,0 +1,154 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB's Client API is structured in layers. + At the highest layer, you interact with the [document store](../../client-api/what-is-a-document-store.mdx) and the [document session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx), + which handle most common database tasks like loading, saving, and querying documents. + +* Beneath this high-level interface are Operations and Commands: + + * **Operations**: + + * Operations provide management functionality outside the session's context, + like creating a database, performing bulk actions, or managing server-wide configurations. + + * Learn more about Operations in [what are Operations](../../client-api/operations/what-are-operations.mdx).
+ + * **Commands**: + + * All high-level methods and Operations are built on top of Commands. + Commands form the lowest-level operations that directly communicate with the server. + + * For example, a session’s _Load_ method translates internally to a _LoadOperation_, + which ultimately relies on a _GetDocumentsCommand_ to fetch data from the server. + + * Commands are responsible for sending the appropriate request to the server using a `Request Executor`, + and parsing the server's response. + + * All commands can be executed using either the [Store's _Request Executor_](../../client-api/commands/overview.mdx#execute-command---using-the-store-request-executor) + or the [Session's _Request Executor_](../../client-api/commands/overview.mdx#execute-command---using-the-session-request-executor), + regardless of whether the command is session-related or not. + +* This layered structure lets you work at any level, depending on your needs. + +* In this page: + * [Execute command - using the Store Request Executor](../../client-api/commands/overview.mdx#execute-command---using-the-store-request-executor) + * [Execute command - using the Session Request Executor](../../client-api/commands/overview.mdx#execute-command---using-the-session-request-executor) + * [Available commands](../../client-api/commands/overview.mdx#available-commands) + * [Syntax](../../client-api/commands/overview.mdx#syntax) + + +## Execute command - using the Store Request Executor + +This example shows how to execute the low-level `CreateSubscriptionCommand` via the **Store**. +(For examples of creating a subscription using higher-level methods, see [subscription creation examples](../../client-api/data-subscriptions/creation/examples.mdx)). + + + +{`// Define a command +const cmd = new CreateSubscriptionCommand(\{ + name: "Orders subscription", + query: "from Orders" +\}); + +// Call 'execute' on the store's Request Executor to run the command on the server +// Pass the command +await documentStore.getRequestExecutor().execute(cmd); +`} + + + + + +## Execute command - using the Session Request Executor + +This example shows how to execute the low-level `GetDocumentsCommand` via the **Session**. +(For loading a document using higher-level methods, see [loading entities](../../client-api/session/loading-entities.mdx)). + + + +{`const session = documentStore.openSession(); + +// Define a command +const cmd = new GetDocumentsCommand( + \{ conventions: documentStore.conventions, id: "orders/1-A" \}); + +// Call 'execute' on the session's Request Executor to run the command on the server +// Pass the command +await session.advanced.requestExecutor.execute(cmd); + +// Access the results +const order = cmd.result.results[0]; +const orderedAt = order.OrderedAt; +`} + + + +* Note that the transaction created for the HTTP request when executing the command + is separate from the transaction initiated by the session's [SaveChanges](../../client-api/session/saving-changes.mdx) method, + even if both are called within the same code block. + +* Learn more about transactions in RavenDB in [Transaction support](../../client-api/faq/transaction-support.mdx).
+ + +## Available commands + +* **The following low-level commands are available**: + * ConditionalGetDocumentsCommand + * CreateSubscriptionCommand + * [DeleteDocumentCommand](../../client-api/commands/documents/delete.mdx) + * DeleteSubscriptionCommand + * DropSubscriptionConnectionCommand + * ExplainQueryCommand + * GetClusterTopologyCommand + * GetConflictsCommand + * GetDatabaseTopologyCommand + * [GetDocumentsCommand](../../client-api/commands/documents/get.mdx) + * GetIdentitiesCommand + * GetNextOperationIdCommand + * GetNodeInfoCommand + * GetOperationStateCommand + * GetRawStreamResultCommand + * GetRevisionsBinEntryCommand + * GetRevisionsCommand + * GetSubscriptionsCommand + * GetSubscriptionStateCommand + * GetTcpInfoCommand + * GetTrafficWatchConfigurationCommand + * HeadAttachmentCommand + * HeadDocumentCommand + * HiLoReturnCommand + * IsDatabaseLoadedCommand + * KillOperationCommand + * MultiGetCommand + * NextHiLoCommand + * NextIdentityForCommand + * [PutDocumentCommand](../../client-api/commands/documents/put.mdx) + * PutSecretKeyCommand + * QueryCommand + * QueryStreamCommand + * SeedIdentityForCommand + * [SingleNodeBatchCommand](../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx) + * WaitForRaftIndexCommand + + + +## Syntax + + + +{`execute(command); +execute(command, sessionInfo); +execute(command, sessionInfo, executeOptions); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/batches/_category_.json b/versioned_docs/version-7.1/client-api/commands/batches/_category_.json new file mode 100644 index 0000000000..c638dbab0d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/batches/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": "Batching Commands" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/commands/batches/_how-to-send-multiple-commands-using-a-batch-csharp.mdx b/versioned_docs/version-7.1/client-api/commands/batches/_how-to-send-multiple-commands-using-a-batch-csharp.mdx new file mode 100644 index 0000000000..e92e999df4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/batches/_how-to-send-multiple-commands-using-a-batch-csharp.mdx @@ -0,0 +1,333 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the low-level `SingleNodeBatchCommand` to send **multiple commands** in a **single request** to the server. + This reduces the number of remote calls and allows several operations to share the same transaction. + +* All the commands sent in the batch are executed as a **single transaction** on the node the client communicated with. + If any command fails, the entire batch is rolled back, ensuring data integrity. + +* The commands are replicated to other nodes in the cluster only AFTER the transaction is successfully completed on that node.
+ +* In this page: + * [Examples](../../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx#examples) + * [Available batch commands](../../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx#available-batch-commands) + * [Syntax](../../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx#syntax) + + +## Examples + + + +#### Send multiple commands - using the Store's request executor: + + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor() + .ContextPool.AllocateOperationContext(out var storeContext)) +{ + // Define the list of batch commands to execute + var commands = new List<ICommandData> + { + new PutCommandData("employees/999", null, new DynamicJsonValue + { + ["FirstName"] = "James", + ["@metadata"] = new DynamicJsonValue + { + ["@collection"] = "employees" + } + }), + + new PatchCommandData("employees/2-A", null, new PatchRequest + { + Script = "this.HomePhone = 'New phone number';" + }, null), + + new DeleteCommandData("employees/3-A", null) + }; + + // Define the SingleNodeBatchCommand command + var batchCommand = new SingleNodeBatchCommand(store.Conventions, commands); + + // Execute the batch command, + // all the 3 commands defined in the list will be executed in a single transaction + store.GetRequestExecutor().Execute(batchCommand, storeContext); + + // Can access the batch command results: + var commandResults = batchCommand.Result.Results; + Assert.Equal(3, commandResults.Length); + + var blittable = (BlittableJsonReaderObject)commandResults[0]; + + blittable.TryGetMember("Type", out var commandType); + Assert.Equal("PUT", commandType.ToString()); + + blittable.TryGetMember("@id", out var documentId); + Assert.Equal("employees/999", documentId.ToString()); +} +`} + + + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor() + .ContextPool.AllocateOperationContext(out var storeContext)) +{ + // Define the list of batch commands to execute + var commands = new List<ICommandData> + { + new PutCommandData("employees/999", null, new DynamicJsonValue + { + ["FirstName"] = "James", + ["@metadata"] = new DynamicJsonValue + { + ["@collection"] = "employees" + } + }), + + new PatchCommandData("employees/2-A", null, new PatchRequest + { + Script = "this.HomePhone = 'New phone number';" + }, null), + + new DeleteCommandData("employees/3-A", null) + }; + + // Define the SingleNodeBatchCommand command + var batchCommand = new SingleNodeBatchCommand(store.Conventions, + commands); + + // Execute the batch command, + // all the 3 commands defined in the list will be executed in a single transaction + await store.GetRequestExecutor().ExecuteAsync(batchCommand, storeContext); + + // Can access the batch command results: + var commandResults = batchCommand.Result.Results; + Assert.Equal(3, commandResults.Length); + + var blittable = (BlittableJsonReaderObject)commandResults[0]; + + blittable.TryGetMember("Type", out var commandType); + Assert.Equal("PUT", commandType.ToString()); + + blittable.TryGetMember("@id", out var documentId); + Assert.Equal("employees/999", documentId.ToString()); +} +`} + + + + + + + +#### Send multiple commands - using the Session's request executor: +* `SingleNodeBatchCommand` can also be executed using the session's request executor.
+ +* Note that the transaction created for the HTTP request when executing `SingleNodeBatchCommand` + is separate from the transaction initiated by the session's [SaveChanges](../../../client-api/session/saving-changes.mdx) method, even if both are called within the same code block. + Learn more about transactions in RavenDB in [Transaction support](../../../client-api/faq/transaction-support.mdx). + + + + +{`using (var session = store.OpenSession()) +{ + // Define the list of batch commands to execute + var commands = new List + { + new PutCommandData("employees/999", null, new DynamicJsonValue + { + ["FirstName"] = "James", + ["@metadata"] = new DynamicJsonValue + { + ["@collection"] = "employees" + } + }), + + new PatchCommandData("employees/2-A", null, new PatchRequest + { + Script = "this.HomePhone = 'New phone number';" + }, null), + + new DeleteCommandData("employees/3-A", null) + }; + + // Define the SingleNodeBatchCommand command + var batchCommand = new SingleNodeBatchCommand(store.Conventions, + commands); + + // Execute the batch command, + // all the 3 commands defined in the list will be executed in a single transaction + session.Advanced.RequestExecutor.Execute(batchCommand, session.Advanced.Context); + + // Can access the batch command results: + var commandResults = batchCommand.Result.Results; + Assert.Equal(3, commandResults.Length); + + var blittable = (BlittableJsonReaderObject)commandResults[0]; + + blittable.TryGetMember("Type", out var commandType); + Assert.Equal("PUT", commandType.ToString()); + + blittable.TryGetMember("@id", out var documentId); + Assert.Equal("employees/999", documentId.ToString()); +} +`} + + + + +{`using (var session = store.OpenAsyncSession()) +{ + // Define the list of batch commands to execute + var commands = new List + { + new PutCommandData("employees/999", null, new DynamicJsonValue + { + ["FirstName"] = "James", + ["@metadata"] = new DynamicJsonValue + { + ["@collection"] = "employees" + } + }), + + new PatchCommandData("employees/2-A", null, new PatchRequest + { + Script = "this.HomePhone = 'New phone number';" + }, null), + + new DeleteCommandData("employees/3-A", null) + }; + + // Define the SingleNodeBatchCommand command + var batchCommand = new SingleNodeBatchCommand(store.Conventions, + commands); + + // Execute the batch command, + // all the 3 commands defined in the list will be executed in a single transaction + await session.Advanced.RequestExecutor.ExecuteAsync( + batchCommand, session.Advanced.Context); + + // Can access the batch command results: + var commandResults = batchCommand.Result.Results; + Assert.Equal(3, commandResults.Length); + + var blittable = (BlittableJsonReaderObject)commandResults[0]; + + blittable.TryGetMember("Type", out var commandType); + Assert.Equal("PUT", commandType.ToString()); + + blittable.TryGetMember("@id", out var documentId); + Assert.Equal("employees/999", documentId.ToString()); +} +`} + + + + + + + +## Available batch commands + +**The following commands can be sent in a batch via `SingleNodeBatchCommand`**: +(These commands implement the `ICommandData` interface). 
+ + * BatchPatchCommandData + * CopyAttachmentCommandData + * CountersBatchCommandData + * DeleteAttachmentCommandData + * DeleteCommandData + * DeleteCompareExchangeCommandData + * DeletePrefixedCommandData + * ForceRevisionCommandData + * IncrementalTimeSeriesBatchCommandData + * JsonPatchCommandData + * MoveAttachmentCommandData + * PatchCommandData + * PutAttachmentCommandData + * PutCommandData + * PutCompareExchangeCommandData + * TimeSeriesBatchCommandData + + + +## Syntax + + + +{`public SingleNodeBatchCommand(
    DocumentConventions conventions,
    IList<ICommandData> commands,
    BatchOptions options = null)
`}



{`public class BatchOptions
\{
    public TimeSpan? RequestTimeout \{ get; set; \}
    public ReplicationBatchOptions ReplicationOptions \{ get; set; \}
    public IndexBatchOptions IndexOptions \{ get; set; \}
    public ShardedBatchOptions ShardedOptions \{ get; set; \}
\}

public class ReplicationBatchOptions
\{
    // If set to true,
    // will wait for replication to be performed on at least a majority of DB instances.
    public bool WaitForReplicas \{ get; set; \}

    public int NumberOfReplicasToWaitFor \{ get; set; \}
    public TimeSpan WaitForReplicasTimeout \{ get; set; \}
    public bool Majority \{ get; set; \}
    public bool ThrowOnTimeoutInWaitForReplicas \{ get; set; \}
\}

public sealed class IndexBatchOptions
\{
    public bool WaitForIndexes \{ get; set; \}
    public TimeSpan WaitForIndexesTimeout \{ get; set; \}
    public bool ThrowOnTimeoutInWaitForIndexes \{ get; set; \}
    public string[] WaitForSpecificIndexes \{ get; set; \}
\}

public class ShardedBatchOptions
\{
    public ShardedBatchBehavior BatchBehavior \{ get; set; \}
\}
`}



{`// Executing \`SingleNodeBatchCommand\` returns the following object:
// ================================================================

public class BatchCommandResult
\{
    public BlittableJsonReaderArray Results \{ get; set; \}
    public long? TransactionIndex \{ get; set; \}
\}

public sealed class BlittableArrayResult
\{
    public BlittableJsonReaderArray Results \{ get; set; \}
    public long TotalResults \{ get; set; \}
    public string ContinuationToken \{ get; set; \}
\}
`}

 + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/batches/_how-to-send-multiple-commands-using-a-batch-java.mdx b/versioned_docs/version-7.1/client-api/commands/batches/_how-to-send-multiple-commands-using-a-batch-java.mdx new file mode 100644 index 0000000000..8a2c048c6f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/batches/_how-to-send-multiple-commands-using-a-batch-java.mdx @@ -0,0 +1,75 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To send **multiple commands** in a **single request**, reducing the number of remote calls and allowing several operations to share the **same transaction**, use `BatchCommand`.
+ +## Syntax + + + +{`public BatchCommand(DocumentConventions conventions, List<ICommandData> commands, BatchOptions options)
`}

 + +### The following commands can be sent using a batch + +* DeleteCommandData +* DeletePrefixedCommandData +* PutCommandData +* PatchCommandData +* DeleteAttachmentCommandData +* PutAttachmentCommandData + +### Batch Options + + + +{`public class BatchOptions \{
    private boolean waitForReplicas;
    private int numberOfReplicasToWaitFor;
    private Duration waitForReplicasTimeout;
    private boolean majority;
    private boolean throwOnTimeoutInWaitForReplicas;

    private boolean waitForIndexes;
    private Duration waitForIndexesTimeout;
    private boolean throwOnTimeoutInWaitForIndexes;
    private String[] waitForSpecificIndexes;

    // getters and setters
\}
`}

 + + +## Example + + + +{`try (IDocumentSession session = documentStore.openSession()) \{

    ObjectNode user3 = mapper.createObjectNode();
    user3.put("Name", "James");

    PutCommandDataWithJson user3Cmd = new PutCommandDataWithJson("users/3", null, user3);

    DeleteCommandData deleteCmd = new DeleteCommandData("users/2-A", null);
    List<ICommandData> commands = Arrays.asList(user3Cmd, deleteCmd);

    BatchCommand batch = new BatchCommand(documentStore.getConventions(), commands);
    session.advanced().getRequestExecutor().execute(batch);

\}
`}

 + + +All the commands in the batch will succeed or fail as a **transaction**. Other users will not be able to see any of the changes until the entire batch completes. + + + diff --git a/versioned_docs/version-7.1/client-api/commands/batches/_how-to-send-multiple-commands-using-a-batch-nodejs.mdx b/versioned_docs/version-7.1/client-api/commands/batches/_how-to-send-multiple-commands-using-a-batch-nodejs.mdx new file mode 100644 index 0000000000..986fd6ac70 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/batches/_how-to-send-multiple-commands-using-a-batch-nodejs.mdx @@ -0,0 +1,192 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the low-level `SingleNodeBatchCommand` to send **multiple commands** in a **single request** to the server. + This reduces the number of remote calls and allows several operations to share the same transaction. + +* All the commands sent in the batch are executed as a **single transaction** on the node the client communicated with. + If any command fails, the entire batch is rolled back, ensuring data integrity. + +* The commands are replicated to other nodes in the cluster only AFTER the transaction is successfully completed on that node.
+ +* In this page: + * [Examples](../../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx#examples) + * [Available batch commands](../../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx#available-batch-commands) + * [Syntax](../../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx#syntax) + + +## Examples + + + +#### Send multiple commands - using the Store's request executor: + + +{`// This patch request will be used in the following 'PatchCommandData' command +let patchRequest = new PatchRequest(); +patchRequest.script = "this.HomePhone = 'New phone number'"; + +// Define the list of batch commands to execute +const commands = [ + new PutCommandDataBase("employees/999", null, null, \{ + FirstName: "James", + "@metadata": \{ + "@collection": "employees" + \} + \}), + + new PatchCommandData("employees/2-A", null, patchRequest), + + new DeleteCommandData("employees/3-A", null) +]; + +// Define the 'SingleNodeBatchCommand' command +const batchCommand = new SingleNodeBatchCommand(documentStore.conventions, commands); + +// Execute the batch command, +// all the 3 commands defined in the list will be executed in a single transaction +await documentStore.getRequestExecutor().execute(batchCommand); + +// Can access the batch command results +const commandResults = batchCommand.result.results; +assert.equal(commandResults.length, 3); +assert.equal(commandResults[0].type, "PUT"); +assert.equal(commandResults[0]["@id"], "employees/999"); +`} + + + + + + +#### Send multiple commands - using the Session's request executor: +* `SingleNodeBatchCommand` can also be executed using the session's request executor. + +* Note that the transaction created for the HTTP request when executing `SingleNodeBatchCommand` + is separate from the transaction initiated by the session's [saveChanges](../../../client-api/session/saving-changes.mdx) method, even if both are called within the same code block. + Learn more about transactions in RavenDB in [Transaction support](../../../client-api/faq/transaction-support.mdx). 
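+For illustration only, here is a minimal sketch of that separation (assuming the Northwind sample documents and the `SingleNodeBatchCommand` / `DeleteCommandData` classes shown on this page): the batch command commits in its own transaction as soon as it is executed, while the session's pending change is persisted only when `saveChanges` is called.

{`const session = documentStore.openSession();

// A pending session change - not yet persisted
const employee = await session.load("employees/1-A");
employee.Title = "Updated title";

// The batch command commits in its own transaction immediately,
// independently of the pending session change above
const batchCommand = new SingleNodeBatchCommand(documentStore.conventions, [
    new DeleteCommandData("employees/3-A", null)
]);
await session.advanced.requestExecutor.execute(batchCommand);

// Only now is the session change persisted - in a second, separate transaction
await session.saveChanges();
`}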
+ + + +{`const session = documentStore.openSession(); + +// This patch request will be used in the following 'PatchCommandData' command +let patchRequest = new PatchRequest(); +patchRequest.script = "this.HomePhone = 'New phone number'"; + +// Define the list of batch commands to execute +const commands = [ + new PutCommandDataBase("employees/999", null, null, \{ + FirstName: "James", + "@metadata": \{ + "@collection": "employees" + \} + \}), + + new PatchCommandData("employees/2-A", null, patchRequest), + + new DeleteCommandData("employees/3-A", null) +]; + +// Define the 'SingleNodeBatchCommand' command +const batchCommand = new SingleNodeBatchCommand(documentStore.conventions, commands); + +// Execute the batch command, +// all the 3 commands defined in the list will be executed in a single transaction +await session.advanced.requestExecutor.execute(batchCommand); + +// Can access the batch command results +const commandResults = batchCommand.result.results; +assert.equal(commandResults.length, 3); +assert.equal(commandResults[0].type, "PUT"); +assert.equal(commandResults[0]["@id"], "employees/999"); +`} + + + + + + +## Available batch commands + +* **The following commands can be sent in a batch via `SingleNodeBatchCommand`**: + + * BatchPatchCommandData + * CopyAttachmentCommandData + * CountersBatchCommandData + * DeleteAttachmentCommandData + * DeleteCommandData + * DeleteCompareExchangeCommandData + * DeletePrefixedCommandData + * ForceRevisionCommandData + * IncrementalTimeSeriesBatchCommandData + * JsonPatchCommandData + * MoveAttachmentCommandData + * PatchCommandData + * PutAttachmentCommandData + * PutCommandData + * PutCompareExchangeCommandData + * TimeSeriesBatchCommandData + + + +## Syntax + + + +{`SingleNodeBatchCommand(conventions, commands); +SingleNodeBatchCommand(conventions, commands, batchOptions); +`} + + + + +{`// The batchOptions object: +\{ + replicationOptions; // ReplicationBatchOptions + indexOptions; // IndexBatchOptions + shardedOptions; // ShardedBatchOptions +\} + +// The ReplicationBatchOptions object: +\{ + timeout?; // number + throwOnTimeout?; // boolean + replicas?; // number + majority?; // boolean +\} + +// The IndexBatchOptions object: +\{ + timeout?; // number + throwOnTimeout?; // boolean + indexes?; // string[] +\} + +// The ShardedBatchOptions object: +\{ + batchBehavior; // ShardedBatchBehavior +\} +`} + + + + +{`// Executing \`SingleNodeBatchCommand\` returns the following object: +// ================================================================ + +class BatchCommandResult \{ + results; // any[] + transactionIndex; // number +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx b/versioned_docs/version-7.1/client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx new file mode 100644 index 0000000000..5faa99697f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx @@ -0,0 +1,47 @@ +--- +title: "Send Multiple Commands in a Batch" +hide_table_of_contents: true +sidebar_label: Send Multiple Commands +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToSendMultipleCommandsUsingABatchCsharp from './_how-to-send-multiple-commands-using-a-batch-csharp.mdx'; +import HowToSendMultipleCommandsUsingABatchJava from 
'./_how-to-send-multiple-commands-using-a-batch-java.mdx'; +import HowToSendMultipleCommandsUsingABatchNodejs from './_how-to-send-multiple-commands-using-a-batch-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_category_.json b/versioned_docs/version-7.1/client-api/commands/documents/_category_.json new file mode 100644 index 0000000000..65668e24f1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 1, + "label": "Document Commands" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_delete-csharp.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_delete-csharp.mdx new file mode 100644 index 0000000000..295eacedb8 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_delete-csharp.mdx @@ -0,0 +1,149 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the low-level `DeleteDocumentCommand` to remove a document from the database. + +* To delete a document using a higher-level method, see [deleting entities](../../../client-api/session/deleting-entities.mdx). + +* In this page: + + * [Examples](../../../client-api/commands/documents/delete.mdx#examples) + * [Syntax](../../../client-api/commands/documents/delete.mdx#syntax) + + +## Examples + + + +**Delete document command - using the Store's request executor**: + + + +{`using (var store = new DocumentStore())
using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context))
{
    var command = new DeleteDocumentCommand("employees/1-A", null);
    store.GetRequestExecutor().Execute(command, context);
}
`}



{`using (var store = new DocumentStore())
using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context))
{
    var command = new DeleteDocumentCommand("employees/1-A", null);
    await store.GetRequestExecutor().ExecuteAsync(command, context);
}
`}

 + + + + +**Delete document command - using the Session's request executor**: + + + +{`var command = new DeleteDocumentCommand("employees/1-A", null);
session.Advanced.RequestExecutor.Execute(command, session.Advanced.Context);
`}



{`var command = new DeleteDocumentCommand("employees/1-A", null);
await asyncSession.Advanced.RequestExecutor.ExecuteAsync(command, asyncSession.Advanced.Context);
`}

 + + + + +**Delete document command - with concurrency check**: + + + +{`// Load a document
var employeeDocument = session.Load<Employee>("employees/2-A");
var cv = session.Advanced.GetChangeVectorFor(employeeDocument);

// Modify the document content and save changes
// The change-vector of the stored document will change
employeeDocument.Title = "Some new title";
session.SaveChanges();

try
{
    // Try to delete the document with the previous change-vector
    var command = new DeleteDocumentCommand("employees/2-A", cv);
    session.Advanced.RequestExecutor.Execute(command, session.Advanced.Context);
}
catch (Exception e)
{
    // A concurrency exception is thrown
    // since the change-vector of the document in the database
    // does not match the change-vector specified in the delete command
    Assert.IsType<ConcurrencyException>(e);
}
`}



{`// Load a document
var employeeDocument = await asyncSession.LoadAsync<Employee>("employees/2-A");
var cv = asyncSession.Advanced.GetChangeVectorFor(employeeDocument);

// Modify the document content and save changes
// The change-vector of the stored document will change
employeeDocument.Title = "Some new title";
await asyncSession.SaveChangesAsync();

try
{
    // Try to delete the document with the previous change-vector
    var command = new DeleteDocumentCommand("employees/2-A", cv);
    await asyncSession.Advanced.RequestExecutor.ExecuteAsync(command, asyncSession.Advanced.Context);
}
catch (Exception e)
{
    // A concurrency exception is thrown
    // since the change-vector of the document in the database
    // does not match the change-vector specified in the delete command
    Assert.IsType<ConcurrencyException>(e);
}
`}

 + + + + +## Syntax + + + +{`public DeleteDocumentCommand(string id, string changeVector)
`}

 + +| Parameter | Type | Description | +|---|---|---| +| **id** | `string` | The ID of the document to delete. | +| **changeVector** | `string` | The change-vector of the document you wish to delete,
used for [optimistic concurrency control](../../../server/clustering/replication/change-vector.mdx#concurrency-control--change-vectors).
Pass `null` to skip the check and force the deletion. | + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_delete-java.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_delete-java.mdx new file mode 100644 index 0000000000..b71810c017 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_delete-java.mdx @@ -0,0 +1,32 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**Delete** is used to remove a document from a database. + +## Syntax + + + +{`public DeleteDocumentCommand(String id, String changeVector) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **id** | `String` | ID of a document to be deleted | +| **changeVector** | `String` | Entity Change Vector, used for concurrency checks (`null` to skip check) | + +## Example + + + +{`DeleteDocumentCommand command = new DeleteDocumentCommand("employees/1-A", null); +session.advanced().getRequestExecutor().execute(command); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_delete-nodejs.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_delete-nodejs.mdx new file mode 100644 index 0000000000..6bdbf75f57 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_delete-nodejs.mdx @@ -0,0 +1,98 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the low-level `DeleteDocumentCommand` to remove a document from the database. + +* To delete a document using a higher-level method, see [deleting entities](../../../client-api/session/deleting-entities.mdx). 
+ +* In this page: + + * [Examples](../../../client-api/commands/documents/delete.mdx#examples) + * [Syntax](../../../client-api/commands/documents/delete.mdx#syntax) + + +## Examples + + + +**Delete document command - using the Store's request executor**: + + +{`// Define the Delete Command +// Pass the document ID & whether to make a concurrency check +const command = new DeleteDocumentCommand("employees/1-A", null); + +// Send the command to the server using the Store's Request Executor +await documentStore.getRequestExecutor().execute(command); +`} + + + + + + +**Delete document command - using the Session's request executor**: + + +{`const command = new DeleteDocumentCommand("employees/1-A", null); + +// Send the command to the server using the Session's Request Executor +await session.advanced.requestExecutor.execute(command); +`} + + + + + + +**Delete document command - with concurrency check**: + + +{`// Load a document +const employeeDocument = await session.load('employees/2-A'); +const cv = session.advanced.getChangeVectorFor(employeeDocument); + +// Modify the document content and save changes +// The change-vector of the stored document will change +employeeDocument.Title = "Some new title"; +await session.saveChanges(); + +try \{ + // Try to delete the document with the previous change-vector + const command = new DeleteDocumentCommand("employees/2-A", cv); + await session.advanced.requestExecutor.execute(command); +\} +catch (err) \{ + // A concurrency exception is thrown + // since the change-vector of the document in the database + // does not match the change-vector specified in the delete command + assert.equal(err.name, "ConcurrencyException"); +\} +`} + + + + + + +## Syntax + + + +{`DeleteDocumentCommand(id, changeVector); +`} + + + +| Parameter | Type | Description | +|------------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **id** | `string` | The ID of the document to delete. | +| **changeVector** | `string` | The change-vector of the document you wish to delete,
used for [optimistic concurrency control](../../../server/clustering/replication/change-vector.mdx#concurrency-control--change-vectors).
Pass `null` to skip the check and force the deletion. | + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_delete-php.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_delete-php.mdx new file mode 100644 index 0000000000..1665aa3ea5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_delete-php.mdx @@ -0,0 +1,45 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteDocumentCommand` to remove a document from the database. + +* In this page: + + * [Example](../../../client-api/commands/documents/delete.mdx#example) + * [Syntax](../../../client-api/commands/documents/delete.mdx#syntax) + + +## Example + + + +{`$command = new DeleteDocumentCommand("employees/1-A", null);
$session->advanced()->getRequestExecutor()->execute($command);
`}

 + + + + +## Syntax + + + +{`DeleteDocumentCommand(?string $idOrCopy, ?string $changeVector = null);
`}

 + +| Parameters | Type | Description | +|------------|------|-------------| +| **idOrCopy** | `string` | ID of a document to be deleted | +| **changeVector** | `string` (optional) | Entity Change Vector, used for concurrency checks (`null` to skip check) | + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_delete-python.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_delete-python.mdx new file mode 100644 index 0000000000..bcb7560c07 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_delete-python.mdx @@ -0,0 +1,46 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteDocumentCommand` to remove a document from the database. + +* In this page: + + * [Example](../../../client-api/commands/documents/delete.mdx#example) + * [Syntax](../../../client-api/commands/documents/delete.mdx#syntax) + + +## Example + + + +{`command = DeleteDocumentCommand("employees/1-A", None)
session.advanced.request_executor.execute_command(command)
`}

 + + + + +## Syntax + + + +{`class DeleteDocumentCommand(VoidRavenCommand):
    def __init__(self, key: str, change_vector: Optional[str] = None): ...
`}

 + +| Parameters | Type | Description | +|------------|------|-------------| +| **key** | `str` | ID of a document to be deleted | +| **change_vector** | `str` (optional) | Entity Change Vector, used for concurrency checks (`None` to skip check) | + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_get-csharp.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_get-csharp.mdx new file mode 100644 index 0000000000..547e4e59ac --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_get-csharp.mdx @@ -0,0 +1,691 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the low-level `GetDocumentsCommand` to retrieve documents from the database. + +* To retrieve documents using a higher-level method, see [loading entities](../../../client-api/session/loading-entities.mdx) or [query for documents](../../../client-api/session/querying/how-to-query.mdx).
+ +* In this page: + - [Get single document](../../../client-api/commands/documents/get.mdx#get-single-document) + - [Get multiple documents](../../../client-api/commands/documents/get.mdx#get-multiple-documents) + - [Get metadata only](../../../client-api/commands/documents/get.mdx#get-metadata-only) + - [Get paged documents](../../../client-api/commands/documents/get.mdx#get-paged-documents) + - [Get documents - by ID prefix](../../../client-api/commands/documents/get.mdx#get-documents---by-id-prefix) + - [Get documents - with includes](../../../client-api/commands/documents/get.mdx#get-documents---with-includes) + - [Syntax](../../../client-api/commands/documents/get.mdx#syntax) + + +## Get single document + +* The following examples demonstrate how to retrieve a document using either the _Store's request executor_ + or the _Session's request executor_. +* The examples in the rest of the article use the _Store's request executor_, but you can apply the Session's implementation shown here to ALL cases. + + +**Get document command - using the Store's request executor**: + + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +{ + // Define the 'GetDocumentsCommand' + var command = new GetDocumentsCommand(store.Conventions, + "orders/1-A", null, metadataOnly: false); + + // Call 'Execute' on the Store's Request Executor to send the command to the server + store.GetRequestExecutor().Execute(command, context); + + // Access the results + var blittable = (BlittableJsonReaderObject)command.Result.Results[0]; + + // Deserialize the blittable JSON into a strongly-typed 'Order' object + var order = (Order)store.Conventions.Serialization.DefaultConverter + .FromBlittable(typeof(Order), blittable); + + var orderedAt = order.OrderedAt; +} +`} + + + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +{ + // Define the 'GetDocumentsCommand' + var command = new GetDocumentsCommand(store.Conventions, + "orders/1-A", null, metadataOnly: false); + + // Call 'ExecuteAsync' on the Store's Request Executor to send the command to the server + await store.GetRequestExecutor().ExecuteAsync(command, context); + + // Access the results + var blittable = (BlittableJsonReaderObject)command.Result.Results[0]; + + // Deserialize the blittable JSON into a strongly-typed 'Order' object + var order = (Order)store.Conventions.Serialization.DefaultConverter + .FromBlittable(typeof(Order), blittable); + + var orderedAt = order.OrderedAt; +} +`} + + + + + + + +**Get document command - using the Session's request executor**: + + + +{`using (var store = new DocumentStore()) +using (var session = store.OpenSession()) +{ + // Define the 'GetDocumentsCommand' + var command = new GetDocumentsCommand(store.Conventions, + "orders/1-A", null, metadataOnly: false); + + // Call 'Execute' on the Session's Request Executor to send the command to the server + session.Advanced.RequestExecutor.Execute(command, session.Advanced.Context); + + // Access the results + var blittable = (BlittableJsonReaderObject)command.Result.Results[0]; + + // Deserialize the blittable JSON into a strongly-typed 'Order' object + // Setting the last param to 'true' will cause the session to track the 'Order' entity + var order = session.Advanced.JsonConverter.FromBlittable(ref blittable, + "orders/1-A", trackEntity: true); + + var orderedAt = order.OrderedAt; +} +`} + + + + +{`using (var store = 
new DocumentStore()) +using (var asyncSession = store.OpenAsyncSession()) +{ + // Define the 'GetDocumentsCommand' + var command = new GetDocumentsCommand(store.Conventions, + "orders/1-A", null, metadataOnly: false); + + // Call 'ExecuteAsync' on the Session's Request Executor to send the command to the server + await asyncSession.Advanced.RequestExecutor.ExecuteAsync( + command, asyncSession.Advanced.Context); + + // Access the results + var blittable = (BlittableJsonReaderObject)command.Result.Results[0]; + + // Deserialize the blittable JSON into a strongly-typed 'Order' object + // Setting the last param to 'true' will cause the session to track the 'Order' entity + var order = asyncSession.Advanced.JsonConverter.FromBlittable(ref blittable, + "orders/1-A", trackEntity: true); + + var orderedAt = order.OrderedAt; +} +`} + + + + + + + +## Get multiple documents + + + +**Get multiple documents**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Pass a list of document IDs to the get command + var command = new GetDocumentsCommand(store.Conventions, + new[] \{ "orders/1-A", "employees/2-A", "products/1-A" \}, null, false); + + store.GetRequestExecutor().Execute(command, context); + + // Access results + var orderBlittable = (BlittableJsonReaderObject)command.Result.Results[0]; + var orderDocument = (Order)store.Conventions.Serialization.DefaultConverter + .FromBlittable(typeof(Order), orderBlittable); + + var employeeBlittable = (BlittableJsonReaderObject)command.Result.Results[1]; + var employeeDocument = (Employee)store.Conventions.Serialization.DefaultConverter + .FromBlittable(typeof(Employee), orderBlittable); + + var productBlittable = (BlittableJsonReaderObject)command.Result.Results[2]; + var productDocument = (Product)store.Conventions.Serialization.DefaultConverter + .FromBlittable(typeof(Product), productBlittable); +\} +`} + + + + + + +**Get multiple documents - missing documents**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Assuming that employees/9999-A doesn't exist + var command = new GetDocumentsCommand(store.Conventions, + new[] \{ "orders/1-A", "employees/9999-A", "products/3-A" \}, null, false); + + store.GetRequestExecutor().Execute(command, context); + + // Results will contain 'null' for any missing document + var results = command.Result.Results; // orders/1-A, null, products/3-A + Assert.Null(results[1]); +\} +`} + + + + + + +## Get metadata only + + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Pass 'true' in the 'metadataOnly' param to retrieve only the document METADATA + var command = new GetDocumentsCommand(store.Conventions, + "orders/1-A", null, metadataOnly: true); + + store.GetRequestExecutor().Execute(command, context); + + // Access results + var blittable = (BlittableJsonReaderObject)command.Result.Results[0]; + var documentMetadata = (BlittableJsonReaderObject)blittable["@metadata"]; + + // Print out all metadata properties + foreach (var propertyName in documentMetadata.GetPropertyNames()) + \{ + documentMetadata.TryGet(propertyName, out var propertyValue); + Console.WriteLine("\{0\} = \{1\}", propertyName, propertyValue); + \} +\} +`} + + + + + +## Get paged documents + +* You can retrieve documents in pages by specifying how many documents to skip and how 
many to fetch. +* Using this overload, no specific collection is specified, the documents will be fetched from ALL collections. + + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Specify the number of documents to skip (start) + // and the number of documents to get (pageSize) + var command = new GetDocumentsCommand(start: 0, pageSize: 128); + + store.GetRequestExecutor().Execute(command, context); + + // The documents are sorted by the last modified date, + // with the most recent modifications appearing first. + var firstDocs = command.Result.Results; +\} +`} + + + + + +## Get documents - by ID prefix + + + +**Retrieve documents that match a specified ID prefix**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Return up to 50 documents with ID that starts with 'products/' + var command = new GetDocumentsCommand(store.Conventions, + startWith: "products/", + startAfter: null, + matches: null, + exclude: null, + start: 0, + pageSize: 50, + metadataOnly: false); + + store.GetRequestExecutor().Execute(command, context); + + // Access a Product document + var blittable = (BlittableJsonReaderObject)command.Result.Results[0]; + var product = (Product)store.Conventions.Serialization.DefaultConverter + .FromBlittable(typeof(Product), blittable); +\} +`} + + + + + + +**Retrieve documents that match a specified ID prefix - with "matches" pattern**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Return up to 50 documents with IDs that start with 'orders/' + // and the rest of the ID either begins with '23', + // or contains any character at the 1st position and ends with '10-A' + // e.g. orders/234-A, orders/810-A + var command = new GetDocumentsCommand(store.Conventions, + startWith: "orders/", + startAfter: null, + matches: "23*|?10-A", + exclude: null, + start: 0, + pageSize: 50, + metadataOnly: false); + + store.GetRequestExecutor().Execute(command, context); + + // Access an Order document + var blittable = (BlittableJsonReaderObject)command.Result.Results[0]; + var order = (Order)store.Conventions.Serialization.DefaultConverter + .FromBlittable(typeof(Order), blittable); + + Assert.True(order.Id.StartsWith("orders/23") || + Regex.IsMatch(order.Id, @"^orders/.\{1\}10-A$")); +\} +`} + + + + + + +**Retrieve documents that match a specified ID prefix - with "exclude" pattern**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Return up to 50 documents with IDs that start with 'orders/' + // and the rest of the ID excludes documents ending with '10-A', + // e.g. 
will return orders/820-A, but not orders/810-A + var command = new GetDocumentsCommand(store.Conventions, + startWith: "orders/", + startAfter: null, + matches: null, + exclude: "*10-A", + start: 0, + pageSize: 50, + metadataOnly: false); + + store.GetRequestExecutor().Execute(command, context); + + // Access an Order document + var blittable = (BlittableJsonReaderObject)command.Result.Results[0]; + var order = (Order)store.Conventions.Serialization.DefaultConverter + .FromBlittable(typeof(Order), blittable); + + Assert.True(order.Id.StartsWith("orders/") && !order.Id.EndsWith("10-A")); +\} +`} + + + + + + +## Get documents - with includes + + + +**Include related documents**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Fetch document products/77-A and include its related Supplier document + var command = new GetDocumentsCommand(store.Conventions, + id:"products/77-A", + includes: new[] \{ "Supplier" \}, + metadataOnly: false); + + store.GetRequestExecutor().Execute(command, context); + + var productBlittable = (BlittableJsonReaderObject)command.Result.Results[0]; + if (productBlittable.TryGet("Supplier", out var supplierId)) + \{ + // Access the related document that was included + var supplierBlittable = + (BlittableJsonReaderObject)command.Result.Includes[supplierId]; + + var supplier = (Supplier)store.Conventions.Serialization.DefaultConverter + .FromBlittable(typeof(Supplier), supplierBlittable); + \} +\} +`} + + + + + + +**Include counters**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Fetch document products/77-A and include the specified counters + var command = new GetDocumentsCommand(store.Conventions, + ids:new[] \{"products/77-A"\}, + includes: null, + // Pass the names of the counters to include. In this example, + // the counter names in RavenDB's sample data are stars... 
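        // (To include ALL counters of the document instead of listing their names,
        // use the overload with the 'includeAllCounters' flag - see Syntax below.)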
+ counterIncludes: new[] \{ "⭐", "⭐⭐" \}, + timeSeriesIncludes: null, + compareExchangeValueIncludes: null, + metadataOnly: false); + + store.GetRequestExecutor().Execute(command, context); + + // Access the included counters results + var counters = (BlittableJsonReaderObject)command.Result.CounterIncludes; + var countersBlittableArray = + (BlittableJsonReaderArray)counters["products/77-A"]; + + var counter = (BlittableJsonReaderObject)countersBlittableArray[0]; + var counterName = counter["CounterName"]; + var counterValue = counter["TotalValue"]; +\} +`} + + + + + + +**Include time series**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Fetch document employees/1-A and include the specified time series + var command = new GetDocumentsCommand(store.Conventions, + ids:new[] \{"employees/1-A"\}, + includes: null, + counterIncludes: null, + // Specify the time series name and the time range + timeSeriesIncludes: new[] \{ new TimeSeriesRange + \{ + Name = "HeartRates", + From = DateTime.MinValue, + To = DateTime.MaxValue + \} \}, + compareExchangeValueIncludes:null, + metadataOnly: false); + + store.GetRequestExecutor().Execute(command, context); + + // Access the included time series results + var timeSeriesBlittable = + (BlittableJsonReaderObject)command.Result.TimeSeriesIncludes["employees/1-A"]; + + var timeSeriesBlittableArray = + (BlittableJsonReaderArray)timeSeriesBlittable["HeartRates"]; + + var ts = (BlittableJsonReaderObject)timeSeriesBlittableArray[0]; + var entries = (BlittableJsonReaderArray)ts["Entries"]; + + var tsEntry = (BlittableJsonReaderObject)entries[0]; + var entryTimeStamp = tsEntry["Timestamp"]; + var entryValues = tsEntry["Values"]; +\} +`} + + + + + + +**Include revisions**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Fetch document orders/826-A and include the specified revisions + var command = new GetDocumentsCommand(store.Conventions, + ids:new[] \{"orders/826-A"\}, + includes: null, + counterIncludes: null, + // Specify list of document fields (part of document orders/826-A), + // where each field is expected to contain the change-vector + // of the revision you wish to include. + revisionsIncludesByChangeVector: new[] + \{ + "RevisionChangeVectorField1", + "RevisionChangeVectorField2" + \}, + revisionIncludeByDateTimeBefore: null, + timeSeriesIncludes: null, + compareExchangeValueIncludes: null, + metadataOnly: false); + + store.GetRequestExecutor().Execute(command, context); + + // Access the included revisions + var revisions = (BlittableJsonReaderArray)command.Result.RevisionIncludes; + + var revisionObj = (BlittableJsonReaderObject)revisions[0]; + var revision = (BlittableJsonReaderObject)revisionObj["Revision"]; +\} +`} + + + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Fetch document orders/826-A and include the specified revisions + var command = new GetDocumentsCommand(store.Conventions, + ids:new[] \{"orders/826-A"\}, + includes: null, + counterIncludes: null, + // Another option is to specify a single document field (part of document orders/826-A). + // This field is expected to contain a list of all the change-vectors + // for the revisions you wish to include. 
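        // (Alternatively, the 'revisionIncludeByDateTimeBefore' parameter can be used
        // to include the most recent revision created before a given date - see Syntax below.)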
+ revisionsIncludesByChangeVector: new[] + \{ + "RevisionsChangeVectors" + \}, + revisionIncludeByDateTimeBefore: null, + timeSeriesIncludes: null, + compareExchangeValueIncludes: null, + metadataOnly: false); + + store.GetRequestExecutor().Execute(command, context); + + // Access the included revisions + var revisions = (BlittableJsonReaderArray)command.Result.RevisionIncludes; + + var revisionObj = (BlittableJsonReaderObject)revisions[0]; + var revision = (BlittableJsonReaderObject)revisionObj["Revision"]; +\} +`} + + + + + + +**Include compare-exchange values**: + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +\{ + // Fetch document orders/826-A and include the specified compare-exchange + var command = new GetDocumentsCommand(store.Conventions, + ids:new[] \{"orders/826-A"\}, + includes: null, + counterIncludes: null, + revisionsIncludesByChangeVector: null, + revisionIncludeByDateTimeBefore: null, + timeSeriesIncludes: null, + // Similar to the previous "include revisions" examples, + // EITHER: + // Specify a list of document fields (part of document orders/826-A), + // where each field is expected to contain a compare-exchange KEY + // for the compare-exchange item you wish to include + // OR: + // Specify a single document field that contains a list of all keys to include. + compareExchangeValueIncludes: [ + "CmpXchgItemField1", + "CmpXchgItemField2" + ], + metadataOnly: false); + + store.GetRequestExecutor().Execute(command, context); + + // Access the included compare-exchange items + var cmpXchgItems = + (BlittableJsonReaderObject)command.Result.CompareExchangeValueIncludes; + + var cmpXchgItemKey = cmpXchgItems.GetPropertyNames()[0]; // The cmpXchg KEY NAME + var cmpXchgItemObj = (BlittableJsonReaderObject)cmpXchgItems[cmpXchgItemKey]; + + var cmpXchgItemValueObj = (BlittableJsonReaderObject)cmpXchgItemObj["Value"]; + var cmpXchgItemValue = cmpXchgItemValueObj["Object"]; // The cmpXchg KEY VALUE +\} +`} + + + + + + +## Syntax + + + +{`// Available overloads: +// ==================== + +public GetDocumentsCommand(int start, int pageSize) + +public GetDocumentsCommand(DocumentConventions conventions, + string id, + string[] includes, + bool metadataOnly); + +public GetDocumentsCommand(DocumentConventions conventions, + string[] ids, + string[] includes, + bool metadataOnly); + +public GetDocumentsCommand(DocumentConventions conventions, + string[] ids, + string[] includes, + string[] counterIncludes, + IEnumerable timeSeriesIncludes, + string[] compareExchangeValueIncludes, + bool metadataOnly); + +public GetDocumentsCommand(DocumentConventions conventions, + string[] ids, + string[] includes, + string[] counterIncludes, + IEnumerable revisionsIncludesByChangeVector, + DateTime? 
revisionIncludeByDateTimeBefore, + IEnumerable timeSeriesIncludes, + string[] compareExchangeValueIncludes, + bool metadataOnly); + +public GetDocumentsCommand(DocumentConventions conventions, + string[] ids, + string[] includes, + bool includeAllCounters, + IEnumerable timeSeriesIncludes, + string[] compareExchangeValueIncludes, + bool metadataOnly); + +public GetDocumentsCommand(DocumentConventions conventions, + string startWith, + string startAfter, + string matches, string exclude, + int start, int pageSize, + bool metadataOnly); + +public GetDocumentsCommand(int start, int pageSize); +`} + + + +| Parameter | Type | Description | +|-------------------------------------|-----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------| +| **conventions** | `DocumentConventions` | The store's conventions. | +| **id** | `string` | ID of the document to get. | +| **ids** | `string[]` | IDs of the documents to get. | +| **includes** | `string[]` | Related documents to fetch along with the document. | +| **counterIncludes** | `string[]` | Counters to fetch along with the document. | +| **includeAllCounters** | `bool` | Whether to include all counters. | +| **timeSeriesIncludes** | `AbstractTimeSeriesRange[]` | Time series to fetch along with the document. | +| **compareExchangeValueIncludes** | `string[]` | List of document fields containing cmpXchg keys of the compare-exchange items you wish to include. | +| **revisionsIncludesByChangeVector** | `string[]` | List of document fields containing change-vectors of the revisions you wish to include. | +| **revisionIncludeByDateTimeBefore** | `DateTime` | When this date is provided, retrieve the most recent revision that was created before this date value. | +| **metadataOnly** | `bool` | Whether to fetch the whole document or just the metadata. | +| **start** | `int` | Number of documents that should be skipped. | +| **pageSize** | `int` | Maximum number of documents that will be retrieved. | +| **startsWith** | `string` | Fetch only documents with this prefix. | +| **startAfter** | `string` | Skip 'document fetching' until the given ID is found, and return documents after that ID (default: null). | +| **matches** | `string` | Pipe ('|') separated values for which document IDs (after `startsWith`) should be matched.
(`?` any single character, `*` any characters). | | **exclude** | `string` | Pipe ('|') separated values for which document IDs (after `startsWith`) should NOT be matched.
(`?` any single character, `*` any characters). | + +{`// The \`GetDocumentsCommand\` result:
// =================================

public class GetDocumentsResult
\{
    public BlittableJsonReaderObject Includes \{ get; set; \}
    public BlittableJsonReaderArray Results \{ get; set; \}
    public BlittableJsonReaderObject CounterIncludes \{ get; set; \}
    public BlittableJsonReaderArray RevisionIncludes \{ get; set; \}
    public BlittableJsonReaderObject TimeSeriesIncludes \{ get; set; \}
    public BlittableJsonReaderObject CompareExchangeValueIncludes \{ get; set; \}
\}
`}

 + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_get-java.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_get-java.mdx new file mode 100644 index 0000000000..9dba018881 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_get-java.mdx @@ -0,0 +1,260 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +There are a few methods that allow you to retrieve documents from a database: + +- [Get single document](../../../client-api/commands/documents/get.mdx#get-single-document) +- [Get multiple documents](../../../client-api/commands/documents/get.mdx#get-multiple-documents) +- [Get paged documents](../../../client-api/commands/documents/get.mdx#get-paged-documents) +- [Get documents by starts with](../../../client-api/commands/documents/get.mdx#get-by-starts-with) +- [Get metadata only](../../../client-api/commands/documents/get.mdx#get-metadata-only) + +## Get single document + +**GetDocumentsCommand** can be used to retrieve a single document. + +### Syntax + + + +{`public GetDocumentsCommand(String id, String[] includes, boolean metadataOnly)
`}

 + +| Parameter | Type | Description | +|------------------|------------|-----------------------------------------------------------| +| **id** | `String` | ID of the document to get. | +| **includes** | `String[]` | Related documents to fetch along with the document. | +| **metadataOnly** | `boolean` | Whether to fetch the whole document or just the metadata. | + +### Example + + + +{`GetDocumentsCommand command = new GetDocumentsCommand(
    "orders/1-A", null, false);
session.advanced().getRequestExecutor().execute(command);
ObjectNode order = (ObjectNode) command.getResult().getResults().get(0);
`}

 + + +## Get multiple documents + +**GetDocumentsCommand** can also be used to retrieve a list of documents. + +### Syntax + + + +{`public GetDocumentsCommand(String[] ids, String[] includes, boolean metadataOnly)
`}

 + +| Parameter | Type | Description | +|------------------|------------|--------------------------------------------------------| +| **ids** | `String[]` | IDs of the documents to get. | +| **includes** | `String[]` | Related documents to fetch along with the documents. | +| **metadataOnly** | `boolean` | Whether to fetch whole documents or just the metadata. | + +### Example I + + + +{`GetDocumentsCommand command = new GetDocumentsCommand(
    new String[]\{"orders/1-A", "employees/3-A"\}, null, false);
session.advanced().getRequestExecutor().execute(command);
ObjectNode order = (ObjectNode) command.getResult().getResults().get(0);
ObjectNode employee = (ObjectNode) command.getResult().getResults().get(1);
`}

 + +### Example II - Using Includes + + + +{`// Fetch employees/5-A and his boss.
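// The 'ReportsTo' field of the employee document holds the boss's document ID;
// passing it in 'includes' fetches the boss document in the same request.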
+GetDocumentsCommand command = new GetDocumentsCommand( + "employees/5-A", new String[]\{"ReportsTo"\}, false); +session.advanced().getRequestExecutor().execute(command); + +ObjectNode employee = (ObjectNode) command.getResult().getResults().get(0); +String bossId = employee.get("ReportsTo").asText(); +ObjectNode boss = (ObjectNode) command.getResult().getIncludes().get(bossId); +`} + + + +### Example III - Missing Documents + + + +{`// Assuming that products/9999-A doesn't exist. +GetDocumentsCommand command = new GetDocumentsCommand( + new String[]\{"products/1-A", "products/9999-A", "products/3-A"\}, null, false); +session.advanced().getRequestExecutor().execute(command); +ArrayNode products = command.getResult().getResults(); // products/1-A, null, products/3-A +`} + + + + + +## Get paged documents + +**GetDocumentsCommand** can also be used to retrieve a paged set of documents. + +### Syntax + + + +{`public GetDocumentsCommand(int start, int pageSize) +`} + + + +| Parameter | Type | Description | +|--------------|-------|------------------------------------------------------| +| **start** | `int` | Number of documents that should be skipped. | +| **pageSize** | `int` | Maximum number of documents that will be retrieved. | + +### Example + + + +{`GetDocumentsCommand command = new GetDocumentsCommand(0, 128); +session.advanced().getRequestExecutor().execute(command); +ArrayNode firstDocs = command.getResult().getResults(); +`} + + + + + +## Get by starts with + +**GetDocumentsCommand** can be used to retrieve multiple documents for a specified ID prefix. + +### Syntax + + + +{`public GetDocumentsCommand( + String startWith, + String startAfter, + String matches, + String exclude, + int start, + int pageSize, + boolean metadataOnly) +`} + + + +| Parameter | Type | Description | +|------------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------| +| **startsWith** | `String` | Prefix for which documents should be returned. | +| **startAfter** | `String` | Skip 'document fetching' until the given ID is found, and return documents after that ID (default: null). | +| **matches** | `String` | Pipe ('|') separated values for which document IDs (after 'startsWith') should be matched ('?' any single character, '*' any characters). | +| **exclude** | `String` | Pipe ('|') separated values for which document IDs (after 'startsWith') should **not** be matched ('?' any single character, '*' any characters). | +| **start** | `int` | Number of documents that should be skipped. | +| **pageSize** | `int` | Maximum number of documents that will be retrieved. | +| **metadataOnly** | `boolean` | Specifies whether or not only document metadata should be returned. | + +### Example I + + + +{`GetDocumentsCommand command = new GetDocumentsCommand( + "products", //startWith + null, //startAfter + null, // matches + null, //exclude + 0, // start + 128, // pageSize + false //metadataOnly +); + +session.advanced().getRequestExecutor().execute(command); +ArrayNode products = command.getResult().getResults(); +`} + + + +### Example II + + + +{`// return up to 128 documents with key that starts with 'products/' +// and rest of the key begins with "1" or "2", eg. 
products/10, products/25 +GetDocumentsCommand command = new GetDocumentsCommand( + "products", //startWith + null, // startAfter + "1*|2*", // matches + null, // exclude + 0, //start + 128, //pageSize + false); //metadataOnly +`} + + + +### Example III + + + +{`// return up to 128 documents with key that starts with 'products/' +// and rest of the key have length of 3, begins and ends with "1" +// and contains any character at 2nd position e.g. products/101, products/1B1 +GetDocumentsCommand command = new GetDocumentsCommand( + "products", //startWith + null, // startAfter + "1?1", // matches + null, // exclude + 0, //start + 128, //pageSize + false); //metadataOnly +session.advanced().getRequestExecutor().execute(command); +ArrayNode products = command.getResult().getResults(); +`} + + + + + +## Get metadata only + +**GetDocumentsCommand** can be used to retrieve the metadata of documents. + +### Example + + + +{`GetDocumentsCommand command = new GetDocumentsCommand("orders/1-A", null, true); +session.advanced().getRequestExecutor().execute(command); + +JsonNode result = command.getResult().getResults().get(0); +ObjectNode documentMetadata = (ObjectNode) result.get("@metadata"); + +// Print out all the metadata properties. +Iterator fieldIterator = documentMetadata.fieldNames(); + +while (fieldIterator.hasNext()) \{ + String field = fieldIterator.next(); + JsonNode value = documentMetadata.get(field); + System.out.println(field + " = " + value); +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_get-nodejs.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_get-nodejs.mdx new file mode 100644 index 0000000000..ecd6b849eb --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_get-nodejs.mdx @@ -0,0 +1,495 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the low-level `GetDocumentsCommand` to retrieve documents from the database. + +* To retrieve documents using a higher-level method, see [loading entities](../../../client-api/session/loading-entities.mdx) or [query for documents](../../../client-api/session/querying/how-to-query.mdx). + +* In this page: + - [Get single document](../../../client-api/commands/documents/get.mdx#get-single-document) + - [Get multiple documents](../../../client-api/commands/documents/get.mdx#get-multiple-documents) + - [Get metadata only](../../../client-api/commands/documents/get.mdx#get-metadata-only) + - [Get paged documents](../../../client-api/commands/documents/get.mdx#get-paged-documents) + - [Get documents - by ID prefix](../../../client-api/commands/documents/get.mdx#get-documents---by-id-prefix) + - [Get documents - with includes](../../../client-api/commands/documents/get.mdx#get-documents---with-includes) + - [Syntax](../../../client-api/commands/documents/get.mdx#syntax) + + +## Get single document + +* The following examples demonstrate how to retrieve a document using either the _Store's request executor_ + or the _Session's request executor_. +* The examples in the rest of the article use the _Store's request executor_, but you can apply the Session's implementation shown here to ALL cases. 
+ + +**Get document command - using the Store's request executor**: + + +{`// Define the 'GetDocumentsCommand' +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + id: "orders/1-A" +\}); + +// Call 'execute' on the Store's Request Executor to send the command to the server +await documentStore.getRequestExecutor().execute(command); + +// Access the results +const order = command.result.results[0]; +const orderedAt = order.OrderedAt; +`} + + + + + + +**Get document command - using the Session's request executor**: + + +{`const session = documentStore.openSession(); + +// Define the 'GetDocumentsCommand' +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + id: "orders/1-A" +\}); + +// Call 'execute' on the Session's Request Executor to send the command to the server +await session.advanced.requestExecutor.execute(command); + +// Access the results +const order = command.result.results[0]; +const orderedAt = order.OrderedAt; +`} + + + + + + +## Get multiple documents + + + +**Get multiple documents**: + + +{`// Pass a list of document IDs to the get command +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + ids: ["orders/1-A", "employees/2-A", "products/1-A"] +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access results +const order = command.result.results[0]; +const employee = command.result.results[1]; +const product = command.result.results[2]; +`} + + + + + + +**Get multiple documents - missing documents**: + + +{`// Assuming that employees/9999-A doesn't exist +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + ids: [ "orders/1-A", "employees/9999-A", "products/3-A" ] +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Results will contain 'null' for any missing document +const results = command.result.results; // orders/1-A, null, products/3-A +assert.equal(results[1], null); +`} + + + + + + +## Get metadata only + + + +{`// Pass 'true' in the 'metadataOnly' param to retrieve only the document METADATA +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + id: "orders/1-A", + metadataOnly: true +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access results +const results = command.result.results[0]; +const metadata = results["@metadata"]; + +// Print out all metadata properties +for (const propertyName in metadata) \{ + console.log(\`$\{propertyName\} = $\{metadata[propertyName]\}\`); +\} +`} + + + + + +## Get paged documents + +* You can retrieve documents in pages by specifying how many documents to skip and how many to fetch. +* Using this overload, no specific collection is specified, the documents will be fetched from ALL collections. + + + +{`// Specify the number of documents to skip (start) +// and the number of documents to get (pageSize) +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + start: 0, + pageSize: 128 +\}); + +await documentStore.getRequestExecutor().execute(command); + +// The documents are sorted by the last modified date, +// with the most recent modifications appearing first. 
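+// To fetch the next page, issue another GetDocumentsCommand with 'start'
+// advanced by 'pageSize' (e.g. start: 128, pageSize: 128).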
+const firstDocs = command.result.results; +`} + + + + + +## Get documents - by ID prefix + + + +**Retrieve documents that match a specified ID prefix**: + + +{`// Return up to 50 documents with ID that starts with 'products/' +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + startsWith: "products/", + start: 0, + pageSize: 50 +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access a Product document +const product = command.result.results[0]; +`} + + + + + + +**Retrieve documents that match a specified ID prefix - with "matches" pattern**: + + +{`// Return up to 50 documents with IDs that start with 'orders/' +// and the rest of the ID either begins with '23', +// or contains any character at the 1st position and ends with '10-A' +// e.g. orders/234-A, orders/810-A +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + startsWith: "orders/", + matches: "23*|?10-A", + start: 0, + pageSize: 50 +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access an Order document +const order = command.result.results[0]; + +const orderId = order["@metadata"]["@id"]; +assert.ok(orderId.startsWith("orders/23") || /^orders\\/.\{1\}10-A$/.test(orderId)); +`} + + + + + + +**Retrieve documents that match a specified ID prefix - with "exclude" pattern**: + + +{`// Return up to 50 documents with IDs that start with 'orders/' +// and the rest of the ID excludes documents ending with '10-A', +// e.g. will return orders/820-A, but not orders/810-A +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + startsWith: "orders/", + exclude: "*10-A", + start: 0, + pageSize: 50 +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access an Order document +const order = command.result.results[0]; + +const orderId = order["@metadata"]["@id"]; +assert.ok(orderId.startsWith("orders/") && !orderId.endsWith("10-A")); +`} + + + + + + +## Get documents - with includes + + + +**Include related documents**: + + +{`// Fetch document products/77-A and include its related Supplier document +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + id: "products/77-A", + includes: [ "Supplier" ] +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access the related document that was included +const product = command.result.results[0]; +const supplierId = product["Supplier"]; +const supplier = command.result.includes[supplierId]; +`} + + + + + + +**Include counters**: + + +{`// Fetch document products/77-A and include the specified counters +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + id: "products/77-A", + // Pass the names of the counters to include. In this example, + // the counter names in RavenDB's sample data are stars... 
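+    // Alternatively, pass 'includeAllCounters: true' (see the Syntax section below)
+    // to include every counter on the document without listing each name.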
+ counterIncludes: ["⭐", "⭐⭐"] +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access the included counters results +const counters = command.result.counterIncludes["products/77-A"] +const counter = counters[0]; + +const counterName = counter["counterName"]; +const counterValue = counter["totalValue"]; +`} + + + + + + +**Include time series**: + + +{`// Fetch document employees/1-A and include the specified time series +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + ids: ["employees/1-A"], + // Specify the time series name and the time range + timeSeriesIncludes: [ + \{ + name: "HeartRates", + from: new Date("2020-04-01T00:00:00.000Z"), + to: new Date("2024-12-31T00:00:00.000Z") + \} + ] +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access the included time series results +const timeseries = command.result.timeSeriesIncludes["employees/1-A"]; +const tsEntries = timeseries["HeartRates"][0].entries; + +const entryTimeStamp = tsEntries[0].timestamp; +const entryValues = tsEntries[0].values; +`} + + + + + + +**Include revisions**: + + +{`// Fetch document orders/826-A and include the specified revisions +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + ids: ["orders/826-A"], + // Specify list of document fields (part of document orders/826-A), + // where each field is expected to contain the change-vector + // of the revision you wish to include. + revisionsIncludesByChangeVector: [ + "RevisionChangeVectorField1", + "RevisionChangeVectorField2" + ] +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access the included revisions +const revisionObj = command.result.revisionIncludes[0]; +const revision = revisionObj.Revision; +`} + + + + +{`// Fetch document orders/826-A and include the specified revisions +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + ids: ["orders/826-A"], + // Another option is to specify a single document field (part of document orders/826-A). + // This field is expected to contain a list of all the change-vectors + // for the revisions you wish to include. + revisionsIncludesByChangeVector: [ + "RevisionsChangeVectors" + ] +\}); + +await documentStore.getRequestExecutor().execute(command); + +// Access the included revisions +const revisionObj = command.result.revisionIncludes[0]; +const revision = revisionObj.Revision; +`} + + + + + + +**Include compare-exchange values**: + + +{`// Fetch document orders/826-A and include the specified compare-exchange +const command = new GetDocumentsCommand(\{ + conventions: documentStore.conventions, + ids: ["orders/826-A"], + // Similar to the previous "include revisions" examples, + // EITHER: + // Specify a list of document fields (part of document orders/826-A), + // where each field is expected to contain a compare-exchange KEY + // for the compare-exchange item you wish to include + // OR: + // Specify a single document field that contains a list of all keys to include. 
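+    // In this example, two separate document fields are passed,
+    // each expected to hold a single compare-exchange key: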
+    compareExchangeValueIncludes: [
+        "CmpXchgItemField1",
+        "CmpXchgItemField2"
+    ]
+\});
+
+await documentStore.getRequestExecutor().execute(command);
+
+// Access the included compare-exchange items
+const cmpXchgItems = command.result.compareExchangeValueIncludes;
+
+const cmpXchgItemKey = Object.keys(cmpXchgItems)[0];
+const cmpXchgItemValue = cmpXchgItems[cmpXchgItemKey].value.Object;
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+// ====================
+
+new GetDocumentsCommand(\{
+    conventions, id,
+    includes?, counterIncludes?, includeAllCounters?, metadataOnly?
+\});
+
+new GetDocumentsCommand(\{
+    conventions, ids,
+    includes?, timeSeriesIncludes?, compareExchangeValueIncludes?,
+    revisionsIncludesByChangeVector?, revisionIncludeByDateTimeBefore?,
+    counterIncludes?, includeAllCounters?, metadataOnly?
+\});
+
+new GetDocumentsCommand(\{
+    conventions, start, pageSize,
+    startsWith?, startsAfter?, matches?, exclude?,
+    counterIncludes?, includeAllCounters?, metadataOnly?
+\});
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------------------------|-----------------------------|----------------------------------------------------------------------------------------------------------|
+| **conventions** | `DocumentConventions` | The store's conventions. |
+| **id** | `string` | ID of the document to get. |
+| **ids** | `string[]` | IDs of the documents to get. |
+| **includes** | `string[]` | Related documents to fetch along with the document. |
+| **counterIncludes** | `string[]` | Counters to fetch along with the document. |
+| **includeAllCounters** | `boolean` | Whether to include all counters. |
+| **timeSeriesIncludes** | `AbstractTimeSeriesRange[]` | Time series to fetch along with the document. |
+| **compareExchangeValueIncludes** | `string[]` | List of document fields containing cmpXchg keys of the compare-exchange items you wish to include. |
+| **revisionsIncludesByChangeVector** | `string[]` | List of document fields containing the change-vectors of the revisions you wish to include. |
+| **revisionIncludeByDateTimeBefore** | `Date` | When this date is provided, retrieve the most recent revision that was created before this date value. |
+| **metadataOnly** | `boolean` | Whether to fetch the whole document or just the metadata. |
+| **start** | `number` | Number of documents that should be skipped. |
+| **pageSize** | `number` | Maximum number of documents that will be retrieved. |
+| **startsWith** | `string` | Fetch only documents with this prefix. |
+| **startsAfter** | `string` | Skip 'document fetching' until the given ID is found, and return documents after that ID (default: null). |
+| **matches** | `string` | Pipe ('|') separated values for which document IDs (after `startsWith`) should be matched. (`?` any single character, `*` any characters). |
+| **exclude** | `string` | Pipe ('|') separated values for which document IDs (after `startsWith`) should **not** be matched.
(`?` any single character, `*` any characters). | + + + +{`// The \`GetDocumentCommand\` result object: +// ======================================= + +\{ + includes; // object + results; // any[] + counterIncludes; // object + revisionIncludes; // any[] + timeSeriesIncludes; // object + compareExchangeValueIncludes; // object +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_get-php.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_get-php.mdx new file mode 100644 index 0000000000..0b84b3040e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_get-php.mdx @@ -0,0 +1,255 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetDocumentsCommand` to retrieve documents from the database. + +* In this page: + - [Get single document](../../../client-api/commands/documents/get.mdx#get-single-document) + - [Get multiple documents](../../../client-api/commands/documents/get.mdx#get-multiple-documents) + - [Get paged documents](../../../client-api/commands/documents/get.mdx#get-paged-documents) + - [Get documents by ID prefix](../../../client-api/commands/documents/get.mdx#get-documents-by-id-prefix) + + +## Get single document + +**GetDocumentsCommand** can be used to retrieve a single document + +#### Syntax: + + + +{`public static function forSingleDocument(string $id, StringArray|array|null $includes = null, bool $metadataOnly = false): GetDocumentsCommand; +`} + + + +| Parameters | Type | Description | +|------------|------|-------------| +| **id** | `string` | ID of the documents to get | +| **includes** | `StringArray` or `array` or `null` | Related documents to fetch along with the document | +| **metadataOnly** | `bool` | Whether to fetch the whole document or just its metadata. | +#### Example: + + + +{`$command = GetDocumentsCommand::forSingleDocument("orders/1-A", null, false); +$session->advanced()->getRequestExecutor()->execute($command); + +/** @var GetDocumentsResult $documentsResult */ +$documentsResult = $command->getResult(); +$order = $documentsResult->getResults()[0]; +`} + + + + + +## Get multiple documents + +**GetDocumentsCommand** can also be used to retrieve a list of documents. + +#### Syntax: + + + +{`public static function forMultipleDocuments(StringArray|array|null $ids, StringArray|array|null $includes, bool $metadataOnly = false): GetDocumentsCommand; +`} + + + +| Parameters | Type | Description | +|------------|------|-------------| +| **ids** | `StringArray` or `array` or `null` | IDs of the documents to get | +| **includes** | `StringArray` or `array` or `null` | Related documents to fetch along with the documents | +| **metadataOnly** | `bool` | Whether to fetch whole documents or just the metadata | +#### Example I + + + +{`$command = GetDocumentsCommand::forMultipleDocuments(["orders/1-A", "employees/3-A"], null, false); +$session->advanced()->getRequestExecutor()->execute($command); + +/** @var GetDocumentsResult $result */ +$result = $command->getResult(); +$order = $result->getResults()[0]; +$employee = $result->getResults()[1]; +`} + + + +#### Example II - Using Includes + + + +{`// Fetch employees/5-A and his boss. 
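+// The 'ReportsTo' field holds the boss's document ID;
+// passing it as an include fetches that document in the same request.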
+$command = GetDocumentsCommand::forSingleDocument("employees/5-A", [ "ReportsTo" ], false); +$session->advanced()->getRequestExecutor()->execute($command); +/** @var GetDocumentsResult $result */ +$result = $command->getResult(); +$employee = $result->getResults()[0]; +if (array_key_exists("ReportsTo", $employee)) \{ + $bossId = $employee["ReportsTo"]; + + $boss = $result->getIncludes()[$bossId]; +\} +`} + + + +#### Example III - Missing Documents + + + +{`// Assuming that products/9999-A doesn't exist. +$command = GetDocumentsCommand::forMultipleDocuments([ "products/1-A", "products/9999-A", "products/3-A" ], null, false); +$session->advanced()->getRequestExecutor()->execute($command); + +/** @var GetDocumentsResult $result */ +$result = $command->getResult(); +$products = $result->getResults(); // products/1-A, null, products/3-A +`} + + + + + +## Get paged documents + +**GetDocumentsCommand** can also be used to retrieve a paged set of documents. + +#### Syntax: + + + +{`public static function withStartAndPageSize(int $start, int $pageSize): GetDocumentsCommand; +`} + + + +| Parameters | Type | Description | +|------------|------|-------------| +| **start** | `int` | number of documents that should be skipped | +| **pageSize** | `int` | maximum number of documents that will be retrieved | +#### Example: + + + +{`$command = GetDocumentsCommand::withStartAndPageSize(0, 128); +$session->advanced()->getRequestExecutor()->execute($command); + +/** @var GetDocumentsResult $result */ +$result = $command->getResult(); +$firstDocs = $result->getResults(); +`} + + + + + +## Get documents by ID prefix + +**GetDocumentsCommand** can be used to retrieve multiple documents for a specified ID prefix. + +#### Syntax: + + + +{`public static function withStartWith( + ?string $startWith, + ?string $startAfter, + ?string $matches, + ?string $exclude, + int $start, + int $pageSize, + bool $metadataOnly +): GetDocumentsCommand; +`} + + + +| Parameters | Type | Description | +|------------|------|-------------| +| **startWith** | `?string` | prefix for which documents should be returned | +| **startAfter** | `?string` | skip 'document fetching' until the given ID is found, and return documents after that ID (default: None) | +| **matches** | `?string` | pipe ('|') separated values for which document IDs (after `startWith`) should be matched ('?' any single character, '*' any characters) | +| **exclude** | `?string` | pipe ('|') separated values for which document IDs (after `startWith`) should **not** be matched ('?' any single character, '*' any characters) | +| **start** | `int` | number of documents that should be skipped | +| **pageSize** | `int` | maximum number of documents that will be retrieved | +| **metadataOnly** | `bool` | specifies whether or not only document metadata should be returned | +#### Example I + + + +{`// return up to 128 documents with key that starts with 'products' +$command = GetDocumentsCommand::withStartWith( + startWith: "products", + startAfter: null, + matches: null, + exclude: null, + start: 0, + pageSize: 128, + metadataOnly: false); +$session->advanced()->getRequestExecutor()->execute($command); + +/** @var GetDocumentsResult $result */ +$result = $command->getResult(); +$products = $result->getResults(); +`} + + + +#### Example II + + + +{`// return up to 128 documents with key that starts with 'products/' +// and rest of the key begins with "1" or "2" e.g. 
products/10, products/25 +$command = GetDocumentsCommand::withStartWith( + startWith: "products", + startAfter: null, + matches: "1*|2*", + exclude: null, + start: 0, + pageSize: 128, + metadataOnly: false); +$session->advanced()->getRequestExecutor()->execute($command); + +/** @var GetDocumentsResult $result */ +$result = $command->getResult(); +$products = $result->getResults(); +`} + + + +#### Example III + + + +{`// return up to 128 documents with key that starts with 'products/' +// and rest of the key have length of 3, begins and ends with "1" +// and contains any character at 2nd position e.g. products/101, products/1B1 +$command = GetDocumentsCommand::withStartWith( + startWith: "products", + startAfter: null, + matches: "1?1", + exclude: null, + start: 0, + pageSize: 128, + metadataOnly: false); + +$session->advanced()->getRequestExecutor()->execute($command); + +/** @var GetDocumentsResult $result */ +$result = $command->getResult(); +$products = $result->getResults(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_get-python.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_get-python.mdx new file mode 100644 index 0000000000..1a94160777 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_get-python.mdx @@ -0,0 +1,225 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetDocumentsCommand` to retrieve documents from the database. + +* In this page: + - [Get single document](../../../client-api/commands/documents/get.mdx#get-single-document) + - [Get multiple documents](../../../client-api/commands/documents/get.mdx#get-multiple-documents) + - [Get paged documents](../../../client-api/commands/documents/get.mdx#get-paged-documents) + - [Get documents by ID prefix](../../../client-api/commands/documents/get.mdx#get-documents-by-id-prefix) + + +## Get single document + +**GetDocumentsCommand** can be used to retrieve a single document + +#### Syntax: + + + +{`# GetDocumentsCommand.from_single_id(...) +@classmethod +def from_single_id( + cls, key: str, includes: List[str] = None, metadata_only: bool = None +) -> GetDocumentsCommand: ... +`} + + + +| Parameter | Type | Description | +|-------------------|-------------|-----------------------------------------------------------| +| **key** | `str` | ID of the documents to get. | +| **includes** | `List[str]` | Related documents to fetch along with the document. | +| **metadata_only** | `bool` | Whether to fetch the whole document or just the metadata. | +#### Example: + + + +{`command = GetDocumentsCommand.from_single_id("orders/1-A", None, False) +session.advanced.request_executor.execute_command(command) +order = command.result.results[0] +`} + + + + + +## Get multiple documents + +**GetDocumentsCommand** can also be used to retrieve a list of documents. + +#### Syntax: + + + +{`# GetDocumentsCommand.from_multiple_ids(...) +@classmethod +def from_multiple_ids( + cls, + keys: List[str], + includes: List[str] = None, + counter_includes: List[str] = None, + time_series_includes: List[str] = None, + compare_exchange_value_includes: List[str] = None, + metadata_only: bool = False, +) -> GetDocumentsCommand: ... +`} + + + +| Parameter | Type | Description | +|-------------------|-------------|--------------------------------------------------------| +| **keys** | `List[str]` | IDs of the documents to get. 
| +| **includes** | `List[str]` | Related documents to fetch along with the documents. | +| **metadata_only** | `bool` | Whether to fetch whole documents or just the metadata. | +#### Example I + + + +{`command = GetDocumentsCommand.from_multiple_ids(["orders/1-A", "employees/3-A"]) +session.advanced.request_executor.execute_command(command) +order = command.result.results[0] +employee = command.result.results[1] +`} + + + +#### Example II - Using Includes + + + +{`# Fetch employees/5-A and his boss. +command = GetDocumentsCommand.from_single_id("employees/5-A", ["ReportsTo"], False) +session.advanced.request_executor.execute_command(command) +employee = command.result.results[0] +boss = command.result.includes.get(employee.get("ReportsTo", None), None) +`} + + + +#### Example III - Missing Documents + + + +{`# Assuming that products/9999-A doesn't exist +command = GetDocumentsCommand.from_multiple_ids(["products/1-A", "products/9999-A", "products/3-A"]) +session.advanced.request_executor.execute_command(command) +products = command.result.results # products/1-A, products/3-A +`} + + + + + +## Get paged documents + +**GetDocumentsCommand** can also be used to retrieve a paged set of documents. + +#### Syntax: + + + +{`# GetDocumentsCommand.from_paging(...) +@classmethod +def from_paging(cls, start: int, page_size: int) -> GetDocumentsCommand: ... +`} + + + +| Parameter | Type | Description | +|---------------|-------|-----------------------------------------------------| +| **start** | `int` | Number of documents that should be skipped. | +| **page_size** | `int` | Maximum number of documents that will be retrieved. | +#### Example: + + + +{`command = GetDocumentsCommand.from_paging(0, 128) +session.advanced.request_executor.execute_command(command) +first_docs = command.result.results +`} + + + + + +## Get documents by ID prefix + +**GetDocumentsCommand** can be used to retrieve multiple documents for a specified ID prefix. + +#### Syntax: + + + +{`# GetDocumentsCommand.from_starts_with(...) +@classmethod +def from_starts_with( + cls, + start_with: str, + start_after: str = None, + matches: str = None, + exclude: str = None, + start: int = None, + page_size: int = None, + metadata_only: bool = None, +) -> GetDocumentsCommand: ... +`} + + + +| Parameter | Type | Description | +|-------------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------| +| **start_with** | `str` | Prefix for which documents should be returned. | +| **start_after** | `str` | Skip 'document fetching' until the given ID is found, and return documents after that ID (default: None). | +| **matches** | `str` | Pipe ('|') separated values for which document IDs (after `start_with`) should be matched ('?' any single character, '*' any characters). | +| **exclude** | `str` | Pipe ('|') separated values for which document IDs (after `start_with`) should **not** be matched ('?' any single character, '*' any characters). | +| **start** | `int` | Number of documents that should be skipped. | +| **page_size** | `int` | Maximum number of documents that will be retrieved. | +| **metadata_only** | `bool` | Specifies whether or not only document metadata should be returned. 
|
+#### Example I
+
+
+
+{`# return up to 128 documents with key that starts with 'products'
+command = GetDocumentsCommand.from_starts_with("products", start=0, page_size=128)
+session.advanced.request_executor.execute_command(command)
+products = command.result.results
+`}
+
+
+
+#### Example II
+
+
+
+{`# return up to 128 documents with key that starts with 'products/'
+# and rest of the key begins with "1" or "2" e.g. products/10, products/25
+command = GetDocumentsCommand.from_starts_with("products", matches="1*|2*", start=0, page_size=128)
+session.advanced.request_executor.execute_command(command)
+products = command.result.results
+`}
+
+
+
+#### Example III
+
+
+
+{`# return up to 128 documents with key that starts with 'products/'
+# and rest of the key has length of 3, begins and ends with "1"
+# and contains any character at 2nd position e.g. products/101, products/1B1
+command = GetDocumentsCommand.from_starts_with("products", matches="1?1", start=0, page_size=128)
+session.advanced.request_executor.execute_command(command)
+products = command.result.results
+`}
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_put-csharp.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_put-csharp.mdx
new file mode 100644
index 0000000000..158a252a6a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/commands/documents/_put-csharp.mdx
@@ -0,0 +1,202 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the low-level `PutDocumentCommand` to insert a new document into the database or update an existing document.
+
+* When using `PutDocumentCommand`, you must explicitly **specify the collection** to which the document will belong;
+  otherwise, the document will be placed in the `@empty` collection. See how this is done in the examples below.
+
+* To insert a document into the database using a higher-level method, see [storing entities](../../../client-api/session/storing-entities.mdx).
+  To update an existing document using a higher-level method, see [update entities](../../../client-api/session/updating-entities.mdx).
+ +* In this page: + + * [Examples](../../../client-api/commands/documents/put.mdx#examples) + * [Syntax](../../../client-api/commands/documents/put.mdx#syntax) + + +## Examples + + + +**Put document command - using the Store's request executor**: + + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +{ + // Define the document to 'put' as a blittable object + var blittableDocument = context.ReadObject(new DynamicJsonValue() + { + ["@metadata"] = new DynamicJsonValue() + { + ["@collection"] = "Categories" + }, + ["Name"] = "My category", + ["Description"] = "My category description" + }, "categories/999"); + + // Define the PutDocumentCommand + var command = new PutDocumentCommand(store.Conventions, + "categories/999", null, blittableDocument); + + // Call 'Execute' on the Store Request Executor to send the command to the server + store.GetRequestExecutor().Execute(command, context); + + // Access the command result + var putResult = command.Result; + var theDocumentID = putResult.Id; + var theDocumentCV = putResult.ChangeVector; +} +`} + + + + +{`using (var store = new DocumentStore()) +using (store.GetRequestExecutor().ContextPool.AllocateOperationContext(out var context)) +{ + // Define the document to 'put' as a blittable object + var blittableDocument = context.ReadObject(new DynamicJsonValue() + { + ["@metadata"] = new DynamicJsonValue() + { + ["@collection"] = "Categories" + }, + ["Name"] = "My category", + ["Description"] = "My category description" + }, "categories/999"); + + // Define the PutDocumentCommand + var command = new PutDocumentCommand(store.Conventions, + "categories/999", null, blittableDocument); + + // Call 'ExecuteAsync' on the Store Request Executor to send the command to the server + await store.GetRequestExecutor().ExecuteAsync(command, context); + + // Access the command result + var putResult = command.Result; + var theDocumentID = putResult.Id; + var theDocumentCV = putResult.ChangeVector; +} +`} + + + + + + + +**Put document command - using the Session's request executor**: + + + +{`// Create a new document entity +var doc = new Category +{ + Name = "My category", + Description = "My category description" +}; + +// Specify the collection to which the document will belong +var docInfo = new DocumentInfo +{ + Collection = "Categories" +}; + +// Convert your entity to a BlittableJsonReaderObject +var blittableDocument = session.Advanced.JsonConverter.ToBlittable(doc, docInfo); + +// Define the PutDocumentCommand +var command = new PutDocumentCommand(store.Conventions, + "categories/999", null, blittableDocument); + +// Call 'Execute' on the Session Request Executor to send the command to the server +session.Advanced.RequestExecutor.Execute(command, session.Advanced.Context); + +// Access the command result +var putResult = command.Result; +var theDocumentID = putResult.Id; +var theDocumentCV = putResult.ChangeVector; +`} + + + + +{`// Create a new document entity +var doc = new Category +{ + Name = "My category", + Description = "My category description" +}; + +// Specify the collection to which the document will belong +var docInfo = new DocumentInfo +{ + Collection = "Categories" +}; + +// Convert your entity to a BlittableJsonReaderObject +var blittableDocument = asyncSession.Advanced.JsonConverter.ToBlittable(doc, docInfo); + +// Define the PutDocumentCommand +var command = new PutDocumentCommand(store.Conventions, + "categories/999", null, blittableDocument); + +// Call 
'ExecuteAsync' on the Session Request Executor to send the command to the server
+await asyncSession.Advanced.RequestExecutor.ExecuteAsync(
+    command, asyncSession.Advanced.Context);
+
+// Access the command result
+var putResult = command.Result;
+var theDocumentID = putResult.Id;
+var theDocumentCV = putResult.ChangeVector;
+`}
+
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public PutDocumentCommand(DocumentConventions conventions,
+    string id, string changeVector, BlittableJsonReaderObject document)
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|-----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **conventions** | `DocumentConventions` | The store's conventions. |
+| **id** | `string` | Unique ID under which document will be stored. |
+| **changeVector** | `string` | The change-vector of the document you wish to update,
used for [optimistic concurrency control](../../../server/clustering/replication/change-vector.mdx#concurrency-control--change-vectors).
Pass `null` to skip the check and force the 'put'. | +| **document** | `BlittableJsonReaderObject` | The document to store. Use:
`session.Advanced.JsonConverter.ToBlittable(doc, docInfo);` to convert your entity to a `BlittableJsonReaderObject`. | + + + +{`// The \`PutDocumentCommand\` result: +// ================================ + +public class PutResult +\{ + /// The ID under which document was stored + public string Id \{ get; set; \} + + // The changeVector that was assigned to the stored document + public string ChangeVector \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_put-java.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_put-java.mdx new file mode 100644 index 0000000000..3ad69808aa --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_put-java.mdx @@ -0,0 +1,41 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**Put** is used to insert or update a document in a database. + +## Syntax + + + +{`public PutDocumentCommand(String id, String changeVector, ObjectNode document) +`} + + + +| Parameter | Type | Description | +|------------------|--------------|----------------------------------------------------------------------------------------------------------------------------------------| +| **id** | `String` | Unique ID under which document will be stored | +| **changeVector** | `String` | Entity changeVector, used for concurrency checks (`null` to skip check) | +| **document** | `ObjectNode` | The document to store. You may use `session.advanced().getEntityToJson().convertEntityToJson` to convert your entity to a `ObjectNode` | + +## Example + + + +{`Category doc = new Category(); +doc.setName("My category"); +doc.setDescription("My category description"); + +DocumentInfo docInfo = new DocumentInfo(); +docInfo.setCollection("Categories"); + +ObjectNode jsonDoc = session.advanced().getEntityToJson().convertEntityToJson(doc, docInfo); +PutDocumentCommand command = new PutDocumentCommand("categories/999", null, jsonDoc); +session.advanced().getRequestExecutor().execute(command); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_put-nodejs.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_put-nodejs.mdx new file mode 100644 index 0000000000..cbffcb8a25 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_put-nodejs.mdx @@ -0,0 +1,132 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the low-level `PutDocumentCommand` to insert a new document to the database or update an existing document. + +* When using `PutDocumentCommand`, you must explicitly **specify the collection** to which the document will belong, + otherwise, the document will be placed in the `@empty` collection. See how this is done in the example below. + +* To insert a document to the database using a higher-level method, see [storing entities](../../../client-api/session/storing-entities.mdx). + To update an existing document using a higher-level method, see [update entities](../../../client-api/session/updating-entities.mdx). 
+ +* In this page: + + * [Examples](../../../client-api/commands/documents/put.mdx#examples) + * [Syntax](../../../client-api/commands/documents/put.mdx#syntax) + + +## Examples + + + +**Put document command - using the Store's request executor**: + + +{`// Define the json document to 'put' +const jsonDocument = \{ + name: "My category", + description: "My category description", + "@metadata": \{ + "@collection": "categories" + \} +\} + +// Define the 'PutDocumentCommand' +// Pass the document ID, whether to make concurrency checks, +// and the json document to store +const command = new PutDocumentCommand("categories/999", null, jsonDocument); + +// Call 'execute' on the Store Request Executor to send the command to the server +await documentStore.getRequestExecutor().execute(command); + +// Access the command result +const result = command.result; +const theDocumentID = result.id; +const theDocumentCV = result.changeVector; + +assert.strictEqual(theDocumentID, "categories/999"); +`} + + + + + + +**Put document command - using the Session's request executor**: + + +{`const session = documentStore.openSession(); + +// Create a new entity +const category = new Category(); +category.name = "My category"; +category.description = "My category description"; + +// To be able to specify under which collection the document should be stored +// you need to convert the entity to a json document first. + +// Passing the entity as is instead of the json document +// will result in storing the document under the "@empty" collection. + +const documentInfo = new DocumentInfo(); +documentInfo.collection = "categories"; // The target collection +const jsonDocument = EntityToJson.convertEntityToJson( + category, documentStore.conventions, documentInfo); + +// Define the 'PutDocumentCommand' +// Pass the document ID, whether to make concurrency checks, +// and the json document to store +const command = new PutDocumentCommand("categories/999", null, jsonDocument); + +// Call 'execute' on the Session Request Executor to send the command to the server +await session.advanced.requestExecutor.execute(command); + +// Access the command result +const result = command.result; +const theDocumentID = result.id; +const theDocumentCV = result.changeVector; + +assert.strictEqual(theDocumentID, "categories/999"); +`} + + + + + + +## Syntax + + + +{`PutDocumentCommand(id, changeVector, document); +`} + + + +| Parameter | Type | Description | +|------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **id** | `string` | Unique ID under which document will be stored. | +| **changeVector** | `string` | The change-vector of the document you wish to update,
used for [optimistic concurrency control](../../../server/clustering/replication/change-vector.mdx#concurrency-control--change-vectors).
Pass `null` to skip the check and force the 'put'. | +| **document** | `object` | The document to store. | + + + +{`// Executing \`PutDocumentCommand\` returns the following object: +\{ + // The document id under which the entity was stored + id; // string + + // The change vector assigned to the stored document + changeVector; // string +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_put-php.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_put-php.mdx new file mode 100644 index 0000000000..eda40e730d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_put-php.mdx @@ -0,0 +1,59 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `PutDocumentCommand` to insert a document to the database or update an existing document. + +* In this page: + + * [Example](../../../client-api/commands/documents/put.mdx#example) + * [Syntax](../../../client-api/commands/documents/put.mdx#syntax) + + +## Example + + + +{`// Create a new document +$doc = new Category(); +$doc->setName("My category"); +$doc->setDescription("My category description"); + +// Create metadata on the document +$docInfo = new DocumentInfo(); +$docInfo->setCollection("Categories"); + +// Convert your entity to a BlittableJsonReaderObject +$jsonDoc = $session->advanced()->getEntityToJson()->convertEntityToJson($doc, $docInfo); + +// The Put command (parameters are document ID, changeVector check is null, the document to store) +$command = new PutDocumentCommand("categories/999", null, $jsonDoc); +// RequestExecutor sends the command to the server +$session->advanced()->getRequestExecutor()->execute($command); +`} + + + + + +## Syntax + + + +{`PutDocumentCommand(string $idOrCopy, ?string $changeVector, array $document); +`} + + + +| Parameters | Type | Description | +| ------------- | ------------- | ----- | +| **idOrCopy** | `string` | Unique ID under which document will be stored | +| **changeVector** | `string` (optional) | Entity changeVector, used for concurrency checks (`None` to skip check) | +| **document** | `array` | The document to store | + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/_put-python.mdx b/versioned_docs/version-7.1/client-api/commands/documents/_put-python.mdx new file mode 100644 index 0000000000..241e6e9fe9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/_put-python.mdx @@ -0,0 +1,57 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `PutDocumentCommand` to insert a document to the database or update an existing document. 
+ +* In this page: + + * [Example](../../../client-api/commands/documents/put.mdx#example) + * [Syntax](../../../client-api/commands/documents/put.mdx#syntax) + + +## Example + + + +{`# Create a new document +doc = Category(name="My category", description="My category description") + +# Create metadata on the document +doc_info = DocumentInfo(collection="Categories") + +# Convert your entity to a dict +dict_doc = session.entity_to_json.convert_entity_to_json_static(doc, session.conventions, doc_info) + +# The put command (parameters are document ID, change vector check is None, the document to store) +command = PutDocumentCommand("employees/1-A", None, dict_doc) +# Request executor sends the command to the server +session.advanced.request_executor.execute_command(command) +`} + + + + + +## Syntax + + + +{`class PutDocumentCommand(RavenCommand[PutResult]): + def __init__(self, key: str, change_vector: Optional[str], document: dict): ... +`} + + + +| Parameter | Type | Description | +|-------------------|------------------|-------------------------------------------------------------------------| +| **key** | `str` | Unique ID under which document will be stored | +| **change_vector** | `str` (optional) | Entity changeVector, used for concurrency checks (`None` to skip check) | +| **document** | `dict` | The document to store | + + + + diff --git a/versioned_docs/version-7.1/client-api/commands/documents/delete.mdx b/versioned_docs/version-7.1/client-api/commands/documents/delete.mdx new file mode 100644 index 0000000000..1d868f5335 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/delete.mdx @@ -0,0 +1,49 @@ +--- +title: "Delete Document Command" +hide_table_of_contents: true +sidebar_label: Delete Document +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeleteCsharp from './_delete-csharp.mdx'; +import DeleteJava from './_delete-java.mdx'; +import DeletePython from './_delete-python.mdx'; +import DeletePhp from './_delete-php.mdx'; +import DeleteNodejs from './_delete-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/commands/documents/get.mdx b/versioned_docs/version-7.1/client-api/commands/documents/get.mdx new file mode 100644 index 0000000000..f1f38452a6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/get.mdx @@ -0,0 +1,49 @@ +--- +title: "Get Documents Command" +hide_table_of_contents: true +sidebar_label: Get Documents +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetCsharp from './_get-csharp.mdx'; +import GetJava from './_get-java.mdx'; +import GetPython from './_get-python.mdx'; +import GetPhp from './_get-php.mdx'; +import GetNodejs from './_get-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/commands/documents/put.mdx b/versioned_docs/version-7.1/client-api/commands/documents/put.mdx new file mode 100644 index 0000000000..d11cce533f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/commands/documents/put.mdx @@ 
-0,0 +1,49 @@
+---
+title: "Put Document Command"
+hide_table_of_contents: true
+sidebar_label: Put Document
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import PutCsharp from './_put-csharp.mdx';
+import PutJava from './_put-java.mdx';
+import PutPython from './_put-python.mdx';
+import PutPhp from './_put-php.mdx';
+import PutNodejs from './_put-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/commands/overview.mdx b/versioned_docs/version-7.1/client-api/commands/overview.mdx
new file mode 100644
index 0000000000..edf2a9175d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/commands/overview.mdx
@@ -0,0 +1,40 @@
+---
+title: "Commands Overview"
+hide_table_of_contents: true
+sidebar_label: Commands Overview
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import OverviewCsharp from './_overview-csharp.mdx';
+import OverviewJava from './_overview-java.mdx';
+import OverviewNodejs from './_overview-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/configuration/_category_.json b/versioned_docs/version-7.1/client-api/configuration/_category_.json
new file mode 100644
index 0000000000..a20298e082
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/configuration/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 13,
+    "label": "Configuration"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/configuration/_conventions-csharp.mdx b/versioned_docs/version-7.1/client-api/configuration/_conventions-csharp.mdx
new file mode 100644
index 0000000000..5889ddc81c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/configuration/_conventions-csharp.mdx
@@ -0,0 +1,1259 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Conventions** in RavenDB are customizable settings that let you tailor the client's behavior to your preferences.
+ +* In this article: + * [How to set conventions](../../client-api/configuration/conventions.mdx#how-to-set-conventions) + * [Conventions:](../../client-api/configuration/conventions.mdx#conventions:) + [AddIdFieldToDynamicObjects](../../client-api/configuration/conventions.mdx#addidfieldtodynamicobjects) + [AggressiveCache.Duration](../../client-api/configuration/conventions.mdx#aggressivecacheduration) + [AggressiveCache.Mode](../../client-api/configuration/conventions.mdx#aggressivecachemode) + [AsyncDocumentIdGenerator](../../client-api/configuration/conventions.mdx#asyncdocumentidgenerator) + [CreateHttpClient](../../client-api/configuration/conventions.mdx#createhttpclient) + [DisableAtomicDocumentWritesInClusterWideTransaction](../../client-api/configuration/conventions.mdx#disableatomicdocumentwritesinclusterwidetransaction) + [DisableTcpCompression](../../client-api/configuration/conventions.mdx#disabletcpcompression) + [DisableTopologyCache](../../client-api/configuration/conventions.mdx#disabletopologycache) + [DisableTopologyUpdates](../../client-api/configuration/conventions.mdx#disabletopologyupdates) + [DisposeCertificate](../../client-api/configuration/conventions.mdx#disposecertificate) + [FindClrType](../../client-api/configuration/conventions.mdx#findclrtype) + [FindClrTypeName](../../client-api/configuration/conventions.mdx#findclrtypename) + [FindClrTypeNameForDynamic](../../client-api/configuration/conventions.mdx#findclrtypenamefordynamic) + [FindCollectionName](../../client-api/configuration/conventions.mdx#findcollectionname) + [FindCollectionNameForDynamic](../../client-api/configuration/conventions.mdx#findcollectionnamefordynamic) + [FindIdentityProperty](../../client-api/configuration/conventions.mdx#findidentityproperty) + [FindIdentityPropertyNameFromCollectionName](../../client-api/configuration/conventions.mdx#findidentitypropertynamefromcollectionname) + [FindProjectedPropertyNameForIndex](../../client-api/configuration/conventions.mdx#findprojectedpropertynameforindex) + [FindPropertyNameForDynamicIndex](../../client-api/configuration/conventions.mdx#findpropertynamefordynamicindex) + [FindPropertyNameForIndex](../../client-api/configuration/conventions.mdx#findpropertynameforindex) + [FirstBroadcastAttemptTimeout](../../client-api/configuration/conventions.mdx#firstbroadcastattempttimeout) + [HttpClientType](../../client-api/configuration/conventions.mdx#httpclienttype) + [HttpVersion](../../client-api/configuration/conventions.mdx#httpversion) + [IdentityPartsSeparator](../../client-api/configuration/conventions.mdx#identitypartsseparator) + [LoadBalanceBehavior](../../client-api/configuration/conventions.mdx#loadbalancebehavior) + [LoadBalancerContextSeed](../../client-api/configuration/conventions.mdx#loadbalancebehavior) + [LoadBalancerPerSessionContextSelector](../../client-api/configuration/conventions.mdx#loadbalancebehavior) + [MaxHttpCacheSize](../../client-api/configuration/conventions.mdx#maxhttpcachesize) + [MaxNumberOfRequestsPerSession](../../client-api/configuration/conventions.mdx#maxnumberofrequestspersession) + [Modify serialization of property name](../../client-api/configuration/conventions.mdx#modify-serialization-of-property-name) + [OperationStatusFetchMode](../../client-api/configuration/conventions.mdx#operationstatusfetchmode) + [PreserveDocumentPropertiesNotFoundOnModel](../../client-api/configuration/conventions.mdx#preservedocumentpropertiesnotfoundonmodel) + 
[ReadBalanceBehavior](../../client-api/configuration/conventions.mdx#readbalancebehavior) + [RequestTimeout](../../client-api/configuration/conventions.mdx#requesttimeout) + [ResolveTypeFromClrTypeName](../../client-api/configuration/conventions.mdx#resolvetypefromclrtypename) + [SaveEnumsAsIntegers](../../client-api/configuration/conventions.mdx#saveenumsasintegers) + [SecondBroadcastAttemptTimeout](../../client-api/configuration/conventions.mdx#secondbroadcastattempttimeout) + [SendApplicationIdentifier](../../client-api/configuration/conventions.mdx#sendapplicationidentifier) + [ShouldIgnoreEntityChanges](../../client-api/configuration/conventions.mdx#shouldignoreentitychanges) + [TopologyCacheLocation](../../client-api/configuration/conventions.mdx#topologycachelocation) + [TransformTypeCollectionNameToDocumentIdPrefix](../../client-api/configuration/conventions.mdx#transformtypecollectionnametodocumentidprefix) + [UseHttpCompression](../../client-api/configuration/conventions.mdx#usehttpcompression) + [UseHttpDecompression](../../client-api/configuration/conventions.mdx#usehttpdecompression) + [HttpCompressionAlgorithm](../../client-api/configuration/conventions.mdx#httpcompressionalgorithm) + [UseOptimisticConcurrency](../../client-api/configuration/conventions.mdx#useoptimisticconcurrency) + [WaitForIndexesAfterSaveChangesTimeout](../../client-api/configuration/conventions.mdx#waitforindexesaftersavechangestimeout) + [WaitForNonStaleResultsTimeout](../../client-api/configuration/conventions.mdx#waitfornonstaleresultstimeout) + [WaitForReplicationAfterSaveChangesTimeout](../../client-api/configuration/conventions.mdx#waitforreplicationaftersavechangestimeout) + + +## How to set conventions + +* Access the conventions via the `Conventions` property of the `DocumentStore` object. + +* The conventions set on a Document Store will apply to ALL [sessions](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) and [operations](../../client-api/operations/what-are-operations.mdx) associated with that store. + +* Customizing the conventions can only be set **before** calling `DocumentStore.Initialize()`. + Trying to do so after calling _Initialize()_ will throw an exception. + + + +{`using (var store = new DocumentStore() +\{ + Conventions = + \{ + // Set conventions HERE, e.g.: + MaxNumberOfRequestsPerSession = 50, + AddIdFieldToDynamicObjects = false + // ... + \} +\}.Initialize()) +\{ + // * Here you can interact with the RavenDB store: + // open sessions, create or query for documents, perform operations, etc. + + // * Conventions CANNOT be set here after calling Initialize() +\} +`} + + + + + +## Conventions: + + + +#### AddIdFieldToDynamicObjects +* Use the `AddIdFieldToDynamicObjects` convention to determine whether an `Id` field is automatically added + to [dynamic objects](https://learn.microsoft.com/en-us/dotnet/csharp/advanced-topics/interop/using-type-dynamic) when [storing new entities](../../client-api/session/storing-entities.mdx) via the session. + +* DEFAULT: `true` + + + +{`// Syntax: +public bool AddIdFieldToDynamicObjects \{ get; set; \} +`} + + + + + + +#### AggressiveCache.Duration +* Use the `AggressiveCache.Duration` convention to define the [aggressive cache](../../client-api/how-to/setup-aggressive-caching.mdx) duration period. 
+
+* DEFAULT: `1 day`
+
+
+
+{`// Syntax:
+public TimeSpan Duration \{ get; set; \}
+`}
+
+
+
+
+
+
+#### AggressiveCache.Mode
+* Use the `AggressiveCache.Mode` convention to define the [aggressive cache](../../client-api/how-to/setup-aggressive-caching.mdx) mode.
+  (`AggressiveCacheMode.TrackChanges` or `AggressiveCacheMode.DoNotTrackChanges`)
+
+* DEFAULT: `AggressiveCacheMode.TrackChanges`
+
+
+
+{`// Syntax:
+public AggressiveCacheMode Mode \{ get; set; \}
+`}
+
+
+
+
+
+
+#### AsyncDocumentIdGenerator
+* Use the `AsyncDocumentIdGenerator` convention to define the document ID generator method
+  used when storing a document without explicitly specifying its `Id`.
+
+* You can override this global ID generator for specific object types using the [RegisterAsyncIdConvention](../../client-api/configuration/identifier-generation/type-specific.mdx) convention.
+
+* DEFAULT:
+  The default document ID generator is the `GenerateDocumentIdAsync` method, which is part of the `HiLoIdGenerator` object within the _DocumentStore_.
+  This method implements the [HiLo algorithm](../../client-api/document-identifiers/hilo-algorithm.mdx) to ensure efficient ID generation when storing a document without explicitly specifying its `Id`.
+
+
+
+{`// Customize ID generation for all collections
+AsyncDocumentIdGenerator = (database, obj) =>
+\{
+    var objectType = obj.GetType().Name; // e.g., Person, Order, etc.
+    var timestamp = DateTime.UtcNow.Ticks; // Get the current timestamp
+
+    // Format the ID as \{ObjectType\}/\{Ticks\}
+    var id = $"\{objectType\}/\{timestamp\}";
+
+    return Task.FromResult(id);
+\}
+`}
+
+
+
+
+{`// Syntax:
+public Func<string, object, Task<string>> AsyncDocumentIdGenerator \{ get; set; \}
+`}
+
+
+
+
+
+
+#### CreateHttpClient
+* Use the `CreateHttpClient` convention to modify the HTTP client your client application uses.
+
+* For example, implementing your own HTTP client can be useful when you'd like your clients to provide the server with tracing info.
+
+* If you override the default `CreateHttpClient` convention, we advise that you also set the HTTP client type
+  correctly using the [HttpClientType](../../client-api/configuration/conventions.mdx#httpclienttype) convention.
+
+
+
+{`CreateHttpClient = handler =>
+\{
+    // Your HTTP client code here, e.g.:
+    var httpClient = new MyHttpClient(new HttpClientXRayTracingHandler(new HttpClientHandler()));
+    return httpClient;
+\}
+`}
+
+
+
+
+{`// Syntax:
+public Func<HttpClientHandler, HttpClient> CreateHttpClient \{ get; set; \}
+`}
+
+
+
+
+
+
+#### DisableAtomicDocumentWritesInClusterWideTransaction
+* EXPERT ONLY:
+  Use the `DisableAtomicDocumentWritesInClusterWideTransaction` convention to disable automatic
+  atomic writes with cluster write transactions.
+
+* When set to `true`, only explicitly added compare-exchange values will be considered when validating cluster-wide transactions.
+
+* DEFAULT: `false`
+
+
+
+{`// Syntax:
+public bool? DisableAtomicDocumentWritesInClusterWideTransaction \{ get; set; \}
+`}
+
+
+
+
+
+
+#### DisableTcpCompression
+* When setting the `DisableTcpCompression` convention to `true`, TCP data will not be compressed.
+
+* DEFAULT: `false`
+
+
+
+{`// Syntax:
+public bool DisableTcpCompression \{ get; set; \}
+`}
+
+
+
+
+
+
+#### DisableTopologyCache
+* By default, the client caches the cluster's topology in `*.raven-cluster-topology` files on disk.
+  When all servers provided in the `DocumentStore.Urls` property are down or unavailable,
+  the client will load the topology from the latest file and try to connect to nodes that are not listed in the `Urls` property.
+ +* This behavior can be disabled when setting the `DisableTopologyCache` convention to `true`. + In such a case: + + * The client will not load the topology from the cache upon failing to connect to a server. + * Even if the client is configured to [receive topology updates](../../client-api/configuration/conventions.mdx#disabletopologyupdates) from the server, + no topology files will be saved on disk, thus preventing the accumulation of these files. + +* DEFAULT: `false` + + + +{`// Syntax: +public bool DisableTopologyCache \{ get; set; \} +`} + + + + + + +#### DisableTopologyUpdates +* When setting the `DisableTopologyUpdates` convention to `true`, + no database topology updates will be sent from the server to the client (e.g. adding or removing a node). + +* DEFAULT: `false` + + + +{`// Syntax: +public bool DisableTopologyUpdates \{ get; set; \} +`} + + + + + + +#### DisposeCertificate +* When setting the `DisposeCertificate` convention to `true`, + the `DocumentStore.Certificate` will be disposed of during DocumentStore disposal. + +* DEFAULT: `true` + + + +{`// Syntax: +public bool DisposeCertificate \{ get; set; \} +`} + + + + + + +#### FindClrType +* Use the `FindClrType` convention to define a function that finds the CLR type of a document. + +* DEFAULT: + The CLR type is retrieved from the `Raven-Clr-Type` property under the `@metadata` key in the document. + + + +{`// The default implementation is: +FindClrType = (_, doc) => +\{ + if (doc.TryGet(Constants.Documents.Metadata.Key, out BlittableJsonReaderObject metadata) && + metadata.TryGet(Constants.Documents.Metadata.RavenClrType, out string clrType)) + return clrType; + + return null; +\} +`} + + + + +{`// Syntax: +public Func FindClrType \{ get; set; \} +`} + + + + + + +#### FindClrTypeName +* Use the `FindClrTypeName` convention to define a function that returns the CLR type name from a given type. + +* DEFAULT: Return the entity's full name, including the assembly name. + + + +{`// Syntax: +public Func FindClrTypeName \{ get; set; \} +`} + + + + + + +#### FindClrTypeNameForDynamic +* Use the `FindClrTypeNameForDynamic` convention to define a function that returns the CLR type name + from a dynamic entity. + +* DEFAULT: The dynamic entity type is returned. + + + +{`// The dynamic entity's type is returned by default +FindClrTypeNameForDynamic = dynamicEntity => dynamicEntity.GetType() +`} + + + + +{`// Syntax: +public Func FindClrTypeNameForDynamic \{ get; set; \} +`} + + + + + + +#### FindCollectionName +* Use the `FindCollectionName` convention to define a function that will customize + the collection name from a given type. + +* DEFAULT: The collection name will be the plural form of the type name. + + + +{`// Here the collection name will be the type name separated by dashes +FindCollectionName = type => String.Join("-", type.Name.ToCharArray()) +`} + + + + +{`// Syntax: +public Func FindCollectionName \{ get; set; \} +`} + + + + + + +#### FindCollectionNameForDynamic +* Use the `FindCollectionNameForDynamic` convention to define a function that will customize the + collection name from a dynamic type. + +* DEFAULT: The collection name will be the entity's type. 
+ + + +{`// Here the collection name will be some property of the dynamic entity +FindCollectionNameForDynamic = dynamicEntity => dynamicEntity.SomeProperty +`} + + + + +{`// Syntax: +public Func FindCollectionNameForDynamic \{ get; set; \} +`} + + + + + + +#### FindIdentityProperty +* Use the `FindIdentityProperty` convention to define a function that finds the specified ID property + in the entity. + +* DEFAULT: The entity's `Id` property serves as the ID property. + + + +{`// If there exists a property with name "CustomizedId" then it will be the entity's ID property +FindIdentityProperty = memberInfo => memberInfo.Name == "CustomizedId" +`} + + + + +{`// Syntax: +public Func FindIdentityProperty \{ get; set; \} +`} + + + + + + +#### FindIdentityPropertyNameFromCollectionName +* Use the `FindIdentityPropertyNameFromCollectionName` convention to define a function that customizes + the entity's ID property from the collection name. + +* DEFAULT: Will use the `Id` property. + + + +{`// Will use property "CustomizedId" as the ID property +FindIdentityPropertyNameFromCollectionName = collectionName => "CustomizedId" +`} + + + + +{`// Syntax: +public Func FindIdentityPropertyNameFromCollectionName \{ get; set; \} +`} + + + + + + +#### FindProjectedPropertyNameForIndex +* Use the `FindProjectedPropertyNameForIndex` convention to define a function that customizes the + **projected** field names that will be used in the RQL generated by the client and sent to the server when querying a static index. + +* This can be useful when projecting **nested properties** that are not [Stored in the index](../../indexes/storing-data-in-index.mdx). + +* The function receives the following input: + the index type, the index name, the current path, and the property path that is used in the query. + +* DEFAULT: `null` + When `FindProjectedPropertyNameForIndex` is set to `null` (or returns `null`), + the [FindPropertyNameForIndex](../../client-api/configuration/conventions.mdx#findpropertynameforindex) convention is used instead. +**Example**: +Consider the following index, which indexes the nested `School.Id` property from _Student_ documents: + + + + +{`public class Students_BySchoolId : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string Name { get; set; } + public string SchoolId { get; set; } + } + + public Students_BySchoolId() + { + Map = students => from student in students + select new IndexEntry + { + Name = student.StudentName, + SchoolId = student.School.Id // index nested property + }; + } +} +`} + + + + +{`public class Student +{ + public string StudentName { get; set; } + public School School { get; set; } + // ... other student properties +} + +public class School +{ + public string SchoolName { get; set; } + public string Id { get; set; } +} +`} + + + + +When querying the index and projecting fields from the matching _Student_ documents, +if the `FindProjectedPropertyNameForIndex` convention is Not set, +the client will use the [FindPropertyNameForIndex](../../client-api/configuration/conventions.mdx#findpropertynameforindex) convention instead when constructing the RQL sent to the server. 
+ +This results in the following RQL query: +(Note that while the high-level query uses `.Select(student => student.School.Id)`, +the RQL sent to the server contains `School_Id`) + + + + +{`// Query the index +var query = session.Query() + .Where(x => x.Name == "someStudentName") + .OfType() + // Project only the School.Id property from the Student document in the results + .Select(student => student.School.Id) + .ToList(); +`} + + + + +{`from index 'Students/BySchoolId' +where Name == "someStudentName" +select School_Id +// Since the FindProjectedPropertyNameForIndex convention was not yet defined, +// the 'School_Id' property name was generated using the FindPropertyNameForIndex convention. +// ('School.Id' was converted to 'School_Id') +`} + + + + +The RQL generated by the above query projects the `School_Id` field, so the server first attempts to fetch this property from the [Stored index fields](../../indexes/storing-data-in-index.mdx) +(this is the default behavior, learn more in [Projection behavior with a static-index](../../indexes/querying/projections.mdx#projection-behavior-with-a-static-index)). + +However, because this property is Not stored in the index, the server then tries to retrieve it from the _Student_ document instead. +But the document does not contain a flat `School_Id` field — it contains the nested property `School.Id`, +and so no results are returned for the `School_Id` field. + +To resolve this issue, +set the `FindProjectedPropertyNameForIndex` convention to return the nested property name that the client should use when constructing the RQL query sent to the server: + + + +{`FindProjectedPropertyNameForIndex = (indexedType, indexName, path, prop) => path + prop +`} + + + +Now, when using the same query, the RQL sent to the server will contain the nested `School.Id` property name, +and the query will return results: + + + + +{`// Query the index +var query = session.Query() + .Where(x => x.Name == "someStudentName") + .OfType() + // Project only the School.Id property from the Student document in the results + .Select(student => student.School.Id) + .ToList(); +`} + + + + +{`from index 'Students/BySchoolId' +where Name == "someStudentName" +select School.Id +// The RQL sent to the server now contains 'School.Id', +// as defined by the FindProjectedPropertyNameForIndex convention. +`} + + + + + +{`// Syntax: +public Func FindProjectedPropertyNameForIndex \{ get; set; \} +`} + + + + + + +#### FindPropertyNameForDynamicIndex +* Use the `FindPropertyNameForDynamicIndex` convention to define a function that customizes the + property name that will be used in the RQL sent to the server when making a dynamic query. + +* The function receives the following input: + the index type, the index name, the current path, and the property path that is used in the query predicate. + + + +{`// The DEFAULT function: +FindPropertyNameForDynamicIndex = (Type indexedType, string indexedName, string path, string prop) => + path + prop +`} + + + + +{`// Syntax: +public Func FindPropertyNameForDynamicIndex \{ get; set; \} +`} + + + + + + +#### FindPropertyNameForIndex +* Use the `FindPropertyNameForIndex` convention to define a function that customizes the name of the + index-field property that will be used in the RQL sent to the server when querying a static index. + +* The function receives the following input: + the index type, the index name, the current path, and the property path that is used in the query predicate. 
+ +* DEFAULT: `[].` & `.` are replaced by `_` + + + +{`// The DEFAULT function: +FindPropertyNameForIndex = (Type indexedType, string indexedName, string path, string prop) => + (path + prop).Replace("[].", "_").Replace(".", "_") +`} + + + + +{`// Syntax: +public Func<Type, string, string, string, string> FindPropertyNameForIndex \{ get; set; \} +`} + + + + + + +#### FirstBroadcastAttemptTimeout +* Use the `FirstBroadcastAttemptTimeout` convention to set the timeout for the first broadcast attempt. + +* In the first attempt, the request executor will send a single request to the selected node. + Learn about the "selected node" in: [Client logic for choosing a node](../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* A [second attempt](../../client-api/configuration/conventions.mdx#secondbroadcastattempttimeout) will be made upon failure. + +* DEFAULT: `5 seconds` + + + +{`FirstBroadcastAttemptTimeout = TimeSpan.FromSeconds(10) +`} + + + + +{`// Syntax: +public TimeSpan FirstBroadcastAttemptTimeout \{ get; set; \} +`} + + + + + + +#### HttpClientType +* Use the `HttpClientType` convention to set the type of HTTP client you're using. + +* RavenDB uses the HTTP client type internally to manage its cache. + +* If you override the [CreateHttpClient](../../client-api/configuration/conventions.mdx#createhttpclient) convention to use a non-default HTTP client, + we advise that you also set `HttpClientType` so it returns the client type you are actually using. + + + +{`// The type of HTTP client you are using +HttpClientType = typeof(MyHttpClient) +`} + + + + +{`// Syntax: +public Type HttpClientType \{ get; set; \} +`} + + + + + + +#### HttpVersion +* Use the `HttpVersion` convention to set the HTTP version the client will use when communicating + with the server. + +* DEFAULT: + * When this convention is explicitly set to `null`, the default HTTP version provided by your .NET framework is used. + * Otherwise, the default HTTP version is set to `System.Net.HttpVersion.Version20` (HTTP 2.0). + + + +{`// Syntax: +public Version HttpVersion \{ get; set; \} +`} + + + + + + +#### IdentityPartsSeparator +* Use the `IdentityPartsSeparator` convention to customize the **default ID separator** for document IDs generated automatically by the + [HiLo algorithm](../../client-api/document-identifiers/hilo-algorithm.mdx). + +* The value can be any char except `|` (pipe), which is reserved for identity IDs. + +* DEFAULT: `/` (forward slash) + +* Applies only to: [HiLo IDs](../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id). + + + +{`// Syntax: +public char IdentityPartsSeparator \{ get; set; \} +`} + + + + + + +#### LoadBalanceBehavior +#### LoadBalancerPerSessionContextSelector +#### LoadBalancerContextSeed +* Configure the **load balance behavior** by setting the following conventions: + * `LoadBalanceBehavior` + * `LoadBalancerPerSessionContextSelector` + * `LoadBalancerContextSeed` + +* Learn more in the dedicated [Load balance behavior](../../client-api/configuration/load-balance/load-balance-behavior.mdx) article. + + + + +#### MaxHttpCacheSize +* Use the `MaxHttpCacheSize` convention to set the maximum HTTP cache size. + This setting will affect all the databases accessed by the Document Store. + +* DEFAULT: + + | System | Usable Memory | Default Value | + |----------|-------------------------------------------------------------------------------------------------------|----------------------------| + | 64-bit | Lower than or equal to 3GB <br/> Greater than 3GB and Lower than or equal to 6GB <br/> Greater than 6GB | 64MB <br/> 128MB <br/> 512MB | + | 32-bit | | 32MB | + +* **Disabling Caching**: + + * To disable caching globally, set `MaxHttpCacheSize` to zero. + * To disable caching per session, see: [Disable caching per session](../../client-api/session/configuration/how-to-disable-caching.mdx). + +* Note: RavenDB also supports Aggressive Caching. + Learn more about this in the [Setup aggressive caching](../../client-api/how-to/setup-aggressive-caching.mdx) article. + + + +{`MaxHttpCacheSize = new Size(256, SizeUnit.Megabytes) // Set max cache size +`} + + + + +{`MaxHttpCacheSize = new Size(0, SizeUnit.Megabytes) // Disable caching +`} + + + + +{`// Syntax: +public Size MaxHttpCacheSize \{ get; set; \} +`} + + + +
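+ +A minimal usage sketch - the server URL and database name below are placeholders, and `Size`/`SizeUnit` are the types shown in the syntax above: + + + +{`// Sketch: cap the HTTP cache for all databases accessed through this store +using (var store = new DocumentStore() +\{ + Urls = new[] \{ "http://localhost:8080" \}, // placeholder URL + Database = "Northwind", // placeholder database name + Conventions = + \{ + MaxHttpCacheSize = new Size(256, SizeUnit.Megabytes) + \} +\}.Initialize()) +\{ + // Sessions opened from this store share the configured HTTP cache +\} +`} + +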
+ + +#### MaxNumberOfRequestsPerSession +* Use the `MaxNumberOfRequestsPerSession` convention to set the maximum number of requests per session. + +* DEFAULT: `30` + + + +{`// Syntax: +public int MaxNumberOfRequestsPerSession \{ get; set; \} +`} + + + + + + +#### Modify serialization of property name +* Different clients use different casing conventions for entity field names. For example: + + | Language | Default casing | Example | + |------------|-----------------|------------| + | C# | PascalCase | OrderLines | + | Java | camelCase | orderLines | + | JavaScript | camelCase | orderLines | + +* By default, when saving an entity, the naming convention used by the client is reflected in the JSON document properties on the server-side. + This default serialization behavior can be customized to facilitate language interoperability. + +* **Example**: + + Set `CustomizeJsonSerializer` and `PropertyNameConverter` to serialize an entity's properties as camelCase from a C# client: + + + +{`Serialization = new NewtonsoftJsonSerializationConventions +\{ + // .Net properties will be serialized as camelCase in the JSON document when storing an entity + // and deserialized back to PascalCase + CustomizeJsonSerializer = s => s.ContractResolver = new CamelCasePropertyNamesContractResolver() +\}, + +// In addition, the following convention is required when +// making a query that filters by a field name and when indexing. +PropertyNameConverter = memberInfo => FirstCharToLower(memberInfo.Name) +`} + + + + +{`private string FirstCharToLower(string str) => $"\{Char.ToLower(str[0])\}\{str.Substring(1)\}"; +`} + + + + +{`// Syntax: +public ISerializationConventions Serialization \{ get; set; \} +`} + + + + + + +#### OperationStatusFetchMode +* Use the `OperationStatusFetchMode` convention to set the way an [operation](../../client-api/operations/what-are-operations.mdx) is getting its status when [waiting for completion](../../client-api/operations/what-are-operations.mdx#wait-for-completion). + +* DEFAULT: + By default, the value is set to `ChangesApi` which uses the WebSocket protocol underneath when a connection is established with the server. + +* On some older systems like Windows 7 the WebSocket protocol might not be available due to the OS and .NET Framework limitations. + To bypass this issue, the value can be changed to `Polling`. + + + +{`OperationStatusFetchMode = OperationStatusFetchMode.ChangesApi // ChangesApi | Polling +`} + + + + +{`// Syntax: +public OperationStatusFetchMode OperationStatusFetchMode \{ get; set; \} +`} + + + + + + +#### PreserveDocumentPropertiesNotFoundOnModel +* Loading a document using a different model will result in the removal of the missing model properties + from the loaded entity, and no exception is thrown. + +* Setting the `PreserveDocumentPropertiesNotFoundOnModel` convention to `true` + allows the client to check (via [whatChanged](../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#get-session-changes) + or via [WhatChangedFor](../../client-api/session/how-to/check-if-entity-has-changed.mdx#get-entity-changes) methods) + for the missing properties on the entity after loading the document. + +* DEFAULT: `true` + + + +{`// Syntax: +public bool PreserveDocumentPropertiesNotFoundOnModel \{ get; set; \} +`} + + + + + + +#### ReadBalanceBehavior +* Configure the **read request behavior** by setting the `ReadBalanceBehavior` convention. 
+ +* Learn more in the dedicated [Read balance behavior](../../client-api/configuration/load-balance/read-balance-behavior.mdx) article. + + + + +#### RequestTimeout +* Use the `RequestTimeout` convention to define the global request timeout value for all `RequestExecutors` created per database. + +* DEFAULT: `null` (the default HTTP client timeout will be applied - 12h) + + + +{`RequestTimeout = TimeSpan.FromSeconds(90) +`} + + + + +{`// Syntax: +public TimeSpan? RequestTimeout \{ get; set; \} +`} + + + + + + +#### ResolveTypeFromClrTypeName +* Use the `ResolveTypeFromClrTypeName` convention to define a function that resolves the CLR type + from the CLR type name. + +* DEFAULT: The type is returned. + + + +{`// The type itself is returned by default +ResolveTypeFromClrTypeName = clrType => clrType.GetType() +`} + + + + +{`// Syntax: +public Func ResolveTypeFromClrTypeName \{ get; set; \} +`} + + + + + + +#### SaveEnumsAsIntegers +* When setting the `SaveEnumsAsIntegers` convention to `true`, + C# `enum` types will be stored and queried as integers, rather than their string representations. + +* DEFAULT: `false` (save as strings) + + + +{`// Syntax: +public bool SaveEnumsAsIntegers \{ get; set; \} +`} + + + + + + +#### SecondBroadcastAttemptTimeout +* Use the `SecondBroadcastAttemptTimeout` convention to set the timeout for the second broadcast attempt. + +* Upon failure of the [first attempt](../../client-api/configuration/conventions.mdx#firstbroadcastattempttimeout) the request executor will resend the command to all nodes simultaneously. + +* DEFAULT: `30 seconds` + + + +{`SecondBroadcastAttemptTimeout = TimeSpan.FromSeconds(20) +`} + + + + +{`public TimeSpan SecondBroadcastAttemptTimeout \{ get; set; \} +`} + + + + + + +#### SendApplicationIdentifier +* Use the `SendApplicationIdentifier` convention to `true` to enable sending a unique application identifier to the RavenDB Server. + +* Setting to _true_ allows the server to issue performance hint notifications to the client, + e.g. during robust topology update requests which could indicate a Client API misuse impacting the overall performance. + +* DEFAULT: `true` + + + +{`// Syntax: +public bool SendApplicationIdentifier \{ get; set; \} +`} + + + + + + +#### ShouldIgnoreEntityChanges +* Set the `ShouldIgnoreEntityChanges` convention to disable entity tracking for certain entities. + +* Learn more in [Customize tracking in conventions](../../client-api/session/configuration/how-to-disable-tracking.mdx#customize-tracking-in-conventions). + + + + +#### TopologyCacheLocation +* Use the `TopologyCacheLocation` convention to change the location of the topology cache files + (`*.raven-database-topology` & `*.raven-cluster-topology`). + +* Directory existence and writing permissions will be checked when setting this value. + +* DEFAULT: `AppContext.BaseDirectory` (The application's base directory) + + + +{`TopologyCacheLocation = @"C:\\RavenDB\\TopologyCache" +`} + + + + +{`// Syntax: +public string TopologyCacheLocation \{ get; set; \} +`} + + + + + + +#### TransformTypeCollectionNameToDocumentIdPrefix +* Use the `TransformTypeCollectionNameToDocumentIdPrefix` convention to define a function that will + customize the document ID prefix from the collection name. 
+ +* DEFAULT: + By default, the document ID prefix is determined as follows: + +| Number of uppercase letters in collection name | Document ID prefix | +|--------------------------------------------------|-------------------------------------------------------------| +| `<= 1` | Use the collection name with all lowercase letters | +| `> 1` | Use the collection name as is, preserving the original case | + + + +{`// Syntax: +public Func<string, string> TransformTypeCollectionNameToDocumentIdPrefix \{ get; set; \} +`} + + + + + + +#### UseHttpCompression +* When setting the `UseHttpCompression` convention to `true`, + `Gzip` compression will be used when sending the content of an HTTP request. + +* When the convention is set to `false`, content will not be compressed. + +* DEFAULT: `true` + + + +{`// Syntax: +public bool UseHttpCompression \{ get; set; \} +`} + + + + + + +#### UseHttpDecompression +* When setting the `UseHttpDecompression` convention to `true`, + the client can accept compressed HTTP response content and will use zstd/gzip/deflate decompression methods. + +* DEFAULT: `true` + + + +{`// Syntax: +public bool UseHttpDecompression \{ get; set; \} +`} + + + + + + + +#### HttpCompressionAlgorithm +* Use this convention to set the HTTP compression algorithm + (see [UseHttpDecompression](../../client-api/configuration/conventions.mdx#usehttpdecompression) above). + +* DEFAULT: `Zstd` + + In RavenDB versions up to `6.2`, HTTP compression is set to `Gzip` by default. + In RavenDB versions from `7.0` on, the default has changed and is now `Zstd`. + + + + +{`// Syntax: +public HttpCompressionAlgorithm HttpCompressionAlgorithm \{ get; set; \} +`} + + + + + + + + +#### UseOptimisticConcurrency +* When setting the `UseOptimisticConcurrency` convention to `true`, + Optimistic Concurrency checks will be applied for all sessions opened from the Document Store. + +* Learn more about Optimistic Concurrency and the various ways to enable it in the + [how to enable optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx) + article. + +* DEFAULT: `false` + + + +{`// Syntax: +public bool UseOptimisticConcurrency \{ get; set; \} +`} + + + + + + +#### WaitForIndexesAfterSaveChangesTimeout +* Use the `WaitForIndexesAfterSaveChangesTimeout` convention to set the default timeout for the + `DocumentSession.Advanced.WaitForIndexesAfterSaveChanges` method. + +* DEFAULT: 15 Seconds + + + +{`WaitForIndexesAfterSaveChangesTimeout = TimeSpan.FromSeconds(10) +`} + + + + +{`// Syntax: +public TimeSpan WaitForIndexesAfterSaveChangesTimeout \{ get; set; \} +`} + + + + + + +#### WaitForNonStaleResultsTimeout +* Use the `WaitForNonStaleResultsTimeout` convention to set the default timeout used by the + `WaitForNonStaleResults` method when querying. + +* DEFAULT: 15 Seconds + + + +{`WaitForNonStaleResultsTimeout = TimeSpan.FromSeconds(10) +`} + + + + +{`// Syntax: +public TimeSpan WaitForNonStaleResultsTimeout \{ get; set; \} +`} + + + + + + +#### WaitForReplicationAfterSaveChangesTimeout +* Use the `WaitForReplicationAfterSaveChangesTimeout` convention to set the default timeout for the + `DocumentSession.Advanced.WaitForReplicationAfterSaveChanges` method.
+ +* DEFAULT: 15 Seconds + + + +{`WaitForReplicationAfterSaveChangesTimeout = TimeSpan.FromSeconds(10) +`} + + + + +{`// Syntax: +public TimeSpan WaitForReplicationAfterSaveChangesTimeout \{ get; set; \} +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/configuration/_conventions-nodejs.mdx b/versioned_docs/version-7.1/client-api/configuration/_conventions-nodejs.mdx new file mode 100644 index 0000000000..f5b81fa38a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/_conventions-nodejs.mdx @@ -0,0 +1,599 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Conventions** in RavenDB are customizable settings that users can configure to tailor client behaviors according to their preferences. + +* In this article: + * [How to set conventions](../../client-api/configuration/conventions.mdx#how-to-set-conventions) + * [Conventions:](../../client-api/configuration/conventions.mdx#conventions:) + [customFetch](../../client-api/configuration/conventions.mdx#customfetch) + [disableAtomicDocumentWritesInClusterWideTransaction](../../client-api/configuration/conventions.mdx#disableatomicdocumentwritesinclusterwidetransaction) + [disableTopologyUpdates](../../client-api/configuration/conventions.mdx#disabletopologyupdates) + [findCollectionName](../../client-api/configuration/conventions.mdx#findcollectionname) + [findJsType](../../client-api/configuration/conventions.mdx#_findjstype) + [findJsTypeName](../../client-api/configuration/conventions.mdx#_findjstypename) + [firstBroadcastAttemptTimeout](../../client-api/configuration/conventions.mdx#firstbroadcastattempttimeout) + [identityPartsSeparator](../../client-api/configuration/conventions.mdx#identitypartsseparator) + [loadBalanceBehavior](../../client-api/configuration/conventions.mdx#loadbalancebehavior) + [loadBalancerContextSeed](../../client-api/configuration/conventions.mdx#loadbalancebehavior) + [loadBalancerPerSessionContextSelector](../../client-api/configuration/conventions.mdx#loadbalancebehavior) + [maxHttpCacheSize](../../client-api/configuration/conventions.mdx#maxhttpcachesize) + [maxNumberOfRequestsPerSession](../../client-api/configuration/conventions.mdx#maxnumberofrequestspersession) + [readBalanceBehavior](../../client-api/configuration/conventions.mdx#readbalancebehavior) + [requestTimeout](../../client-api/configuration/conventions.mdx#requesttimeout) + [secondBroadcastAttemptTimeout](../../client-api/configuration/conventions.mdx#secondbroadcastattempttimeout) + [sendApplicationIdentifier](../../client-api/configuration/conventions.mdx#sendapplicationidentifier) + [shouldIgnoreEntityChanges](../../client-api/configuration/conventions.mdx#shouldignoreentitychanges) + [storeDatesInUtc](../../client-api/configuration/conventions.mdx#storedatesinutc) + [storeDatesWithTimezoneInfo](../../client-api/configuration/conventions.mdx#storedateswithtimezoneinfo) + [syncJsonParseLimit](../../client-api/configuration/conventions.mdx#syncjsonparselimit) + [throwIfQueryPageSizeIsNotSet](../../client-api/configuration/conventions.mdx#throwifquerypagesizeisnotset) + [transformClassCollectionNameToDocumentIdPrefix](../../client-api/configuration/conventions.mdx#transformclasscollectionnametodocumentidprefix) + [useCompression](../../client-api/configuration/conventions.mdx#usecompression) + [useJsonlStreaming](../../client-api/configuration/conventions.mdx#usejsonlstreaming) + 
[useOptimisticConcurrency](../../client-api/configuration/conventions.mdx#useoptimisticconcurrency) + [waitForIndexesAfterSaveChangesTimeout](../../client-api/configuration/conventions.mdx#waitforindexesaftersavechangestimeout) + [waitForNonStaleResultsTimeout](../../client-api/configuration/conventions.mdx#waitfornonstaleresultstimeout) + [waitForReplicationAfterSaveChangesTimeout](../../client-api/configuration/conventions.mdx#waitforreplicationaftersavechangestimeout) + + +## How to set conventions + +* Access the conventions via the `conventions` property of the `DocumentStore` object. + +* The conventions set on a Document Store will apply to ALL [sessions](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) and [operations](../../client-api/operations/what-are-operations.mdx) associated with that store. + +* Customizing the conventions can only be set **before** calling `documentStore.initialize()`. + Trying to do so after calling _initialize()_ will throw an exception. + + + +{`const documentStore = new DocumentStore(["serverUrl_1", "serverUrl_2", "..."], "DefaultDB"); + +// Set conventions HERE, e.g.: +documentStore.conventions.maxNumberOfRequestsPerSession = 50; +documentStore.conventions.disableTopologyUpdates = true; + +documentStore.initialize(); + +// * Here you can interact with the RavenDB store: +// open sessions, create or query for documents, perform operations, etc. + +// * Conventions CANNOT be set here after calling initialize() +`} + + + + + +## Conventions: + + + +#### customFetch +* Use the `customFetch` convention to override the default _fetch_ method. + This method is useful to enable RavenDB Node.js client on CloudFlare Workers. + +* DEFAULT: undefined + + + +{`// Returns an object +get customFetch(); +// Set an object bound to worker with type: mtls_certificate +set customFetch(customFetch); +`} + + + + + + +#### disableAtomicDocumentWritesInClusterWideTransaction +* EXPERT ONLY: + Use the `disableAtomicDocumentWritesInClusterWideTransaction` convention to disable automatic + atomic writes with cluster write transactions. + +* When set to `true`, will only consider explicitly added compare exchange values to validate cluster wide transactions. + +* DEFAULT: `false` + + + +{`// Returns a boolean value +get disableAtomicDocumentWritesInClusterWideTransaction(); +// Set a boolean value +set disableAtomicDocumentWritesInClusterWideTransaction( + disableAtomicDocumentWritesInClusterWideTransaction +); +`} + + + + + + +#### disableTopologyUpdates +* When setting the `disableTopologyUpdates` convention to `true`, + no database topology updates will be sent from the server to the client (e.g. adding or removing a node). + +* DEFAULT: `false` + + + +{`// Returns a boolean value +get disableTopologyUpdates(); +// Set a boolean value +set disableTopologyUpdates(value); +`} + + + + + + +#### findCollectionName +* Use the `findCollectionName` convention to define a function that will customize the collection name + from given type. + +* DEFAULT: The collection name will be the plural form of the type name. + + + +{`// Returns a method +get findCollectionName(); +// Set a method +set findCollectionName(value); +`} + + + + + + +#### findJsType +* Use the `findJsType` convention to define a function that finds the class of a document (if exists). + +* The type is retrieved from the `Raven-Node-Type` property under the `@metadata` key in the document. 
+ +* DEFAULT: `null` + + + +{`// Returns a method +get findJsType(); +// Set a method +set findJsType(value); +`} + + + + + + +#### findJsTypeName +* Use the `findJsTypeName` convention to define a function that returns the class type name from a given type. + +* The class name will be stored in the entity metadata. + +* DEFAULT: `null` + + + +{`// Returns a method +get findJsTypeName(); +// Set a method +set findJsTypeName(value); +`} + + + + + + +#### firstBroadcastAttemptTimeout +* Use the `firstBroadcastAttemptTimeout` convention to set the timeout for the first broadcast attempt. + +* In the first attempt, the request executor will send a single request to the selected node. + Learn about the "selected node" in: [Client logic for choosing a node](../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* A [second attempt](../../client-api/configuration/conventions.mdx#secondbroadcastattempttimeout) will be made upon failure. + +* DEFAULT: `5 seconds` + + + +{`// Returns a number +get firstBroadcastAttemptTimeout(); +// Set a number +set firstBroadcastAttemptTimeout(firstBroadcastAttemptTimeout); +`} + + + + + + +#### identityPartsSeparator +* Use the `identityPartsSeparator` convention to customize the **default ID separator** for document IDs generated automatically by the + [HiLo algorithm](../../client-api/document-identifiers/hilo-algorithm.mdx). + +* The value can be any char except `|` (pipe), which is reserved for identity IDs. + +* DEFAULT: `/` (forward slash) + +* Applies only to: [HiLo IDs](../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id). + + + +{`// Returns a string +get identityPartsSeparator(); +// Set a string +set identityPartsSeparator(value); +`} + + + + + + +#### loadBalanceBehavior +#### loadBalancerPerSessionContextSelector +#### loadBalancerContextSeed +* Configure the **load balance behavior** by setting the following conventions: + * `loadBalanceBehavior` + * `loadBalancerPerSessionContextSelector` + * `loadBalancerContextSeed` + +* Learn more in the dedicated [Load balance behavior](../../client-api/configuration/load-balance/load-balance-behavior.mdx) article. + + + + +#### maxHttpCacheSize +* Use the `maxHttpCacheSize` convention to set the maximum HTTP cache size. + This setting will affect all the databases accessed by the Document Store. + +* DEFAULT: `128 MB` + +* **Disabling Caching**: + + * To disable caching globally, set `maxHttpCacheSize` to zero. + * To disable caching per session, see: [Disable caching per session](../../client-api/session/configuration/how-to-disable-caching.mdx). + +* Note: RavenDB also supports Aggressive Caching. + Learn more about this in the [Setup aggressive caching](../../client-api/how-to/setup-aggressive-caching.mdx) article. + + + +{`// Returns a number +get maxHttpCacheSize(); +// Set a number +set maxHttpCacheSize(value); +`} + + + + + + +#### maxNumberOfRequestsPerSession +* Use the `maxNumberOfRequestsPerSession` convention to set the maximum number of requests per session. + +* DEFAULT: `30` + + + +{`// Returns a number +get maxNumberOfRequestsPerSession(); +// Set a number +set maxNumberOfRequestsPerSession(value); +`} + + + + + + +#### readBalanceBehavior +* Configure the **read request behavior** by setting the `readBalanceBehavior` convention. + +* Learn more in the dedicated [Read balance behavior](../../client-api/configuration/load-balance/read-balance-behavior.mdx) article.
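+ +* For illustration, here is a minimal sketch - set the value before calling `documentStore.initialize()`; the full set of values and their trade-offs is covered in the linked article: + + + +{`// Sketch: spread read requests across the database-group nodes +documentStore.conventions.readBalanceBehavior = "RoundRobin"; +`} + +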
+ + + +#### requestTimeout +* Use the `requestTimeout` convention to define the global request timeout value for all `RequestExecutors` created per database. + +* DEFAULT: `null` (the default HTTP client timeout will be applied - 12h) + + + +{`// Returns a number +get requestTimeout(); +// Set a number +set requestTimeout(value); +`} + + + + + + +#### secondBroadcastAttemptTimeout +* Use the `secondBroadcastAttemptTimeout` convention to set the timeout for the second broadcast attempt. + +* Upon failure of the [first attempt](../../client-api/configuration/conventions.mdx#firstbroadcastattempttimeout), the request executor will resend the command to all nodes simultaneously. + +* DEFAULT: `30 seconds` + + + +{`// Returns a number +get secondBroadcastAttemptTimeout(); +// Set a number +set secondBroadcastAttemptTimeout(timeout); +`} + + + + + + +#### sendApplicationIdentifier +* Set the `sendApplicationIdentifier` convention to `true` to enable sending a unique application identifier to the RavenDB Server. + +* Setting it to _true_ allows the server to issue performance hint notifications to the client, + e.g. during robust topology update requests, which could indicate a Client API misuse impacting the overall performance. + +* DEFAULT: `true` + + + +{`// Returns a boolean +get sendApplicationIdentifier(); +// Set a boolean +set sendApplicationIdentifier(sendApplicationIdentifier) +`} + + + + + + +#### shouldIgnoreEntityChanges +* Set the `shouldIgnoreEntityChanges` convention to disable entity tracking for certain entities. + +* Learn more in [Customize tracking in conventions](../../client-api/session/configuration/how-to-disable-tracking.mdx#customize-tracking-in-conventions). + + + + +#### storeDatesInUtc +* When setting the `storeDatesInUtc` convention to `true`, + DateTime values will be stored in the database in UTC format. + +* DEFAULT: `false` + + + +{`// Returns a boolean +get storeDatesInUtc(); +// Set a boolean +set storeDatesInUtc(value); +`} + + + + + + +#### storeDatesWithTimezoneInfo +* When setting the `storeDatesWithTimezoneInfo` convention to `true`, + DateTime values will be stored in the database with their time zone information included. + +* DEFAULT: `false` + + + +{`// Returns a boolean +get storeDatesWithTimezoneInfo(); +// Set a boolean +set storeDatesWithTimezoneInfo(value); +`} + + + + + + +#### syncJsonParseLimit +* Use the `syncJsonParseLimit` convention to define the maximum size for the _sync_ parsing of the JSON data responses received from the server. + For data exceeding this size, the client switches to _async_ parsing. + +* DEFAULT: `2 * 1_024 * 1_024` + + + +{`// Returns a number +get syncJsonParseLimit(); +// Set a number +set syncJsonParseLimit(value); +`} + + + + + + +#### throwIfQueryPageSizeIsNotSet +* When setting the `throwIfQueryPageSizeIsNotSet` convention to `true`, + an exception will be thrown if a query is performed without explicitly setting a page size. + +* This can be useful during development to identify potential performance bottlenecks + since there is no limitation on the number of results returned from the server. + +* DEFAULT: `false` + + + +{`// Returns a boolean +get throwIfQueryPageSizeIsNotSet(); +// Set a boolean +set throwIfQueryPageSizeIsNotSet(value); +`} + + + + + + +#### transformClassCollectionNameToDocumentIdPrefix +* Use the `transformClassCollectionNameToDocumentIdPrefix` convention to define a function that will + customize the document ID prefix from the collection name.
+ +* DEFAULT: + By default, the document ID prefix is determined as follows: + +| Number of uppercase letters in collection name | Document ID prefix | +|--------------------------------------------------|-------------------------------------------------------------| +| `<= 1` | Use the collection name with all lowercase letters | +| `> 1` | Use the collection name as is, preserving the original case | + + + +{`// Returns a method +get transformClassCollectionNameToDocumentIdPrefix(); +// Set a method +set transformClassCollectionNameToDocumentIdPrefix(value); +`} + + + + + + +#### useCompression +* Set the `useCompression` convention to `true` to accept compressed HTTP **response** content and decompress it automatically. + +* `Gzip` compression is always applied when sending content in an HTTP request. + +* DEFAULT: `true` + + + +{`// Returns a boolean +get useCompression(); +// Set a boolean +set useCompression(value); +`} + + + + + + + +#### useJsonlStreaming +* Set the `useJsonlStreaming` convention to `true` to stream query results in JSON Lines (JSONL) format. + +* DEFAULT: `true` + + + +{`// Returns a boolean +get useJsonlStreaming(); +// Set a boolean +set useJsonlStreaming(value); +`} + + + + + + +#### useOptimisticConcurrency +* When setting the `useOptimisticConcurrency` convention to `true`, + Optimistic Concurrency checks will be applied for all sessions opened from the Document Store. + +* Learn more about Optimistic Concurrency and the various ways to enable it in the + [how to enable optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx) article. + +* DEFAULT: `false` + + + +{`// Returns a boolean +get useOptimisticConcurrency(); +// Set a boolean +set useOptimisticConcurrency(value); +`} + + + + + + +#### waitForIndexesAfterSaveChangesTimeout +* Use the `waitForIndexesAfterSaveChangesTimeout` convention to set the default timeout for the + `documentSession.advanced.waitForIndexesAfterSaveChanges` method. + +* DEFAULT: 15 Seconds + + + +{`// Returns a number +get waitForIndexesAfterSaveChangesTimeout(); +// Set a number +set waitForIndexesAfterSaveChangesTimeout(value); +`} + + + + + + +#### waitForNonStaleResultsTimeout +* Use the `waitForNonStaleResultsTimeout` convention to set the default timeout used by the + `waitForNonStaleResults` method when querying. + +* DEFAULT: 15 Seconds + + + +{`// Returns a number +get waitForNonStaleResultsTimeout(); +// Set a number +set waitForNonStaleResultsTimeout(value); +`} + + + + + + +#### waitForReplicationAfterSaveChangesTimeout +* Use the `waitForReplicationAfterSaveChangesTimeout` convention to set the default timeout for the + `documentSession.advanced.waitForReplicationAfterSaveChanges` method.
+ +* DEFAULT: 15 Seconds + + + +{`// Returns a number +get waitForReplicationAfterSaveChangesTimeout(); +// Set a number +set waitForReplicationAfterSaveChangesTimeout(value); +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/configuration/_deserialization-csharp.mdx b/versioned_docs/version-7.1/client-api/configuration/_deserialization-csharp.mdx new file mode 100644 index 0000000000..52783c3ae3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/_deserialization-csharp.mdx @@ -0,0 +1,98 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Use the methods described in this page to customize the [conventions](../../client-api/configuration/conventions.mdx) +by which entities are deserialized as they are received by the client. + +* In this page: + * [CustomizeJsonDeserializer](../../client-api/configuration/deserialization.mdx#customizejsondeserializer) + * [DeserializeEntityFromBlittable](../../client-api/configuration/deserialization.mdx#deserializeentityfromblittable) + * [PreserveDocumentPropertiesNotFoundOnModel](../../client-api/configuration/deserialization.mdx#preservedocumentpropertiesnotfoundonmodel) + * [DefaultRavenSerializationBinder](../../client-api/configuration/deserialization.mdx#defaultravenserializationbinder) + * [Number Deserialization](../../client-api/configuration/deserialization.mdx#number-deserialization) + + +## Deserialization + +## CustomizeJsonDeserializer + +* The `JsonSerializer` object is used by the client to deserialize entities + loaded from the server. +* Use the `CustomizeJsonDeserializer` convention to modify `JsonSerializer` + by registering a deserialization customization action. + + + +{`Conventions = +\{ + Serialization = new NewtonsoftJsonSerializationConventions + \{ + CustomizeJsonDeserializer = serializer => throw new CodeOmitted() + \} +\} +`} + + + +## DeserializeEntityFromBlittable + +* Use the `DeserializeEntityFromBlittable` convention to customize entity + deserialization from a blittable JSON. + + + +{`Conventions = +\{ + Serialization = new NewtonsoftJsonSerializationConventions + \{ + DeserializeEntityFromBlittable = (type, blittable) => throw new CodeOmitted() + \} +\} +`} + + + +## PreserveDocumentPropertiesNotFoundOnModel + +* Some document properties are not deserialized to an object. +* Set the `PreserveDocumentPropertiesNotFoundOnModel` convention to `true` + to **preserve** such properties when the document is saved. +* Set the `PreserveDocumentPropertiesNotFoundOnModel` convention to `false` + to **remove** such properties when the document is saved. +* Default: `true` + + + +{`Conventions = +\{ + PreserveDocumentPropertiesNotFoundOnModel = true +\} +`} + + + +## DefaultRavenSerializationBinder + +Use the `DefaultRavenSerializationBinder` convention and its methods to +prevent gadgets from running RCE (Remote Code Execution) attacks while +data is deserialized by the client. + +Read about this security convention and maintaining deserialization security +[here](../../client-api/security/deserialization-security.mdx). + + +## Number Deserialization + +* RavenDB client supports all common numeric value types (including `int`, `long`, + `double`, `decimal`, etc.) out of the box. +* Note that although deserialization of `decimals` is fully supported, there are + [server side limitations](../../server/kb/numbers-in-ravendb.mdx) to numbers in this range. 
+* Other number types, like `BigInteger`, must be handled using custom deserialization. + + + + diff --git a/versioned_docs/version-7.1/client-api/configuration/_deserialization-java.mdx b/versioned_docs/version-7.1/client-api/configuration/_deserialization-java.mdx new file mode 100644 index 0000000000..0b62b54a37 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/_deserialization-java.mdx @@ -0,0 +1,24 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +## Customize ObjectMapper + +If you need to customize Jackson `ObjectMapper` object used by the client when sending entities to the server you can access and modify its instance: + + + +{`ObjectMapper entityMapper = conventions.getEntityMapper(); +entityMapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, true); +`} + + + +## Numbers (de)serialization + +RavenDB client supports out of the box all common numeric value types: `int`, `long`, `double` etc. +Note that although the (de)serialization of `decimals` is fully supported, there are [server side limitations](../../server/kb/numbers-in-ravendb.mdx) to numbers in that range. +Other number types like `BigInteger` must be treated using custom (de)serialization. + + diff --git a/versioned_docs/version-7.1/client-api/configuration/_serialization-csharp.mdx b/versioned_docs/version-7.1/client-api/configuration/_serialization-csharp.mdx new file mode 100644 index 0000000000..0770186e13 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/_serialization-csharp.mdx @@ -0,0 +1,121 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Use the methods described in this page to customize the [conventions](../../client-api/configuration/conventions.mdx) +by which entities are serialized as they are sent by the client to the server. + +* In this page: + * [CustomizeJsonSerializer](../../client-api/configuration/serialization.mdx#customizejsonserializer) + * [JsonContractResolver](../../client-api/configuration/serialization.mdx#jsoncontractresolver) + * [BulkInsert.TrySerializeEntityToJsonStream](../../client-api/configuration/serialization.mdx#bulkinserttryserializeentitytojsonstream) + * [IgnoreByRefMembers and IgnoreUnsafeMembers](../../client-api/configuration/serialization.mdx#ignorebyrefmembers-and-ignoreunsafemembers) + + +## Serialization + +## CustomizeJsonSerializer + +* The `JsonSerializer` object is used by the client to serialize entities + sent by the client to the server. +* Use the `CustomizeJsonSerializer ` convention to modify `JsonSerializer` + by registering a serialization customization action. + + + +{`Serialization = new NewtonsoftJsonSerializationConventions +\{ + CustomizeJsonSerializer = serializer => throw new CodeOmitted() +\} +`} + + + +## JsonContractResolver + +* The default `JsonContractResolver` convention used by RavenDB will serialize + **all** properties and **all** public fields. +* Change this behavior by providing your own implementation of the `IContractResolver` + interface. 
+ + + +{`Serialization = new NewtonsoftJsonSerializationConventions +\{ + JsonContractResolver = new CustomJsonContractResolver() +\} +`} + + + + + +{`public class CustomJsonContractResolver : IContractResolver +\{ + public JsonContract ResolveContract(Type type) + \{ + throw new CodeOmitted(); + \} +\} +`} + + + +* You can also customize the behavior of the **default resolver** by inheriting + from `DefaultRavenContractResolver` and overriding specific methods. + + + +{`public class CustomizedRavenJsonContractResolver : DefaultRavenContractResolver +\{ + public CustomizedRavenJsonContractResolver(ISerializationConventions conventions) : base(conventions) + \{ + \} + + protected override JsonProperty CreateProperty(MemberInfo member, MemberSerialization memberSerialization) + \{ + throw new CodeOmitted(); + \} +\} +`} + + + +## BulkInsert.TrySerializeEntityToJsonStream + +* Adjust [Bulk Insert](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx) + behavior by using the `TrySerializeEntityToJsonStream` convention to register a custom + serialization implementation. + + + +{`BulkInsert = +\{ + TrySerializeEntityToJsonStream = (entity, metadata, writer) => throw new CodeOmitted(), +\} +`} + + + +## IgnoreByRefMembers and IgnoreUnsafeMembers + +* By default, if you try to store an entity with `ref` or unsafe members, + the Client will throw an exception when [`session.SaveChanges()`](../../client-api/session/saving-changes.mdx) + is called. +* Set the `IgnoreByRefMembers` convention to `true` to simply ignore `ref` + members when an attempt to store an entity with `ref` members is made. + The entity will be uploaded to the server with all non-`ref` members without + throwing an exception. + The document structure on the server-side will not contain fields for those + `ref` members. +* Set the `IgnoreUnsafeMembers` convention to `true` to ignore all pointer + members in the same manner. +* `IgnoreByRefMembers` default value: `false` +* `IgnoreUnsafeMembers` default value: `false` + + + + diff --git a/versioned_docs/version-7.1/client-api/configuration/_serialization-java.mdx b/versioned_docs/version-7.1/client-api/configuration/_serialization-java.mdx new file mode 100644 index 0000000000..0b62b54a37 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/_serialization-java.mdx @@ -0,0 +1,24 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +## Customize ObjectMapper + +If you need to customize Jackson `ObjectMapper` object used by the client when sending entities to the server you can access and modify its instance: + + + +{`ObjectMapper entityMapper = conventions.getEntityMapper(); +entityMapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, true); +`} + + + +## Numbers (de)serialization + +RavenDB client supports out of the box all common numeric value types: `int`, `long`, `double` etc. +Note that although the (de)serialization of `decimals` is fully supported, there are [server side limitations](../../server/kb/numbers-in-ravendb.mdx) to numbers in that range. +Other number types like `BigInteger` must be treated using custom (de)serialization. 
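+ +For example, a minimal sketch of such custom handling - it uses Jackson's `SimpleModule` and `ToStringSerializer` and is illustrative only, not part of the RavenDB client API: + + + +{`ObjectMapper entityMapper = conventions.getEntityMapper(); +SimpleModule bigIntegerModule = new SimpleModule(); +// Write BigInteger values as strings to avoid precision loss +bigIntegerModule.addSerializer(BigInteger.class, new ToStringSerializer(BigInteger.class)); +entityMapper.registerModule(bigIntegerModule); +`} + +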
+ + diff --git a/versioned_docs/version-7.1/client-api/configuration/conventions.mdx b/versioned_docs/version-7.1/client-api/configuration/conventions.mdx new file mode 100644 index 0000000000..4dddba9f76 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/conventions.mdx @@ -0,0 +1,42 @@ +--- +title: "Conventions" +hide_table_of_contents: true +sidebar_label: Conventions +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ConventionsCsharp from './_conventions-csharp.mdx'; +import ConventionsNodejs from './_conventions-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/configuration/deserialization.mdx b/versioned_docs/version-7.1/client-api/configuration/deserialization.mdx new file mode 100644 index 0000000000..bb0a0498ba --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/deserialization.mdx @@ -0,0 +1,44 @@ +--- +title: "Conventions: Deserialization" +hide_table_of_contents: true +sidebar_label: DeSerialization +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeserializationCsharp from './_deserialization-csharp.mdx'; +import DeserializationJava from './_deserialization-java.mdx'; + +export const supportedLanguages = ["csharp", "java"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_category_.json b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_category_.json new file mode 100644 index 0000000000..ac4d1b2d83 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 4, + "label": Identifier generation, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_global-csharp.mdx b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_global-csharp.mdx new file mode 100644 index 0000000000..1bdd8ddec4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_global-csharp.mdx @@ -0,0 +1,153 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +#Global Identifier Generation Conventions + + + +Documents that have the same `@collection` metadata belong to the same [collection](../../../client-api/faq/what-is-a-collection.mdx) on the server side. Collection names are also used to build document identifiers. There are two functions that the client uses to determine a collection name for a given type. 
The first one is used for standard objects with a well-defined type: + + + +{`FindCollectionName = type => // function that provides the collection name based on the entity type +`} + + + +The second one is dedicated for dynamic objects: + + + +{`FindCollectionNameForDynamic = + dynamicObject => // function to determine the collection name for the given dynamic object +`} + + + + + +The `FindCollectionNameForDynamic` only works on objects that inherit from [IDynamicMetaObjectProvider](https://docs.microsoft.com/en-us/dotnet/api/system.dynamic.idynamicmetaobjectprovider) interface. In .NET there are two built-in types that implement that interface, the [ExpandoObject](https://docs.microsoft.com/en-us/dotnet/api/system.dynamic.expandoobject) and [DynamicObject](https://docs.microsoft.com/en-us/dotnet/api/system.dynamic.dynamicobject). + +For example, if we want to determine a collection using a `Collection` property from a dynamic object, we need to set `FindCollectionNameForDynamic` as follows: + + + +{`FindCollectionNameForDynamic = o => o.Collection +`} + + + +After that we can store our dynamic object as follows: + + + +{`dynamic car = new ExpandoObject(); +car.Name = "Ford"; +car.Collection = "Cars"; + +session.Store(car); + +dynamic animal = new ExpandoObject(); +animal.Name = "Rhino"; +animal.Collection = "Animals"; + +session.Store(animal); +`} + + + + + +## TransformTypeCollectionNameToDocumentIdPrefix + +Collection names determined by recently described convention functions aren't directly used as prefixes in document identifiers. There is a convention function called `TransformTypeCollectionNameToDocumentIdPrefix` which takes the collection name and produces the prefix: + + + +{`TransformTypeCollectionNameToDocumentIdPrefix = + collectionName => // transform the collection name to the prefix of a identifier, e.g. [prefix]/12 +`} + + + +Its default behavior for a collection which contains one upper character is to simply convert it to lower case string. `Users` would be transformed into `users`. For collection names containing more upper characters, there will be no change. The collection name: `LineItems` would output the following prefix: `LineItems`. + +## FindClrTypeName and FindClrType + +In the metadata of all documents stored in a database, you can find the following property which specifies the client-side type. For instance: + + + +{`\{ + "Raven-Clr-Type": "Orders.Shipper, Northwind" +\} +`} + + + +This property is used by RavenDB client to perform a conversion between a .NET object and a JSON document stored in a database. A function responsible for retrieving the CLR type of an entity is defined by `FindClrTypeName` convention: + + + +{`FindClrTypeName = type => // use reflection to determine the type; +`} + + + +To properly perform the revert conversion that is from a JSON result into a .NET object, we need to retrieve the CLR type from the `Raven-Clr-Type` metadata: + + + +{`FindClrType = (id, doc) => +\{ + if (doc.TryGet(Constants.Documents.Metadata.Key, out BlittableJsonReaderObject metadata) && + metadata.TryGet(Constants.Documents.Metadata.RavenClrType, out string clrType)) + return clrType; + + return null; +\}, +`} + + + +## FindIdentityProperty + +The client must know where in your entity an identifier is stored to be properly able to transform it into JSON document. It uses the `FindIdentityProperty` convention for that. 
+## FindIdentityProperty
+
+The client must know where the identifier is stored in your entity in order to properly transform it into a JSON document. It uses the `FindIdentityProperty` convention for that. The default and most common convention is that a property named `Id` is the identifier:
+
+
+
+{`FindIdentityProperty = memberInfo => memberInfo.Name == "Id"
+`}
+
+
+
+You can provide a customization based on the `MemberInfo` parameter to indicate which property or field keeps the identifier. The client will iterate over all object properties and take the first one according to the defined predicate.
+
+## FindIdentityPropertyNameFromCollectionName
+
+Sometimes the results returned by the server don't have identifiers defined (for example, if you run a projection query). However, they do have `@collection` in their metadata.
+
+To perform the conversion into a .NET object, a function that finds the identity property name for a given collection name is applied:
+
+
+
+{`FindIdentityPropertyNameFromCollectionName = collectionName => "Id"
+`}
+
+
+
+## IdentityPartsSeparator
+
+By default, document identifiers have the following format: `[collectionName]/[identityValue]-[nodeTag]`. The slash character (`/`) separates the two parts of an identifier.
+You can override it using the `IdentityPartsSeparator` convention. Its default definition is:
+
+
+
+{`IdentityPartsSeparator = "/"
+`}
+
+
+
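+As a closing illustration of where these conventions live, here is a minimal, hypothetical setup that customizes two of them before the store is initialized. The store URL, database name, and the `LocalId` property name are assumptions made for this sketch only:
+
+
+
+{`var store = new DocumentStore
+\{
+    Urls = new[] \{ "http://localhost:8080" \},
+    Database = "Northwind"
+\};
+
+// Conventions must be configured before Initialize() is called
+store.Conventions.FindIdentityProperty = memberInfo => memberInfo.Name == "LocalId";
+store.Conventions.FindIdentityPropertyNameFromCollectionName = collectionName => "LocalId";
+
+store.Initialize();
+`}
+
+
+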
diff --git a/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_global-java.mdx b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_global-java.mdx new file mode 100644 index 0000000000..862d1299b6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_global-java.mdx @@ -0,0 +1,112 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+# Global Identifier Generation Conventions
+
+
+
+Documents that have the same `@collection` metadata belong to the same [collection](../../../client-api/faq/what-is-a-collection.mdx) on the server side. Collection names are also used to build document identifiers.
+
+
+
+{`conventions.setFindCollectionName(
+    clazz -> // function that provides the collection name based on the entity class
+`}
+
+
+
+## TransformClassCollectionNameToDocumentIdPrefix
+
+Collection names determined by the convention functions described above aren't directly used as prefixes in document identifiers. There is a convention function called `TransformClassCollectionNameToDocumentIdPrefix` which takes the collection name and produces the prefix:
+
+
+
+{`conventions.setTransformClassCollectionNameToDocumentIdPrefix(
+    collectionName -> // transform the collection name to the prefix of an identifier, e.g. [prefix]/12
+`}
+
+
+
+By default, a collection name that contains a single uppercase character is simply converted to lowercase, so `Users` is transformed into `users`. Collection names that contain more uppercase characters are left unchanged, so the collection name `LineItems` produces the prefix `LineItems`.
+
+## FindJavaClassName and FindJavaClass
+
+In the metadata of all documents stored by the RavenDB Java Client, you can find the following property, which specifies the client-side type. For instance:
+
+
+
+{`\{
+    "Raven-Java-Type": "com.example.Customer"
+\}
+`}
+
+
+
+This property is used by the RavenDB client to perform a conversion between a Java object and a JSON document stored in a database. A function responsible for retrieving the Java class of an entity is defined by the `findJavaClassName` convention:
+
+
+
+{`conventions.setFindJavaClassName(
+    clazz -> // use reflection to determine the type
+`}
+
+
+
+To properly perform the reverse conversion, that is, from a JSON result into a Java object, we need to retrieve the Java class from the `Raven-Java-Type` metadata:
+
+
+
+{`conventions.setFindJavaClass((id, doc) -> \{
+    return Optional.ofNullable((ObjectNode) doc.get(Constants.Documents.Metadata.KEY))
+        .map(x -> x.get(Constants.Documents.Metadata.RAVEN_JAVA_TYPE))
+        .map(x -> x.asText())
+        .orElse(null);
+\});
+`}
+
+
+
+
+## FindIdentityProperty
+
+The client must know where the identifier is stored in your entity in order to properly transform it into a JSON document. It uses the `FindIdentityProperty` convention for that. The default and most common convention is that a property named `Id` is the identifier:
+
+
+
+{`conventions.setFindIdentityProperty(fieldInfo -> "Id".equals(fieldInfo.getName()));
+`}
+
+
+
+You can provide a customization based on the `FieldInfo` parameter to indicate which property or field keeps the identifier. The client will iterate over all object properties and take the first one according to the defined predicate.
+
+## FindIdentityPropertyNameFromCollectionName
+
+Sometimes the results returned by the server don't have identifiers defined (for example, if you run a projection query). However, they do have `@collection` in their metadata.
+
+To perform the conversion into a Java object, a function that finds the identity property name for a given collection name is applied:
+
+
+
+{`conventions.setFindIdentityPropertyNameFromCollectionName(
+    collectionName -> "Id"
+);
+`}
+
+
+
+## IdentityPartsSeparator
+
+By default, document identifiers have the following format: `[collectionName]/[identityValue]-[nodeTag]`. The slash character (`/`) separates the two parts of an identifier.
+You can override it using the `IdentityPartsSeparator` convention. Its default definition is:
+
+
+
+{`conventions.setIdentityPartsSeparator("/");
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_global-nodejs.mdx b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_global-nodejs.mdx new file mode 100644 index 0000000000..06bae1337e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_global-nodejs.mdx @@ -0,0 +1,115 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+# Global Identifier Generation Conventions
+
+
+
+Documents that have the same `@collection` metadata belong to the same [collection](../../../client-api/faq/what-is-a-collection.mdx) on the server side. Collection names are also used to build document identifiers.
+
+
+
+{`conventions.findCollectionName =
+    type => // function that provides the collection name based on the entity type
+`}
+
+
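+For example, a minimal sketch of a custom rule that derives the collection name from the class name (the pluralization logic here is an assumption made for illustration, not the client's default):
+
+
+
+{`conventions.findCollectionName = type => \{
+    // Assumed example rule: pluralize the class name,
+    // so the Employee class maps to the "Employees" collection
+    return type.name + "s";
+\};
+`}
+
+
+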
+## TransformClassCollectionNameToDocumentIdPrefix
+
+Collection names determined by the convention functions described above aren't directly used as prefixes in document identifiers. There is a convention function called `transformClassCollectionNameToDocumentIdPrefix()` which takes the collection name and produces the prefix:
+
+
+
+{`conventions.transformClassCollectionNameToDocumentIdPrefix =
+    collectionName => // transform the collection name to the prefix of an identifier, e.g. [prefix]/12
+`}
+
+
+
+By default, a collection name that contains a single uppercase character is simply converted to lowercase, so `Users` is transformed into `users`. Collection names that contain more uppercase characters are left unchanged, so the collection name `LineItems` produces the prefix `LineItems`.
+
+## FindJsTypeName and FindJsType
+
+In the metadata of all documents stored by the RavenDB Node.js Client, you can find the following property, which specifies the client-side type. For instance:
+
+
+
+{`\{
+    "Raven-Node-Type": "Customer"
+\}
+`}
+
+
+
+This property is used by the RavenDB client to perform a conversion between a JS object and a JSON document stored in a database. A function responsible for retrieving the JS type of an entity is defined by the `findJsTypeName()` convention:
+
+
+
+{`conventions.findJsTypeName =
+    type => // determine the type name based on type
+`}
+
+
+
+To properly perform the reverse conversion, that is, from a JSON result into a JS object, we need to retrieve the JS type from the `Raven-Node-Type` metadata:
+
+
+
+{`conventions.findJsType = (id, doc) => \{
+    const metadata = doc["@metadata"];
+    if (metadata) \{
+        const jsType = metadata["Raven-Node-Type"];
+        return this.getJsTypeByDocumentType(jsType);
+    \}
+
+    return null;
+\};
+`}
+
+
+
+
+## FindIdentityPropertyNameFromCollectionName
+
+Sometimes the results returned by the server don't have identifiers defined (for example, if you run a projection query). However, they do have `@collection` in their metadata.
+
+To perform the conversion into a JS object, a function that finds the identity property name for a given collection name is applied:
+
+
+
+{`conventions.findIdentityPropertyNameFromCollectionName =
+    collectionName => "id";
+`}
+
+
+
+## IdentityPartsSeparator
+
+By default, document identifiers have the following format: `[collectionName]/[identityValue]-[nodeTag]`. The slash character (`/`) separates the two parts of an identifier.
+You can override it using the `identityPartsSeparator` convention. Its default definition is:
+
+
+
+{`conventions.identityPartsSeparator = "/";
+`}
+
+
+
+## FindCollectionNameForObjectLiteral
+
+This convention is *not defined by default*. It's only useful when using object literals as entities. It defines how the client obtains a collection name for an object literal. If it's undefined, object literals stored with `session.store()` will end up in the `@empty` collection, with a UUID for an ID.
+
+For instance, here's a mapping of the *category* field to the collection name:
+
+
+{`conventions.findCollectionNameForObjectLiteral =
+    entity => entity["category"];
+    // function that provides the collection name based on the entity object
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_type-specific-csharp.mdx b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_type-specific-csharp.mdx new file mode 100644 index 0000000000..4307b91211 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_type-specific-csharp.mdx @@ -0,0 +1,134 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+# Type-Specific Identifier Generation
+
+[In the previous article](../../../client-api/configuration/identifier-generation/global.mdx), Global Identifier generation conventions were introduced. Any customization made by using those conventions changes the behavior for all stored entities.
+Now we will show how to override the default ID generation in a more granular way, for particular types of entities.
+
+To override default document identifier generation algorithms, you can register custom conventions per entity type. You can include your own identifier generation logic.
+
+
+
+
+
+{`DocumentConventions RegisterAsyncIdConvention<TEntity>(Func<string, TEntity, Task<string>> func);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **func** | Func&lt;string, TEntity, Task&lt;string&gt;&gt; | Identifier generation function that supplies a result asynchronously for a given database name (`string`) and entity object (`TEntity`). |
+
+| Return Value | |
+| ------------- | ----- |
+| DocumentConventions | Current `DocumentConventions` instance. |
+
+
+This method applies to both synchronous and asynchronous operations.
+
+
+
+The database name parameter is passed to the register convention methods to allow users to make Id generation decisions per database.
+
+
+### Example
+
+Let's say that you want to use semantic identifiers for `Employee` objects. Instead of `employee/[identity]` you want to have identifiers like `employees/[lastName]/[firstName]`
+(for the sake of simplicity, let us not consider the uniqueness of such identifiers). What you need to do is to create the convention that will combine the `employee` prefix, `LastName` and `FirstName` properties of an employee.
+
+
+
+{`store.Conventions.RegisterAsyncIdConvention<Employee>(
+    (dbname, employee) =>
+        Task.FromResult(string.Format("employees/\{0\}/\{1\}", employee.LastName, employee.FirstName)));
+`}
+
+
+
+Now, when you store a new entity:
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    session.Store(new Employee
+    \{
+        FirstName = "James",
+        LastName = "Bond"
+    \});
+
+    session.SaveChanges();
+\}
+`}
+
+
+
+the client will associate the `employees/Bond/James` identifier with it.
+
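+Since the convention is registered through `RegisterAsyncIdConvention`, it also applies when working with the async session. A minimal sketch using the same assumed convention as above:
+
+
+
+{`using (var session = store.OpenAsyncSession())
+\{
+    await session.StoreAsync(new Employee
+    \{
+        FirstName = "James",
+        LastName = "Bond"
+    \}); // employees/Bond/James
+
+    await session.SaveChangesAsync();
+\}
+`}
+
+
+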
+## Inheritance
+
+Registered conventions are inheritance-aware, so all types that can be assigned from a registered type will fall under that convention, following the inheritance-hierarchy tree.
+
+### Example
+
+If we create a new class `EmployeeManager` that will derive from our `Employee` class and keep the convention registered in the last example, both types will use the following:
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    session.Store(new Employee // employees/Smith/Adam
+    \{
+        FirstName = "Adam",
+        LastName = "Smith"
+    \});
+
+    session.Store(new EmployeeManager // employees/Jones/David
+    \{
+        FirstName = "David",
+        LastName = "Jones"
+    \});
+
+    session.SaveChanges();
+\}
+`}
+
+
+
+If we register two conventions, one for `Employee` and the second for `EmployeeManager`, then each will be applied to its specific type.
+
+
+
+{`store.Conventions.RegisterAsyncIdConvention<Employee>(
+    (dbname, employee) =>
+        Task.FromResult(string.Format("employees/\{0\}/\{1\}", employee.LastName, employee.FirstName)));
+
+store.Conventions.RegisterAsyncIdConvention<EmployeeManager>(
+    (dbname, employee) =>
+        Task.FromResult(string.Format("managers/\{0\}/\{1\}", employee.LastName, employee.FirstName)));
+
+using (var session = store.OpenSession())
+\{
+    session.Store(new Employee // employees/Smith/Adam
+    \{
+        FirstName = "Adam",
+        LastName = "Smith"
+    \});
+
+    session.Store(new EmployeeManager // managers/Jones/David
+    \{
+        FirstName = "David",
+        LastName = "Jones"
+    \});
+
+    session.SaveChanges();
+\}
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_type-specific-java.mdx b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_type-specific-java.mdx new file mode 100644 index 0000000000..3c9a0bfaf2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_type-specific-java.mdx @@ -0,0 +1,124 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+# Type-Specific Identifier Generation
+
+[In the previous article](../../../client-api/configuration/identifier-generation/global.mdx), Global Identifier generation conventions were introduced. Any customization made by using those conventions changes the behavior for all stored entities.
+Now we will show how to override the default ID generation in a more granular way, for particular types of entities.
+
+To override default document identifier generation algorithms, you can register custom conventions per entity type. You can include your own identifier generation logic.
+
+
+
+
+
+{`public <TEntity> DocumentConventions registerIdConvention(Class<TEntity> clazz, BiFunction<String, TEntity, String> function);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **function** | BiFunction&lt;String, TEntity, String&gt; | Identifier generation function that supplies a result for a given database name (`String`) and entity object (`TEntity`). |
+
+| Return Value | |
+| ------------- | ----- |
+| DocumentConventions | Current `DocumentConventions` instance. |
+
+
+The database name parameter is passed to the register convention methods to allow users to make Id generation decisions per database.
+
+
+### Example
+
+Let's say that you want to use semantic identifiers for `Employee` objects. Instead of `employee/[identity]` you want to have identifiers like `employees/[lastName]/[firstName]`
+(for the sake of simplicity, let us not consider the uniqueness of such identifiers). What you need to do is to create the convention that will combine the `employee` prefix, `LastName` and `FirstName` properties of an employee.
+
+
+
+{`store.getConventions().registerIdConvention(Employee.class,
+    (dbName, employee) ->
+        String.format("employees/%s/%s", employee.getLastName(), employee.getFirstName()));
+`}
+
+
+
+Now, when you store a new entity:
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    Employee employee = new Employee();
+    employee.setFirstName("James");
+    employee.setLastName("Bond");
+
+    session.store(employee);
+    session.saveChanges();
+\}
+`}
+
+
+
+the client will associate the `employees/Bond/James` identifier with it.
+
+## Inheritance
+
+Registered conventions are inheritance-aware, so all types that can be assigned from a registered type will fall under that convention, following the inheritance-hierarchy tree.
+
+### Example
+
+If we create a new class `EmployeeManager` that will derive from our `Employee` class and keep the convention registered in the last example, both types will use the following:
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    Employee adam = new Employee();
+    adam.setFirstName("Adam");
+    adam.setLastName("Smith");
+    session.store(adam); // employees/Smith/Adam
+
+    EmployeeManager david = new EmployeeManager();
+    david.setFirstName("David");
+    david.setLastName("Jones");
+    session.store(david); // employees/Jones/David
+
+    session.saveChanges();
+\}
+`}
+
+
+
+If we register two conventions, one for `Employee` and the second for `EmployeeManager`, then each will be applied to its specific type.
+
+
+
+{`store.getConventions().registerIdConvention(Employee.class,
+    (dbName, employee) ->
+        String.format("employees/%s/%s", employee.getLastName(), employee.getFirstName())
+);
+
+store.getConventions().registerIdConvention(EmployeeManager.class,
+    (dbName, employee) ->
+        String.format("managers/%s/%s", employee.getLastName(), employee.getFirstName())
+);
+
+try (IDocumentSession session = store.openSession()) \{
+    Employee adam = new Employee();
+    adam.setFirstName("Adam");
+    adam.setLastName("Smith");
+    session.store(adam); // employees/Smith/Adam
+
+    EmployeeManager david = new EmployeeManager();
+    david.setFirstName("David");
+    david.setLastName("Jones");
+    session.store(david); // managers/Jones/David
+
+    session.saveChanges();
+\}
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_type-specific-nodejs.mdx b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_type-specific-nodejs.mdx new file mode 100644 index 0000000000..c4d7801587 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/_type-specific-nodejs.mdx @@ -0,0 +1,93 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+# Type-Specific Identifier Generation
+
+[In the previous article](../../../client-api/configuration/identifier-generation/global.mdx), Global Identifier generation conventions were introduced. Any customization made by using those conventions changes the behavior for all stored entities.
+Now we will show how to override the default ID generation in a more granular way, for particular types of entities.
+
+To override default document identifier generation algorithms, you can register custom conventions per entity type. You can include your own identifier generation logic.
+
+
+
+
+{`conventions.registerIdConvention(clazz, idConvention);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| clazz | class or object | Entity type |
+| idConvention | function `(databaseName, entity) => Promise` | Identifier generation function that supplies a result for a given database name and entity object. Must return a `Promise` resolving to a string. |
+
+| Return Value | |
+| ------------- | ----- |
+| DocumentConventions | Current `DocumentConventions` instance. |
+
+
+The database name parameter is passed to the register convention methods to allow users to make Id generation decisions per database.
+
+
+### Example
+
+Let's say that you want to use semantic identifiers for `Employee` objects. Instead of `employee/[identity]` you want to have identifiers like `employees/[lastName]/[firstName]`
+(for the sake of simplicity, let us not consider the uniqueness of such identifiers). What you need to do is to create the convention that will combine the `employee` prefix, `LastName` and `FirstName` properties of an employee.
+
+
+
+{`store.conventions.registerIdConvention(Employee,
+    (dbName, entity) => Promise.resolve(\`employees/$\{entity.lastName\}/$\{entity.firstName\}\`));
+
+// or using async keyword
+store.conventions.registerIdConvention(Employee,
+    async (dbName, entity) => \`employees/$\{entity.lastName\}/$\{entity.firstName\}\`);
+`}
+
+
+
+Now, when you store a new entity:
+
+
+
+{`const session = store.openSession();
+const employee = new Employee("James", "Bond");
+
+await session.store(employee);
+await session.saveChanges();
+`}
+
+
+
+the client will associate the `employees/Bond/James` identifier with it.
+
+
+The ID convention function must return a `Promise` since it *can* be asynchronous.
+
+
+### Example: Object literal based entities
+
+
+
+{`// for object-literal-based entities you can pass a type descriptor object
+const typeDescriptor = \{
+    name: "Employee",
+    isType(entity) \{
+        // if it quacks like a duck...
ahem, an employee
+        return entity
+            && "firstName" in entity
+            && "lastName" in entity
+            && "boss" in entity;
+    \}
+\};
+
+store.conventions.registerIdConvention(typeDescriptor,
+    async (dbName, entity) => \`employees/$\{entity.lastName\}/$\{entity.firstName\}\`);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/configuration/identifier-generation/global.mdx b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/global.mdx new file mode 100644 index 0000000000..56a95491c6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/global.mdx @@ -0,0 +1,38 @@
+---
+title: "Global Identifier Generation Conventions"
+hide_table_of_contents: true
+sidebar_label: Global
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GlobalCsharp from './_global-csharp.mdx';
+import GlobalJava from './_global-java.mdx';
+import GlobalNodejs from './_global-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/configuration/identifier-generation/type-specific.mdx b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/type-specific.mdx new file mode 100644 index 0000000000..4804187b63 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/identifier-generation/type-specific.mdx @@ -0,0 +1,37 @@
+---
+title: "Type-Specific Identifier Generation"
+hide_table_of_contents: true
+sidebar_label: Type-specific
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import TypeSpecificCsharp from './_type-specific-csharp.mdx';
+import TypeSpecificJava from './_type-specific-java.mdx';
+import TypeSpecificNodejs from './_type-specific-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/_category_.json b/versioned_docs/version-7.1/client-api/configuration/load-balance/_category_.json new file mode 100644 index 0000000000..b8dcac545a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/_category_.json @@ -0,0 +1,4 @@
+{
+  "position": 3,
+  "label": "Load balancing client requests"
+} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-csharp.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-csharp.mdx new file mode 100644 index 0000000000..e5d2d7cb04 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-csharp.mdx @@ -0,0 +1,266 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The `loadBalanceBehavior` configuration allows you to specify which sessions should
+  communicate with the same node.
+
+* Sessions that are assigned the **same context** will have all their _Read_ & _Write_
+  requests routed to the **same node**. Gain load balancing by assigning **different contexts**
+  to **different sessions**.
+* In this page:
+  * [LoadBalanceBehavior options](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#loadbalancebehavior-options)
+  * [Initialize LoadBalanceBehavior on the client](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#initialize-loadbalancebehavior-on-the-client)
+  * [Set LoadBalanceBehavior on the server:](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server)
+    * [By operation](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server---by-operation)
+    * [From Studio](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server---from-studio)
+  * [When to use](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#when-to-use)
+
+
+## LoadBalanceBehavior options
+
+### `None` (default option)
+
+* Requests will be handled based on the `ReadBalanceBehavior` configuration.
+  See the conditional flow described in [Client logic for choosing a node](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node).
+  * **_Read_** requests:
+    The client will calculate the target node from the configured [ReadBalanceBehavior Option](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#readbalancebehavior-options).
+  * **_Write_** requests:
+    Will be sent to the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node).
+    The data will then be replicated to all the other nodes in the database group.
+
+### `UseSessionContext`
+
+* **Load-balance**
+
+  * When this option is enabled, the client will calculate the target node from the session-id.
+    The session-id is hashed from a **context string** and an optional **seed** given by the user.
+    The context string together with the seed are referred to as **"The session context"**.
+
+  * Per session, the client will select a node from the topology list based on this session-context.
+    So sessions that use the **same** context will target the **same** node.
+
+  * All **_Read & Write_** requests made on the session (i.e., a query or a load request, etc.)
+    will address this calculated node.
+    _Read & Write_ requests that are made on the store (i.e., executing an [operation](../../../client-api/operations/what-are-operations.mdx))
+    will go to the preferred node.
+
+  * All _Write_ requests will be replicated to all the other nodes in the database group as usual.
+
+* **Failover**
+
+  * In case of a failure, the client will try to access the next node from the topology nodes list.
+
+
+
+## Initialize LoadBalanceBehavior on the client
+
+* The `LoadBalanceBehavior` convention can be set **on the client** when initializing the Document Store.
+  This will set the load balance behavior for the default database that is set on the store.
+
+* This setting can be **overridden** by setting 'LoadBalanceBehavior' on the server, see [below](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server).
+**Initialize conventions**: + + + +{`// Initialize 'LoadBalanceBehavior' on the client: +var documentStore = new DocumentStore +\{ + Urls = new[] \{"ServerURL_1", "ServerURL_2", "..."\}, + Database = "DefaultDB", + Conventions = new DocumentConventions + \{ + // Enable the session-context feature + // If this is not enabled then a context string set in a session will be ignored + LoadBalanceBehavior = LoadBalanceBehavior.UseSessionContext, + + // Assign a method that sets the default context string + // This string will be used for sessions that do Not provide a context string + // A sample GetDefaultContext method is defined below + LoadBalancerPerSessionContextSelector = GetDefaultContext, + + // Set a seed + // The seed is 0 by default, provide any number to override + LoadBalancerContextSeed = 5 + \} +\}.Initialize(); +`} + + + + +{`// A customized method for getting a default context string +private string GetDefaultContext(string dbName) +\{ + // Method is invoked by RavenDB with the database name + // Use that name - or return any string of your choice + return "DefaultContextString"; +\} +`} + + +**Session usage**: + + + +{`// Open a session that will use the DEFAULT store values: +using (var session = documentStore.OpenSession()) +\{ + // For all Read & Write requests made in this session, + // node to access is calculated from string & seed values defined on the store + var employee = session.Load("employees/1-A"); +\} +`} + + + + +{`// Open a session that will use a UNIQUE context string: +using (var session = documentStore.OpenSession()) +\{ + // Call SetContext, pass a unique context string for this session + session.Advanced.SessionInfo.SetContext("SomeOtherContext"); + + // For all Read & Write requests made in this session, + // node to access is calculated from the unique string & the seed defined on the store + var employee = session.Load("employees/1-A"); +\} +`} + + + + + +## Set LoadBalanceBehavior on the server + + + +**Note**: + +* Setting the load balance behavior on the server, either by an **Operation** or from the **Studio**, + only 'enables the feature' and sets the seed. + +* For the feature to be in effect, you still need to define the context string itself: + * either per session, call `session.Advanced.SessionInfo.SetContext` + * or, on the document store, set a default value for - `LoadBalancerPerSessionContextSelector` + + +#### Set LoadBalanceBehavior on the server - by operation: + +* The `LoadBalanceBehavior` configuration can be set **on the server** by sending an [operation](../../../client-api/operations/what-are-operations.mdx). + +* The operation can modify the default database only, or all databases - see examples below. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). 
+ + + + +{`// Setting 'LoadBalanceBehavior' on the server by sending an operation: +using (documentStore) +{ + // Define the client configuration to put on the server + var configurationToSave = new ClientConfiguration + { + // Enable the session-context feature + // If this is not enabled then a context string set in a session will be ignored + LoadBalanceBehavior = LoadBalanceBehavior.UseSessionContext, + + // Set a seed + // The seed is 0 by default, provide any number to override + LoadBalancerContextSeed = 10, + + // NOTE: + // The session's context string is Not set on the server + // You still need to set it on the client: + // * either as a convention on the document store + // * or pass it to 'SetContext' method on the session + + // Configuration will be in effect when Disabled is set to false + Disabled = false + }; + + // Define the put configuration operation for the DEFAULT database + var putConfigurationOp = new PutClientConfigurationOperation(configurationToSave); + + // Execute the operation by passing it to Maintenance.Send + documentStore.Maintenance.Send(putConfigurationOp); + + // After the operation has executed: + // all Read & Write requests, per session, will address the node calculated from: + // * the seed set on the server & + // * the session's context string set on the client +} +`} + + + + +{`// Setting 'LoadBalanceBehavior' on the server by sending an operation: +using (documentStore) +{ + // Define the client configuration to put on the server + var configurationToSave = new ClientConfiguration + { + // Enable the session-context feature + // If this is not enabled then a context string set in a session will be ignored + LoadBalanceBehavior = LoadBalanceBehavior.UseSessionContext, + + // Set a seed + // The seed is 0 by default, provide any number to override + LoadBalancerContextSeed = 10, + + // NOTE: + // The session's context string is Not set on the server + // You still need to set it on the client: + // * either as a convention on the document store + // * or pass it to 'SetContext' method on the session + + // Configuration will be in effect when Disabled is set to false + Disabled = false + }; + + // Define the put configuration operation for ALL databases + var putConfigurationOp = new PutServerWideClientConfigurationOperation(configurationToSave); + + // Execute the operation by passing it to Maintenance.Server.Send + documentStore.Maintenance.Server.Send(putConfigurationOp); + + // After the operation has executed: + // all Read & Write requests, per session, will address the node calculated from: + // * the seed set on the server & + // * the session's context string set on the client +} +`} + + + +#### Set LoadBalanceBehavior on the server - from Studio: + +* The `LoadBalanceBehavior` configuration can be set from the Studio's [Client Configuration view](../../../studio/database/settings/client-configuration-per-database.mdx). + Setting it from the Studio will set this configuration directly **on the server**. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). + + + +## When to use + +* Distributing _Read & Write_ requests among the cluster nodes can be beneficial + when a set of sessions handle a specific set of documents or similar data. 
Load balancing can be achieved by routing requests from the sessions that handle similar topics to the same node, while routing other sessions to other nodes.
+
+* Another usage example is setting the session's context to the current user,
+  thus spreading the _Read & Write_ requests across the users that are logged into the application.
+
+* Once load balancing is set per session-context,
+  if you detect that many or all sessions send their requests to the same node,
+  a further level of node randomization can be added by changing the seed.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-nodejs.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-nodejs.mdx new file mode 100644 index 0000000000..99039ddfb6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-nodejs.mdx @@ -0,0 +1,257 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The `loadBalanceBehavior` configuration allows you to specify which sessions should
+  communicate with the same node.
+
+* Sessions that are assigned the **same context** will have all their _Read_ & _Write_
+  requests routed to the **same node**. Gain load balancing by assigning **different contexts**
+  to **different sessions**.
+* In this page:
+  * [LoadBalanceBehavior options](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#loadbalancebehavior-options)
+  * [Initialize LoadBalanceBehavior on the client](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#initialize-loadbalancebehavior-on-the-client)
+  * [Set LoadBalanceBehavior on the server:](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server)
+    * [By operation](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server---by-operation)
+    * [From Studio](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server---from-studio)
+  * [When to use](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#when-to-use)
+
+
+## LoadBalanceBehavior options
+
+### `None` (default option)
+
+* Requests will be handled based on the `readBalanceBehavior` configuration.
+  See the conditional flow described in [Client logic for choosing a node](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node).
+  * **_Read_** requests:
+    The client will calculate the target node from the configured [readBalanceBehavior Option](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#readbalancebehavior-options).
+  * **_Write_** requests:
+    Will be sent to the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node).
+    The data will then be replicated to all the other nodes in the database group.
+
+### `UseSessionContext`
+
+* **Load-balance**
+
+  * When this option is enabled, the client will calculate the target node from the session-id.
+    The session-id is hashed from a **context string** and an optional **seed** given by the user.
+    The context string together with the seed are referred to as **"The session context"**.
+
+  * Per session, the client will select a node from the topology list based on this session-context.
+ So sessions that use the **same** context will target the **same** node. + + * All **_Read & Write_** requests made on the session (i.e a query or a load request, etc.) + will address this calculated node. + _Read & Write_ requests that are made on the store (i.e. executing an [operation](../../../client-api/operations/what-are-operations.mdx)) + will go to the preferred node. + + * All _Write_ requests will be replicated to all the other nodes in the database group as usual. + +* **Failover** + + * In case of a failure, the client will try to access the next node from the topology nodes list. + + + +## Initialize loadBalanceBehavior on the client + +* The `loadBalanceBehavior` convention can be set **on the client** when initializing the Document Store. + This will set the load balance behavior for the default database that is set on the store. + +* This setting can be **overriden** by setting 'loadBalanceBehavior' on the server, see [below](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server). +**Initialize conventions**: + + + +{`// Initialize 'loadBalanceBehavior' on the client: +// =============================================== + +const documentStore = new DocumentStore(["serverUrl_1", "serverUrl_2", "..."], "DefaultDB"); + +// Enable the session-context feature +// If this is not enabled then a context string set in a session will be ignored +documentStore.conventions.loadBalanceBehavior = "UseSessionContext"; + +// Assign a method that sets the default context string +// This string will be used for sessions that do Not provide a context string +// A sample getDefaultContext method is defined below +documentStore.conventions.loadBalancerPerSessionContextSelector = getDefaultContext; + +// Set a seed +// The seed is 0 by default, provide any number to override +documentStore.conventions.loadBalancerContextSeed = 5 + +documentStore.initialize(); +`} + + + + +{`// A customized method for getting a default context string +const getDefaultContext = (dbName) => \{ + // Method is invoked by RavenDB with the database name + // Use that name - or return any string of your choice + return "defaultContextString"; +\} +`} + + +**Session usage**: + + + +{`// Open a session that will use the DEFAULT store values: +const session = documentStore.openSession(); + +// For all Read & Write requests made in this session, +// node to access is calculated from string & seed values defined on the store +const employee = await session.load("employees/1-A"); +`} + + + + +{`// Open a session that will use a UNIQUE context string: +const session = documentStore.openSession(); + +// Call setContext, pass a unique context string for this session +session.advanced.sessionInfo.setContext("SomeOtherContext"); + +// For all Read & Write requests made in this session, +// node to access is calculated from the unique string & the seed defined on the store +const employee = await session.load("employees/1-A"); +`} + + + + + +## Set loadBalanceBehavior on the server + + + +**Note**: + +* Setting the load balance behavior on the server, either by an **Operation** or from the **Studio**, + only 'enables the feature' and sets the seed. 
+ +* For the feature to be in effect, you still need to define the context string itself: + * either per session, call `session.advanced.sessionInfo.setContext` + * or, on the document store, set a default value for - `loadBalancerPerSessionContextSelector` + + +#### Set LoadBalanceBehavior on the server - by operation: + +* The `loadBalanceBehavior` configuration can be set **on the server** by sending an [operation](../../../client-api/operations/what-are-operations.mdx). + +* The operation can modify the default database only, or all databases - see examples below. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). + + + + +{`// Setting 'loadBalanceBehavior' on the server by sending an operation: +// ==================================================================== + +// Define the client configuration to put on the server +const configurationToSave = { + // Enable the session-context feature + // If this is not enabled then a context string set in a session will be ignored + loadBalanceBehavior: "UseSessionContext", + + // Set a seed + // The seed is 0 by default, provide any number to override + loadBalancerContextSeed: 10, + + // NOTE: + // The session's context string is Not set on the server + // You still need to set it on the client: + // * either as a convention on the document store + // * or pass it to 'setContext' method on the session + + // Configuration will be in effect when 'disabled' is set to false + disabled: false +}; + +// Define the put configuration operation for the DEFAULT database +const putConfigurationOp = new PutClientConfigurationOperation(configurationToSave); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(putConfigurationOp); + +// After the operation has executed: +// all Read & Write requests, per session, will address the node calculated from: +// * the seed set on the server & +// * the session's context string set on the client +`} + + + + +{`// Setting 'loadBalanceBehavior' on the server by sending an operation: +// ==================================================================== + +// Define the client configuration to put on the server +const configurationToSave = { + // Enable the session-context feature + // If this is not enabled then a context string set in a session will be ignored + loadBalanceBehavior: "UseSessionContext", + + // Set a seed + // The seed is 0 by default, provide any number to override + loadBalancerContextSeed: 10, + + // NOTE: + // The session's context string is Not set on the server + // You still need to set it on the client: + // * either as a convention on the document store + // * or pass it to 'setContext' method on the session + + // Configuration will be in effect when 'disabled' is set to false + disabled: false +}; + +// Define the put configuration operation for ALL databases +const putConfigurationOp = new PutServerWideClientConfigurationOperation(configurationToSave); + +// Execute the operation by passing it to maintenance.server.send +await documentStore.maintenance.server.send(putConfigurationOp); + +// After the operation has executed: +// all Read & Write requests, per session, will address the node calculated from: +// * the seed set on the server & +// * the session's context string set on the client +`} + + + +#### Set LoadBalanceBehavior on the server 
- from Studio: + +* The `loadBalanceBehavior` configuration can be set from the Studio's [Client Configuration view](../../../studio/database/settings/client-configuration-per-database.mdx). + Setting it from the Studio will set this configuration directly **on the server**. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). + + + +## When to use + +* Distributing _Read & Write_ requests among the cluster nodes can be beneficial + when a set of sessions handle a specific set of documents or similar data. + Load balancing can be achieved by routing requests from the sessions that handle similar topics to the same node, while routing other sessions to other nodes. + +* Another usage example can be setting the session's context to be the current user. + Thus spreading the _Read & Write_ requests per user that logs into the application. + +* Once setting the load balance to be per session-context, + in the case when detecting that many or all sessions send requests to the same node, + a further level of node randomization can be added by changing the seed. + + + + diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-php.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-php.mdx new file mode 100644 index 0000000000..98985c2fbb --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-php.mdx @@ -0,0 +1,271 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The `loadBalanceBehavior` configuration allows you to specify which sessions should + communicate with the same node. + +* Sessions that are assigned the **same context** will have all their _Read_ & _Write_ + requests routed to the **same node**. Gain load balancing by assigning **different contexts** + to **different sessions**. +* In this page: + * [LoadBalanceBehavior options](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#loadbalancebehavior-options) + * [Initialize LoadBalanceBehavior on the client](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#initialize-loadbalancebehavior-on-the-client) + * [Set LoadBalanceBehavior on the server:](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server) + * [By operation](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server---by-operation) + * [From Studio](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server---from-studio) + * [When to use](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#when-to-use) + + +## LoadBalanceBehavior options + +### `None` (default option) + +* Requests will be handled based on the `ReadBalanceBehavior` configuration. + See the conditional flow described in [Client logic for choosing a node](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). 
+ * **_Read_** requests: + The client will calculate the target node from the configured [ReadBalanceBehavior Option](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#readbalancebehavior-options). + * **_Write_** requests: + Will be sent to the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node). + The data will then be replicated to all the other nodes in the database group. +### `UseSessionContext` + +* **Load-balance** + + * When this option is enabled, the client will calculate the target node from the session-id. + The session-id is hashed from a **context string** and an optional **seed** given by the user. + The context string together with the seed are referred to as **"The session context"**. + + * Per session, the client will select a node from the topology list based on this session-context. + So sessions that use the **same** context will target the **same** node. + + * All **_Read & Write_** requests made on the session (i.e a query or a load request, etc.) + will address this calculated node. + _Read & Write_ requests that are made on the store (i.e. executing an [operation](../../../client-api/operations/what-are-operations.mdx)) + will go to the preferred node. + + * All _Write_ requests will be replicated to all the other nodes in the database group as usual. + +* **Failover** + + * In case of a failure, the client will try to access the next node from the topology nodes list. + + + +## Initialize LoadBalanceBehavior on the client + +* The `LoadBalanceBehavior` convention can be set **on the client** when initializing the Document Store. + This will set the load balance behavior for the default database that is set on the store. + +* This setting can be **overriden** by setting 'LoadBalanceBehavior' on the server, see [below](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server). 
+**Initialize conventions**: + + + +{`// Initialize 'LoadBalanceBehavior' on the client: +$documentStore = new DocumentStore(["ServerURL_1", "ServerURL_2", "..."], "DefaultDB"); + +$conventions = new DocumentConventions(); +// Enable the session-context feature +// If this is not enabled then a context string set in a session will be ignored +$conventions->setLoadBalanceBehavior(LoadBalanceBehavior::useSessionContext()); + + +// Assign a method that sets the default context string +// This string will be used for sessions that do Not provide a context string +// A sample GetDefaultContext method is defined below +$conventions->setLoadBalancerPerSessionContextSelector(\\Closure::fromCallable([$this, 'GetDefaultContext'])); + +// Set a seed +// The seed is 0 by default, provide any number to override +$conventions->setLoadBalancerContextSeed(5); + +$documentStore->setConventions($conventions); +$documentStore->initialize(); +`} + + + + +{`// A customized method for getting a default context string +private function GetDefaultContext(string $dbName): string +\{ + // Method is invoked by RavenDB with the database name + // Use that name - or return any string of your choice + return "DefaultContextString"; +\} +`} + + +**Session usage**: + + + +{`// Open a session that will use the DEFAULT store values: +$session = $documentStore->openSession(); +try \{ + // For all Read & Write requests made in this session, + // node to access is calculated from string & seed values defined on the store + $employee = $session->load(Employee::class, "employees/1-A"); +\} finally \{ + $session->close(); +\} +`} + + + + +{`// Open a session that will use a UNIQUE context string: +$session = $documentStore->openSession(); +try \{ + // Call SetContext, pass a unique context string for this session + $session->advanced()->getSessionInfo()->setContext("SomeOtherContext"); + + // For all Read & Write requests made in this session, + // node to access is calculated from the unique string & the seed defined on the store + $employee = $session->load(Employee::class, "employees/1-A"); +\} finally \{ + $session->close(); +\} +`} + + + + + +## Set LoadBalanceBehavior on the server + + + +**Note**: + +* Setting the load balance behavior on the server, either by an **Operation** or from the **Studio**, + only 'enables the feature' and sets the seed. + +* For the feature to be in effect, you still need to define the context string itself: + * either, per session, call the advanced `setContext` method + * or, set a default document store value using `setLoadBalancerPerSessionContextSelector` + + +#### Set LoadBalanceBehavior on the server - by operation: + +* The `LoadBalanceBehavior` configuration can be set **on the server** by sending an [operation](../../../client-api/operations/what-are-operations.mdx). + +* The operation can modify the default database only, or all databases - see examples below. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). 
+ + + + +{`// Setting 'LoadBalanceBehavior' on the server by sending an operation: +$documentStore = new DocumentStore(); +try { + // Define the client configuration to put on the server + $configurationToSave = new ClientConfiguration(); + // Enable the session-context feature + // If this is not enabled then a context string set in a session will be ignored + $configurationToSave->setLoadBalanceBehavior(LoadBalanceBehavior::useSessionContext()); + + // Set a seed + // The seed is 0 by default, provide any number to override + $configurationToSave->setLoadBalancerContextSeed(10); + + // NOTE: + // The session's context string is Not set on the server + // You still need to set it on the client: + // * either as a convention on the document store + // * or pass it to 'SetContext' method on the session + + // Configuration will be in effect when Disabled is set to false + $configurationToSave->setDisabled(false); + + + // Define the put configuration operation for the DEFAULT database + $putConfigurationOp = new PutClientConfigurationOperation($configurationToSave); + + // Execute the operation by passing it to Maintenance.Send + $documentStore->maintenance()->send($putConfigurationOp); + + // After the operation has executed: + // all Read & Write requests, per session, will address the node calculated from: + // * the seed set on the server & + // * the session's context string set on the client +} finally { + $documentStore->close(); +} +`} + + + + +{`// Setting 'LoadBalanceBehavior' on the server by sending an operation: +$documentStore = new DocumentStore(); +try { + // Define the client configuration to put on the server + $configurationToSave = new ClientConfiguration(); + // Enable the session-context feature + // If this is not enabled then a context string set in a session will be ignored + $configurationToSave->setLoadBalanceBehavior(LoadBalanceBehavior::useSessionContext()); + + // Set a seed + // The seed is 0 by default, provide any number to override + $configurationToSave->setLoadBalancerContextSeed(10); + + // NOTE: + // The session's context string is Not set on the server + // You still need to set it on the client: + // * either as a convention on the document store + // * or pass it to 'SetContext' method on the session + + // Configuration will be in effect when Disabled is set to false + $configurationToSave->setDisabled(false); + + + // Define the put configuration operation for ALL databases + $putConfigurationOp = new PutServerWideClientConfigurationOperation($configurationToSave); + + // Execute the operation by passing it to Maintenance.Server.Send + $documentStore->maintenance()->server()->send($putConfigurationOp); + + // After the operation has executed: + // all Read & Write requests, per session, will address the node calculated from: + // * the seed set on the server & + // * the session's context string set on the client +} finally { + $documentStore->close(); +} +`} + + + +#### Set LoadBalanceBehavior on the server - from Studio: + +* The `LoadBalanceBehavior` configuration can be set from the Studio's [Client Configuration view](../../../studio/database/settings/client-configuration-per-database.mdx). + Setting it from the Studio will set this configuration directly **on the server**. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). 
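+With `loadBalanceBehavior` enabled (whether on the client or on the server), the routing effect can be seen end to end. Here is a minimal, hypothetical sketch (the context strings are arbitrary examples) of two sessions that are given different contexts and may therefore be served by different nodes:
+
+
+
+{`$firstSession = $documentStore->openSession();
+$secondSession = $documentStore->openSession();
+try \{
+    // Assumed, arbitrary context strings -
+    // sessions with different contexts may be routed to different nodes
+    $firstSession->advanced()->getSessionInfo()->setContext("users/1");
+    $secondSession->advanced()->getSessionInfo()->setContext("users/2");
+
+    $employeeA = $firstSession->load(Employee::class, "employees/1-A");
+    $employeeB = $secondSession->load(Employee::class, "employees/2-A");
+\} finally \{
+    $firstSession->close();
+    $secondSession->close();
+\}
+`}
+
+
+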
+
+
+
+## When to use
+
+* Distributing _Read & Write_ requests among the cluster nodes can be beneficial
+  when a set of sessions handles a specific set of documents or similar data.
+  Load balancing can be achieved by routing requests from the sessions that handle similar topics to the same node, while routing other sessions to other nodes.
+
+* Another example is setting the session's context to the current user,
+  thus spreading the _Read & Write_ requests across the nodes per logged-in user of the application.
+
+* Once load balancing is set to be per session-context,
+  if you detect that many or all sessions send their requests to the same node,
+  a further level of node randomization can be added by changing the seed.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-python.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-python.mdx
new file mode 100644
index 0000000000..0f27437fa9
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/_load-balance-behavior-python.mdx
@@ -0,0 +1,252 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The `loadBalanceBehavior` configuration allows you to specify which sessions should
+  communicate with the same node.
+
+* Sessions that are assigned the **same context** will have all their _Read_ & _Write_
+  requests routed to the **same node**. Gain load balancing by assigning **different contexts**
+  to **different sessions**.
+
+* In this page:
+  * [LoadBalanceBehavior options](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#loadbalancebehavior-options)
+  * [Initialize LoadBalanceBehavior on the client](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#initialize-loadbalancebehavior-on-the-client)
+  * [Set LoadBalanceBehavior on the server:](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server)
+    * [By operation](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server---by-operation)
+    * [From Studio](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server---from-studio)
+  * [When to use](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#when-to-use)
+
+
+## LoadBalanceBehavior options
+
+### `None` (default option)
+
+* Requests will be handled based on the `ReadBalanceBehavior` configuration.
+  See the conditional flow described in [Client logic for choosing a node](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node).
+  * **_Read_** requests:
+    The client will calculate the target node from the configured [ReadBalanceBehavior Option](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#readbalancebehavior-options).
+  * **_Write_** requests:
+    Will be sent to the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node).
+    The data will then be replicated to all the other nodes in the database group.
+### `UseSessionContext`
+
+* **Load-balance**
+
+  * When this option is enabled, the client will calculate the target node from the session-id.
+    The session-id is hashed from a **context string** and an optional **seed** given by the user.
+    The context string together with the seed is referred to as **"The session context"**.
+
+  * Per session, the client will select a node from the topology list based on this session-context.
+    So sessions that use the **same** context will target the **same** node.
+
+  * All **_Read & Write_** requests made on the session (i.e. a query or a load request, etc.)
+    will address this calculated node.
+    _Read & Write_ requests that are made on the store (i.e. executing an [operation](../../../client-api/operations/what-are-operations.mdx))
+    will go to the preferred node.
+
+  * All _Write_ requests will be replicated to all the other nodes in the database group as usual.
+
+* **Failover**
+
+  * In case of a failure, the client will try to access the next node from the topology nodes list.
+
+
+
+## Initialize LoadBalanceBehavior on the client
+
+* The `LoadBalanceBehavior` convention can be set **on the client** when initializing the Document Store.
+  This will set the load balance behavior for the default database that is set on the store.
+
+* This setting can be **overridden** by setting 'LoadBalanceBehavior' on the server, see [below](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#set-loadbalancebehavior-on-the-server).
+
+**Initialize conventions**:
+
+
+
+{`# Initialize 'LoadBalanceBehavior' on the client:
+document_store = DocumentStore(
+    urls=["ServerURL_1", "ServerURL_2", "..."],
+    database="DefaultDB",
+)
+conventions = DocumentConventions()
+
+# Enable the session-context feature
+# If this is not enabled then a context string set in a session will be ignored
+conventions.load_balance_behavior = LoadBalanceBehavior.USE_SESSION_CONTEXT
+
+# Assign a method that sets the default context string
+# This string will be used for sessions that do Not provide a context string
+# A sample get_default_context method is defined below
+conventions.load_balancer_per_session_context_selector = get_default_context
+
+# Set a seed
+# The seed is 0 by default, provide any number to override
+conventions.load_balancer_context_seed = 5
+
+document_store.conventions = conventions
+document_store.initialize()
+`}
+
+
+
+
+{`# A customized method for getting a default context string
+def get_default_context(db_name: str) -> str:
+    # Method is invoked by RavenDB with the database name
+    # Use that name - or return any string of your choice
+    return "DefaultContextString"
+`}
+
+
+**Session usage**:
+
+
+
+{`# Open a session that will use the DEFAULT store values:
+with document_store.open_session() as session:
+    # For all Read & Write requests made in this session,
+    # node to access is calculated from string & seed values defined on the store
+    employee = session.load("employees/1-A", Employee)
+`}
+
+
+
+
+{`# Open a session that will use a UNIQUE context string:
+with document_store.open_session() as session:
+    # Set the context property to a unique context string for this session
+    session.advanced.session_info.context = "SomeOtherContext"
+
+    # For all Read & Write requests made in this session,
+    # node to access is calculated from the unique string & the seed defined on the store
+    employee = session.load("employees/1-A", Employee)
+`}
+
+
+
+
+
+## Set LoadBalanceBehavior on the server
+
+
+
+**Note**:
+
+* Setting the load balance behavior on the server, either by an **Operation** or from the **Studio**,
+  only 'enables the feature' and sets the seed.
+
+* For the feature to be in effect, you still need to define the context string itself:
+  * either, per session, set `session.advanced.session_info.context`
+  * or, on the document store, set a default value via `load_balancer_per_session_context_selector`
+
+
+#### Set LoadBalanceBehavior on the server - by operation:
+
+* The `LoadBalanceBehavior` configuration can be set **on the server** by sending an [operation](../../../client-api/operations/what-are-operations.mdx).
+
+* The operation can modify the default database only, or all databases - see examples below.
+
+* Once configuration on the server has changed, the running client will get updated with the new settings.
+  See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date).
+
+
+
+
+{`# Setting 'LoadBalanceBehavior' on the server by sending an operation:
+with document_store:
+    # Define the client configuration to put on the server
+    configuration_to_save = ClientConfiguration()
+    # Enable the session-context feature
+    # If this is not enabled then a context string set in a session will be ignored
+    configuration_to_save.load_balance_behavior = LoadBalanceBehavior.USE_SESSION_CONTEXT
+
+    # Set a seed
+    # The seed is 0 by default, provide any number to override
+    configuration_to_save.load_balancer_context_seed = 10
+
+    # NOTE:
+    # The session's context string is Not set on the server
+    # You still need to set it on the client:
+    # * either as a convention on the document store
+    # * or set the 'context' property on the session
+
+    # Configuration will be in effect when 'disabled' is set to False
+    configuration_to_save.disabled = False
+
+    # Define the put configuration operation for the DEFAULT database
+    put_configuration_op = PutClientConfigurationOperation(configuration_to_save)
+
+    # Execute the operation by passing it to maintenance.send
+    document_store.maintenance.send(put_configuration_op)
+
+    # After the operation has executed:
+    # all Read & Write requests, per session, will address the node calculated from:
+    # * the seed set on the server &
+    # * the session's context string set on the client
+`}
+
+
+
+
+{`with document_store:
+    # Define the client configuration to put on the server
+    configuration_to_save = ClientConfiguration()
+    # Enable the session-context feature
+    # If this is not enabled then a context string set in a session will be ignored
+    configuration_to_save.load_balance_behavior = LoadBalanceBehavior.USE_SESSION_CONTEXT
+
+    # Set a seed
+    # The seed is 0 by default, provide any number to override
+    configuration_to_save.load_balancer_context_seed = 10
+
+    # NOTE:
+    # The session's context string is Not set on the server
+    # You still need to set it on the client:
+    # * either as a convention on the document store
+    # * or set the 'context' property on the session
+
+    # Configuration will be in effect when 'disabled' is set to False
+    configuration_to_save.disabled = False
+
+    # Define the put configuration operation for ALL databases
+    put_configuration_op = PutServerWideClientConfigurationOperation(configuration_to_save)
+
+    # Execute the operation by passing it to maintenance.server.send
+    document_store.maintenance.server.send(put_configuration_op)
+
+    # After the operation has executed:
+    # all Read & Write requests, per session, will address the node calculated from:
+    # * the seed set on the server &
+    # * the session's context string set on the client
+`}
+
+
+
+#### Set LoadBalanceBehavior on the server - from Studio:
+
+* The `LoadBalanceBehavior` configuration can be set from the Studio's [Client Configuration view](../../../studio/database/settings/client-configuration-per-database.mdx).
+  Setting it from the Studio will set this configuration directly **on the server**.
+
+* Once configuration on the server has changed, the running client will get updated with the new settings.
+  See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date).
+
+
+
+## When to use
+
+* Distributing _Read & Write_ requests among the cluster nodes can be beneficial
+  when a set of sessions handles a specific set of documents or similar data.
+  Load balancing can be achieved by routing requests from the sessions that handle similar topics to the same node, while routing other sessions to other nodes.
+
+* Another example is setting the session's context to the current user,
+  thus spreading the _Read & Write_ requests across the nodes per logged-in user of the application.
+
+* Once load balancing is set to be per session-context,
+  if you detect that many or all sessions send their requests to the same node,
+  a further level of node randomization can be added by changing the seed.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-csharp.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-csharp.mdx
new file mode 100644
index 0000000000..dec0f7e66e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-csharp.mdx
@@ -0,0 +1,169 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When set, the `ReadBalanceBehavior` configuration will be in effect according to the
+  conditional flow described in [Client logic for choosing a node](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node).
+
+* Once configuration is in effect then:
+  * **_Read_** requests - will be sent to the node determined by the configured option - see below.
+  * **_Write_** requests - are always sent to the preferred node.
+    The data will then be replicated to all the other nodes in the database group.
+  * Upon a node failure, the node to failover to is also determined by the defined option.
+
+* In this page:
+  * [ReadBalanceBehavior options](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#readbalancebehavior-options)
+  * [Initialize ReadBalanceBehavior on the client](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#initialize-readbalancebehavior-on-the-client)
+  * [Set ReadBalanceBehavior on the server:](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server)
+    * [By operation](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server---by-operation)
+    * [From Studio](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server---from-studio)
+  * [When to use](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#when-to-use)
+
+
+## ReadBalanceBehavior options
+
+### `None` (default option)
+
+* **Read-balance**
+  No read balancing will occur.
+  The client will always send _Read_ requests to the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node).
+* **Failover**
+  The client will failover nodes in the order they appear in the [topology nodes list](../../../studio/database/settings/manage-database-group.mdx#database-group-topology---actions).
+### `RoundRobin`
+
+* **Read-balance**
+  * Each session opened is assigned an incremental session-id number.
+    **Per session**, the client will select the next node from the topology list based on this internal session-id.
+  * All _Read_ requests made on the session (i.e. a query or a load request, etc.)
+    will address the calculated node.
+  * A _Read_ request that is made on the store (i.e. executing an [operation](../../../client-api/operations/what-are-operations.mdx))
+    will go to the preferred node.
+* **Failover**
+  In case of a failure, the client will try the next node from the topology nodes list.
+### `FastestNode`
+
+* **Read-balance**
+  All _Read_ requests will go to the fastest node.
+  The fastest node is determined by a [Speed Test](../../../client-api/cluster/speed-test.mdx).
+* **Failover**
+  In case of a failure, a speed test will be triggered again,
+  and in the meantime the client will use the preferred node.
+
+
+
+## Initialize ReadBalanceBehavior on the client
+
+* The `ReadBalanceBehavior` convention can be set **on the client** when initializing the Document Store.
+  This will set the read balance behavior for the default database that is set on the store.
+
+* This setting can be **overridden** by setting 'ReadBalanceBehavior' on the server, see [below](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server).
+
+
+
+{`// Initialize 'ReadBalanceBehavior' on the client:
+var documentStore = new DocumentStore
+\{
+    Urls = new[] \{ "ServerURL_1", "ServerURL_2", "..." \},
+    Database = "DefaultDB",
+    Conventions = new DocumentConventions
+    \{
+        // With ReadBalanceBehavior set to: 'FastestNode':
+        // Client READ requests will address the fastest node
+        // Client WRITE requests will address the preferred node
+        ReadBalanceBehavior = ReadBalanceBehavior.FastestNode
+    \}
+\}.Initialize();
+`}
+
+
+
+
+
+## Set ReadBalanceBehavior on the server
+
+#### Set ReadBalanceBehavior on the server - by operation:
+
+* The `ReadBalanceBehavior` configuration can be set **on the server** by sending an [operation](../../../client-api/operations/what-are-operations.mdx).
+
+* The operation can modify the default database only, or all databases - see examples below.
+
+* Once configuration on the server has changed, the running client will get updated with the new settings.
+  See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date).
+ + + + +{`// Setting 'ReadBalanceBehavior' on the server by sending an operation: +using (documentStore) +{ + // Define the client configuration to put on the server + var clientConfiguration = new ClientConfiguration + { + // Replace 'FastestNode' (from example above) with 'RoundRobin' + ReadBalanceBehavior = ReadBalanceBehavior.RoundRobin + }; + + // Define the put configuration operation for the DEFAULT database + var putConfigurationOp = new PutClientConfigurationOperation(clientConfiguration); + + // Execute the operation by passing it to Maintenance.Send + documentStore.Maintenance.Send(putConfigurationOp); + + // After the operation has executed: + // All WRITE requests will continue to address the preferred node + // READ requests, per session, will address a different node based on the RoundRobin logic +} +`} + + + + +{`// Setting 'ReadBalanceBehavior' on the server by sending an operation: +using (documentStore) +{ + // Define the client configuration to put on the server + var clientConfiguration = new ClientConfiguration + { + // Replace 'FastestNode' (from example above) with 'RoundRobin' + ReadBalanceBehavior = ReadBalanceBehavior.RoundRobin + }; + + // Define the put configuration operation for the ALL databases + var putConfigurationOp = new PutServerWideClientConfigurationOperation(clientConfiguration); + + // Execute the operation by passing it to Maintenance.Server.Send + documentStore.Maintenance.Server.Send(putConfigurationOp); + + // After the operation has executed: + // All WRITE requests will continue to address the preferred node + // READ requests, per session, will address a different node based on the RoundRobin logic +} +`} + + + + +#### Set ReadBalanceBehavior on the server - from Studio: + +* The `ReadBalanceBehavior` configuration can be set from the Studio's [Client Configuration view](../../../studio/database/settings/client-configuration-per-database.mdx). + Setting it from the Studio will set this configuration directly **on the server**. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). + + + +## When to use + +* Setting the read balance behavior is beneficial when you only care about distributing the _Read_ requests among the cluster nodes, + and when all _Write_ requests can go to the same node. + +* Using the 'FastestNode' option is beneficial when some nodes in the system are known to be faster than others, + thus letting the fastest node serve each read request. + + + + diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-nodejs.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-nodejs.mdx new file mode 100644 index 0000000000..3d29583c2d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-nodejs.mdx @@ -0,0 +1,163 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When set, the `readBalanceBehavior` configuration will be in effect according to the + conditional flow described in [Client logic for choosing a node](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). 
+ +* Once configuration is in effect then: + * **_Read_** requests - will be sent to the node determined by the configured option - see below. + * **_Write_** requests - are always sent to the preferred node. + The data will then be replicated to all the other nodes in the database group. + * Upon a node failure, the node to failover to is also determined by the defined option. + +* In this page: + * [readBalanceBehavior options](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#readbalancebehavior-options) + * [Initialize readBalanceBehavior on the client](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#initialize-readbalancebehavior-on-the-client) + * [Set readBalanceBehavior on the server:](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server) + * [By operation](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server---by-operation) + * [From Studio](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server---from-studio) + * [When to use](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#when-to-use) + + +## readBalanceBehavior options + +### `None` (default option) + + * **Read-balance** + No read balancing will occur. + The client will always send _Read_ requests to the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node). + * **Failover** + The client will failover nodes in the order they appear in the [topology nodes list](../../../studio/database/settings/manage-database-group.mdx#database-group-topology---actions). +### `RoundRobin` + +* **Read-balance** + * Each session opened is assigned an incremental session-id number. + **Per session**, the client will select the next node from the topology list based on this internal session-id. + * All _Read_ requests made on the session (i.e a query or a load request, etc.) + will address the calculated node. + * A _Read_ request that is made on the store (i.e. executing an [operation](../../../client-api/operations/what-are-operations.mdx)) + will go to the preferred node. +* **Failover** + In case of a failure, the client will try the next node from the topology nodes list. +### `FastestNode` + + * **Read-balance** + All _Read_ requests will go to the fastest node. + The fastest node is determined by a [Speed Test](../../../client-api/cluster/speed-test.mdx). + * **Failover** + In case of a failure, a speed test will be triggered again, + and in the meantime the client will use the preferred node. + + + +## Initialize readBalanceBehavior on the client + +* The `readBalanceBehavior` convention can be set **on the client** when initializing the Document Store. + This will set the read balance behavior for the default database that is set on the store. + +* This setting can be **overriden** by setting 'readBalanceBehavior' on the server, see [below](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server). 
+ + + +{`// Initialize 'readBalanceBehavior' on the client: +// =============================================== + +const documentStore = new DocumentStore(["serverUrl_1", "serverUrl_2", "..."], "DefaultDB"); + +// For example: +// With readBalanceBehavior set to: 'FastestNode': +// Client READ requests will address the fastest node +// Client WRITE requests will address the preferred node +documentStore.conventions.readBalanceBehavior = "FastestNode"; + +documentStore.initialize(); +`} + + + + + +## Set readBalanceBehavior on the server + +#### Set readBalanceBehavior on the server - by operation: + +* The `readBalanceBehavior` configuration can be set **on the server** by sending an [operation](../../../client-api/operations/what-are-operations.mdx). + +* The operation can modify the default database only, or all databases - see examples below. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). + + + + +{`// Setting 'readBalanceBehavior' on the server by sending an operation: +// ==================================================================== + +// Define the client configuration to put on the server +const configurationToSave = { + // Replace 'FastestNode' (from example above) with 'RoundRobin' + readBalanceBehavior: "RoundRobin" +}; + +// Define the put configuration operation for the DEFAULT database +const putConfigurationOp = new PutClientConfigurationOperation(configurationToSave); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(putConfigurationOp); + +// After the operation has executed: +// All WRITE requests will continue to address the preferred node +// READ requests, per session, will address a different node based on the RoundRobin logic +`} + + + + +{`// Setting 'readBalanceBehavior' on the server by sending an operation: +// ==================================================================== + +// Define the client configuration to put on the server +const configurationToSave = { + // Replace 'FastestNode' (from example above) with 'RoundRobin' + readBalanceBehavior: "RoundRobin" +}; + +// Define the put configuration operation for ALL databases +const putConfigurationOp = new PutServerWideClientConfigurationOperation(configurationToSave); + +// Execute the operation by passing it to maintenance.server.send +await documentStore.maintenance.server.send(putConfigurationOp); + +// After the operation has executed: +// All WRITE requests will continue to address the preferred node +// READ requests, per session, will address a different node based on the RoundRobin logic +`} + + + +#### Set readBalanceBehavior on the server - from Studio: + +* The `readBalanceBehavior` configuration can be set from the Studio's [Client Configuration view](../../../studio/database/settings/client-configuration-per-database.mdx). + Setting it from the Studio will set this configuration directly **on the server**. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). 
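+
+As a rough illustration (a sketch, not one of the official examples): once 'RoundRobin' is in effect,
+each newly opened session is assigned the next node from the topology for its _Read_ requests,
+while _Write_ requests keep addressing the preferred node.
+
+
+
+{`// A sketch, assuming the 'RoundRobin' configuration above is in effect:
+const session1 = documentStore.openSession();
+// READ requests of session1 are served by the node assigned to it (e.g. node A)
+const employee1 = await session1.load("employees/1-A");
+
+const session2 = documentStore.openSession();
+// session2 is assigned the next node in the topology (e.g. node B)
+const employee2 = await session2.load("employees/1-A");
+`}
+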
+ + + +## When to use + +* Setting the read balance behavior is beneficial when you only care about distributing the _Read_ requests among the cluster nodes, + and when all _Write_ requests can go to the same node. + +* Using the 'FastestNode' option is beneficial when some nodes in the system are known to be faster than others, + thus letting the fastest node serve each read request. + + + + diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-php.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-php.mdx new file mode 100644 index 0000000000..0eb01f15c2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-php.mdx @@ -0,0 +1,168 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When set, the `ReadBalanceBehavior` configuration will be in effect according to the + conditional flow described in [Client logic for choosing a node](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* Once configuration is in effect then: + * **_Read_** requests - will be sent to the node determined by the configured option - see below. + * **_Write_** requests - are always sent to the preferred node. + The data will then be replicated to all the other nodes in the database group. + * Upon a node failure, the node to failover to is also determined by the defined option. + +* In this page: + * [ReadBalanceBehavior options](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#readbalancebehavior-options) + * [Initialize ReadBalanceBehavior on the client](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#initialize-readbalancebehavior-on-the-client) + * [Set ReadBalanceBehavior on the server:](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server) + * [By operation](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server---by-operation) + * [From Studio](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server---from-studio) + * [When to use](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#when-to-use) + + +## readBalanceBehavior options + +### `None` (default option) + + * **Read-balance** + No read balancing will occur. + The client will always send _Read_ requests to the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node). + * **Failover** + The client will failover nodes in the order they appear in the [topology nodes list](../../../studio/database/settings/manage-database-group.mdx#database-group-topology---actions). +### `RoundRobin` + +* **Read-balance** + * Each session opened is assigned an incremental session-id number. + **Per session**, the client will select the next node from the topology list based on this internal session-id. + * All _Read_ requests made on the session (i.e a query or a load request, etc.) + will address the calculated node. + * A _Read_ request that is made on the store (i.e. executing an [operation](../../../client-api/operations/what-are-operations.mdx)) + will go to the preferred node. +* **Failover** + In case of a failure, the client will try the next node from the topology nodes list. 
+### `FastestNode` + + * **Read-balance** + All _Read_ requests will go to the fastest node. + The fastest node is determined by a [Speed Test](../../../client-api/cluster/speed-test.mdx). + * **Failover** + In case of a failure, a speed test will be triggered again, + and in the meantime the client will use the preferred node. + + + +## Initialize ReadBalanceBehavior on the client + +* The `ReadBalanceBehavior` convention can be set **on the client** when initializing the Document Store. + This will set the read balance behavior for the default database that is set on the store. + +* This setting can be **overriden** by setting 'ReadBalanceBehavior' on the server, see [below](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server). + + + +{`// Initialize 'ReadBalanceBehavior' on the client: +$documentStore = new DocumentStore(["ServerURL_1", "ServerURL_2", "..."], "DefaultDB"); + +$conventions = new DocumentConventions(); +// With ReadBalanceBehavior set to: 'FastestNode': +// Client READ requests will address the fastest node +// Client WRITE requests will address the preferred node +$conventions->setReadBalanceBehavior(ReadBalanceBehavior::fastestNode()); + +$documentStore->setConventions($conventions); +$documentStore->initialize(); +`} + + + + + +## Set ReadBalanceBehavior on the server + +#### Set ReadBalanceBehavior on the server - by operation: + +* The `ReadBalanceBehavior` configuration can be set **on the server** by sending an [operation](../../../client-api/operations/what-are-operations.mdx). + +* The operation can modify the default database only, or all databases - see examples below. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). 
+ + + + +{`// Setting 'ReadBalanceBehavior' on the server by sending an operation: +$documentStore = new DocumentStore(); +try { + // Define the client configuration to put on the server + $clientConfiguration = new ClientConfiguration(); + // Replace 'FastestNode' (from example above) with 'RoundRobin' + $clientConfiguration->setReadBalanceBehavior(ReadBalanceBehavior::roundRobin()); + + // Define the put configuration operation for the DEFAULT database + $putConfigurationOp = new PutClientConfigurationOperation($clientConfiguration); + + // Execute the operation by passing it to Maintenance.Send + $documentStore->maintenance()->send($putConfigurationOp); + + // After the operation has executed: + // All WRITE requests will continue to address the preferred node + // READ requests, per session, will address a different node based on the RoundRobin logic +} finally { + $documentStore->close(); +} +`} + + + + +{`// Setting 'ReadBalanceBehavior' on the server by sending an operation: +$documentStore = new DocumentStore(); +try { + // Define the client configuration to put on the server + $clientConfiguration = new ClientConfiguration(); + + // Replace 'FastestNode' (from example above) with 'RoundRobin' + $clientConfiguration->setReadBalanceBehavior(ReadBalanceBehavior::roundRobin()); + + // Define the put configuration operation for the ALL databases + $putConfigurationOp = new PutServerWideClientConfigurationOperation($clientConfiguration); + + // Execute the operation by passing it to Maintenance.Server.Send + $documentStore->maintenance()->server()->send($putConfigurationOp); + + // After the operation has executed: + // All WRITE requests will continue to address the preferred node + // READ requests, per session, will address a different node based on the RoundRobin logic +} finally { + $documentStore->close(); +} +`} + + + + +#### Set ReadBalanceBehavior on the server - from Studio: + +* The `ReadBalanceBehavior` configuration can be set from the Studio's [Client Configuration view](../../../studio/database/settings/client-configuration-per-database.mdx). + Setting it from the Studio will set this configuration directly **on the server**. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). + + + +## When to use + +* Setting the read balance behavior is beneficial when you only care about distributing the _Read_ requests among the cluster nodes, + and when all _Write_ requests can go to the same node. + +* Using the 'FastestNode' option is beneficial when some nodes in the system are known to be faster than others, + thus letting the fastest node serve each read request. 
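+
+To make the _Read/Write_ split concrete, here is a short sketch (not one of the official examples;
+it assumes 'RoundRobin' was put in effect as above, and that the sample `Employee` class has a `setTitle` setter):
+
+
+
+{`// A sketch, assuming 'RoundRobin' is in effect:
+$session = $documentStore->openSession();
+try {
+    // READ request - served by the node assigned to this session
+    $employee = $session->load(Employee::class, "employees/1-A");
+
+    // WRITE request - still sent to the preferred node,
+    // then replicated to the other nodes in the database group
+    $employee->setTitle("Sales Manager");
+    $session->saveChanges();
+} finally {
+    $session->close();
+}
+`}
+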
+ + + + diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-python.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-python.mdx new file mode 100644 index 0000000000..099d3883da --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/_read-balance-behavior-python.mdx @@ -0,0 +1,160 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When set, the `ReadBalanceBehavior` configuration will be in effect according to the + conditional flow described in [Client logic for choosing a node](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* Once configuration is in effect then: + * **_Read_** requests - will be sent to the node determined by the configured option - see below. + * **_Write_** requests - are always sent to the preferred node. + The data will then be replicated to all the other nodes in the database group. + * Upon a node failure, the node to failover to is also determined by the defined option. + +* In this page: + * [ReadBalanceBehavior options](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#readbalancebehavior-options) + * [Initialize ReadBalanceBehavior on the client](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#initialize-readbalancebehavior-on-the-client) + * [Set ReadBalanceBehavior on the server:](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server) + * [By operation](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server---by-operation) + * [From Studio](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server---from-studio) + * [When to use](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#when-to-use) + + +## readBalanceBehavior options + +### `None` (default option) + + * **Read-balance** + No read balancing will occur. + The client will always send _Read_ requests to the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node). + * **Failover** + The client will failover nodes in the order they appear in the [topology nodes list](../../../studio/database/settings/manage-database-group.mdx#database-group-topology---actions). +### `RoundRobin` + +* **Read-balance** + * Each session opened is assigned an incremental session-id number. + **Per session**, the client will select the next node from the topology list based on this internal session-id. + * All _Read_ requests made on the session (i.e a query or a load request, etc.) + will address the calculated node. + * A _Read_ request that is made on the store (i.e. executing an [operation](../../../client-api/operations/what-are-operations.mdx)) + will go to the preferred node. +* **Failover** + In case of a failure, the client will try the next node from the topology nodes list. +### `FastestNode` + + * **Read-balance** + All _Read_ requests will go to the fastest node. + The fastest node is determined by a [Speed Test](../../../client-api/cluster/speed-test.mdx). + * **Failover** + In case of a failure, a speed test will be triggered again, + and in the meantime the client will use the preferred node. 
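+
+As a quick illustration of the `RoundRobin` option (a sketch, not one of the official examples):
+
+
+
+{`# A sketch, assuming 'document_store' was configured with RoundRobin:
+# each newly opened session is assigned the next topology node for its reads.
+with document_store.open_session() as session_1:
+    employee = session_1.load("employees/1-A", Employee)  # e.g., served by node A
+
+with document_store.open_session() as session_2:
+    employee = session_2.load("employees/1-A", Employee)  # e.g., served by node B
+`}
+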
+
+
+
+## Initialize ReadBalanceBehavior on the client
+
+* The `ReadBalanceBehavior` convention can be set **on the client** when initializing the Document Store.
+  This will set the read balance behavior for the default database that is set on the store.
+
+* This setting can be **overridden** by setting 'ReadBalanceBehavior' on the server, see [below](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#set-readbalancebehavior-on-the-server).
+
+
+
+{`# Initialize 'ReadBalanceBehavior' on the client:
+document_store = DocumentStore(
+    urls=["ServerURL_1", "ServerURL_2", "..."],
+    database="DefaultDB",
+)
+conventions = DocumentConventions()
+# With ReadBalanceBehavior set to: 'FastestNode':
+# Client READ requests will address the fastest node
+# Client WRITE requests will address the preferred node
+conventions.read_balance_behavior = ReadBalanceBehavior.FASTEST_NODE
+
+document_store.conventions = conventions
+`}
+
+
+
+
+
+## Set ReadBalanceBehavior on the server
+
+#### Set ReadBalanceBehavior on the server - by operation:
+
+* The `ReadBalanceBehavior` configuration can be set **on the server** by sending an [operation](../../../client-api/operations/what-are-operations.mdx).
+
+* The operation can modify the default database only, or all databases - see examples below.
+
+* Once configuration on the server has changed, the running client will get updated with the new settings.
+  See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date).
+
+
+
+
+{`# Setting 'ReadBalanceBehavior' on the server by sending an operation:
+with document_store:
+    # Define the client configuration to put on the server
+    client_configuration = ClientConfiguration()
+    # Replace 'FastestNode' (from the example above) with 'RoundRobin'
+    client_configuration.read_balance_behavior = ReadBalanceBehavior.ROUND_ROBIN
+
+    # Define the put configuration operation for the DEFAULT database
+    put_configuration_op = PutClientConfigurationOperation(client_configuration)
+
+    # Execute the operation by passing it to maintenance.send
+    document_store.maintenance.send(put_configuration_op)
+
+    # After the operation has executed:
+    # All WRITE requests will continue to address the preferred node
+    # READ requests, per session, will address a different node based on the RoundRobin logic
+`}
+
+
+
+
+{`# Setting 'ReadBalanceBehavior' on the server by sending an operation:
+with document_store:
+    # Define the client configuration to put on the server
+    client_configuration = ClientConfiguration()
+    # Replace 'FastestNode' (from the example above) with 'RoundRobin'
+    client_configuration.read_balance_behavior = ReadBalanceBehavior.ROUND_ROBIN
+
+    # Define the put configuration operation for ALL databases
+    put_configuration_op = PutServerWideClientConfigurationOperation(client_configuration)
+
+    # Execute the operation by passing it to maintenance.server.send
+    document_store.maintenance.server.send(put_configuration_op)
+
+    # After the operation has executed:
+    # All WRITE requests will continue to address the preferred node
+    # READ requests, per session, will address a different node based on the RoundRobin logic
+`}
+
+
+
+
+#### Set ReadBalanceBehavior on the server - from Studio:
+
+* The `ReadBalanceBehavior` configuration can be set from the Studio's [Client Configuration view](../../../studio/database/settings/client-configuration-per-database.mdx).
+ Setting it from the Studio will set this configuration directly **on the server**. + +* Once configuration on the server has changed, the running client will get updated with the new settings. + See [keeping client up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date). + + + +## When to use + +* Setting the read balance behavior is beneficial when you only care about distributing the _Read_ requests among the cluster nodes, + and when all _Write_ requests can go to the same node. + +* Using the 'FastestNode' option is beneficial when some nodes in the system are known to be faster than others, + thus letting the fastest node serve each read request. + + + + diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/load-balance-behavior.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/load-balance-behavior.mdx new file mode 100644 index 0000000000..f80d62262f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/load-balance-behavior.mdx @@ -0,0 +1,53 @@ +--- +title: "Load balance behavior" +hide_table_of_contents: true +sidebar_label: Load balance behavior +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import LoadBalanceBehaviorCsharp from './_load-balance-behavior-csharp.mdx'; +import LoadBalanceBehaviorPython from './_load-balance-behavior-python.mdx'; +import LoadBalanceBehaviorPhp from './_load-balance-behavior-php.mdx'; +import LoadBalanceBehaviorNodejs from './_load-balance-behavior-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/overview.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/overview.mdx new file mode 100644 index 0000000000..289909a6ed --- /dev/null +++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/overview.mdx @@ -0,0 +1,106 @@ +--- +title: "Load balancing client requests - Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Load balancing client requests - Overview + + +* A database can have multiple instances, each one residing on a different cluster node. + Each instance is a complete replica of the database. + +* The [database-group-topology](../../../studio/database/settings/manage-database-group.mdx#database-group-topology---view) is the list of nodes that contain those database replicas. + The first node in this list is called the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node). + +* The client is kept up-to-date with this topology list. + __The client decides which node from this list to access__ when making requests to the RavenDB cluster. + +* By default, the client will access the preferred node for all _Read & Write_ requests it makes. 
+  This default behavior can be changed by configuring:
+  * [ReadBalanceBehavior](../../../client-api/configuration/load-balance/read-balance-behavior.mdx) - load balancing `Read` requests only
+  * [LoadBalanceBehavior](../../../client-api/configuration/load-balance/load-balance-behavior.mdx) - load balancing `Read & Write` requests
+
+* In this page:
+  * [Keeping the client topology up-to-date](../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date)
+  * [Client logic for choosing a node](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node)
+  * [The preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node)
+  * [Single-node session usage](../../../client-api/configuration/load-balance/overview.mdx#single-node-session-usage)
+
+
+## Keeping the client topology up-to-date
+
+* Upon Document Store initialization, the client receives the __initial topology list__,
+  after which the client is kept updated at all times about any changes made to it.
+
+* If the topology list (or any other client configuration) has changed on the server,
+  the client will learn about it upon making its __next request__ to the server,
+  and will update its configuration accordingly.
+
+* In addition, every 5 minutes, the client will fetch the current topology from the server
+  if no requests were made within that time frame.
+
+* Any client-configuration settings that are set on the server side __override__ the settings made on the client side.
+
+* For more information see [Topology in the client](../../../client-api/cluster/how-client-integrates-with-replication-and-cluster.mdx#cluster-topology-in-the-client).
+
+
+
+## Client logic for choosing a node
+
+The client uses the following logic (from top to bottom) to determine which node to send the request to:
+
+
+* Use the __specified node__:
+  A client can explicitly specify the target node when executing a [server-maintenance operation](../../../client-api/operations/what-are-operations.mdx#server-maintenance-operations).
+  Learn more in [switch operation to a different node](../../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx).
+* Else, if the session-context feature is enabled, use __LoadBalanceBehavior__:
+  Per session, the client will select a node based on the [session context](../../../client-api/configuration/load-balance/load-balance-behavior.mdx#loadbalancebehavior-options).
+  All `Read & Write` requests made on the session will be directed to that node.
+* Else, if defined, use __ReadBalanceBehavior__:
+  `Read` requests: The client will select a node based on the [read balance options](../../../client-api/configuration/load-balance/read-balance-behavior.mdx#readbalancebehavior-options).
+  `Write` requests: All _Write_ requests will be directed to the preferred node.
+* Else, use the __preferred node__:
+  Use the [preferred node](../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) for both `Read & Write` requests.
+
+
+
+
+
+## The preferred node
+
+* The preferred node is simply the __first__ node in the [topology nodes list](../../../studio/database/settings/manage-database-group.mdx#database-group-topology---view).
+* __By default__, when no load balancing strategy is defined,
+  the client will send all `Read & Write` requests to this node.
+* When the preferred node is in a failure state,
+  the cluster will update the topology, assigning another node to be the preferred one.
+* Once the preferred node is back up and has caught up with all data,
+  it will be placed __last__ in the topology list.
+* If all the nodes in the topology list are in a failure state, then the first node in the list is used as the preferred node.
+  In this case the user will get an error, or the requests will recover if the failure was transient.
+* The preferred node can be explicitly set by:
+  * Reordering the topology list from the [Database Group view](../../../studio/database/settings/manage-database-group.mdx#database-group-topology---actions).
+  * Sending [ReorderDatabaseMembersOperation](../../../client-api/operations/server-wide/reorder-database-members.mdx) from the client code.
+* The cluster may assign a different preferred node when nodes are added to or removed from the database group.
+
+
+
+## Single-node session usage
+
+* When using a [single-node session](../../../client-api/session/cluster-transaction/overview.mdx#single-node),
+  a short delay in replicating changes to all nodes in the cluster is acceptable in most cases.
+
+* If `ReadBalanceBehavior` or `LoadBalanceBehavior` is defined,
+  then the next session you open may access a different node.
+  So if you need to ensure that the next request will be able to _immediately_ read what you just wrote,
+  then use [Write Assurance](../../../client-api/session/saving-changes.mdx#waiting-for-replication---write-assurance).
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/configuration/load-balance/read-balance-behavior.mdx b/versioned_docs/version-7.1/client-api/configuration/load-balance/read-balance-behavior.mdx
new file mode 100644
index 0000000000..2822c87618
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/configuration/load-balance/read-balance-behavior.mdx
@@ -0,0 +1,53 @@
+---
+title: "Read balance behavior"
+hide_table_of_contents: true
+sidebar_label: Read balance behavior
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import ReadBalanceBehaviorCsharp from './_read-balance-behavior-csharp.mdx';
+import ReadBalanceBehaviorPython from './_read-balance-behavior-python.mdx';
+import ReadBalanceBehaviorPhp from './_read-balance-behavior-php.mdx';
+import ReadBalanceBehaviorNodejs from './_read-balance-behavior-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/configuration/serialization.mdx b/versioned_docs/version-7.1/client-api/configuration/serialization.mdx
new file mode 100644
index 0000000000..5fb21b3cf8
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/configuration/serialization.mdx
@@ -0,0 +1,44 @@
+---
+title: "Conventions: Serialization"
+hide_table_of_contents: true
+sidebar_label: Serialization
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import SerializationCsharp from './_serialization-csharp.mdx';
+import SerializationJava from './_serialization-java.mdx';
+
+export const supportedLanguages = ["csharp", "java"];
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/creating-document-store.mdx b/versioned_docs/version-7.1/client-api/creating-document-store.mdx
new file mode 100644
index 0000000000..58a28cfa47
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/creating-document-store.mdx
@@ -0,0 +1,49 @@
+---
+title: "Client API: Creating a Document Store"
+hide_table_of_contents: true
+sidebar_label: Creating Document Store
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import CreatingDocumentStoreCsharp from './_creating-document-store-csharp.mdx';
+import CreatingDocumentStoreJava from './_creating-document-store-java.mdx';
+import CreatingDocumentStoreNodejs from './_creating-document-store-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/_category_.json b/versioned_docs/version-7.1/client-api/data-subscriptions/_category_.json
new file mode 100644
index 0000000000..129df78e9f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 9,
+    "label": "Data Subscriptions"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/_concurrent-subscriptions-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/_concurrent-subscriptions-csharp.mdx
new file mode 100644
index 0000000000..d71a2b699c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/_concurrent-subscriptions-csharp.mdx
@@ -0,0 +1,119 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* With **Concurrent Subscriptions**, multiple data subscription workers can connect to the same subscription task simultaneously.
+
+* Each worker is assigned a different batch of documents to process.
+
+* By processing different batches in parallel, multiple workers can significantly accelerate the consumption of the subscription's contents.
+
+* Documents that were assigned to workers whose connection has ended unexpectedly
+  can be reassigned by the server to available workers.
+  See [connection failure](../../client-api/data-subscriptions/concurrent-subscriptions.mdx#connection-failure) below.
+
+* In this page:
+  * [Defining concurrent workers](../../client-api/data-subscriptions/concurrent-subscriptions.mdx#defining-concurrent-workers)
+  * [Dropping a connection](../../client-api/data-subscriptions/concurrent-subscriptions.mdx#dropping-a-connection)
+  * [Connection failure](../../client-api/data-subscriptions/concurrent-subscriptions.mdx#connection-failure)
+
+
+## Defining concurrent workers
+
+Concurrent workers are defined similarly to other workers, except their
+[strategy](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies)
+is set to [SubscriptionOpeningStrategy.Concurrent](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#multiple-workers-per-subscription-strategy).
+
+* To define a concurrent worker:
+  * Create the worker using [GetSubscriptionWorker](../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker).
+  * Pass it a [SubscriptionWorkerOptions](../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions) instance.
+ * Set the strategy to `SubscriptionOpeningStrategy.Concurrent` + +* Usage: + * Define two concurrent workers + + +{`// Define concurrent subscription workers +var subscriptionWorker1 = store.Subscriptions.GetSubscriptionWorker( + // Set the worker to connect to the "All Orders" subscription task + new SubscriptionWorkerOptions("All Orders") + \{ + // Set Concurrent strategy + Strategy = SubscriptionOpeningStrategy.Concurrent, + MaxDocsPerBatch = 20 + \}); + +var subscriptionWorker2 = store.Subscriptions.GetSubscriptionWorker( + new SubscriptionWorkerOptions("All Orders") + \{ + Strategy = SubscriptionOpeningStrategy.Concurrent, + MaxDocsPerBatch = 20 + \}); +`} + + + * Run both workers + + +{`// Start the concurrent worker. Workers will connect concurrently to the "All Orders" subscription task. +var subscriptionRuntimeTask1 = subscriptionWorker1.Run(batch => +\{ + // process batch + foreach (var item in batch.Items) + \{ + // process item + \} +\}); + +var subscriptionRuntimeTask2 = subscriptionWorker2.Run(batch => +\{ + // process batch + foreach (var item in batch.Items) + \{ + // process item + \} +\}); +`} + + + + + +## Dropping a connection + +* Use `Subscriptions.DropSubscriptionWorker` to **forcefully disconnect** + the specified worker from the subscription it is connected to. + + +{`public void DropSubscriptionWorker(SubscriptionWorker worker, string database = null) +`} + + + +* Usage: + + +{`//drop a concurrent subscription worker +store.Subscriptions.DropSubscriptionWorker(subscriptionWorker2); +`} + + + + + +## Connection failure + +* When a concurrent worker's connection ends unexpectedly, + the server may reassign the documents this worker has been processing to any other concurrent worker that is available. +* A worker that reconnects after a connection failure will be assigned a **new** batch of documents. + It is **not** guaranteed that the new batch will contain the same documents this worker was processing before the disconnection. +* As a result, documents may be processed more than once: + - first by a worker that disconnected unexpectedly without acknowledging the completion of its assigned documents, + - and later by other workers the documents are reassigned to. + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/_concurrent-subscriptions-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/_concurrent-subscriptions-nodejs.mdx new file mode 100644 index 0000000000..4db64cbcf2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/_concurrent-subscriptions-nodejs.mdx @@ -0,0 +1,126 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* With **Concurrent Subscriptions**, multiple data subscription workers can connect to the same subscription task simultaneously. + +* Each worker is assigned a different batch of documents to process. + +* By processing different batches in parallel, multiple workers can significantly accelerate the consumption of the subscription's contents. + +* Documents that were assigned to workers whose connection has ended unexpectedly, + can be reassigned by the server to available workers. + See [connection failure](../../client-api/data-subscriptions/concurrent-subscriptions.mdx#connection-failure) below. 
+ +* In this page: + * [Defining concurrent workers](../../client-api/data-subscriptions/concurrent-subscriptions.mdx#defining-concurrent-workers) + * [Dropping a connection](../../client-api/data-subscriptions/concurrent-subscriptions.mdx#dropping-a-connection) + * [Connection failure](../../client-api/data-subscriptions/concurrent-subscriptions.mdx#connection-failure) + + +## Defining concurrent workers + +Concurrent workers are defined similarly to other workers, except their +[strategy](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies) +is set to [Concurrent](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#multiple-workers-per-subscription-strategy). + +* To define a concurrent worker: + * Create the worker using [getSubscriptionWorker](../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker). + * Pass it a [subscription worker options](../../client-api/data-subscriptions/consumption/api-overview.mdx#subscription-worker-options) object. + * Set the strategy to `Concurrent` + +* Usage: + * Define two concurrent workers + + +{`// Define 2 concurrent subscription workers +// ======================================== + +const options = \{ + // Set concurrent strategy + strategy: "Concurrent", + subscriptionName: "Get all orders", + maxDocsPerBatch: 20 +\}; + +const worker1 = documentStore.subscriptions.getSubscriptionWorker(options); +const worker2 = documentStore.subscriptions.getSubscriptionWorker(options); +`} + + + * Run both workers + + +{`worker1.on("batch", (batch, callback) => \{ + try \{ + for (const item of batch.items) \{ + // Process item + \} + callback(); + + \} catch(err) \{ + callback(err); + \} +\}); + +worker2.on("batch", (batch, callback) => \{ + try \{ + for (const item of batch.items) \{ + // Process item + \} + callback(); + + \} catch(err) \{ + callback(err); + \} +\}); +`} + + + + + +## Dropping a connection + +* Use `dropSubscriptionWorker` to **forcefully disconnect** + the specified worker from the subscription it is connected to. + +* Use `dropConnection` to disconnect ALL workers connected to the specified subscription. + + + +{`// Drop connection for worker2 +await documentStore.subscriptions.dropSubscriptionWorker(worker2); +`} + + + + + +{`// Available overloads: +dropConnection(options); +dropConnection(options, database); +dropSubscriptionWorker(worker); +dropSubscriptionWorker(worker, database); +`} + + + + + +## Connection failure + +* When a concurrent worker's connection ends unexpectedly, + the server may reassign the documents this worker has been processing to any other concurrent worker that is available. +* A worker that reconnects after a connection failure will be assigned a **new** batch of documents. + It is **not** guaranteed that the new batch will contain the same documents this worker was processing before the disconnection. +* As a result, documents may be processed more than once: + - first by a worker that disconnected unexpectedly without acknowledging the completion of its assigned documents, + - and later by other workers the documents are reassigned to. 
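+
+Because delivery is effectively at-least-once, the processing logic of concurrent workers should be idempotent.
+The following is a minimal sketch of such a handler, reusing `worker1` from above; the in-memory `alreadyProcessed`
+set and the `item.id` lookup are illustrative assumptions - a real worker would persist processed IDs externally.
+
+
+{`// A sketch of idempotent batch processing for a concurrent worker.
+// NOTE: the in-memory set is illustrative only - persist processed IDs
+// externally if deduplication must survive worker restarts.
+const alreadyProcessed = new Set();
+
+worker1.on("batch", (batch, callback) => \{
+    try \{
+        for (const item of batch.items) \{
+            if (alreadyProcessed.has(item.id)) \{
+                continue; // Item was reassigned after a connection failure
+            \}
+            // Process item
+            alreadyProcessed.add(item.id);
+        \}
+        callback();
+    \} catch (err) \{
+        callback(err);
+    \}
+\});
+`}
+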
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/_what-are-data-subscriptions-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/_what-are-data-subscriptions-csharp.mdx
new file mode 100644
index 0000000000..cd09e4587d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/_what-are-data-subscriptions-csharp.mdx
@@ -0,0 +1,160 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Data subscriptions provide a reliable and handy way to perform document processing on the client side.
+* The server sends batches of documents to the client.
+  The client then processes the batch and will receive the next one only after it acknowledges the batch was processed.
+  The server persists the processing progress, allowing you to pause and continue the processing.
+
+* In this page:
+   * [Data subscription consumption](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscription-consumption)
+   * [What defines a data subscription](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#what-defines-a-data-subscription)
+   * [Documents processing](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#documents-processing)
+   * [Progress Persistence](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#progress-persistence)
+   * [How the worker communicates with the server](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#how-the-worker-communicates-with-the-server)
+   * [Working with multiple clients](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#working-with-multiple-clients)
+   * [Data subscriptions usage example](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscriptions-usage-example)
+
+
+## Data subscription consumption
+
+* Data subscriptions are consumed by clients, called **Subscription Workers**.
+* You can determine whether workers can connect to a subscription
+  [concurrently, or only one at a time](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-interplay).
+* A worker that connects to a data subscription receives a batch of documents and gets to process it.
+  Depending on the code that the client provided the worker with, processing can take from seconds to hours.
+  When all documents are processed, the worker informs the server of its progress, and the server can send it the next batch.
+
+
+
+## What defines a data subscription
+
+Data subscriptions are defined by the server-side definition and by the worker connecting to it:
+
+1. [Subscription Creation Options](../../client-api/data-subscriptions/creation/api-overview.mdx#subscriptioncreationoptions): The documents that will be sent to the worker, their filtering, and projection.
+
+2. [Subscription Worker Options](../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions): Worker batch processing logic, batch size, and interaction with other connections.
+
+
+
+## Documents processing
+
+Documents are sent in batches, and progress will be registered only after the whole batch is processed and acknowledged.
+Documents are always sent in Etag order, which means that data that has already been processed and acknowledged won't be sent twice, except for the following scenarios:
+
+1. If the document was changed after it was already sent.
+
+2. If data was received but not acknowledged.
+
+3. In case of subscription failover (`Enterprise feature`), there is a chance that documents will be processed again, because it's not always possible to find the same starting point on a different machine.
+
+
+If the database has Revisions defined, the subscription can be configured to process pairs
+of subsequent document revisions.
+Read more here: [revisions support](../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx)
+
+
+
+
+## Progress Persistence
+
+* The processing progress is persisted on the server, and therefore the subscription
+  task can be paused and resumed from the point at which it stopped.
+* The persistence mechanism also ensures that no documents are missed even in the
+  presence of failure, whether it's client-side related, communication, or any other disaster.
+* Subscription progress is stored at the cluster level in the `Enterprise edition`.
+  In the case of a node failure, the processing can be automatically failed over to another node.
+* The usage of **Change Vectors** allows us to continue from a point that is close to
+  the last point reached before failure rather than starting the process from scratch.
+
+
+## How the worker communicates with the server
+
+A worker communicates with the data subscription using a custom protocol on top of a long-lived TCP connection. Each successful batch processing consists of these stages:
+
+1. The server sends documents in a batch.
+
+2. The worker sends an acknowledgment message after it finishes processing the batch.
+
+3. The server sends the client a notification that the acknowledgment has been persisted and that it is ready to send the next batch.
+
+
+When the responsible node handling the subscription is down, the subscription task can be manually reassigned to another node in the cluster.
+With the Enterprise license, the cluster will automatically reassign the work to another node.
+
+
+* The status of the TCP connection is also used to determine the "state" of the worker process.
+  If the subscription and its workers implement a
+  [One Worker Per Subscription](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-interplay)
+  strategy, as long as the connection is alive the server will not allow
+  other clients to consume the subscription.
+* The TCP connection is kept alive and monitored using "heartbeat" messages.
+  If the connection is found nonfunctional, the current batch progress will be restarted.
+
+See the sequence diagram below that summarizes the lifespan of a subscription connection.
+
+![Subscription document processing](./assets/SubscriptionsDocumentProcessing.png)
+
+
+
+## Working with multiple clients
+
+You can use a **Subscription Worker Strategy** to determine whether multiple
+workers of the same subscription can connect to it one by one, or **concurrently**.
+
+* **One Worker Per Subscription Strategies**
+  The one-worker-per-subscription strategies allow workers of the same subscription
+  to connect to it **one worker at a time**, with different strategies to support various
+  inter-worker scenarios.
+   * One worker is allowed to take the place of another in the processing of a subscription.
+     Thanks to subscriptions persistence, the worker will be able to continue the work
+     starting at the point its predecessor got to.
+   * You can also configure a worker to wait for an existing connection to fail and take
+     its place, or to force an existing connection to close.
+ * Read more about these strategies [here](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#one-worker-per-subscription-strategies). + +* **Concurrent Subscription Strategy** + Using the concurrent subscription strategy, multiple workers of the same subscription can + connect to it simultaneously and divide the documents processing load between them to speed it up. + * Batch processing is divided between the multiple workers. + * Connection failure is handled by assigning batches of failing workers to + active available workers. + * Read more about this strategy [here](../../client-api/data-subscriptions/concurrent-subscriptions.mdx). + + + +## Data subscriptions usage example + +Data subscriptions are accessible by a document store. +Here's an example of creating and using a data subscription: + + + +{`public async Task Worker(IDocumentStore store, CancellationToken cancellationToken) +\{ + // Create the ongoing subscription task on the server + string subscriptionName = await store.Subscriptions + .CreateAsync(x => x.Company == "companies/11"); + + // Create a worker on the client that will consume the subscription + SubscriptionWorker worker = store.Subscriptions + .GetSubscriptionWorker(subscriptionName); + + // Run the worker task and process data received from the subscription + Task workerTask = worker.Run(x => x.Items.ForEach(item => + Console.WriteLine($"Order #\{item.Result.Id\} will be shipped via: \{item.Result.ShipVia\}")), + cancellationToken); + + await workerTask; +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/_what-are-data-subscriptions-java.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/_what-are-data-subscriptions-java.mdx new file mode 100644 index 0000000000..c2d1857b87 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/_what-are-data-subscriptions-java.mdx @@ -0,0 +1,134 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Data subscriptions provide a reliable and handy way to perform document processing on the client side. +* The server sends batches of documents to the client. + The client then processes the batch and will receive the next one only after it acknowledges the batch was processed. + The server persists the processing progress, allowing you to pause and continue the processing. 
+
+* In this page:
+   * [Data subscription consumption](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscription-consumption)
+   * [What defines a data subscription](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#what-defines-a-data-subscription)
+   * [Documents processing](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#documents-processing)
+   * [Progress Persistence](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#progress-persistence)
+   * [How the worker communicates with the server](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#how-the-worker-communicates-with-the-server)
+   * [Working with multiple clients](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#working-with-multiple-clients)
+   * [Data subscriptions usage example](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscriptions-usage-example)
+
+
+
+## Data subscription consumption
+
+Data subscriptions are consumed by clients, called subscription workers. At any given moment, only one worker can be connected to a data subscription.
+A worker connected to a data subscription receives a batch of documents and processes it.
+Depending on the code that the client provided the worker with, processing can take from seconds to hours. When the worker is done, it informs the server of its progress, and the server is ready to send the next batch.
+
+
+
+## What defines a data subscription
+
+Data subscriptions are defined by the server-side definition and by the worker connecting to it:
+
+1. [Subscription Creation Options](../../client-api/data-subscriptions/creation/api-overview.mdx#subscriptioncreationoptions): The documents that will be received, their filtering, and projection.
+
+2. [Subscription Worker Options](../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions): Worker batch processing logic, batch size, and interaction with other connections.
+
+
+
+## Documents processing
+
+Documents are sent in batches, and progress will be registered only after the whole batch is processed and acknowledged.
+Documents are always sent in Etag order, which means that data that has already been processed and acknowledged won't be sent twice, except for the following scenarios:
+
+1. If the document was changed after it was already sent.
+
+2. If data was received but not acknowledged.
+
+3. In case of subscription failover (`Enterprise feature`), there is a chance that documents will be processed again, because it's not always possible to find the same starting point on a different machine.
+
+
+If the database has Revisions defined, the subscription can be configured to process pairs
+of subsequent document revisions.
+Read more here: [revisions support](../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx)
+
+
+
+
+## Progress Persistence
+
+Processing progress is persisted, and the subscription can therefore be paused and resumed from the point at which it stopped.
+The persistence mechanism also ensures that no documents are missed even in the presence of failure, whether it's client-side related, communication, or any other disaster.
+Subscription progress is stored at the cluster level in the `Enterprise edition`. In the case of node failure, the processing can be automatically failed over to another node.
+The usage of Change Vectors allows us to continue from a point that is close to the last point reached before failure rather than starting the process from scratch.
+
+
+## How the worker communicates with the server
+
+A worker communicates with the data subscription using a custom protocol on top of a long-lived TCP connection. Each successful batch processing consists of these stages:
+
+1. The server sends documents in a batch.
+
+2. The worker sends an acknowledgment message after it finishes processing the batch.
+
+3. The server sends the client a notification that the acknowledgment has been persisted and that it is ready to send the next batch.
+
+
+When the responsible node handling the subscription is down, the subscription task can be manually reassigned to another node in the cluster.
+With the Enterprise license, the cluster will automatically reassign the work to another node.
+
+
+The TCP connection also represents the "state" of the worker process; as long as it's alive, the server will not allow other clients to consume the subscription.
+The TCP connection is kept alive and monitored using "heartbeat" messages. If it's found nonfunctional, the current batch progress will be restarted.
+
+See the sequence diagram below that summarizes the lifetime of a subscription connection.
+
+![Subscription document processing](./assets/SubscriptionsDocumentProcessing.png)
+
+
+
+## Working with multiple clients
+
+In order to support various inter-worker scenarios, one worker is allowed to take the place of another in the processing of a subscription.
+Thanks to subscriptions persistence, the worker will be able to continue the work from the point its predecessor stopped.
+
+A worker can be configured to wait for an existing connection to fail and take its place, or to force close an existing connection, etc. See more in [Workers interplay](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#workers-interplay).
+
+
+
+## Data subscriptions usage example
+
+Data subscriptions are accessible by a document store.
Here's an example of an ad-hoc creation and usage of data subscriptions:
+
+
+
+{`public void worker(IDocumentStore store) \{
+
+    // Create the ongoing subscription task on the server
+    SubscriptionCreationOptions options = new SubscriptionCreationOptions();
+    options.setQuery("from Orders where Company = 'companies/11'");
+    String subscriptionName = store.subscriptions().create(Order.class, options);
+
+    // Create a worker on the client that will consume the subscription
+    SubscriptionWorker worker = store
+        .subscriptions().getSubscriptionWorker(Order.class, subscriptionName);
+
+    // Run the worker task and process data received from the subscription
+    worker.run(x -> \{
+        for (SubscriptionBatch.Item item : x.getItems()) \{
+            System.out.println("Order #" +
+                    item.getResult().getId() +
+                    " will be shipped via: " + item.getResult().getShipVia());
+        \}
+    \});
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/_what-are-data-subscriptions-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/_what-are-data-subscriptions-nodejs.mdx
new file mode 100644
index 0000000000..12e6eedbd2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/_what-are-data-subscriptions-nodejs.mdx
@@ -0,0 +1,133 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Data subscriptions provide a reliable and handy way to perform document processing on the client side.
+* The server sends batches of documents to the client.
+  The client then processes the batch and will receive the next one only after it acknowledges the batch was processed.
+  The server persists the processing progress, allowing you to pause and continue the processing.
+
+* In this page:
+   * [Data subscription consumption](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscription-consumption)
+   * [What defines a data subscription](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#what-defines-a-data-subscription)
+   * [Documents processing](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#documents-processing)
+   * [Progress Persistence](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#progress-persistence)
+   * [How the worker communicates with the server](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#how-the-worker-communicates-with-the-server)
+   * [Working with multiple clients](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#working-with-multiple-clients)
+   * [Data subscriptions usage example](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscriptions-usage-example)
+
+
+
+## Data subscription consumption
+
+Data subscriptions are consumed by clients, called subscription workers. At any given moment, only one worker can be connected to a data subscription.
+A worker connected to a data subscription receives a batch of documents and processes it.
+Depending on the code that the client provided the worker with, processing can take from seconds to hours. When the worker is done, it informs the server of its progress, and the server is ready to send the next batch.
+
+
+
+## What defines a data subscription
+
+Data subscriptions are defined by the server-side definition and by the worker connecting to it:
+
+1. [Subscription Creation Options](../../client-api/data-subscriptions/creation/api-overview.mdx#subscriptioncreationoptions): The documents that will be received, their filtering, and projection.
+
+2. [Subscription Worker Options](../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions): Worker batch processing logic, batch size, and interaction with other connections.
+
+
+
+## Documents processing
+
+Documents are sent in batches, and progress will be registered only after the whole batch is processed and acknowledged.
+Documents are always sent in Etag order, which means that data that has already been processed and acknowledged won't be sent twice, except for the following scenarios:
+
+1. If the document was changed after it was already sent.
+
+2. If data was received but not acknowledged.
+
+3. In case of subscription failover (`Enterprise feature`), there is a chance that documents will be processed again, because it's not always possible to find the same starting point on a different machine.
+
+
+If the database has Revisions defined, the subscription can be configured to process pairs
+of subsequent document revisions.
+Read more here: [revisions support](../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx)
+
+
+
+
+## Progress Persistence
+
+Processing progress is persisted, and the subscription can therefore be paused and resumed from the point at which it stopped.
+The persistence mechanism also ensures that no documents are missed even in the presence of failure, whether it's client-side related, communication, or any other disaster.
+Subscription progress is stored at the cluster level in the `Enterprise edition`. In the case of node failure, the processing can be automatically failed over to another node.
+The usage of Change Vectors allows us to continue from a point that is close to the last point reached before failure rather than starting the process from scratch.
+
+
+## How the worker communicates with the server
+
+A worker communicates with the data subscription using a custom protocol on top of a long-lived TCP connection. Each successful batch processing consists of these stages:
+
+1. The server sends documents in a batch.
+
+2. The worker sends an acknowledgment message after it finishes processing the batch.
+
+3. The server sends the client a notification that the acknowledgment has been persisted and that it is ready to send the next batch.
+
+
+When the responsible node handling the subscription is down, the subscription task can be manually reassigned to another node in the cluster.
+With the Enterprise license, the cluster will automatically reassign the work to another node.
+
+
+The TCP connection also represents the "state" of the worker process; as long as it's alive, the server will not allow other clients to consume the subscription.
+The TCP connection is kept alive and monitored using "heartbeat" messages. If it's found nonfunctional, the current batch progress will be restarted.
+
+See the sequence diagram below that summarizes the lifetime of a subscription connection.
+
+![Subscription document processing](./assets/SubscriptionsDocumentProcessing.png)
+
+
+
+## Working with multiple clients
+
+In order to support various inter-worker scenarios, one worker is allowed to take the place of another in the processing of a subscription.
+Thanks to subscriptions persistence, the worker will be able to continue the work from the point its predecessor stopped.
+
+A worker can be configured to wait for an existing connection to fail and take its place, or to force close an existing connection, etc. See more in [Workers interplay](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#workers-interplay), and see the strategy sketch below.
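+
+As a rough sketch, the two behaviors could be configured as follows; the `"WaitForFree"` and `"TakeOver"`
+strategy names are assumptions here (mirroring the C# `SubscriptionOpeningStrategy` values), so verify them
+against the [Workers interplay](../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#workers-interplay) page:
+
+
+{`// A sketch of configuring inter-worker behavior via the strategy option.
+// The strategy names are assumed to mirror the C# client's
+// SubscriptionOpeningStrategy values - verify against the API reference.
+const waitingWorker = store.subscriptions.getSubscriptionWorker(\{
+    subscriptionName: "ordersSubscription", // illustrative name
+    strategy: "WaitForFree" // Connect only once the current connection ends
+\});
+
+const takeOverWorker = store.subscriptions.getSubscriptionWorker(\{
+    subscriptionName: "ordersSubscription",
+    strategy: "TakeOver" // Force the currently connected worker out
+\});
+`}
+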
+
+
+
+## Data subscriptions usage example
+
+Data subscriptions are accessible by a document store. Here's an example of an ad-hoc creation and usage of data subscriptions:
+
+
+
+{`async function worker() \{
+
+    // Create the ongoing subscription task on the server
+    const subscriptionName = await store.subscriptions.create(\{
+        query: "from Orders where Company = 'companies/11'"
+    \});
+
+    // Create a worker on the client that will consume the subscription
+    const worker = store.subscriptions.getSubscriptionWorker(subscriptionName);
+
+    // Listen for and process data received in batches from the subscription
+    worker.on("batch", (batch, callback) => \{
+        for (const item of batch.items) \{
+            console.log(\`Order #$\{item.result.Id\} will be shipped via: $\{item.result.ShipVia\}\`);
+        \}
+
+        callback();
+    \});
+\}
+`}
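+
+Because the worker in the example above is long-running, it is worth wiring up worker-level error handling as well.
+A minimal sketch, assuming the worker also emits an "error" event in the same EventEmitter style as "batch":
+
+
+{`// A sketch of worker-level error handling; the "error" event name is an
+// assumption based on the client's EventEmitter conventions shown above.
+worker.on("error", (err) => \{
+    console.error("Subscription worker failed:", err);
+    // Decide here whether to recreate the worker or stop processing
+\});
+`}
+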
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_category_.json b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_category_.json
new file mode 100644
index 0000000000..c206697952
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 3,
+  "label": "Advanced topics"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_maintenance-operations-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_maintenance-operations-csharp.mdx
new file mode 100644
index 0000000000..fcc71d4ce2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_maintenance-operations-csharp.mdx
@@ -0,0 +1,206 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article covers data subscriptions maintenance operations.
+
+* In this page:
+   * [DocumentSubscriptions class](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#documentsubscriptions-class)
+   * [Delete subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#delete-subscription)
+   * [Disable subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#disable-subscription)
+   * [Enable subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#enable-subscription)
+   * [Update subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#update-subscription)
+   * [Drop connection](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#drop-connection)
+   * [Get subscription state](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#get-subscription-state)
+
+
+## DocumentSubscriptions class
+
+The `DocumentSubscriptions` class manages all interaction with the data subscriptions.
+The class is available through `DocumentStore`'s `Subscriptions` property.
+
+| Method Signature | Return type | Description |
+|------------------|-------------|-------------|
+| **Create<T>(SubscriptionCreationOptions<T> options, string database)** | `string` | Create a new data subscription. |
+| **Create(SubscriptionCreationOptions criteria, string database)** | `string` | Create a new data subscription. |
+| **Create<T>(Expression<Func<T, bool>> predicate, SubscriptionCreationOptions options, string database)** | `string` | Create a new data subscription. |
+| **CreateAsync<T>(SubscriptionCreationOptions<T> options, string database)** | `Task<string>` | Create a new data subscription. |
+| **CreateAsync<T>(Expression<Func<T, bool>> predicate, SubscriptionCreationOptions options, string database)** | `Task<string>` | Create a new data subscription. |
+| **Delete(string name, string database)** | `void` | Delete subscription. |
+| **DeleteAsync(string name, string database)** | `Task` | Delete subscription. |
+| **DropConnection(string name, string database)** | `void` | Drop all existing subscription connections with workers. |
+| **DropConnectionAsync(string name, string database)** | `Task` | Drop all existing subscription connections with workers. |
+| **DropSubscriptionWorker<T>(SubscriptionWorker<T> worker, string database = null)** | `void` | Drop an existing subscription connection with a worker. |
+| **Enable(string name, string database)** | `void` | Enable existing subscription. |
+| **EnableAsync(string name, string database)** | `Task` | Enable existing subscription. |
+| **Disable(string name, string database)** | `void` | Disable existing subscription. |
+| **DisableAsync(string name, string database)** | `Task` | Disable existing subscription. |
+| **GetSubscriptions(int start, int take, string database)** | `List<SubscriptionState>` | Return a list of subscriptions. |
+| **GetSubscriptionsAsync(int start, int take, string database)** | `Task<List<SubscriptionState>>` | Return a list of subscriptions. |
+| **GetSubscriptionState(string subscriptionName, string database)** | `SubscriptionState` | Get a specific subscription's state. |
+| **GetSubscriptionStateAsync(string subscriptionName, string database)** | `Task<SubscriptionState>` | Get a specific subscription's state. |
+| **GetSubscriptionWorker<T>(string subscriptionName, string database)** | `SubscriptionWorker<T>` | Generate a subscription worker, using default configurations, that processes documents deserialized to the `T` type. |
+| **GetSubscriptionWorker(string subscriptionName, string database)** | `SubscriptionWorker<dynamic>` | Generate a subscription worker, using default configurations, that processes documents in their raw `BlittableJsonReader` form, wrapped by a dynamic object. |
+| **GetSubscriptionWorker<T>(SubscriptionWorkerOptions options, string database)** | `SubscriptionWorker<T>` | Generate a subscription worker, using the provided configuration, that processes documents deserialized to the `T` type. |
+| **GetSubscriptionWorker(SubscriptionWorkerOptions options, string database)** | `SubscriptionWorker<dynamic>` | Generate a subscription worker, using the provided configuration, that processes documents in their raw `BlittableJsonReader` form, wrapped by a dynamic object. |
+| **Update(SubscriptionUpdateOptions options, string database = null)** | `string` | Update an existing data subscription. |
+| **UpdateAsync(SubscriptionUpdateOptions options, string database = null, CancellationToken token = default)** | `Task<string>` | Update an existing data subscription. |
+
+
+
+## Delete subscription
+
+Subscriptions can be entirely deleted from the system.
+
+This operation can be very useful in ad-hoc subscription scenarios, where a lot of subscription task information may accumulate, making task management very hard.
+
+
+
+{`void Delete(string name, string database = null);
+Task DeleteAsync(string name, string database = null, CancellationToken token = default);
+`}
+
+
+
+usage:
+
+
+
+{`store.Subscriptions.Delete(subscriptionName);
+`}
+
+
+
+
+
+## Disable subscription
+
+Existing subscription tasks can be disabled from the client.
+
+
+
+{`void Disable(string name, string database = null);
+Task DisableAsync(string name, string database = null, CancellationToken token = default);
+`}
+
+
+
+usage:
+
+
+
+{`store.Subscriptions.Disable(subscriptionName);
+`}
+
+
+
+
+
+## Enable subscription
+
+Existing subscription tasks can be enabled from the client.
+This operation can be useful for already disabled subscriptions.
+
+
+
+{`void Enable(string name, string database = null);
+Task EnableAsync(string name, string database = null, CancellationToken token = default);
+`}
+
+
+
+usage:
+
+
+
+{`store.Subscriptions.Enable(subscriptionName);
+`}
+
+
+
+
+
+## Update subscription
+
+See [examples](../../../client-api/data-subscriptions/creation/examples.mdx#update-existing-subscription)
+and [API description](../../../client-api/data-subscriptions/creation/api-overview.mdx#update-subscription).
+
+
+
+{`string Update(SubscriptionUpdateOptions options, string database = null);
+
+Task<string> UpdateAsync(SubscriptionUpdateOptions options, string database = null,
+    CancellationToken token = default);
+`}
+
+
+
+
+
+## Drop connection
+
+Active subscription connections established by workers can be dropped remotely from the client.
+Once dropped, the worker will not attempt to reconnect to the server.
+
+
+
+{`void DropConnection(string name, string database = null);
+Task DropConnectionAsync(string name, string database = null, CancellationToken token = default);
+`}
+
+
+
+usage:
+
+
+
+{`store.Subscriptions.DropConnection(subscriptionName);
+`}
+
+
+
+
+
+## Get subscription state
+
+
+
+{`SubscriptionState GetSubscriptionState(string subscriptionName, string database = null);
+Task<SubscriptionState> GetSubscriptionStateAsync(string subscriptionName, string database = null, CancellationToken token = default);
+`}
+
+
+
+usage:
+
+
+
+{`var subscriptionState = store.Subscriptions.GetSubscriptionState(subscriptionName);
+`}
+
+
+
+
+
+##### SubscriptionState
+
+| Member | Type | Description |
+|--------|------|-------------|
+| **Query** | `string` | Subscription's RQL-like query. |
+| **LastBatchAckTime** | `DateTime?` | Last time a batch processing progress was acknowledged. |
+| **NodeTag** | `string` | Processing server's node tag. |
+| **MentorNode** | `string` | The mentor node that was manually set. |
+| **SubscriptionName** | `string` | The subscription's name, which is also its unique identifier. |
+| **SubscriptionId** | `long` | Subscription's internal identifier (the cluster's operation etag during subscription creation). |
+| **ChangeVectorForNextBatchStartingPoint** | `string` | The Change Vector from which the subscription will begin sending documents.<br/>This value is updated on batch acknowledgement and can also be set manually. |
+| **Disabled** | `bool` | If `true`, the subscription will not allow workers to connect. |
+| **LastClientConnectionTime** | `DateTime?` | Time when the last client was connected (value sustained after disconnection). |
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_maintenance-operations-java.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_maintenance-operations-java.mdx
new file mode 100644
index 0000000000..16aecf7986
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_maintenance-operations-java.mdx
@@ -0,0 +1,160 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This page covers data subscriptions maintenance operations:
+   * [Deleting subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#deleting-subscription)
+   * [Dropping connection](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#dropping-connection)
+   * [Disabling subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#disabling-subscription)
+   * [Updating subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#updating-subscription)
+   * [Getting subscription status](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#getting-subscription-status)
+
+
+## Deleting subscription
+
+Subscriptions can be entirely deleted from the system.
+
+This operation can be very useful in ad-hoc subscription scenarios, where a lot of subscription task information may accumulate, making task management very hard.
+
+
+
+{`void delete(String name);
+
+void delete(String name, String database);
+`}
+
+
+
+usage:
+
+
+
+{`store.subscriptions().delete(subscriptionName);
+`}
+
+
+
+
+
+## Dropping connection
+
+Subscription connections with workers can be dropped remotely.
+A dropped worker will not try to reconnect to the server.
+
+
+
+{`void dropConnection(String name);
+
+void dropConnection(String name, String database);
+`}
+
+
+
+usage:
+
+
+
+{`store.subscriptions().dropConnection(subscriptionName);
+`}
+
+
+
+
+
+## Disabling subscription
+
+
+This operation can only be performed through the Management Studio.
+
+
+
+
+## Updating subscription
+
+
+This operation can only be performed through the Management Studio.
+
+
+
+
+## Getting subscription status
+
+
+
+{`SubscriptionState getSubscriptionState(String subscriptionName);
+
+SubscriptionState getSubscriptionState(String subscriptionName, String database);
+`}
+
+
+
+usage:
+
+
+
+{`SubscriptionState subscriptionState = store.subscriptions().getSubscriptionState(subscriptionName);
+`}
+
+
+
+
+
+| Member | Type | Description |
+|--------|:-----|-------------|
+| **query** | `String` | Subscription's RQL-like query. |
+| **lastBatchAckTime** | `Date` | Last time a batch processing progress was acknowledged. |
+| **nodeTag** | `String` | Processing server's node tag. |
+| **mentorNode** | `String` | The mentor node that was manually set. |
+| **subscriptionName** | `String` | Subscription's name, which is also its unique identifier. |
+| **subscriptionId** | `long` | Subscription's internal identifier (cluster's operation etag during subscription creation). |
+| **changeVectorForNextBatchStartingPoint** | `String` | The change vector from which the subscription will send documents. This value is updated manually, or automatically on batch acknowledgment. |
+| **disabled** | `boolean` | If `true`, the subscription will not allow workers to connect. |
+| **lastClientConnectionTime** | `Date` | Time when the last client was connected (value sustained after disconnection). |
+
+
+
+
+
+## DocumentSubscriptions class
+
+The `DocumentSubscriptions` class manages all interaction with the data subscriptions.
+The class is available through `DocumentStore`'s `subscriptions()` method.
+
+| Method Signature | Return type | Description |
+|------------------|:------------|-------------|
+| **create(SubscriptionCreationOptions options)** | `String` | Creates a new data subscription. |
+| **create(SubscriptionCreationOptions options, String database)** | `String` | Creates a new data subscription. |
+| **create(Class<T> clazz)** | `String` | Creates a new data subscription. |
+| **create(Class<T> clazz, SubscriptionCreationOptions options)** | `String` | Creates a new data subscription. |
+| **create(Class<T> clazz, SubscriptionCreationOptions options, String database)** | `String` | Creates a new data subscription. |
+| **createForRevisions(Class<T> clazz)** | `String` | Creates a new data subscription. |
+| **createForRevisions(Class<T> clazz, SubscriptionCreationOptions options)** | `String` | Creates a new data subscription. |
+| **createForRevisions(Class<T> clazz, SubscriptionCreationOptions options, String database)** | `String` | Creates a new data subscription. |
+| **delete(String name)** | `void` | Deletes subscription. |
+| **delete(String name, String database)** | `void` | Deletes subscription. |
+| **dropConnection(String name)** | `void` | Drops existing subscription connection with worker. |
+| **dropConnection(String name, String database)** | `void` | Drops existing subscription connection with worker. |
+| **getSubscriptions(int start, int take)** | `List<SubscriptionState>` | Returns a list of subscriptions. |
+| **getSubscriptions(int start, int take, String database)** | `List<SubscriptionState>` | Returns a list of subscriptions. |
+| **getSubscriptionState(String subscriptionName)** | `SubscriptionState` | Gets a specific subscription's state. |
+| **getSubscriptionState(String subscriptionName, String database)** | `SubscriptionState` | Gets a specific subscription's state. |
+| **getSubscriptionWorker(String subscriptionName)** | `SubscriptionWorker<ObjectNode>` | Generates a subscription worker, using default configurations, that processes documents in their raw `ObjectNode` type. |
+| **getSubscriptionWorker(String subscriptionName, String database)** | `SubscriptionWorker<ObjectNode>` | Generates a subscription worker, using default configurations, that processes documents in their raw `ObjectNode` type. |
+| **getSubscriptionWorker(SubscriptionWorkerOptions options)** | `SubscriptionWorker<ObjectNode>` | Generates a subscription worker, using the provided configuration, that processes documents in their raw `ObjectNode` type. |
+| **getSubscriptionWorker(SubscriptionWorkerOptions options, String database)** | `SubscriptionWorker<ObjectNode>` | Generates a subscription worker, using the provided configuration, that processes documents in their raw `ObjectNode` type. |
+| **getSubscriptionWorker<T>(Class<T> clazz, String subscriptionName)** | `SubscriptionWorker<T>` | Generates a subscription worker, using default configurations, that processes documents deserialized to the `T` type. |
+| **getSubscriptionWorker<T>(Class<T> clazz, String subscriptionName, String database)** | `SubscriptionWorker<T>` | Generates a subscription worker, using default configurations, that processes documents deserialized to the `T` type. |
+| **getSubscriptionWorker<T>(Class<T> clazz, SubscriptionWorkerOptions options)** | `SubscriptionWorker<T>` | Generates a subscription worker, using the provided configuration, that processes documents deserialized to the `T` type. |
+| **getSubscriptionWorker<T>(Class<T> clazz, SubscriptionWorkerOptions options, String database)** | `SubscriptionWorker<T>` | Generates a subscription worker, using the provided configuration, that processes documents deserialized to the `T` type. |
+| **getSubscriptionWorkerForRevisions<T>(Class<T> clazz, String subscriptionName)** | `SubscriptionWorker` | Generates a subscription worker, using default configurations, that processes documents deserialized to the `T` type. |
+| **getSubscriptionWorkerForRevisions<T>(Class<T> clazz, String subscriptionName, String database)** | `SubscriptionWorker` | Generates a subscription worker, using default configurations, that processes documents deserialized to the `T` type. |
+| **getSubscriptionWorkerForRevisions<T>(Class<T> clazz, SubscriptionWorkerOptions options)** | `SubscriptionWorker` | Generates a subscription worker, using the provided configuration, that processes documents deserialized to the `T` type. |
+| **getSubscriptionWorkerForRevisions<T>(Class<T> clazz, SubscriptionWorkerOptions options, String database)** | `SubscriptionWorker` | Generates a subscription worker, using the provided configuration, that processes documents deserialized to the `T` type. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_maintenance-operations-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_maintenance-operations-nodejs.mdx
new file mode 100644
index 0000000000..a62165b301
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_maintenance-operations-nodejs.mdx
@@ -0,0 +1,252 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article covers data subscriptions maintenance operations.
+ +* In this page: + * [Delete subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#delete-subscription) + * [Disable subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#disable-subscription) + * [Enable subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#enable-subscription) + * [Update subscription](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#update-subscription) + * [Drop connection](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#drop-connection) + * [Get subscriptions](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#get-subscriptions) + * [Get subscription state](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#get-subscription-state) + * [DocumentSubscriptions class](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#documentsubscriptions-class) + + +## Delete subscription + +Subscription tasks can be entirely deleted from the system. + + + +{`await documentStore.subscriptions.delete("subscriptionNameToDelete"); +`} + + + + +{`// Available overloads: +delete(name); +delete(name, database); +`} + + + + + +## Disable subscription + +Existing subscription tasks can be disabled from the client. + + + +{`await documentStore.subscriptions.disable("subscriptionNameToDisable"); +`} + + + + +{`// Available overloads: +disable(name); +disable(name, database); +`} + + + + + +## Enable subscription + +Existing subscription tasks can be enabled from the client. +This operation can be useful for already disabled subscriptions. + + + +{`await documentStore.subscriptions.enable("subscriptionNameToEnable"); +`} + + + + +{`// Available overloads: +enable(name); +enable(name, database); +`} + + + + + +## Update subscription + +See [examples](../../../client-api/data-subscriptions/creation/examples.mdx#update-existing-subscription) +and [API description](../../../client-api/data-subscriptions/creation/api-overview.mdx#update-subscription). + + + +{`const updateOptions = \{ + id: "", + query: "" + // ... +\} +await documentStore.subscriptions.update(updateOptions); +`} + + + + +{`// Available overloads: +update(options); +update(options, database); +`} + + + + + +## Drop connection + +Active subscription connections established by workers can be dropped remotely from the client. +Once dropped, the worker will not attempt to reconnect to the server. + + + +{`// Drop all connections to the subscription: +// ========================================= + +await documentStore.subscriptions.dropConnection("subscriptionName"); + +// Drop specific worker connection: +// =============================== + +const workerOptions = \{ + subscriptionName: "subscriptionName", + // ... +\}; + +const worker = documentStore.subscriptions.getSubscriptionWorker(workerOptions); + + +worker.on("batch", (batch, callback) => \{ + // worker processing logic +\}); + +await documentStore.subscriptions.dropConnection(worker); +`} + + + + +{`// Available overloads: +dropConnection(options); +dropConnection(options, database); +dropSubscriptionWorker(worker); +dropSubscriptionWorker(worker, database); +`} + + + + + +## Get subscriptions + +Get a list of all existing subscription tasks in the database. 
+ + + +{`const subscriptions = await documentStore.subscriptions.getSubscriptions(0, 10); +`} + + + + +{`// Available overloads: +getSubscriptions(start, take); +getSubscriptions(start, take, database); +`} + + + + + +## Get subscription state + + + +{`const subscriptionState = + await documentStore.subscriptions.getSubscriptionState("subscriptionName"); +`} + + + + +{`// Available overloads: +getSubscriptionState(subscriptionName); +getSubscriptionState(subscriptionName, database); +`} + + + + + +##### SubscriptionState + +| Member | Type | Description | +|-------------------------------------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **query** | `string` | Subscription's RQL like query. | +| **lastBatchAckTime** | `string` | Last time a batch processing progress was acknowledged. | +| **nodeTag** | `string` | Processing server's node tag. | +| **mentorNode** | `string` | The mentor node that was manually set. | +| **subscriptionName** | `string` | The subscription's name, which is also its unique identifier. | +| **subscriptionId** | `number` | Subscription's internal identifier (cluster's operation etag during subscription creation). | +| **changeVectorForNextBatchStartingPoint** | `string` | The Change Vector from which the subscription will begin sending documents.
This value is updated on batch acknowledgement and can also be set manually. | +| **disabled** | `boolean` | If `true`, subscription will not allow workers to connect. | +| **lastClientConnectionTime** | `string` | Time when last client was connected (value sustained after disconnection). | + +
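+
+The state fields above can be combined with the maintenance methods, for example to re-enable a subscription
+that was found disabled. A minimal sketch, using an illustrative subscription name:
+
+
+{`// A sketch that inspects the subscription state and reacts to it.
+const state = await documentStore.subscriptions.getSubscriptionState("subscriptionName");
+
+console.log(\`Query: $\{state.query\}\`);
+console.log(\`Last batch acknowledged at: $\{state.lastBatchAckTime\}\`);
+
+if (state.disabled) \{
+    // Allow workers to connect again
+    await documentStore.subscriptions.enable(state.subscriptionName);
+\}
+`}
+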
+ + + +## DocumentSubscriptions class + +The `DocumentSubscriptions` class manages all interaction with the data subscriptions. +The class is available through the `subscriptions` property in the `documentStore`. + +| Method Signature | Return type | Description | +|----------------------------------------------------------|--------------------------------|--------------------------------------------------------------| +| **create(options)** | `Promise` | Create a new data subscription. | +| **create(options, database)** | `Promise` | Create a new data subscription. | +| **create(documentType)** | `Promise` | Create a new data subscription. | +| **create(optionsOrDocumentType, database)** | `Promise` | Create a new data subscription. | +| **createForRevisions(options)** | `Promise` | Create a new data subscription. | +| **createForRevisions(options, database)** | `Promise` | Create a new data subscription. | +| **delete(name)** | `Promise` | Delete subscription. | +| **delete(name, database)** | `Promise` | Delete subscription. | +| **dropConnection(name)** | `Promise` | Drop all existing subscription connections with workers. | +| **dropConnection(name, database)** | `Promise` | Drop all existing subscription connections with workers. | +| **dropSubscriptionWorker(worker, database)** | `Promise` | Drop an existing subscription connection with a worker. | +| **enable(name)** | `Promise` | Enable existing subscription. | +| **enable(name, database)** | `Promise` | Enable existing subscription. | +| **disable(name)** | `Promise` | Disable existing subscription. | +| **disable(name, database)** | `Promise` | Disable existing subscription. | +| **update(updateOptions)** | `Promise` | Update an existing data subscription. | +| **update(updateOptions, database)** | `Promise` | Update an existing data subscription. | +| **getSubscriptions(start, take)** | `Promise` | Returns subscriptions list. | +| **getSubscriptions(start, take, database)** | `Promise` | Returns subscriptions list. | +| **getSubscriptionState(subscriptionName)** | `Promise ` | Get the state of a specific subscription. | +| **getSubscriptionState(subscriptionName, database)** | `Promise ` | Get the state of a specific subscription. | +| **getSubscriptionWorker(options)** | `SubscriptionWorker` | Generate a subscription worker. | +| **getSubscriptionWorker(options, database)** | `SubscriptionWorker` | Generate a subscription worker. | +| **getSubscriptionWorker(subscriptionName)** | `SubscriptionWorker` | Generate a subscription worker. | +| **getSubscriptionWorker(subscriptionName, database)** | `SubscriptionWorker` | Generate a subscription worker. | +| **getSubscriptionWorkerForRevisions(options)** | `SubscriptionWorker` | Generate a subscription worker for a revisions subscription. | +| **getSubscriptionWorkerForRevisions(options, database)** | `SubscriptionWorker` | Generate a subscription worker for a revisions subscription. 
| + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_subscription-with-revisioning-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_subscription-with-revisioning-csharp.mdx new file mode 100644 index 0000000000..cbb54c9e40 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_subscription-with-revisioning-csharp.mdx @@ -0,0 +1,312 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When the [Revisions feature](../../../document-extensions/revisions/overview.mdx) is enabled, a document revision is created with each change made to the document. + Each revision contains a snapshot of the document at the time of modification, forming a complete audit trail. + +* The **Data Subscription** feature supports subscribing not only to documents but also to their **revisions**. + This functionality allows the subscribed client to track changes made to documents over time. + +* The revisions support is specified within the subscription definition. + See how to create and consume it in the examples below. + +* In this page: + * [Regular subscription vs Revisions subscription](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#regular-subscription-vs-revisions-subscription) + * [Revisions processing order](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#revisions-processing-order) + * [Simple creation and consumption](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#simple-creation-and-consumption) + * [Filtering revisions](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#filtering-revisions) + * [Projecting fields from revisions](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#projecting-fields-from-revisions) + + +## Regular subscription vs Revisions subscription + + + +##### Regular subscription +* **Processed items**: + The subscription processes **documents** from the defined collection. + Only the latest version of the document is processed, even if the document has revisions. +* **Query access scope**: + The subscription query running on the server has access only to the latest/current version of the documents. +* **Data sent to client**: + Each item in the batch sent to the client contains a single document (or a projection of it), + as defined in the subscription. + + + + +##### Revisions subscription +* **Processed items**: + The subscription processes all **revisions** of documents from the defined collection, + including revisions of deleted documents from the revision bin if they have not been purged. +* **Query access scope**: + For each revision, the subscription query running on the server has access to both the currently processed revision and its previous revision. +* **Data sent to client**: + By default, unless the subscription query is [projecting specific fields](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#projecting-fields-from-revisions), + each item in the batch sent to the client contains both the processed revision (`Result.Current`) and its preceding revision (`Result.Previous`). + If the document has just been created, the previous revision will be `null`. 
+ + +* In order for the revisions subscription to work, + [Revisions must be configured](../../../document-extensions/revisions/overview.mdx#defining-a-revisions-configuration) and enabled for the collection the subscription manages. + +* A document that has no revisions will Not be processed, + so make sure that your revisions configuration does not purge revisions before the subscription has a chance to process them. + + + + + + +## Revisions processing order + +In the revisions subscription, revisions are processed in pairs of subsequent entries. +For example, consider the following User document: + + + +{`\{ + Name: "James", + Age: "21" +\} +`} + + + +We update this User document in two consecutive operations: + +* Update the 'Age' field to the value of 22 +* Update the 'Age' field to the value of 23 + +The subscription worker in the client will receive pairs of revisions ( _Previous_ & _Current_ ) +within each item in the batch in the following order: + +| Batch item | Previous | Current | +|------------|--------------------------------|--------------------------------| +| item #1 | `null` | `{ Name: "James", Age: "21" }` | +| item #2 | `{ Name: "James", Age: "21" }` | `{ Name: "James", Age: "22" }` | +| item #3 | `{ Name: "James", Age: "22" }` | `{ Name: "James", Age: "23" }` | + + + +## Simple creation and consumption + +Here we set up a basic revisions subscription that will deliver pairs of consecutive _Order_ document revisions to the client: + +**Create subscription**: + + + + +{`subscriptionName = store.Subscriptions.Create( + // Use > as the type for the processed items + // e.g. > + new SubscriptionCreationOptions>()); +`} + + + + +{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions() +{ + // Add (Revisions = true) to your subscription RQL + Query = @"From Orders (Revisions = true)" +}); +`} + + + + +**Consume subscription**: + + + +{`SubscriptionWorker> revisionsWorker = + // Specify > as the type of the processed items + store.Subscriptions.GetSubscriptionWorker>(subscriptionName); + +await revisionsWorker.Run((SubscriptionBatch> batch) => +\{ + foreach (var item in batch.Items) + \{ + // Access the previous revision via 'Result.Previous' + var previousRevision = item.Result.Previous; + + // Access the current revision via 'Result.Current' + var currentRevision = item.Result.Current; + + // Provide your own processing logic: + ProcessOrderRevisions(previousRevision, currentRevision); + \} +\}); +`} + + + + + +## Filtering revisions + +Here we set up a revisions subscription that will send the client only document revisions in which the order was shipped to Mexico. 
+
+**Create subscription**:
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(
+    // Specify Revision<Order> as the type of the processed items
+    new SubscriptionCreationOptions<Revision<Order>>()
+    {
+        // Provide filtering logic
+        // Only revisions that were shipped to Mexico will be sent to subscribed clients
+        Filter = revision => revision.Current.ShipTo.Country == "Mexico",
+    });
+`}
+
+
+
+
+{`subscriptionName = await store.Subscriptions.CreateAsync(new SubscriptionCreationOptions()
+{
+    Query = @"declare function isSentToMexico(doc) {
+                  return doc.Current.ShipTo.Country == 'Mexico'
+              }
+
+              from 'Orders' (Revisions = true) as doc
+              where isSentToMexico(doc) == true"
+});
+`}
+
+
+
+
+**Consume subscription**:
+
+
+
+{`SubscriptionWorker<Revision<Order>> worker =
+    store.Subscriptions.GetSubscriptionWorker<Revision<Order>>(subscriptionName);
+
+await worker.Run(batch =>
+\{
+    foreach (var item in batch.Items)
+    \{
+        Console.WriteLine($@"
+            This is a revision of document \{item.Id\}.
+            The order in this revision was shipped at \{item.Result.Current.ShippedAt\}.");
+    \}
+\});
+`}
+
+
+
+
+
+## Projecting fields from revisions
+
+Here we define a revisions subscription that will filter the revisions and send projected data to the client.
+
+**Create subscription**:
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(
+    // Specify Revision<Order> as the type of the processed items within the query
+    new SubscriptionCreationOptions<Revision<Order>>()
+    {
+        // Filter revisions by the revenue delta.
+        // The subscription will only process revisions where the revenue
+        // is higher than in the preceding revision by 2500.
+        Filter = revision =>
+            revision.Previous != null &&
+            revision.Current.Lines.Sum(x => x.PricePerUnit * x.Quantity) >
+            revision.Previous.Lines.Sum(x => x.PricePerUnit * x.Quantity) + 2500,
+
+        // Define the projected fields that will be sent to the client
+        Projection = revision => new OrderRevenues()
+        {
+            PreviousRevenue =
+                revision.Previous.Lines.Sum(x => x.PricePerUnit * x.Quantity),
+
+            CurrentRevenue =
+                revision.Current.Lines.Sum(x => x.PricePerUnit * x.Quantity)
+        }
+    });
+`}
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+{
+    Query = @"declare function isRevenueDeltaAboveThreshold(doc, threshold) {
+                  return doc.Previous !== null && doc.Current.Lines.map(function(x) {
+                      return x.PricePerUnit * x.Quantity;
+                  }).reduce((a, b) => a + b, 0) > doc.Previous.Lines.map(function(x) {
+                      return x.PricePerUnit * x.Quantity;
+                  }).reduce((a, b) => a + b, 0) + threshold
+              }
+
+              from 'Orders' (Revisions = true) as doc
+              where isRevenueDeltaAboveThreshold(doc, 2500)
+
+              select {
+                  PreviousRevenue: doc.Previous.Lines.map(function(x) {
+                      return x.PricePerUnit * x.Quantity;
+                  }).reduce((a, b) => a + b, 0),
+
+                  CurrentRevenue: doc.Current.Lines.map(function(x) {
+                      return x.PricePerUnit * x.Quantity;
+                  }).reduce((a, b) => a + b, 0)
+              }"
+});
+`}
+
+
+
+
+{`public class OrderRevenues
+{
+    public decimal PreviousRevenue { get; set; }
+    public decimal CurrentRevenue { get; set; }
+}
+`}
+
+
+
+
+**Consume subscription**:
+
+Since the revision fields are projected into the `OrderRevenues` class in the subscription definition,
+each item received in the batch has the format of this projected class instead of the default `Result.Previous` and `Result.Current` fields,
+as was demonstrated in the [simple example](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#simple-creation-and-consumption).
+
+
+
+{`SubscriptionWorker<OrderRevenues> revenuesComparisonWorker =
+    // Use the projected class type 'OrderRevenues' for the items the worker will process
+    store.Subscriptions.GetSubscriptionWorker<OrderRevenues>(subscriptionName);
+
+await revenuesComparisonWorker.Run(batch =>
+\{
+    foreach (var item in batch.Items)
+    \{
+        // Access the projected content:
+        Console.WriteLine($@"Revenue for order with ID: \{item.Id\}
+                             has grown from \{item.Result.PreviousRevenue\}
+                             to \{item.Result.CurrentRevenue\}");
+    \}
+\});
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_subscription-with-revisioning-java.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_subscription-with-revisioning-java.mdx
new file mode 100644
index 0000000000..473554f719
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_subscription-with-revisioning-java.mdx
@@ -0,0 +1,147 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The **Data Subscription** feature supports subscribing not only to documents, but also to [document revisions](../../../document-extensions/revisions/overview.mdx).
+
+* The revisions support is defined within the subscription.
+  A [Revisions Configuration](../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) must be defined for the subscribed collection.
+
+* While a regular subscription processes a single document, a Revisions subscription processes **pairs of subsequent document revisions**.
+
+  Using this functionality allows you to keep track of each change made in a document, as well as compare pairs of subsequent versions of the document.
+
+  Both revisions are accessible for filtering and projection.
+
+* In this page:
+  * [Revisions processing order](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#revisions-processing-order)
+  * [Simple declaration and usage](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#simple-declaration-and-usage)
+  * [Revisions processing and projection](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#revisions-processing-and-projection)
+
+
+## Revisions processing order
+
+The Revisions feature allows the tracking of changes made in a document, by storing the audit trail of its changes over time.
+An audit trail entry is called a **Document Revision**, and consists of a document snapshot.
+Read more about revisions [here](../../../document-extensions/revisions/overview.mdx).
+
+In a data subscription, revisions will be processed in pairs of subsequent entries.
+For example, consider the following User document:
+
+`{
+  Name:'James',
+  Age:'21'
+}`
+
+We update the User document twice, in separate operations:
+
+* We update the 'Age' field to the value of 22
+* We update the 'Age' field to the value of 23
+
+The data subscriptions revisions processing mechanism will receive pairs of revisions in the following order:
+
+| # | Previous                     | Current                      |
+|---|------------------------------|------------------------------|
+| 1 | `null`                       | `{ Name:'James', Age:'21' }` |
+| 2 | `{ Name:'James', Age:'21' }` | `{ Name:'James', Age:'22' }` |
+| 3 | `{ Name:'James', Age:'22' }` | `{ Name:'James', Age:'23' }` |
+
+
+The revisions subscription will be able to function properly only if the revisions it needs to process are available.
+Please make sure that your revisions configuration doesn't purge revisions before the subscription has had the chance to process them.
+
+
+
+
+## Simple declaration and usage
+
+Here we declare a simple revisions subscription that will send pairs of subsequent document revisions to the client:
+
+Creation:
+
+
+
+{`name = store.subscriptions().createForRevisions(Order.class);
+`}
+
+
+
+
+{`SubscriptionCreationOptions options = new SubscriptionCreationOptions();
+options.setQuery("from orders (Revisions = true)");
+name = store.subscriptions().createForRevisions(Order.class, options);
+`}
+
+
+
+
+Consumption:
+
+
+{`SubscriptionWorker<Revision<Order>> revisionWorker = store
+    .subscriptions().getSubscriptionWorkerForRevisions(Order.class, name);
+revisionWorker.run(x -> \{
+    for (SubscriptionBatch.Item<Revision<Order>> documentsPair : x.getItems()) \{
+
+        Order prev = documentsPair.getResult().getPrevious();
+        Order current = documentsPair.getResult().getCurrent();
+
+        processOrderChanges(prev, current);
+    \}
+\});
+`}
+
+
+
+
+
+## Revisions processing and projection
+
+Here we declare a revisions subscription that will filter and project data from revisions pairs:
+
+Creation:
+
+
+{`SubscriptionCreationOptions options = new SubscriptionCreationOptions();
+options.setQuery("declare function getOrderLinesSum(doc) \{" +
+    "    var sum = 0;" +
+    "    for (var i in doc.Lines) \{ sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity; \} " +
+    "    return sum;" +
+    "\}" +
+    "" +
+    " from orders (Revisions = true) " +
+    " where getOrderLinesSum(this.Current) > getOrderLinesSum(this.Previous) " +
+    " select \{" +
+    "     previousRevenue: getOrderLinesSum(this.Previous)," +
+    "     currentRevenue: getOrderLinesSum(this.Current)" +
+    "\}");
+
+name = store.subscriptions().create(options);
+`}
+
+
+
+Consumption:
+
+
+{`SubscriptionWorker<Revision<Order>> revisionWorker = store
+    .subscriptions().getSubscriptionWorkerForRevisions(Order.class, name);
+revisionWorker.run(x -> \{
+    for (SubscriptionBatch.Item<Revision<Order>> documentsPair : x.getItems()) \{
+
+        Order prev = documentsPair.getResult().getPrevious();
+        Order current = documentsPair.getResult().getCurrent();
+
+        processOrderChanges(prev, current);
+    \}
+\});
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_subscription-with-revisioning-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_subscription-with-revisioning-nodejs.mdx
new file mode 100644
index 0000000000..4da675704b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/_subscription-with-revisioning-nodejs.mdx
@@ -0,0 +1,287 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When the
[Revisions feature](../../../document-extensions/revisions/overview.mdx) is enabled, a document revision is created with each change made to the document. + Each revision contains a snapshot of the document at the time of modification, forming a complete audit trail. + +* The **Data Subscription** feature supports subscribing not only to documents but also to their **revisions**. + This functionality allows the subscribed client to track changes made to documents over time. + +* The revisions support is specified within the subscription definition. + See how to create and consume it in the examples below. + +* In this page: + * [Regular subscription vs Revisions subscription](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#regular-subscription-vs-revisions-subscription) + * [Revisions processing order](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#revisions-processing-order) + * [Simple creation and consumption](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#simple-creation-and-consumption) + * [Filtering revisions](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#filtering-revisions) + * [Projecting fields from revisions](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#projecting-fields-from-revisions) + + +## Regular subscription vs Revisions subscription + + + +##### Regular subscription +* **Processed items**: + The subscription processes **documents** from the defined collection. + Only the latest version of the document is processed, even if the document has revisions. +* **Query access scope**: + The subscription query running on the server has access only to the latest/current version of the documents. +* **Data sent to client**: + Each item in the batch sent to the client contains a single document (or a projection of it), + as defined in the subscription. + + + + +##### Revisions subscription +* **Processed items**: + The subscription processes all **revisions** of documents from the defined collection, + including revisions of deleted documents from the revision bin if they have not been purged. +* **Query access scope**: + For each revision, the subscription query running on the server has access to both the currently processed revision and its previous revision. +* **Data sent to client**: + By default, unless the subscription query is projecting specific fields, + each item in the batch sent to the client contains both the processed revision (`result.current`) and its preceding revision (`result.previous`). + If the document has just been created, the previous revision will be `null`. + + +* In order for the revisions subscription to work, + [Revisions must be configured](../../../document-extensions/revisions/overview.mdx#defining-a-revisions-configuration) and enabled for the collection the subscription manages. + +* A document that has no revisions will Not be processed, + so make sure that your revisions configuration does not purge revisions before the subscription has a chance to process them. + + + + + + +## Revisions processing order + +In the revisions subscription, revisions are processed in pairs of subsequent entries. 
+For example, consider the following User document:
+
+
+
+{`\{
+    Name: "James",
+    Age: "21"
+\}
+`}
+
+
+
+We update this User document in two consecutive operations:
+
+* Update the 'Age' field to the value of 22
+* Update the 'Age' field to the value of 23
+
+The subscription worker in the client will receive pairs of revisions ( _previous_ & _current_ )
+within each item in the batch in the following order:
+
+| Batch item | Previous                       | Current                        |
+|------------|--------------------------------|--------------------------------|
+| item #1    | `null`                         | `{ Name: "James", Age: "21" }` |
+| item #2    | `{ Name: "James", Age: "21" }` | `{ Name: "James", Age: "22" }` |
+| item #3    | `{ Name: "James", Age: "22" }` | `{ Name: "James", Age: "23" }` |
+
+
+
+## Simple creation and consumption
+
+Here we set up a basic revisions subscription that will deliver pairs of consecutive _Order_ document revisions to the client:
+
+**Create subscription**:
+
+
+
+{`const subscriptionName = await documentStore.subscriptions.create(\{
+    // Add (Revisions = true) to your subscription RQL
+    query: "From Orders (Revisions = true)"
+\});
+`}
+
+
+
+**Consume subscription**:
+
+
+
+{`const workerOptions = \{ subscriptionName \};
+
+const worker =
+    // Use method \`getSubscriptionWorkerForRevisions\`
+    documentStore.subscriptions.getSubscriptionWorkerForRevisions(workerOptions);
+
+worker.on("batch", (batch, callback) => \{
+    try \{
+        for (const item of batch.items) \{
+
+            // Access the previous revision via 'result.previous'
+            const previousRevision = item.result.previous;
+
+            // Access the current revision via 'result.current'
+            const currentRevision = item.result.current;
+        \}
+        callback();
+
+    \} catch (err) \{
+        callback(err);
+    \}
+\});
+`}
+
+
+
+
+
+## Filtering revisions
+
+Here we set up a revisions subscription that will send the client only document revisions in which the order was shipped to Mexico.
+
+**Create subscription**:
+
+
+
+{`const subscriptionName = await documentStore.subscriptions.create(\{
+    // Provide filtering logic
+    // Only revisions that were shipped to Mexico will be sent to subscribed clients
+    query: \`declare function isSentToMexico(doc) \{
+               return doc.Current.ShipTo.Country == 'Mexico'
+           \}
+
+           from 'Orders' (Revisions = true) as doc
+           where isSentToMexico(doc) == true\`
+\});
+`}
+
+
+
+**Consume subscription**:
+
+
+
+{`const workerOptions = \{ subscriptionName \};
+
+const worker =
+    documentStore.subscriptions.getSubscriptionWorkerForRevisions(workerOptions);
+
+worker.on("batch", (batch, callback) => \{
+    try \{
+        for (const item of batch.items) \{
+            console.log(\`
+                This is a revision of document $\{item.id\}.
+                The order in this revision was shipped at $\{item.result.current.ShippedAt\}.
+            \`);
+        \}
+        callback();
+
+    \} catch (err) \{
+        callback(err);
+    \}
+\});
+`}
+
+
+
+
+
+## Projecting fields from revisions
+
+Here we define a revisions subscription that will filter the revisions and send projected data to the client.
+
+**Create subscription**:
+
+
+
+
+{`const subscriptionName = await documentStore.subscriptions.create({
+    // Filter revisions by the revenue delta.
+    // The subscription will only process revisions where the revenue
+    // is higher than in the preceding revision by 2500.
+ + query: \`declare function isRevenueDeltaAboveThreshold(doc, threshold) { + return doc.Previous !== null && doc.Current.Lines.map(function(x) { + return x.PricePerUnit * x.Quantity; + }).reduce((a, b) => a + b, 0) > doc.Previous.Lines.map(function(x) { + return x.PricePerUnit * x.Quantity; + }).reduce((a, b) => a + b, 0) + threshold + } + + from 'Orders' (Revisions = true) as doc + where isRevenueDeltaAboveThreshold(doc, 2500) + + // Define the projected fields that will be sent to the client: + select { + previousRevenue: doc.Previous.Lines.map(function(x) { + return x.PricePerUnit * x.Quantity; + }).reduce((a, b) => a + b, 0), + + currentRevenue: doc.Current.Lines.map(function(x) { + return x.PricePerUnit * x.Quantity; + }).reduce((a, b) => a + b, 0) + }\` +}); +`} + + + + +{`class OrderRevenues { + constructor() { + this.previousRevenue; + this.currentRevenue; + } +} +`} + + + + +**Consume subscription**: + +Since the revision fields are projected into the `OrderRevenues` class in the subscription definition, +each item received in the batch has the format of this projected class instead of the default `result.previous` and `result.current` fields, +as was demonstrated in the [simple example](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx#simple-creation-and-consumption). + + + +{`const workerOptions = \{ + subscriptionName: subscriptionName, + documentType: OrderRevenues +\}; + +const worker = + // Note: in this case, where each resulting item in the batch is a projected object + // and not the revision itself, we use method \`getSubscriptionWorker\` + documentStore.subscriptions.getSubscriptionWorker(workerOptions); + +worker.on("batch", (batch, callback) => \{ + try \{ + for (const item of batch.items) \{ + // Access the projected content: + console.log(\` + Revenue for order with ID: $\{item.id\} + has grown from $\{item.result.previousRevenue\} + to $\{item.result.currentRevenue\} + \`); + \} + callback(); + + \} catch (err) \{ + callback(err); + \} +\}); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx new file mode 100644 index 0000000000..02933882f5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx @@ -0,0 +1,44 @@ +--- +title: "Data Subscriptions: Maintenance Operations" +hide_table_of_contents: true +sidebar_label: Maintenance Operations +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import MaintenanceOperationsCsharp from './_maintenance-operations-csharp.mdx'; +import MaintenanceOperationsJava from './_maintenance-operations-java.mdx'; +import MaintenanceOperationsNodejs from './_maintenance-operations-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx new file mode 100644 index 0000000000..146e1cbf51 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx @@ -0,0 +1,48 @@ +--- 
+title: "Data Subscriptions: Revisions Support" +hide_table_of_contents: true +sidebar_label: Revisions Support +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SubscriptionWithRevisioningCsharp from './_subscription-with-revisioning-csharp.mdx'; +import SubscriptionWithRevisioningJava from './_subscription-with-revisioning-java.mdx'; +import SubscriptionWithRevisioningNodejs from './_subscription-with-revisioning-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/assets/SubscriptionsDocumentProcessing.png b/versioned_docs/version-7.1/client-api/data-subscriptions/assets/SubscriptionsDocumentProcessing.png new file mode 100644 index 0000000000..f1fc7883d2 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/data-subscriptions/assets/SubscriptionsDocumentProcessing.png differ diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/concurrent-subscriptions.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/concurrent-subscriptions.mdx new file mode 100644 index 0000000000..3ea98edbd7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/concurrent-subscriptions.mdx @@ -0,0 +1,34 @@ +--- +title: "Concurrent Subscriptions" +hide_table_of_contents: true +sidebar_label: Concurrent Subscriptions +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ConcurrentSubscriptionsCsharp from './_concurrent-subscriptions-csharp.mdx'; +import ConcurrentSubscriptionsNodejs from './_concurrent-subscriptions-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-csharp.mdx new file mode 100644 index 0000000000..9644ddb560 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-csharp.mdx @@ -0,0 +1,262 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In this page: + * [Create the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker) + * [SubscriptionWorkerOptions](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions) + * [Run the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#run-the-subscription-worker) + * [SubscriptionBatch<T>](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionbatcht) + * [SubscriptionBatch<T>.Item](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionbatchtitem) + * [SubscriptionWorker<T>](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkert) + + +## Create the subscription worker + +A subscription worker can be created using the following `GetSubscriptionWorker` methods available through the `Subscriptions` property of the `DocumentStore`. 
+
+Note: Simply creating the worker is insufficient;
+after creating the worker, you need to [run the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#run-the-subscription-worker) to initiate document processing.
+
+
+
+{`SubscriptionWorker<dynamic> GetSubscriptionWorker(
+    string subscriptionName, string database = null);
+
+SubscriptionWorker<dynamic> GetSubscriptionWorker(
+    SubscriptionWorkerOptions options, string database = null);
+
+SubscriptionWorker<T> GetSubscriptionWorker<T>(
+    string subscriptionName, string database = null) where T : class;
+
+SubscriptionWorker<T> GetSubscriptionWorker<T>(
+    SubscriptionWorkerOptions options, string database = null) where T : class;
+`}
+
+
+
+| Parameter | Type | Description |
+|----------------------|-----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------|
+| **subscriptionName** | `string` | The name of the subscription to which the worker will connect. |
+| **options** | `SubscriptionWorkerOptions` | Options that affect how the worker interacts with the subscription. These options do not alter the definition of the subscription itself. |
+| **database** | `string` | The name of the database where the subscription task resides.
If `null`, the default database configured in DocumentStore will be used. |
+
+| Return value | |
+|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|
+| `SubscriptionWorker<T>` | The subscription worker that has been created.
Initially, it is idle and will only start processing documents when the `Run` function is called. | + + + +## SubscriptionWorkerOptions + + + +{`public class SubscriptionWorkerOptions +\{ + public string SubscriptionName \{ get; \} + public int MaxDocsPerBatch \{ get; set; \} + public int SendBufferSizeInBytes \{ get; set; \} + public int ReceiveBufferSizeInBytes \{ get; set; \} + public bool IgnoreSubscriberErrors \{ get; set; \} + public bool CloseWhenNoDocsLeft \{ get; set; \} + public TimeSpan TimeToWaitBeforeConnectionRetry \{ get; set; \} + public TimeSpan ConnectionStreamTimeout \{ get; set; \} + public TimeSpan MaxErroneousPeriod \{ get; set; \} + public SubscriptionOpeningStrategy Strategy \{ get; set; \} +\} +`} + + + +When creating a worker with `SubscriptionWorkerOptions`, the only mandatory property is `SubscriptionName`. +All other parameters are optional and will default to their respective default values if not specified. + +| Member | Type | Description | +|-------------------------------------|-------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **SubscriptionName** | `string` | The name of the subscription to which the worker will connect. | +| **MaxDocsPerBatch** | `int` | The maximum number of documents that the server will try to retrieve and send to the client in a batch. If the server doesn't find as many documents as specified, it will send the documents it has found without waiting. Default: 4096. | +| **SendBufferSizeInBytes** | `int` | The size in bytes of the TCP socket buffer used for _sending_ data.
Default: 32,768 bytes (32 KiB). | +| **ReceiveBufferSizeInBytes** | `int` | The size in bytes of the TCP socket buffer used for _receiving_ data.
Default: 4096 (4 KiB). | +| **IgnoreSubscriberErrors** | `bool` | Determines if subscription processing is aborted when the worker's batch-handling code throws an unhandled exception.

`true` – subscription processing will continue.

`false` (Default) – subscription processing will be aborted. | +| **CloseWhenNoDocsLeft** | `bool` | Determines whether the subscription connection closes when no new documents are available.

`true` – The subscription worker processes all available documents and stops when none remain, at which point the `Run` method throws a `SubscriptionClosedException`.
Useful for ad-hoc, one-time processing.

`false` (Default) – The subscription worker remains active, waiting for new documents. | +| **TimeToWaitBeforeConnectionRetry** | `TimeSpan` | The time to wait before attempting to reconnect after a non-aborting failure during subscription processing. Default: 5 seconds. | +| **MaxErroneousPeriod** | `TimeSpan` | The maximum amount of time a subscription connection can remain in an erroneous state before it is terminated. Default: 5 minutes. | +| **Strategy** | `SubscriptionOpeningStrategy` | This enum configures how the server handles connection attempts from workers to a specific subscription task.
Default: `OpenIfFree`. |
+
+Learn more about `SubscriptionOpeningStrategy` in [worker strategies](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies).
+
+
+
+{`public enum SubscriptionOpeningStrategy
+\{
+    // Connect if no other worker is connected
+    OpenIfFree,
+
+    // Take over the connection
+    TakeOver,
+
+    // Wait for currently connected worker to disconnect
+    WaitForFree,
+
+    // Connect concurrently
+    Concurrent
+\}
+`}
+
+
+
+
+
+## Run the subscription worker
+
+After [creating](../../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker) a subscription worker, the worker is not yet processing any documents.
+To start processing, you need to call the `Run` method of the [SubscriptionWorker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkert).
+
+The `Run` function takes a delegate, which is your client-side code responsible for processing the received document batches.
+
+
+
+{`Task Run(Action<SubscriptionBatch<T>> processDocuments,
+    CancellationToken ct = default(CancellationToken));
+
+Task Run(Func<SubscriptionBatch<T>, Task> processDocuments,
+    CancellationToken ct = default(CancellationToken));
+`}
+
+
+
+| Parameter | Type | Description |
+|----------------------|------------------------------------|----------------------------------------------------------------|
+| **processDocuments** | `Action<SubscriptionBatch<T>>` | Delegate for synchronous batch processing. |
+| **processDocuments** | `Func<SubscriptionBatch<T>, Task>` | Delegate for asynchronous batch processing. |
+| **ct** | `CancellationToken` | Cancellation token used in order to halt the worker operation. |
+
+| Return value | |
+|---------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `Task` | Task that is alive as long as the subscription worker is processing or tries processing.
If the processing is aborted, the task exits with an exception. |
+
+
+
+## SubscriptionBatch&lt;T&gt;
+
+| Member | Type | Description |
+|--------------------------|-----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **Items** | `List<SubscriptionBatch<T>.Item>` | List of items in the batch.
See [SubscriptionBatch&lt;T&gt;.Item](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionbatchtitem) below. |
+| **NumberOfItemsInBatch** | `int` | Number of items in the batch. |
+
+| Method Signature | Return value | Description |
+|------------------------|-------------------------|-------------------------------------------------------------------------------------------------------------------|
+| **OpenSession()** | `IDocumentSession` | Open a new document session that tracks all items and their included items within the current batch. |
+| **OpenAsyncSession()** | `IAsyncDocumentSession` | Open a new asynchronous document session that tracks all items and their included items within the current batch. |
+
+
+
+##### Subscription worker connectivity
+
+As long as there is no exception, the worker will continue addressing the same server that the first batch was received from.
+If the worker fails to reach that node, it will try to [failover](../../../client-api/configuration/load-balance/overview.mdx) to another node from the session's topology list.
+The node that the worker succeeds in connecting to will inform the worker which node is currently responsible for data subscriptions.
+
+
+
+
+
+## SubscriptionBatch&lt;T&gt;.Item
+
+This class represents a single item in the subscription batch results.
+
+
+
+{`public struct Item
+\{
+    public T Result \{ get; internal set; \}
+    public string ExceptionMessage \{ get; internal set; \}
+    public string Id \{ get; internal set; \}
+    public string ChangeVector \{ get; internal set; \}
+    public bool Projection \{ get; internal set; \}
+    public bool Revision \{ get; internal set; \}
+    public BlittableJsonReaderObject RawResult \{ get; internal set; \}
+    public BlittableJsonReaderObject RawMetadata \{ get; internal set; \}
+    public IMetadataDictionary Metadata \{ get; internal set; \}
+\}
+`}
+
+
+
+| Member | Type | Description |
+|----------------------|-----------------------------|-------------------------------------------------------------------------------------------------------|
+| **Result** | `T` | The current batch item.
If `T` is `BlittableJsonReaderObject`, no deserialization will take place. |
+| **ExceptionMessage** | `string` | The exception message thrown during current document processing on the server side. |
+| **Id** | `string` | The document ID of the underlying document for the current batch item. |
+| **ChangeVector** | `string` | The change vector of the underlying document for the current batch item. |
+| **RawResult** | `BlittableJsonReaderObject` | Current batch item before serialization to `T`. |
+| **RawMetadata** | `BlittableJsonReaderObject` | Current batch item's underlying document metadata. |
+| **Metadata** | `IMetadataDictionary` | Current batch item's underlying metadata values. |
+
+
+This class should only be used within the subscription's `Run` delegate.
+Using it outside this scope may cause unexpected behavior.
+
+
+
+## SubscriptionWorker&lt;T&gt;
+
+
+
+##### Methods
+
+| Method Signature | Return Type | Description |
+|------------------------------------------------|---------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **Dispose()** | `void` | Aborts subscription worker operation ungracefully by waiting for the task returned by the `Run` function to finish running. |
+| **DisposeAsync()** | `Task` | Async version of `Dispose()`. |
+| **Dispose(bool waitForSubscriptionTask)** | `void` | Aborts the subscription worker, but allows deciding whether to wait for the `Run` function task or not. |
+| **DisposeAsync(bool waitForSubscriptionTask)** | `Task` | Async version of `Dispose(bool waitForSubscriptionTask)`. |
+| **Run (multiple overloads)** | `Task` | Call `Run` to begin the worker's batch processing.
Pass the batch processing delegates to this method
(see [above](../../../client-api/data-subscriptions/consumption/api-overview.mdx#run-the-subscription-worker)). | + +
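+
+For illustration, below is a brief usage sketch tying these methods together. It assumes an existing `DocumentStore` named `store`, an `Order` entity class, and a subscription named "ordersSubscription" - these names are placeholders, not part of the API:
+
+
+
+{`// Create a worker for an existing subscription
+var worker = store.Subscriptions.GetSubscriptionWorker<Order>("ordersSubscription");
+
+// Begin processing; keep the returned task to observe processing failures
+Task workerTask = worker.Run(batch =>
+\{
+    foreach (var item in batch.Items)
+    \{
+        Console.WriteLine($"Processing document: \{item.Id\}");
+    \}
+\});
+
+// Later, shut the worker down and wait for the Run task to complete
+await worker.DisposeAsync(waitForSubscriptionTask: true);
+`}
+
+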
+ + + +##### Events + +| Event | Event type | Description | +|-----------------------------------|:--------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **AfterAcknowledgment** | `AfterAcknowledgmentAction` | Triggered after each time the server acknowledges the progress of batch processing. | +| **OnSubscriptionConnectionRetry** | `Action` | Triggered when the subscription worker attempts to reconnect to the server after a failure.
The event receives as a parameter the exception that interrupted the processing. | +| **OnDisposed** | `Action>` | Triggered after the subscription worker is disposed. | + + + +##### AfterAcknowledgmentAction + +| Parameter | | | +|-------------|------------------------|------------------------------------------| +| **batch** | `SubscriptionBatch` | The batch process which was acknowledged | + +| Return value | | +|----------------|---------------------------------------------------------------------------------------------------------| +| `Task` | Task for which the worker will wait for the event processing to be finished (for async functions, etc.) | + + + +
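+
+A short sketch of wiring the events described above on an existing worker (`worker` is assumed to be a `SubscriptionWorker<Order>` created as shown earlier; the log messages are illustrative):
+
+
+
+{`// AfterAcknowledgment - runs after the server acknowledges batch progress
+worker.AfterAcknowledgment += batch =>
+\{
+    Console.WriteLine($"Acknowledged a batch of \{batch.NumberOfItemsInBatch\} items");
+    return Task.CompletedTask;
+\};
+
+// OnSubscriptionConnectionRetry - runs on reconnection attempts
+worker.OnSubscriptionConnectionRetry += exception =>
+    Console.WriteLine($"Connection retry due to: \{exception.Message\}");
+
+// OnDisposed - runs after the worker is disposed
+worker.OnDisposed += w =>
+    Console.WriteLine($"Worker for '\{w.SubscriptionName\}' was disposed");
+`}
+
+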
+
+
+##### Properties
+
+| Member | Type | Description |
+|-------------------------------|----------|-----------------------------------------------------------------------|
+| **CurrentNodeTag** | `string` | The node tag of the current RavenDB server handling the subscription. |
+| **SubscriptionName** | `string` | The name of the currently processed subscription. |
+| **WorkerId** | `string` | The worker ID. |
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-java.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-java.mdx
new file mode 100644
index 0000000000..d99288db9a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-java.mdx
@@ -0,0 +1,175 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In this page:
+  * [Create the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker)
+  * [SubscriptionWorkerOptions](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions)
+  * [Run the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#run-the-subscription-worker)
+  * [SubscriptionBatch&lt;T&gt;](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionbatcht)
+  * [SubscriptionWorker&lt;T&gt;](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkert)
+
+
+## Create the subscription worker
+
+Subscription worker generation is accessible through the `DocumentStore`'s `subscriptions()` method, of type `DocumentSubscriptions`:
+
+
+{`SubscriptionWorker<ObjectNode> getSubscriptionWorker(SubscriptionWorkerOptions options);
+SubscriptionWorker<ObjectNode> getSubscriptionWorker(SubscriptionWorkerOptions options, String database);
+
+SubscriptionWorker<ObjectNode> getSubscriptionWorker(String subscriptionName);
+SubscriptionWorker<ObjectNode> getSubscriptionWorker(String subscriptionName, String database);
+
+<T> SubscriptionWorker<T> getSubscriptionWorker(Class<T> clazz, SubscriptionWorkerOptions options);
+<T> SubscriptionWorker<T> getSubscriptionWorker(Class<T> clazz, SubscriptionWorkerOptions options, String database);
+
+<T> SubscriptionWorker<T> getSubscriptionWorker(Class<T> clazz, String subscriptionName);
+<T> SubscriptionWorker<T> getSubscriptionWorker(Class<T> clazz, String subscriptionName, String database);
+
+<T> SubscriptionWorker<Revision<T>> getSubscriptionWorkerForRevisions(Class<T> clazz, SubscriptionWorkerOptions options);
+<T> SubscriptionWorker<Revision<T>> getSubscriptionWorkerForRevisions(Class<T> clazz, SubscriptionWorkerOptions options, String database);
+
+<T> SubscriptionWorker<Revision<T>> getSubscriptionWorkerForRevisions(Class<T> clazz, String subscriptionName);
+<T> SubscriptionWorker<Revision<T>> getSubscriptionWorkerForRevisions(Class<T> clazz, String subscriptionName, String database);
+`}
+
+
+
+| Parameter | | |
+|----------------------|-----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **subscriptionName** | `String` | The subscription's name.
This parameter appears in simpler overloads that allow you to start processing without creating a `SubscriptionWorkerOptions` instance, relying on the default values. |
+| **options** | `SubscriptionWorkerOptions` | Options that affect how the worker interacts with the subscription. These options do not alter the definition of the subscription itself. |
+| **database** | `String` | The name of the database where the subscription task resides. If `null`, the default database configured in DocumentStore will be used. |
+
+| Return value | |
+|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|
+| `SubscriptionWorker<T>` | The subscription worker that has been created.
Initially, it is idle and will only start processing documents when the `run` function is called. | + + + + +## SubscriptionWorkerOptions + +When creating a worker with `SubscriptionWorkerOptions`, the only mandatory property is `subscriptionName`. +All other parameters are optional and will default to their respective default values if not specified. + + +| Member | Type | Description | +|-------------------------------------|-----------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **subscriptionName** | `String` | The name of the subscription to which the worker will connect. | +| **timeToWaitBeforeConnectionRetry** | `Duration` | The time to wait before attempting to reconnect after a non-aborting failure during subscription processing. Default: 5 seconds. | +| **ignoreSubscriberErrors** | `boolean` | Determines if subscription processing is aborted when the worker's batch-handling code throws an unhandled exception.

`true` – subscription processing will continue.

`false` (Default) – subscription processing will be aborted. | +| **strategy** | `SubscriptionOpeningStrategy`
(enum) | Configures how the server handles connection attempts from workers to a specific subscription task.
Learn more in [worker strategies](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies).
Default: `OPEN_IF_FREE`. | +| **maxDocsPerBatch** | `int` | The maximum number of documents that the server will try to retrieve and send to the client in a batch. If the server doesn't find as many documents as specified, it will send the documents it has found without waiting. Default: 4096. | +| **closeWhenNoDocsLeft** | `boolean` | Determines whether the subscription connection closes when no new documents are available.

`true` – The subscription worker processes all available documents and stops when none remain, at which point the `run` method throws a `SubscriptionClosedException`.
Useful for ad-hoc, one-time processing.

`false` (Default) – The subscription worker remains active, waiting for new documents. | +| **sendBufferSizeInBytes** | `int` | The size in bytes of the TCP socket buffer used for _sending_ data.
Default: 32,768 bytes (32 KiB). | +| **receiveBufferSizeInBytes** | `int` | The size in bytes of the TCP socket buffer used for _receiving_ data.
Default: 4096 (4 KiB). |
+
+
+
+## Run the subscription worker
+
+After [creating](../../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker) a subscription worker, the worker is not yet processing any documents.
+To start processing, you need to call the `run` method of the [SubscriptionWorker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkert).
+
+The `run` function takes a delegate, which is your client-side code responsible for processing the received document batches.
+
+
+
+{`CompletableFuture<Void> run(Consumer<SubscriptionBatch<T>> processDocuments);
+`}
+
+
+
+| Parameter | | |
+|----------------------|----------------------------------|---------------------------------------------|
+| **processDocuments** | `Consumer<SubscriptionBatch<T>>` | Delegate for synchronous batch processing |
+
+| Return value | |
+|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `CompletableFuture<Void>` | Task that is alive as long as the subscription worker is processing or tries processing. If the processing is aborted, the future exits with an exception |
+
+
+
+
+## SubscriptionBatch&lt;T&gt;
+
+| Member | Type | Description |
+|--------------------------|-----------------------------------|-------------------------------|
+| **items** | `List<SubscriptionBatch<T>.Item>` | List of items in the batch. |
+| **numberOfItemsInBatch** | `int` | Number of items in the batch. |
+
+| Method Signature | Return value | Description |
+|--------------------|--------------------|--------------------------------------------------------------------------------------|
+| **openSession()** | `IDocumentSession` | New document session that tracks all items and included items of the current batch. |
+
+
+
+
+As long as there is no exception, the worker will continue addressing the same
+server that the first batch was received from.
+If the worker fails to reach that node, it will try to fail over to another node
+from the session's topology list.
+The node that the worker succeeds in connecting to will inform the worker which
+node is currently responsible for data subscriptions.
+
+
+
+
+
+
+
+If `T` is `ObjectNode`, no deserialization will take place.
+
+
+| Member | Type | Description |
+|----------------------|-----------------------|----------------------------------------------------------------------------------------|
+| **result** | `T` | Current batch item. |
+| **exceptionMessage** | `String` | Message of the exception thrown during current document processing on the server side. |
+| **id** | `String` | Current batch item's underlying document ID. |
+| **changeVector** | `String` | Current batch item's underlying document change vector. |
+| **rawResult** | `ObjectNode` | Current batch item before serialization to `T`. |
+| **rawMetadata** | `ObjectNode` | Current batch item's underlying document metadata. |
+| **metadata** | `IMetadataDictionary` | Current batch item's underlying metadata values.
|
+
+
+
+## SubscriptionWorker&lt;T&gt;
+
+
+
+| Method Signature | Return Type | Description |
+|------------------------------|---------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| **close()** | `void` | Aborts subscription worker operation ungracefully by waiting for the task returned by the `run` function to finish running. |
+| **run (multiple overloads)** | `CompletableFuture<Void>` | Call `run` to begin the worker's batch processing.
Pass the batch processing delegates to this method
(see [above](../../../client-api/data-subscriptions/consumption/api-overview.mdx#run-the-subscription-worker)). | + +
+
+
+
+| Event | Type\Return type | Description |
+|------------------------------------|--------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **addAfterAcknowledgmentListener** | `Consumer<SubscriptionBatch<T>>` (event) | Event raised after each time the server acknowledges batch processing progress. |
+| **onSubscriptionConnectionRetry** | `Consumer<Exception>` (event) | Event that is fired when the subscription worker tries to reconnect to the server after a failure. The event receives as a parameter the exception that interrupted the processing. |
+| **onClosed** | `Consumer<SubscriptionWorker<T>>` (event) | Event that is fired after the subscription worker is disposed. |
+
+
+
+
+| Member | Type\Return type | Description |
+|----------------------|--------------------|-----------------------------------------------------------------------|
+| **currentNodeTag** | `String` | The node tag of the current RavenDB server handling the subscription. |
+| **subscriptionName** | `String` | The name of the currently processed subscription. |
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-nodejs.mdx
new file mode 100644
index 0000000000..948886203a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-nodejs.mdx
@@ -0,0 +1,212 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In this page:
+  * [Create the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker)
+  * [Subscription worker options](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscription-worker-options)
+  * [Run the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#run-the-subscription-worker)
+  * [Subscription batch](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscription-batch)
+  * [Subscription batch item](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscription-batch-item)
+  * [Subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscription-worker)
+
+
+## Create the subscription worker
+
+A subscription worker can be created using the following `getSubscriptionWorker` methods available through the `subscriptions` property of the `documentStore`.
+
+Note: Simply creating the worker is insufficient;
+after creating the worker, you need to [run the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#run-the-subscription-worker) to initiate document processing.
+ + + +{`await documentStore.subscriptions.getSubscriptionWorker(subscriptionName); +await documentStore.subscriptions.getSubscriptionWorker(subscriptionName, database); + +await documentStore.subscriptions.getSubscriptionWorker(options); +await documentStore.subscriptions.getSubscriptionWorker(options, database); +`} + + + +| Parameter | Type | Description | +|----------------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **subscriptionName** | `string` | The name of the subscription to which the worker will connect. | +| **database** | `string` | The name of the database where the subscription task resides.
If `null`, the default database configured in DocumentStore will be used. | +| **options** | `object` | [Subscription worker options](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscription-worker-options) object that affect how the worker interacts with the subscription. These options do not alter the definition of the subscription itself. | + +| Return value | | +|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `SubscriptionWorker` | The [subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscription-worker) that has been created.
The worker will start processing documents once you define the worker's `on` method,
which listens to the `batch` event. | + + + +## Subscription worker options + + + +{`// The SubscriptionWorkerOptions object: +// ===================================== +\{ + subscriptionName; + documentType; + ignoreSubscriberErrors; + closeWhenNoDocsLeft; + maxDocsPerBatch; + timeToWaitBeforeConnectionRetry; + maxErroneousPeriod; + strategy; +\} +`} + + + +When creating a worker with subscription worker options, the only mandatory property is `subscriptionName`. +All other parameters are optional and will default to their respective default values if not specified. + +| Member | Type | Description | +|-------------------------------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **subscriptionName** | `string` | The name of the subscription to which the worker will connect. | +| **documentType** | `object` | The class type of the subscription documents. | +| **ignoreSubscriberErrors** | `boolean` | Determines if subscription processing is aborted when the worker's batch-handling code throws an unhandled exception.

`true` – subscription processing will continue.

`false` (default) – subscription processing will be aborted. | +| **closeWhenNoDocsLeft** | `boolean` | Determines whether the subscription connection closes when no new documents are available.

`true` – The subscription worker processes all available documents and stops when none remain, at which point the `SubscriptionClosedException` will be thrown.
Useful for ad-hoc, one-time processing.

`false` (default) – The subscription worker remains active, waiting for new documents. | +| **maxDocsPerBatch** | `number` | The maximum number of documents that the server will try to retrieve and send to the client in a batch. If the server doesn't find as many documents as specified, it will send the documents it has found without waiting. Default: 4096. | +| **timeToWaitBeforeConnectionRetry** | `number` | The time (in ms) to wait before attempting to reconnect after a non-aborting failure during subscription processing. Default: 5 seconds. | +| **maxErroneousPeriod** | `number` | The maximum amount of time (in ms) a subscription connection can remain in an erroneous state before it is terminated. Default: 5 minutes. | +| **strategy** | `string` | The strategy configures how the server handles connection attempts from workers to a specific subscription task.

Available options:
`OpenIfFree` (default), `TakeOver`, `WaitForFree`, or `Concurrent`.

Learn more in [worker strategies](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies). | + + + +## Run the subscription worker + +After [creating](../../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker) a subscription worker, the subscription worker is still not processing any documents. +To initiate processing, you need to define an event handler and attach it to the worker's `batch` event listener. + +This handler contains your client-side code responsible for processing the document batches received from the server. +Whenever a new batch of documents is ready, the provided handler will be triggered. + + + +{`subscriptionWorker.on("batch", (batch, callback) => \{ + // Process incoming items: + // ======================= + + // 'batch': + // Contains the documents to be processed. + + // callback(): + // Needs to be called after processing the batch + // to notify the worker that you're done processing. +\}); +`} + + + + + +## Subscription batch + +The subscription batch class contains the following public properties & methods: + +| Property | Type | Description | +|-------------------------------|------------|--------------------------------------------------------------------------------------------------------------------------------------------------------| +| **items** | `Item[]` | List of items in the batch.
See [subscription batch item](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscription-batch-item). | + +| Method | Return type | Description | +|-------------------------------|-------------|--------------------------------------------------------------------------------------------------------------------------| +| **getNumberOfItemsInBatch()** | `number` | Get the number of items in the batch. | +| **getNumberOfIncludes()** | `number` | Get the number of included documents in the batch. | +| **openSession()** | `object` | Open a new document session that tracks all items and their included items within the current batch. | +| **openSession(options)** | `object` | Open a new document session - can pass [session options](../../../client-api/session/opening-a-session.mdx#session-options). | + + + +##### Subscription worker connectivity + +As long as there is no exception, the worker will continue addressing the same server that the first batch was received from. +If the worker fails to reach that node, it will try to [failover](../../../client-api/configuration/load-balance/overview.mdx) to another node from the session's topology list. +The node that the worker succeeded connecting to, will inform the worker which node is currently responsible for data subscriptions. + + + + + +## Subscription batch item + +This class represents a single item in a subscription batch result. + + + +{`class Item +\{ + result; + exceptionMessage; + id; + changeVector; + projection; + revision; + rawResult; + rawMetadata; + metadata; +\} +`} + + + +| Member | Type | Description | +|----------------------|-----------|-------------------------------------------------------------------------------------| +| **result** | `object` | The current batch item. | +| **exceptionMessage** | `string` | The exception message thrown during current document processing in the server side. | +| **id** | `string` | The document ID of the underlying document for the current batch item. | +| **changeVector** | `string` | The change vector of the underlying document for the current batch item. | +| **rawResult** | `object` | Current batch item - no types reconstructed. | +| **rawMetadata** | `object` | Current batch item's underlying document metadata. | +| **metadata** | `object` | Current batch item's underlying metadata values. | + + + +## Subscription worker + + + +##### Methods + +| Method | Return type | Description | +|-------------------|---------------|---------------------------------------------------| +| **dispose()** | `void` | Aborts subscription worker operation. | +| **on()** | `object` | Method used to set up event listeners & handlers. | +| **getWorkerId()** | `string` | Get the worker ID. | + + + + + +##### Events + +| Event | Listener signature | Description | +|-----------------------------------|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **"batch"** | `(batch, callback) => void` | Emitted when a batch of documents is sent from the server to the client.

Once processing is done, `callback` *must be called* so that the server can send the next batch. |
| **"afterAcknowledgment"** | `(batch, callback) => void` | Emitted after each time the server acknowledges the progress of batch processing. |
| **"connectionRetry"** | `(error) => void` | Emitted when the worker attempts to reconnect to the server after a failure. |
| **"error"** | `(error) => void` | Emitted on subscription errors. |
| **"end"** | `(error) => void` | Emitted when the subscription is finished.
No more batches will be emitted. |

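
For example, here is a minimal sketch that wires together the methods and events listed above (assuming `documentStore` and `subscriptionName` are already defined; the processing logic is a placeholder):



{`const worker = documentStore.subscriptions.getSubscriptionWorker(\{ subscriptionName \});

worker.on("batch", (batch, callback) => \{
    // Placeholder processing logic - replace with your own
    console.log("Worker " + worker.getWorkerId() + " received " + batch.items.length + " items");

    // Acknowledge the batch so that the server can send the next one
    callback();
\});

worker.on("connectionRetry", error => \{
    console.warn("Reconnecting after a failure:", error);
\});

worker.on("error", error => \{
    console.error("Subscription error:", error);
\});

worker.on("end", () => \{
    // No more batches will be emitted - release the worker
    worker.dispose();
\});
`}



Note that `callback()` is what acknowledges the batch to the server; until it is called, no further `batch` events are emitted.
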
+ + + +##### Properties + +| Member | Type | Description | +|----------------------|----------|-----------------------------------------------------------------------| +| **currentNodeTag** | `string` | The node tag of the current RavenDB server handling the subscription. | +| **subscriptionName** | `string` | The name of the currently processed subscription. | + + + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-python.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-python.mdx new file mode 100644 index 0000000000..e91e65fa4f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_api-overview-python.mdx @@ -0,0 +1,207 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In this page: + * [Create the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker) + * [`SubscriptionWorkerOptions`](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions) + * [Run the subscription worker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#run-the-subscription-worker) + * [`SubscriptionBatch[_T]`](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionbatch[_t]) + * [`SubscriptionWorker[_T]`](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworker[_t]) + + +## Create the subscription worker + +Create a subscription worker using `get_subscription_worker` or `get_subscription_worker_by_name`. + +* Use the `get_subscription_worker` method to specify the subscription options while creating the worker. +* Use the `get_subscription_worker_by_name` method to create the worker using the default options. + + + +{`def get_subscription_worker( + self, options: SubscriptionWorkerOptions, object_type: Optional[Type[_T]] = None, database: Optional[str] = None +) -> SubscriptionWorker[_T]: ... + +def get_subscription_worker_by_name( + self, + subscription_name: Optional[str] = None, + object_type: Optional[Type[_T]] = None, + database: Optional[str] = None, +) -> SubscriptionWorker[_T]: ... +`} + + + +| Parameter | | | +|----------------------------------|-----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------| +| **options** | `SubscriptionWorkerOptions` | Options that affect how the worker interacts with the subscription. These options do not alter the definition of the subscription itself. | +| **object_type** (Optional) | `Type[_T]` | Defines the object type (class) for the items that will be included in the received `SubscriptionBatch` object. | +| **database** (Optional) | `str` | The name of the database where the subscription task resides. If `None`, the default database configured in DocumentStore will be used. | +| **subscription_name** (Optional) | `str` | The subscription's name. Used when the worker is generated without creating a `SubscriptionCreationOptions` instance, relying on the default values. 
| + +| Return value | | +|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------| +| `SubscriptionWorker` | The subscription worker that has been created.
Initially, it is idle and will only start processing documents when the `run` function is called. | + + + +## `SubscriptionWorkerOptions` + +When creating a worker with `SubscriptionWorkerOptions`, the only mandatory property is `subscription_name`. +All other parameters are optional and will default to their respective default values if not specified. + +| Member | Type | Description | +|------------------------------------------|-----------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **subscription_name** | `str` | The name of the subscription to which the worker will connect. | +| **time_to_wait_before_connection_retry** | `timedelta` | The time to wait before attempting to reconnect after a non-aborting failure during subscription processing. Default: 5 seconds. | +| **ignore_subscriber_errors** | `bool` | Determines if subscription processing is aborted when the worker's batch-handling code throws an unhandled exception.

`True` – subscription processing will continue.

`False` (Default) – subscription processing will be aborted. | +| **max_docs_per_batch** | `int` | The maximum number of documents that the server will try to retrieve and send to the client in a batch. If the server doesn't find as many documents as specified, it will send the documents it has found without waiting. Default: 4096. | +| **close_when_no_docs_left** | `bool` | Determines whether the subscription connection closes when no new documents are available.

`True` – The subscription worker processes all available documents and stops when none remain, at which point the `run` method throws a `SubscriptionClosedException`.
Useful for ad-hoc, one-time processing.

`False` (Default) – The subscription worker remains active, waiting for new documents. | +| **send_buffer_size_in_bytes** | `int` | The size in bytes of the TCP socket buffer used for _sending_ data.
Default: 32,768 bytes (32 KiB). | +| **receive_buffer_size_in_bytes** | `int` | The size in bytes of the TCP socket buffer used for _receiving_ data.
Default: 4,096 bytes (4 KiB). |
| **strategy** | `SubscriptionOpeningStrategy`
(enum) | Configures how the server handles connection attempts from workers to a specific subscription task.
Learn more in [worker strategies](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies).
Default: `OPEN_IF_FREE`. | + + + +Learn more about `SubscriptionOpeningStrategy` in [worker strategies](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies). + +| `SubscriptionOpeningStrategy` | | +|---------------------------------|---------------------------------------------------| +| `OPEN_IF_FREE` | Connect if no other worker is connected | +| `WAIT_FOR_FREE` | Wait for currently connected worker to disconnect | +| `TAKE_OVER` | Take over the connection | +| `CONCURRENT` | Connect concurrently | + + + + + +## Run the subscription worker + +After [creating](../../../client-api/data-subscriptions/consumption/api-overview.mdx#create-the-subscription-worker) a subscription worker, the subscription worker is still not processing any documents. +To start processing, you need to call the `run` function of the [SubscriptionWorker](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworker[_t]). + +The `run` function receives the client-side code as a function that will process the received document batches. + + + +{`def run(self, process_documents: Optional[Callable[[SubscriptionBatch[_T]], Any]]) -> Future: ... +`} + + + +| Parameter | | | +|----------------------------------|--------------------------------------------|-----------------------------------| +| **process_documents** (Optional) | `[Callable[[SubscriptionBatch[_T]], Any]]` | Delegate to sync batch processing | + + + + +## `SubscriptionBatch[_T]` + +| Member | Type | Description | +|------------------------------|------------------------------------|------------------------------| +| **items** | `SubscriptionBatch[_T].Item` array | List of items in the batch | +| **number_of_items_in_batch** | `int` | Number of items in the batch | + + + +{`def number_of_items_in_batch(self) -> int: + return 0 if self.items is None else len(self.items) +`} + + + + + +As long as there is no exception, the worker will continue addressing the same +server that the first batch was received from. +If the worker fails to reach that node, it will try to +[failover](../../../client-api/configuration/load-balance/overview.mdx) to another node +from the session's topology list. +The node that the worker succeeds connecting to, will inform the worker which +node is currently responsible for data subscriptions. + + + + +{`class Item(Generic[_T_Item]): + """ + Represents a single item in a subscription batch results. + This class should be used only inside the subscription's run delegate, + using it outside this scope might cause unexpected behavior. 
+ """ +`} + + + + +{`class SubscriptionBatch(Generic[_T]): + +def __init__(self): + self._result: Optional[_T_Item] = None + self._exception_message: Optional[str] = None + self._key: Optional[str] = None + self._change_vector: Optional[str] = None + self._projection: Optional[bool] = None + self._revision: Optional[bool] = None + self.raw_result: Optional[Dict] = None + self.raw_metadata: Optional[Dict] = None + self._metadata: Optional[MetadataAsDictionary] = None +`} + + + +| `SubscriptionBatch[_T].item` Member | Type | Description | +|---------------------------------------|------------------------|---------------------------------------------------------------------------------------| +| **\_result** (Optional) | `_T_Item` | Current batch item | +| **\_exception_message** (Optional) | `str` | Message of the exception thrown during current document processing in the server side | +| **\_key** (Optional) | `str` | Current batch item underlying document ID | +| **\_change_vector** (Optional) | `str` | Current batch item underlying document change vector of the current document | +| **\_projection** (Optional) | `bool` | indicates whether the value id a projection | +| **raw_result** (Optional) | `Dict` | Current batch item before serialization to `T` | +| **raw_metadata** (Optional) | `Dict` | Current batch item underlying document metadata | +| **\_metadata** (Optional) | `MetadataAsDictionary` | Current batch item underlying metadata values | + + +Usage of `raw_result`, `raw_metadata`, and `_metadata` values outside of the document processing delegate +is not supported. + + + + +## `SubscriptionWorker[_T]` +### Methods: + +| Method | Return Type | Description | +|-------------------------------------------------|----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `close(bool wait_for_subscription_task = True)` | `void` | Aborts subscription worker operation ungracefully by waiting for the task returned by the `run` function to finish running. | +| `run` | `Future[None]` | Call `run` to begin the worker's batch processing.
Pass the batch-processing delegate to this method
(see [above](../../../client-api/data-subscriptions/consumption/api-overview.mdx#run-the-subscription-worker)). |
### Events:

| Event | Type / Return type | Description |
|---------------------------|-------------------------------------------|----------------------------------------------------------------------------------|
| **after\_acknowledgment** | `Callable[[SubscriptionBatch[_T]], None]` | Event invoked after each time the server acknowledges batch processing progress. |

| `after_acknowledgment` Parameters | | |
|------------------------------------|-------------------------|---------------------------------------------|
| **batch** | `SubscriptionBatch[_T]` | The batch whose processing was acknowledged |

| Return value | |
|----------------|----------------------------------------------------------------|
| `Future[None]` | The worker waits for this task to finish processing the event |

### Properties:

| Member | Type | Description |
|-----------------------|---------|-----------------------------------------------------------------------|
| **current_node_tag** | `str` | The node tag of the current RavenDB server handling the subscription. |
| **subscription_name** | `str` | The name of the currently processed subscription. |




 diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_category_.json b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_category_.json new file mode 100644 index 0000000000..4f2eeb38b4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_category_.json @@ -0,0 +1,4 @@ +{ +  "position": 2, +  "label": "Consumption" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-csharp.mdx new file mode 100644 index 0000000000..3e4f97e0d8 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-csharp.mdx @@ -0,0 +1,450 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock';



* In this page:
  * [Client with full exception handling and processing retries](../../../client-api/data-subscriptions/consumption/examples.mdx#client-with-full-exception-handling-and-processing-retries)
  * [Worker with a specified batch size](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-with-a-specified-batch-size)
  * [Worker that operates with a session](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-that-operates-with-a-session)
  * [Worker that processes dynamic objects](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-that-processes-dynamic-objects)
  * [Worker that processes a blittable object](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-that-processes-a-blittable-object)
  * [Subscription that ends when no documents are left](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-ends-when-no-documents-are-left)
  * [Subscription that uses included documents](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-uses-included-documents)
  * [Subscription workers with failover on other nodes](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-workers-with-failover-on-other-nodes)
  * [Primary and 
secondary workers](../../../client-api/data-subscriptions/consumption/examples.mdx#primary-and-secondary-workers) + + +## Client with full exception handling and processing retries + +Here we implement a client that handles exceptions thrown by the worker. +If the exception is recoverable, the client retries creating the worker. + + + +{`while (true) +\{ + // Create the worker: + // ================== + var options = new SubscriptionWorkerOptions(subscriptionName); + + // Configure the worker: + // Allow a downtime of up to 2 hours, + // and wait 2 minutes before reconnecting + options.MaxErroneousPeriod = TimeSpan.FromHours(2); + options.TimeToWaitBeforeConnectionRetry = TimeSpan.FromMinutes(2); + + subscriptionWorker = store.Subscriptions.GetSubscriptionWorker(options); + + try + \{ + // Subscribe to connection retry events + // and log any exceptions that occur during processing + subscriptionWorker.OnSubscriptionConnectionRetry += exception => + \{ + Logger.Error("Error during subscription processing: " + subscriptionName, + exception); + \}; + + // Run the worker: + // =============== + await subscriptionWorker.Run(batch => + \{ + foreach (var item in batch.Items) + \{ + // Forcefully stop subscription processing if the ID is "companies/2-A" + // and throw an exception to let external logic handle the specific case + if (item.Result.Company == "companies/2-A") + \{ + // The custom exception thrown from here + // will be wrapped by \`SubscriberErrorException\` + throw new UnsupportedCompanyException( + "Company ID can't be 'companies/2-A', pleases fix"); + \} + + // Process the order document - provide your own logic + ProcessOrder(item.Result); + \} + \}, cancellationToken); + + // The Run method will stop if the subscription worker is disposed, + // exiting the while loop + return; + \} + catch (Exception e) + \{ + Logger.Error("Failure in subscription: " + subscriptionName, e); + + // The following exceptions are Not recoverable + if (e is DatabaseDoesNotExistException || + e is SubscriptionDoesNotExistException || + e is SubscriptionInvalidStateException || + e is AuthorizationException) + throw; + + + if (e is SubscriptionClosedException) + // Subscription probably closed explicitly by admin + return; + + if (e is SubscriberErrorException se) + \{ + // For UnsupportedCompanyException we want to throw an exception, + // otherwise, continue processing + if (se.InnerException != null && se.InnerException is UnsupportedCompanyException) + \{ + throw; + \} + + // Call continue to skip the current while(true) iteration and try reconnecting + // in the next one, allowing the worker to process future batches. + continue; + \} + + // Handle this depending on the subscription opening strategy + if (e is SubscriptionInUseException) + continue; + + // Call return to exit the while(true) loop, + // dispose the worker (via finally), and stop the subscription. + return; + \} + finally + \{ + subscriptionWorker.Dispose(); + \} +\} +`} + + + + + +## Worker with a specified batch size + +Here we create a worker and specify the maximum number of documents the server will send to the worker in each batch. + + + +{`var workerWBatch = store.Subscriptions.GetSubscriptionWorker( + new SubscriptionWorkerOptions(subscriptionName) + \{ + MaxDocsPerBatch = 20 + \}); + +_ = workerWBatch.Run(x => +\{ + // your custom logic +\}); +`} + + + + + +## Worker that operates with a session + +Here we create a subscription that sends _Order_ documents that do not have a shipping date. 
+The worker receiving these documents will update the `ShippedAt` field value and save the document back to the server via the session. + + +Note: +The session is opened with `batch.OpenSession` instead of with `Store.OpenSession`. + + + + +{`// Create the subscription task on the server: +// =========================================== + +var subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions() +\{ + Query = @"from Orders as o where o.ShippedAt = null" +\}); + +// Create the subscription worker that will consume the documents: +// =============================================================== + +var subscriptionWorker = store.Subscriptions.GetSubscriptionWorker(subscriptionName); +_ = subscriptionWorker.Run(batch => +\{ + // Open a session with 'batch.OpenSession' + using (var session = batch.OpenSession()) + \{ + foreach (var order in batch.Items.Select(x => x.Result)) + \{ + TransferOrderToShipmentCompany(order); // call your custom method + order.ShippedAt = DateTime.UtcNow; // update the document field + \} + + // Save the updated Order documents + session.SaveChanges(); + \} +\}); +`} + + + + + +## Worker that processes dynamic objects + +Here we define a subscription that projects the _Order_ documents into a dynamic format. +The worker processes the dynamic objects it receives. + + + +{`// Create the subscription task on the server: +// =========================================== + +var subscriptionName = "My dynamic subscription"; +store.Subscriptions.Create(new SubscriptionCreationOptions() +\{ + Name = subscriptionName, + Projection = order => + new \{ DynanamicField_1 = "Company: " + order.Company + " Employee: " + order.Employee \} +\}); + +// Create the subscription worker that will consume the documents: +// =============================================================== + +var subscriptionWorker = store.Subscriptions.GetSubscriptionWorker(subscriptionName); +_ = subscriptionWorker.Run(batch => +\{ + foreach (var item in batch.Items) + \{ + // Access the dynamic field in the document + dynamic field = item.Result.DynanamicField_1; + + // Call your custom method + ProcessItem(field); + \} +\}); +`} + + + + + +## Worker that processes a blittable object + +Create a worker that processes documents as low level blittable objects. +This can be useful in extreme high-performance scenarios, but may be dangerous due to the direct usage of unmanaged memory. + + + +{`// Create the subscription task on the server: +// =========================================== + +var subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions +\{ + Projection = x => new + \{ + x.Employee + \} +\}); + +// Create the subscription worker that will consume the documents: +// =============================================================== + +var subscriptionWorker = + // Specify \`BlittableJsonReaderObject\` as the generic type parameter + store.Subscriptions.GetSubscriptionWorker(subscriptionName); + +_ = subscriptionWorker.Run(batch => +\{ + foreach (var item in batch.Items) + \{ + // Access the Employee field within the blittable object + var employeeField = item.Result["Employee"].ToString(); + + ProcessItem(employeeField); // call your custom method + \} +\}); +`} + + + + + +## Subscription that ends when no documents are left + +Here we create a subscription client that runs until there are no more new documents to process. +This is useful for ad-hoc, single-use processing where the user needs to ensure that all documents are fully processed. 
+ + + +{`// Create the subscription task on the server: +// =========================================== +var subscriptionName = store.Subscriptions.Create( + new SubscriptionCreationOptions + \{ + Filter = order => order.Lines.Sum(line => line.PricePerUnit * line.Quantity) > 10000, + Projection = order => new OrderAndCompany + \{ + OrderId = order.Id, + Company = RavenQuery.Load(order.Company) + \} + \}); + +// Create the subscription worker that will consume the documents: +// =============================================================== +var highValueOrdersWorker = store.Subscriptions.GetSubscriptionWorker( + new SubscriptionWorkerOptions(subscriptionName) + \{ + // Here we set the worker to stop when there are no more documents left to send + // Will throw SubscriptionClosedException when it finishes it's job + CloseWhenNoDocsLeft = true + \}); + +try +\{ + await highValueOrdersWorker.Run(batch => + \{ + foreach (var item in batch.Items) + \{ + SendThankYouNoteToEmployee(item.Result); // call your custom method + \} + \}); +\} +catch (SubscriptionClosedException) +\{ + // That's expected, no more documents to process +\} +`} + + + + + +## Subscription that uses included documents + +Here we create a subscription that, in addition to sending all the _Order_ documents to the worker, +will include all the referenced _Product_ documents in the batch sent to the worker. + +When the worker accesses these _Product_ documents, no additional requests will be made to the server. + + + +{`// Create the subscription task on the server: +// =========================================== + +var subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions() +\{ + // Include the referenced Product documents for each Order document + Query = @"from Orders include Lines[].Product" +\}); + +// Create the subscription worker that will consume the documents: +// =============================================================== + +var subscriptionWorker = store.Subscriptions.GetSubscriptionWorker(subscriptionName); +_ = subscriptionWorker.Run(batch => +\{ + // Open a session via 'batch.OpenSession' + // in order to access the Product documents + using (var session = batch.OpenSession()) + \{ + foreach (var order in batch.Items.Select(x => x.Result)) + \{ + foreach (var orderLine in order.Lines) + \{ + // Calling Load will Not generate a request to the server, + // because orderLine.Product was included in the batch + var product = session.Load(orderLine.Product); + + ProcessOrderAndProduct(order, product); // call your custom method + \} + \} + \} +\}); +`} + + + + + +## Subscription workers with failover on other nodes + +In this configuration, any available node will create a worker. +If the worker fails, another available node will take over. + + + +{`var worker = store.Subscriptions.GetSubscriptionWorker( + new SubscriptionWorkerOptions(subscriptionName) +\{ + Strategy = SubscriptionOpeningStrategy.WaitForFree +\}); +`} + + + + + +## Primary and secondary workers + +Here we create two workers: + +* The primary worker, with a `TakeOver` strategy, will take over the other worker and establish the connection. +* The secondary worker, with a `WaitForFree` strategy, will wait for the first worker to fail (due to machine failure, etc.). 

The primary worker:


{`var primaryWorker = store.Subscriptions.GetSubscriptionWorker(
    new SubscriptionWorkerOptions(subscriptionName)
\{
    Strategy = SubscriptionOpeningStrategy.TakeOver
\});

while (true)
\{
    try
    \{
        await primaryWorker.Run(x =>
        \{
            // your logic
        \});
    \}
    catch (Exception)
    \{
        // retry
    \}
\}
`}



The secondary worker:


{`var secondaryWorker = store.Subscriptions.GetSubscriptionWorker(
    new SubscriptionWorkerOptions(subscriptionName)
\{
    Strategy = SubscriptionOpeningStrategy.WaitForFree
\});

while (true)
\{
    try
    \{
        await secondaryWorker.Run(x =>
        \{
            // your logic
        \});
    \}
    catch (Exception)
    \{
        // retry
    \}
\}
`}




 diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-java.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-java.mdx new file mode 100644 index 0000000000..295611c535 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-java.mdx @@ -0,0 +1,294 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock';



* In this page:
  * [Worker with a specified batch size](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-with-a-specified-batch-size)
  * [Client with full exception handling and processing retries](../../../client-api/data-subscriptions/consumption/examples.mdx#client-with-full-exception-handling-and-processing-retries)
  * [Subscription that ends when no documents left](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-ends-when-no-documents-left)
  * [Worker that processes raw objects](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-that-processes-raw-objects)
  * [Worker that operates with a session](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-that-operates-with-a-session)
  * [Subscription that uses included documents](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-uses-included-documents)
  * [Primary and secondary workers](../../../client-api/data-subscriptions/consumption/examples.mdx#primary-and-secondary-workers)


## Worker with a specified batch size

Here we create a worker and specify the maximum number of documents the server will send to the worker in each batch.



{`SubscriptionWorkerOptions options = new SubscriptionWorkerOptions(subscriptionName);
options.setMaxDocsPerBatch(20);
SubscriptionWorker workerWBatch = store.subscriptions().getSubscriptionWorker(Order.class, options);
workerWBatch.run(x -> \{ /* custom logic */\});
`}




## Client with full exception handling and processing retries

Here we implement a client that handles exceptions thrown by a worker.
If the exception is recoverable, the client retries creating the worker.
+ + + +{`while (true) \{ + SubscriptionWorkerOptions options = new SubscriptionWorkerOptions(subscriptionName); + // here we configure that we allow a down time of up to 2 hours, + // and will wait for 2 minutes for reconnecting + + options.setMaxErroneousPeriod(Duration.ofHours(2)); + options.setTimeToWaitBeforeConnectionRetry(Duration.ofMinutes(2)); + + subscriptionWorker = store.subscriptions().getSubscriptionWorker(Order.class, options); + + try \{ + // here we are able to be informed of any exception that happens during processing + subscriptionWorker.addOnSubscriptionConnectionRetry(exception -> \{ + logger.error("Error during subscription processing: " + subscriptionName, exception); + \}); + + subscriptionWorker.run(batch -> \{ + for (SubscriptionBatch.Item item : batch.getItems()) \{ + // we want to force close the subscription processing in that case + // and let the external code decide what to do with that + if ("Europe".equalsIgnoreCase(item.getResult().getShipVia())) \{ + throw new IllegalStateException("We cannot ship via Europe"); + \} + processOrder(item.getResult()); + \} + \}).get(); + + + // Run will complete normally if you have disposed the subscription + return; + \} catch (Exception e) \{ + logger.error("Failure in subscription: " + subscriptionName, e); + + e = ExceptionsUtils.unwrapException(e); + if (e instanceof DatabaseDoesNotExistException || + e instanceof SubscriptionDoesNotExistException || + e instanceof SubscriptionInvalidStateException || + e instanceof AuthorizationException) \{ + throw e; // not recoverable + \} + + if (e instanceof SubscriptionClosedException) \{ + // closed explicitly by admin, probably + return; + \} + + if (e instanceof SubscriberErrorException) \{ + SubscriberErrorException se = (SubscriberErrorException) e; + // for IllegalStateException type, we want to throw an exception, otherwise + // we continue processing + if (se.getCause() != null && se.getCause() instanceof IllegalStateException) \{ + throw e; + \} + + continue; + \} + + // handle this depending on subscription + // open strategy (discussed later) + if (e instanceof SubscriptionInUseException) \{ + continue; + \} + + return; + \} finally \{ + subscriptionWorker.close(); + \} +\} +`} + + + + + +## Subscription that ends when no documents left + +Here we create a subscription client that runs only up to the point there are no more new documents left to process. + +This is useful for ad-hoc, single-use processing where the user needs to ensure that all documents are fully processed. + + + +{`SubscriptionWorkerOptions options = new SubscriptionWorkerOptions(subsId); + +// Here we ask the worker to stop when there are no documents left to send. +// Will throw SubscriptionClosedException when it finishes it's job +options.setCloseWhenNoDocsLeft(true); +SubscriptionWorker highValueOrdersWorker = store + .subscriptions().getSubscriptionWorker(OrderAndCompany.class, options); + +try \{ + highValueOrdersWorker.run(batch -> \{ + for (SubscriptionBatch.Item item : batch.getItems()) \{ + sendThankYouNoteToEmployee(item.getResult()); + \} + \}); +\} catch (SubscriptionClosedException e) \{ + //that's expected +\} +`} + + + + + +## Worker that processes raw objects + +Here we create a worker that processes received data as ObjectNode objects. 
+ + + +{`String subscriptionName = "My dynamic subscription"; + +SubscriptionCreationOptions subscriptionCreationOptions = new SubscriptionCreationOptions(); +subscriptionCreationOptions.setName("My dynamic subscription"); +subscriptionCreationOptions.setQuery("from Orders as o \\n" + + "select \{ \\n" + + " DynamicField_1: 'Company:' + o.Company + ' Employee: ' + o.Employee \\n" + + "\}"); + +SubscriptionWorker worker = store.subscriptions().getSubscriptionWorker(subscriptionName); +worker.run(x -> \{ + for (SubscriptionBatch.Item item : x.getItems()) \{ + ObjectNode result = item.getResult(); + raiseNotification(result.get("DynamicField_1")); + \} +\}); +`} + + + + + +## Worker that operates with a session + +Here we create a subscription that sends Order documents that do not have a shipping date. +The worker receiving these documents will update the `ShippedAt` field value and save the document back to the server via the session. + + + +{`SubscriptionCreationOptions subscriptionCreationOptions = new SubscriptionCreationOptions(); +subscriptionCreationOptions.setQuery("from Orders as o where o.ShippedAt = null"); +String subscriptionName = store.subscriptions().create(subscriptionCreationOptions); + +SubscriptionWorker subscriptionWorker = store.subscriptions().getSubscriptionWorker(Order.class, subscriptionName); + +subscriptionWorker.run(batch -> \{ + try (IDocumentSession session = batch.openSession()) \{ + for (SubscriptionBatch.Item orderItem : batch.getItems()) \{ + transferOrderToShipmentCompany(orderItem.getResult()); + orderItem.getResult().setShippedAt(new Date()); + \} + + // we know that we have at least one order to ship, + // because the subscription query above has that in it's WHERE clause + session.saveChanges(); + \} +\}); +`} + + + + + +## Subscription that uses included documents + +Here we create a subscription that, in addition to sending all the _Order_ documents to the worker, +will include all the referenced _Product_ documents in the batch sent to the worker. + +When the worker accesses these _Product_ documents, no additional requests will be made to the server. + + + +{`SubscriptionCreationOptions subscriptionCreationOptions = new SubscriptionCreationOptions(); +subscriptionCreationOptions.setQuery("from Orders include Lines[].Product"); + + +String subscriptionName = store.subscriptions().create(subscriptionCreationOptions); + +SubscriptionWorker subscriptionWorker = store.subscriptions().getSubscriptionWorker(Order.class, subscriptionName); + +subscriptionWorker.run(batch -> \{ + try (IDocumentSession session = batch.openSession()) \{ + for (SubscriptionBatch.Item orderItem : batch.getItems()) \{ + Order order = orderItem.getResult(); + for (OrderLine orderLine : order.getLines()) \{ + // this line won't generate a request, because orderLine.Product was included + Product product = session.load(Product.class, orderLine.getProduct()); + raiseProductNotification(order, product); + \} + \} + \} +\}); +`} + + + + + +## Primary and secondary workers + +Here we create two workers: + +* The primary worker, with a `TAKE_OVER` strategy, will take over the other worker and establish the connection. +* The secondary worker, with a `WAIT_FOR_FREE` strategy, will wait for the first worker to fail (due to machine failure, etc.). 
+ +The primary worker: + + + +{`SubscriptionWorkerOptions options1 = new SubscriptionWorkerOptions(subscriptionName); +options1.setStrategy(SubscriptionOpeningStrategy.TAKE_OVER); +SubscriptionWorker worker1 = store.subscriptions().getSubscriptionWorker(Order.class, options1); + + +while (true) \{ + try \{ + worker1 + .run(x -> \{ + // your logic + \}); + \} catch (Exception e) \{ + // retry + \} +\} +`} + + + +The secondary worker: + + + +{`SubscriptionWorkerOptions options2 = new SubscriptionWorkerOptions(subscriptionName); +options2.setStrategy(SubscriptionOpeningStrategy.WAIT_FOR_FREE); +SubscriptionWorker worker2 = store.subscriptions().getSubscriptionWorker(Order.class, options2); + +while (true) \{ + try \{ + worker2 + .run(x -> \{ + // your logic + \}); + \} catch (Exception e) \{ + // retry + \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-nodejs.mdx new file mode 100644 index 0000000000..e1a57b8406 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-nodejs.mdx @@ -0,0 +1,456 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In this page: + * [Client with full exception handling and processing retries](../../../client-api/data-subscriptions/consumption/examples.mdx#client-with-full-exception-handling-and-processing-retries) + * [Worker with a specified batch size](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-with-a-specified-batch-size) + * [Worker that operates with a session](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-that-operates-with-a-session) + * [Worker that processes dynamic objects](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-that-processes-dynamic-objects) + * [Subscription that ends when no documents are left](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-ends-when-no-documents-are-left) + * [Subscription that uses included documents](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-uses-included-documents) + * [Primary and secondary workers](../../../client-api/data-subscriptions/consumption/examples.mdx#primary-and-secondary-workers) + + +## Client with full exception handling and processing retries + +Here we implement a client that handles exceptions thrown by the worker. +If the exception is recoverable, the client retries creating the worker. 
+ + + +{`// Create the subscription task on the server: +// =========================================== + +const subscriptionName = await documentStore.subscriptions.create(\{ + name: "ProcessOrdersWithLowFreight", + query: "from Orders where Freight < 0.5" +\}); + +// Create the subscription worker that will consume the documents: +// =============================================================== + +await setupReconnectingWorker(subscriptionName); + +async function setupReconnectingWorker(subscriptionName) \{ + let subscriptionWorker; + + await reconnect(); + + function closeWorker(worker) \{ + worker.dispose(); + \} + + async function reconnect() \{ + if (subscriptionWorker) \{ + closeWorker(subscriptionWorker); + \} + + // Configure the worker: + const subscriptionWorkerOptions = \{ + subscriptionName: subscriptionName, + // Allow a downtime of up to 2 hours + maxErroneousPeriod: 2 * 3600 * 1000, + // Wait 2 minutes before reconnecting + timeToWaitBeforeConnectionRetry: 2 * 60 * 1000 + \}; + + subscriptionWorker = + store.subscriptions.getSubscriptionWorker(subscriptionWorkerOptions); + + // Subscribe to connection retry events, + // and log any exceptions that occur during processing + subscriptionWorker.on("connectionRetry", error => \{ + console.error( + "Error during subscription processing: " + subscriptionName, error); + \}); + + // Run the worker: + // =============== + subscriptionWorker.on("batch", (batch, callback) => \{ + try \{ + for (const item of batch.items) \{ + const orderDocument = item.result; + + // Forcefully stop subscription processing if the ID is "companies/46-A" + // and throw an exception to let external logic handle the specific case + if (orderDocument.Company && orderDocument.Company === "companies/46-A") \{ + // 'The InvalidOperationException' thrown from here + // will be wrapped by \`SubscriberErrorException\` + callback(new InvalidOperationException( + "Company ID can't be 'companies/46-A', pleases fix")); + return; + \} + + // Process the order document - provide your own logic + processOrder(orderDocument); + \} + // Call 'callback' once you're done + // The worker will send an acknowledgement to the server, + // so that server can send next batch + callback(); + \} + catch(err) \{ + callback(err); + \} + \}); + + // Handle errors: + // ============== + subscriptionWorker.on("error", error => \{ + console.error("Failure in subscription: " + subscriptionName, error); + + // The following exceptions are Not recoverable + if (error.name === "DatabaseDoesNotExistException" || + error.name === "SubscriptionDoesNotExistException" || + error.name === "SubscriptionInvalidStateException" || + error.name === "AuthorizationException") \{ + throw error; + \} + + if (error.name === "SubscriptionClosedException") \{ + // Subscription probably closed explicitly by admin + return closeWorker(subscriptionWorker); + \} + + if (error.name === "SubscriberErrorException") \{ + // For the InvalidOperationException we want to throw an exception, + // otherwise, continue processing + if (error.cause && error.cause.name === "InvalidOperationException") \{ + throw error; + \} + + setTimeout(reconnect, 1000); + return; + \} + + // Handle this depending on the subscription opening strategy + if (error.name === "SubscriptionInUseException") \{ + setTimeout(reconnect, 1000); + return; + \} + + setTimeout(reconnect, 1000); + return; + \}); + + // Handle worker end event: + // ======================== + subscriptionWorker.on("end", () => \{ + closeWorker(subscriptionWorker); + \}); 
+ \} +\} +`} + + + + + +## Worker with a specified batch size + +Here we create a worker and specify the maximum number of documents the server will send to the worker in each batch. + + + +{`// Create the subscription task on the server: +// =========================================== + +const subscriptionName = await documentStore.subscriptions.create(\{ + name: "ProcessOrders", + query: "from Orders" +\}); + +// Create the subscription worker that will consume the documents: +// =============================================================== + +const workerOptions = \{ + subscriptionName: subscriptionName, + maxDocsPerBatch: 20 // Set the maximum number of documents per batch +\}; + +const worker = documentStore.subscriptions.getSubscriptionWorker(workerOptions); + +worker.on("batch", (batch, callback) => \{ + try \{ + // Add your logic for processing the incoming batch items here... + + // Call 'callback' once you're done + // The worker will send an acknowledgement to the server, + // so that server can send next batch + callback(); + + \} catch(err) \{ + callback(err); + \} +\}); +`} + + + + + +## Worker that operates with a session + +Here we create a subscription that sends _Order_ documents that do not have a shipping date. +The worker receiving these documents will update the `ShippedAt` field value and save the document back to the server via the session. + + +Note: +The session is opened with `batch.openSession` instead of with `documentStore.openSession`. + + + + +{`// Create the subscription task on the server: +// =========================================== + +const subscriptionName = await documentStore.subscriptions.create(\{ + name: "ProcessOrdersThatWereNotShipped", + query: "from Orders as o where o.ShippedAt = null" +\}); + +// Create the subscription worker that will consume the documents: +// =============================================================== + +const workerOptions = \{ subscriptionName \}; +const worker = documentStore.subscriptions.getSubscriptionWorker(workerOptions); + +worker.on("batch", async (batch, callback) + try \{ + // Open a session with 'batch.openSession' + const session = batch.openSession(); + + for (const item of batch.items) \{ + orderDocument = item.result; + + transferOrderToShipmentCompany(orderDocument); // call your custom method + orderDocument.ShippedAt = new Date(); // update the document field + \} + + // Save the updated Order documents + await session.saveChanges(); + callback(); + + \} catch(err) \{ + callback(err); + \} +\}); +`} + + + + + +## Worker that processes dynamic objects + +Here we define a subscription that projects the _Order_ documents into a dynamic format. +The worker processes the dynamic objects it receives. 
+ + + +{`// Create the subscription task on the server: +// =========================================== + +const subscriptionName = await documentStore.subscriptions.create(\{ + name: "ProcessDynamicFields", + query: \`From Orders as o + Select \{ + dynamicField: "Company: " + o.Company + " Employee: " + o.Employee, + \}\` +\}); + +// Create the subscription worker that will consume the documents: +// =============================================================== + +const workerOptions = \{ subscriptionName \}; +const worker = documentStore.subscriptions.getSubscriptionWorker(workerOptions); + +worker.on("batch", (batch, callback) => \{ + for (const item of batch.items) \{ + + // Access the dynamic field in the document + const field = item.result.dynamicField; + + // Call your custom method + processItem(field); + \} + + callback(); +\}); +`} + + + + + +## Subscription that ends when no documents are left + +Here we create a subscription client that runs until there are no more new documents to process. +This is useful for ad-hoc, single-use processing where the user needs to ensure that all documents are fully processed. + + + +{`// Create the subscription task on the server: +// =========================================== + +// Define the filtering criteria +const query = \` + declare function getOrderLinesSum(doc) \{ + var sum = 0; + for (var i in doc.Lines) \{ + sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity; + \} + return sum; + \} + + from Orders as o + where getOrderLinesSum(o) > 10_000\`; + +// Create the subscription with the defined query +const subscriptionName = await documentStore.subscriptions.create(\{ query \}); + +// Create the subscription worker that will consume the documents: +// =============================================================== + +const workerOptions = \{ + subscriptionName: subscriptionName, + // Here we set the worker to stop when there are no more documents left to send + // Will throw SubscriptionClosedException when it finishes it's job + closeWhenNoDocsLeft: true +\}; + +const highValueOrdersWorker = + documentStore.subscriptions.getSubscriptionWorker(workerOptions); + +highValueOrdersWorker.on("batch", (batch, callback) => \{ + for (const item of batch.items) \{ + sendThankYouNoteToEmployee(item.result); // call your custom method + \} + + callback(); +\}); + +highValueOrdersWorker.on("error", err => \{ + if (err.name === "SubscriptionClosedException") \{ + // That's expected, no more documents to process + \} +\}); +`} + + + + + +## Subscription that uses included documents + +Here we create a subscription that, in addition to sending all the _Order_ documents to the worker, +will include all the referenced _Product_ documents in the batch sent to the worker. + +When the worker accesses these _Product_ documents, no additional requests will be made to the server. 


{`// Create the subscription task on the server:
// ===========================================

const subscriptionName = await documentStore.subscriptions.create(\{
    name: "ProcessIncludedDocuments",
    query: \`from Orders include Lines[].Product\`
\});

// Create the subscription worker that will consume the documents:
// ===============================================================

const workerOptions = \{ subscriptionName \};
const worker = documentStore.subscriptions.getSubscriptionWorker(workerOptions);

worker.on("batch", async (batch, callback) => \{
    // Open a session via 'batch.openSession'
    // in order to access the Product documents
    const session = batch.openSession();

    for (const item of batch.items) \{
        const orderDocument = item.result;

        for (const orderLine of orderDocument.Lines)
        \{
            // Calling 'load' will Not generate a request to the server,
            // because orderLine.Product was included in the batch
            const product = await session.load(orderLine.Product);

            // Call your custom method
            processOrderAndProduct(orderDocument, product);
        \}
    \}

    callback();
\});
`}




## Primary and secondary workers

Here we create two workers:

* The primary worker, with a `TakeOver` strategy, will take over the other worker and establish the connection.
* The secondary worker, with a `WaitForFree` strategy, will wait for the first worker to fail (due to machine failure, etc.).

The primary worker:



{`const workerOptions1 = \{
    subscriptionName,
    strategy: "TakeOver",
    documentType: Order
\};

const worker1 = documentStore.subscriptions.getSubscriptionWorker(workerOptions1);

worker1.on("batch", (batch, callback) => \{
    // your logic
    callback();
\});

worker1.on("error", err => \{
    // retry
\});
`}



The secondary worker:



{`const workerOptions2 = \{
    subscriptionName,
    strategy: "WaitForFree",
    documentType: Order
\};

const worker2 = documentStore.subscriptions.getSubscriptionWorker(workerOptions2);

worker2.on("batch", (batch, callback) => \{
    // your logic
    callback();
\});

worker2.on("error", err => \{
    // retry
\});
`}




 diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-python.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-python.mdx new file mode 100644 index 0000000000..37569345e2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_examples-python.mdx @@ -0,0 +1,314 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock';



* In this page:
  * [Client with full exception handling and processing retries](../../../client-api/data-subscriptions/consumption/examples.mdx#client-with-full-exception-handling-and-processing-retries)
  * [Worker with a specified batch size](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-with-a-specified-batch-size)
  * [Worker that operates with a session](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-that-operates-with-a-session)
  * [Worker that processes dynamic objects](../../../client-api/data-subscriptions/consumption/examples.mdx#worker-that-processes-dynamic-objects)
  * [Subscription that ends when no documents are 
left](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-ends-when-no-documents-are-left) + * [Subscription that uses included documents](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-uses-included-documents) + * [Subscription workers with failover on other nodes](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-workers-with-failover-on-other-nodes) + * [Primary and secondary workers](../../../client-api/data-subscriptions/consumption/examples.mdx#primary-and-secondary-workers) + + +## Client with full exception handling and processing retries + +Here we implement a client that handles exceptions thrown by a worker. +If the exception is recoverable, the client retries creating the worker. + + + +{`while True: + options = SubscriptionWorkerOptions(subscription_name) + + # here we configure that we allow a down time of up to 2 hours, and will wait for 2 minutes for reconnecting + options.max_erroneous_period = timedelta(hours=2) + options.time_to_wait_before_connection_retry = timedelta(minutes=2) + + subscription_worker = store.subscriptions.get_subscription_worker(options, Order) + + try: + # here we are able to be informed of any exceptions that happens during processing + subscription_worker.add_on_subscription_connection_retry( + lambda exception: logger.error( + f"Error during subscription processing: \{subscription_name\}", exc_info=exception + ) + ) + + def _process_documents_callback(batch: SubscriptionBatch[Order]): + for item in batch.items: + # we want to force close the subscription processing in that case + # and let the external code decide what to do with that + if item.result.company == "companies/2-A": + raise UnsupportedCompanyException( + "Company Id can't be 'companies/2-A', you must fix this" + ) + process_order(item.result) + + # Run will complete normally if you have disposed the subscription + return + + # Pass the callback to worker.run() + subscription_worker.run(_process_documents_callback) + + except Exception as e: + logger.error(f"Failure in subscription: \{subscription_name\}", exc_info=e) + exception_type = type(e) + if ( + exception_type is DatabaseDoesNotExistException + or exception_type is SubscriptionDoesNotExistException + or exception_type is SubscriptionInvalidStateException + or exception_type is AuthorizationException + ): + raise # not recoverable + + if exception_type is SubscriptionClosedException: + # closed explicitely by admin, probably + return + + if exception_type is SubscriberErrorException: + # for UnsupportedCompanyException type, we want to throw an exception, otherwise + # we continue processing + if e.args[1] is not None and type(e.args[1]) is UnsupportedCompanyException: + raise + + continue + + # handle this depending on subscription + # open strategy (discussed later) + if e is SubscriptionInUseException: + continue + + return + finally: + subscription_worker.close(False) +`} + + + + + +## Worker with a specified batch size + +Here we create a worker and specify the maximum number of documents the server will send to the worker in each batch. + + + +{`worker_w_batch = store.subscriptions.get_subscription_worker( + SubscriptionWorkerOptions(subscription_name, max_docs_per_batch=20), Order +) + +_ = worker_w_batch.run( + process_documents=lambda batch: ... 
+) # Pass your method that takes SubscriptionBatch[_T] as an argument, with your logic in it +`} + + + + + +## Worker that operates with a session + +Here we create a subscription that sends _Order_ documents that do not have a shipping date. +The worker receiving these documents will update the `ShippedAt` field value and save the document back to the server via the session. + + + +{`subscription_name = store.subscriptions.create_for_options( + SubscriptionCreationOptions(query="from Orders as o where o.ShippedAt = null") +) + +subscription_worker = store.subscriptions.get_subscription_worker_by_name(subscription_name, Order) + +def _transfer_order_callback(batch: SubscriptionBatch[Order]): + with batch.open_session() as session: + for order in (item.result for item in batch.items): + transfer_order_to_shipment_company(order) + order.shipped_at = datetime.utcnow() + + # we know that we have at least one order to ship, + # because the subscription query above has that in it's WHERE clause + session.save_changes() + +_ = subscription_worker.run(_transfer_order_callback) +`} + + + + + +## Worker that processes dynamic objects + +Here we define a subscription that projects the _Order_ documents into a dynamic format. +The worker processes the dynamic objects it receives. + + + +{`subscription_name = "My dynamic subscription" +store.subscriptions.create_for_class( + Order, + SubscriptionCreationOptions( + subscription_name, + query=""" + From Orders as o + Select + \{ + dynamic_field_1: "Company: " + o.Company + " Employee: " + o.Employee, + \} + """, + ), +) + +subscription_worker = store.subscriptions.get_subscription_worker_by_name(subscription_name) + +def _raise_notification_callback(batch: SubscriptionBatch[Order]): + for item in batch.items: + raise_notification(item.result.dynamic_field_1) + +_ = subscription_worker.run(_raise_notification_callback) +`} + + + + + +## Subscription that ends when no documents are left + +Here we create a subscription client that runs only up to the point there are no more new documents left to process. + +This is useful for ad-hoc, single-use processing where the user needs to ensure that all documents are fully processed. + + + +{`high_value_orders_worker = store.subscriptions.get_subscription_worker( + SubscriptionWorkerOptions( + subs_id, + # Here we ask the worker to stop when there are no documents left to send. + # Will throw SubscriptionClosedException when it finishes its job + close_when_no_docs_left=True, + ), + OrderAndCompany, +) + +try: + + def _subscription_batch_callback(batch: SubscriptionBatch[OrderAndCompany]): + for item in batch.items: + send_thank_you_note_to_employee(item.result) + + high_value_orders_worker.run(_subscription_batch_callback) +except SubscriptionClosedException: + # that's expected + ... +`} + + + + + +## Subscription that uses included documents + +Here we create a subscription that, in addition to sending all the _Order_ documents to the worker, +will include all the referenced _Product_ documents in the batch sent to the worker. + +When the worker accesses these _Product_ documents, no additional requests will be made to the server. 
+
+
+
+{`// Create the subscription task on the server:
+// ===========================================
+
+var subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+\{
+    // Include the referenced Product documents for each Order document
+    Query = @"from Orders include Lines[].Product"
+\});
+
+// Create the subscription worker that will consume the documents:
+// ===============================================================
+
+var subscriptionWorker = store.Subscriptions.GetSubscriptionWorker<Order>(subscriptionName);
+_ = subscriptionWorker.Run(batch =>
+\{
+    // Open a session via 'batch.OpenSession'
+    // in order to access the Product documents
+    using (var session = batch.OpenSession())
+    \{
+        foreach (var order in batch.Items.Select(x => x.Result))
+        \{
+            foreach (var orderLine in order.Lines)
+            \{
+                // Calling Load will Not generate a request to the server,
+                // because orderLine.Product was included in the batch
+                var product = session.Load<Product>(orderLine.Product);
+
+                ProcessOrderAndProduct(order, product); // call your custom method
+            \}
+        \}
+    \}
+\});
+`}
+
+
+
+
+
+## Subscription workers with failover on other nodes
+
+In this configuration, any available node will create a worker.
+If the worker fails, another available node will take over.
+
+
+
+{`worker = store.subscriptions.get_subscription_worker(
+    SubscriptionWorkerOptions(subscription_name, strategy=SubscriptionOpeningStrategy.WAIT_FOR_FREE), Order
+)
+`}
+
+
+
+
+
+## Primary and secondary workers
+
+Here we create two workers:
+
+* The primary worker, with a `TAKE_OVER` strategy, will take over the other worker and establish the connection.
+* The secondary worker, with a `WAIT_FOR_FREE` strategy, will wait for the first worker to fail (due to machine failure, etc.).
+
+The primary worker:
+
+
+{`primary_worker = store.subscriptions.get_subscription_worker(SubscriptionWorkerOptions(subscription_name, strategy=SubscriptionOpeningStrategy.TAKE_OVER), Order)
+
+while True:
+    try:
+        run_future = primary_worker.run(lambda batch: ...)  # your logic
+    except Exception:
+        ...  # retry
+`}
+
+
+
+The secondary worker:
+
+
+{`secondary_worker = store.subscriptions.get_subscription_worker(SubscriptionWorkerOptions(subscription_name, strategy=SubscriptionOpeningStrategy.WAIT_FOR_FREE), Order)
+
+while True:
+    try:
+        run_future = secondary_worker.run(lambda batch: ...)  # your logic
+    except Exception:
+        ...  # retry
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-csharp.mdx
new file mode 100644
index 0000000000..b5114a2de6
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-csharp.mdx
@@ -0,0 +1,198 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Batches of documents sent from a Subscription Task defined on the server are consumed and processed by a subscription worker client.
+
+* The `SubscriptionWorker` object, defined on the client, manages the communication between the server and the client and processes the document batches sent from the server.
+
+* There are several ways to create and configure the SubscriptionWorker - see [SubscriptionWorkerOptions](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions).
+
+* In this page:
+  * [SubscriptionWorker lifecycle](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#subscriptionworker-lifecycle)
+  * [Error handling](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#error-handling)
+  * [Worker strategies](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies)
+  * [Determining which workers a subscription will serve](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#determining-which-workers-a-subscription-will-serve)
+
+
+## SubscriptionWorker lifecycle
+
+A `SubscriptionWorker` object starts its life when it is generated by `DocumentStore.Subscriptions`:
+
+
+{`subscriptionWorker = store.Subscriptions.GetSubscriptionWorker<Order>(subscriptionName);
+`}
+
+
+
+At this point, the worker only holds its configuration; no connection or processing happens yet.
+To start processing, call the `Run` method, passing it the batch processing logic to perform:
+
+
+{`subscriptionRuntimeTask = subscriptionWorker.Run(batch =>
+\{
+    // your logic here
+\});
+`}
+
+
+
+From this point on, the subscription worker will start processing batches.
+If processing is aborted for any reason, the returned task (`subscriptionRuntimeTask`) will complete with an exception.
+
+
+
+## Error handling
+
+
+
+Subscription worker connection failures may occur during the routine communication between the worker and the server.
+When an unexpected error arises, the worker will attempt to **reconnect to the server**.
+
+However, there are several conditions under which the worker will stop its operation but will Not attempt to reconnect:
+
+* The subscription no longer exists or has been deleted.
+* Another worker has taken control of the subscription (see [connection strategy](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies)).
+* The worker is unable to connect to any of the servers.
+* The worker could not retrieve the node responsible for the task
+  (this can happen when there is no leader in the cluster).
+* An authorization exception occurred.
+* An exception occurred during the connection establishment phase.
+* The database doesn't exist.
+
+
+
+
+
+An exception may occur while processing a batch of documents in the worker.
+For example:
+
+
+
+{`_ = workerWBatch.Run(x => throw new Exception());
+`}
+
+
+
+When creating a worker, the worker can be configured to handle these exceptions in either of the following ways,
+depending on the `IgnoreSubscriberErrors` property in [SubscriptionWorkerOptions](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions):
+
+* **Abort processing completely**
+  When `IgnoreSubscriberErrors = false` (default):
+  The current batch processing will be aborted, and in this case, the worker will wrap the thrown exception in a `SubscriberErrorException` and will rethrow it.
+  Processing of the subscription will be terminated without acknowledging progress to the server or retrying to connect.
+  As a result, the task returned by the `Run` function will complete in an erroneous state, throwing a _SubscriberErrorException_.
+
+* **Continue processing subsequent batches**
+  When `IgnoreSubscriberErrors = true`:
+  The current batch processing will be aborted; however, the erroneous batch will be acknowledged without retrying,
+  and processing will continue with the next batches.
+
+
+
+
+
+Two properties in the [SubscriptionWorkerOptions](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions)
+object control the behavior of a worker attempting to reconnect with the server:
+
+* `TimeToWaitBeforeConnectionRetry`
+  The time the worker will wait before attempting to reconnect.
+  Default: 5 seconds.
+* `MaxErroneousPeriod`
+  The maximum amount of time the subscription connection can remain in an erroneous state.
+  Once this period is exceeded, the worker will stop trying to reconnect.
+  Default: 5 minutes.
+
+
+
+
+
+A worker will time out after losing its connectivity with the server for a given time period.
+
+* The timeout period can be set using the `ConnectionStreamTimeout` option. E.g.:
+
+
+{`var options = new SubscriptionWorkerOptions(subscriptionName);
+
+// Set the worker's timeout period
+options.ConnectionStreamTimeout = TimeSpan.FromSeconds(45);
+`}
+
+
+* Default timeout period: 30 seconds
+
+
+
+
+
+`OnUnexpectedSubscriptionError` is the event that is triggered when a connection failure occurs between the subscription worker and the server,
+resulting in an unexpected exception.
+When this happens, the worker will automatically attempt to reconnect.
+This event is useful for logging these unexpected exceptions.
+
+
+
+
+
+## Worker strategies
+
+Subscription workers are configured with a **strategy** that determines whether multiple workers
+can connect to the subscription concurrently or if only one worker can connect at a time.
+
+The _one-worker-at-a-time_ strategy also determines how the workers interact with each other
+to resolve which will establish the subscription connection.
+### One worker per subscription strategies
+
+The following three strategies allow only a **single worker to connect to the subscription at any given time**,
+and determine what happens when one worker is connected and another tries to connect.
+
+* `SubscriptionOpeningStrategy.OpenIfFree`
+  The server will allow a worker to connect only if no other worker is currently connected.
+  If there is an existing connection, the incoming worker will throw a `SubscriptionInUseException`.
+* `SubscriptionOpeningStrategy.WaitForFree`
+  If the worker cannot open the subscription because it is in use by another worker, it will wait for the currently connected worker to disconnect before establishing the connection.
+  This is useful in worker failover scenarios, where one worker is connected while another is awaiting its turn to take its place.
+* `SubscriptionOpeningStrategy.TakeOver`
+  The server will allow an incoming connection to take over an existing one,
+  based on the connection strategy in use by the currently connected worker:
+  * If the existing connection **does not** have a `TakeOver` strategy:
+    The incoming connection will take over, causing the existing connection to throw a `SubscriptionInUseException`.
+  * If the existing connection **has** a `TakeOver` strategy:
+    The incoming connection will throw a `SubscriptionInUseException` exception.
+### Multiple workers per subscription strategy
+
+* `SubscriptionOpeningStrategy.Concurrent`
+  The server allows multiple workers to connect to the same subscription **concurrently**.
+  Read more about concurrent subscriptions [here](../../../client-api/data-subscriptions/concurrent-subscriptions.mdx).
+
+
+
+## Determining which workers a subscription will serve
+
+
+
+The **strategy used by the first worker connecting to a subscription** determines
+which additional workers the subscription can serve until all worker connections are dropped.
+
+
+
+* A subscription that serves one or more [concurrent](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#multiple-workers-per-subscription-strategy) workers,
+  **can only serve other concurrent workers** until all connections are dropped.
+  If a worker with a [one worker per subscription](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#one-worker-per-subscription-strategies)
+  strategy attempts to connect -
+  * The connection attempt will be rejected.
+  * `SubscriptionInUseException` will be thrown.
+
+* A subscription that serves a worker with a [one worker per subscription](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#one-worker-per-subscription-strategies) strategy,
+  **cannot** serve [concurrent](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#multiple-workers-per-subscription-strategy)
+  workers until that worker's connection is dropped.
+  If a concurrent worker attempts to connect -
+  * The connection attempt will be rejected.
+  * `SubscriptionInUseException` will be thrown.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-java.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-java.mdx
new file mode 100644
index 0000000000..5c80940a9f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-java.mdx
@@ -0,0 +1,129 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+Subscriptions are consumed by processing batches of documents received from the server.
+A `SubscriptionWorker` object manages the document processing and the communication between the client and the server according to a set of configurations received upon its creation.
+There are several ways to create and configure a SubscriptionWorker, ranging from just providing a subscription name to passing a detailed configuration object - `SubscriptionWorkerOptions`.
+
+* In this page:
+  * [SubscriptionWorker lifecycle](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#subscriptionworker-lifecycle)
+  * [Error handling](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#error-handling)
+  * [Worker strategies](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies)
+
+
+## SubscriptionWorker lifecycle
+
+A `SubscriptionWorker` object starts its life when it is generated by `DocumentStore.subscriptions`:
+
+
+
+{`subscriptionWorker = store.subscriptions().getSubscriptionWorker(Order.class, subscriptionName);
+`}
+
+
+
+At this point, the worker only holds its configuration; no connection or processing happens yet.
+To start processing, call the `run` method.
The `run` method receives the batch processing logic that should be performed:
+
+
+
+{`subscriptionRuntimeTask = subscriptionWorker.run(batch -> \{
+    // your logic here
+\});
+`}
+
+
+
+From this point on, the subscription worker will start processing batches. If processing is aborted for any reason, the returned task (`subscriptionRuntimeTask`) will complete with an exception.
+
+
+
+## Error handling
+
+
+
+Subscription worker connection failures may occur during the routine communication between the worker and the server.
+When an unexpected error arises, the worker will attempt to **reconnect to the server**.
+
+However, there are several conditions under which the worker will stop its operation but will Not attempt to reconnect:
+
+* The subscription no longer exists or has been deleted.
+* Another worker has taken control of the subscription (see [connection strategy](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies)).
+* The worker is unable to connect to any of the servers.
+* The worker could not retrieve the node responsible for the task
+  (this can happen when there is no leader in the cluster).
+* An authorization exception occurred.
+* An exception occurred during the connection establishment phase.
+* The database doesn't exist.
+
+
+
+
+
+An exception may occur while processing a batch of documents in the worker.
+For example:
+
+
+
+{`workerWBatch.run(x -> \{
+    throw new RuntimeException();
+\});
+`}
+
+
+
+When creating a worker, the worker can be configured to handle these exceptions in either of the following ways,
+depending on the `ignoreSubscriberErrors` property in [SubscriptionWorkerOptions](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions):
+
+* **Abort processing completely**
+  When `ignoreSubscriberErrors` is set to _false_ (default):
+  The current batch processing will be aborted, and in this case, the worker will wrap the thrown exception in a `SubscriberErrorException` and will rethrow it.
+  Processing of the subscription will be terminated without acknowledging progress to the server or retrying to connect.
+  As a result, the task returned by the `run` method will complete in an erroneous state, throwing a _SubscriberErrorException_.
+
+* **Continue processing subsequent batches**
+  When `ignoreSubscriberErrors` is set to _true_:
+  The current batch processing will be aborted; however, the erroneous batch will be acknowledged without retrying,
+  and processing will continue with the next batches.
+
+
+
+
+
+Two properties in the [SubscriptionWorkerOptions](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions)
+object control the behavior of a worker attempting to reconnect with the server:
+
+* `timeToWaitBeforeConnectionRetry`
+  The time the worker will wait before attempting to reconnect.
+  Default: 5 seconds.
+* `maxErroneousPeriod`
+  The maximum amount of time the subscription connection can remain in an erroneous state.
+  Once this period is exceeded, the worker will stop trying to reconnect.
+  Default: 5 minutes.
+
+
+
+
+
+## Worker strategies
+
+Only one subscription worker can be actively working on a subscription at any given time.
+Nevertheless, there are scenarios that require interaction between an existing subscription worker and one that tries to connect.
+This relationship and interoperation are configured by the `SubscriptionConnectionOptions` `Strategy` field.
+The strategy field is an enum with the following values:
+
+* `OPEN_IF_FREE` - the server will allow the worker to connect only if there is no other currently connected worker.
+  If there is an existing connection, the incoming worker will throw a SubscriptionInUseException.
+* `WAIT_FOR_FREE` - if the client currently cannot open the subscription because it is used by another client, it will wait for the previous client to disconnect and only then connect.
+  This is useful in client failover scenarios where there is one active client and another one already waiting to take its place.
+* `TAKE_OVER` - the server will allow an incoming connection to take over an existing one. It will behave according to the existing connection's strategy:
+  * The existing connection has a strategy that is not `TAKE_OVER`. In this case, the incoming connection will take over, causing the existing connection to throw a SubscriptionInUseException exception.
+  * The existing connection has a strategy that is `TAKE_OVER`. In this case, the incoming connection will throw a SubscriptionInUseException exception.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-nodejs.mdx
new file mode 100644
index 0000000000..07a785a3ad
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-nodejs.mdx
@@ -0,0 +1,204 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Batches of documents sent from a Subscription Task defined on the server are consumed and processed by a subscription worker client.
+
+* The `SubscriptionWorker` object, defined on the client, manages the communication between the server and the client and processes the document batches sent from the server.
+
+* There are several ways to create and configure the SubscriptionWorker - see [subscription worker options](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions).
+
+* In this page:
+  * [SubscriptionWorker lifecycle](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#subscriptionworker-lifecycle)
+  * [Error handling](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#error-handling)
+  * [Worker strategies](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies)
+  * [Determining which workers a subscription will serve](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#determining-which-workers-a-subscription-will-serve)
+
+
+## SubscriptionWorker lifecycle
+
+Create a `SubscriptionWorker` object by calling `getSubscriptionWorker`:
+
+
+
+{`const worker = documentStore.subscriptions.getSubscriptionWorker(\{
+    subscriptionName: "your subscription name"
+\});
+`}
+
+
+
+At this stage, the worker is initialized; no connection to the server or document processing occurs yet.
+
+To start handling documents from the subscription, you need to define a listener for the `batch` event.
+This event is triggered whenever a new batch of documents is received.
+
+Add an event handler using the `on` method of the worker object to process incoming batches:
+
+
+
+{`worker.on("batch", (batch, callback) => \{
+    try \{
+        // Add your logic for processing the incoming batch items here...
+
+        // Call 'callback' once you're done
+        // The worker will send an acknowledgement to the server,
+        // allowing the server to send the next batch
+        callback();
+
+    \} catch(err) \{
+        // If processing fails for a particular batch then pass the error to the callback
+        callback(err);
+    \}
+\});
+`}
+
+
+
+Once the event handler is defined, the worker will begin processing batches of documents sent by the server.
+Each batch must be acknowledged by calling `callback()` once processing is complete.
+
+
+
+## Error handling
+
+
+
+Subscription worker connection failures may occur during the routine communication between the worker and the server.
+When an unexpected error arises, the worker will attempt to **reconnect to the server**.
+
+However, there are several conditions under which the worker will stop its operation but will Not attempt to reconnect:
+
+* The subscription no longer exists or has been deleted.
+* Another worker has taken control of the subscription (see [connection strategy](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies)).
+* The worker is unable to connect to any of the servers.
+* The worker could not retrieve the node responsible for the task
+  (this can happen when there is no leader in the cluster).
+* An authorization exception occurred.
+* An exception occurred during the connection establishment phase.
+* The database doesn't exist.
+
+
+
+
+
+An exception may occur while processing a batch of documents in the worker.
+For example:
+
+
+
+{`worker.on("batch", (batch, callback) => \{
+    try \{
+        throw new Error("Exception occurred");
+    \} catch (err) \{
+        callback(err); // Pass the error to the callback to signal failure
+    \}
+\});
+`}
+
+
+
+When creating a worker, the worker can be configured to handle these exceptions in either of the following ways,
+depending on the `ignoreSubscriberErrors` property in the [subscription worker options](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions):
+
+* **Abort processing completely**
+  When `ignoreSubscriberErrors` is `false` (default):
+  The current batch processing will be aborted, and in this case, the worker will wrap the thrown exception in a `SubscriberErrorException` and will rethrow it.
+  Processing of the subscription will be terminated without acknowledging progress to the server or retrying to connect.
+  As a result, the worker task will complete in an erroneous state, throwing a _SubscriberErrorException_.
+
+* **Continue processing subsequent batches**
+  When `ignoreSubscriberErrors` is `true`:
+  The current batch processing will be aborted; however, the erroneous batch will be acknowledged without retrying,
+  and processing will continue with the next batches.
+
+
+
+
+
+Two properties in the [subscription worker options](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions)
+object control the behavior of a worker attempting to reconnect with the server:
+
+* `timeToWaitBeforeConnectionRetry`
+  The time the worker will wait before attempting to reconnect.
+  Default: 5 seconds.
+* `maxErroneousPeriod`
+  The maximum amount of time the subscription connection can remain in an erroneous state.
+ Once this period is exceeded, the worker will stop trying to reconnect. + Default: 5 minutes. + + + + + +`unexpectedSubscriptionError` is the event that is triggered when a connection failure occurs between the subscription worker and the server, +resulting in an unexpected exception. +When this happens, the worker will automatically attempt to reconnect. +This event is useful for logging these unexpected exceptions. + + + + + +## Worker strategies + +Subscription workers are configured with a **strategy** that determines whether multiple workers +can connect to the subscription concurrently or if only one worker can connect at a time. + +The _one-worker-at-a-time_ strategy also determines how the workers interact with each other +to resolve which will establish the subscription connection. +### One worker per subscription strategies + +The following three strategies allow only a **single worker to connect to the subscription at any given time**, +and determine what happens when one worker is connected and another tries to connect. + +* `OpenIfFree` + The server will allow a worker to connect only if no other worker is currently connected. + If there is an existing connection, the incoming worker will throw a `SubscriptionInUseException`. +* `WaitForFree` + If the worker cannot open the subscription because it is in use by another worker, it will wait for the currently connected worker to disconnect before establishing the connection. + This is useful in worker failover scenarios, where one worker is connected while another is awaiting its turn to take its place. +* `TakeOver` + The server will allow an incoming connection to take over an existing one, + based on the connection strategy in use by the currently connected worker: + * If the existing connection **does not** have a `TakeOver` strategy: + The incoming connection will take over, causing the existing connection to throw a `SubscriptionInUseException`. + * If the existing connection **has** a `TakeOver` strategy: + The incoming connection will throw a `SubscriptionInUseException` exception. +### Multiple workers per subscription strategy + +* `Concurrent` + The server allows multiple workers to connect to the same subscription **concurrently**. + Read more about concurrent subscriptions [here](../../../client-api/data-subscriptions/concurrent-subscriptions.mdx). + + + +## Determining which workers a subscription will serve + + + +The **strategy used by the first worker connecting to a subscription** determines +which additional workers the subscription can serve until all worker connections are dropped. + + + +* A subscription that serves one or more [concurrent](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#multiple-workers-per-subscription-strategy) workers, + **can only serve other concurrent workers** until all connections are dropped. + If a worker with a [one worker per subscription](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#one-worker-per-subscription-strategies) + strategy attempts to connect - + * The connection attempt will be rejected. + * `SubscriptionInUseException` will be thrown. 
+
+* A subscription that serves a worker with a [one worker per subscription](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#one-worker-per-subscription-strategies) strategy,
+  **cannot** serve [concurrent](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#multiple-workers-per-subscription-strategy)
+  workers until that worker's connection is dropped.
+  If a concurrent worker attempts to connect -
+  * The connection attempt will be rejected.
+  * `SubscriptionInUseException` will be thrown.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-python.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-python.mdx
new file mode 100644
index 0000000000..9404ade81c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/_how-to-consume-data-subscription-python.mdx
@@ -0,0 +1,182 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Batches of documents sent from a Subscription Task defined on the server are consumed and processed by a subscription worker client.
+
+* The `subscription_worker` object, defined on the client, manages the communication between the server and the client and processes the document batches sent from the server.
+
+* There are several ways to create and configure the SubscriptionWorker - see `SubscriptionWorkerOptions`.
+
+* In this page:
+  * [`subscription_worker` lifecycle](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#subscription_worker-lifecycle)
+  * [Error handling](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#error-handling)
+  * [Worker strategies](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies)
+  * [Determining which workers a subscription will serve](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#determining-which-workers-a-subscription-will-serve)
+
+
+## `subscription_worker` lifecycle
+
+A `subscription_worker` object starts its life when it is generated by `store.subscriptions`:
+
+
+{`subscription_worker = store.subscriptions.get_subscription_worker_by_name(subscription_name, Order)
+`}
+
+
+
+At this point, the worker only holds its configuration; no connection or processing happens yet.
+To start processing, call the `run` method, passing it the batch processing logic to perform:
+
+
+{`subscription_runtime_task = subscription_worker.run(
+    process_documents=lambda batch: ...
+) # Pass your method that takes SubscriptionBatch[_T] as an argument, with your logic in it
+`}
+
+
+
+From this point on, the subscription worker will start processing batches.
+If processing is aborted for any reason, the returned task (`subscription_runtime_task`) will complete with an exception.
+
+
+
+## Error handling
+
+
+
+Subscription worker connection failures may occur during the routine communication between the worker and the server.
+When an unexpected error arises, the worker will attempt to **reconnect to the server**.
+
+However, there are several conditions under which the worker will stop its operation but will Not attempt to reconnect:
+
+* The subscription no longer exists or has been deleted.
+* Another worker has taken control of the subscription (see [connection strategy](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#worker-strategies)).
+* The worker is unable to connect to any of the servers.
+* The worker could not retrieve the node responsible for the task
+  (this can happen when there is no leader in the cluster).
+* An authorization exception occurred.
+* An exception occurred during the connection establishment phase.
+* The database doesn't exist.
+
+
+
+
+
+An exception may occur while processing a batch of documents in the worker.
+For example:
+
+
+
+{`def _throw_exception(batch: SubscriptionBatch):
+    raise Exception()
+
+_ = worker_w_batch.run(_throw_exception)
+`}
+
+
+
+When creating a worker, the worker can be configured to handle these exceptions in either of the following ways,
+depending on the `ignore_subscriber_errors` property in [SubscriptionWorkerOptions](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions):
+
+* **Abort processing completely**
+  When `ignore_subscriber_errors` is set to `False` (default):
+  The current batch processing will be aborted, and in this case, the worker will wrap the thrown exception in a `SubscriberErrorException` and will rethrow it.
+  Processing of the subscription will be terminated without acknowledging progress to the server or retrying to connect.
+  As a result, the task returned by the `run` method will complete in an erroneous state, throwing a _SubscriberErrorException_.
+
+* **Continue processing subsequent batches**
+  When `ignore_subscriber_errors` is set to `True`:
+  The current batch processing will be aborted; however, the erroneous batch will be acknowledged without retrying,
+  and processing will continue with the next batches.
+
+
+
+
+
+Two properties in the [SubscriptionWorkerOptions](../../../client-api/data-subscriptions/consumption/api-overview.mdx#subscriptionworkeroptions)
+object control the behavior of a worker attempting to reconnect with the server:
+
+* `time_to_wait_before_connection_retry`
+  The time the worker will wait before attempting to reconnect.
+  Default: 5 seconds.
+* `max_erroneous_period`
+  The maximum amount of time the subscription connection can remain in an erroneous state.
+  Once this period is exceeded, the worker will stop trying to reconnect.
+  Default: 5 minutes.
+
+
+
+
+
+`on_unexpected_subscription_error` is the event that is triggered when a connection failure occurs between the subscription worker and the server,
+resulting in an unexpected exception.
+When this happens, the worker will automatically attempt to reconnect.
+This event is useful for logging these unexpected exceptions.
+
+
+
+
+
+## Worker strategies
+
+Subscription workers are configured with a **strategy** that determines whether multiple workers
+can connect to the subscription concurrently or if only one worker can connect at a time.
+
+The _one-worker-at-a-time_ strategy also determines how the workers interact with each other
+to resolve which will establish the subscription connection.
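+
+A strategy is passed to the worker via the `strategy` parameter of `SubscriptionWorkerOptions`.
+As a minimal sketch (reusing the `subscription_name` and the `Order` class from the sections above):
+
+
+
+{`worker = store.subscriptions.get_subscription_worker(
+    SubscriptionWorkerOptions(subscription_name, strategy=SubscriptionOpeningStrategy.TAKE_OVER), Order
+)
+`}
+
+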
+### One worker per subscription strategies + +The following three strategies allow only a **single worker to connect to the subscription at any given time**, +and determine what happens when one worker is connected and another tries to connect. + +* `SubscriptionOpeningStrategy.OPEN_IF_FREE` + The server will allow a worker to connect only if no other worker is currently connected. + If there is an existing connection, the incoming worker will throw a `SubscriptionInUseException`. +* `SubscriptionOpeningStrategy.WAIT_FOR_FREE` + If the worker cannot open the subscription because it is in use by another worker, it will wait for the currently connected worker to disconnect before establishing the connection. + This is useful in worker failover scenarios, where one worker is connected while another is awaiting its turn to take its place. +* `SubscriptionOpeningStrategy.TAKE_OVER` + The server will allow an incoming connection to take over an existing one, + based on the connection strategy in use by the currently connected worker: + * If the existing connection **does not** have a `TAKE_OVER` strategy: + The incoming connection will take over, causing the existing connection to throw a `SubscriptionInUseException`. + * If the existing connection **has** a `TAKE_OVER` strategy: + The incoming connection will throw a `SubscriptionInUseException` exception. +### Multiple workers per subscription strategy + +* `SubscriptionOpeningStrategy.CONCURRENT` + The server allows multiple workers to connect to the same subscription **concurrently**. + Read more about concurrent subscriptions [here](../../../client-api/data-subscriptions/concurrent-subscriptions.mdx). + + + +## Determining which workers a subscription will serve + + + +The **strategy used by the first worker connecting to a subscription** determines +which additional workers the subscription can serve until all worker connections are dropped. + + + +* A subscription that serves one or more [CONCURRENT](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#multiple-workers-per-subscription-strategy) workers, + **can only serve other concurrent workers** until all connections are dropped. + If a worker with a [one worker per subscription](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#one-worker-per-subscription-strategies) + strategy attempts to connect - + * The connection attempt will be rejected. + * `SubscriptionInUseException` will be thrown. + +* A subscription that serves a worker with a [one worker per subscription](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#one-worker-per-subscription-strategies) strategy, + **cannot** serve [CONCURRENT](../../../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx#multiple-workers-per-subscription-strategy) + workers until that worker's connection is dropped. + If a concurrent worker attempts to connect - + * The connection attempt will be rejected. + * `SubscriptionInUseException` will be thrown. 
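+
+For example, a minimal sketch of how such a rejection surfaces on the client
+(assuming the subscription is already serving workers with the `CONCURRENT` strategy):
+
+
+
+{`rejected_worker = store.subscriptions.get_subscription_worker(
+    SubscriptionWorkerOptions(subscription_name, strategy=SubscriptionOpeningStrategy.OPEN_IF_FREE), Order
+)
+
+try:
+    # the connection attempt is rejected because the subscription
+    # is already serving concurrent workers
+    rejected_worker.run(lambda batch: ...)
+except SubscriptionInUseException:
+    ...  # handle the rejection, e.g. retry later or connect a concurrent worker instead
+`}
+
+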
+ + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/api-overview.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/api-overview.mdx new file mode 100644 index 0000000000..b424f128a4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/api-overview.mdx @@ -0,0 +1,44 @@ +--- +title: "Consume Subscriptions API" +hide_table_of_contents: true +sidebar_label: API Overview +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ApiOverviewCsharp from './_api-overview-csharp.mdx'; +import ApiOverviewJava from './_api-overview-java.mdx'; +import ApiOverviewPython from './_api-overview-python.mdx'; +import ApiOverviewNodejs from './_api-overview-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/examples.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/examples.mdx new file mode 100644 index 0000000000..9db13b374b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/examples.mdx @@ -0,0 +1,44 @@ +--- +title: "Subscription Consumption Examples" +hide_table_of_contents: true +sidebar_label: Examples +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ExamplesCsharp from './_examples-csharp.mdx'; +import ExamplesJava from './_examples-java.mdx'; +import ExamplesPython from './_examples-python.mdx'; +import ExamplesNodejs from './_examples-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx new file mode 100644 index 0000000000..04ac04347d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx @@ -0,0 +1,48 @@ +--- +title: "How to Consume a Data Subscription" +hide_table_of_contents: true +sidebar_label: How to Consume a Data Subscription +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToConsumeDataSubscriptionCsharp from './_how-to-consume-data-subscription-csharp.mdx'; +import HowToConsumeDataSubscriptionJava from './_how-to-consume-data-subscription-java.mdx'; +import HowToConsumeDataSubscriptionPython from './_how-to-consume-data-subscription-python.mdx'; +import HowToConsumeDataSubscriptionNodejs from './_how-to-consume-data-subscription-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_api-overview-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_api-overview-csharp.mdx new file mode 100644 index 0000000000..d7da1cefc3 --- /dev/null +++ 
b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_api-overview-csharp.mdx
@@ -0,0 +1,277 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In this page:
+  * [Create subscription](../../../client-api/data-subscriptions/creation/api-overview.mdx#create-subscription)
+  * [Subscription creation options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options)
+  * [Update subscription](../../../client-api/data-subscriptions/creation/api-overview.mdx#update-subscription)
+  * [Subscription update options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-update-options)
+  * [Subscription query](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-query)
+
+
+## Create subscription
+
+Subscriptions can be created using the following `Create` methods available through the `Subscriptions` property of the `DocumentStore`.
+
+
+
+{`string Create(SubscriptionCreationOptions options,
+    string database = null);
+
+string Create<T>(SubscriptionCreationOptions<T> options = null,
+    string database = null);
+
+string Create<T>(SubscriptionCreationOptions options,
+    string database = null);
+
+string Create<T>(Expression<Func<T, bool>> predicate = null,
+    PredicateSubscriptionCreationOptions options = null,
+    string database = null);
+
+Task<string> CreateAsync(SubscriptionCreationOptions options,
+    string database = null,
+    CancellationToken token = default);
+
+Task<string> CreateAsync<T>(SubscriptionCreationOptions<T> options = null,
+    string database = null,
+    CancellationToken token = default);
+
+Task<string> CreateAsync<T>(SubscriptionCreationOptions options,
+    string database = null,
+    CancellationToken token = default);
+
+Task<string> CreateAsync<T>(Expression<Func<T, bool>> predicate = null,
+    PredicateSubscriptionCreationOptions options = null,
+    string database = null,
+    CancellationToken token = default);
+`}
+
+
+
+| Parameter | Type | Description |
+|----------------|-----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **predicate** | `Expression<Func<T, bool>>` | An optional lambda expression that returns a boolean.<br/>
This predicate defines the filter criteria for the subscription documents. |
+| **options** | `SubscriptionCreationOptions<T>` | Contains subscription creation options.<br/>
See [Subscription creation options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options) | +| **options** | `SubscriptionCreationOptions` | Contains subscription creation options
(non-generic version).
See [Subscription creation options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options) |
+| **options** | `PredicateSubscriptionCreationOptions` | Contains subscription creation options<br/>
(when passing a predicate).
See [Subscription creation options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options) |
+| **database** | `string` | The name of the database where the subscription task will be created. If `null`, the default database configured in the DocumentStore will be used. |
+| **token** | `CancellationToken` | Cancellation token used to halt the subscription creation process. |
+
+| Return value | Description |
+|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `string` | The name of the created data subscription.<br/>
If the name was provided in `SubscriptionCreationOptions`, it will be returned.
Otherwise, a unique name will be generated by the server. |
+
+
+
+## Subscription creation options
+
+
+
+Options for the **generic** version of the subscription creation options object:
+
+
+{`public class SubscriptionCreationOptions<T>
+\{
+    public string Name \{ get; set; \}
+    public Expression<Func<T, bool>> Filter \{ get; set; \}
+    public Expression<Func<T, object>> Projection \{ get; set; \}
+    public Action<ISubscriptionIncludeBuilder<T>> Includes \{ get; set; \}
+    public string ChangeVector \{ get; set; \}
+    public bool Disabled \{ get; set; \}
+    public string MentorNode \{ get; set; \}
+    public bool PinToMentorNode \{ get; set; \}
+    public ArchivedDataProcessingBehavior? ArchivedDataProcessingBehavior \{ get; set; \}
+\}
+`}
+
+
+
+
+
+Options for the **non-generic** version of the subscription creation options object:
+
+
+{`public class SubscriptionCreationOptions
+\{
+    public string Name \{ get; set; \}
+    public string Query \{ get; set; \}
+    public string ChangeVector \{ get; set; \}
+    public virtual bool Disabled \{ get; set; \}
+    public string MentorNode \{ get; set; \}
+    public virtual bool PinToMentorNode \{ get; set; \}
+    public ArchivedDataProcessingBehavior? ArchivedDataProcessingBehavior \{ get; set; \}
+\}
+`}
+
+
+
+
+
+Options for the **non-generic** version of the subscription creation options object when passing a **predicate**:
+
+
+{`public sealed class PredicateSubscriptionCreationOptions
+\{
+    public string Name \{ get; set; \}
+    public string ChangeVector \{ get; set; \}
+    public bool Disabled \{ get; set; \}
+    public string MentorNode \{ get; set; \}
+    public bool PinToMentorNode \{ get; set; \}
+    public ArchivedDataProcessingBehavior? ArchivedDataProcessingBehavior \{ get; set; \}
+\}
+`}
+
+
+
+| Member | Type | Description |
+|------------------------------------|------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **<T>** | `T` | Type of object from which the collection of documents managed by the subscription will be derived. |
+| **Name** | `string` | User defined name for the subscription.<br/>
The name must be unique in the database. |
+| **Query** | `string` | RQL query that defines the subscription. This RQL comes with additional support for JavaScript functions inside the `where` clause and special semantics for subscriptions on document revisions. |
+| **Filter** | `Expression<Func<T, bool>>` | Lambda expression defining the filter logic for the subscription. Will be translated to a JavaScript function. |
+| **Projection** | `Expression<Func<T, object>>` | Lambda expression defining the projection that will be sent by the subscription for each matching document. Will be translated to a JavaScript function. |
+| **Includes** | `Action<ISubscriptionIncludeBuilder<T>>` | An action that defines include clauses for the subscription. [Included documents](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-documents) and/or [included counters](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters) will be part of the batch sent by the subscription. Include methods can be chained. |
+| **ChangeVector** | `string` | Allows to define a change vector from which the subscription will start processing.<br/>
Learn more [below](../../../client-api/data-subscriptions/creation/api-overview.mdx#the-changevector-property). |
+| **Disabled** | `bool` | `true` - task will be disabled.<br/>
`false` - task will be enabled. |
+| **MentorNode** | `string` | Allows to define a node in the cluster that will be responsible for handling the subscription. Useful when you prefer a specific server due to its stronger hardware, closer geographic proximity to clients, or other reasons. |
+| **PinToMentorNode** | `bool` | `true` - the selected responsible node will be pinned to handle the task.<br/>
`false` - Another node will execute the task if the responsible node is down. |
+| **ArchivedDataProcessingBehavior** | `ArchivedDataProcessingBehavior?` | Defines whether [archived documents](../../../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-subscriptions) will be included in the subscription. |
+
+
+
+###### The `ChangeVector` property:
+
+* The _ChangeVector_ property allows you to define a starting point from which the subscription will begin processing changes.
+* This is useful for ad-hoc processes that need to process only recent changes. In such cases, you can:
+  * Set the field to _"LastDocument"_ to start processing from the latest document in the collection.
+  * Or, provide an actual Change Vector to begin processing from a specific point.
+* By default, the subscription will send all documents matching the RQL query, regardless of their creation time.
+
+
+
+
+
+## Update subscription
+
+Existing subscriptions can be modified using the following `Update` methods available through the `Subscriptions` property of the `DocumentStore`.
+
+
+
+{`string Update(SubscriptionUpdateOptions options, string database = null);
+
+Task<string> UpdateAsync(SubscriptionUpdateOptions options, string database = null,
+    CancellationToken token = default);
+`}
+
+
+
+| Parameter | Type | Description |
+|--------------|-----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **options** | `SubscriptionUpdateOptions` | The subscription update options object.<br/>
See [SubscriptionUpdateOptions](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-update-options) |
+| **database** | `string` | The name of the database where the subscription task resides.<br/>
If `null`, the default database configured in the DocumentStore will be used. |
+| **token** | `CancellationToken` | Cancellation token used to halt the update process. |
+
+| Return value | Description |
+|---------------|--------------------------------------------|
+| `string` | The name of the updated data subscription. |
+
+
+
+## Subscription update options
+
+`SubscriptionUpdateOptions` inherits from [SubscriptionCreationOptions](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options)
+and adds two additional fields:
+
+
+
+{`public class SubscriptionUpdateOptions : SubscriptionCreationOptions
+\{
+    public long? Id \{ get; set; \}
+    public bool CreateNew \{ get; set; \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **Id** | `long?` | The unique ID that was assigned to the subscription by the server at creation time.<br/>
You can retrieve it by [getting the subscription status](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#getting-subscription-status).
When updating, the `Id` can be used instead of the `Name` field, and takes precedence over it. This allows you to modify the subscription's name: provide the Id and submit a new name in the Name field. | +| **CreateNew** | `bool` | Determines the behavior when the subscription you wish to update does Not exist.
`true` - a new subscription is created with the provided option parameters.
`false` - an exception will be thrown.
Default: `false` |
+
+
+
+## Subscription query
+
+All subscriptions are eventually translated to an RQL-like statement. These statements have the following parts:
+
+* Functions definition part, like in ordinary RQL. Those functions can contain any JavaScript code,
+  and also support `load` and `include` operations.
+
+* From statement, defining the document source, e.g., `from Orders`. The from statement can only address collections; indexes are not supported.
+
+* Where statement describing the criteria according to which it will be decided whether to
+  send the documents to the worker or not. Those statements support RQL-like equality operations (`=`, `==`),
+  plain JavaScript expressions, or declared function calls, allowing complex filtering logic.
+  Subscription RQL does not support any of the known RQL search keywords.
+
+* Select statement that defines the projection to be performed.
+  The select statements can contain function calls, allowing complex transformations.
+
+* Include statement, allowing to define an include path in the document.
+
+
+Although the subscription query syntax has an RQL-like structure, it supports only the `declare`, `select`, and `where` keywords; usage of all other RQL keywords is not supported.
+Usage of JavaScript ES5 syntax is supported.
+
+
+
+Paths in subscription RQL statements are treated as JavaScript indirections and not like regular RQL paths.
+This means that a query that would look like this in RQL:
+
+```
+from Orders as o
+where o.Lines[].Product = "products/1-A"
+```
+
+Would look like this in subscription RQL:
+
+```
+declare function filterLines(doc, productId)
+{
+    if (!!doc.Lines){
+        return doc.Lines.filter(x=>x.Product == productId).length >0;
+    }
+    return false;
+}
+
+from Orders as o
+where filterLines(o, "products/1-A")
+```
+
+
+
+
+To define a data subscription that sends document revisions to the client,
+you must first [configure revisions](../../../document-extensions/revisions/overview.mdx#defining-a-revisions-configuration)
+for the specific collection managed by the subscription.
+
+The subscription should be defined in a special way:
+
+* In case of the generic API, the `SubscriptionCreationOptions<>` generic parameter should be of the generic type `Revision<>`,
+  while its generic parameter correlates to the collection to be processed, e.g., `new SubscriptionCreationOptions<Revision<Order>>()`
+* For RQL syntax, concatenate the `(Revisions = true)` clause to the collection being queried.
+ For example: `From Orders(Revisions = true) as o` + + + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_api-overview-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_api-overview-nodejs.mdx new file mode 100644 index 0000000000..006d6e9463 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_api-overview-nodejs.mdx @@ -0,0 +1,285 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In this page: + * [Create subscription](../../../client-api/data-subscriptions/creation/api-overview.mdx#create-subscription) + * [Subscription creation options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options) + * [Include methods](../../../client-api/data-subscriptions/creation/api-overview.mdx#include-methods) + * [Update subscription](../../../client-api/data-subscriptions/creation/api-overview.mdx#update-subscription) + * [Subscription update options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-update-options) + * [Subscribe to revisions](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscribe-to-revisions) + * [Subscription RQL](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-rql) + + +## Create subscription + +Subscriptions can be created using the following `create` methods available through the `subscriptions` property of the `DocumentStore`. + + + +{`// Available overloads: +// ==================== + +create(options); + +create(options, database); + +create(documentType); +`} + + + +| Parameter | Type | Description | +|------------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------| +| **options** | `object` | The [subscription creation options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options). | +| **database** | `string` | The name of the database where the subscription task will be created.
If `null`, the default database configured in the DocumentStore will be used. | +| **documentType** | `object` | The class type from which the collection of documents managed by the subscription will be derived. | + +| Return value | Description | +|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Promise` | A Promise that resolves to the **name** of the created data subscription (a `string`).
If the name was provided in the [subscription creation options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options), it will be returned.
Otherwise, a unique name will be generated by the server. |
+
+Examples for creating subscriptions are available [here](../../../client-api/data-subscriptions/creation/examples.mdx).
+
+
+
+## Subscription creation options
+
+
+
+{`// The SubscriptionCreationOptions object:
+// =======================================
+\{
+    name;
+    query;
+    includes;
+    changeVector;
+    mentorNode;
+    pinToMentorNode;
+    disabled;
+    documentType;
+\}
+`}
+
+
+
+| Member | Type | Description |
+|---------------------|---------------------|-------------|
+| **name** | `string` | User-defined name for the subscription.
The name must be unique in the database. |
+| **query** | `string` | RQL query that defines the subscription. This RQL comes with additional support for JavaScript functions inside the `where` clause and special semantics for subscriptions on document revisions.
Learn more in [subscription RQL](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-rql). | +| **includes** | `(builder) => void` | A function that accepts a builder object, which allows you to include related documents, counters, and time series in the batch that is sent to the client.
See [Include methods](../../../client-api/data-subscriptions/creation/api-overview.mdx#include-methods). |
+| **changeVector** | `string` | Allows you to define a change vector from which the subscription will start processing. Useful for ad-hoc processes that need to process only recent changes. In such cases, you can set the field to _"LastDocument"_ to start processing from the latest document in the collection. |
+| **mentorNode** | `string` | Allows you to define a specific node in the cluster to handle the subscription. Useful when you prefer a specific server due to its stronger hardware, closer geographic proximity to clients, or other reasons. |
+| **pinToMentorNode** | `boolean` | `true` - the task will only be handled by the specified mentor node.
`false` - When the specified mentor node is down, the cluster selects another node from the Database Group to handle the task.
Learn more in [pinning a task](../../../server/clustering/distribution/highly-available-tasks.mdx#pinning-a-task). | +| **disabled** | `boolean` | `true` - the created subscription will be in a disabled state.
`false` (default) - the created subscription will be enabled. |
+| **documentType** | `object` | The class type from which the collection of documents managed by the subscription will be derived. |
+
+
+
+
+## Include methods
+
+**Including documents**:
+
+
+
+{`includeDocuments(path);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------|------------|-------------------------------------------------------------------------|
+| **path** | `string` | Path to the property that contains the ID of the document to include. |
+
+An example of including documents is available [here](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-documents).
+**Including counters**:
+
+
+
+{`// Include a single counter
+includeCounter(name);
+
+// Include multiple counters
+includeCounters(names);
+
+// Include ALL counters from ALL documents that match the subscription criteria
+includeAllCounters();
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
+| **name** | `string` | The name of a counter. The subscription will include all counters with this name that are contained in the documents the subscription retrieves. |
+| **names** | `string[]` | Array of counter names. |
+
+An example of including counters is available [here](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters).
+**Including time series**:
+
+
+
+{`includeTimeSeries(name, type, time);
+includeTimeSeries(name, type, count);
+
+includeTimeSeries(names, type, time);
+includeTimeSeries(names, type, count);
+
+includeAllTimeSeries(type, time);
+includeAllTimeSeries(type, count);
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **name** | `string` | The name of the time series to include. |
+| **names** | `string[]` | The names of the time series to include. |
+| **type** | `string` | Indicates how to retrieve the time series entries.
Range type can be: `"None"` or `"Last"`.
When set to _Last_, the last X entries are retrieved, where X is determined by _count_. |
+| **time** | `TimeValue` | The time range to consider when retrieving time series entries.
E.g.: `TimeValue.ofDays(7)` | +| **count** | `number` | The maximum number of entries to take when retrieving time series entries. | + + + +## Update subscription + +Existing subscriptions can be modified using the following `update` methods available through the `subscriptions` property of the `DocumentStore`. + + + +{`// Available overloads: +// ==================== + +update(options); + +update(options, database); +`} + + + +| Parameter | Type | Description | +|--------------|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------| +| **options** | `object` | The [subscription update options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-update-options). | +| **database** | `string` | The name of the database where the subscription task resides.
If `null`, the default database configured in the DocumentStore will be used. | + +| Return value | Description | +|---------------|----------------------------------------------------------------------------------------| +| `Promise` | A Promise that resolves to the **name** of the updated data subscription (a `string`). | + +Examples for updating an existing subscription are available [here](../../../client-api/data-subscriptions/creation/examples.mdx#update-existing-subscription). + + + +## Subscription update options + +The subscription update options object extends the [creation options object](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options) +and adds two additional fields: + + + +{`// The SubscriptionUpdateOptions object: +// ===================================== +\{ + id; + createNew; +\} +`} + + + +| Parameter | Type | Description | +|---------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **id** | `number` | The unique ID that was assigned to the subscription by the server at creation time.
You can retrieve it by [getting the subscription status](../../../client-api/data-subscriptions/advanced-topics/maintenance-operations.mdx#getting-subscription-status).
When updating, the `id` can be used instead of the `name` field, and takes precedence over it. This allows you to modify the subscription's name: provide the id and submit a new name in the name field. | +| **createNew** | `boolean` | Determines the behavior when the subscription you wish to update does Not exist.
`true` - a new subscription is created with the provided option parameters.
`false` - an exception will be thrown.
Default: `false` | + + + + +## Subscribe to revisions + +To define a data subscription that sends document revisions to the client, +you must first [configure revisions](../../../document-extensions/revisions/overview.mdx#defining-a-revisions-configuration) +for the specific collection managed by the subscription. + +Create a subscription that sends document revisions using the following `createForRevisions` methods: + + + +{`// Available overloads: +// ==================== + +createForRevisions(options); + +createForRevisions(options, database); +`} + + + +| Parameter | Type | Description | +|--------------|----------|--------------------------------------------------------------------------------------------------------------------------------------------------------| +| **options** | `object` | The [subscription creation options](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation-options). | +| **database** | `string` | The name of the database where the subscription task will be created.
If `null`, the default database configured in the DocumentStore will be used. |
+
+When providing raw RQL to the `query` param in the options object,
+concatenate the `(Revisions = true)` clause to the collection being queried.
+For example: `From Orders(Revisions = true) as o`
+
+Learn more about subscribing to revisions in [revisions support](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx).
+
+
+
+## Subscription RQL
+
+All subscriptions are eventually translated to an RQL-like statement. These statements have the following parts:
+
+* Functions definition part, as in ordinary RQL. These functions can contain any JavaScript code,
+  and also support `load` and `include` operations.
+
+* From statement, defining the document source, e.g. `from Orders`. The from statement can only address collections; indexes are not supported.
+
+* Where statement describing the criteria that determine whether a document is sent to the worker.
+  These statements support RQL-like equality operations (`=`, `==`),
+  plain JavaScript expressions, or declared function calls, allowing complex filtering logic.
+  The subscriptions RQL does not support any of the known RQL searching keywords.
+
+* Select statement that defines the projection to be performed.
+  The select statement can contain function calls, allowing complex transformations.
+
+* Include statement that defines include paths in the document.
+
+
+Although the subscription query syntax has an RQL-like structure, it supports only the `declare`, `select`, and `where` keywords; all other RQL keywords are not supported.
+JavaScript ES5 syntax is supported.
+
+
+
+Paths in subscriptions RQL statements are treated as JavaScript indirections, not as regular RQL paths.
+This means that a query that would look like this in RQL:
+
+```
+from Orders as o
+where o.Lines[].Product = "products/1-A"
+```
+
+will look like this in subscriptions RQL:
+
+```
+declare function filterLines(doc, productId)
+{
+    if (!!doc.Lines) {
+        return doc.Lines.filter(x => x.Product == productId).length > 0;
+    }
+    return false;
+}
+
+from Orders as o
+where filterLines(o, "products/1-A")
+```
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_api-overview-python.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_api-overview-python.mdx
new file mode 100644
index 0000000000..4e619bb86c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_api-overview-python.mdx
@@ -0,0 +1,179 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In this page:
+  * [Create subscription](../../../client-api/data-subscriptions/creation/api-overview.mdx#create-subscription)
+  * [SubscriptionCreationOptions](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscriptioncreationoptions)
+  * [Update subscription](../../../client-api/data-subscriptions/creation/api-overview.mdx#update-subscription)
+  * [SubscriptionUpdateOptions](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscriptionupdateoptions)
+  * [Subscription query](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-query)
+
+
+## Create subscription
+
+Subscriptions can be created using the `create_for_options` and `create_for_class` methods.
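+For orientation, a minimal creation call might look like the following sketch (an illustration only, assuming an initialized `DocumentStore` held in a `store` variable and that `SubscriptionCreationOptions` is imported from the RavenDB Python client package). The full method signatures follow.
+
+
+
+{`# Create a subscription over the Orders collection.
+# The server returns the created subscription's name.
+name = store.subscriptions.create_for_options(
+    SubscriptionCreationOptions(query="From Orders")
+)
+`}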
+
+
+{`def create_for_options(self, options: SubscriptionCreationOptions, database: Optional[str] = None) -> str: ...
+
+def create_for_class(
+    self,
+    object_type: Type[_T],
+    options: Optional[SubscriptionCreationOptions] = None,
+    database: Optional[str] = None,
+) -> str: ...
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **options** | `SubscriptionCreationOptions` | The subscription creation options. |
+| **database** (Optional) | `str` | The name of the database where the subscription task will be created. If `None`, the default database configured in the DocumentStore will be used. |
+| **object_type** | `Type[_T]` | The class type from which the collection of documents managed by the subscription will be derived. |
+
+| Return value | Description |
+| ------------- | ----- |
+| `str` | The created data subscription's name. If the name was provided in `SubscriptionCreationOptions`, it will be returned. Otherwise, a unique name will be generated by the server. |
+
+
+
+## SubscriptionCreationOptions
+
+An RQL statement will be built based on the fields of this object.
+
+
+{`class SubscriptionCreationOptions:
+    def __init__(
+        self,
+        name: Optional[str] = None,
+        query: Optional[str] = None,
+        includes: Optional[Callable[[SubscriptionIncludeBuilder], None]] = None,
+        change_vector: Optional[str] = None,
+        mentor_node: Optional[str] = None,
+    ):
+        self.name = name
+        self.query = query
+        self.includes = includes
+        self.change_vector = change_vector
+        self.mentor_node = mentor_node
+`}
+
+
+
+| Member | Type | Description |
+|------------------------------|:-----------------------------------------|------------|
+| **name** (Optional) | `str` | User-defined name of the subscription: allows a human-readable identification of the subscription. The name must be unique in the database. |
+| **query** (Optional) | `str` | RQL query that describes the subscription. This RQL comes with additional support for JavaScript functions inside the `where` clause and special semantics for subscriptions on document revisions. |
+| **change_vector** (Optional) | `str` | Allows you to define a change vector from which the subscription will start processing. Useful for ad-hoc processes that need to process only recent changes. In such cases, you can set the field to _"LastDocument"_ to start processing from the latest document in the collection. |
+| **mentor_node** (Optional) | `str` | Allows you to define a specific node in the cluster to handle the subscription. Useful when you prefer a specific server due to its stronger hardware, closer geographic proximity to clients, or other reasons. |
+| **includes** (Optional) | `Callable[[SubscriptionIncludeBuilder], None]` | Action with a [SubscriptionIncludeBuilder](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-documents) parameter that allows you to define an include clause for the subscription. Methods can be chained to include documents as well as [counters](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters). |
+
+
+
+## Update subscription
+
+Modifies an existing data subscription.
The `update` method is accessible through the `subscriptions` property of the `DocumentStore`.
+
+
+
+{`def update(self, options: SubscriptionUpdateOptions, database: Optional[str] = None) -> str: ...
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **options** | `SubscriptionUpdateOptions` | The subscription update options. |
+| **database** (Optional) | `str` | The name of the database where the subscription task resides. If `None`, the default database configured in the DocumentStore will be used. |
+
+| Return value | Description |
+| ------------- | ----- |
+| `str` | The updated data subscription's name. |
+
+
+
+## SubscriptionUpdateOptions
+
+Inherits from `SubscriptionCreationOptions` and has all the same fields (see [above](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscriptioncreationoptions)) plus the two additional fields described below:
+
+
+
+{`class SubscriptionUpdateOptions(SubscriptionCreationOptions):
+    def __init__(
+        self,
+        name: Optional[str] = None,
+        query: Optional[str] = None,
+        includes: Optional[Callable[[SubscriptionIncludeBuilder], None]] = None,
+        change_vector: Optional[str] = None,
+        mentor_node: Optional[str] = None,
+        key: Optional[int] = None,
+        create_new: Optional[bool] = None,
+    ): ...
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **key** (Optional) | `int` | Unique server-side ID of the data subscription. `key` can be used instead of the subscription update options `name` field, and takes precedence over it. This allows you to change the subscription's name: submit the subscription's ID, and submit a different name in the `name` field. |
+| **create_new** (Optional) | `bool` | Determines the behavior when the subscription you wish to update does Not exist.
`true` - a new subscription is created with the provided option parameters.
`false` - an exception will be thrown.
Default: `false` |
+
+
+
+## Subscription query
+
+All subscriptions are eventually translated to an RQL-like statement. These statements have the following parts:
+
+* Functions definition part, as in ordinary RQL. These functions can contain any JavaScript code,
+  and also support `load` and `include` operations.
+
+* From statement, defining the document source, e.g. `from Orders`. The from statement can only address collections; indexes are not supported.
+
+* Where statement describing the criteria that determine whether a document is sent to the worker.
+  These statements support RQL-like equality operations (`=`, `==`),
+  plain JavaScript expressions, or declared function calls, allowing complex filtering logic.
+  The subscriptions RQL does not support any of the known RQL searching keywords.
+
+* Select statement that defines the projection to be performed.
+  The select statement can contain function calls, allowing complex transformations.
+
+* Include statement that defines include paths in the document.
+
+
+Although the subscription query syntax has an RQL-like structure, it supports only the `declare`, `select`, and `where` keywords; all other RQL keywords are not supported.
+JavaScript ES5 syntax is supported.
+
+
+
+Paths in subscriptions RQL statements are treated as JavaScript indirections, not as regular RQL paths.
+This means that a query that would look like this in RQL:
+
+```
+from Orders as o
+where o.Lines[].Product = "products/1-A"
+```
+
+will look like this in subscriptions RQL:
+
+```
+declare function filterLines(doc, productId)
+{
+    if (!!doc.Lines) {
+        return doc.Lines.filter(x => x.Product == productId).length > 0;
+    }
+    return false;
+}
+
+from Orders as o
+where filterLines(o, "products/1-A")
+```
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_category_.json b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_category_.json
new file mode 100644
index 0000000000..696f998ee4
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 1,
+  "label": "Creation"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_examples-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_examples-csharp.mdx
new file mode 100644
index 0000000000..52c7682c15
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_examples-csharp.mdx
@@ -0,0 +1,479 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This page contains examples of **creating a subscription**.
+  To learn how to consume and process documents sent by the subscription, see these [examples](../../../client-api/data-subscriptions/consumption/examples.mdx).
+
+* For a detailed syntax of the available subscription methods and objects, see this [API overview](../../../client-api/data-subscriptions/creation/api-overview.mdx).
+
+* In this page:
+  * [Create subscription - for all documents in a collection](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---for-all-documents-in-a-collection)
+  * [Create subscription - filter documents](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---filter-documents)
+  * [Create subscription - filter and project fields](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---filter-and-project-fields)
+  * [Create subscription - project data from a related document](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---project-data-from-a-related-document)
+  * [Create subscription - include documents](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-documents)
+  * [Create subscription - include counters](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters)
+  * [Create subscription - subscribe to revisions](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---subscribe-to-revisions)
+  * [Create subscription - via update](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---via-update)
+  * [Update existing subscription](../../../client-api/data-subscriptions/creation/examples.mdx#update-existing-subscription)
+
+
+## Create subscription - for all documents in a collection
+
+Here we create a plain subscription on the _Orders_ collection without any constraints or transformations.
+The server will send ALL documents from the _Orders_ collection to a client that connects to this subscription.
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions<Order>
+{
+    // Set a custom name for the subscription
+    Name = "OrdersProcessingSubscription"
+});
+`}
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+{
+    Query = "From Orders",
+    Name = "OrdersProcessingSubscription"
+});
+`}
+
+
+
+
+
+
+## Create subscription - filter documents
+
+Here we create a subscription for documents from the _Orders_ collection where the total order revenue is greater than 100.
+Only documents that match this condition will be sent from the server to a client connected to this subscription.
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create<Order>(x =>
+    // Only documents matching these criteria will be sent
+    x.Lines.Sum(line => line.PricePerUnit * line.Quantity) > 100);
+`}
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+{
+    Query = @"declare function getOrderLinesSum(doc) {
+                  var sum = 0;
+                  for (var i in doc.Lines) {
+                      sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity;
+                  }
+                  return sum;
+              }
+
+              From Orders as o
+              Where getOrderLinesSum(o) > 100"
+});
+`}
+
+
+
+
+
+
+## Create subscription - filter and project fields
+
+Here, again, we create a subscription for documents from the _Orders_ collection where the total order revenue is greater than 100.
+However, this time we only project the document ID and the total revenue properties in each object sent to the client.
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions<Order>()
+{
+    // The subscription criteria:
+    Filter = x => x.Lines.Sum(line => line.PricePerUnit * line.Quantity) > 100,
+
+    // The object properties that will be sent for each matching document:
+    Projection = x => new
+    {
+        Id = x.Id,
+        Total = x.Lines.Sum(line => line.PricePerUnit * line.Quantity)
+    }
+});
+`}
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+{
+    Query = @"declare function getOrderLinesSum(doc) {
+                  var sum = 0;
+                  for (var i in doc.Lines) {
+                      sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity;
+                  }
+                  return sum;
+              }
+
+              declare function projectOrder(doc) {
+                  return {
+                      Id: doc.Id,
+                      Total: getOrderLinesSum(doc)
+                  };
+              }
+
+              From Orders as o
+              Where getOrderLinesSum(o) > 100
+              Select projectOrder(o)"
+});
+`}
+
+
+
+
+
+
+## Create subscription - project data from a related document
+
+In this subscription, in addition to projecting the document fields,
+we also project data from a [related document](../../../indexes/indexing-related-documents.mdx#what-are-related-documents) that is loaded using the `Load` method.
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(
+    new SubscriptionCreationOptions<Order>()
+    {
+        // The subscription criteria:
+        Filter = x => x.Lines.Sum(line => line.PricePerUnit * line.Quantity) > 100,
+
+        // The object properties that will be sent for each matching document:
+        Projection = x => new
+        {
+            Id = x.Id,
+            Total = x.Lines.Sum(line => line.PricePerUnit * line.Quantity),
+            ShipTo = x.ShipTo,
+
+            // 'Load' the related Employee document and use its data in the projection
+            EmployeeName = RavenQuery.Load<Employee>(x.Employee).FirstName + " " +
+                           RavenQuery.Load<Employee>(x.Employee).LastName
+        }
+    });
+`}
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+{
+    Query = @"declare function getOrderLinesSum(doc) {
+                  var sum = 0;
+                  for (var i in doc.Lines) {
+                      sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity;
+                  }
+                  return sum;
+              }
+
+              declare function projectOrder(doc) {
+                  var employee = load(doc.Employee);
+                  return {
+                      Id: doc.Id,
+                      Total: getOrderLinesSum(doc),
+                      ShipTo: doc.ShipTo,
+                      EmployeeName: employee.FirstName + ' ' + employee.LastName
+                  };
+              }
+
+              From Orders as o
+              Where getOrderLinesSum(o) > 100
+              Select projectOrder(o)"
+});
+`}
+
+
+
+
+
+
+## Create subscription - include documents
+
+Here we create a subscription on the _Orders_ collection, which will send all the _Order_ documents.
+
+In addition, the related _Product_ documents associated with each Order are **included** in the batch sent to the client.
+This way, when the subscription worker that processes the batch in the client accesses a _Product_ document, no additional call to the server will be made.
+
+See how to consume this type of subscription [here](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-uses-included-documents).
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions<Order>()
+{
+    Includes = builder => builder
+        // The documents whose IDs are specified in the 'Product' property
+        // will be included in the batch
+        .IncludeDocuments(x => x.Lines.Select(y => y.Product))
+});
+`}
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+{
+    Query = @"from Orders include Lines[].Product"
+});
+`}
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+{
+    Query = @"declare function includeProducts(doc) {
+                  let includedFields = 0;
+                  let linesCount = doc.Lines.length;
+
+                  for (let i = 0; i < linesCount; i++) {
+                      includedFields++;
+                      include(doc.Lines[i].Product);
+                  }
+
+                  return doc;
+              }
+
+              from Orders as o select includeProducts(o)"
+});
+`}
+
+
+
+
+
+
+**Include using builder**:
+
+Include statements can be added to the subscription with `ISubscriptionIncludeBuilder`.
+This builder is assigned to the `Includes` property in [SubscriptionCreationOptions&lt;T&gt;](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscriptioncreationoptionst).
+It supports methods for including documents as well as [counters](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters).
+These methods can be chained.
+
+To include related documents, use the `IncludeDocuments` method.
+(See the _Builder-syntax_ tab in the example above).
+
+
+
+
+**Include using RQL**:
+
+The include statements can be written in two ways:
+
+1. Use the `include` keyword at the end of the query, followed by the paths to the fields containing the IDs of the documents to include.
+   It is recommended to use this approach whenever possible, both for the clarity of the query and for slightly better performance.
+   (See the _RQL-path-syntax_ tab in the example above).
+
+2. Define the `include` within a JavaScript function that is called from the `select` clause.
+   (See the _RQL-javascript-syntax_ tab in the example above).
+
+
+
+
+
+If you include documents when making a [projection](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---filter-and-project-fields),
+the include will search for the specified paths in the projected fields rather than in the original document.
+
+
+
+
+## Create subscription - include counters
+
+Here we create a subscription on the _Orders_ collection, which will send all the _Order_ documents.
+In addition, values for the specified counters will be **included** in the batch.
+
+Note:
+Modifying an existing counter's value after the document has been sent to the client does Not trigger re-sending.
+However, adding a new counter to the document or removing an existing one will trigger re-sending the document.
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions<Order>()
+{
+    Includes = builder => builder
+        // Values for the specified counters will be included in the batch
+        .IncludeCounters(new[] { "Pros", "Cons" })
+});
+`}
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+{
+    Query = @"from Orders include counters('Pros'), counters('Cons')"
+});
+`}
+
+
+
+`ISubscriptionIncludeBuilder` has three methods for including counters:
+
+
+
+{`// Include a single counter
+ISubscriptionIncludeBuilder IncludeCounter(string name);
+
+// Include multiple counters
+ISubscriptionIncludeBuilder IncludeCounters(string[] names);
+
+// Include ALL counters from ALL documents that match the subscription criteria
+ISubscriptionIncludeBuilder IncludeAllCounters();
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
+| **name** | `string` | The name of a counter. The subscription will include all counters with this name that are contained in the documents the subscription retrieves. |
+| **names** | `string[]` | Array of counter names. |
+
+**All include methods can be chained**:
+For example, the following subscription includes multiple counters and documents:
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions<Order>()
+\{
+    Includes = builder => builder
+        .IncludeCounter("Likes")
+        .IncludeCounters(new[] \{ "Pros", "Cons" \})
+        .IncludeDocuments("Employee")
+\});
+`}
+
+
+
+
+## Create subscription - subscribe to revisions
+
+Here we create a simple revisions subscription on the _Orders_ collection that will send pairs of subsequent document revisions to the client.
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(
+    // Use <Revision<T>> as the type for the processed items
+    // e.g. <Revision<Order>>
+    new SubscriptionCreationOptions<Revision<Order>>());
+`}
+
+
+
+
+{`subscriptionName = store.Subscriptions.Create(new SubscriptionCreationOptions()
+{
+    // Add (Revisions = true) to your subscription RQL
+    Query = @"From Orders (Revisions = true)"
+});
+`}
+
+
+
+
+Learn more about subscribing to document revisions in [subscriptions: revisions support](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx).
+
+
+
+## Create subscription - via update
+
+When attempting to update a subscription that does Not exist,
+you can request a new subscription to be created by setting `CreateNew` to `true`.
+In such a case, a new subscription will be created with the provided query.
+
+
+
+{`subscriptionName = store.Subscriptions.Update(new SubscriptionUpdateOptions()
+\{
+    Name = "my subscription",
+    Query = "from Products where PricePerUnit > 20",
+
+    // Set to true so that a new subscription will be created
+    // if a subscription with name "my subscription" does Not exist
+    CreateNew = true
+\});
+`}
+
+
+
+
+## Update existing subscription
+
+**Update subscription by name**:
+The subscription definition can be updated after it has been created.
+In this example we update the filtering **query** of an existing subscription named "my subscription".
+ + + +{`subscriptionName = store.Subscriptions.Update(new SubscriptionUpdateOptions() +\{ + // Specify the subscription you wish to modify + Name = "my subscription", + + // Provide a new query + Query = "from Products where PricePerUnit > 50" +\}); +`} + + +**Update subscription by id**: +In addition to the subscription name, each subscription is assigned a subscription ID when it is created by the server. +This ID can be used instead of the name when updating the subscription. + + + +{`// Get the subscription's ID +SubscriptionState mySubscription = store.Subscriptions.GetSubscriptionState("my subscription"); +long subscriptionId = mySubscription.SubscriptionId; + +// Update the subscription +subscriptionName = store.Subscriptions.Update(new SubscriptionUpdateOptions() +\{ + Id = subscriptionId, + Query = "from Products where PricePerUnit > 50" +\}); +`} + + + +Using the subscription ID allows you to modify the subscription name: + + + +{`// Get the subscription's ID +mySubscription = store.Subscriptions.GetSubscriptionState("my subscription"); +subscriptionId = mySubscription.SubscriptionId; + +// Update the subscription name +subscriptionName = store.Subscriptions.Update(new SubscriptionUpdateOptions() +\{ + Id = subscriptionId, + Name = "New name" +\}); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_examples-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_examples-nodejs.mdx new file mode 100644 index 0000000000..0c67863693 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_examples-nodejs.mdx @@ -0,0 +1,417 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This page contains examples of **creating a subscription**. + To learn how to consume and process documents sent by the subscription, see these [examples](../../../client-api/data-subscriptions/consumption/examples.mdx). + +* For a detailed syntax of the available subscription methods and objects, see this [API overview](../../../client-api/data-subscriptions/creation/api-overview.mdx). 
+
+* In this page:
+  * [Create subscription - for all documents in a collection](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---for-all-documents-in-a-collection)
+  * [Create subscription - filter documents](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---filter-documents)
+  * [Create subscription - filter and project fields](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---filter-and-project-fields)
+  * [Create subscription - project data from a related document](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---project-data-from-a-related-document)
+  * [Create subscription - include documents](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-documents)
+  * [Create subscription - include counters](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters)
+  * [Create subscription - subscribe to revisions](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---subscribe-to-revisions)
+  * [Create subscription - via update](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---via-update)
+  * [Update existing subscription](../../../client-api/data-subscriptions/creation/examples.mdx#update-existing-subscription)
+
+
+## Create subscription - for all documents in a collection
+
+Here we create a plain subscription on the _Orders_ collection without any constraints or transformations.
+The server will send ALL documents from the _Orders_ collection to a client that connects to this subscription.
+
+
+
+{`const subscriptionName = await documentStore.subscriptions.create(\{
+    // Optionally, provide a custom name for the subscription
+    name: "OrdersProcessingSubscription",
+
+    // You can provide the collection name in the RQL string in the 'query' param
+    query: "from Orders"
+\});
+`}
+
+
+
+{`const subscriptionName = await documentStore.subscriptions.create(\{
+    name: "OrdersProcessingSubscription",
+
+    // Or, you can provide the document type for the collection in the 'documentType' param
+    documentType: Order
+\});
+`}
+
+
+
+{`// Or, you can use the following overload,
+// pass the document class type to the 'create' method
+const subscriptionName = await documentStore.subscriptions.create(Order);
+`}
+
+
+
+
+## Create subscription - filter documents
+
+Here we create a subscription for documents from the _Orders_ collection where the total order revenue is greater than 100.
+Only documents that match this condition will be sent from the server to a client connected to this subscription.
+
+
+
+{`// Define the filtering criteria
+const query = \`
+    declare function getOrderLinesSum(doc) \{
+        var sum = 0;
+        for (var i in doc.Lines) \{
+            sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity;
+        \}
+        return sum;
+    \}
+
+    from Orders as o
+    where getOrderLinesSum(o) > 100\`;
+
+// Create the subscription with the defined query
+const subscriptionName = await documentStore.subscriptions.create(\{ query \});
+
+// In this case, the server will create a default name for the subscription
+// since no specific name was provided when creating the subscription.
+`}
+
+
+
+
+## Create subscription - filter and project fields
+
+Here, again, we create a subscription for documents from the _Orders_ collection where the total order revenue is greater than 100.
+However, this time we only project the document ID and the total revenue properties in each object sent to the client.
+
+
+
+{`const query = \`
+    declare function getOrderLinesSum(doc) \{
+        var sum = 0;
+        for (var i in doc.Lines) \{
+            sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity;
+        \}
+        return sum;
+    \}
+
+    declare function projectOrder(doc) \{
+        return \{
+            Id: doc.Id,
+            Total: getOrderLinesSum(doc)
+        \}
+    \}
+
+    from Orders as o
+    where getOrderLinesSum(o) > 100
+    select projectOrder(o)\`;
+
+const subscriptionName = await documentStore.subscriptions.create(\{ query \});
+`}
+
+
+
+
+## Create subscription - project data from a related document
+
+In this subscription, in addition to projecting the document fields,
+we also project data from a [related document](../../../indexes/indexing-related-documents.mdx#what-are-related-documents) that is loaded using the `load` method.
+
+
+
+{`const query = \`
+    declare function getOrderLinesSum(doc) \{
+        var sum = 0;
+        for (var i in doc.Lines) \{
+            sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity;
+        \}
+        return sum;
+    \}
+
+    declare function projectOrder(doc) \{
+        var employee = load(doc.Employee);
+        return \{
+            Id: doc.Id,
+            Total: getOrderLinesSum(doc),
+            ShipTo: doc.ShipTo,
+            EmployeeName: employee.FirstName + ' ' + employee.LastName
+        \}
+    \}
+
+    from Orders as o
+    where getOrderLinesSum(o) > 100
+    select projectOrder(o)\`;
+
+const subscriptionName = await documentStore.subscriptions.create(\{ query \});
+`}
+
+
+
+
+## Create subscription - include documents
+
+Here we create a subscription on the _Orders_ collection, which will send all the _Order_ documents.
+
+In addition, the related _Product_ documents associated with each Order are **included** in the batch sent to the client.
+This way, when the subscription worker that processes the batch in the client accesses a _Product_ document, no additional call to the server will be made.
+
+See how to consume this type of subscription [here](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-uses-included-documents).
+
+
+
+
+{`const options = {
+    // The documents whose IDs are specified in the 'Product' property
+    // will be included in the batch
+    includes: builder => builder.includeDocuments("Lines[].Product"),
+    documentType: Order
+};
+
+const subscriptionName = await documentStore.subscriptions.create(options);
+`}
+
+
+
+
+{`const query = \`from Orders include Lines[].Product\`;
+const subscriptionName = await documentStore.subscriptions.create({ query });
+`}
+
+
+
+
+{`const query = \`
+    declare function includeProducts(doc) {
+        let includedFields = 0;
+        let linesCount = doc.Lines.length;
+
+        for (let i = 0; i < linesCount; i++) {
+            includedFields++;
+            include(doc.Lines[i].Product);
+        }
+
+        return doc;
+    }
+
+    from Orders as o select includeProducts(o)\`;
+
+const subscriptionName = await documentStore.subscriptions.create({ query });
+`}
+
+
+
+
+
+
+**Include using builder**:
+
+Include statements can be added to the subscription with a _builder_ object.
+This builder is assigned to the `includes` property in the _options_ object.
+It supports methods for including documents as well as [counters](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters).
+These methods can be chained.
+
+See this [API overview](../../../client-api/data-subscriptions/creation/api-overview.mdx#include-methods) for all available include methods.
+ +To include related documents, use method `includeDocuments`. +(See the _Builder-syntax_ tab in the example above). + + + + +**Include using RQL**: + +The include statements can be written in two ways: + +1. Use the `include` keyword at the end of the query, followed by the paths to the fields containing the IDs of the documents to include. + It is recommended to prefer this approach whenever possible, both for the clarity of the query and for slightly better performance. + (See the _RQL-path-syntax_ tab in the example above). + +2. Define the `include` within a JavaScript function that is called from the `select` clause. + (See the _RQL-javascript-syntax_ tab in the example above). + + + + + +If you include documents when making a [projection](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---filter-and-project-fields), +the include will search for the specified paths in the projected fields rather than in the original document. + + + + +## Create subscription - include counters + +Here we create a subscription on the _Orders_ collection, which will send all the _Order_ documents. +In addition, values for the specified counters will be **included** in the batch. + +Note: +Modifying an existing counter's value after the document has been sent to the client does Not trigger re-sending. +However, adding a new counter to the document or removing an existing one will trigger re-sending the document. + + + + +{`const options = { + includes: builder => builder + // Values for the specified counters will be included in the batch + .includeCounters(["Pros", "Cons"]), + documentType: Order +}; + +const subscriptionName = await documentStore.subscriptions.create(options); +`} + + + + +{`const options = { + query: "from Orders include counters('Pros'), counters('Cons')" +}; + +const subscriptionName = await documentStore.subscriptions.create(options); +`} + + + + +**All include methods can be chained**: +For example, the following subscription includes multiple counters and documents: + + + +{`const options = \{ + includes: builder => builder + .includeCounter("Likes") + .includeCounters(["Pros", "Cons"]) + .includeDocuments("Employee"), + documentType: Order +\}; + +const subscriptionName = await documentStore.subscriptions.create(options); +`} + + + + + +## Create subscription - subscribe to revisions + +Here we create a simple revisions subscription on the _Orders_ collection that will send pairs of subsequent document revisions to the client. + + + + +{`const subscriptionName = await documentStore.subscriptions.createForRevisions({ + documentType: Order +}); +`} + + + + +{`const subscriptionName = await documentStore.subscriptions.createForRevisions({ + query: "from Orders (Revisions = true)" +}); +`} + + + + +Learn more about subscribing to document revisions in [subscriptions: revisions support](../../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx). + + + +## Create subscription - via update + +When attempting to update a subscription that does Not exist, +you can request a new subscription to be created by setting `createNew` to `true`. +In such a case, a new subscription will be created with the provided query. 
+ + + +{`const subscriptionName = await documentStore.subscriptions.update(\{ + name: "my subscription", + query: "from Products where PricePerUnit > 20", + + // Set to true so that a new subscription will be created + // if a subscription with name "my subscription" does Not exist + createNew: true +\}); +`} + + + + + +## Update existing subscription + +**Update subscription by name**: +The subscription definition can be updated after it has been created. +In this example we update the filtering **query** of an existing subscription named "my subscription". + + + +{`const subscriptionName = await documentStore.subscriptions.update(\{ + // Specify the subscription you wish to modify + name: "my subscription", + + // Provide a new query + query: "from Products where PricePerUnit > 50" +\}); +`} + + +**Update subscription by id**: +In addition to the subscription name, each subscription is assigned a subscription ID when it is created by the server. +This ID can be used instead of the name when updating the subscription. + + + +{`// Get the subscription's ID +const mySubscription = await documentStore.subscriptions.getSubscriptionState("my subscription"); +const subscriptionId = mySubscription.subscriptionId; + +// Update the subscription +const subscriptionName = await documentStore.subscriptions.update(\{ + id: subscriptionId, + query: "from Products where PricePerUnit > 50" +\}); +`} + + + +Using the subscription ID allows you to modify the subscription name: + + + +{`// Get the subscription's ID +const mySubscription = await documentStore.subscriptions.getSubscriptionState("my subscription"); +const subscriptionId = mySubscription.subscriptionId; + +// Update the subscription's name +const subscriptionName = await documentStore.subscriptions.update(\{ + id: subscriptionId, + name: "new name" +\}); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_examples-python.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_examples-python.mdx new file mode 100644 index 0000000000..8a45037cb4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_examples-python.mdx @@ -0,0 +1,322 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This page contains examples of **creating a subscription**. + To learn how to consume and process documents sent by the subscription, see these [examples](../../../client-api/data-subscriptions/consumption/examples.mdx). + +* For a detailed syntax of the available subscription methods and objects, see this [API overview](../../../client-api/data-subscriptions/creation/api-overview.mdx). 
+ +* In this page: + * [Create subscription - for all documents in a collection](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---for-all-documents-in-a-collection) + * [Create subscription - filter documents](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---filter-documents) + * [Create subscription - filter and project fields](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---filter-and-project-fields) + * [Create subscription - project data from a related document](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---project-data-from-a-related-document) + * [Create subscription - include documents](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-documents) + * [Create subscription - include counters](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters) + * [Update existing subscription](../../../client-api/data-subscriptions/creation/examples.mdx#update-existing-subscription) + + +## Create subscription - for all documents in a collection + +Here we create a plain subscription on the _Orders_ collection without any constraints or transformations. +The server will send ALL documents from the _Orders_ collection to a client that connects to this subscription. + + + + +{`name = store.subscriptions.create_for_class( + Order, SubscriptionCreationOptions(name="OrdersProcessingSubscription") +) +`} + + + + +{`name = store.subscriptions.create_for_options(SubscriptionCreationOptions(query="From Orders")) +`} + + + + + + +## Create subscription - filter documents + +Here we create a subscription for documents from the _Orders_ collection where the total order revenue is greater than 100. +Only documents that match this condition will be sent from the server to a client connected to this subscription. + + + +{`name = store.subscriptions.create_for_options( + SubscriptionCreationOptions( + query=( + "declare function getOrderLinesSum(doc) \{" + " var sum = 0;" + " for (var i in doc.Lines) \{" + " sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity;" + " \}" + " return sum;" + "\}" + "From Orders as o " + "Where getOrderLinesSum(o) > 100 " + ) + ), +) +`} + + + + + +## Create subscription - filter and project fields + +Here, again, we create a subscription for documents from the _Orders_ collection where the total order revenue is greater than 100. +However, this time we only project the document ID and the Total Revenue properties in each object sent to the client. + + + +{`name = store.subscriptions.create_for_options( + SubscriptionCreationOptions( + query=""" + declare function getOrderLinesSum(doc) \{ + var sum = 0; + for (var i in doc.Lines) \{ + sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity; + \} + return sum; + \} + + declare function projectOrder(doc) \{ + return \{ + Id: doc.Id, + Total: getOrderLinesSum(doc) + \}; + \} + + From Orders as o + Where getOrderLinesSum(o) > 100 + Select projectOrder(o) + """ + ) +) +`} + + + + + +## Create subscription - project data from a related document + +In this subscription, in addition to projecting the document fields, +we also project data from a [related document](../../../indexes/indexing-related-documents.mdx#what-are-related-documents) that is loaded using the `load` method. 
+
+{`name = store.subscriptions.create_for_options(
+    SubscriptionCreationOptions(
+        query="""
+        declare function getOrderLinesSum(doc) \{
+            var sum = 0;
+            for (var i in doc.Lines) \{
+                sum += doc.Lines[i].PricePerUnit * doc.Lines[i].Quantity;
+            \}
+            return sum;
+        \}
+
+        declare function projectOrder(doc) \{
+            var employee = load(doc.Employee);
+            return \{
+                Id: doc.Id,
+                Total: getOrderLinesSum(doc),
+                ShipTo: doc.ShipTo,
+                EmployeeName: employee.FirstName + ' ' + employee.LastName
+            \};
+        \}
+
+        From Orders as o
+        Where getOrderLinesSum(o) > 100
+        Select projectOrder(o)
+        """
+    )
+)
+`}
+
+
+
+
+## Create subscription - include documents
+
+Here we create a subscription on the _Orders_ collection, which will send all the _Order_ documents.
+
+In addition, the related _Product_ documents associated with each Order are **included** in the batch sent to the client.
+This way, when the subscription worker that processes the batch in the client accesses a _Product_ document, no additional call to the server will be made.
+
+See how to consume this type of subscription [here](../../../client-api/data-subscriptions/consumption/examples.mdx#subscription-that-uses-included-documents).
+
+
+
+
+{`store.subscriptions.create_for_class(
+    Order,
+    SubscriptionCreationOptions(includes=lambda builder: builder.include_documents("Lines[].Product")),
+)
+`}
+
+
+
+
+{`store.subscriptions.create_for_options(
+    SubscriptionCreationOptions(query="from Orders include Lines[].Product")
+)
+`}
+
+
+
+
+{`store.subscriptions.create_for_options(
+    SubscriptionCreationOptions(
+        query="""
+        declare function includeProducts(doc) {
+            let includedFields = 0;
+            let linesCount = doc.Lines.length;
+
+            for (let i = 0; i < linesCount; i++) {
+                includedFields++;
+                include(doc.Lines[i].Product);
+            }
+
+            return doc;
+        }
+
+        from Orders as o select includeProducts(o)
+        """
+    )
+)
+`}
+
+
+
+
+
+
+**Include using builder**:
+
+Include statements can be added to the subscription with `SubscriptionIncludeBuilder`.
+This builder is assigned to the `includes` property in [SubscriptionCreationOptions](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscriptioncreationoptions).
+It supports methods for including documents as well as [counters](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters).
+These methods can be chained.
+
+To include related documents, use the `include_documents` method.
+(See the _Builder-syntax_ tab in the example above).
+
+
+
+
+**Include using RQL**:
+
+The include statements can be written in two ways:
+
+1. Use the `include` keyword at the end of the query, followed by the paths to the fields containing the IDs of the documents to include.
+   It is recommended to use this approach whenever possible, both for the clarity of the query and for slightly better performance.
+   (See the _RQL-path-syntax_ tab in the example above).
+
+2. Define the `include` within a JavaScript function that is called from the `select` clause.
+   (See the _RQL-javascript-syntax_ tab in the example above).
+
+
+
+
+
+If you include documents when making a [projection](../../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---filter-and-project-fields),
+the include will search for the specified paths in the projected fields rather than in the original document.
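+
+To illustrate the note above, one way to combine a projection with an include is to call `include()` inside the projecting function itself, so that the include path is read from the original document before the projection is returned. The following is a sketch only, under that assumption, following the `includeProducts` pattern shown earlier (the projected fields are illustrative):
+
+
+
+{`# A sketch only: project a few fields while still including
+# the related Employee document in the batch sent to the client.
+store.subscriptions.create_for_options(
+    SubscriptionCreationOptions(
+        query="""
+        declare function projectOrder(doc) \{
+            include(doc.Employee);
+            return \{ Id: doc.Id, ShipTo: doc.ShipTo \};
+        \}
+
+        from Orders as o
+        select projectOrder(o)
+        """
+    )
+)
+`}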
+ + + + + +## Create subscription - include counters + +`SubscriptionIncludeBuilder` has three methods for including counters: + + + +{`def include_counter(self, name: str) -> SubscriptionIncludeBuilder: ... + +def include_counters(self, *names: str) -> SubscriptionIncludeBuilder: ... + +def include_all_counters(self) -> SubscriptionIncludeBuilder: ... +`} + + + +`include_counter` is used to specify a single counter. +`include_counters` is used to specify multiple counters. +`include_all_counters` retrieves all counters from all subscribed documents. + +| Parameter | Type | Description | +|-------------|-------|--------------------------------------------------------------------------------------------------------------------------------------------------| +| **name** | `str` | The name of a counter. The subscription will include all counters with this name that are contained in the documents the subscription retrieves. | +| **\*names** | `str` | Array of counter names. | + +The following subscription, which includes multiple counters in the batch sent to the client, +demonstrates how the methods can be chained. + + + +{`store.subscriptions.create_for_class( + Order, + SubscriptionCreationOptions( + includes=lambda builder: builder + .include_counter("Likes") + .include_counters("Pros", "Cons") + ), +) +`} + + + + + +## Update existing subscription + +The subscription definition can be updated after it has been created. +In this example we update the filtering query of an existing subscription named "my subscription". + + + +{`store.subscriptions.update(SubscriptionUpdateOptions( + name="My subscription", query="from Products where PricePerUnit > 50")) +`} + + + + +**Modifying the subscription's name**: + +In addition to the subscription name, each subscription is assigned a **subscription ID** when it is created by the server. +This ID can be used to identify the subscription, instead of the name, when updating the subscription. + +This allows users to change an existing subscription's **name** by specifying the subscription's ID +and submitting a new string in the `name` field of `SubscriptionUpdateOptions`. + + + +{`my_subscription = store.subscriptions.get_subscription_state("my subscription") + +subscription_id = my_subscription.subscription_id + +store.subscriptions.update(SubscriptionUpdateOptions(key=subscription_id, name="new name")) +`} + + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-csharp.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-csharp.mdx new file mode 100644 index 0000000000..fa87558cb4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-csharp.mdx @@ -0,0 +1,105 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A subscription task can be created in two ways: + * **From the client API**: + The client can create a subscription task on the server using this [creation API](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation). + * **From the Studio**: + See [creating subscription task](../../../studio/database/tasks/ongoing-tasks/subscription-task.mdx) to learn how to create a subscription task on the server via the Studio. 
+
+* Once created, its definition and progress are stored on the cluster, rather than on a single server.
+
+* Upon subscription creation, the cluster will choose a preferred node that will run the subscription
+  (unless the client has stated a responsible node).
+
+* From that point on, clients that connect to a server in order to consume the subscription will be redirected to that node.
+
+* In this page:
+    * [Subscription creation](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#subscription-creation)
+    * [Subscription name](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#subscription-name)
+    * [Responsible node](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#responsible-node)
+
+
+## Subscription creation
+
+A data subscription is a batch processing mechanism that sends documents that meet specific criteria to connected clients.
+
+In order to create a data subscription, we first need to define the criteria.
+The basic requirement is to specify the collection from which the subscription will retrieve documents.
+However, the criteria can be a complex RQL-like expression defining JavaScript functions that filter documents and project their content.
+
+* The following is a simple subscription definition:
+
+
+
+{`// With the following subscription definition, the server will send ALL documents
+// from the 'Orders' collection to a client that connects to this subscription.
+subscriptionName = store.Subscriptions.Create<Order>();
+`}
+
+
+
+* For more complex subscription creation scenarios, see these [examples](../../../client-api/data-subscriptions/creation/examples.mdx).
+
+* A subscription can also be modified after it has been created; see [update existing subscription](../../../client-api/data-subscriptions/creation/examples.mdx#update-existing-subscription).
+
+
+
+
+## Subscription name
+
+In order to consume a data subscription, a subscription name is required to identify it.
+If you don't specify a name when creating the subscription, the server will automatically generate a default name.
+However, you have the option to provide a custom name for the subscription.
+
+A dedicated name can be useful for use cases like dedicated, long-running batch processing mechanisms,
+where it is more convenient to use a human-readable name in the code, and even to use the same name between different environments
+(as long as subscription creation is taken care of upfront).
+
+
+
+{`subscriptionName = store.Subscriptions.Create<Order>(new SubscriptionCreationOptions
+\{
+    // Set a custom name for the subscription
+    Name = "OrdersProcessingSubscription"
+\});
+`}
+
+
+
+
+Note that the subscription name is unique; it is not possible to create two subscriptions with the same name in the same database.
+
+
+
+
+## Responsible node
+
+As stated above, upon creation, the cluster will choose a node that will be responsible for managing the subscription task on the server-side.
+Once chosen, that node will be the only node to manage the subscription.
+
+An Enterprise license feature supports failover of subscriptions (and any other ongoing task) between nodes;
+however, as long as the originally assigned node is online, it will be the one to manage the data subscription task.
+
+Nevertheless, there is an option to manually decide which node will be responsible for managing the subscription task.
+Provide the tag of the node you wish to be responsible in the `MentorNode` property as follows:
+
+
+
+{`subscriptionName = store.Subscriptions.Create<Order>(new SubscriptionCreationOptions
+\{
+    MentorNode = "D"
+\});
+`}
+
+
+
+Manually setting the node can help choose a more suitable server based on factors such as resources, client proximity, or other considerations.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-java.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-java.mdx
new file mode 100644
index 0000000000..65156682d4
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-java.mdx
@@ -0,0 +1,99 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A subscription task can be created in two ways:
+    * **From the client API**:
+      The client can create a subscription task on the server using this [creation API](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation).
+    * **From the Studio**:
+      See [creating subscription task](../../../studio/database/tasks/ongoing-tasks/subscription-task.mdx) to learn how to create a subscription task on the server via the Studio.
+
+* Once created, its definition and progress are stored on the cluster, rather than on a single server.
+
+* Upon subscription creation, the cluster will choose a preferred node that will run the subscription
+  (unless the client has stated a mentor node).
+
+* From that point on, clients that connect to a server in order to consume the subscription will be redirected to that node.
+
+* In this page:
+    * [Subscription creation](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#subscription-creation)
+    * [Subscription name](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#subscription-name)
+    * [Responsible node](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#responsible-node)
+
+
+## Subscription creation
+
+A data subscription is a batch processing mechanism that sends documents that meet specific criteria to connected clients.
+
+In order to create a data subscription, we first need to define the criteria.
+The basic requirement is to specify the collection from which the subscription will retrieve documents.
+However, the criteria can be a complex RQL-like expression defining JavaScript functions that filter documents and project their content.
+
+* The following is a simple subscription definition:
+
+
+
+{`// With the following subscription definition, the server will send ALL documents
+// from the 'Orders' collection to a client that connects to this subscription.
+name = store.subscriptions().create(Order.class);
+`}
+
+
+
+* For more complex subscription definitions, see these [examples](../../../client-api/data-subscriptions/creation/examples.mdx).
+
+
+
+## Subscription name
+
+In order to consume a data subscription, a subscription name is required to identify it.
+If you don't specify a name when creating the subscription, the server will automatically generate a default name.
+However, you have the option to provide a custom name for the subscription.
+
+A dedicated name can be useful for use cases like dedicated, long-running batch processing mechanisms,
+where it is more convenient to use a human-readable name in the code, and even to use the same name between different environments
+(as long as subscription creation is taken care of upfront).
+
+
+
+{`SubscriptionCreationOptions options = new SubscriptionCreationOptions();
+options.setName("OrdersProcessingSubscription");
+name = store.subscriptions().create(Order.class, options);
+`}
+
+
+
+
+Note that the subscription name is unique; it is not possible to create two subscriptions with the same name in the same database.
+
+
+
+
+## Responsible node
+
+As stated above, upon creation, the cluster will choose a node that will be responsible for managing the subscription task on the server-side.
+Once chosen, that node will be the only node to manage the subscription.
+
+An Enterprise license feature supports failover of subscriptions (and any other ongoing task) between nodes;
+however, as long as the originally assigned node is online, it will be the one to manage the data subscription task.
+
+Nevertheless, there is an option to manually decide which node will be responsible for managing the subscription task.
+Provide the tag of the node you wish to be responsible in the `MentorNode` property as follows:
+
+
+
+{`SubscriptionCreationOptions options = new SubscriptionCreationOptions();
+options.setMentorNode("D");
+name = store.subscriptions().create(Order.class, options);
+`}
+
+
+
+Manually setting the node can help choose a more suitable server based on factors such as resources, client proximity, or other considerations.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-nodejs.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-nodejs.mdx
new file mode 100644
index 0000000000..516a3955c4
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-nodejs.mdx
@@ -0,0 +1,107 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A subscription task can be created in two ways:
+    * **From the client API**:
+      The client can create a subscription task on the server using this [creation API](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation).
+    * **From the Studio**:
+      See [creating subscription task](../../../studio/database/tasks/ongoing-tasks/subscription-task.mdx) to learn how to create a subscription task on the server via the Studio.
+
+* Once created, its definition and progress are stored on the cluster, rather than on a single server.
+
+* Upon subscription creation, the cluster will choose a preferred node that will run the subscription
+  (unless the client has stated a mentor node).
+
+* From that point on, clients that connect to a server in order to consume the subscription will be redirected to that node.
+
+* In this page:
+    * [Subscription creation](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#subscription-creation)
+    * [Subscription name](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#subscription-name)
+    * [Responsible node](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#responsible-node)
+
+
+## Subscription creation
+
+A data subscription is a batch processing mechanism that sends documents that meet specific criteria to connected clients.
+
+In order to create a data subscription, we first need to define the criteria.
+The basic requirement is to specify the collection from which the subscription will retrieve documents.
+However, the criteria can be a complex RQL-like expression defining JavaScript functions that filter documents and project their content.
+
+* The following is a simple subscription definition:
+
+
+
+{`// With the following subscription definition, the server will send ALL documents
+// from the 'Orders' collection to a client that connects to this subscription.
+const subscriptionName = await documentStore.subscriptions.create(\{
+    query: "from Orders"
+\});
+`}
+
+
+
+* For more complex subscription creation scenarios, see these [examples](../../../client-api/data-subscriptions/creation/examples.mdx).
+
+* A subscription can also be modified after it has been created; see [update existing subscription](../../../client-api/data-subscriptions/creation/examples.mdx#update-existing-subscription).
+
+
+
+## Subscription name
+
+In order to consume a data subscription, a subscription name is required to identify it.
+If you don't specify a name when creating the subscription, the server will automatically generate a default name.
+However, you have the option to provide a custom name for the subscription.
+
+A dedicated name can be useful for use cases like dedicated, long-running batch processing mechanisms,
+where it is more convenient to use a human-readable name in the code, and even to use the same name between different environments
+(as long as subscription creation is taken care of upfront).
+
+
+
+{`const name = await store.subscriptions.create(\{
+    query: "from Orders",
+    // Set a custom name for the subscription
+    name: "OrdersProcessingSubscription"
+\});
+`}
+
+
+
+
+Note that the subscription name is unique; it is not possible to create two subscriptions with the same name in the same database.
+
+
+
+
+## Responsible node
+
+As stated above, upon creation, the cluster will choose a node that will be responsible for managing the subscription task on the server-side.
+Once chosen, that node will be the only node to manage the subscription.
+
+An Enterprise license feature supports failover of subscriptions (and any other ongoing task) between nodes;
+however, as long as the originally assigned node is online, it will be the one to manage the data subscription task.
+
+Nevertheless, there is an option to manually decide which node will be responsible for managing the subscription task.
+Provide the tag of the node you wish to be responsible in the `mentorNode` property as follows:
+
+
+
+{`const name = await store.subscriptions.create(\{
+    query: "from Orders",
+    // Set a responsible node for the subscription task
+    mentorNode: "D"
+\});
+`}
+
+
+
+Manually setting the node can help choose a more suitable server based on factors such as resources, client proximity, or other considerations.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-python.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-python.mdx
new file mode 100644
index 0000000000..c5ef376ecc
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/_how-to-create-data-subscription-python.mdx
@@ -0,0 +1,99 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A subscription task can be created in two ways:
+    * **From the client API**:
+      The client can create a subscription task on the server using this [creation API](../../../client-api/data-subscriptions/creation/api-overview.mdx#subscription-creation).
+    * **From the Studio**:
+      See [creating subscription task](../../../studio/database/tasks/ongoing-tasks/subscription-task.mdx) to learn how to create a subscription task on the server via the Studio.
+
+* Once created, its definition and progress are stored on the cluster, rather than on a single server.
+
+* Upon subscription creation, the cluster will choose a preferred node that will run the subscription
+  (unless the client has stated a responsible node).
+
+* From that point on, clients that connect to a server in order to consume the subscription will be redirected to that node.
+
+* In this page:
+    * [Subscription creation](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#subscription-creation)
+    * [Subscription name](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#subscription-name)
+    * [Responsible node](../../../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx#responsible-node)
+
+
+## Subscription creation
+
+A data subscription is a batch processing mechanism that sends documents that meet specific criteria to connected clients.
+
+In order to create a data subscription, we first need to define the criteria.
+The basic requirement is to specify the collection from which the subscription will retrieve documents.
+However, the criteria can be a complex RQL-like expression defining JavaScript functions that filter documents and project their content.
+
+* The following is a simple subscription definition:
+
+
+
+{`# With the following subscription definition, the server will send ALL documents
+# from the 'Orders' collection to a client that connects to this subscription.
+name = store.subscriptions.create_for_class(Order)
+`}
+
+
+
+* For more complex subscription definitions, see these [examples](../../../client-api/data-subscriptions/creation/examples.mdx).
+
+* A subscription can also be modified after it has been created; see [update existing subscription](../../../client-api/data-subscriptions/creation/examples.mdx#update-existing-subscription).
+
+
+
+## Subscription name
+
+In order to consume a data subscription, a subscription name is required to identify it.
+If you don't specify a name when creating the subscription, the server will automatically generate a default name.
+However, you have the option to provide a custom name for the subscription.
+
+A dedicated name can be useful for use cases like dedicated, long-running batch processing mechanisms,
+where it is more convenient to use a human-readable name in the code, and even to use the same name between different environments
+(as long as subscription creation is taken care of upfront).
+
+
+
+{`name = store.subscriptions.create_for_class(
+    Order, SubscriptionCreationOptions(name="OrdersProcessingSubscription")
+)
+`}
+
+
+
+
+Note that the subscription name is unique; it is not possible to create two subscriptions with the same name in the same database.
+
+
+
+
+## Responsible node
+
+As stated above, upon creation, the cluster will choose a node that will be responsible for managing the subscription task on the server-side.
+Once chosen, that node will be the only node to manage the subscription.
+
+An Enterprise license feature supports failover of subscriptions (and any other ongoing task) between nodes;
+however, as long as the originally assigned node is online, it will be the one to manage the data subscription task.
+
+Nevertheless, there is an option to manually decide which node will be responsible for managing the subscription task.
+Provide the tag of the node you wish to be responsible in the `mentor_node` property as follows:
+
+
+
+{`name = store.subscriptions.create_for_class(Order, SubscriptionCreationOptions(mentor_node="D"))
+`}
+
+
+
+Manually setting the node can help choose a more suitable server based on factors such as resources, client proximity, or other considerations.
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/api-overview.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/api-overview.mdx
new file mode 100644
index 0000000000..6b8b498d56
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/api-overview.mdx
@@ -0,0 +1,42 @@
+---
+title: "Create and Update Subscription API"
+hide_table_of_contents: true
+sidebar_label: API Overview
+sidebar_position: 2
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import ApiOverviewCsharp from './_api-overview-csharp.mdx';
+import ApiOverviewPython from './_api-overview-python.mdx';
+import ApiOverviewNodejs from './_api-overview-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "python", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/examples.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/examples.mdx
new file mode 100644
index 0000000000..5955bff6bd
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/examples.mdx
@@ -0,0 +1,42 @@
+---
+title: "Data Subscription Creation Examples"
+hide_table_of_contents: true
+sidebar_label: Examples
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import ExamplesCsharp from './_examples-csharp.mdx';
+import ExamplesPython from './_examples-python.mdx';
+import ExamplesNodejs from './_examples-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "python", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx
b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx
new file mode 100644
index 0000000000..c2dfa20da2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx
@@ -0,0 +1,46 @@
+---
+title: "How to Create a Data Subscription"
+hide_table_of_contents: true
+sidebar_label: How to Create a Data Subscription
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import HowToCreateDataSubscriptionCsharp from './_how-to-create-data-subscription-csharp.mdx';
+import HowToCreateDataSubscriptionJava from './_how-to-create-data-subscription-java.mdx';
+import HowToCreateDataSubscriptionPython from './_how-to-create-data-subscription-python.mdx';
+import HowToCreateDataSubscriptionNodejs from './_how-to-create-data-subscription-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/data-subscriptions/what-are-data-subscriptions.mdx b/versioned_docs/version-7.1/client-api/data-subscriptions/what-are-data-subscriptions.mdx
new file mode 100644
index 0000000000..108c0aa5a0
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/data-subscriptions/what-are-data-subscriptions.mdx
@@ -0,0 +1,43 @@
+---
+title: "Data Subscriptions"
+hide_table_of_contents: true
+sidebar_label: What are Data Subscriptions
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import WhatAreDataSubscriptionsCsharp from './_what-are-data-subscriptions-csharp.mdx';
+import WhatAreDataSubscriptionsJava from './_what-are-data-subscriptions-java.mdx';
+import WhatAreDataSubscriptionsNodejs from './_what-are-data-subscriptions-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/document-identifiers/_category_.json b/versioned_docs/version-7.1/client-api/document-identifiers/_category_.json
new file mode 100644
index 0000000000..c99a4325c7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/document-identifiers/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 12,
+  "label": "Document Identifiers"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/document-identifiers/_hilo-algorithm-csharp.mdx b/versioned_docs/version-7.1/client-api/document-identifiers/_hilo-algorithm-csharp.mdx
new file mode 100644
index 0000000000..419b87d639
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/document-identifiers/_hilo-algorithm-csharp.mdx
@@ -0,0 +1,317 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The HiLo algorithm is the default method used by a RavenDB client to generate unique document IDs when storing a new document **without explicitly providing an `Id` value**.
+
+* It is an efficient solution used by the [session](../session/what-is-a-session-and-how-does-it-work.mdx) to generate the numeric part of the document identifier.
+ These numeric values are then combined with the collection name and server node-tag to create document identifiers such as `orders/10-A` or `products/93-B`. + +* An example of storing a document without specifying its `Id` is available in section [Autogenerated HiLo IDs](../../client-api/document-identifiers/working-with-document-identifiers.mdx#autogenerated-ids). + For an overview of all methods for generating unique IDs in RavenDB, see: + * [Document identifier generation](../../server/kb/document-identifier-generation.mdx) + * [Working with document identifiers](../../client-api/document-identifiers/working-with-document-identifiers.mdx). +* In this page: + * [How the HiLo algorithm works in RavenDB](../../client-api/document-identifiers/hilo-algorithm.mdx#how-the-hilo-algorithm-works-in-ravendb) + * [Generating unique IDs efficiently](../../client-api/document-identifiers/hilo-algorithm.mdx#generating-unique-ids-efficiently) + * [Using HiLo documents](../../client-api/document-identifiers/hilo-algorithm.mdx#using-hilo-documents) + * [Returning HiLo ranges](../../client-api/document-identifiers/hilo-algorithm.mdx#returning-hilo-ranges) + * [Identity parts separator](../../client-api/document-identifiers/hilo-algorithm.mdx#identity-parts-separator) + * [Manual HiLo ID generation](../../client-api/document-identifiers/hilo-algorithm.mdx#manual-hilo-id-generation) + * [Get next ID - number only](../../client-api/document-identifiers/hilo-algorithm.mdx#get-next-id---number-only) + * [Get next ID - full document ID](../../client-api/document-identifiers/hilo-algorithm.mdx#get-next-id---full-document-id) + * [Overriding the HiLo algorithm](../../client-api/document-identifiers/hilo-algorithm.mdx#overriding-the-hilo-algorithm) + + + +## How the HiLo algorithm works in RavenDB + +### Generating unique IDs efficiently: + +**The client creates IDs from a range of unique numbers that it gets from the server.** +The HiLo algorithm is efficient because the client can automatically generate unique document IDs +without checking with the server or cluster each time a new document is created to ensure that the new ID is unique. +The client receives from the server a range of numbers that are reserved for the client's usage. + +Each time a session creates a new document, the client assigns the new document an ID based on the next number from that range. +For example, the first client to generate documents on a collection will receive the reserved numbers 1-32. The next one will reserve numbers 33-64, and so on. + +**The collection name and node-tag are added to the ID.** +To further ensure that no two clients generate a document with the same ID, the collection name and the server node-tag are added to the ID. +This is an added measure so that if two nodes B and C are working with the same range of numbers, the IDs generated will be `orders/54-B` and `orders/54-C`. +This situation is rare because as long as the nodes can communicate when requesting a range of numbers, the clients will receive a different range of numbers. +The node-tag is added to ensure unique IDs across the cluster. + +Thus, with minimal trips to the server, the client is able to determine to which collection an entity belongs +and automatically assign it a number with a node-tag to ensure that the ID is unique across the cluster. 
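+
+Conceptually, the client-side generator behaves like the following sketch.
+This is an illustrative toy example only - not RavenDB's actual implementation,
+which also handles concurrency, dynamic range sizing, and per-collection state:
+
+
+
+{`// Toy sketch: hand out IDs from a reserved range,
+// contacting the server only when the range is exhausted.
+public class ToyHiLoGenerator
+\{
+    private long _current; // last number handed out
+    private long _max;     // upper bound of the currently reserved range
+    private readonly Func<(long Min, long Max)> _reserveRangeFromServer;
+
+    public ToyHiLoGenerator(Func<(long Min, long Max)> reserveRangeFromServer)
+    \{
+        _reserveRangeFromServer = reserveRangeFromServer;
+    \}
+
+    public string NextId(string collection, char nodeTag)
+    \{
+        if (_current >= _max)
+        \{
+            // One server call reserves the next range, e.g. 1-32, then 33-64
+            var range = _reserveRangeFromServer();
+            _current = range.Min - 1;
+            _max = range.Max;
+        \}
+
+        _current++;
+        return $"\{collection\}/\{_current\}-\{nodeTag\}"; // e.g. "orders/1-A"
+    \}
+\}
+`}
+
+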
+### Using HiLo documents:
+
+**HiLo documents are used by the server to provide the next range of numbers.**
+To ensure that multiple clients can generate identifiers simultaneously,
+a mechanism is needed to avoid duplication.
+
+This is handled by `Raven/HiLo/<collection-name>` documents, stored in the `@hilo` collection in the database.
+These documents are created and modified by the server and have a simple structure:
+
+
+
+{`\{
+    "Max": 32,
+    "@metadata": \{
+        "@collection": "@hilo"
+    \}
+\}
+`}
+
+
+
+The `Max` property is the highest number that may have been used by any client to create an identifier for a given collection. It is used as follows:
+
+1. The client asks the server for a range of numbers that it can use to create a document.
+   (32 is the initial capacity, but the range size can dynamically expand based on how frequently the client requests HiLo ranges).
+2. Then, the server checks the HiLo document to see what the last `Max` number it sent to any client for this collection was.
+3. The client will get the min and the max values it can use from the server (33 - 64 in our case).
+4. Then, the client creates a range object using the values received from the server.
+   This range object is then used to generate unique document IDs as needed.
+5. When the client reaches the max limit, it will repeat the process.
+
+
+
+## Returning HiLo ranges
+
+When the document store is disposed, the client sends the server the last value it used to create an identifier
+and the max value that was previously received from the server.
+
+If the max value on the server-side is equal to the max value of the client, and
+the last value used by the client is smaller than or equal to the max of the server-side,
+the server will update the `Max` value to the last value used by the client.
+
+
+
+{`var store = new DocumentStore();
+
+using (var session = store.OpenSession())
+\{
+    // Storing the first entity causes the client to receive the initial HiLo range (1-32)
+    session.Store(new Employee
+    \{
+        FirstName = "John",
+        LastName = "Doe"
+    \});
+
+    session.SaveChanges();
+    // The document ID will be: employees/1-A
+\}
+
+// Release the range when it is no longer relevant
+store.Dispose();
+`}
+
+
+
+`store.Dispose()` is used in this example to demonstrate that the range is released.
+In normal use, the `store` should only be disposed when the application is closed.
+
+After execution of the code above, the `Max` value of the HiLo document for the _Employees_ collection on the server will be 1.
+That's because the client used only one identifier from the range it got before we disposed the store.
+
+The next time a client asks the server for a range of numbers for this collection, it will get (in our example) the range 2 - 33.
+
+
+
+{`var newStore = new DocumentStore();
+using (var session = newStore.OpenSession())
+\{
+    // Storing an entity after disposing the store in the previous example
+    // causes the client to receive the next HiLo range (2-33)
+    session.Store(new Employee
+    \{
+        FirstName = "Dave",
+        LastName = "Brown"
+    \});
+
+    session.SaveChanges();
+    // The document ID will be: employees/2-A
+\}
+`}
+
+
+
+
+
+
+
+#### Identity parts separator
+* By default, document IDs created by the server use the character `/` to separate their components.
+
+* This separator can be customized to any other character, except `|`, by setting the [IdentityPartsSeparator](../../client-api/configuration/conventions.mdx#identitypartsseparator) convention.
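+
+For example, a minimal sketch of customizing the separator through the store conventions
+(the URL and database name here are just placeholders):
+
+
+
+{`using (var store = new DocumentStore
+\{
+    Urls = new[] \{ "http://localhost:8080" \},
+    Database = "Northwind"
+\})
+\{
+    // Must be set before Initialize() is called
+    store.Conventions.IdentityPartsSeparator = '-';
+    store.Initialize();
+
+    // Autogenerated IDs will now look like "orders-1-A" instead of "orders/1-A"
+\}
+`}
+
+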
+
+
+
+## Manual HiLo ID generation
+
+* **Automatic generation**:
+  When the session stores a new document with the `Id` set to `null`, RavenDB's default HiLo ID generator automatically generates the ID for the document.
+  This document ID includes the collection name, a unique number, and the server node-tag, ensuring the ID is unique across the database.
+
+* **Manual generation**:
+  We provide you with the option of manually retrieving the next ID from the HiLo range currently reserved for the client without having to store the document first.
+  You can retrieve either the next number portion or the full document ID and then use it when storing the document, as explained below:
+  * [Get next ID - number only](../../client-api/document-identifiers/hilo-algorithm.mdx#get-next-id---number-only)
+  * [Get next ID - full document ID](../../client-api/document-identifiers/hilo-algorithm.mdx#get-next-id---full-document-id)
+### Get next ID - number only
+
+You can take advantage of the HiLo algorithm and create documents with your own customized ID that is based on the next HiLo ID number provided by the client.
+
+
+
+* Manually getting the next HiLo ID number provides only **the next number in the HiLo range**;
+  it does **not** include the collection name and the server node-tag.
+* Therefore, when manually specifying your own IDs this way,
+  you are responsible for ensuring that the IDs are unique within the database.
+
+
+
+#### Syntax:
+
+Any one of the following overloads will return the next available ID from the HiLo numbers reserved for the client.
+The returned ID number can then be used when storing a new document.
+
+
+
+{`Task<long> GenerateNextIdForAsync(string database, object entity);
+
+Task<long> GenerateNextIdForAsync(string database, Type type);
+
+Task<long> GenerateNextIdForAsync(string database, string collectionName);
+`}
+
+
+
+| Parameter          | Type     | Description |
+|--------------------|----------|-------------|
+| **database**       | `string` | The database for which to get the ID. `null` will get the ID for the default database set in the document store. |
+| **collectionName** | `string` | The collection for which to get the ID. |
+| **entity**         | `object` | An instance of the specified collection. |
+| **type**           | `Type`   | The collection entity type. It is usually the singular of the collection name. For example, collection = "Orders", then type = "Order". |
+
+| Return value | Type   | Description |
+|--------------|--------|-------------|
+| **nextId**   | `long` | The next available number from the HiLo range reserved for the client. |
+
+#### Example:
+
+The following example shows how to get the next ID number from the HiLo range reserved for the client.
+The ID provided is the next unique number, without the node tag and the collection.
+This ID is then used to create and store a new document.
+
+Calling `GenerateNextIdForAsync` ensures minimal calls to the server,
+as the ID is generated by the client from the reserved range of numbers.
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    // Use any overload to get the next id:
+    // (Note how the id increases with each call)
+    // ==========================================
+
+    var nextId = await store.HiLoIdGenerator.GenerateNextIdForAsync(null, "Products");
+    // nextId = 1
+
+    nextId = await store.HiLoIdGenerator.GenerateNextIdForAsync(null, new Product());
+    // nextId = 2
+
+    nextId = await store.HiLoIdGenerator.GenerateNextIdForAsync(null, typeof(Product));
+    // nextId = 3
+
+    // Now you can create a new document with the nextId received
+    // ==========================================================
+
+    var product = new Product
+    \{
+        Id = "MyCustomId/" + nextId.ToString()
+    \};
+
+    // Store the new document
+    // The document ID will be: "MyCustomId/3"
+    session.Store(product);
+    session.SaveChanges();
+\}
+`}
+
+
+
+
+
+##### Unique IDs across the cluster
+
+This manual generator sample is sufficient if you are using only one server.
+If you want to ensure unique IDs across the cluster, we recommend using [our default HiLo generator](../../client-api/document-identifiers/working-with-document-identifiers.mdx#autogenerated-ids).
+
+You may also consider using the [cluster-wide Identities generator](../../client-api/document-identifiers/working-with-document-identifiers.mdx#identities), which guarantees a unique ID across the cluster.
+It is more costly than the default HiLo generator because it requires a request to the server for _each ID_,
+and the server needs to do a Raft consensus check to ensure that the other nodes in the cluster agree that the ID is unique, before returning the ID to the client.
+
+
+### Get next ID - full document ID
+
+You can request the next full document ID from the default HiLo generator without having to store the document first.
+
+#### Syntax:
+
+
+
+{`Task<string> GenerateDocumentIdAsync(string database, object entity);
+`}
+
+
+
+#### Example:
+
+The latest HiLo ID number generated in the example above was `3`.
+Therefore, when running the following example immediately after,
+the consecutive number `4` is retrieved and incorporated into the full document ID (`products/4-A`) that is returned by `GenerateDocumentIdAsync`.
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    var nextFullId = await store.HiLoIdGenerator.GenerateDocumentIdAsync(null, "Products");
+    // nextFullId = "products/4-A"
+
+    // You can now use the nextFullId and customize the document ID as you wish:
+    var product = new Product
+    \{
+        Id = "MyCustomId/" + nextFullId
+    \};
+
+    session.Store(product);
+    session.SaveChanges();
+\}
+`}
+
+
+
+
+
+## Overriding the HiLo algorithm
+
+* RavenDB's default HiLo generator is managed by the `HiLoIdGenerator` property in your _DocumentStore_ object.
+ +* If needed, you can override this default ID generation behavior by setting the [AsyncDocumentIdGenerator](../../client-api/configuration/conventions.mdx#asyncdocumentidgenerator) convention with your own implementation. + +* Once you configure your custom behavior through this convention: + + * Your customized ID generation will be applied whenever you store a document without explicitly specifying an `Id`. + + * Attempting to call [GenerateNextIdForAsync](../../client-api/document-identifiers/hilo-algorithm.mdx#get-next-id---number-only) or + [GenerateDocumentIdAsync](../../client-api/document-identifiers/hilo-algorithm.mdx#get-next-id---full-document-id) via the store's `HiLoIdGenerator` + will throw an exception. + + + + diff --git a/versioned_docs/version-7.1/client-api/document-identifiers/_working-with-document-identifiers-csharp.mdx b/versioned_docs/version-7.1/client-api/document-identifiers/_working-with-document-identifiers-csharp.mdx new file mode 100644 index 0000000000..ad9832bf0d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/document-identifiers/_working-with-document-identifiers-csharp.mdx @@ -0,0 +1,372 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Each document in a RavenDB database has a unique string associated with it, called an **identifier**. + Every entity that you store, using either a [session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) + or a [put document command](../../client-api/commands/documents/put.mdx), is assigned such an identifier. + +* RavenDB supports [several options](../../server/kb/document-identifier-generation.mdx) of storing a document and assigning + it an identifier. + The client can directly utilize these options. + +* You can always handle the identifier generation using your knowledge of the entity type and the identifier number provided + by the HiLo algorithm. As described below, this is how the identifier is generated by the session. + +* In this page: + * [Session Usage](../../client-api/document-identifiers/working-with-document-identifiers.mdx#session-usage) + * [Autogenerated IDs](../../client-api/document-identifiers/working-with-document-identifiers.mdx#autogenerated-ids) + * [Custom / Semantic IDs](../../client-api/document-identifiers/working-with-document-identifiers.mdx#custom-/-semantic-ids) + * [Server-side generated IDs](../../client-api/document-identifiers/working-with-document-identifiers.mdx#server-side-generated-ids) + * [Identities](../../client-api/document-identifiers/working-with-document-identifiers.mdx#identities) + * [Setting Identity IDs Using Commands and Operations](../../client-api/document-identifiers/working-with-document-identifiers.mdx#setting-identity-ids-using-commands-and-operations) + * [Using Commands](../../client-api/document-identifiers/working-with-document-identifiers.mdx#using-commands) + * [Using Operations](../../client-api/document-identifiers/working-with-document-identifiers.mdx#using-operations) + + +## Session Usage + +If you choose to use the session, you don't have to pay any special attention to the identifiers of the stored entities. +The session will take care of it by generating the identifiers automatically. + +It utilizes [conventions](../../client-api/configuration/conventions.mdx) and HiLo algorithms to produce the identifiers. +Everything is handled by the session's mechanism and is transparent for the user. 
+However, you can influence the identifier generation strategy by overwriting
+[the identifier generation conventions](../../client-api/configuration/identifier-generation/global.mdx).
+
+In this article, we consider the behavior under the default conventions.
+
+
+Identifiers of documents in a RavenDB database are always strings, so take this into consideration when you model your entities.
+
+
+
+
+## Autogenerated IDs
+
+To figure out which property (or field) holds the entity's identifier, the convention `Conventions.FindIdentityProperty` is called.
+By default, it looks for the property or the field named `Id` (case sensitive). However, this property can
+have a `null` value or even not be present at all. In that case, the automatic identifier generation strategy is performed.
+The default convention is that entities get identifiers in the following format: `collection/number-tag`.
+The RavenDB client first determines the name of [the collection](../../client-api/faq/what-is-a-collection.mdx) that
+the entity belongs to, then contacts the server to retrieve a numeric range of values. These values
+can be used as the `number` part.
+The range of available numbers is calculated by using the `HiLo` algorithm and is tracked per collection.
+The current maximum value in ranges is stored in the `Raven/Hilo/collection` documents.
+
+Let's see an example.
+
+
+
+{`var order = new Order
+\{
+    Id = null // value not provided
+\};
+
+session.Store(order);
+`}
+
+
+
+What will be the identifier of this order? You can check it by calling:
+
+
+
+{`var orderId = session.Advanced.GetDocumentId(order); // "orders/1-A"
+`}
+
+
+
+If this is the first `Order` entity in your database, then it will return `orders/1-A`. How does the identifier
+generation process proceed? The RavenDB client determines the collection name as `orders`
+(by default it is the plural form of the entity name).
+Then it asks the server for the range of IDs it can use (the first available range is 1 - 32). The server will
+handle the `Raven/Hilo/orders` document.
+The next available identifier value (an always-incrementing number) from the given range is `1`, so its combination with
+the collection name and the node tag gives the result `orders/1-A`.
+
+The next attempt to store another `Order` object within the same session will result in creating the `orders/2-A`
+identifier. However, this time asking the server about the possible range will not be necessary because the in-memory range
+(1 - 32) is enough, so the next number from it is simply used in the identifier.
+
+
+
+Each document store _instance_ (in code) handles the generation of the identifier value numeric range. The database
+stores the last requested number, while the document store _instances_ request ranges and cache the returned range of
+available identities.
+
+The database has a single document (per collection) which stores the last identifier value requested by a document
+store instance.
+
+E.g., if the document `Raven/HiLo/accounts` has the following value:
+
+
+{`\{
+    "Max": "4000",
+    "@metadata": \{
+        "@collection": "@hilo"
+    \}
+\}
+`}
+
+
+
+then the next range will be `4001 - 4032`, assuming a range size of 32 (the default).
+
+The number of sessions per document store instance plays no part in identifier value generation. When the store is
+disposed of, the client sends the server the last value it used and the max value it got from the server.
+The server will then write it to the HiLo document (if the server's `Max` value is equal to the max value received from the client
+and greater than or equal to the last value used by the client).
+
+
+If you intend to skip the identifier creation strategy that relies on the collection and HiLo value pair,
+you can allow RavenDB to assign a GUID identifier to the stored document. To do so, provide
+`string.Empty` as the value of the `Id` property:
+
+
+
+{`var orderEmptyId = new Order
+\{
+    Id = string.Empty // database will create a GUID value for it
+\};
+
+session.Store(orderEmptyId);
+
+session.SaveChanges();
+
+var guidId = session.Advanced.GetDocumentId(orderEmptyId); // "bc151542-8fa7-45ac-bc04-509b343a8720"
+`}
+
+
+
+This time, the check for the document ID is performed after `SaveChanges`, because only then do we go to the server,
+where the entity's identifier is generated.
+
+
+
+## Custom / Semantic IDs
+
+The session also supports storing an entity and explicitly specifying the identifier under which it should be stored
+in the database. To do this, you can either set the `Id` property of the object:
+
+
+
+{`var product = new Product
+\{
+    Id = "products/ravendb",
+    Name = "RavenDB"
+\};
+
+session.Store(product);
+`}
+
+
+
+or use the following `Store` method overload:
+
+
+
+{`session.Store(new Product
+\{
+    Name = "RavenDB"
+\}, "products/ravendb");
+`}
+
+
+
+
+
+## Server-side generated IDs
+
+RavenDB also supports generating identifiers without the usage of HiLo. By creating a string ID property
+in your entity and setting it to a value ending with a slash (`/`), you can ask RavenDB to assign a document ID to
+a new document when it is saved.
+
+
+
+{`session.Store(new Company
+\{
+    Id = "companies/"
+\});
+
+session.SaveChanges();
+`}
+
+
+
+Using `/` at the end of the ID will create an ID on the server-side by appending a numeric value and the node tag.
+After executing the code above, the server will return an ID that looks like `companies/000000000000000027-A`.
+
+
+Be aware that the only guarantee for the numeric part is that it is always increasing within the same node.
+
+
+
+
+## Identities
+
+If you need IDs to increment across the cluster, you can use the **Identity** option.
+To do so you need to use a pipe (`|`) as a suffix to the provided ID. This will instruct RavenDB
+to create the ID when the document is saved, using a special cluster-wide integer value that is
+continuously incremented.
+
+
+Using an identity guarantees that IDs will be incremental, but does **not** guarantee
+that there will be no gaps in the sequence.
+The ID sequence can therefore be, for example, `companies/1`, `companies/2`, `companies/4`...
+This is because:
+
+    * Documents could have been deleted.
+    * A failed transaction still increments the identity value, thus causing a gap in the sequence.
+
+
+
+{`session.Store(new Company
+\{
+    Id = "companies|"
+\});
+
+session.SaveChanges();
+`}
+
+
+
+After the execution of the code above, the ID will be `companies/1`.
+We do not add the node tag to the end of the ID, because the added number is unique in the cluster.
+Identities continuously increase, so running the above code again will generate `companies/2`, and so on.
+
+Note that we used `companies` as the prefix just to follow the RavenDB convention.
+Nothing prevents you from providing a different prefix, unrelated to the collection name.
+
+
+Be aware that using the pipe symbol (`|`) as a suffix to the ID generates a call to the cluster
+and might affect performance.
+
+
+
+
+* **Identity Parts Separator**
+By default, document IDs created by the server use `/` to separate their components.
+This separator can be changed to any other character except `|`, in the
+[Global Identifier Generation Conventions](../../client-api/configuration/identifier-generation/global.mdx#identitypartsseparator).
+See [Setting Identity IDs Using Commands and Operations](../../client-api/document-identifiers/working-with-document-identifiers.mdx#setting-identity-ids-using-commands-and-operations)
+for details.
+
+* **Concurrent writes**
+  The identities are generated and updated on the server side in an atomic fashion.
+  This means you can safely use this approach in concurrent write scenarios.
+
+
+
+
+## Setting Identity IDs Using Commands and Operations
+
+The commands API gives you full freedom in selecting the identifier generation strategy.
+
+* As in the case of a session, you can either ask the server to provide the identifier or provide the identifier of the
+  stored entity manually.
+* You can also indicate if the identifier that you are passing needs to have the identifier suffix added.
+  Do this by ending the ID with `/` or `|` as demonstrated below.
+
+
+
+{`var doc = new DynamicJsonValue
+\{
+    ["Name"] = "My RavenDB"
+\};
+
+var blittableDoc = session.Advanced.JsonConverter.ToBlittable(doc, null);
+
+var command = new PutDocumentCommand("products/", null, blittableDoc);
+
+session.Advanced.RequestExecutor.Execute(command, session.Advanced.Context);
+
+var identityId = command.Result.Id; // "products/0000000000000000001-A" when using '/'
+
+var commandWithPipe = new PutDocumentCommand("products|", null, blittableDoc);
+session.Advanced.RequestExecutor.Execute(commandWithPipe, session.Advanced.Context);
+
+var identityPipeId = commandWithPipe.Result.Id; // "products/1"
+`}
+
+
+### Using Commands
+
+* **Get the next available identity from the server**
+  You can set the identifier from your client, while still relying on the server to generate the identifier for you.
+  It is done using the `NextIdentityForCommand` command as shown below, with the prefix for which you want the server
+  to provide the next available identifier.
+
+
+
+{`var command = new NextIdentityForCommand("products");
+session.Advanced.RequestExecutor.Execute(command, session.Advanced.Context);
+var identity = command.Result;
+
+var doc = new DynamicJsonValue
+\{
+    ["Name"] = "My RavenDB"
+\};
+
+var blittableDoc = session.Advanced.JsonConverter.ToBlittable(doc, null);
+
+var putCommand = new PutDocumentCommand("products/" + identity, null, blittableDoc);
+
+session.Advanced.RequestExecutor.Execute(putCommand, session.Advanced.Context);
+`}
+
+
+
+  Note that this construction requires approaching the server twice in order to add a single document.
+  You need to call `session.Advanced.RequestExecutor.Execute(command, session.Advanced.Context)` for every
+  entity that you want to store.
+
+  **Asking** the server about the next identifier results in **increasing this value** on the server-side.
+
+  Please note that you **cannot** get the next available identifier and increment its value locally to create
+  the identifiers for a whole collection with the same prefix, because you may accidentally overwrite documents or
+  conflicts may occur if another client puts documents using the identifier mechanism.
+
+* **Provide an identity of your choice**
+  You can choose an identifier's value yourself, using the `SeedIdentityForCommand` command.
+
+
+{`var seedIdentityCommand = new SeedIdentityForCommand("products", 1994);
+`}
+
+
+### Using Operations
+
+RavenDB 4.2 and higher provides high-level [operations](../../client-api/operations/what-are-operations.mdx#operations-what-are-the-operations)
+that you can use to set IDs, in addition to the
+low-level [commands](../../client-api/document-identifiers/working-with-document-identifiers.mdx#using-commands)
+we have described above.
+There is no operational difference between using operations and commands, since the high-level operations actually
+execute low-level commands. However, using operations may produce clearer, more concise code.
+
+* Use the `NextIdentityForOperation` operation to choose the next value suggested by the server as an ID.
+  It is identical to using the `NextIdentityForCommand` command.
+
+
+{`store.Maintenance.Send(new NextIdentityForOperation("products"));
+`}
+
+
+
+* Use the `SeedIdentityForOperation` operation to choose your ID's value yourself.
+  It is identical to using the `SeedIdentityForCommand` command.
+
+
+{`var seedIdentityOperation = store.Maintenance.Send(new SeedIdentityForOperation("products", 1994));
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/document-identifiers/hilo-algorithm.mdx b/versioned_docs/version-7.1/client-api/document-identifiers/hilo-algorithm.mdx
new file mode 100644
index 0000000000..539549d5b4
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/document-identifiers/hilo-algorithm.mdx
@@ -0,0 +1,44 @@
+---
+title: "HiLo Algorithm"
+hide_table_of_contents: true
+sidebar_label: HiLo algorithm
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import HiloAlgorithmCsharp from './_hilo-algorithm-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/document-identifiers/working-with-document-identifiers.mdx b/versioned_docs/version-7.1/client-api/document-identifiers/working-with-document-identifiers.mdx
new file mode 100644
index 0000000000..a18697390f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/document-identifiers/working-with-document-identifiers.mdx
@@ -0,0 +1,38 @@
+---
+title: "Working with Document Identifiers"
+hide_table_of_contents: true
+sidebar_label: Working with Document Identifiers
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import WorkingWithDocumentIdentifiersCsharp from './_working-with-document-identifiers-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/faq/_category_.json b/versioned_docs/version-7.1/client-api/faq/_category_.json
new file mode 100644
index 0000000000..6f963e01b3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/faq/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 18,
+  "label": "FAQ"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/faq/assets/what-is-a-collection.png b/versioned_docs/version-7.1/client-api/faq/assets/what-is-a-collection.png
new file mode 100644
index 0000000000..2f3b4b610d
Binary files /dev/null and
b/versioned_docs/version-7.1/client-api/faq/assets/what-is-a-collection.png differ
diff --git a/versioned_docs/version-7.1/client-api/faq/backward-compatibility.mdx b/versioned_docs/version-7.1/client-api/faq/backward-compatibility.mdx
new file mode 100644
index 0000000000..e27a0dd69b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/faq/backward-compatibility.mdx
@@ -0,0 +1,92 @@
+---
+title: "FAQ: Backward Compatibility"
+hide_table_of_contents: true
+sidebar_label: Backward Compatibility
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# FAQ: Backward Compatibility
+
+
+* RavenDB is released in **Major** versions like 4.0 and 5.0, which are
+  complemented over time by **Minor** versions like 5.1 and 5.2.
+
+* This article explains which major and minor RavenDB Clients and Servers are
+  compatible, and offers advice on upgrading.
+
+* In this page:
+    * [Client/Server Compatibility](../../client-api/faq/backward-compatibility.mdx#client/server-compatibility)
+       * [Compatibility - Up to RavenDB 4.1](../../client-api/faq/backward-compatibility.mdx#compatibility---up-to-ravendb-41)
+       * [Compatibility - RavenDB 4.2 and Higher](../../client-api/faq/backward-compatibility.mdx#compatibility---ravendb-42-and-higher)
+    * [Upgrading](../../client-api/faq/backward-compatibility.mdx#upgrading)
+       * [Upgrading - Up to RavenDB 4.1](../../client-api/faq/backward-compatibility.mdx#upgrading---up-to-ravendb-41)
+       * [Upgrading - RavenDB 4.2 and Higher](../../client-api/faq/backward-compatibility.mdx#upgrading---ravendb-42-and-higher)
+       * [Upgrading Order](../../client-api/faq/backward-compatibility.mdx#upgrading-order)
+
+
+
+## Client/Server Compatibility
+
+### Compatibility - Up to RavenDB 4.1
+RavenDB **Clients** of versions lower than 4.2 are compatible with **Servers
+of the same Major version** (3.x Clients with 3.x Servers, 4.x Clients
+with 4.x Servers), and a **Minor version the same as theirs or higher**.
+E.g.:
+
+* `Client 3.0` is **compatible** with `Server 3.0`, because they are of the exact
+  same version.
+* `Client 4.0` is **compatible** with `Server 4.1` because they are of the same
+  major version and the server is of a higher minor version.
+* `Client 4.1.7` is **compatible** with `Server 4.1.6` because,
+  though the client is a little newer, the server is of the same
+  minor version (1) as the client.
+* `Client 3.0` is **not** compatible with `Server 4.0` because the
+  server is of a different major version.
+* `Client 4.5` is **not** compatible with `Server 4.0` because the
+  server is of a lower minor version.
+
+
+
+* A server that receives an erroneous client request will check
+  whether the client version is supported.
+* If the client version is not supported, an exception will be thrown:
+  **`RavenDB does not support interaction between Client API major version 3 and Server version 4
+  when major version does not match.`**
+
+
+### Compatibility - RavenDB 4.2 and Higher
+Starting with version 4.2, RavenDB clients are compatible with
+any server of their own version **and higher**.
+E.g.:
+
+* `Client 4.2` is **compatible** with `Server 4.2`, `Server 4.5`,
+  `Server 5.2`, and any other server of a higher version.
+
+## Upgrading
+
+### Upgrading - Up to RavenDB 4.1
+
+Upgrading RavenDB from a version earlier than 4.2 to a higher major version
+requires upgrading the server and all clients in lockstep.
+Please visit our [migration introduction](../../migration/client-api/introduction.mdx)
+page to learn more about migrating from early versions.
+
+### Upgrading - RavenDB 4.2 and Higher
+
+When RavenDB is upgraded from version 4.2 or higher, e.g. from 4.2 to 5.3,
+it is recommended - but not mandatory - to upgrade the clients, since they
+are [compatible with servers of versions higher than theirs](../../client-api/faq/backward-compatibility.mdx#ravendb-42-and-higher-compatibility).
+
+### Upgrading Order
+
+To properly upgrade your applications and server, we advise you to upgrade the server first,
+and then the clients.
+This way, your applications will keep working as before and you can update
+them one by one if needed.
+
diff --git a/versioned_docs/version-7.1/client-api/faq/transaction-support.mdx b/versioned_docs/version-7.1/client-api/faq/transaction-support.mdx
new file mode 100644
index 0000000000..feb3ea203b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/faq/transaction-support.mdx
@@ -0,0 +1,268 @@
+---
+title: "FAQ: Transaction Support in RavenDB"
+hide_table_of_contents: true
+sidebar_label: Transaction Support
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# FAQ: Transaction Support in RavenDB
+
+* In this page:
+  * [ACID storage](../../client-api/faq/transaction-support.mdx#acid-storage)
+  * [What is and what isn't a transaction](../../client-api/faq/transaction-support.mdx#what-is-and-what-isn)
+  * [Working with transactions in RavenDB](../../client-api/faq/transaction-support.mdx#working-with-transactions-in-ravendb)
+     * [Single-node model](../../client-api/faq/transaction-support.mdx#single-node-model)
+     * [Multi-master model](../../client-api/faq/transaction-support.mdx#multi-master-model)
+     * [Cluster-wide transactions](../../client-api/faq/transaction-support.mdx#cluster-wide-transactions)
+  * [ACID for document operations](../../client-api/faq/transaction-support.mdx#acid-for-document-operations)
+  * [BASE for query operations](../../client-api/faq/transaction-support.mdx#base-for-query-operations)
+
+## ACID storage
+
+All storage operations performed in RavenDB are fully ACID-compliant (Atomicity, Consistency, Isolation, Durability).
+This is because RavenDB internally uses a storage engine called [Voron](../../server/storage/storage-engine.mdx), built specifically for RavenDB's usage,
+which guarantees all ACID properties, whether executed on document, index, or local cluster data.
+
+## What is and what isn't a transaction
+
+* A transaction represents a set of operations executed against a database as a single, atomic, and isolated unit.
+
+* In RavenDB, a transaction (read or write) is limited to the scope of a __single__ HTTP request.
+
+* The terms "ACID transaction" or "transaction" refer to the storage engine transactions.
+  Whenever a database receives an operation or batch of operations in a request, it will wrap it in a "storage transaction",
+  execute the operations, and commit the transaction.
+
+* RavenDB ensures that for a single HTTP request, all the operations in that request are transactional.
+  It employs _Serializable_ isolation for write operations and _Snapshot_ isolation for read operations.
+
+* RavenDB doesn't support a transaction spanning __multiple__ HTTP requests. Interactive transactions are not implemented by RavenDB
+  (see [below](../../client-api/faq/transaction-support.mdx#no-support-for-interactive-transactions) for the reasoning behind this decision).
+  RavenDB offers the [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx) feature to achieve similar behavior.
+
+* The [Client API Session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) is a pure Client API object and does not represent a transaction,
+  and is therefore not meant to provide interactive transaction semantics.
+  It is entirely managed on the client side, without maintaining a corresponding session state on the server.
+  The server does not reference or keep track of the session context.
+
+## Working with transactions in RavenDB
+
+### Single-node model
+
+Transactional behavior with RavenDB is divided into two modes:
+
+* __Single requests__:
+  In this mode, a user can perform all requested operations (read and/or write) in a single request.
+
+  * __Multiple writes__:
+    A batch of multiple write operations will be executed atomically in a single transaction when calling [SaveChanges()](../../client-api/session/saving-changes.mdx).
+    Multiple operations can also be executed in a single transaction using the low-level [SingleNodeBatchCommand](../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx).
+    In both cases, a single HTTP request is sent to the database.
+
+  * __Multiple reads & writes__:
+    Performing interleaved reads and writes, or conditional execution, can be achieved by [running a patching script](../../client-api/operations/patching/single-document.mdx).
+    In the script you can read documents, make decisions based on their content, and update or put documents, all within the scope of a single transaction
+    (see the sketch after this list).
+    If you only need to modify a document in a transaction, [JSON Patch syntax](../../client-api/operations/patching/json-patch-syntax.mdx) allows you to do that.
+
+* __Multiple requests__:
+  RavenDB does not support a single transaction that spans all requested operations within multiple requests.
+  Instead, users are expected to utilize [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx) to achieve similar behavior.
+  Your changes will be committed only if no one else has changed the data you are modifying in the meantime.
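+
+As an illustration, here is a minimal sketch of such a server-side patching script sent from the C# client.
+The document ID and the `UnitsInStock`/`PricePerUnit` properties are illustrative assumptions, not part of the examples above;
+the whole read-decide-write sequence runs in a single request, and therefore in a single transaction:
+
+{`// A sketch, assuming an initialized DocumentStore in 'store'
+store.Operations.Send(new PatchOperation(
+    id: "products/1-A",
+    changeVector: null,
+    patch: new PatchRequest
+    \{
+        // The script reads the document, makes a decision, and writes - all in one transaction
+        Script = @"if (this.UnitsInStock > 0) \{
+                       this.PricePerUnit = this.PricePerUnit * 1.1;
+                   \}"
+    \}));
+`}
+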
+
+#### No support for interactive transactions
+
+The RavenDB client uses HTTP to communicate with the RavenDB server.
+This means that RavenDB doesn't allow you to open a transaction on the server side, perform multiple operations over a network connection, and then commit or roll it back.
+This model, known as the interactive transactions model, is incredibly costly, both in terms of engine complexity and in its impact on the overall performance of the system.
+
+In [one study](http://nms.csail.mit.edu/~stavros/pubs/OLTP_sigmod08.pdf) the cost of managing the transaction state across multiple network operations was measured at over 40% of the total system performance.
+This is because the server needs to maintain locks and state across potentially very large time frames.
+
+RavenDB's approach differs from the classical SQL model, which relies on interactive transactions. Instead, RavenDB uses the batch transaction model,
+which, in conjunction with [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx),
+provides the same capabilities as interactive transactions with much better performance.
+
+Key to that design decision is our ability to provide similar guarantees about the state of your data without incurring the overhead of interactive transactions.
+
+#### Batch transaction model
+
+RavenDB uses the batch transaction model, where a RavenDB client submits all the operations to be run in a single transaction in one network call.
+This allows the storage engine inside RavenDB to avoid holding locks for an extended period of time and leaves plenty of room for performance optimization.
+
+This decision is based on the typical interaction pattern by which RavenDB is used.
+RavenDB serves as a transactional system of record for business applications, where the common workflow involves presenting data to users,
+allowing them to make modifications, and subsequently saving these changes.
+A single request loads the data, which is then presented to the user.
+After a period of contemplation or "think time," the user submits a set of updates, which are then saved to the database.
+This workflow fits the batch transaction model a lot more closely than the interactive one, as there's no necessity to keep a transaction open during the user's "think time."
+
+All changes that are sent via _SaveChanges_ are persisted as a single unit.
+If you modify documents concurrently and you want to ensure they won't be affected by the lost update problem,
+then you must enable [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx) (turned off by default) across all sessions that modify those documents.
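+
+As a minimal sketch (the `Product` class and store setup are assumed, and error handling is trimmed),
+this is what enabling optimistic concurrency looks like in the C# client; if another session saved the
+same document between the load and the save, `SaveChanges` fails instead of silently losing an update:
+
+{`using (var session = store.OpenSession())
+\{
+    session.Advanced.UseOptimisticConcurrency = true;
+
+    var product = session.Load<Product>("products/1-A");
+    product.PricePerUnit = 19.99M; // the user's "think time" happens between Load and SaveChanges
+
+    try
+    \{
+        session.SaveChanges(); // throws if "products/1-A" changed in the meantime
+    \}
+    catch (ConcurrencyException)
+    \{
+        // Reload the document and retry, or surface the conflict to the user
+    \}
+\}
+`}
+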
+
+<hr/>
+
+### Multi-master model
+
+RavenDB employs the multi-master model, allowing writes to be made to any node in the cluster.
+These writes are then propagated asynchronously to the other nodes via [replication](../../server/clustering/replication/replication-overview.mdx).
+
+The interaction of transactions and distributed work is anything but trivial. Let's start with the obvious problem:
+
+* RavenDB allows you to perform concurrent write operations on multiple nodes.
+* RavenDB explicitly allows you to write to a node that was partitioned from the rest of the network.
+
+Taken together, this runs into the [CAP theorem](https://en.wikipedia.org/wiki/CAP_theorem),
+which states that a system can only provide two out of the three guarantees of consistency, availability, and partition tolerance.
+
+RavenDB's answer to distributed transactional work is nuanced and was designed to give you, as the user, the choice,
+so you can utilize RavenDB for each of your scenarios:
+
+* Single-node operations are available and partition tolerant (AP) but cannot meet the consistency guarantee.
+* If you need to guarantee uniqueness or replicate the data for redundancy across more than one node,
+  you can choose to have higher consistency at the cost of availability (CP).
+
+When running in a multi-node setup, RavenDB still uses transactions. However, they are single-node transactions.
+That means that the set of changes that you write in a transaction is committed only to the node you are writing to.
+It will then asynchronously replicate to the other nodes.
+To achieve consistency across the entire cluster, please refer to the [Cluster-wide transactions](../../client-api/faq/transaction-support.mdx#cluster-wide-transactions) section below.
+
+#### Replication conflicts
+
+This is an important observation: you can get into situations where two clients wrote to the same document
+(even with [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx) turned on)
+and both of them committed successfully (each one to a separate node).
+RavenDB attempts to minimize this situation by designating a [preferred node](../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) for writes for each database,
+but since writing to the preferred node isn't guaranteed, this might not alleviate the issue.
+
+In such a case, the data will replicate across the cluster, and RavenDB will detect that there were [conflicting](../../server/clustering/replication/replication-conflicts.mdx) modifications to the document.
+It will then apply the [conflict resolution](../../studio/database/settings/conflict-resolution.mdx) strategy that you choose.
+That can include selecting a manual resolution, running a [resolution script](../../server/clustering/replication/replication-conflicts.mdx#conflict-resolution-script) to reconcile the conflicting versions,
+or simply selecting the latest version. You are in control of this behavior.
+
+This behavior was influenced by the [Dynamo paper](https://dl.acm.org/doi/10.1145/1323293.1294281), which emphasizes the importance of writes.
+The assumption is that if you are writing data to the database, you expect it to be persisted.
+
+RavenDB will do its utmost to provide that to you, allowing you to write to the database even in the case of partitions or partial failure states.
+However, handling replication conflicts is a consideration you have to take into account when using single-node transactions in RavenDB (see below for running a [cluster-wide transaction](../../client-api/faq/transaction-support.mdx#cluster-wide-transactions)).
+
+If no conflict resolution script is defined for a collection, then by default RavenDB resolves the conflict using the latest version, based on the `@last-modified` property of the conflicting versions of the document.
+That might result in the lost update anomaly.
+
+If you care about avoiding lost updates, you need to ensure you have the conflict resolution script defined accordingly, or use a [cluster-wide transaction](../../client-api/faq/transaction-support.mdx#cluster-wide-transactions).
+
+#### Replication & transaction boundary
+
+The following is an important aspect of RavenDB's transactional behavior with regard to asynchronous replication.
+
+When replicating modifications to another server, RavenDB will ensure that the [transaction boundaries](../../server/clustering/replication/replication-overview.mdx#replication--transaction-boundary) are maintained.
+If there are several document modifications in the same transaction, they will be sent in the same replication batch, keeping the transaction boundary on the destination as well.
+
+However, special attention is needed when a document is modified in two separate transactions but the replication of the first transaction has not occurred yet.
+Read more about that in [How revisions replication help data consistency](../../server/clustering/replication/replication-overview.mdx#how-revisions-replication-help-data-consistency).
+
+<hr/>
+
+### Cluster-wide transactions
+
+RavenDB also supports [cluster-wide transactions](../../client-api/session/cluster-transaction/overview.mdx).
+This feature modifies the way RavenDB commits a transaction, and it is meant to address scenarios where you prefer to get a failure if the transaction cannot be persisted to a majority of the nodes in the cluster.
+In other words, this feature is for scenarios where you want to favor consistency over availability.
+
+For cluster-wide transactions, RavenDB uses the [Raft](../../server/clustering/rachis/what-is-rachis.mdx#what-is-raft-?) protocol.
+This protocol ensures that the transaction is acknowledged by a majority of the nodes in the cluster; once committed, the changes will be visible on any node that you use from then on.
+
+Similar to single-node transactions, RavenDB requires that you submit the cluster-wide transaction as a single request containing all the changes you want to commit to the database.
+
+Cluster-wide transactions have the notion of [atomic guards](../../client-api/session/cluster-transaction/atomic-guards.mdx) to prevent an overwrite of a document modified in a cluster transaction by a change made in another cluster transaction.
+
+The usage of atomic guards makes cluster-wide transactions conflict-free.
+There is no way to create a conflict between two versions of the same document.
+If a document was updated by someone else in the meantime, a `ConcurrencyException` will be thrown.
+
+## ACID for document operations
+
+In RavenDB, all actions performed on documents are fully ACID.
+Each document operation, or batch of operations applied to a set of documents and sent in a single HTTP request, will execute in a single transaction.
+The ACID properties of RavenDB are:
+
+* __Atomicity__
+  All operations are atomic. Either they fully succeed or fail without any partial execution.
+  In particular, operations on multiple documents will be carried out atomically, meaning they are either completed entirely or not executed at all.
+
+* __Consistency and Isolation / Consistency of Scans__
+  Within a single _read_ transaction, all operations are performed under _Snapshot_ isolation.
+  This ensures that even if you access multiple documents, you'll get all of their state exactly as it was at the beginning of the request.
+
+* __Visibility__
+  All changes to the database are immediately made available upon commit.
+  Therefore, if a transaction updates two documents and is committed, you will always see the updates to both documents at the same time.
+  That is, you either see the updates to both, or you don't see the update to either one.
+
+* __Durability__
+  If an operation has been completed successfully, it is fsync'ed to disk.
+  Reads will never return any data that has not been flushed to disk.
+
+All of these constraints are guaranteed for each individual request made to the database when using a [Session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx).
+In particular, every `Load` call is a separate transaction, and the [`SaveChanges`](../../client-api/session/saving-changes.mdx)
+call will encapsulate all documents created, deleted, or modified within the session into a single transaction.
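+
+For illustration, a minimal sketch of that `SaveChanges` behavior (the `Order` class comes from the client application;
+the `Invoice` class and the IDs are hypothetical placeholders):
+
+{`using (var session = store.OpenSession())
+\{
+    var order = session.Load<Order>("orders/1-A");
+    order.TotalPrice = 119.99;
+
+    session.Store(new Invoice \{ OrderId = "orders/1-A" \}, "invoices/1-A");
+
+    // One HTTP request, one transaction:
+    // either both the update and the new document are persisted, or neither is.
+    session.SaveChanges();
+\}
+`}
+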
+
+## BASE for query operations
+
+The transaction model is different when indexes are involved, because indexes are BASE (Basically Available, Soft state, Eventual consistency), not ACID.
+Indexing in RavenDB always happens in the background.
+When you write a new document or update an existing one, RavenDB doesn't wait to update all the indexes before it completes the write operation.
+Instead, it writes the document data and completes the write operation as soon as the transaction is written to disk, scheduling any index updates to occur in an async manner.
+
+There are several reasons for this behavior:
+
+* Writes are faster because they aren't going to be held up by the indexes.
+* Indexes running in an async manner make it possible to handle updates in batches, instead of having to update all the indexes on every write.
+* Indexes operate independently, so a single slow or expensive index isn't going to impact any other indexes or the overall write performance in the system.
+* Indexes can be added dynamically and on the fly to busy production systems.
+* Indexes can be updated in a [side-by-side manner](../../indexes/creating-and-deploying.mdx).
+
+The BASE model means that the following constraints are applied to query operations:
+
+* __Basically Available__
+  Index query results will always be available, but they might be stale.
+
+* __Soft state__
+  The state of the system could change over time, because some amount of time is needed to perform the indexing.
+  This is an incremental operation; the fewer documents that remain to be indexed, the more accurate the index results become.
+
+* __Eventual consistency__
+  The database will eventually become consistent once it stops receiving new documents and the indexing operation finishes.
+
+The async nature of RavenDB indexes means that, by default, writes will complete without waiting for indexes.
+There are ways to wait for the indexes to complete as part of the write, or even during the read (though that is not recommended), as the sketch below shows.
+See the dedicated article about [stale indexes](../../indexes/stale-indexes.mdx).
+
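+As a minimal sketch (the `Order` class, the LINQ usings, and store setup are assumed), these are the two waiting options mentioned above:
+
+{`using (var session = store.OpenSession())
+\{
+    // Option 1: make the write wait until the affected indexes have caught up
+    session.Advanced.WaitForIndexesAfterSaveChanges(
+        timeout: TimeSpan.FromSeconds(30), throwOnTimeout: false);
+    session.Store(new Order \{ TotalPrice = 42 \});
+    session.SaveChanges();
+
+    // Option 2: make the read wait until the queried index is no longer stale
+    var orders = session.Query<Order>()
+        .Customize(x => x.WaitForNonStaleResults())
+        .Where(x => x.TotalPrice > 10)
+        .ToList();
+\}
+`}
+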
diff --git a/versioned_docs/version-7.1/client-api/faq/what-is-a-collection.mdx b/versioned_docs/version-7.1/client-api/faq/what-is-a-collection.mdx
new file mode 100644
index 0000000000..ba336ae6d0
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/faq/what-is-a-collection.mdx
@@ -0,0 +1,86 @@
+---
+title: "FAQ: What is a Collection"
+hide_table_of_contents: true
+sidebar_label: What is a Collection
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# FAQ: What is a Collection
+
+* **A collection** in RavenDB is a set of documents tagged with the same `@collection` metadata property.
+  Every document belongs to exactly one collection.
+
+* Since RavenDB is a schemaless database, there is no requirement that documents in the same collection share the same structure,
+  although typically a collection holds similarly structured documents, based on the entity type of the document.
+
+* A collection is just a virtual concept.
+  It has no influence on how or where documents within the same collection are physically stored.
+
+* Collections are used throughout many RavenDB features, such as defining indexes, setting revisions, and much more.
+
+* In this page:
+  * [Collection Name Generation](../../client-api/faq/what-is-a-collection.mdx#collection-name-generation)
+  * [Collection Usages](../../client-api/faq/what-is-a-collection.mdx#collection-usages)
+
+* For more information see [Documents and Collections](../../studio/database/documents/documents-and-collections.mdx)
+
+![Figure 1. What is a Collection](./assets/what-is-a-collection.png)
+
+## Collection Name Generation
+
+**When storing an entity from the client:**
+
+* The document collection metadata is generated **based on the stored entity object type**.
+
+* By default, the client pluralizes the collection name based on the type name.
+  E.g., storing an entity of type `Order` will generate the collection name `Orders`.
+
+* The function that is responsible for tagging the documents can be overridden.
+  See: [Global Identifier Generation Conventions](../../client-api/configuration/identifier-generation/global.mdx#findtypetagname-and-finddynamictagname).
+
+----
+
+**When creating a new document through the Studio:**
+
+* The collection metadata is generated **based on the document ID prefix**.
+  E.g., documents created with the following IDs: `users|23` / `users/45` / `users/17`
+  will all belong to the same `Users` collection.
+
+* For more information see [Documents and Collections](../../studio/database/documents/documents-and-collections.mdx)
+
+## Collection Usages
+
+* **A Collection Query**
+  * RavenDB keeps an internal storage index per collection created.
+    This internal index is used to query the database and retrieve only documents from a specified collection.
+
+* **In Indexing**
+  * Each [Map Index](../../indexes/map-indexes.mdx) is built against a single collection (or multiple collections when using a [Multi-Map Index](../../indexes/multi-map-indexes.mdx)).
+    During the indexing process, the index function iterates only over the documents that belong to the specified collection(s).
+
+* **In Revisions**
+  * Document [Revisions](../../document-extensions/revisions/overview.mdx) can be defined per collection.
+
+* **In Ongoing Tasks**
+  * A [RavenDB ETL Task](../../server/ongoing-tasks/etl/raven.mdx) and a [SQL ETL Task](../../server/ongoing-tasks/etl/sql.mdx) are defined on specified collections.
+
+* **The @hilo Collection**
+  * The ranges of available ID values returned by the [HiLo algorithm](../../client-api/document-identifiers/hilo-algorithm.mdx) are managed per collection name.
+    Learn more in: [The @hilo Collection](../../studio/database/documents/documents-and-collections.mdx#the-@hilo-collection)
+
+* **The @empty Collection**
+  * Learn more in: [The @empty Collection](../../studio/database/documents/documents-and-collections.mdx#the-@empty-collection)
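+
+As a quick way to see the name-generation rule in action, here is a minimal sketch in C# (the `Product` class is an assumed entity type, and an initialized store is assumed) that stores an entity and reads the generated collection name back from the `@collection` metadata:
+
+{`using (var session = store.OpenSession())
+\{
+    var product = new Product \{ Name = "Milk" \};
+    session.Store(product);
+    session.SaveChanges();
+
+    // The client pluralized the type name: entity type "Product" -> collection "Products"
+    var metadata = session.Advanced.GetMetadataFor(product);
+    var collection = metadata[Constants.Documents.Metadata.Collection]; // "Products"
+\}
+`}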
+
+----
+
diff --git a/versioned_docs/version-7.1/client-api/how-to/_category_.json b/versioned_docs/version-7.1/client-api/how-to/_category_.json
new file mode 100644
index 0000000000..2e1b0cd793
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/how-to/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 17,
+  "label": "How to..."
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/how-to/_handle-document-relationships-csharp.mdx b/versioned_docs/version-7.1/client-api/how-to/_handle-document-relationships-csharp.mdx
new file mode 100644
index 0000000000..33cf49e9e2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/how-to/_handle-document-relationships-csharp.mdx
@@ -0,0 +1,740 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+One of the design principles that RavenDB adheres to is the idea that documents are independent,
+meaning all data required to process a document is stored within the document itself.
+However, this doesn't mean there should not be relations between objects.
+
+There are valid scenarios where we need to define relationships between objects.
+By doing so, we expose ourselves to one major problem: whenever we load the containing entity,
+we are going to need to load data from the referenced entities as well (unless we are not interested in them).
+While the alternative of storing the whole entity in every object graph it is referenced in seems cheaper at first,
+this proves to be quite costly in terms of database resources and network traffic.
+
+RavenDB offers three elegant approaches to solve this problem. Each scenario will need to use one or more of them.
+When applied correctly, they can drastically improve performance, reduce network bandwidth, and speed up development.
+
+* In this page:
+  * [Denormalization](../../client-api/how-to/handle-document-relationships.mdx#denormalization)
+  * [Includes](../../client-api/how-to/handle-document-relationships.mdx#includes)
+     * [One to many includes](../../client-api/how-to/handle-document-relationships.mdx#one-to-many-includes)
+     * [Secondary level includes](../../client-api/how-to/handle-document-relationships.mdx#secondary-level-includes)
+     * [Dictionary includes](../../client-api/how-to/handle-document-relationships.mdx#dictionary-includes)
+     * [Dictionary includes: complex types](../../client-api/how-to/handle-document-relationships.mdx#dictionary-includes-complex-types)
+  * [Combining approaches](../../client-api/how-to/handle-document-relationships.mdx#combining-approaches)
+  * [Summary](../../client-api/how-to/handle-document-relationships.mdx#summary)
+
+## Denormalization
+
+The easiest solution is to denormalize the data within the containing entity,
+forcing it to contain the actual value of the referenced entity in addition to (or instead of) the foreign key.
+
+Take this JSON document for example:
+
+{`// Order document with ID: orders/1-A
+\{
+    "Customer": \{
+        "Name": "Itamar",
+        "Id": "customers/1-A"
+    \},
+    "Items": [
+        \{
+            "Product": \{
+                "Id": "products/1-A",
+                "Name": "Milk",
+                "Cost": 2.3
+            \},
+            "Quantity": 3
+        \}
+    ]
+\}
+`}
+
+As you can see, the `Order` document now contains denormalized data from both the `Customer` and the `Product` documents, which are saved elsewhere in full.
+Note that we don't copy all the customer properties into the order;
+instead, we clone just the ones we care about when displaying or processing an order.
+This approach is called a *denormalized reference*.
+
+The denormalization approach avoids many cross document lookups and results in only the necessary data being transmitted over the network,
+but it makes other scenarios more difficult. For example, consider the following entity structure as our starting point:
+
+{`public class Order
+\{
+    public string CustomerId \{ get; set; \}
+
+    public string[] SupplierIds \{ get; set; \}
+
+    public Referral Referral \{ get; set; \}
+
+    public LineItem[] LineItems \{ get; set; \}
+
+    public double TotalPrice \{ get; set; \}
+\}
+`}
+
+{`public class Customer
+\{
+    public string Id \{ get; set; \}
+
+    public string Name \{ get; set; \}
+\}
+`}
+
+If we know that whenever we load an `Order` from the database we will need to know the customer's name and address,
+we could decide to create a denormalized `Order.Customer` field and store those details directly in the `Order` object.
+Obviously, the password and other irrelevant details will not be denormalized:
+
+{`public class DenormalizedCustomer
+\{
+    public string Id \{ get; set; \}
+
+    public string Name \{ get; set; \}
+
+    public string Address \{ get; set; \}
+\}
+`}
+
+There wouldn't be a direct reference between the `Order` and the `Customer`.
+Instead, `Order` holds a `DenormalizedCustomer`, which contains the interesting bits from `Customer` that we need whenever we process `Order` objects.
+
+But what happens when the customer's address is changed? We will have to perform an aggregate operation to update all orders this customer has made.
+What if the customer has a lot of orders or changes their address frequently? Keeping these details in sync could become very demanding on the server.
+What if another process that works with orders needs a different set of customer properties?
+The `DenormalizedCustomer` will need to be expanded, possibly to the point that the majority of the customer record is cloned.
+
+**Denormalization** is a viable solution for rarely changing data, or for data that must remain the same despite the underlying referenced data changing over time.
+
+## Includes
+
+The **Includes** feature addresses the limitations of denormalization.
+Instead of one object containing copies of the properties from another object,
+it is only necessary to hold a reference to the second object, which can be:
+
+* a Document (as described below)
+* a [Document Revision](../../document-extensions/revisions/client-api/session/including.mdx)
+* a [Counter](../../document-extensions/counters/counters-and-other-features.mdx#including-counters)
+* a [Time series](../../document-extensions/timeseries/client-api/session/include/overview.mdx)
+* a [Compare exchange value](../../client-api/operations/compare-exchange/include-compare-exchange.mdx)
+
+The server can then be instructed to pre-load the referenced object at the same time that the root object is retrieved, using:
+
+{`Order order = session
+    .Include<Order>(x => x.CustomerId)
+    .Load("orders/1-A");
+
+// this will not require querying the server!
+Customer customer = session
+    .Load<Customer>(order.CustomerId);
+`}
+
+Above we are asking RavenDB to retrieve the `Order` `orders/1-A`, and at the same time "include" the `Customer` referenced by the `Order.CustomerId` property.
+The second call to `Load()` is resolved completely client side (i.e. without a second request to the RavenDB server)
+because the relevant `Customer` object has already been retrieved (this is the full `Customer` object, not a denormalized version).
+
+There is also a possibility to load multiple documents:
+
+{`Dictionary<string, Order> orders = session
+    .Include<Order>(x => x.CustomerId)
+    .Load("orders/1-A", "orders/2-A");
+
+foreach (Order order in orders.Values)
+\{
+    // this will not require querying the server!
+    Customer customer = session.Load<Customer>(order.CustomerId);
+\}
+`}
+
+You can also use Includes with queries:
+
+{`IList<Order> orders = session
+    .Query<Order>()
+    .Include(o => o.CustomerId)
+    .Where(x => x.TotalPrice > 100)
+    .ToList();
+
+foreach (Order order in orders)
+\{
+    // this will not require querying the server!
+    Customer customer = session
+        .Load<Customer>(order.CustomerId);
+\}
+`}
+
+{`IList<Order> orders = session
+    .Query<Order>()
+    .Include(i => i
+        .IncludeDocuments(x => x.CustomerId) // single document
+        .IncludeCounter("OrderUpdateCount")) // fluent builder can include counters as well
+    .Where(x => x.TotalPrice > 100)
+    .ToList();
+
+foreach (Order order in orders)
+\{
+    // this will not require querying the server!
+    Customer customer = session
+        .Load<Customer>(order.CustomerId);
+\}
+`}
+
+{`IList<Order> orders = session
+    .Advanced
+    .DocumentQuery<Order>()
+    .Include(x => x.CustomerId)
+    .WhereGreaterThan(x => x.TotalPrice, 100)
+    .ToList();
+
+foreach (Order order in orders)
+\{
+    // this will not require querying the server!
+    Customer customer = session
+        .Load<Customer>(order.CustomerId);
+\}
+`}
+
+{`from Orders
+where TotalPrice > 100
+include CustomerId
+`}
+
+{`from Orders as o
+where TotalPrice > 100
+include CustomerId,counters(o,'OrderUpdateCount')
+`}
+
+This works because RavenDB has two channels through which it can return information in response to a load request.
+The first is the Results channel, through which the root object retrieved by the `Load()` method call is returned.
+The second is the Includes channel, through which any included documents are sent back to the client.
+Client side, those included documents are not returned from the `Load()` method call, but they are added to the session unit of work,
+and subsequent requests to load them are served directly from the session cache, without requiring any additional queries to the server.
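+
+A minimal sketch (reusing the `Order` and `Customer` classes above) that makes this behavior visible: after the include, the follow-up load is served from the session cache, so the session's request counter stays at 1:
+
+{`using (var session = store.OpenSession())
+\{
+    Order order = session
+        .Include<Order>(x => x.CustomerId)
+        .Load("orders/1-A"); // one request: results + includes channels
+
+    Customer customer = session
+        .Load<Customer>(order.CustomerId); // served from the session cache
+
+    Assert.Equal(1, session.Advanced.NumberOfRequests);
+\}
+`}
+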
+
+Embedded and builder variants of the Include clause are essentially syntactic sugar and are equivalent on the server side.
+
+Streaming query results does not support the includes feature.
+Learn more in [How to Stream Query Results](../../client-api/session/querying/how-to-stream-query-results.mdx#stream-related-documents).
+
+### One to many includes
+
+Include can also be used with a one-to-many relationship.
+In the above classes, an `Order` has a property `SupplierIds` which contains an array of references to `Supplier` documents.
+The following code will cause the suppliers to be pre-loaded:
+
+{`Order order = session
+    .Include<Order>(x => x.SupplierIds)
+    .Load("orders/1-A");
+
+foreach (string supplierId in order.SupplierIds)
+\{
+    // this will not require querying the server!
+    Supplier supplier = session.Load<Supplier>(supplierId);
+\}
+`}
+
+Alternatively, it is possible to use the fluent builder syntax:
+
+{`var order = session.Load<Order>(
+    "orders/1-A",
+    i => i.IncludeDocuments(x => x.SupplierIds));
+
+foreach (string supplierId in order.SupplierIds)
+\{
+    // this will not require querying the server!
+    var supplier = session.Load<Supplier>(supplierId);
+\}
+`}
+
+The calls to `Load()` within the `foreach` loop will not require a call to the server, as the `Supplier` objects will already be loaded into the session cache.
+
+Multi-loads are also possible:
+
+{`Dictionary<string, Order> orders = session
+    .Include<Order>(x => x.SupplierIds)
+    .Load("orders/1-A", "orders/2-A");
+
+foreach (Order order in orders.Values)
+\{
+    foreach (string supplierId in order.SupplierIds)
+    \{
+        // this will not require querying the server!
+        Supplier supplier = session.Load<Supplier>(supplierId);
+    \}
+\}
+`}
+
+### Secondary level includes
+
+An Include does not need to work only on the value of a top level property within a document.
+It can be used to load a value from a secondary level.
+In the classes above, the `Order` contains a `Referral` property, which is of the type:
+
+{`public class Referral
+\{
+    public string CustomerId \{ get; set; \}
+
+    public double CommissionPercentage \{ get; set; \}
+\}
+`}
+
+This class contains an identifier for a `Customer`.
+The following code will include the document referenced by that secondary level identifier:
+
+{`Order order = session
+    .Include<Order>(x => x.Referral.CustomerId)
+    .Load("orders/1-A");
+
+// this will not require querying the server!
+Customer customer = session.Load<Customer>(order.Referral.CustomerId);
+`}
+
+It is possible to execute the same code with the fluent builder syntax:
+
+{`var order = session.Load<Order>(
+    "orders/1-A",
+    i => i.IncludeDocuments(x => x.Referral.CustomerId));
+
+// this will not require querying the server!
+Customer customer = session.Load<Customer>(order.Referral.CustomerId);
+`}
+
+The alternative way is to provide a string-based path:
+
+{`Order order = session.Include("Referral.CustomerId")
+    .Load<Order>("orders/1-A");
+
+// this will not require querying the server!
+Customer customer = session.Load<Customer>(order.Referral.CustomerId);
+`}
+
+With the fluent builder syntax, it is also possible to use a string-based path:
+
+{`var order = session.Load<Order>(
+    "orders/1-A",
+    i => i.IncludeDocuments("Referral.CustomerId"));
+
+// this will not require querying the server!
+Customer customer = session.Load<Customer>(order.Referral.CustomerId);
+`}
+
+This secondary level include will also work with collections.
+The `Order.LineItems` property holds a collection of `LineItem` objects, each of which contains a reference to a `Product`:
+
+{`public class LineItem
+\{
+    public string ProductId \{ get; set; \}
+
+    public string Name \{ get; set; \}
+
+    public int Quantity \{ get; set; \}
+\}
+`}
+
+The `Product` documents can be included using the following syntax:
+
+{`Order order = session
+    .Include<Order>(x => x.LineItems.Select(l => l.ProductId))
+    .Load("orders/1-A");
+
+foreach (LineItem lineItem in order.LineItems)
+\{
+    // this will not require querying the server!
+    Product product = session.Load<Product>(lineItem.ProductId);
+\}
+`}
+
+The fluent builder syntax works here too:
+
+{`var order = session.Load<Order>(
+    "orders/1-A",
+    i => i.IncludeDocuments(x => x.LineItems.Select(l => l.ProductId)));
+
+foreach (LineItem lineItem in order.LineItems)
+\{
+    // this will not require querying the server!
+    Product product = session.Load<Product>(lineItem.ProductId);
+\}
+`}
+
+The `Select()` within the Include tells RavenDB which property of the secondary level objects to use as a reference.
+
+### String path conventions
+
+When using string-based includes like:
+
+{`Order order = session.Include("Referral.CustomerId")
+    .Load<Order>("orders/1-A");
+
+// this will not require querying the server!
+Customer customer = session.Load<Customer>(order.Referral.CustomerId);
+`}
+
+you must follow certain rules that apply to the provided string path:
+
+1. **Dots** are used to separate properties,
+   e.g. `"Referral.CustomerId"` in the example above means that our `Order` contains a property `Referral`, and that property contains another property called `CustomerId`.
+
+2. **The indexer operator** is used to indicate that a property is a collection type.
+   So if our `Order` has a list of LineItems, and each `LineItem` contains a `ProductId` property, then we can create the string path as follows: `"LineItems[].ProductId"`.
+
+3. **Prefixes** can be used to indicate the prefix of the identifier of the document that is going to be included.
+   This can be useful when working with custom or semantic identifiers.
+   For example, if you have a customer stored under `customers/login@domain.com`, then you can include it using `"Referral.CustomerEmail(customers/)"` (`customers/` is the prefix here).
+
+Learning the string path rules may be useful when you want to query the database using the HTTP API:
+
+{`curl -X GET "http://localhost:8080/databases/Northwind/docs?id=orders/1-A&include=Lines[].Product"
+`}
+
+### Dictionary includes
+
+Dictionary keys and values can also be used when doing includes.
Consider the following scenario:
+
+{`public class Person
+\{
+    public string Id \{ get; set; \}
+
+    public string Name \{ get; set; \}
+
+    public Dictionary<string, string> Attributes \{ get; set; \}
+\}
+`}
+
+{`session.Store(
+    new Person
+    \{
+        Id = "people/1-A",
+        Name = "John Doe",
+        Attributes = new Dictionary<string, string>
+        \{
+            \{ "Mother", "people/2" \},
+            \{ "Father", "people/3" \}
+        \}
+    \});
+
+session.Store(
+    new Person
+    \{
+        Id = "people/2",
+        Name = "Helen Doe",
+        Attributes = new Dictionary<string, string>()
+    \});
+
+session.Store(
+    new Person
+    \{
+        Id = "people/3",
+        Name = "George Doe",
+        Attributes = new Dictionary<string, string>()
+    \});
+`}
+
+Now we want to include all documents that are under the dictionary values:
+
+{`var person = session
+    .Include<Person>(x => x.Attributes.Values)
+    .Load("people/1-A");
+
+var mother = session
+    .Load<Person>(person.Attributes["Mother"]);
+
+var father = session
+    .Load<Person>(person.Attributes["Father"]);
+
+Assert.Equal(1, session.Advanced.NumberOfRequests);
+`}
+
+The code above can also be rewritten with the fluent builder syntax:
+
+{`var person = session.Load<Person>(
+    "people/1-A",
+    i => i.IncludeDocuments(x => x.Attributes.Values));
+
+var mother = session
+    .Load<Person>(person.Attributes["Mother"]);
+
+var father = session
+    .Load<Person>(person.Attributes["Father"]);
+
+Assert.Equal(1, session.Advanced.NumberOfRequests);
+`}
+
+You can also include values from dictionary keys:
+
+{`var person = session
+    .Include<Person>(x => x.Attributes.Keys)
+    .Load("people/1-A");
+`}
+
+Here, as well, this can be written with the fluent builder syntax:
+
+{`var person = session
+    .Load<Person>("people/1-A",
+        i => i.IncludeDocuments(x => x.Attributes.Keys));
+`}
+
+### Dictionary includes: complex types
+
+If the values in the dictionary are more complex, e.g.:
+
+{`public class PersonWithAttribute
+\{
+    public string Id \{ get; set; \}
+
+    public string Name \{ get; set; \}
+
+    public Dictionary<string, Attribute> Attributes \{ get; set; \}
+\}
+
+public class Attribute
+\{
+    public string Ref \{ get; set; \}
+\}
+`}
+
+{`session.Store(
+    new PersonWithAttribute
+    \{
+        Id = "people/1-A",
+        Name = "John Doe",
+        Attributes = new Dictionary<string, Attribute>
+        \{
+            \{ "Mother", new Attribute \{ Ref = "people/2" \} \},
+            \{ "Father", new Attribute \{ Ref = "people/3" \} \}
+        \}
+    \});
+
+session.Store(
+    new Person
+    \{
+        Id = "people/2",
+        Name = "Helen Doe",
+        Attributes = new Dictionary<string, string>()
+    \});
+
+session.Store(
+    new Person
+    \{
+        Id = "people/3",
+        Name = "George Doe",
+        Attributes = new Dictionary<string, string>()
+    \});
+`}
+
+We can also do includes on specific properties:
+
+{`var person = session
+    .Include<PersonWithAttribute>(x => x.Attributes.Values.Select(v => v.Ref))
+    .Load("people/1-A");
+
+var mother = session
+    .Load<Person>(person.Attributes["Mother"].Ref);
+
+var father = session
+    .Load<Person>(person.Attributes["Father"].Ref);
+
+Assert.Equal(1, session.Advanced.NumberOfRequests);
+`}
+
+## Combining approaches
+
+It is possible to combine the above techniques.
+Using the `DenormalizedCustomer` from above and creating an order that uses it:
+
+{`public class Order3
+\{
+    public DenormalizedCustomer Customer \{ get; set; \}
+
+    public string[] SupplierIds \{ get; set; \}
+
+    public Referral Referral \{ get; set; \}
+
+    public LineItem[] LineItems \{ get; set; \}
+
+    public double TotalPrice \{ get; set; \}
+\}
+`}
+
+We have the advantages of denormalization, a quick and simple load of an `Order`,
+and the fairly static `Customer` details that are required for most processing.
+But we also have the ability to easily and efficiently load the full `Customer` object when necessary, using:
+
+{`Order3 order = session
+    .Include<Order3>(x => x.Customer.Id)
+    .Load("orders/1-A");
+
+// this will not require querying the server!
+Customer customer = session.Load<Customer>(order.Customer.Id);
+`}
+
+This combining of denormalization and Includes can also be used with a list of denormalized objects.
+
+It is possible to use Include on a query that is a projection.
+Includes are evaluated after the projection has been evaluated.
+This opens up the possibility of implementing Tertiary Includes (i.e. retrieving documents that are referenced by documents that are referenced by the root document).
+
+RavenDB can support Tertiary Includes, but before resorting to them you should re-evaluate your document model.
+Needing Tertiary Includes can be an indication that you are designing your documents along "Relational" lines.
+
+## Summary
+
+There are no strict rules as to when to use which approach, but the general idea is to give it a lot of thought and consider the implications each approach has.
+
+As an example, in an e-commerce application it might be better to denormalize product names and prices into an order line object,
+since you want to make sure the customer sees the same price and product title in the order history.
+But the customer name and addresses should probably be references, rather than denormalized into the order entity.
+
+For most cases where denormalization is not an option, Includes are probably the answer.
+
diff --git a/versioned_docs/version-7.1/client-api/how-to/_handle-document-relationships-java.mdx b/versioned_docs/version-7.1/client-api/how-to/_handle-document-relationships-java.mdx
new file mode 100644
index 0000000000..593aa63ba1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/how-to/_handle-document-relationships-java.mdx
@@ -0,0 +1,850 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+One of the design principles that RavenDB adheres to is the idea that documents are independent,
+meaning all data required to process a document is stored within the document itself.
+However, this doesn't mean there should not be relations between objects.
+
+There are valid scenarios where we need to define relationships between objects.
+By doing so, we expose ourselves to one major problem: whenever we load the containing entity,
+we are going to need to load data from the referenced entities as well (unless we are not interested in them).
+While the alternative of storing the whole entity in every object graph it is referenced in seems cheaper at first,
+this proves to be quite costly in terms of database resources and network traffic.
+
+RavenDB offers three elegant approaches to solve this problem. Each scenario will need to use one or more of them.
+When applied correctly, they can drastically improve performance, reduce network bandwidth, and speed up development.
+
+## Denormalization
+
+The easiest solution is to denormalize the data within the containing entity,
+forcing it to contain the actual value of the referenced entity in addition to (or instead of) the foreign key.
+
+Take this JSON document for example:
+
+{`// Order document with ID: orders/1-A
+\{
+    "Customer": \{
+        "Name": "Itamar",
+        "Id": "customers/1-A"
+    \},
+    "Items": [
+        \{
+            "Product": \{
+                "Id": "products/1-A",
+                "Name": "Milk",
+                "Cost": 2.3
+            \},
+            "Quantity": 3
+        \}
+    ]
+\}
+`}
+
+As you can see, the `Order` document now contains denormalized data from both the `Customer` and the `Product` documents, which are saved elsewhere in full.
+Note that we don't copy all the customer fields into the order; instead, we clone just the ones we care about when displaying or processing an order.
+This approach is called a *denormalized reference*.
+
+The denormalization approach avoids many cross document lookups and results in only the necessary data being transmitted over the network,
+but it makes other scenarios more difficult. For example, consider the following entity structure as our starting point:
+
+{`public class Order \{
+    private String customerId;
+    private String[] supplierIds;
+    private Referral referral;
+    private LineItem[] lineItems;
+    private double totalPrice;
+
+    public String getCustomerId() \{
+        return customerId;
+    \}
+
+    public void setCustomerId(String customerId) \{
+        this.customerId = customerId;
+    \}
+
+    public String[] getSupplierIds() \{
+        return supplierIds;
+    \}
+
+    public void setSupplierIds(String[] supplierIds) \{
+        this.supplierIds = supplierIds;
+    \}
+
+    public Referral getReferral() \{
+        return referral;
+    \}
+
+    public void setReferral(Referral referral) \{
+        this.referral = referral;
+    \}
+
+    public LineItem[] getLineItems() \{
+        return lineItems;
+    \}
+
+    public void setLineItems(LineItem[] lineItems) \{
+        this.lineItems = lineItems;
+    \}
+
+    public double getTotalPrice() \{
+        return totalPrice;
+    \}
+
+    public void setTotalPrice(double totalPrice) \{
+        this.totalPrice = totalPrice;
+    \}
+\}
+`}
+
+{`public class Customer \{
+    private String id;
+    private String name;
+
+    public String getId() \{
+        return id;
+    \}
+
+    public void setId(String id) \{
+        this.id = id;
+    \}
+
+    public String getName() \{
+        return name;
+    \}
+
+    public void setName(String name) \{
+        this.name = name;
+    \}
+\}
+`}
+
+If we know that whenever we load an `Order` from the database we will need to know the customer's name and address,
+we could decide to create a denormalized `Order.Customer` field and store those details directly in the `Order` object.
+Obviously, the password and other irrelevant details will not be denormalized:
+
+{`public class DenormalizedCustomer \{
+    private String id;
+    private String name;
+    private String address;
+
+    public String getId() \{
+        return id;
+    \}
+
+    public void setId(String id) \{
+        this.id = id;
+    \}
+
+    public String getName() \{
+        return name;
+    \}
+
+    public void setName(String name) \{
+        this.name = name;
+    \}
+
+    public String getAddress() \{
+        return address;
+    \}
+
+    public void setAddress(String address) \{
+        this.address = address;
+    \}
+\}
+`}
+
+There wouldn't be a direct reference between the `Order` and the `Customer`.
+Instead, `Order` holds a `DenormalizedCustomer`, which contains the interesting bits from `Customer` that we need whenever we process `Order` objects.
+
+But what happens when the customer's address is changed?
+We will have to perform an aggregate operation to update all orders this customer has made.
+What if the customer has a lot of orders or changes their address frequently?
+Keeping these details in sync could become very demanding on the server.
+What if another process that works with orders needs a different set of customer fields?
+The `DenormalizedCustomer` will need to be expanded, possibly to the point that the majority of the customer record is cloned.
+
+**Denormalization** is a viable solution for rarely changing data, or for data that must remain the same despite the underlying referenced data changing over time.
+
+## Includes
+
+The **Includes** feature addresses the limitations of denormalization.
+Instead of one object containing copies of the fields from another object, it is only necessary to hold a reference to the second object.
+Then the server can be instructed to pre-load the referenced document at the same time that the root object is retrieved. We do this using:
+
+{`Order order = session
+    .include("CustomerId")
+    .load(Order.class, "orders/1-A");
+
+// this will not require querying the server!
+Customer customer = session.load(Customer.class, order.getCustomerId());
+`}
+
+Above we are asking RavenDB to retrieve the `Order` `orders/1-A`, and at the same time "include" the `Customer` referenced by the `Order.CustomerId` field.
+The second call to `load()` is resolved completely client side (i.e. without a second request to the RavenDB server)
+because the relevant `Customer` object has already been retrieved (this is the full `Customer` object, not a denormalized version).
+
+There is also a possibility to load multiple documents:
+
+{`Map<String, Order> orders = session
+    .include("CustomerId")
+    .load(Order.class, "orders/1-A", "orders/2-A");
+
+for (Order order : orders.values()) \{
+    // this will not require querying the server!
+    Customer customer = session.load(Customer.class, order.getCustomerId());
+\}
+`}
+
+You can also use Includes with queries:
+
+{`List<Order> orders = session
+    .query(Order.class)
+    .include("CustomerId")
+    .whereGreaterThan("TotalPrice", 100)
+    .toList();
+
+for (Order order : orders) \{
+    // this will not require querying the server!
+    Customer customer = session
+        .load(Customer.class, order.getCustomerId());
+\}
+`}
+
+{`List<Order> orders = session
+    .query(Order.class)
+    .include(i -> i.
+        includeDocuments("CustomerId").
+        includeCounter("OrderUpdateCount"))
+    .whereGreaterThan("TotalPrice", 100)
+    .toList();
+
+for (Order order : orders) \{
+    // this will not require querying the server!
+    Customer customer = session
+        .load(Customer.class, order.getCustomerId());
+\}
+`}
+
+{`from Orders
+where TotalPrice > 100
+include CustomerId
+`}
+
+{`from Orders as o
+where TotalPrice > 100
+include CustomerId,counters(o,'OrderUpdateCount')
+`}
+
+This works because RavenDB has two channels through which it can return information in response to a load request.
+The first is the Results channel, through which the root object retrieved by the `load()` method call is returned.
+The second is the Includes channel, through which any included documents are sent back to the client.
+Client side, those included documents are not returned from the `load()` method call, but they are added to the session unit of work,
+and subsequent requests to load them are served directly from the session cache, without requiring any additional queries to the server.
+
+Embedded and builder variants of the Include clause are essentially syntactic sugar and are equivalent on the server side.
+
+Streaming query results does not support the includes feature.
+Learn more in [How to Stream Query Results](../../client-api/session/querying/how-to-stream-query-results.mdx#stream-related-documents).
+
+### One to many includes
+
+Include can also be used with a one-to-many relationship.
+In the above classes, an `Order` has a field `SupplierIds` which contains an array of references to `Supplier` documents.
+The following code will cause the suppliers to be pre-loaded:
+
+{`Order order = session
+    .include("SupplierIds")
+    .load(Order.class, "orders/1-A");
+
+for (String supplierId : order.getSupplierIds()) \{
+    // this will not require querying the server!
+    Supplier supplier = session.load(Supplier.class, supplierId);
+\}
+`}
+
+Alternatively, it is possible to use the fluent builder syntax:
+
+{`Order order = session.load(Order.class, "orders/1-A",
+    i -> i.includeDocuments("SupplierIds"));
+
+for (String supplierId : order.getSupplierIds()) \{
+    // this will not require querying the server!
+    Supplier supplier = session.load(Supplier.class, supplierId);
+\}
+`}
+
+The calls to `load()` within the loop will not require a call to the server, as the `Supplier` objects will already be loaded into the session cache.
+
+Multi-loads are also possible:
+
+{`Map<String, Order> orders = session
+    .include("SupplierIds")
+    .load(Order.class, "orders/1-A", "orders/2-A");
+
+for (Order order : orders.values()) \{
+    for (String supplierId : order.getSupplierIds()) \{
+        // this will not require querying the server!
+        Supplier supplier = session.load(Supplier.class, supplierId);
+    \}
+\}
+`}
+
+### Secondary level includes
+
+An Include does not need to work only on the value of a top level field within a document.
+It can be used to load a value from a secondary level.
+In the classes above, the `Order` contains a `Referral` field which is of the type:
+
+{`public class Referral \{
+    private String customerId;
+    private double commissionPercentage;
+
+    public String getCustomerId() \{
+        return customerId;
+    \}
+
+    public void setCustomerId(String customerId) \{
+        this.customerId = customerId;
+    \}
+
+    public double getCommissionPercentage() \{
+        return commissionPercentage;
+    \}
+
+    public void setCommissionPercentage(double commissionPercentage) \{
+        this.commissionPercentage = commissionPercentage;
+    \}
+\}
+`}
+
+This class contains an identifier for a `Customer`. The following code will include the document referenced by that secondary level identifier:
+
+{`Order order = session
+    .include("Referral.CustomerId")
+    .load(Order.class, "orders/1-A");
+
+// this will not require querying the server!
+Customer customer = session.load(Customer.class, order.getReferral().getCustomerId());
+`}
+
+It is possible to execute the same code with the fluent builder syntax:
+
+{`Order order = session
+    .load(Order.class, "orders/1-A",
+        i -> i.includeDocuments("Referral.CustomerId"));
+
+// this will not require querying the server!
+Customer customer = session.load(Customer.class, order.getReferral().getCustomerId());
+`}
+
+This secondary level include will also work with collections.
+The `Order.LineItems` field holds a collection of `LineItem` objects, each of which contains a reference to a `Product`:
+
+{`public class LineItem \{
+    private String productId;
+    private String name;
+    private int quantity;
+
+    public String getProductId() \{
+        return productId;
+    \}
+
+    public void setProductId(String productId) \{
+        this.productId = productId;
+    \}
+
+    public String getName() \{
+        return name;
+    \}
+
+    public void setName(String name) \{
+        this.name = name;
+    \}
+
+    public int getQuantity() \{
+        return quantity;
+    \}
+
+    public void setQuantity(int quantity) \{
+        this.quantity = quantity;
+    \}
+\}
+`}
+
+The `Product` documents can be included using the following syntax:
+
+{`Order order = session
+    .include("LineItems[].ProductId")
+    .load(Order.class, "orders/1-A");
+
+for (LineItem lineItem : order.getLineItems()) \{
+    // this will not require querying the server!
+    Product product = session.load(Product.class, lineItem.getProductId());
+\}
+`}
+
+The fluent builder syntax works here too:
+
+{`Order order = session.load(Order.class, "orders/1-A",
+    i -> i.includeDocuments("LineItems[].ProductId"));
+
+for (LineItem lineItem : order.getLineItems()) \{
+    // this will not require querying the server!
+    Product product = session.load(Product.class, lineItem.getProductId());
+\}
+`}
+
+The `[]` within the `include` tells RavenDB which field of the secondary level objects to use as a reference.
+
+### String path conventions
+
+When using string-based includes like:
+
+{`Order order = session
+    .include("Referral.CustomerId")
+    .load(Order.class, "orders/1-A");
+
+// this will not require querying the server!
+Customer customer = session.load(Customer.class, order.getReferral().getCustomerId());
+`}
+
+you must follow certain rules that apply to the provided string path:
+
+1. **Dots** are used to separate fields,
+   e.g. `"Referral.CustomerId"` in the example above means that our `Order` contains a field `Referral`, and that field contains another field called `CustomerId`.
+
+2. **The indexer operator** is used to indicate that a field is a collection type.
+   So if our `Order` has a list of LineItems, and each `LineItem` contains a `ProductId` field, then we can create the string path as follows: `"LineItems[].ProductId"`.
+
+3. **Prefixes** can be used to indicate the prefix of the identifier of the document that is going to be included.
+   This can be useful when working with custom or semantic identifiers.
+   For example, if you have a customer stored under `customers/login@domain.com`, then you can include it
+   using `"Referral.CustomerEmail(customers/)"` (`customers/` is the prefix here).
+
+Learning the string path rules may be useful when you want to query the database using the HTTP API:
+
+{`curl -X GET "http://localhost:8080/databases/Northwind/docs?id=orders/1-A&include=lines[].product"
+`}
+
+### Dictionary includes
+
+Dictionary keys and values can also be used when doing includes.
Consider the following scenario:
+
+
+
+{`public class Person \{
+    private String id;
+    private String name;
+    private Map<String, String> attributes;
+
+    public String getId() \{
+        return id;
+    \}
+
+    public void setId(String id) \{
+        this.id = id;
+    \}
+
+    public String getName() \{
+        return name;
+    \}
+
+    public void setName(String name) \{
+        this.name = name;
+    \}
+
+    public Map<String, String> getAttributes() \{
+        return attributes;
+    \}
+
+    public void setAttributes(Map<String, String> attributes) \{
+        this.attributes = attributes;
+    \}
+\}
+`}
+
+
+
+
+
+{`HashMap<String, String> attributes1 = new HashMap<>();
+attributes1.put("Mother", "people/2");
+attributes1.put("Father", "people/3");
+
+Person person1 = new Person();
+person1.setId("people/1-A");
+person1.setName("John Doe");
+person1.setAttributes(attributes1);
+
+session.store(person1);
+
+Person person2 = new Person();
+person2.setId("people/2");
+person2.setName("Helen Doe");
+person2.setAttributes(Collections.emptyMap());
+
+session.store(person2);
+
+Person person3 = new Person();
+person3.setId("people/3");
+person3.setName("George Doe");
+person3.setAttributes(Collections.emptyMap());
+
+session.store(person3);
+`}
+
+
+
+Now we want to include all documents referenced by the dictionary values:
+
+
+
+{`Person person = session.include("Attributes.Values")
+    .load(Person.class, "people/1-A");
+
+Person mother = session
+    .load(Person.class, person.getAttributes().get("Mother"));
+
+Person father = session
+    .load(Person.class, person.getAttributes().get("Father"));
+
+Assert.assertEquals(1, session.advanced().getNumberOfRequests());
+`}
+
+
+
+The code above can also be rewritten with the fluent builder syntax:
+
+
+
+{`Person person = session.load(Person.class, "people/1-A",
+    i -> i.includeDocuments("Attributes.Values"));
+
+Person mother = session
+    .load(Person.class, person.getAttributes().get("Mother"));
+
+Person father = session
+    .load(Person.class, person.getAttributes().get("Father"));
+
+Assert.assertEquals(1, session.advanced().getNumberOfRequests());
+`}
+
+
+
+You can also include values from dictionary keys:
+
+
+
+{`Person person = session
+    .include("Attributes.Keys")
+    .load(Person.class, "people/1-A");
+`}
+
+
+
+Here as well, this can be written with the fluent builder syntax:
+
+
+
+{`Person person = session
+    .load(Person.class, "people/1-A",
+        i -> i.includeDocuments("Attributes.Keys"));
+`}
+
+
+
+#### Complex types
+
+If the values in the dictionary are more complex, e.g.
+
+
+
+{`public class PersonWithAttribute \{
+    private String id;
+    private String name;
+    private Map<String, Attribute> attributes;
+
+    public String getId() \{
+        return id;
+    \}
+
+    public void setId(String id) \{
+        this.id = id;
+    \}
+
+    public String getName() \{
+        return name;
+    \}
+
+    public void setName(String name) \{
+        this.name = name;
+    \}
+
+    public Map<String, Attribute> getAttributes() \{
+        return attributes;
+    \}
+
+    public void setAttributes(Map<String, Attribute> attributes) \{
+        this.attributes = attributes;
+    \}
+\}
+
+public class Attribute \{
+    private String ref;
+
+    public Attribute() \{
+    \}
+
+    public Attribute(String ref) \{
+        this.ref = ref;
+    \}
+
+    public String getRef() \{
+        return ref;
+    \}
+
+    public void setRef(String ref) \{
+        this.ref = ref;
+    \}
+\}
+`}
+
+
+
+
+
+{`HashMap<String, Attribute> attributes = new HashMap<>();
+attributes.put("Mother", new Attribute("people/2"));
+attributes.put("Father", new Attribute("people/3"));
+
+PersonWithAttribute person1 = new PersonWithAttribute();
+person1.setId("people/1-A");
+person1.setName("John Doe");
+person1.setAttributes(attributes);
+
+session.store(person1);
+
+Person person2 = new Person();
+person2.setId("people/2");
+person2.setName("Helen Doe");
+person2.setAttributes(Collections.emptyMap());
+
+session.store(person2);
+
+Person person3 = new Person();
+person3.setId("people/3");
+person3.setName("George Doe");
+person3.setAttributes(Collections.emptyMap());
+
+session.store(person3);
+`}
+
+
+
+We can also do includes on specific fields:
+
+
+
+{`PersonWithAttribute person = session
+    .include("Attributes[].Ref")
+    .load(PersonWithAttribute.class, "people/1-A");
+
+Person mother = session
+    .load(Person.class, person.getAttributes().get("Mother").getRef());
+
+Person father = session
+    .load(Person.class, person.getAttributes().get("Father").getRef());
+
+Assert.assertEquals(1, session.advanced().getNumberOfRequests());
+`}
+
+
+
+## Combining approaches
+
+It is possible to combine the above techniques.
+Using the `DenormalizedCustomer` from above and creating an order that uses it:
+
+
+
+{`public class Order3 \{
+    private DenormalizedCustomer customer;
+    private String[] supplierIds;
+    private Referral referral;
+    private LineItem[] lineItems;
+    private double totalPrice;
+
+    public DenormalizedCustomer getCustomer() \{
+        return customer;
+    \}
+
+    public void setCustomer(DenormalizedCustomer customer) \{
+        this.customer = customer;
+    \}
+
+    public String[] getSupplierIds() \{
+        return supplierIds;
+    \}
+
+    public void setSupplierIds(String[] supplierIds) \{
+        this.supplierIds = supplierIds;
+    \}
+
+    public Referral getReferral() \{
+        return referral;
+    \}
+
+    public void setReferral(Referral referral) \{
+        this.referral = referral;
+    \}
+
+    public LineItem[] getLineItems() \{
+        return lineItems;
+    \}
+
+    public void setLineItems(LineItem[] lineItems) \{
+        this.lineItems = lineItems;
+    \}
+
+    public double getTotalPrice() \{
+        return totalPrice;
+    \}
+
+    public void setTotalPrice(double totalPrice) \{
+        this.totalPrice = totalPrice;
+    \}
+\}
+`}
+
+
+
+We have the advantages of denormalization: a quick and simple load of an `Order`, and the fairly static `Customer` details that are required for most processing.
+But we also have the ability to easily and efficiently load the full `Customer` object when necessary using:
+
+
+
+{`Order3 order = session
+    .include("Customer.Id")
+    .load(Order3.class, "orders/1-A");
+
+// this will not require querying the server!
+Customer customer = session.load(Customer.class, order.getCustomer().getId());
+`}
+
+
+
+This combining of denormalization and Includes could also be used with a list of denormalized objects.
+
+It is possible to use an Include on a query that is a projection. Includes are evaluated after the projection has been evaluated.
+This opens up the possibility of implementing Tertiary Includes
+(i.e. retrieving documents that are referenced by documents that are referenced by the root document).
+
+RavenDB can support Tertiary Includes, but before resorting to them you should re-evaluate your document model.
+Needing Tertiary Includes can be an indication that you are designing your documents along "Relational" lines.
+
+## Summary
+
+There are no strict rules as to when to use which approach,
+but the general idea is to give it a lot of thought and consider the implications each approach has.
+
+As an example, in an e-commerce application it might be better to denormalize product names and prices into an order line object
+since you want to make sure the customer sees the same price and product title in the order history.
+But the customer name and addresses should probably be references rather than denormalized into the order entity.
+
+For most cases where denormalization is not an option, Includes are probably the answer.
+
+
diff --git a/versioned_docs/version-7.1/client-api/how-to/_handle-document-relationships-nodejs.mdx b/versioned_docs/version-7.1/client-api/how-to/_handle-document-relationships-nodejs.mdx
new file mode 100644
index 0000000000..1a857418ff
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/how-to/_handle-document-relationships-nodejs.mdx
@@ -0,0 +1,737 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+One of the design principles that RavenDB adheres to is the idea that documents are independent,
+meaning all data required to process a document is stored within the document itself.
+However, this doesn't mean there should not be relations between objects.
+
+There are valid scenarios where we need to define relationships between objects.
+By doing so, we expose ourselves to one major problem: whenever we load the containing entity,
+we will need to load data from the referenced entities as well (unless we are not interested in them).
+While the alternative of storing the whole entity in every object graph it is referenced in seems cheaper at first,
+this proves to be quite costly in terms of database resources and network traffic.
+
+RavenDB offers three elegant approaches to solve this problem. Each scenario will need to use one or more of them.
+When applied correctly, they can drastically improve performance, reduce network bandwidth, and speed up development.
+ + +* In this page: + * [Denormalization](../../client-api/how-to/handle-document-relationships.mdx#denormalization) + * [Includes](../../client-api/how-to/handle-document-relationships.mdx#includes) + * [One to many includes](../../client-api/how-to/handle-document-relationships.mdx#one-to-many-includes) + * [Secondary level includes](../../client-api/how-to/handle-document-relationships.mdx#secondary-level-includes) + * [Dictionary includes](../../client-api/how-to/handle-document-relationships.mdx#dictionary-includes) + * [Dictionary includes: complex types](../../client-api/how-to/handle-document-relationships.mdx#dictionary-includes-complex-types) + * [Combining approaches](../../client-api/how-to/handle-document-relationships.mdx#combining-approaches) + * [Summary](../../client-api/how-to/handle-document-relationships.mdx#summary) + +## Denormalization + +The easiest solution is to denormalize the data within the containing entity, +forcing it to contain the actual value of the referenced entity in addition to (or instead of) the foreign key. + +Take this JSON document for example: + + + +{`// Order document with ID: orders/1-A +\{ + "customer": \{ + "id": "customers/1-A", + "name": "Itamar" + \}, + "items": [ + \{ + "product": \{ + "id": "products/1-A", + "name": "Milk", + "cost": 2.3 + \}, + "quantity": 3 + \} + ] +\} +`} + + + +As you can see, the `Order` document now contains denormalized data from both the `Customer` and the `Product` documents which are saved elsewhere in full. +Note we won't have copied all the customer properties into the order; +instead we just clone the ones that we care about when displaying or processing an order. +This approach is called _denormalized reference_. + +The denormalization approach avoids many cross document lookups and results in only the necessary data being transmitted over the network, +but it makes other scenarios more difficult. For example, consider the following entity structure as our start point: + + + +{`class Order \{ + constructor( + customerId = '', + supplierIds = [], + referral = null, + lineItems = [], + totalPrice = 0 + ) \{ + Object.assign(this, \{ + customerId, + supplierIds, + referral, + lineItems, + totalPrice + \}); + \} +\} +`} + + + + + +{`class Customer \{ + constructor( + id = '', + name = '' + ) \{ + Object.assign(this, \{ + id, + name + \}); + \} +\} +`} + + + +If we know that whenever we load an `Order` from the database we will need to know the customer's name and address, +we could decide to create a denormalized `Order.customer` field and store those details directly in the `Order` object. +Obviously, the password and other irrelevant details will not be denormalized: + + + +{`class DenormalizedCustomer \{ + constructor( + id = '', + name = '', + address = '' + ) \{ + Object.assign(this, \{ + id, + name, + address + \}); + \} +\} +`} + + + +There wouldn't be a direct reference between the `Order` and the `Customer`. +Instead, `Order` holds a `DenormalizedCustomer`, which contains the interesting bits from `Customer` that we need whenever we process `Order` objects. + +But what happens when the user's address is changed? We will have to perform an aggregate operation to update all orders this customer has made. +What if the customer has a lot of orders or changes their address frequently? Keeping these details in sync could become very demanding on the server. +What if another process that works with orders needs a different set of customer properties? 
+The `DenormalizedCustomer` will need to be expanded, possibly to the point that the majority of the customer record is cloned. + + +**Denormalization** is a viable solution for rarely changing data or for data that must remain the same despite the underlying referenced data changing over time. + + + + +## Includes + +The **Includes** feature addresses the limitations of denormalization. +Instead of one object containing copies of the properties from another object, +it is only necessary to hold a reference to the second object, which can be: + +* a Document (as described below) +* a [Document Revision](../../document-extensions/revisions/client-api/session/including.mdx) +* a [Counter](../../document-extensions/counters/counters-and-other-features.mdx#including-counters) +* a [Time series](../../document-extensions/timeseries/client-api/session/include/overview.mdx) +* a [Compare exchange value](../../client-api/operations/compare-exchange/include-compare-exchange.mdx) + +The server can then be instructed to pre-load the referenced object at the same time that the root object is retrieved, using: + + + +{`const order = await session + // Call 'include' + // Pass the path of the document property that holds document to include + .include("customerId") + .load("orders/1-A"); + +const customer = await session + // This call to 'load' will not require querying the server + // No server request will be made + .load(order.customerId); +`} + + + +Above we are asking RavenDB to retrieve the `Order` `orders/1-A`, and at the same time "include" the `Customer` referenced by the `customerId` property. +The second call to `load()` is resolved completely client side (i.e. without a second request to the RavenDB server) +because the relevant `Customer` object has already been retrieved (this is the full `Customer` object not a denormalized version). + +There is also a possibility to load multiple documents: + + + +{`const orders = await session + .include("customerId") + .load(["orders/1-A", "orders/2-A"]); + +const orderEntities = Object.entries(orders); + +for (let i = 0; i < orderEntities.length; i++) \{ + // This will not require querying the server + const customer = await session.load(orderEntities[i][1].customerId); +\} +`} + + + +You can also use Includes with queries: + + + + +{`const orders = await session + .query({ collection: "orders" }) + .whereGreaterThan("totalPrice", 100) + .include("customerId") + .all(); + +for (let i = 0; i < orders.length; i++) { + // This will not require querying the server + const customer = await session.load(orders[i].customerId); +} +`} + + + + +{`const orders = await session + .query({ collection: "orders" }) + .whereGreaterThan("totalPrice", 100) + .include(i => i + .includeDocuments("customerId") // include document + .includeCounter("OrderUpdateCounter")) // builder can include counters as well + .all(); + +for (let i = 0; i < orders.length; i++) { + // This will not require querying the server + const customer = await session.load(orders[i].customerId); +} +`} + + + + +{`from "orders" +where totalPrice > 100 +include customerId +`} + + + + +{`from "orders" as o +where totalPrice > 100 +include customerId, counters(o,'OrderUpdateCount') +`} + + + + +This works because RavenDB has two channels through which it can return information in response to a load request. +The first is the Results channel, through which the root object retrieved by the `load()` method call is returned. 
+The second is the Includes channel, through which any included documents are sent back to the client.
+On the client side, those included documents are not returned from the `load()` method call, but they are added to the session unit of work,
+and subsequent requests to load them are served directly from the session cache, without requiring any additional queries to the server.
+
+
+The embedded and builder variants of the `include` clause are essentially syntactic sugar and are equivalent on the server side.
+
+
+
+Streaming query results does not support the includes feature.
+Learn more in [How to Stream Query Results](../../client-api/session/querying/how-to-stream-query-results.mdx#stream-related-documents).
+
+### One to many includes
+
+Include can be used with a one to many relationship.
+In the above classes, an `Order` has a property `supplierIds` which contains an array of references to `Supplier` documents.
+The following code will cause the suppliers to be pre-loaded:
+
+
+
+{`const order = await session
+    .include("supplierIds")
+    .load("orders/1-A");
+
+for (let i = 0; i < order.supplierIds.length; i++) \{
+    // This will not require querying the server
+    const supplier = await session.load(order.supplierIds[i]);
+\}
+`}
+
+
+
+Alternatively, it is possible to use the fluent builder syntax.
+
+
+
+{`const order = await session
+    .load("orders/1-A", \{
+        includes: i => i.includeDocuments("supplierIds")
+    \});
+
+for (let i = 0; i < order.supplierIds.length; i++) \{
+    // This will not require querying the server
+    const supplier = await session.load(order.supplierIds[i]);
+\}
+`}
+
+
+
+The calls to `load()` within the `for` loop will not require a call to the server as the `Supplier` objects will already be loaded into the session cache.
+
+Multi-loads are also possible:
+
+
+
+{`const orders = await session
+    .include("supplierIds")
+    .load(["orders/1-A", "orders/2-A"]);
+
+const orderEntities = Object.entries(orders);
+
+for (let i = 0; i < orderEntities.length; i++) \{
+    const suppliers = orderEntities[i][1].supplierIds;
+
+    for (let j = 0; j < suppliers.length; j++) \{
+        // This will not require querying the server
+        const supplier = await session.load(suppliers[j]);
+    \}
+\}
+`}
+
+
+### Secondary level includes
+
+An Include does not need to work only on the value of a top level property within a document.
+It can be used to load a value from a secondary level.
+In the classes above, the `Order` contains a `referral` property which is of the type:
+
+
+
+{`class Referral \{
+    constructor(
+        customerId = '',
+        commissionPercentage = 0
+    ) \{
+        Object.assign(this, \{
+            customerId,
+            commissionPercentage
+        \});
+    \}
+\}
+`}
+
+
+
+This class contains an identifier for a `Customer`.
+The following code will include the document referenced by that secondary level identifier:
+
+
+
+{`const order = await session
+    .include("referral.customerId")
+    .load("orders/1-A");
+
+// This will not require querying the server
+const customer = await session.load(order.referral.customerId);
+`}
+
+
+
+It is possible to execute the same code with the fluent builder syntax:
+
+
+
+{`const order = await session
+    .load("orders/1-A", \{
+        includes: i => i.includeDocuments("referral.customerId")
+    \});
+
+// This will not require querying the server
+const customer = await session.load(order.referral.customerId);
+`}
+
+
+
+This secondary level include will also work with collections.
+The `lineItems` property holds a collection of `LineItem` objects, each of which contains a reference to a `Product`:
+
+
+
+{`class LineItem \{
+    constructor(
+        productId = '',
+        name = '',
+        quantity = 0
+    ) \{
+        Object.assign(this, \{
+            productId,
+            name,
+            quantity
+        \});
+    \}
+\}
+`}
+
+
+
+The `Product` documents can be included using the following syntax:
+
+
+
+{`const order = await session
+    .include("lineItems[].productId")
+    .load("orders/1-A");
+
+for (let i = 0; i < order.lineItems.length; i++) \{
+    // This will not require querying the server
+    const product = await session.load(order.lineItems[i].productId);
+\}
+`}
+
+
+
+The fluent builder syntax works here too.
+
+
+
+{`const order = await session
+    .load("orders/1-A", \{
+        includes: i => i.includeDocuments("lineItems[].productId")
+    \});
+
+for (let i = 0; i < order.lineItems.length; i++) \{
+    // This will not require querying the server
+    const product = await session.load(order.lineItems[i].productId);
+\}
+`}
+
+
+
+
+### String path conventions
+
+When using string-based includes like:
+
+
+
+{`const order = await session
+    .include("referral.customerId")
+    .load("orders/1-A");
+
+// This will not require querying the server
+const customer = await session.load(order.referral.customerId);
+`}
+
+
+
+the string path you provide must follow certain rules:
+
+1. **Dots** are used to separate properties,
+   e.g. `"referral.customerId"` in the example above means that our `Order` contains a property `referral`, and that property contains another property called `customerId`.
+
+2. The **indexer operator** `[]` is used to indicate that a property is a collection type.
+   So if our `Order` has a list of `LineItem` objects and each `lineItem` contains a `productId` property, then we can create the string path as follows: `"lineItems[].productId"`.
+
+3. **Prefixes** can be used to indicate the prefix of the identifier of the document that is going to be included.
+   This can be useful when working with custom or semantic identifiers.
+   For example, if you have a customer stored under `customers/login@domain.com` then you can include it using `"referral.customerEmail(customers/)"` (`customers/` is the prefix here).
+
+Knowing the string path rules is also useful when you want to query the database using the HTTP API.
+
+
+
+{`curl -X GET "http://localhost:8080/databases/Northwind/docs?id=orders/1-A&include=Lines[].Product"
+`}
+
+
+
+
+### Dictionary includes
+
+Dictionary keys and values can also be used when doing includes.
Consider the following scenario:
+
+
+
+{`class Person \{
+    constructor(
+        id = '',
+        name = '',
+        // attributes will be assigned a plain object containing key-value pairs
+        attributes = \{\}
+    ) \{
+        Object.assign(this, \{
+            id,
+            name,
+            attributes
+        \});
+    \}
+\}
+`}
+
+
+
+
+
+{`const person1 = new Person();
+person1.name = "John Doe";
+person1.id = "people/1";
+person1.attributes = \{
+    "mother": "people/2",
+    "father": "people/3"
+\}
+
+const person2 = new Person();
+person2.name = "Helen Doe";
+person2.id = "people/2";
+
+const person3 = new Person();
+person3.name = "George Doe";
+person3.id = "people/3";
+
+await session.store(person1);
+await session.store(person2);
+await session.store(person3);
+
+await session.saveChanges();
+`}
+
+
+
+Now we want to include all documents referenced by the dictionary values:
+
+
+
+{`const person = await session
+    .include("attributes.$Values")
+    .load("people/1");
+
+const mother = await session
+    .load(person.attributes["mother"]);
+
+const father = await session
+    .load(person.attributes["father"]);
+
+assert.equal(session.advanced.numberOfRequests, 1);
+`}
+
+
+
+The code above can also be rewritten with the fluent builder syntax:
+
+
+
+{`const person = await session
+    .load("people/1", \{
+        includes: i => i.includeDocuments("attributes.$Values")
+    \});
+
+const mother = await session
+    .load(person.attributes["mother"]);
+
+const father = await session
+    .load(person.attributes["father"]);
+
+assert.equal(session.advanced.numberOfRequests, 1);
+`}
+
+
+
+You can also include values from dictionary keys:
+
+
+
+{`const person = await session
+    .include("attributes.$Keys")
+    .load("people/1");
+`}
+
+
+
+Here as well, this can be written with the fluent builder syntax:
+
+
+
+{`const person = await session
+    .load("people/1", \{
+        includes: i => i.includeDocuments("attributes.$Keys")
+    \});
+`}
+
+
+### Dictionary includes: complex types
+
+If the values in the dictionary are more complex, e.g.
+
+
+
+{`class PersonWithAttribute \{
+    constructor(
+        id = '',
+        name = '',
+        // attributes will be assigned a complex object
+        attributes = \{\}
+    ) \{
+        Object.assign(this, \{
+            id,
+            name,
+            attributes
+        \});
+    \}
+\}
+
+class Attribute \{
+    constructor(
+        ref = ''
+    ) \{
+        Object.assign(this, \{
+            ref
+        \});
+    \}
+\}
+`}
+
+
+
+
+
+{`const attr2 = new Attribute();
+attr2.ref = "people/2";
+const attr3 = new Attribute();
+attr3.ref = "people/3";
+
+const person1 = new PersonWithAttribute();
+person1.name = "John Doe";
+person1.id = "people/1";
+person1.attributes = \{
+    "mother": attr2,
+    "father": attr3
+\}
+
+const person2 = new Person();
+person2.name = "Helen Doe";
+person2.id = "people/2";
+
+const person3 = new Person();
+person3.name = "George Doe";
+person3.id = "people/3";
+
+await session.store(person1);
+await session.store(person2);
+await session.store(person3);
+
+await session.saveChanges();
+`}
+
+
+
+We can also do includes on specific properties:
+
+
+
+{`const person = await session
+    .include("attributes.$Values[].ref")
+    .load("people/1");
+
+const mother = await session
+    .load(person.attributes["mother"].ref);
+
+const father = await session
+    .load(person.attributes["father"].ref);
+
+assert.equal(session.advanced.numberOfRequests, 1);
+`}
+
+
+
+
+
+## Combining approaches
+
+It is possible to combine the above techniques.
+Using the `DenormalizedCustomer` from above and creating an order that uses it:
+
+
+
+{`class Order2 \{
+    constructor(
+        customer = \{\},
+        supplierIds = [],
+        referral = null,
+        lineItems = [],
+        totalPrice = 0
+    ) \{
+        Object.assign(this, \{
+            customer,
+            supplierIds,
+            referral,
+            lineItems,
+            totalPrice
+        \});
+    \}
+\}
+`}
+
+
+
+We have the advantages of denormalization: a quick and simple load of an `Order`,
+and the fairly static `Customer` details that are required for most processing.
+But we also have the ability to easily and efficiently load the full `Customer` object when necessary using:
+
+
+
+{`const order = await session
+    .include("customer.id")
+    .load("orders/1-A");
+
+// This will not require querying the server
+const customer = await session.load(order.customer.id);
+`}
+
+
+
+This combining of denormalization and Includes could also be used with a list of denormalized objects.
+
+It is possible to use an Include on a query that is a projection.
+Includes are evaluated after the projection has been evaluated.
+This opens up the possibility of implementing Tertiary Includes (i.e. retrieving documents that are referenced by documents that are referenced by the root document).
+
+RavenDB can support Tertiary Includes, but before resorting to them you should re-evaluate your document model.
+Needing Tertiary Includes can be an indication that you are designing your documents along "Relational" lines.
+
+
+
+## Summary
+
+There are no strict rules as to when to use which approach, but the general idea is to give it a lot of thought and consider the implications each approach has.
+
+As an example, in an e-commerce application it might be better to denormalize product names and prices into an order line object
+since you want to make sure the customer sees the same price and product title in the order history.
+But the customer name and addresses should probably be references rather than denormalized into the order entity.
+
+For most cases where denormalization is not an option, Includes are probably the answer.
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/how-to/_setup-aggressive-caching-csharp.mdx b/versioned_docs/version-7.1/client-api/how-to/_setup-aggressive-caching-csharp.mdx
new file mode 100644
index 0000000000..6baaec8205
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/how-to/_setup-aggressive-caching-csharp.mdx
@@ -0,0 +1,142 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+## Standard Cache Configuration
+
+The RavenDB client provides a caching mechanism out of the box. The default caching configuration is to cache all requests.
+
+The size of the cache can be configured by changing the [`MaxHttpCacheSize` convention](../../client-api/configuration/conventions.mdx#maxhttpcachesize).
+
+The client takes advantage of the server's `304 Not Modified` response and will serve the data from the cache if available.
+
+## Aggressive Mode
+
+The aggressive caching feature goes even further. Enabling it means that the client doesn't need to ask the server at all; it will simply return the response directly from the local cache, without using the `304 Not Modified` status.
+Results will be returned very fast.
+
+Here's how it works: The client subscribes to server notifications using the [Changes API](../changes/what-is-changes-api.mdx). By taking advantage of them, it is able to invalidate cached documents when they are changed.
+The client knows when it can serve the response from the cache, and when it has to send the request to get the up-to-date result.
+
+
+Even though the aggressive cache uses the notifications to invalidate the cache, it is still possible to get stale data because of the time needed for the notification to arrive from the server.
+
+
+Options for aggressive caching can be set in the Document Store conventions:
+
+
+
+{`var documentStore = new DocumentStore
+\{
+    Urls = new[] \{ "http://localhost:8080" \},
+    Database = "NorthWind",
+    Conventions =
+    \{
+        AggressiveCache =
+        \{
+            Duration = TimeSpan.FromMinutes(5),
+            Mode = AggressiveCacheMode.TrackChanges
+        \}
+    \}
+\};
+`}
+
+
+
+We can activate this mode globally from the store or per session.
+
+To activate this mode globally from the store we just need to add one of the following lines:
+
+
+
+{`documentStore.AggressivelyCacheFor(TimeSpan.FromMinutes(5));
+
+documentStore.AggressivelyCache(); // Defines the cache duration for 1 day
+`}
+
+
+
+If we want to activate this mode only in the session we need to add this in the session:
+
+
+
+{`using (session.Advanced.DocumentStore.AggressivelyCacheFor(TimeSpan.FromMinutes(5)))
+\{
+    Order order = session.Load<Order>("orders/1");
+\}
+`}
+
+
+
+If there is a value in the cache for `orders/1` that is at most 5 minutes old and we haven't got any change notification about it, we can return it directly. The same mechanism works on queries as well:
+
+
+
+{`using (session.Advanced.DocumentStore.AggressivelyCacheFor(TimeSpan.FromMinutes(5)))
+\{
+    List<Order> orders = session.Query<Order>().ToList();
+\}
+`}
+
+
+
+The use of the notification system means that you can set the aggressive cache duration to a longer period. The document store exposes the method:
+
+
+
+{`using (session.Advanced.DocumentStore.AggressivelyCache())
+\{ \}
+`}
+
+
+
+which is equivalent to:
+
+
+
+{`using (session.Advanced.DocumentStore.AggressivelyCacheFor(TimeSpan.FromDays(1)))
+\{ \}
+`}
+
+
+
+### Disable Change Tracking
+
+The client subscribes to change notifications from the server using the [Changes API](../changes/what-is-changes-api.mdx). You can choose to ignore
+these notifications from the server by changing the `AggressiveCacheMode` in the Document Store conventions.
+
+The modes are:
+* `AggressiveCacheMode.TrackChanges` - The default value. When the server sends a notification that some items (documents or indexes) have changed,
+those items are invalidated from the cache. The next time these items are loaded they will be retrieved from the server.
+* `AggressiveCacheMode.DoNotTrackChanges` - Notifications from the server will be ignored. For the aggressive cache `Duration`, results will be
+retrieved from the cache and may therefore be stale.
+
+
+
+{`documentStore.AggressivelyCacheFor(TimeSpan.FromMinutes(5), AggressiveCacheMode.DoNotTrackChanges);
+
+// Disable change tracking for just one session:
+using (session.Advanced.DocumentStore.AggressivelyCacheFor(TimeSpan.FromMinutes(5),
+    AggressiveCacheMode.DoNotTrackChanges))
+\{ \}
+`}
+
+
+
+### Disable Aggressive Mode
+
+We can disable the aggressive mode by simply using `documentStore.DisableAggressiveCaching();`. That way we disable aggressive caching
+globally in the store. But what if we need to disable aggressive caching only for a specific call, or to manually update the cache? Just like before, we can use `DisableAggressiveCaching()`
+per session:
+
+
+
+{`using (session.Advanced.DocumentStore.DisableAggressiveCaching())
+\{
+    Order order = session.Load<Order>("orders/1");
+\}
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/how-to/_setup-aggressive-caching-java.mdx b/versioned_docs/version-7.1/client-api/how-to/_setup-aggressive-caching-java.mdx
new file mode 100644
index 0000000000..896b6238c3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/how-to/_setup-aggressive-caching-java.mdx
@@ -0,0 +1,141 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+## Standard Cache Configuration
+
+The RavenDB client provides a caching mechanism out of the box. The default caching configuration is to cache all requests.
+
+The size of the cache can be configured by changing the [`MaxHttpCacheSize` convention](../../client-api/configuration/conventions.mdx#maxhttpcachesize).
+
+The client takes advantage of the server's `304 Not Modified` response and will serve the data from the cache if available.
+
+## Aggressive Mode
+
+The aggressive caching feature goes even further. Enabling it means that the client doesn't need to ask the server at all; it will simply return the response directly from the local cache, without using the `304 Not Modified` status.
+Results will be returned very fast.
+
+Here's how it works: The client subscribes to server notifications using the [Changes API](../changes/what-is-changes-api.mdx). By taking advantage of them, it is able to invalidate cached documents when they are changed.
+The client knows when it can serve the response from the cache, and when it has to send the request to get the up-to-date result.
+
+
+Even though the aggressive cache uses the notifications to invalidate the cache, it is still possible to get stale data because of the time needed for the notification to arrive from the server.
+
+
+Options for aggressive caching can be set in the Document Store conventions:
+
+
+
+{`try (IDocumentStore documentStore = new DocumentStore()) \{
+    DocumentConventions conventions = documentStore.getConventions();
+
+    conventions.aggressiveCache().setDuration(Duration.ofMinutes(5));
+    conventions.aggressiveCache().setMode(AggressiveCacheMode.TRACK_CHANGES);
+    // Do your work here
+\}
+`}
+
+
+
+We can activate this mode globally from the store or per session.
+
+To activate this mode globally from the store we just need to add one of the following lines:
+
+
+
+{`documentStore.aggressivelyCacheFor(Duration.ofMinutes(5));
+
+documentStore.aggressivelyCache(); // Defines the cache duration for 1 day
+`}
+
+
+
+If we want to activate this mode only in the session we need to add this in the session:
+
+
+
+{`try (CleanCloseable cacheScope = session.advanced().getDocumentStore()
+        .aggressivelyCacheFor(Duration.ofMinutes(5))) \{
+    Order order = session.load(Order.class, "orders/1");
+\}
+`}
+
+
+
+If there is a value in the cache for `orders/1` that is at most 5 minutes old and we haven't got any change notification about it, we can return it directly. The same mechanism works on queries as well:
+
+
+
+{`try (CleanCloseable cacheScope = session.advanced().getDocumentStore()
+        .aggressivelyCacheFor(Duration.ofMinutes(5))) \{
+    List<Order> orders = session.query(Order.class)
+        .toList();
+\}
+`}
+
+
+
+The use of the notification system means that you can set the aggressive cache duration to a longer period.
The document store exposes the method:
+
+
+
+{`try (CleanCloseable cacheScope = session
+        .advanced().getDocumentStore().aggressivelyCache()) \{
+
+\}
+`}
+
+
+
+which is equivalent to:
+
+
+
+{`try (CleanCloseable cacheScope = session
+        .advanced().getDocumentStore().aggressivelyCacheFor(Duration.ofDays(1))) \{
+
+\}
+`}
+
+
+
+### Disable Change Tracking
+
+The client subscribes to change notifications from the server using the [Changes API](../changes/what-is-changes-api.mdx). You can choose to ignore
+these notifications by changing the `AggressiveCacheMode` in the Document Store conventions.
+
+The modes are:
+* `AggressiveCacheMode.TRACK_CHANGES` - The default value. When the server sends a notification that some items (documents or indexes) have changed,
+those items are invalidated from the cache. The next time these items are loaded they will be retrieved from the server.
+* `AggressiveCacheMode.DO_NOT_TRACK_CHANGES` - Notifications from the server will be ignored. For the aggressive cache `Duration`, results will be
+retrieved from the cache and may therefore be stale.
+
+
+
+{`documentStore.aggressivelyCacheFor(Duration.ofMinutes(5), AggressiveCacheMode.DO_NOT_TRACK_CHANGES);
+
+// Disable change tracking for just one session:
+try (session.advanced().getDocumentStore().aggressivelyCacheFor(Duration.ofMinutes(5),
+        AggressiveCacheMode.DO_NOT_TRACK_CHANGES)) \{
+\}
+`}
+
+
+
+### Disable Aggressive Mode
+
+We can disable the aggressive mode by simply using `documentStore.disableAggressiveCaching();`. That way we disable aggressive caching
+globally in the store. But what if we need to disable aggressive caching only for a specific call, or to manually update the cache? Just like before, we can use `disableAggressiveCaching()`
+per session:
+ + + +{`try (CleanCloseable cacheScope = session.advanced().getDocumentStore() + .disableAggressiveCaching()) \{ + Order order = session.load(Order.class, "orders/1"); +\} +`} + + + diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_connections.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_connections.png new file mode 100644 index 0000000000..175d134cb7 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_connections.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_connections_dialog_1.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_connections_dialog_1.png new file mode 100644 index 0000000000..a7889fe3a4 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_connections_dialog_1.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_connections_dialog_2.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_connections_dialog_2.png new file mode 100644 index 0000000000..ec8b30b4b9 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_connections_dialog_2.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text.png new file mode 100644 index 0000000000..b6b112ef83 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_dialog.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_dialog.png new file mode 100644 index 0000000000..d51cc54ee2 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_dialog.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_results.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_results.png new file mode 100644 index 0000000000..6dca152c92 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_results.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_select.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_select.png new file mode 100644 index 0000000000..b42944adb6 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_select.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_wizard_1.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_wizard_1.png new file mode 100644 index 0000000000..f1c0af7bb2 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_wizard_1.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_wizard_2.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_wizard_2.png new file mode 100644 index 0000000000..e5ec649f1f Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_wizard_2.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_wizard_3.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_wizard_3.png new file mode 100644 index 0000000000..cb381b8a3b Binary files /dev/null and 
b/versioned_docs/version-7.1/client-api/how-to/assets/excel_from_text_wizard_3.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_integrated_long_url.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_integrated_long_url.png new file mode 100644 index 0000000000..b6f4348551 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_integrated_long_url.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/assets/excel_url_too_long.png b/versioned_docs/version-7.1/client-api/how-to/assets/excel_url_too_long.png new file mode 100644 index 0000000000..520da3d5ab Binary files /dev/null and b/versioned_docs/version-7.1/client-api/how-to/assets/excel_url_too_long.png differ diff --git a/versioned_docs/version-7.1/client-api/how-to/handle-document-relationships.mdx b/versioned_docs/version-7.1/client-api/how-to/handle-document-relationships.mdx new file mode 100644 index 0000000000..c1a7305b9a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/how-to/handle-document-relationships.mdx @@ -0,0 +1,49 @@ +--- +title: "How to Handle Document Relationships" +hide_table_of_contents: true +sidebar_label: ...handle document relationships +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HandleDocumentRelationshipsCsharp from './_handle-document-relationships-csharp.mdx'; +import HandleDocumentRelationshipsJava from './_handle-document-relationships-java.mdx'; +import HandleDocumentRelationshipsNodejs from './_handle-document-relationships-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/how-to/integrate-with-excel.mdx b/versioned_docs/version-7.1/client-api/how-to/integrate-with-excel.mdx new file mode 100644 index 0000000000..aa6a6c2362 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/how-to/integrate-with-excel.mdx @@ -0,0 +1,210 @@ +--- +title: "Client API: How to Integrate with Excel" +hide_table_of_contents: true +sidebar_label: ...integrate with Excel +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Client API: How to Integrate with Excel + +A very common use case for many applications is to expose data to users as an Excel file. RavenDB has dedicated support that allows you to directly consume data stored in a database by an Excel application. + +The integration of Excel with the data store is achieved by a designated query streaming endpoint that outputs a stream in a format acceptable by `Excel`, Comma Separated Values (CSV). + +In order to take advantage of this feature, you need to specify a valid query according to [RQL syntax](../../client-api/session/querying/what-is-rql.mdx). 
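+
+For instance, a minimal RQL query that could be streamed this way might look like the following sketch (the property names are assumptions based on the Northwind sample data used in the example below):
+
+
+
+{`from Products
+select Name, PricePerUnit
+`}
+
+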
+
+The generic HTTP request will have the following address:
+
+
+
+{`http://localhost:8080/databases/[db_name]/streams/queries?query=[query]&format=csv
+`}
+
+
+
+In order to include only specific properties in the CSV output, you can use the `field` parameter:
+
+
+
+{`http://localhost:8080/databases/[db_name]/streams/queries?query=[query]&field=[field-1]&field=[field-2]...&field=[field-N]&format=csv
+`}
+
+
+
+
+
+In some cases it might be cumbersome to use the URL to send the query, or the query might be too long. Please see our [dedicated section](../../client-api/how-to/integrate-with-excel.mdx#dealing-with-long-query-urls-in-excel) that deals with that problem.
+
+
+
+## Example
+
+First let's create a database, Northwind, and import the [sample data](../../studio/database/tasks/create-sample-data.mdx) into it.
+
+Now let's query the Products collection, include the category document, and project some of its properties using the RQL below:
+
+
+
+{`from Products as p
+load p.Category as c
+select
+\{
+    Name: p.Name,
+    Category: c.Name,
+\}
+`}
+
+
+
+In order to execute the above query we will need to use the following URL:
+
+
+
+{`http://localhost:8080/databases/Northwind/streams/queries?query=from%20Products%20as%20p%0Aload%20p.Category%20as%20c%0Aselect%20%0A%7B%0A%20%20%20%20Name%3A%20p.Name%2C%0A%20%20%20%20Category%3A%20c.Name%2C%0A%7D&format=csv
+`}
+
+
+
+Going to the above address in a web browser will download an export.csv file containing the following results:
+
+
+
+{`Name,Category
+Chang,Beverages
+Aniseed Syrup,Condiments
+Chef Anton's Cajun Seasoning,Condiments
+Chef Anton's Gumbo Mix,Condiments
+Grandma's Boysenberry Spread,Condiments
+Uncle Bob's Organic Dried Pears,Produce
+Northwoods Cranberry Sauce,Condiments
+Mishi Kobe Niku,Meat/Poultry
+Ikura,Seafood
+Queso Cabrales,Dairy Products
+Queso Manchego La Pastora,Dairy Products
+Konbu,Seafood
+Tofu,Produce
+Genen Shouyu,Condiments
+Pavlova,Confections
+Alice Mutton,Meat/Poultry
+Carnarvon Tigers,Seafood
+`}
+
+
+
+To push them to Excel we need to create a new spreadsheet and import data `From Text`:
+
+![Importing data from text in Excel](./assets/excel_from_text.png)
+
+In the Open File dialog we paste our query URL:
+
+![Open File Dialog](./assets/excel_from_text_dialog.png)
+
+Next, the Import Wizard will show up, where we can adjust our import settings (don't forget to check `Comma` as the desired delimiter):
+
+![Import Wizard Step 1](./assets/excel_from_text_wizard_1.png)
+
+![Import Wizard Step 2](./assets/excel_from_text_wizard_2.png)
+
+![Import Wizard Step 3](./assets/excel_from_text_wizard_3.png)
+
+Finally we need to select where we would like to place the imported data:
+
+![Select where to put the data](./assets/excel_from_text_select.png)
+
+As a result of the previous actions, the spreadsheet data should look like:
+
+![Excel results](./assets/excel_from_text_results.png)
+
+Now we must tell Excel to refresh the data. Click on `Connections` in the `Data` panel:
+
+![Excel connections](./assets/excel_connections.png)
+
+You will see something like:
+
+![Excel connections dialog](./assets/excel_connections_dialog_1.png)
+
+Go to Properties and:
+
+1. **uncheck** `Prompt for file name on refresh`.
+2. **check** `Refresh data when opening the file`.
+
+![Excel connection properties](./assets/excel_connections_dialog_2.png)
+
+You can close the file, change something in the database, and reopen it. You will see the new values.
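+
+As a side note, the `field` parameter described at the top of this page works with this same streaming endpoint.
+For example, the following URL is a sketch (assuming the Northwind sample database used above) that limits the CSV output to the `Name` and `PricePerUnit` properties of each product:
+
+
+
+{`http://localhost:8080/databases/Northwind/streams/queries?query=from%20Products&field=Name&field=PricePerUnit&format=csv
+`}
+
+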
+
+## Dealing with Long Query URLs in Excel
+
+If you try a more complex query, you might find that Excel refuses to execute your request.
+
+### Long Query Example
+
+
+
+{`from Products as p
+load p.Category as c
+select
+\{
+    Name: p.Name,
+    Category: c.Name,
+    Discontinued: p.Discontinued,
+    PricePerUnit: p.PricePerUnit
+\}
+`}
+
+
+
+After escaping the above query we will end up with the following request URL:
+
+
+
+{`http://localhost:8080/databases/Northwind/streams/queries?query=from%20Products%20as%20p%0Aload%20p.Category%20as%20c%0Aselect%20%0A%7B%0A%20%20%20%20Name%3A%20p.Name%2C%0A%20%20%20%20Category%3A%20c.Name%2C%0A%20%20%20%20Discontinued%3A%20p.Discontinued%2C%0A%20%20%20%20PricePerUnit%3A%20p.PricePerUnit%0A%7D&format=csv
+`}
+
+
+
+Trying to use this URL will produce the following error in Excel:
+
+![Excel url too long](./assets/excel_url_too_long.png)
+
+There are two ways to deal with this problem. You can use an online service like [TinyUrl](https://tinyurl.com/) and provide it with the above URL.
+
+What you get back is a URL like `https://tinyurl.com/y8t7j6r7`. This is a pretty nice workaround if you're not on an isolated system and have no security restrictions.
+The other option is to redirect the query through a pre-defined query that resides in your database.
+For that you will need to include a document in your database with a `Query` property. Let's generate such a document and call it `Excel/ProductWithCategory`.
+The name of the document has no significance, but it is recommended to use a key that reflects the purpose of this document.
+Let's add the `Query` property and set its value to the above query:
+
+
+
+{`\{
+    "Query": "from%20Products%20as%20p%0Aload%20p.Category%20as%20c%0Aselect%20%0A%7B%0A%20%20%20%20Name%3A%20p.Name%2C%0A%20%20%20%20Category%3A%20c.Name%2C%0A%20%20%20%20Discontinued%3A%20p.Discontinued%2C%0A%20%20%20%20PricePerUnit%3A%20p.PricePerUnit%0A%7D",
+    "@metadata": \{
+        "@collection": "Excel"
+    \}
+\}
+`}
+
+
+
+Now that we have the document ready for use, all we need to do is modify our URL so it will use the document redirection feature.
+
+
+
+{`http://localhost:8080/databases/Northwind/streams/queries?fromDocument=Excel%2FProductWithCategory&format=csv
+`}
+
+
+
+Repeating the instructions above, you should get the following result:
+
+![Excel integrated with long url](./assets/excel_integrated_long_url.png)
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/how-to/setup-aggressive-caching.mdx b/versioned_docs/version-7.1/client-api/how-to/setup-aggressive-caching.mdx
new file mode 100644
index 0000000000..054df4a26a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/how-to/setup-aggressive-caching.mdx
@@ -0,0 +1,29 @@
+---
+title: "Client API: How to Setup Aggressive Caching"
+hide_table_of_contents: true
+sidebar_label: ...setup aggressive caching
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import SetupAggressiveCachingCsharp from './_setup-aggressive-caching-csharp.mdx';
+import SetupAggressiveCachingJava from './_setup-aggressive-caching-java.mdx';
+
+export const supportedLanguages = ["csharp", "java"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/how-to/store-dates.mdx b/versioned_docs/version-7.1/client-api/how-to/store-dates.mdx
new file mode 100644
index 0000000000..279b095087
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/how-to/store-dates.mdx
@@ -0,0 +1,33 @@
+---
+title: "Client API: How to Store Dates in RavenDB Using UTC and Using Local Time"
+hide_table_of_contents: true
+sidebar_label: ...store dates
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Client API: How to Store Dates in RavenDB Using UTC and Using Local Time
+
+When you store a date in RavenDB, it saves whether the date is UTC or not. When it's not UTC, a local date is treated as "Unspecified".
+
+However, if you have people from around the world using the same database and you use unspecified local times, the offset is not stored. If you want to deal with this scenario, you need to store the date using a `DateTimeOffset`, which will store the date and time along with its time zone offset.
+
+The decision of whether to use UTC, Local Time, or `DateTimeOffset` is an application decision, not an infrastructure decision. There are valid reasons for using any one of these.
+
+
+## ISO 8601 Compliance and Default Storing Formats
+
+RavenDB is [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) compliant.
+
+The default storing format for `DateTime` is: **"yyyy'-'MM'-'dd'T'HH':'mm':'ss.fffffff"**
+
+For storing `DateTimeOffset`, RavenDB uses the [Round-trip ("o")](https://docs.microsoft.com/en-us/dotnet/standard/base-types/standard-date-and-time-format-strings#Roundtrip) format.
+
+## More Information
+For detailed information about this topic, please refer to the [Working with Date and Time in RavenDB](https://codeofmatt.com/date-and-time-in-ravendb/) article written by Matt Johnson.
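+
+For illustration, here is a minimal, hypothetical sketch of the three options discussed above (the `Meeting` class and the `store` instance are assumptions made for this example, not part of the article):
+
+
+
+{`public class Meeting
+\{
+    public DateTime StartsAtUtc \{ get; set; \}        // UTC; the UTC kind is preserved on save
+    public DateTime StartsAtLocal \{ get; set; \}      // local time; the offset is not stored
+    public DateTimeOffset StartsAt \{ get; set; \}     // stores date, time, and time zone offset
+\}
+
+using (var session = store.OpenSession())
+\{
+    session.Store(new Meeting
+    \{
+        StartsAtUtc = DateTime.UtcNow,
+        StartsAtLocal = DateTime.Now,
+        StartsAt = DateTimeOffset.Now
+    \});
+    session.SaveChanges();
+\}
+`}
+
+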
diff --git a/versioned_docs/version-7.1/client-api/how-to/subscribe-to-store-events.mdx b/versioned_docs/version-7.1/client-api/how-to/subscribe-to-store-events.mdx new file mode 100644 index 0000000000..1f4af75905 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/how-to/subscribe-to-store-events.mdx @@ -0,0 +1,428 @@ +--- +title: "Client API: Subscribing to Store Events" +hide_table_of_contents: true +sidebar_label: ...subscribe to Store events +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Client API: Subscribing to Store Events + + +* **Events** allow users to perform custom actions in response to operations made in + a `Document Store` or a `Session`. + +* An event is invoked when the selected action is executed on an entity, + or querying is performed. + +* Subscribing to an event at the `DocumentStore` level subscribes to this + event in all subsequent sessions. + + E.g., to invoke an event after SaveChanges() is called by **any subsequent session**, use - + `store.OnAfterSaveChanges += OnAfterSaveChangesEvent;` + +* Subscribing to an event in a `Session` is valid only for this session. + + E.g., to invoke an event after SaveChanges() is called by **this session** only, use - + `session.Advanced.OnAfterSaveChanges += OnAfterSaveChangesEvent;` + + Read more about `Session` events [here](../../client-api/session/how-to/subscribe-to-events.mdx). + +* In this page: + * [Store Events](../../client-api/how-to/subscribe-to-store-events.mdx#store-events) + * [OnBeforeRequest](../../client-api/how-to/subscribe-to-store-events.mdx#section) + * [OnSucceedRequest](../../client-api/how-to/subscribe-to-store-events.mdx#section-1) + * [AfterDispose](../../client-api/how-to/subscribe-to-store-events.mdx#section-2) + * [BeforeDispose](../../client-api/how-to/subscribe-to-store-events.mdx#section-3) + * [RequestExecutorCreated](../../client-api/how-to/subscribe-to-store-events.mdx#section-4) + * [OnSessionCreated](../../client-api/how-to/subscribe-to-store-events.mdx#section-5) + * [OnFailedRequest](../../client-api/how-to/subscribe-to-store-events.mdx#section-6) + * [OnTopologyUpdated](../../client-api/how-to/subscribe-to-store-events.mdx#section-7) + * [Store/Session Events](../../client-api/how-to/subscribe-to-store-events.mdx#store/session-events) + + +## Store Events + +You can subscribe to the following events only at the store level, not within a session. + +## `OnBeforeRequest` + +This event is invoked by sending a request to the server, before the request +is actually sent. 
+It should be defined with this signature: + + +{`private void OnBeforeRequestEvent(object sender, BeforeRequestEventArgs args); +`} + + + +**Parameters**: + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| **sender** | `IDocumentStore ` | The subscribed store that triggered the event | +| **args** | `BeforeRequestEventArgs` | See details below | + +`BeforeRequestEventArgs`: + + +{`public class BeforeRequestEventArgs : EventArgs +\{ + // Database Name + public string Database \{ get; \} + // Database URL + public string Url \{ get; \} + // The request intended to be sent to the server + public HttpRequestMessage Request \{ get; \} + // The number of attempts made to send the request to the server + public int AttemptNumber \{ get; \} +\} +`} + + + +* **Example**: + To define a method that checks URLs sent in a document store request: + + +{`private void OnBeforeRequestEvent(object sender, BeforeRequestEventArgs args) +\{ + var forbiddenURL = new Regex("/databases/[^/]+/docs"); + + if (forbiddenURL.IsMatch(args.Url) == true) + \{ + // action to be taken if the URL is forbidden + \} +\} +`} + + + + To subscribe to the event: + + +{`// Subscribe to the event +store.OnBeforeRequest += OnBeforeRequestEvent; +`} + + + +## `OnSucceedRequest` + +This event is invoked by receiving a successful reply from the server. +It should be defined with this signature: + + +{`private void OnSucceedRequestEvent(object sender, SucceedRequestEventArgs args); +`} + + + +**Parameters**: + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| **sender** | `IDocumentStore ` | The subscribed store that triggered the event | +| **args** | `SucceedRequestEventArgs` | See details below | + +`SucceedRequestEventArgs`: + + +{`public class SucceedRequestEventArgs : EventArgs +\{ + // Database Name + public string Database \{ get; \} + // Database URL + public string Url \{ get; \} + // The message returned from the server + public HttpResponseMessage Response \{ get; \} + // The original request sent to the server + public HttpRequestMessage Request \{ get; \} + // The number of attempts made to send the request to the server + public int AttemptNumber \{ get; \} +\} +`} + + + +* **Example** + To define a method that would be activated when a request succeeds: + + +{`private void OnSucceedRequestEvent(object sender, SucceedRequestEventArgs args) +\{ + if (args.Response.IsSuccessStatusCode == true) + \{ + // action to be taken after a successful request + \} +\} +`} + + + + To subscribe to the event: + + +{`// Subscribe to the event +store.OnSucceedRequest += OnSucceedRequestEvent; +`} + + + +## `AfterDispose` +This event is invoked immediately after a document store is disposed of. +It should be defined with this signature: + + +{`private void AfterDisposeEvent(object sender, EventHandler args); +`} + + + +**Parameters**: + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| **sender** | `IDocumentStore ` | The subscribed store whose disposal triggered the event | +| **args** | `EventHandler` | **args** has no contents for this event | + +## `BeforeDispose` +This event is invoked immediately before a document store is disposed of. 
+It should be defined with this signature:
+
+
+{`private void BeforeDisposeEvent(object sender, EventArgs args);
+`}
+
+
+
+**Parameters**:
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **sender** | `IDocumentStore` | The subscribed store whose disposal triggered the event |
+| **args** | `EventArgs` | **args** carries no data for this event |
+
+## `RequestExecutorCreated`
+This event is invoked when a Request Executor is created,
+allowing you to subscribe to various events of the request executor.
+It should be defined with this signature:
+
+
+{`private void RequestExecutorCreatedEvent(object sender, RequestExecutor args);
+`}
+
+
+
+**Parameters**:
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **sender** | `IDocumentStore` | The subscribed store that triggered the event |
+| **args** | `RequestExecutor` | The created Request Executor instance |
+
+## `OnSessionCreated`
+This event is invoked after a session is created, allowing you, for example,
+to change session configurations.
+It should be defined with this signature:
+
+
+{`private void OnSessionCreatedEvent(object sender, SessionCreatedEventArgs args);
+`}
+
+
+
+**Parameters**:
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **sender** | `IDocumentStore` | The subscribed store that triggered the event |
+| **args** | `SessionCreatedEventArgs` | The created Session |
+
+`SessionCreatedEventArgs`:
+
+
+{`public class SessionCreatedEventArgs : EventArgs
+\{
+    public InMemoryDocumentSessionOperations Session \{ get; \}
+\}
+`}
+
+
+
+* **Example**
+  To define a method that will be invoked when a session is created:
+
+
+{`private void OnSessionCreatedEvent(object sender, SessionCreatedEventArgs args)
+\{
+    args.Session.MaxNumberOfRequestsPerSession = 100;
+\}
+`}
+
+
+
+  To subscribe to the event:
+
+
+{`// Subscribe to the event
+store.OnSessionCreated += OnSessionCreatedEvent;
+`}
+
+
+
+
+## `OnFailedRequest`
+This event is invoked when a request to the server fails. It allows you, for example, to track
+and log failed requests.
+It should be defined with this signature:
+
+
+{`private void OnFailedRequestEvent(object sender, FailedRequestEventArgs args);
+`}
+
+
+
+**Parameters**:
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **sender** | `IDocumentStore` | The subscribed store that triggered the event |
+| **args** | `FailedRequestEventArgs` | See details below |
+
+`FailedRequestEventArgs`:
+
+
+{`public class FailedRequestEventArgs : EventArgs
+\{
+    // Database Name
+    public string Database \{ get; \}
+    // Database URL
+    public string Url \{ get; \}
+    // The exception returned from the server
+    public Exception Exception \{ get; \}
+    // The message returned from the server
+    public HttpResponseMessage Response \{ get; \}
+    // The original request sent to the server
+    public HttpRequestMessage Request \{ get; \}
+\}
+`}
+
+
+
+* **Example**
+  To define a method that will be invoked when a request fails:
+
+
+{`private void OnFailedRequestEvent(object sender, FailedRequestEventArgs args)
+\{
+    Logger($"Failed request for database '\{args.Database\}' ('\{args.Url\}')", args.Exception);
+\}
+`}
+
+
+
+  To subscribe to the event:
+
+
+{`// Subscribe to the event
+store.OnFailedRequest += OnFailedRequestEvent;
+`}
+
+
+
+## `OnTopologyUpdated`
+This event is invoked upon a topology update (e.g., when a node is added),
+**after** the topology has been updated.
+It should be defined with this signature:
+
+
+{`private void OnTopologyUpdatedEvent(object sender, TopologyUpdatedEventArgs args);
+`}
+
+
+
+**Parameters**:
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **sender** | `IDocumentStore` | The subscribed store that triggered the event |
+| **args** | `TopologyUpdatedEventArgs` | The updated list of nodes |
+
+`TopologyUpdatedEventArgs`:
+
+
+{`public class TopologyUpdatedEventArgs : EventArgs
+\{
+    public Topology Topology \{ get; \}
+\}
+`}
+
+
+
+`Topology`:
+
+
+{`public class Topology
+\{
+    public long Etag;
+    public List<ServerNode> Nodes;
+\}
+`}
+
+
+
+* **Example**
+  To define a method that will be invoked on a topology update:
+
+
+{`void OnTopologyUpdatedEvent(object sender, TopologyUpdatedEventArgs args)
+\{
+    var topology = args.Topology;
+    if (topology == null)
+        return;
+    for (var i = 0; i < topology.Nodes.Count; i++)
+    \{
+        // perform relevant operations on the nodes after the topology was updated
+    \}
+\}
+`}
+
+
+
+  To subscribe to the event:
+
+
+{`// Subscribe to the event
+store.OnTopologyUpdated += OnTopologyUpdatedEvent;
+`}
+
+
+
+
+
+## Store/Session Events
+You can subscribe to the following events both at the store level and in a session.
+
+
+
+  * Subscribing to an event in a session limits the scope of the subscription to this session.
+  * When you subscribe to an event at the store level, the subscription is inherited by
+    all subsequent sessions.
+
+
+
+* [OnBeforeStore](../../client-api/session/how-to/subscribe-to-events.mdx#onbeforestore)
+* [OnAfterSaveChanges](../../client-api/session/how-to/subscribe-to-events.mdx#onaftersavechanges)
+* [OnBeforeDelete](../../client-api/session/how-to/subscribe-to-events.mdx#onbeforedelete)
+* [OnBeforeQuery](../../client-api/session/how-to/subscribe-to-events.mdx#onbeforequery)
+* [OnBeforeConversionToDocument](../../client-api/session/how-to/subscribe-to-events.mdx#onbeforeconversiontodocument)
+* [OnAfterConversionToDocument](../../client-api/session/how-to/subscribe-to-events.mdx#onafterconversiontodocument)
+* [OnBeforeConversionToEntity](../../client-api/session/how-to/subscribe-to-events.mdx#onbeforeconversiontoentity)
+* [OnAfterConversionToEntity](../../client-api/session/how-to/subscribe-to-events.mdx#onafterconversiontoentity)
+* [OnSessionDisposing](../../client-api/session/how-to/subscribe-to-events.mdx#onsessiondisposing)
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/how-to/using-timeonly-and-dateonly.mdx b/versioned_docs/version-7.1/client-api/how-to/using-timeonly-and-dateonly.mdx
new file mode 100644
index 0000000000..fdb8befd37
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/how-to/using-timeonly-and-dateonly.mdx
@@ -0,0 +1,285 @@
+---
+title: "Client API: How to Use TimeOnly and DateOnly Types"
+hide_table_of_contents: true
+sidebar_label: ...use TimeOnly and DateOnly
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Client API: How to Use TimeOnly and DateOnly Types
+
+
+* To save storage space and streamline your process when you only need to know the date or the time, you can store and query
+  [DateOnly](https://devblogs.microsoft.com/dotnet/date-time-and-time-zone-enhancements-in-net-6/#the-dateonly-type) and
+  [TimeOnly](https://devblogs.microsoft.com/dotnet/date-time-and-time-zone-enhancements-in-net-6/#the-timeonly-type) types
+  instead of `DateTime`. (Available from .NET 6.0 and RavenDB 5.3 onward.)
+
+* You can now convert `DateTime` or strings written in date/time formats to .NET's
+  `DateOnly` or `TimeOnly` types without slowing down queries and while leaving your existing data as is.
+  * Use `AsDateOnly` or `AsTimeOnly` in a static index ([see examples below](../../client-api/how-to/using-timeonly-and-dateonly.mdx#use--or--in-a-static-index-to-convert-strings-or-datetime))
+  * `AsDateOnly` and `AsTimeOnly` automatically convert strings to ticks for faster querying.
+
+* The types are converted in [static indexes](../../indexes/map-indexes.mdx) so that the conversions and calculations are done behind the scenes
+  and the data is ready for fast queries. ([See sample index below.](../../client-api/how-to/using-timeonly-and-dateonly.mdx#convert-and-use-date/timeonly-without-affecting-your-existing-data))
+
+* In this page:
+  * [About DateOnly and TimeOnly](../../client-api/how-to/using-timeonly-and-dateonly.mdx#about-dateonly-and-timeonly)
+  * [Convert and Use Date/TimeOnly Without Affecting Your Existing Data](../../client-api/how-to/using-timeonly-and-dateonly.mdx#convert-and-use-date/timeonly-without-affecting-your-existing-data)
+  * [Using already existing DateOnly or TimeOnly fields](../../client-api/how-to/using-timeonly-and-dateonly.mdx#using-already-existing-dateonly-or-timeonly-fields)
+
+
+
+## About DateOnly and TimeOnly
+
+These two C# types are available from .NET 6.0 (RavenDB 5.3) onward.
+
+* **DateOnly**
+  According to the [Microsoft .NET Blog](https://devblogs.microsoft.com/dotnet/date-time-and-time-zone-enhancements-in-net-6/#the-dateonly-type),
+  DateOnly is ideal for scenarios such as birth dates, anniversaries, hire dates,
+  and other business dates that are not typically associated with any particular time.
+  * See [their usage examples here.](https://devblogs.microsoft.com/dotnet/date-time-and-time-zone-enhancements-in-net-6/#the-dateonly-type)
+
+* **TimeOnly**
+  According to the [Microsoft .NET Blog](https://devblogs.microsoft.com/dotnet/date-time-and-time-zone-enhancements-in-net-6/#the-timeonly-type),
+  TimeOnly is ideal for scenarios such as recurring meeting times, daily alarm clock times,
+  or the times that a business opens and closes each day of the week.
+  * See [their usage examples here.](https://devblogs.microsoft.com/dotnet/date-time-and-time-zone-enhancements-in-net-6/#the-timeonly-type)
+
+
+
+## Convert and Use Date/TimeOnly Without Affecting Your Existing Data
+
+RavenDB offers conversion of types in static indexes with the methods [AsDateOnly or AsTimeOnly](../../client-api/how-to/using-timeonly-and-dateonly.mdx#use--or--in-a-static-index-to-convert-strings-or-datetime).
+
+* [Static indexes](../../indexes/indexing-basics.mdx) process new data in the background,
+  including calculations and conversions to DateOnly/TimeOnly values (which can be stored as ticks),
+  so that the data is ready at query time when you [query the index](../../indexes/querying/query-index.mdx).
+  * The first time such an index runs, it performs the calculations on the entire dataset you defined;
+    from then on it only needs to process changes in the data.
+
+
+Ticks are faster to compute than other date/time formats because they are [simple numbers](https://docs.microsoft.com/en-us/dotnet/api/system.datetime.ticks?view=net-6.0)
+that represent time since 1-1-0001 at midnight.
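+
+For illustration, here is a minimal plain-.NET sketch (standard .NET 6 APIs only, no RavenDB calls) of the simple numbers these types reduce to:
+
+
+{`var date = new DateOnly(2022, 5, 12);
+int dayNumber = date.DayNumber;   // days elapsed since 0001-01-01
+
+var time = new TimeOnly(15, 0);
+long ticks = time.Ticks;          // ticks elapsed since midnight
+`}
+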
+
+If your data is in strings, to use ticks you must create a **static index**
+that computes the conversion from strings to `DateOnly` or `TimeOnly`.
+
+RavenDB automatically converts strings into ticks via `AsDateOnly` or `AsTimeOnly`.
+
+An auto-index will not convert strings into ticks, but will index the data as strings.
+By defining a query that creates an auto-index which [orders](../../indexes/querying/sorting.mdx) the strings, you can also compare strings,
+though comparing ticks is faster.
+
+
+### Use `AsDateOnly` or `AsTimeOnly` in a static index to convert strings or DateTime
+
+* [Converting Strings to DateOnly or TimeOnly](../../client-api/how-to/using-timeonly-and-dateonly.mdx#converting-strings-with-minimal-cost)
+* [Converting DateTime to DateOnly or TimeOnly](../../client-api/how-to/using-timeonly-and-dateonly.mdx#converting--with-minimal-cost)
+
+#### Converting Strings with minimal cost
+
+The following generic sample is a map index where `AsDateOnly` converts the string `item.StringDateOnlyField` into `DateOnly`.
+
+When the converted data is available in the index, you can inexpensively [query the index](../../indexes/querying/query-index.mdx).
+
+Strings are automatically converted to ticks for faster querying.
+
+
+
+{`// Create a Static Index.
+public class StringAsDateOnlyConversion : AbstractIndexCreationTask<StringItem>
+\{
+    public StringAsDateOnlyConversion()
+    \{
+        // This map index converts strings that are in date format to DateOnly with AsDateOnly().
+        Map = items => from item in items
+                       // RavenDB doesn't look for DateOnly or TimeOnly as default types during indexing,
+                       // so the variables must be wrapped in AsDateOnly() or AsTimeOnly() explicitly.
+                       where AsDateOnly(item.DateTimeValue) < AsDateOnly(item.DateOnlyValue).AddDays(-50)
+                       select new DateOnlyItem \{ DateOnlyField = AsDateOnly(item.StringDateOnlyField) \};
+    \}
+\}
+
+public class StringItem
+\{
+    public string StringDateOnlyField \{ get; set; \}
+    public object DateTimeValue \{ get; set; \}
+    public object DateOnlyValue \{ get; set; \}
+\}
+
+public class DateOnlyItem
+\{
+    public DateOnly? DateOnlyField \{ get; set; \}
+\}
+`}
+
+
+
+RavenDB doesn't look for DateOnly or TimeOnly types by default during indexing,
+so the variables must be wrapped in AsDateOnly() or AsTimeOnly() explicitly.
+
+
+Using the static index above: a string in date format ("2022-05-12") is saved, the index converts it to `DateOnly`, and then
+the index is queried.
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    // A string in date format is saved.
+    session.Store(new StringItem()
+    \{
+        StringDateOnlyField = "2022-05-12"
+    \});
+    session.SaveChanges();
+\}
+// This is the index used earlier.
+new StringAsDateOnlyConversion().Execute(store);
+WaitForIndexing(store);
+
+using (var session = store.OpenSession())
+\{
+    var today = new DateOnly(2022, 5, 12);
+    // Query the index created earlier for items which were marked with today's date
+    var element = session.Query<DateOnlyItem, StringAsDateOnlyConversion>()
+        .Where(item => item.DateOnlyField == today)
+        // This is an optional type relaxation for projections
+        .As<StringItem>().Single();
+\}
+`}
+
+
+#### Converting `DateTime` with minimal cost
+
+The following generic sample is a map index that converts `DateTime` into `DateOnly` and saves the values in the index.
+
+Once the converted data is available in the static index, you can inexpensively [query the index](../../indexes/querying/query-index.mdx).
+
+
+
+{`// Create a Static Index.
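+// Note: a static index performs this conversion once, at indexing time,
+// so queries against the stored DateOnly values pay no per-query conversion cost.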
+public class DateTimeAsDateOnlyConversion : AbstractIndexCreationTask<DateTimeItem>
+\{
+    public DateTimeAsDateOnlyConversion()
+    \{
+        // This map index converts DateTime to DateOnly with AsDateOnly().
+        Map = items => from item in items
+                       // RavenDB doesn't look for DateOnly or TimeOnly as default types during indexing,
+                       // so the variables must be wrapped in AsDateOnly() or AsTimeOnly() explicitly.
+                       where AsDateOnly(item.DateTimeValue) < AsDateOnly(item.DateOnlyValue).AddDays(-50)
+                       select new DateOnlyItem \{ DateOnlyField = AsDateOnly(item.DateTimeField) \};
+    \}
+\}
+
+public class DateTimeItem
+\{
+    public DateTime? DateTimeField \{ get; set; \}
+    public object DateTimeValue \{ get; set; \}
+    public object DateOnlyValue \{ get; set; \}
+\}
+`}
+
+
+
+RavenDB doesn't look for DateOnly or TimeOnly types by default during indexing,
+so the variables must be wrapped in AsDateOnly() or AsTimeOnly() explicitly.
+
+
+Using the index above, the following example saves `DateTime.Now`, the type is converted in the index, and then
+the index is queried.
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    // A DateTime value is saved
+    session.Store(new DateTimeItem()
+    \{
+        DateTimeField = DateTime.Now
+    \});
+    session.SaveChanges();
+\}
+// The index above is called and we wait for the index to finish converting
+new DateTimeAsDateOnlyConversion().Execute(store);
+WaitForIndexing(store);
+
+using (var session = store.OpenSession())
+\{
+    // Query the index
+    var today = DateOnly.FromDateTime(DateTime.Now);
+    var element = session.Query<DateOnlyItem, DateTimeAsDateOnlyConversion>()
+        .Where(item => item.DateOnlyField == today)
+        // This is an optional type relaxation for projections
+        .As<DateTimeItem>().Single();
+\}
+`}
+
+
+
+
+
+## Using already existing DateOnly or TimeOnly fields
+
+RavenDB doesn't look for DateOnly or TimeOnly types by default during indexing,
+so the index must have a field that declares the type as DateOnly or TimeOnly.
+
+
+
+{`public class DateAndTimeOnlyIndex : AbstractIndexCreationTask<DateAndTimeOnly, DateAndTimeOnlyIndex.IndexEntry>
+\{
+    public class IndexEntry
+    \{
+        public DateOnly DateOnly \{ get; set; \}
+        public int Year \{ get; set; \}
+        public DateOnly DateOnlyString \{ get; set; \}
+        public TimeOnly TimeOnlyString \{ get; set; \}
+        public TimeOnly TimeOnly \{ get; set; \}
+    \}
+
+    public DateAndTimeOnlyIndex()
+    \{
+        Map = dates => from date in dates
+                       select new IndexEntry() \{ DateOnly = date.DateOnly, TimeOnly = date.TimeOnly \};
+    \}
+\}
+`}
+
+
+
+For example, the following query will find all of the entries that occurred between 15:00 and 17:00,
+without considering the date.
+
+
+
+{`var after = new TimeOnly(15, 00);
+var before = new TimeOnly(17, 00);
+var result = session
+    .Query<DateAndTimeOnlyIndex.IndexEntry, DateAndTimeOnlyIndex>()
+    .Where(i => i.TimeOnly > after && i.TimeOnly < before)
+    .ToList();
+`}
+
+
+
+**Querying on Ticks**
+Strings are automatically converted to ticks with [`AsDateOnly` and `AsTimeOnly`](../../client-api/how-to/using-timeonly-and-dateonly.mdx#use--or--in-a-static-index-to-convert-strings-or-datetime).
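+
+For instance, here is a sketch (reusing the `StringAsDateOnlyConversion` sample above, with the field and type names assumed from that sample) of a date-range filter over the converted string dates, which the index evaluates as a numeric ticks comparison:
+
+
+{`using (var session = store.OpenSession())
+\{
+    var fromDate = new DateOnly(2022, 1, 1);
+    var toDate = new DateOnly(2022, 12, 31);
+
+    // The strings were converted to DateOnly at indexing time,
+    // so this range filter compares ticks rather than strings
+    var itemsInRange = session.Query<DateOnlyItem, StringAsDateOnlyConversion>()
+        .Where(item => item.DateOnlyField >= fromDate && item.DateOnlyField <= toDate)
+        .As<StringItem>()
+        .ToList();
+\}
+`}
+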
+
+
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/net-client-versions.mdx b/versioned_docs/version-7.1/client-api/net-client-versions.mdx
new file mode 100644
index 0000000000..88956401e2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/net-client-versions.mdx
@@ -0,0 +1,26 @@
+---
+title: "Client API: .NET Client versions"
+hide_table_of_contents: true
+sidebar_label: .NET Client Versions
+sidebar_position: 5
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import NetClientVersionsCsharp from './_net-client-versions-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/_category_.json b/versioned_docs/version-7.1/client-api/operations/_category_.json
new file mode 100644
index 0000000000..bb8ee2ccfb
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 7,
+  "label": "Operations"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/_what-are-operations-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-csharp.mdx
new file mode 100644
index 0000000000..e37c702e25
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-csharp.mdx
@@ -0,0 +1,771 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The RavenDB Client API is built with the notion of layers.
+  At the top, and what you will usually interact with, are the **[DocumentStore](../../client-api/what-is-a-document-store.mdx)**
+  and the **[Session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx)**.
+  They, in turn, are built on top of the lower-level **Operations** and **Commands** API.
+
+* **RavenDB provides direct access to this lower-level API**, allowing you to send requests
+  directly to the server via DocumentStore Operations instead of using the higher-level Session API.
+
+* In this page:
+  * [Why use operations](../../client-api/operations/what-are-operations.mdx#why-use-operations)
+  * [How operations work](../../client-api/operations/what-are-operations.mdx#how-operations-work)
+  * **Operation types**:
+      * [Common operations](../../client-api/operations/what-are-operations.mdx#common-operations)
+      * [Maintenance operations](../../client-api/operations/what-are-operations.mdx#maintenance-operations)
+      * [Server-maintenance operations](../../client-api/operations/what-are-operations.mdx#server-maintenance-operations)
+  * [Manage lengthy operations](../../client-api/operations/what-are-operations.mdx#manage-lengthy-operations)
+      * [Wait for completion](../../client-api/operations/what-are-operations.mdx#wait-for-completion)
+      * [Kill operation](../../client-api/operations/what-are-operations.mdx#kill-operation)
+
+
+## Why use operations
+
+* Operations provide **management functionality** that is not available in the context of the session, for example:
+  * Create/delete a database
+  * Execute administrative tasks
+  * Assign permissions
+  * Change server configuration, and more.
+
+* The operations are executed on the DocumentStore and are not part of the session transaction.
+
+* There are some client tasks, such as patching documents, that can be carried out either via the Session ([session.Advanced.Patch()](../../client-api/operations/patching/single-document.mdx#array-manipulation))
+  or via an Operation on the DocumentStore ([PatchOperation](../../client-api/operations/patching/single-document.mdx#operations-api)).
+
+
+
+## How operations work
+
+* **Sending the request**:
+  Each Operation is an encapsulation of a `RavenCommand`.
+  The RavenCommand creates the HTTP request message to be sent to the relevant server endpoint.
+  The DocumentStore `OperationExecutor` sends the request and processes the results.
+* **Target node**:
+  By default, the operation will be executed on the server node that is defined by the [client configuration](../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node).
+  However, server-maintenance operations can be executed on a specific node by using the [ForNode](../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) method.
+* **Target database**:
+  By default, operations work on the default database defined in the DocumentStore.
+  However, common operations & maintenance operations can operate on a different database by using the [ForDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) method.
+* **Transaction scope**:
+  Operations execute as a single-node transaction.
+  If needed, data will then replicate to the other nodes in the database-group.
+* **Background operations**:
+  Some operations may take a long time to complete and can be awaited for completion.
+  Learn more [below](../../client-api/operations/what-are-operations.mdx#wait-for-completion).
+
+
+
+## Common operations
+
+* All common operations implement the `IOperation` interface.
+  The operation is executed within the **database scope**.
+  Use [ForDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) to operate on a specific database other than the default defined in the store.
+
+* These operations include set-based operations such as _PatchOperation_, _CounterBatchOperation_,
+  document-extensions related operations such as getting/putting an attachment, and more.
+  See all available operations [below](../../client-api/operations/what-are-operations#the-following-common-operations-are-available).
+
+* To execute a common operation request,
+  use the `Send` method on the `Operations` property of the DocumentStore.
+
+#### Example:
+
+
+
+{`// Define operation, e.g. get all counters info for a document
+IOperation<CountersDetail> getCountersOp = new GetCountersOperation("products/1-A");
+
+// Execute the operation by passing the operation to Operations.Send
+CountersDetail allCountersResult = documentStore.Operations.Send(getCountersOp);
+
+// Access the operation result
+int numberOfCounters = allCountersResult.Counters.Count;
+`}
+
+
+
+{`// Define operation, e.g. get all counters info for a document
+IOperation<CountersDetail> getCountersOp = new GetCountersOperation("products/1-A");
+
+// Execute the operation by passing the operation to Operations.Send
+CountersDetail allCountersResult = await documentStore.Operations.SendAsync(getCountersOp);
+
+// Access the operation result
+int numberOfCounters = allCountersResult.Counters.Count;
+`}
+
+
+
+##### Syntax:
+
+
+
+{`// Available overloads:
+void Send(IOperation operation, SessionInfo sessionInfo = null);
+TResult Send<TResult>(IOperation<TResult> operation, SessionInfo sessionInfo = null);
+Operation Send(IOperation<OperationIdResult> operation, SessionInfo sessionInfo = null);
+
+PatchStatus Send(PatchOperation operation);
+PatchOperation.Result<TEntity> Send<TEntity>(PatchOperation<TEntity> operation);
+`}
+
+
+
+{`// Available overloads:
+Task SendAsync(IOperation operation,
+    CancellationToken token = default(CancellationToken), SessionInfo sessionInfo = null);
+Task<TResult> SendAsync<TResult>(IOperation<TResult> operation,
+    CancellationToken token = default(CancellationToken), SessionInfo sessionInfo = null);
+Task<Operation> SendAsync(IOperation<OperationIdResult> operation,
+    CancellationToken token = default(CancellationToken), SessionInfo sessionInfo = null);
+
+Task<PatchStatus> SendAsync(PatchOperation operation,
+    CancellationToken token = default(CancellationToken));
+Task<PatchOperation.Result<TEntity>> SendAsync<TEntity>(PatchOperation<TEntity> operation,
+    CancellationToken token = default(CancellationToken));
+`}
+
+
+
+
+
+#### The following common operations are available:
+
+* **Attachments**:
+        [PutAttachmentOperation](../../client-api/operations/attachments/put-attachment.mdx)
+        [GetAttachmentOperation](../../client-api/operations/attachments/get-attachment.mdx)
+        [DeleteAttachmentOperation](../../client-api/operations/attachments/delete-attachment.mdx)
+
+* **Counters**:
+        [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx)
+        [GetCountersOperation](../../client-api/operations/counters/get-counters.mdx)
+
+* **Time series**:
+        [TimeSeriesBatchOperation](../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx)
+        [GetMultipleTimeSeriesOperation](../../document-extensions/timeseries/client-api/operations/get.mdx)
+        [GetTimeSeriesOperation](../../document-extensions/timeseries/client-api/operations/get.mdx)
+        GetTimeSeriesStatisticsOperation
+
+* **Revisions**:
+        [GetRevisionsOperation](../../document-extensions/revisions/client-api/operations/get-revisions.mdx)
+        [RevertRevisionsByIdOperation](../../document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx)
+
+* **Patching**:
+        [PatchOperation](../../client-api/operations/patching/single-document.mdx)
+        [PatchByQueryOperation](../../client-api/operations/patching/set-based.mdx)
+
+* **Delete by query**:
+        [DeleteByQueryOperation](../../client-api/operations/common/delete-by-query.mdx)
+
+* **Compare-exchange**:
+        [PutCompareExchangeValueOperation](../../compare-exchange/create-cmpxchg-items#create-item-using-a-store-operation)
+        [GetCompareExchangeValueOperation](../../compare-exchange/get-cmpxchg-item#get-item-using-a-store-operation)
+        [GetCompareExchangeValuesOperation](../../compare-exchange/get-cmpxchg-items)
+        [DeleteCompareExchangeValueOperation](../../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-store-operation)
+
+
+
+
+## Maintenance operations
+
+* All maintenance operations implement the `IMaintenanceOperation` interface.
+  The operation is executed within the **database scope**.
+  Use [ForDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) to operate on a specific database other than the default defined in the store.
+
+* These operations include database management operations such as setting client configuration,
+  managing indexes & ongoing-tasks operations, getting stats, and more.
+  See all available maintenance operations [below](../../client-api/operations/what-are-operations#the-following-maintenance-operations-are-available).
+
+* To execute a maintenance operation request,
+  use the `Send` method on the `Maintenance` property in the DocumentStore.
+
+#### Example:
+
+
+
+{`// Define operation, e.g. stop an index
+IMaintenanceOperation stopIndexOp = new StopIndexOperation("Orders/ByCompany");
+
+// Execute the operation by passing the operation to Maintenance.Send
+documentStore.Maintenance.Send(stopIndexOp);
+
+// This specific operation returns void
+// You can send another operation to verify the index running status
+IMaintenanceOperation<IndexStats> indexStatsOp = new GetIndexStatisticsOperation("Orders/ByCompany");
+IndexStats indexStats = documentStore.Maintenance.Send(indexStatsOp);
+IndexRunningStatus status = indexStats.Status; // will be "Paused"
+`}
+
+
+
+{`// Define operation, e.g. stop an index
+IMaintenanceOperation stopIndexOp = new StopIndexOperation("Orders/ByCompany");
+
+// Execute the operation by passing the operation to Maintenance.Send
+await documentStore.Maintenance.SendAsync(stopIndexOp);
+
+// This specific operation returns void
+// You can send another operation to verify the index running status
+IMaintenanceOperation<IndexStats> indexStatsOp = new GetIndexStatisticsOperation("Orders/ByCompany");
+IndexStats indexStats = await documentStore.Maintenance.SendAsync(indexStatsOp);
+IndexRunningStatus status = indexStats.Status; // will be "Paused"
+`}
+
+
+
+##### Syntax:
+
+
+
+{`// Available overloads:
+void Send(IMaintenanceOperation operation);
+TResult Send<TResult>(IMaintenanceOperation<TResult> operation);
+Operation Send(IMaintenanceOperation<OperationIdResult> operation);
+`}
+
+
+
+{`// Available overloads:
+Task SendAsync(IMaintenanceOperation operation,
+    CancellationToken token = default(CancellationToken));
+Task<TResult> SendAsync<TResult>(IMaintenanceOperation<TResult> operation,
+    CancellationToken token = default(CancellationToken));
+Task<Operation> SendAsync(IMaintenanceOperation<OperationIdResult> operation,
+    CancellationToken token = default(CancellationToken));
+`}
+
+
+
+
+
+#### The following maintenance operations are available:
+
+* **Statistics**:
+        [GetStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-database-stats)
+        [GetDetailedStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-detailed-database-stats)
+        [GetCollectionStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-collection-stats)
+        [GetDetailedCollectionStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-detailed-collection-stats)
+
+* **Client Configuration**:
+        [PutClientConfigurationOperation](../../client-api/operations/maintenance/configuration/put-client-configuration.mdx)
+        [GetClientConfigurationOperation](../../client-api/operations/maintenance/configuration/get-client-configuration.mdx)
+
+* **Indexes**:
+        [PutIndexesOperation](../../client-api/operations/maintenance/indexes/put-indexes.mdx)
+        
[SetIndexesLockOperation](../../client-api/operations/maintenance/indexes/set-index-lock.mdx) +        [SetIndexesPriorityOperation](../../client-api/operations/maintenance/indexes/set-index-priority.mdx) +        [GetIndexErrorsOperation](../../client-api/operations/maintenance/indexes/get-index-errors.mdx) +        [GetIndexOperation](../../client-api/operations/maintenance/indexes/get-index.mdx) +        [GetIndexesOperation](../../client-api/operations/maintenance/indexes/get-indexes.mdx) +        [GetTermsOperation](../../client-api/operations/maintenance/indexes/get-terms.mdx) +        GetIndexPerformanceStatisticsOperation +        GetIndexStatisticsOperation +        GetIndexesStatisticsOperation +        GetIndexingStatusOperation +        GetIndexStalenessOperation +        [GetIndexNamesOperation](../../client-api/operations/maintenance/indexes/get-index-names.mdx) +        [StartIndexOperation](../../client-api/operations/maintenance/indexes/start-index.mdx) +        [StartIndexingOperation](../../client-api/operations/maintenance/indexes/start-indexing.mdx) +        [StopIndexOperation](../../client-api/operations/maintenance/indexes/stop-index.mdx) +        [StopIndexingOperation](../../client-api/operations/maintenance/indexes/stop-indexing.mdx) +        [ResetIndexOperation](../../client-api/operations/maintenance/indexes/reset-index.mdx) +        [DeleteIndexOperation](../../client-api/operations/maintenance/indexes/delete-index.mdx) +        [DeleteIndexErrorsOperation](../../client-api/operations/maintenance/indexes/delete-index-errors.mdx) +        [DisableIndexOperation](../../client-api/operations/maintenance/indexes/disable-index.mdx) +        [EnableIndexOperation](../../client-api/operations/maintenance/indexes/enable-index.mdx) +        [IndexHasChangedOperation](../../client-api/operations/maintenance/indexes/index-has-changed.mdx) + +* **Analyzers**: +        [PutAnalyzersOperation](../../indexes/using-analyzers.mdx#add-custom-analyzer-via-client-api) +        DeleteAnalyzerOperation + +* **Ongoing tasks**: +        [GetOngoingTaskInfoOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#get-ongoing-task-info) +        [ToggleOngoingTaskStateOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#toggle-ongoing-task-state) +        [DeleteOngoingTaskOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#delete-ongoing-task) + +* **ETL tasks**: +        AddEtlOperation +        UpdateEtlOperation +        [ResetEtlOperation](../../client-api/operations/maintenance/etl/reset-etl.mdx) + +* **AI tasks**: +        [AddEmbeddingsGenerationOperation](../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#configuring-an-embeddings-generation-task---from-the-client-api) + +* **Replication tasks**: +        PutPullReplicationAsHubOperation +        GetPullReplicationTasksInfoOperation +        GetReplicationHubAccessOperation +        GetReplicationPerformanceStatisticsOperation +        RegisterReplicationHubAccessOperation +        UnregisterReplicationHubAccessOperation +        UpdateExternalReplicationOperation +        UpdatePullReplicationAsSinkOperation + +* **Backup**: +        BackupOperation +        GetPeriodicBackupStatusOperation +        StartBackupOperation +        UpdatePeriodicBackupOperation + +* **Connection strings**: +        
[PutConnectionStringOperation](../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx) +        [RemoveConnectionStringOperation](../../client-api/operations/maintenance/connection-strings/remove-connection-string.mdx) +        [GetConnectionStringsOperation](../../client-api/operations/maintenance/connection-strings/get-connection-string.mdx) + +* **Transaction recording**: +        StartTransactionsRecordingOperation +        StopTransactionsRecordingOperation +        ReplayTransactionsRecordingOperation + +* **Database settings**: +        [PutDatabaseSettingsOperation](../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#put-database-settings-operation) +        [GetDatabaseSettingsOperation](../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#get-database-settings-operation) + +* **Identities**: +        [GetIdentitiesOperation](../../client-api/operations/maintenance/identities/get-identities.mdx) +        [NextIdentityForOperation](../../client-api/operations/maintenance/identities/increment-next-identity.mdx) +        [SeedIdentityForOperation](../../client-api/operations/maintenance/identities/seed-identity.mdx) + +* **Time series**: +        ConfigureTimeSeriesOperation +        ConfigureTimeSeriesPolicyOperation +        ConfigureTimeSeriesValueNamesOperation +        RemoveTimeSeriesPolicyOperation + +* **Revisions**: +        [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) +        [DeleteRevisionsOperation](../../document-extensions/revisions/client-api/operations/delete-revisions.mdx) +        [ConfigureRevisionsBinCleanerOperation](../../document-extensions/revisions/revisions-bin-cleaner.mdx#setting-the-revisions-bin-cleaner---from-the-client-api) + +* **Sorters**: +        [PutSortersOperation](../../client-api/operations/maintenance/sorters/put-sorter.mdx) +        DeleteSorterOperation + +* **Sharding**: +        [AddPrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#add-prefixes-after-database-creation) +        [DeletePrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#removing-prefixes) +        [UpdatePrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#updating-shard-configurations-for-prefixes) + +* **Misc**: +        ConfigureExpirationOperation +        ConfigureRefreshOperation +        [ConfigureDataArchivalOperation](../../data-archival/enable-data-archiving.mdx#enable-archiving---from-the-client-api) +        UpdateDocumentsCompressionConfigurationOperation +        DatabaseHealthCheckOperation +        GetOperationStateOperation +        CreateSampleDataOperation + + + + +## Server-maintenance operations + +* All server-maintenance operations implement the `IServerOperation` interface. + The operation is executed within the **server scope**. + Use [ForNode](../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) to operate on a specific node other than the default defined in the client configuration. + +* These operations include server management and configuration operations. + See all available operations [below](../../client-api/operations/what-are-operations#the-following-server-maintenance-operations-are-available). + +* To execute a server-maintenance operation request, + use the `Send` method on the `Maintenance.Server` property in the DocumentStore. 
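+
+As a side note, here is a minimal sketch of scoping a server-maintenance operation to a specific cluster node with the [ForNode](../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) method mentioned above (the node tag "A" is just an assumed example value):
+
+
+{`// Execute the operation on the cluster node whose tag is "A"
+var databaseNames = documentStore.Maintenance.Server.ForNode("A")
+    .Send(new GetDatabaseNamesOperation(0, 25));
+`}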
+
+#### Example:
+
+
+
+{`// Define operation, e.g. get the server build number
+IServerOperation<BuildNumber> getBuildNumberOp = new GetBuildNumberOperation();
+
+// Execute the operation by passing the operation to Maintenance.Server.Send
+BuildNumber buildNumberResult = documentStore.Maintenance.Server.Send(getBuildNumberOp);
+
+// Access the operation result
+int version = buildNumberResult.BuildVersion;
+`}
+
+
+
+{`// Define operation, e.g. get the server build number
+IServerOperation<BuildNumber> getBuildNumberOp = new GetBuildNumberOperation();
+
+// Execute the operation by passing the operation to Maintenance.Server.Send
+BuildNumber buildNumberResult = await documentStore.Maintenance.Server.SendAsync(getBuildNumberOp);
+
+// Access the operation result
+int version = buildNumberResult.BuildVersion;
+`}
+
+
+
+##### Syntax:
+
+
+
+{`// Available overloads:
+void Send(IServerOperation operation);
+TResult Send<TResult>(IServerOperation<TResult> operation);
+Operation Send(IServerOperation<OperationIdResult> operation);
+`}
+
+
+
+{`// Available overloads:
+Task SendAsync(IServerOperation operation,
+    CancellationToken token = default(CancellationToken));
+Task<TResult> SendAsync<TResult>(IServerOperation<TResult> operation,
+    CancellationToken token = default(CancellationToken));
+Task<Operation> SendAsync(IServerOperation<OperationIdResult> operation,
+    CancellationToken token = default(CancellationToken));
+`}
+
+
+
+
+
+#### The following server-maintenance operations are available:
+
+* **Client certificates**:
+        [PutClientCertificateOperation](../../client-api/operations/server-wide/certificates/put-client-certificate.mdx)
+        [CreateClientCertificateOperation](../../client-api/operations/server-wide/certificates/create-client-certificate.mdx)
+        [GetCertificatesOperation](../../client-api/operations/server-wide/certificates/get-certificates.mdx)
+        [DeleteCertificateOperation](../../client-api/operations/server-wide/certificates/delete-certificate.mdx)
+        EditClientCertificateOperation
+        GetCertificateMetadataOperation
+        ReplaceClusterCertificateOperation
+
+* **Server-wide client configuration**:
+        [PutServerWideClientConfigurationOperation](../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx)
+        [GetServerWideClientConfigurationOperation](../../client-api/operations/server-wide/configuration/get-serverwide-client-configuration.mdx)
+
+* **Database management**:
+        [CreateDatabaseOperation](../../client-api/operations/server-wide/create-database.mdx)
+        [DeleteDatabasesOperation](../../client-api/operations/server-wide/delete-database.mdx)
+        [ToggleDatabasesStateOperation](../../client-api/operations/server-wide/toggle-databases-state.mdx)
+        [GetDatabaseNamesOperation](../../client-api/operations/server-wide/get-database-names.mdx)
+        [AddDatabaseNodeOperation](../../client-api/operations/server-wide/add-database-node.mdx)
+        [PromoteDatabaseNodeOperation](../../client-api/operations/server-wide/promote-database-node.mdx)
+        [ReorderDatabaseMembersOperation](../../client-api/operations/server-wide/reorder-database-members.mdx)
+        [CompactDatabaseOperation](../../client-api/operations/server-wide/compact-database.mdx)
+        GetDatabaseRecordOperation
+        SetDatabasesLockOperation
+        CreateDatabaseOperationWithoutNameValidation
+        SetDatabaseDynamicDistributionOperation
+        ModifyDatabaseTopologyOperation
+        UpdateDatabaseOperation
+        UpdateUnusedDatabasesOperation
+
+* **Server-wide ongoing
tasks**: +        DeleteServerWideTaskOperation +        ToggleServerWideTaskStateOperation + +* **Server-wide replication tasks**: +        PutServerWideExternalReplicationOperation +        GetServerWideExternalReplicationOperation +        GetServerWideExternalReplicationsOperation + +* **Server-wide backup tasks**: +        PutServerWideBackupConfigurationOperation +        GetServerWideBackupConfigurationOperation +        GetServerWideBackupConfigurationsOperation +        RestoreBackupOperation + +* **Server-wide analyzers**: +        [PutServerWideAnalyzersOperation](../../indexes/using-analyzers.mdx#add-custom-analyzer-via-client-api) +        DeleteServerWideAnalyzerOperation + +* **Server-wide sorters**: +        [PutServerWideSortersOperation](../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx) +        DeleteServerWideSorterOperation + +* **Logs & debug**: +        SetLogsConfigurationOperation +        GetLogsConfigurationOperation +        GetClusterDebugInfoPackageOperation +        [GetBuildNumberOperation](../../client-api/operations/server-wide/get-build-number.mdx) +        GetServerWideOperationStateOperation + +* **Traffic watch**: +        PutTrafficWatchConfigurationOperation +        GetTrafficWatchConfigurationOperation + +* **Revisions**: +        [ConfigureRevisionsForConflictsOperation](../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx) + +* **Misc**: +        ModifyConflictSolverOperation +        OfflineMigrationOperation + + + + +## Manage lengthy operations + +* Some operations that run in the server background may take a long time to complete. + +* For Operations that implement an interface with type `OperationIdResult`, + executing the operation via the `Send` method will return an `Operation` object, + which can be **awaited for completion** or **aborted (killed)**. +#### Wait for completion: + + + + +{`public void WaitForCompletionWithTimeout( + TimeSpan timeout, + DocumentStore documentStore) +{ + // Define operation, e.g. delete all discontinued products + // Note: This operation implements interface: 'IOperation' + IOperation deleteByQueryOp = + new DeleteByQueryOperation("from Products where Discontinued = true"); + + // Execute the operation + // Send returns an 'Operation' object that can be awaited on + Operation operation = documentStore.Operations.Send(deleteByQueryOp); + + try + { + // Call method 'WaitForCompletion' to wait for the operation to complete. + // If a timeout is specified, the method will only wait for the specified time frame. + BulkOperationResult result = + (BulkOperationResult)operation.WaitForCompletion(timeout); + + // The operation has finished within the specified timeframe + long numberOfItemsDeleted = result.Total; // Access the operation result + } + catch (TimeoutException e) + { + // The operation did not finish within the specified timeframe + } +} +`} + + + + +{`public async Task WaitForCompletionWithTimeoutAsync( + TimeSpan timeout, + DocumentStore documentStore) +{ + // Define operation, e.g. 
delete all discontinued products + // Note: This operation implements interface: 'IOperation' + IOperation deleteByQueryOp = + new DeleteByQueryOperation("from Products where Discontinued = true"); + + // Execute the operation + // SendAsync returns an 'Operation' object that can be awaited on + Operation operation = await documentStore.Operations.SendAsync(deleteByQueryOp); + + try + { + // Call method 'WaitForCompletionAsync' to wait for the operation to complete. + // If a timeout is specified, the method will only wait for the specified time frame. + BulkOperationResult result = + await operation.WaitForCompletionAsync(timeout) + .ConfigureAwait(false) as BulkOperationResult; + + // The operation has finished within the specified timeframe + long numberOfItemsDeleted = result.Total; // Access the operation result + } + catch (TimeoutException e) + { + // The operation did Not finish within the specified timeframe + } +} +`} + + + + +{`public void WaitForCompletionWithCancellationToken( + CancellationToken token, + DocumentStore documentStore) +{ + // Define operation, e.g. delete all discontinued products + // Note: This operation implements interface: 'IOperation' + IOperation deleteByQueryOp = + new DeleteByQueryOperation("from Products where Discontinued = true"); + + // Execute the operation + // Send returns an 'Operation' object that can be awaited on + Operation operation = documentStore.Operations.Send(deleteByQueryOp); + + try + { + // Call method 'WaitForCompletion' to wait for the operation to complete. + // Pass a CancellationToken in order to stop waiting upon a cancellation request. + BulkOperationResult result = + (BulkOperationResult)operation.WaitForCompletion(token); + + // The operation has finished, no cancellation request was made + long numberOfItemsDeleted = result.Total; // Access the operation result + } + catch (TimeoutException e) + { + // The operation did not finish at cancellation time + } +} +`} + + + + +{`public async Task WaitForCompletionWithCancellationTokenAsync( + CancellationToken token, + DocumentStore documentStore) +{ + // Define operation, e.g. delete all discontinued products + // Note: This operation implements interface: 'IOperation' + IOperation deleteByQueryOp = + new DeleteByQueryOperation("from Products where Discontinued = true"); + + // Execute the operation + // SendAsync returns an 'Operation' object that can be awaited on + Operation operation = await documentStore.Operations.SendAsync(deleteByQueryOp); + + try + { + // Call method 'WaitForCompletionAsync' to wait for the operation to complete. + // Pass a CancellationToken in order to stop waiting upon a cancellation request. + BulkOperationResult result = + await operation.WaitForCompletionAsync(token) + .ConfigureAwait(false) as BulkOperationResult; + + // The operation has finished, no cancellation request was made + long numberOfItemsDeleted = result.Total; // Access the operation result + } + catch (TimeoutException e) + { + // The operation did Not finish at cancellation time + } +} +`} + + + + +##### Syntax: + + + + +{`// Available overloads: +public IOperationResult WaitForCompletion(TimeSpan? timeout = null) +public IOperationResult WaitForCompletion(CancellationToken token) + +public TResult WaitForCompletion(TimeSpan? timeout = null) + where TResult : IOperationResult +public TResult WaitForCompletion(CancellationToken token) + where TResult : IOperationResult +`} + + + + +{`// Available overloads: +public Task WaitForCompletionAsync(TimeSpan? 
timeout = null)
+public Task<IOperationResult> WaitForCompletionAsync(CancellationToken token)
+
+public async Task<TResult> WaitForCompletionAsync<TResult>(TimeSpan? timeout = null)
+    where TResult : IOperationResult
+public async Task<TResult> WaitForCompletionAsync<TResult>(CancellationToken token)
+    where TResult : IOperationResult
+`}
+
+
+
+
+| Parameter | Type | Description |
+|-------------|---------------------|-------------|
+| **timeout** | `TimeSpan` | • When a timespan is specified -<br/>`WaitForCompletion` will throw a `TimeoutException` if the operation has not completed within the specified time frame.<br/>The operation itself continues to run in the background; no rollback action takes place.<br/>• `null` -<br/>`WaitForCompletion` will wait for the operation to complete indefinitely. |
+| **token** | `CancellationToken` | • When a cancellation token is specified -<br/>`WaitForCompletion` will throw a `TimeoutException` if the operation has not completed by cancellation time.<br/>The operation itself continues to run in the background; no rollback action takes place.
| + +| Return type | | +|--------------------|-------------------------------| +| `IOperationResult` | The operation result content. | + +#### Kill operation: + + + + +{`// Define operation, e.g. delete all discontinued products +// Note: This operation implements interface: 'IOperation' +IOperation deleteByQueryOp = + new DeleteByQueryOperation("from Products where Discontinued = true"); + +// Execute the operation +// Send returns an 'Operation' object that can be 'killed' +Operation operation = documentStore.Operations.Send(deleteByQueryOp); + +// Call 'Kill' to abort operation +operation.Kill(); +`} + + + + +{`// Define operation, e.g. delete all discontinued products +// Note: This operation implements interface: 'IOperation' +IOperation deleteByQueryOp = + new DeleteByQueryOperation("from Products where Discontinued = true"); + +// Execute the operation +// SendAsync returns an 'Operation' object that can be 'killed' +Operation operation = await documentStore.Operations.SendAsync(deleteByQueryOp); + +// Call 'KillAsync' to abort operation +await operation.KillAsync(); + +// Assert that operation is no longer running +await Assert.ThrowsAsync(() => + operation.WaitForCompletionAsync(TimeSpan.FromSeconds(30))); +`} + + + + +##### Syntax: + + + +{`// Available overloads: +public void Kill() +public async Task KillAsync(CancellationToken token = default) +`} + + + +| Parameter | Type | Description | +|-------------|---------------------|----------------------------------------------------------------------| +| **token** | `CancellationToken` | Provide a cancellation token if needed to abort the KillAsync method | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/_what-are-operations-java.mdx b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-java.mdx new file mode 100644 index 0000000000..e73c3f9ba8 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-java.mdx @@ -0,0 +1,203 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The RavenDB client API is built with the notion of layers. At the top, and what you will usually interact with, are the **[DocumentStore](../../client-api/what-is-a-document-store.mdx)** and the **[DocumentSession](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx)**. + +They, in turn, are built on top of the notion of Operations and Commands. + +Operations are an encapsulation of a set of low-level commands which are used to manipulate data, execute administrative tasks, and change the configuration on a server. + +They are available in the DocumentStore under the **operations**, **maintenance**, and **maintenance().server** methods. + +## Common Operations + +Common operations include set-based operations for [Patching](../../client-api/operations/patching/set-based.mdx) or removal of documents by using queries (more can be read [here](../../client-api/operations/common/delete-by-query.mdx)). +There is also the ability to handle distributed [Compare Exchange](../../client-api/operations/compare-exchange/overview.mdx) operations and manage [Attachments](../../client-api/operations/attachments/get-attachment.mdx) and [Counters](../../client-api/operations/counters/counter-batch.mdx). + +### How to Send an Operation + +In order to execute an operation, you will need to use the `send` or `sendAsync` methods. 
Available overloads are:
+
+
+
+{`public void send(IVoidOperation operation)
+
+public void send(IVoidOperation operation, SessionInfo sessionInfo)
+
+public <TResult> TResult send(IOperation<TResult> operation)
+
+public <TResult> TResult send(IOperation<TResult> operation, SessionInfo sessionInfo)
+
+public PatchStatus send(PatchOperation operation, SessionInfo sessionInfo)
+
+public <TEntity> PatchOperation.Result<TEntity> send(Class<TEntity> entityClass, PatchOperation operation, SessionInfo sessionInfo)
+`}
+
+
+
+{`public Operation sendAsync(IOperation<OperationIdResult> operation)
+
+public Operation sendAsync(IOperation<OperationIdResult> operation, SessionInfo sessionInfo)
+`}
+
+
+
+### The following operations are available:
+
+#### Compare Exchange
+
+* [CompareExchange](../../compare-exchange/overview)
+
+#### Attachments
+
+* [GetAttachmentOperation](../../client-api/operations/attachments/get-attachment.mdx)
+* [PutAttachmentOperation](../../client-api/operations/attachments/put-attachment.mdx)
+* [DeleteAttachmentOperation](../../client-api/operations/attachments/delete-attachment.mdx)
+
+#### Patching
+
+* [PatchByQueryOperation](../../client-api/operations/patching/set-based.mdx)
+* [PatchOperation](../../client-api/operations/patching/single-document.mdx)
+
+
+#### Counters
+
+* [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx)
+* [GetCountersOperation](../../client-api/operations/counters/get-counters.mdx)
+
+
+#### Misc
+
+* [DeleteByQueryOperation](../../client-api/operations/common/delete-by-query.mdx)
+
+### Example - Get Attachment
+
+
+
+{`try (CloseableAttachmentResult fetchedAttachment = store
+    .operations()
+    .send(new GetAttachmentOperation("users/1", "file.txt", AttachmentType.DOCUMENT, null))) \{
+    // do stuff with the attachment stream --> fetchedAttachment.data
+\}
+`}
+
+
+
+
+
+## Maintenance Operations
+
+Maintenance operations include operations for changing the configuration at runtime and for management of index operations.
+
+### How to Send an Operation
+
+
+
+{`public void send(IVoidMaintenanceOperation operation)
+
+public <TResult> TResult send(IMaintenanceOperation<TResult> operation)
+`}
+
+
+
+### The following maintenance operations are available:
+
+#### Client Configuration
+
+* [PutClientConfigurationOperation](../../client-api/operations/maintenance/configuration/put-client-configuration.mdx)
+* [GetClientConfigurationOperation](../../client-api/operations/maintenance/configuration/get-client-configuration.mdx)
+
+#### Indexing
+
+* [DeleteIndexOperation](../../client-api/operations/maintenance/indexes/delete-index.mdx)
+* [DisableIndexOperation](../../client-api/operations/maintenance/indexes/disable-index.mdx)
+* [EnableIndexOperation](../../client-api/operations/maintenance/indexes/enable-index.mdx)
+* [ResetIndexOperation](../../client-api/operations/maintenance/indexes/reset-index.mdx)
+* [SetIndexesLockOperation](../../client-api/operations/maintenance/indexes/set-index-lock.mdx)
+* [SetIndexesPriorityOperation](../../client-api/operations/maintenance/indexes/set-index-priority.mdx)
+* [StartIndexOperation](../../client-api/operations/maintenance/indexes/start-index.mdx)
+* [StartIndexingOperation](../../client-api/operations/maintenance/indexes/start-indexing.mdx)
+* [StopIndexOperation](../../client-api/operations/maintenance/indexes/stop-index.mdx)
+* [StopIndexingOperation](../../client-api/operations/maintenance/indexes/stop-indexing.mdx)
+* [GetIndexErrorsOperation](../../client-api/operations/maintenance/indexes/get-index-errors.mdx)
+* [GetIndexOperation](../../client-api/operations/maintenance/indexes/get-index.mdx)
+* [GetIndexesOperation](../../client-api/operations/maintenance/indexes/get-indexes.mdx)
+* [GetTermsOperation](../../client-api/operations/maintenance/indexes/get-terms.mdx)
+* [IndexHasChangedOperation](../../client-api/operations/maintenance/indexes/index-has-changed.mdx)
+* [PutIndexesOperation](../../client-api/operations/maintenance/indexes/put-indexes.mdx)
+
+#### Misc
+
+* [GetCollectionStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx)
+* [GetStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx)
+* [GetIdentitiesOperation](../../client-api/operations/maintenance/identities/get-identities.mdx)
+
+### Example - Stop Index
+
+
+
+{`store.maintenance().send(new StopIndexOperation("Orders/ByCompany"));
+`}
+
+
+
+
+
+## Server Operations
+
+These types of operations contain various administrative and miscellaneous configuration operations.
+
+### How to Send an Operation
+
+
+
+
+{`public void send(IVoidServerOperation operation)
+
+public <TResult> TResult send(IServerOperation<TResult> operation)
+`}
+
+
+
+{`public Operation sendAsync(IServerOperation<OperationIdResult> operation)
+`}
+
+
+
+### The following server-wide operations are available:
+
+
+#### Cluster Management
+
+* [CreateDatabaseOperation](../../client-api/operations/server-wide/create-database.mdx)
+* [DeleteDatabasesOperation](../../client-api/operations/server-wide/delete-database.mdx)
+
+#### Miscellaneous
+
+* [GetDatabaseNamesOperation](../../client-api/operations/server-wide/get-database-names.mdx)
+
+### Example - Get Build Number
+
+
+
+{`BuildNumber buildNumber = store.maintenance().server()
+    .send(new GetBuildNumberOperation());
+`}
+
+
+
+
+
+## Remarks
+
+
+By default, operations available in `store.operations` or `store.maintenance` operate on the default database that was set up for that store.
To switch operations to a different database that is available on that server use the **[forDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx)** method. + + + diff --git a/versioned_docs/version-7.1/client-api/operations/_what-are-operations-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-nodejs.mdx new file mode 100644 index 0000000000..875d0f0fa4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-nodejs.mdx @@ -0,0 +1,511 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The RavenDB Client API is built with the notion of layers. + At the top, and what you will usually interact with, are the **[DocumentStore](../../client-api/what-is-a-document-store.mdx)** + and the **[Session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx)**. + They, in turn, are built on top of the lower-level **Operations** and **Commands** API. + +* **RavenDB provides direct access to this lower-level API**, allowing you to send requests + directly to the server via DocumentStore Operations instead of using the higher-level Session API. + +* In this page: + * [Why use operations](../../client-api/operations/what-are-operations.mdx#why-use-operations) + * [How operations work](../../client-api/operations/what-are-operations.mdx#how-operations-work) + * __Operation types__: + * [Common operations](../../client-api/operations/what-are-operations.mdx#common-operations) + * [Maintenance operations](../../client-api/operations/what-are-operations.mdx#maintenance-operations) + * [Server-maintenance operations](../../client-api/operations/what-are-operations.mdx#server-maintenance-operations) + * [Manage lengthy operations](../../client-api/operations/what-are-operations.mdx#manage-lengthy-operations) + * [Wait for completion](../../client-api/operations/what-are-operations.mdx#wait-for-completion) + * [Kill operation](../../client-api/operations/what-are-operations.mdx#killoperation) + + +## Why use operations + +* Operations provide __management functionality__ that is not available in the context of the session, for example: + * Create/delete a database + * Execute administrative tasks + * Assign permissions + * Change server configuration, and more. + +* The operations are executed on the DocumentStore and are not part of the session transaction. + +* There are some client tasks, such as patching documents, that can be carried out either via the Session ([session.advanced.patch()](../../client-api/operations/patching/single-document.mdx#array-manipulation)) + or via an Operation on the DocumentStore ([PatchOperation](../../client-api/operations/patching/single-document.mdx#operations-api)). + + + +## How operations work + +* __Sending the request__: + Each Operation creates an HTTP request message to be sent to the relevant server endpoint. + The DocumentStore `OperationExecutor` sends the request and processes the results. +* __Target node__: + By default, the operation will be executed on the server node that is defined by the [client configuration](../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, server-maintenance operations can be executed on a specific node by using the [forNode](../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) method. 
+* __Target database__: + By default, operations work on the default database defined in the DocumentStore. + However, common operations & maintenance operations can operate on a different database by using the [forDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) method. +* __Transaction scope__: + Operations execute as a single-node transaction. + If needed, data will then replicate to the other nodes in the database-group. +* __Background operations__: + Some operations may take a long time to complete and can be awaited for completion. + Learn more [below](../../client-api/operations/what-are-operations.mdx#wait-for-completion). + + + +## Common operations + + + +* All common operations implement the `IOperation` interface. + The operation is executed within the __database scope__. + Use [forDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) to operate on a specific database other than the default defined in the store. + +* These operations include set-based operations such as _PatchOperation_, _CounterBatchOperation_, + document-extensions related operations such as getting/putting an attachment, and more. + See all available operations [below](../../client-api/operations/what-are-operations#the-following-common-operations-are-available). + +* To execute a common operation request, + use the `send` method on the `operations` property of the DocumentStore. + +__Example__: + + + +{`// Define operation, e.g. get all counters info for a document +const getCountersOp = new GetCountersOperation("products/1-A"); + +// Execute the operation by passing the operation to operations.send +const allCountersResult = await documentStore.operations.send(getCountersOp); + +// Access the operation result +const numberOfCounters = allCountersResult.counters.length; +`} + + + + + + + +__Send syntax__: + + + +{`// Available overloads: +await send(operation); +await send(operation, sessionInfo); +await send(operation, sessionInfo, documentType); + +await send(patchOperaton); +await send(patchOperation, sessionInfo); +await send(patchOperation, sessionInfo, resultType); +`} + + + + + + + +#### The following common operations are available: + +* __Attachments__: +        [PutAttachmentOperation](../../client-api/operations/attachments/put-attachment.mdx) +        [GetAttachmentOperation](../../client-api/operations/attachments/get-attachment.mdx) +        [DeleteAttachmentOperation](../../client-api/operations/attachments/delete-attachment.mdx) + +* __Counters__: +        [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx) +        [GetCountersOperation](../../client-api/operations/counters/get-counters.mdx) + +* __Time series__: +        TimeSeriesBatchOperation +        GetMultipleTimeSeriesOperation +        GetTimeSeriesOperation +        GetTimeSeriesStatisticsOperation + +* __Revisions__: +        [GetRevisionsOperation](../../document-extensions/revisions/client-api/operations/get-revisions.mdx) + +* __Patching__: +        [PatchOperation](../../client-api/operations/patching/single-document.mdx) +        [PatchByQueryOperation](../../client-api/operations/patching/set-based.mdx) + +* __Delete by query__: +        [DeleteByQueryOperation](../../client-api/operations/common/delete-by-query.mdx) + +* __Compare-exchange__: +        [PutCompareExchangeValueOperation](../../compare-exchange/create-cmpxchg-items#create-item-using-a-store-operation) +        
[GetCompareExchangeValueOperation](../../compare-exchange/get-cmpxchg-item#get-item-using-a-store-operation) +        [GetCompareExchangeValuesOperation](../../compare-exchange/get-cmpxchg-items) +        [DeleteCompareExchangeValueOperation](../../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-store-operation) + + + + +## Maintenance operations + + + +* All maintenance operations implement the `IMaintenanceOperation` interface. + The operation is executed within the __database scope__. + Use [forDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) to operate on a specific database other than the default defined in the store. + +* These operations include database management operations such as setting client configuration, + managing indexes & ongoing-tasks operations, getting stats, and more. + See all available maintenance operations [below](../../client-api/operations/what-are-operations#the-following-maintenance-operations-are-available). + +* To execute a maintenance operation request, + use the `send` method on the `maintenance` property in the DocumentStore. + +__Example__: + + + +{`// Define operation, e.g. stop an index +const stopIndexOp = new StopIndexOperation("Orders/ByCompany"); + +// Execute the operation by passing the operation to maintenance.send +await documentStore.maintenance.send(stopIndexOp); + +// This specific operation returns void +// You can send another operation to verify the index running status +const indexStatsOp = new GetIndexStatisticsOperation("Orders/ByCompany"); +const indexStats = await documentStore.maintenance.send(indexStatsOp); +const status = indexStats.status; // will be "Paused" +`} + + + + + + + +__Send syntax__: + + + +{`await send(operation); +`} + + + + + + + +#### The following maintenance operations are available: + +* __Statistics__: +        [GetStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-database-stats) +        [GetDetailedStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-detailed-database-stats) +        [GetCollectionStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-collection-stats) +        [GetDetailedCollectionStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-detailed-collection-stats) + +* __Client Configuration__: +        [PutClientConfigurationOperation](../../client-api/operations/maintenance/configuration/put-client-configuration.mdx) +        [GetClientConfigurationOperation](../../client-api/operations/maintenance/configuration/get-client-configuration.mdx) + +* __Indexes__: +        [PutIndexesOperation](../../client-api/operations/maintenance/indexes/put-indexes.mdx) +        [SetIndexesLockOperation](../../client-api/operations/maintenance/indexes/set-index-lock.mdx) +        [SetIndexesPriorityOperation](../../client-api/operations/maintenance/indexes/set-index-priority.mdx) +        [GetIndexErrorsOperation](../../client-api/operations/maintenance/indexes/get-index-errors.mdx) +        [GetIndexOperation](../../client-api/operations/maintenance/indexes/get-index.mdx) +        [GetIndexesOperation](../../client-api/operations/maintenance/indexes/get-indexes.mdx) +        [GetTermsOperation](../../client-api/operations/maintenance/indexes/get-terms.mdx) +        GetIndexPerformanceStatisticsOperation +        GetIndexStatisticsOperation +        GetIndexesStatisticsOperation +        GetIndexingStatusOperation +        
GetIndexStalenessOperation +        [GetIndexNamesOperation](../../client-api/operations/maintenance/indexes/get-index-names.mdx) +        [StartIndexOperation](../../client-api/operations/maintenance/indexes/start-index.mdx) +        [StartIndexingOperation](../../client-api/operations/maintenance/indexes/start-indexing.mdx) +        [StopIndexOperation](../../client-api/operations/maintenance/indexes/stop-index.mdx) +        [StopIndexingOperation](../../client-api/operations/maintenance/indexes/stop-indexing.mdx) +        [ResetIndexOperation](../../client-api/operations/maintenance/indexes/reset-index.mdx) +        [DeleteIndexOperation](../../client-api/operations/maintenance/indexes/delete-index.mdx) +        [DeleteIndexErrorsOperation](../../client-api/operations/maintenance/indexes/delete-index-errors.mdx) +        [DisableIndexOperation](../../client-api/operations/maintenance/indexes/disable-index.mdx) +        [EnableIndexOperation](../../client-api/operations/maintenance/indexes/enable-index.mdx) +        [IndexHasChangedOperation](../../client-api/operations/maintenance/indexes/index-has-changed.mdx) + +* __Analyzers__: +        [PutAnalyzersOperation](../../indexes/using-analyzers.mdx#add-custom-analyzer-via-client-api) +        DeleteAnalyzerOperation + +* **Ongoing tasks**: +        [GetOngoingTaskInfoOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#get-ongoing-task-info) +        [ToggleOngoingTaskStateOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#toggle-ongoing-task-state) +        [DeleteOngoingTaskOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#delete-ongoing-task) + +* __ETL tasks__: +        AddEtlOperation +        UpdateEtlOperation +        [ResetEtlOperation](../../client-api/operations/maintenance/etl/reset-etl.mdx) + +* __Replication tasks__: +        PutPullReplicationAsHubOperation +        GetPullReplicationTasksInfoOperation +        GetReplicationHubAccessOperation +        GetReplicationPerformanceStatisticsOperation +        RegisterReplicationHubAccessOperation +        UnregisterReplicationHubAccessOperation +        UpdateExternalReplicationOperation +        UpdatePullReplicationAsSinkOperation + +* __Backup__: +        BackupOperation +        GetPeriodicBackupStatusOperation +        StartBackupOperation +        UpdatePeriodicBackupOperation + +* __Connection strings__: +        [PutConnectionStringOperation](../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx) +        [RemoveConnectionStringOperation](../../client-api/operations/maintenance/connection-strings/remove-connection-string.mdx) +        [GetConnectionStringsOperation](../../client-api/operations/maintenance/connection-strings/get-connection-string.mdx) + +* __Transaction recording__: +        StartTransactionsRecordingOperation +        StopTransactionsRecordingOperation +        ReplayTransactionsRecordingOperation + +* __Database settings__: +        [PutDatabaseSettingsOperation](../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#put-database-settings-operation) +        [GetDatabaseSettingsOperation](../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#get-database-settings-operation) + +* __Identities__: +        [GetIdentitiesOperation](../../client-api/operations/maintenance/identities/get-identities.mdx) +        
[NextIdentityForOperation](../../client-api/operations/maintenance/identities/increment-next-identity.mdx) +        [SeedIdentityForOperation](../../client-api/operations/maintenance/identities/seed-identity.mdx) + +* __Time series__: +        ConfigureTimeSeriesOperation +        ConfigureTimeSeriesPolicyOperation +        ConfigureTimeSeriesValueNamesOperation +        RemoveTimeSeriesPolicyOperation + +* __Revisions__: +        [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) + +* __Sorters__: +        [PutSortersOperation](../../client-api/operations/maintenance/sorters/put-sorter.mdx) +        DeleteSorterOperation + +* **Sharding**: +        [AddPrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#add-prefixes-after-database-creation) +        [DeletePrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#removing-prefixes) +        [UpdatePrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#updating-shard-configurations-for-prefixes) + +* __Misc__: +        ConfigureExpirationOperation +        ConfigureRefreshOperation +        ConfigureDataArchivalOperation +        UpdateDocumentsCompressionConfigurationOperation +        DatabaseHealthCheckOperation +        GetOperationStateOperation +        CreateSampleDataOperation + + + + +## Server-maintenance operations + + + +* All server-maintenance operations implement the `IServerOperation` interface. + The operation is executed within the __server scope__. + Use [forNode](../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) to operate on a specific node other than the default defined in the client configuration. + +* These operations include server management and configuration operations. + See all available operations [below](../../client-api/operations/what-are-operations#the-following-server-maintenance-operations-are-available). + +* To execute a server-maintenance operation request, + use the `send` method on the `maintenance.server` property of the DocumentStore. + +__Example__: + + + +{`// Define operation, e.g. 
get the server build number +const getBuildNumberOp = new GetBuildNumberOperation(); + +// Execute the operation by passing the operation to maintenance.server.send +const buildNumberResult = await documentStore.maintenance.server.send(getBuildNumberOp); + +// Access the operation result +const version = buildNumberResult.buildVersion; +`} + + + + + + + +__Send syntax__: + + + +{`await send(operation); +`} + + + + + + + +#### The following server-maintenance operations are available: + +* __Client certificates__: +        [PutClientCertificateOperation](../../client-api/operations/server-wide/certificates/put-client-certificate.mdx) +        [CreateClientCertificateOperation](../../client-api/operations/server-wide/certificates/create-client-certificate.mdx) +        [GetCertificatesOperation](../../client-api/operations/server-wide/certificates/get-certificates.mdx) +        [DeleteCertificateOperation](../../client-api/operations/server-wide/certificates/delete-certificate.mdx) +        EditClientCertificateOperation +        GetCertificateMetadataOperation +        ReplaceClusterCertificateOperation + +* __Server-wide client configuration__: +        [PutServerWideClientConfigurationOperation](../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx) +        [GetServerWideClientConfigurationOperation](../../client-api/operations/server-wide/configuration/get-serverwide-client-configuration.mdx) + +* __Database management__: +        [CreateDatabaseOperation](../../client-api/operations/server-wide/create-database.mdx) +        [DeleteDatabasesOperation](../../client-api/operations/server-wide/delete-database.mdx) +        [ToggleDatabasesStateOperation](../../client-api/operations/server-wide/toggle-databases-state.mdx) +        [GetDatabaseNamesOperation](../../client-api/operations/server-wide/get-database-names.mdx) +        [AddDatabaseNodeOperation](../../client-api/operations/server-wide/add-database-node.mdx) +        [PromoteDatabaseNodeOperation](../../client-api/operations/server-wide/promote-database-node.mdx) +        [ReorderDatabaseMembersOperation](../../client-api/operations/server-wide/reorder-database-members.mdx) +        [CompactDatabaseOperation](../../client-api/operations/server-wide/compact-database.mdx) +        GetDatabaseRecordOperation +        SetDatabasesLockOperation +        CreateDatabaseOperationWithoutNameValidation +        SetDatabaseDynamicDistributionOperation +        ModifyDatabaseTopologyOperation +        UpdateDatabaseOperation +        UpdateUnusedDatabasesOperation + +* __Server-wide ongoing tasks__: +        DeleteServerWideTaskOperation +        ToggleServerWideTaskStateOperation + +* __Server-wide replication tasks__: +        PutServerWideExternalReplicationOperation +        GetServerWideExternalReplicationOperation +        GetServerWideExternalReplicationsOperation + +* __Server-wide backup tasks__: +        PutServerWideBackupConfigurationOperation +        GetServerWideBackupConfigurationOperation +        GetServerWideBackupConfigurationsOperation +        RestoreBackupOperation + +* __Server-wide analyzers__: +        [PutServerWideAnalyzersOperation](../../indexes/using-analyzers.mdx#add-custom-analyzer-via-client-api) +        DeleteServerWideAnalyzerOperation + +* __Server-wide sorters__: +        [PutServerWideSortersOperation](../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx) +        DeleteServerWideSorterOperation + +* __Logs & debug__: +        
SetLogsConfigurationOperation +        GetLogsConfigurationOperation +        GetClusterDebugInfoPackageOperation +        [GetBuildNumberOperation](../../client-api/operations/server-wide/get-build-number.mdx) +        GetServerWideOperationStateOperation + +* __Traffic watch__: +        PutTrafficWatchConfigurationOperation +        GetTrafficWatchConfigurationOperation + +* __Revisions__: +        [ConfigureRevisionsForConflictsOperation](../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx) + +* __Misc__: +        ModifyConflictSolverOperation +        OfflineMigrationOperation + + + + +## Manage lengthy operations + +* Some operations that run in the server background may take a long time to complete. + +* For Operations that implement an interface with type `OperationIdResult`, + executing the operation via the `send` method will return a promise for `OperationCompletionAwaiter` object, + which can then be __awaited for completion__ or __aborted (killed)__. + + + __Wait for completion__: + + + +{`// Define operation, e.g. delete all discontinued products +// Note: This operation implements interface: 'IOperation' +const deleteByQueryOp = new DeleteByQueryOperation("from Products where Discontinued = true"); + +// Execute the operation +// 'send' returns an object that can be awaited on +const asyncOperation = await documentStore.operations.send(deleteByQueryOp); + +// Call method 'waitForCompletion' to wait for the operation to complete +await asyncOperation.waitForCompletion(); +`} + + + + + + + + __Kill operation__: + + + +{`// Define operation, e.g. delete all discontinued products +// Note: This operation implements interface: 'IOperation' +const deleteByQueryOp = new DeleteByQueryOperation("from Products where Discontinued = true"); + +// Execute the operation +// 'send' returns an object that can be 'killed' +const asyncOperation = await documentStore.operations.send(deleteByQueryOp); + +// Call method 'kill' to abort operation +await asyncOperation.kill(); +`} + + + + + + + +##### Syntax: + + + +{`await waitForCompletion(); +await kill(); +`} + + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/_what-are-operations-php.mdx b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-php.mdx new file mode 100644 index 0000000000..71f6abebc2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-php.mdx @@ -0,0 +1,493 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The RavenDB Client API is built with the notion of layers. + At the top, and what you will usually interact with, are the **[documentStore](../../client-api/what-is-a-document-store.mdx)** + and the **[session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx)**. + They, in turn, are built on top of the lower-level **Operations** and **Commands** API. + +* **RavenDB provides direct access to this lower-level API**, allowing you to send requests + directly to the server via DocumentStore Operations instead of using the higher-level Session API. 
+ +* In this page: + * [Why use operations](../../client-api/operations/what-are-operations.mdx#why-use-operations) + * [How operations work](../../client-api/operations/what-are-operations.mdx#how-operations-work) + * **Operation types**: + * [Common operations](../../client-api/operations/what-are-operations.mdx#common-operations) + * [Maintenance operations](../../client-api/operations/what-are-operations.mdx#maintenance-operations) + * [Server-maintenance operations](../../client-api/operations/what-are-operations.mdx#server-maintenance-operations) + * [Manage lengthy operations](../../client-api/operations/what-are-operations.mdx#manage-lengthy-operations) + * [Wait for completion](../../client-api/operations/what-are-operations.mdx#wait-for-completion) + + +## Why use operations + +* Operations provide **management functionality** that is not available in the context of the session, for example: + * Create/delete a database + * Execute administrative tasks + * Assign permissions + * Change server configuration, and more. + +* The operations are executed on the DocumentStore and are not part of the session transaction. + +* There are some client tasks, such as patching documents, that can be carried out either via the Session + ([session.advanced.patch()](../../client-api/operations/patching/single-document.mdx#array-manipulation)) + or via an Operation on the DocumentStore ([PatchOperation](../../client-api/operations/patching/single-document.mdx#operations-api)). + + + +## How operations work + +* **Sending the request**: + Each Operation is an encapsulation of a `RavenCommand`. + The RavenCommand creates the HTTP request message to be sent to the relevant server endpoint. + The DocumentStore `OperationExecutor` sends the request and processes the results. +* **Target node**: + By default, the operation will be executed on the server node that is defined by the [client configuration](../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, server-maintenance operations can be executed on a specific node by using the [forNode](../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) method. +* **Target database**: + By default, operations work on the default database defined in the DocumentStore. + However, common operations & maintenance operations can operate on a different database by using the [forDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) method. +* **Transaction scope**: + Operations execute as a single-node transaction. + If needed, data will then replicate to the other nodes in the database-group. +* **Background operations**: + Some operations may take a long time to complete and can be awaited for completion. + Learn more [below](../../client-api/operations/what-are-operations.mdx#wait-for-completion). + + + +## Common operations + +* All common operations implement the `IOperation` interface. + The operation is executed within the **database scope**. + Use [forDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) to operate on a specific database other than the default defined in the store. + +* These operations include set-based operations such as _PatchOperation_, _CounterBatchOperation_, + document-extensions related operations such as getting/putting an attachment, and more. + See all available operations [below](../../client-api/operations/what-are-operations#the-following-common-operations-are-available). 
+ +* To execute a common operation request, + use the `send` method on the `operations` property in the DocumentStore. + +#### Example: + + + +{`// Define operation, e.g. get all counters info for a document +$getCountersOp = new GetCountersOperation("products/1-A"); + +// Execute the operation by passing the operation to Operations.Send +/** @var CountersDetail $allCountersResult */ +$allCountersResult = $documentStore->operations()->send($getCountersOp); + +// Access the operation result +$numberOfCounters = count($allCountersResult->getCounters()); +`} + + + +##### Syntax: + + + +{`/** + * Usage and available overloads: + * + * - send(?OperationInterface $operation, ?SessionInfo $sessionInfo = null): ResultInterface; + * - send(string $entityClass, ?PatchOperation $operation, ?SessionInfo $sessionInfo = null): PatchOperationResult; + * - send(?PatchOperation $operation, ?SessionInfo $sessionInfo = null): PatchStatus; + * + * @param mixed ...$parameters + */ +public function send(...$parameters); +`} + + + + + +#### The following common operations are available: + +* **Attachments**: +        [PutAttachmentOperation](../../client-api/operations/attachments/put-attachment.mdx) +        [GetAttachmentOperation](../../client-api/operations/attachments/get-attachment.mdx) +        [DeleteAttachmentOperation](../../client-api/operations/attachments/delete-attachment.mdx) + +* **Counters**: +        [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx) +        [GetCountersOperation](../../client-api/operations/counters/get-counters.mdx) + +* **Time series**: +        [TimeSeriesBatchOperation](../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx) +        [GetMultipleTimeSeriesOperation](../../document-extensions/timeseries/client-api/operations/get.mdx) +        [GetTimeSeriesOperation](../../document-extensions/timeseries/client-api/operations/get.mdx) +        GetTimeSeriesStatisticsOperation + +* **Revisions**: +        [GetRevisionsOperation](../../document-extensions/revisions/client-api/operations/get-revisions.mdx) + +* **Patching**: +        [PatchOperation](../../client-api/operations/patching/single-document.mdx) +        [PatchByQueryOperation](../../client-api/operations/patching/set-based.mdx) + +* **Delete by query**: +        [DeleteByQueryOperation](../../client-api/operations/common/delete-by-query.mdx) + +* **Compare-exchange**: +        PutCompareExchangeValueOperation +        GetCompareExchangeValueOperation +        [GetCompareExchangeValuesOperation](../../compare-exchange/get-cmpxchg-items) +        DeleteCompareExchangeValueOperation + + + + +## Maintenance operations + +* All maintenance operations implement the `IMaintenanceOperation` interface. + The operation is executed within the **database scope**. + Use [forDatabase](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) to operate on a specific database other than the default defined in the store. + +* These operations include database management operations such as setting client configuration, + managing indexes & ongoing-tasks operations, getting stats, and more. + See all available maintenance operations [below](../../client-api/operations/what-are-operations#the-following-maintenance-operations-are-available). + +* To execute a maintenance operation request, + use the `send` method on the `maintenance` property in the DocumentStore. + +#### Example: + + + +{`// Define operation, e.g. 
stop an index +$stopIndexOp = new StopIndexOperation("Orders/ByCompany"); + +// Execute the operation by passing the operation to Maintenance.Send +$documentStore->maintenance()->send($stopIndexOp); + +// This specific operation returns void +// You can send another operation to verify the index running status +$indexStatsOp = new GetIndexStatisticsOperation("Orders/ByCompany"); +/** @var IndexStats $indexStats */ +$indexStats = $documentStore->maintenance()->send($indexStatsOp); + +/** @var IndexRunningStatus $status */ +$status = $indexStats->getStatus(); // will be "Paused" +`} + + + +##### Syntax: + + + +{`public function send(MaintenanceOperationInterface $operation): ResultInterface; +`} + + + + + +#### The following maintenance operations are available: + +* **Statistics**: +        [GetStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-database-stats) +        [GetDetailedStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-detailed-database-stats) +        [GetCollectionStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-collection-stats) +        [GetDetailedCollectionStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-detailed-collection-stats) + +* **Client Configuration**: +        [PutClientConfigurationOperation](../../client-api/operations/maintenance/configuration/put-client-configuration.mdx) +        [GetClientConfigurationOperation](../../client-api/operations/maintenance/configuration/get-client-configuration.mdx) + +* **Indexes**: +        [PutIndexesOperation](../../client-api/operations/maintenance/indexes/put-indexes.mdx) +        [SetIndexesLockOperation](../../client-api/operations/maintenance/indexes/set-index-lock.mdx) +        [SetIndexesPriorityOperation](../../client-api/operations/maintenance/indexes/set-index-priority.mdx) +        [GetIndexErrorsOperation](../../client-api/operations/maintenance/indexes/get-index-errors.mdx) +        [GetIndexOperation](../../client-api/operations/maintenance/indexes/get-index.mdx) +        [GetIndexesOperation](../../client-api/operations/maintenance/indexes/get-indexes.mdx) +        [GetTermsOperation](../../client-api/operations/maintenance/indexes/get-terms.mdx) +        GetIndexPerformanceStatisticsOperation +        GetIndexStatisticsOperation +        GetIndexesStatisticsOperation +        GetIndexingStatusOperation +        GetIndexStalenessOperation +        [GetIndexNamesOperation](../../client-api/operations/maintenance/indexes/get-index-names.mdx) +        [StartIndexOperation](../../client-api/operations/maintenance/indexes/start-index.mdx) +        [StartIndexingOperation](../../client-api/operations/maintenance/indexes/start-indexing.mdx) +        [StopIndexOperation](../../client-api/operations/maintenance/indexes/stop-index.mdx) +        [StopIndexingOperation](../../client-api/operations/maintenance/indexes/stop-indexing.mdx) +        [ResetIndexOperation](../../client-api/operations/maintenance/indexes/reset-index.mdx) +        [DeleteIndexOperation](../../client-api/operations/maintenance/indexes/delete-index.mdx) +        [DeleteIndexErrorsOperation](../../client-api/operations/maintenance/indexes/delete-index-errors.mdx) +        [DisableIndexOperation](../../client-api/operations/maintenance/indexes/disable-index.mdx) +        [EnableIndexOperation](../../client-api/operations/maintenance/indexes/enable-index.mdx) +        
[IndexHasChangedOperation](../../client-api/operations/maintenance/indexes/index-has-changed.mdx) + +* **Analyzers**: +        PutAnalyzersOperation +        DeleteAnalyzerOperation + +* **Ongoing tasks**: +        [GetOngoingTaskInfoOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#get-ongoing-task-info) +        [ToggleOngoingTaskStateOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#toggle-ongoing-task-state) +        [DeleteOngoingTaskOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#delete-ongoing-task) + +* **ETL tasks**: +        AddEtlOperation +        UpdateEtlOperation +        [ResetEtlOperation](../../client-api/operations/maintenance/etl/reset-etl.mdx) + +* **Replication tasks**: +        PutPullReplicationAsHubOperation +        GetPullReplicationTasksInfoOperation +        GetReplicationHubAccessOperation +        GetReplicationPerformanceStatisticsOperation +        RegisterReplicationHubAccessOperation +        UnregisterReplicationHubAccessOperation +        UpdateExternalReplicationOperation +        UpdatePullReplicationAsSinkOperation + +* **Backup**: +        BackupOperation +        GetPeriodicBackupStatusOperation +        StartBackupOperation +        UpdatePeriodicBackupOperation + +* **Connection strings**: +        [PutConnectionStringOperation](../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx) +        [RemoveConnectionStringOperation](../../client-api/operations/maintenance/connection-strings/remove-connection-string.mdx) +        [GetConnectionStringsOperation](../../client-api/operations/maintenance/connection-strings/get-connection-string.mdx) + +* **Transaction recording**: +        StartTransactionsRecordingOperation +        StopTransactionsRecordingOperation +        ReplayTransactionsRecordingOperation + +* **Database settings**: +        [PutDatabaseSettingsOperation](../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#put-database-settings-operation) +        [GetDatabaseSettingsOperation](../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#get-database-settings-operation) + +* **Identities**: +        [GetIdentitiesOperation](../../client-api/operations/maintenance/identities/get-identities.mdx) +        [NextIdentityForOperation](../../client-api/operations/maintenance/identities/increment-next-identity.mdx) +        [SeedIdentityForOperation](../../client-api/operations/maintenance/identities/seed-identity.mdx) + +* **Time series**: +        ConfigureTimeSeriesOperation +        ConfigureTimeSeriesPolicyOperation +        ConfigureTimeSeriesValueNamesOperation +        RemoveTimeSeriesPolicyOperation + +* **Revisions**: +        [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) + +* **Sorters**: +        [PutSortersOperation](../../client-api/operations/maintenance/sorters/put-sorter.mdx) +        DeleteSorterOperation + +* **Sharding**: +        [AddPrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#add-prefixes-after-database-creation) +        [DeletePrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#removing-prefixes) +        [UpdatePrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#updating-shard-configurations-for-prefixes) + +* **Misc**: +    
    ConfigureExpirationOperation +        ConfigureRefreshOperation +        ConfigureDataArchivalOperation +        UpdateDocumentsCompressionConfigurationOperation +        DatabaseHealthCheckOperation +        GetOperationStateOperation +        CreateSampleDataOperation + + + + +## Server-maintenance operations + +* All server-maintenance operations implement the `IServerOperation` interface. + The operation is executed within the **server scope**. + Use [forNode](../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) to operate on a specific node other than the default defined in the client configuration. + +* These operations include server management and configuration operations. + See all available operations [below](../../client-api/operations/what-are-operations#the-following-server-maintenance-operations-are-available). + +* To execute a server-maintenance operation request, + use the `send` method on the `maintenance.server` property in the DocumentStore. + +#### Example: + + + +{`// Define operation, e.g. get the server build number +$getBuildNumberOp = new GetBuildNumberOperation(); + +// Execute the operation by passing the operation to Maintenance.Server.Send +/** @var BuildNumber $buildNumberResult */ +$buildNumberResult = $documentStore->maintenance()->server()->send($getBuildNumberOp); + +// Access the operation result +$version = $buildNumberResult->getBuildVersion(); +`} + + + +##### Syntax: + + + +{`public function send(ServerOperationInterface $operation): ?object; +`} + + + + + +#### The following server-maintenance operations are available: + +* **Client certificates**: +        [PutClientCertificateOperation](../../client-api/operations/server-wide/certificates/put-client-certificate.mdx) +        [CreateClientCertificateOperation](../../client-api/operations/server-wide/certificates/create-client-certificate.mdx) +        [GetCertificatesOperation](../../client-api/operations/server-wide/certificates/get-certificates.mdx) +        [DeleteCertificateOperation](../../client-api/operations/server-wide/certificates/delete-certificate.mdx) +        EditClientCertificateOperation +        GetCertificateMetadataOperation +        ReplaceClusterCertificateOperation + +* **Server-wide client configuration**: +        [PutServerWideClientConfigurationOperation](../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx) +        [GetServerWideClientConfigurationOperation](../../client-api/operations/server-wide/configuration/get-serverwide-client-configuration.mdx) + +* **Database management**: +        [CreateDatabaseOperation](../../client-api/operations/server-wide/create-database.mdx) +        [DeleteDatabasesOperation](../../client-api/operations/server-wide/delete-database.mdx) +        [ToggleDatabasesStateOperation](../../client-api/operations/server-wide/toggle-databases-state.mdx) +        [GetDatabaseNamesOperation](../../client-api/operations/server-wide/get-database-names.mdx) +        [AddDatabaseNodeOperation](../../client-api/operations/server-wide/add-database-node.mdx) +        [PromoteDatabaseNodeOperation](../../client-api/operations/server-wide/promote-database-node.mdx) +        [ReorderDatabaseMembersOperation](../../client-api/operations/server-wide/reorder-database-members.mdx) +        [CompactDatabaseOperation](../../client-api/operations/server-wide/compact-database.mdx) +        GetDatabaseRecordOperation +        SetDatabasesLockOperation +        
CreateDatabaseOperationWithoutNameValidation
+        SetDatabaseDynamicDistributionOperation
+        ModifyDatabaseTopologyOperation
+        UpdateDatabaseOperation
+        UpdateUnusedDatabasesOperation
+
+* **Server-wide ongoing tasks**:
+        DeleteServerWideTaskOperation
+        ToggleServerWideTaskStateOperation
+
+* **Server-wide replication tasks**:
+        PutServerWideExternalReplicationOperation
+        GetServerWideExternalReplicationOperation
+        GetServerWideExternalReplicationsOperation
+
+* **Server-wide backup tasks**:
+        PutServerWideBackupConfigurationOperation
+        GetServerWideBackupConfigurationOperation
+        GetServerWideBackupConfigurationsOperation
+        RestoreBackupOperation
+
+* **Server-wide analyzers**:
+        PutServerWideAnalyzersOperation
+        DeleteServerWideAnalyzerOperation
+
+* **Server-wide sorters**:
+        [PutServerWideSortersOperation](../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx)
+        DeleteServerWideSorterOperation
+
+* **Logs & debug**:
+        SetLogsConfigurationOperation
+        GetLogsConfigurationOperation
+        GetClusterDebugInfoPackageOperation
+        [GetBuildNumberOperation](../../client-api/operations/server-wide/get-build-number.mdx)
+        GetServerWideOperationStateOperation
+
+* **Traffic watch**:
+        PutTrafficWatchConfigurationOperation
+        GetTrafficWatchConfigurationOperation
+
+* **Revisions**:
+        [ConfigureRevisionsForConflictsOperation](../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx)
+
+* **Misc**:
+        ModifyConflictSolverOperation
+        OfflineMigrationOperation
+
+
+
+
+## Manage lengthy operations
+
+* Some operations that run in the server background may take a long time to complete.
+
+* For Operations that implement an interface with type `OperationIdResult`,
+  executing the operation via the `sendAsync` method will return an `Operation` object,
+  which can be **awaited for completion**.
+
+#### Wait for completion:
+
+
+
+{`public function waitForCompletionWithTimeout(DocumentStore $documentStore, Duration $duration)
+\{
+    // Define operation, e.g. delete all discontinued products
+    // Note: This operation implements interface: 'OperationInterface'
+    $deleteByQueryOp = new DeleteByQueryOperation("from Products where Discontinued = true");
+
+    // Execute the operation
+    // sendAsync returns an 'Operation' object that can be awaited on
+
+    /** @var Operation $operation */
+    $operation = $documentStore->operations()->sendAsync($deleteByQueryOp);
+
+    try \{
+        // Call method 'waitForCompletion()' to wait for the operation to complete.
+
+        /** @var BulkOperationResult $result */
+        $result = $operation->waitForCompletion($duration);
+
+        // The operation has finished within the specified timeframe
+        $numberOfItemsDeleted = $result->getTotal(); // Access the operation result
+
+
+    \} catch (TimeoutException $exception) \{
+        // The operation did not finish within the specified timeframe
+    \}
+
+\}
+`}
+
+
+
+##### Syntax:
+
+
+
+{`/**
+ * Wait for operation completion.
+ *
+ * Throws a TimeoutException if $duration is set and the operation does not complete within that interval.
+ * + * Usage: + * - waitForCompletion(): void; // It will wait until operation is finished + * - waitForCompletion(Duration $duration); // It will wait for given duration + * - waitForCompletion(int $seconds); // It will wait for given seconds + * + * @param Duration|int|null $duration + */ +public function waitForCompletion(Duration|int|null $duration = null): void; +`} + + + +| Parameter | Type | Description | +|---------------|---------------------|-------------| +| **$duration** | `Duration` or `int` |
  • When a duration is specified -
    A `TimeoutException` is thrown if the operation has not completed within the specified time frame.
    The operation itself continues to run in the background;
    no rollback takes place.
  • `null` -
    `waitForCompletion` will wait for the operation to complete indefinitely.
| diff --git a/versioned_docs/version-7.1/client-api/operations/_what-are-operations-python.mdx b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-python.mdx new file mode 100644 index 0000000000..f01ea43d95 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/_what-are-operations-python.mdx @@ -0,0 +1,442 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The RavenDB Client API is built with the notion of layers. + At the top, and what you will usually interact with, are the **[documentStore](../../client-api/what-is-a-document-store.mdx)** + and the **[session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx)**. + They, in turn, are built on top of the lower-level **Operations** and **Commands** API. + +* **RavenDB provides direct access to this lower-level API**, allowing you to send requests + directly to the server via DocumentStore Operations instead of using the higher-level Session API. + +* In this page: + * [Why use operations](../../client-api/operations/what-are-operations.mdx#why-use-operations) + * [How operations work](../../client-api/operations/what-are-operations.mdx#how-operations-work) + * **Operation types**: + * [Common operations](../../client-api/operations/what-are-operations.mdx#common-operations) + * [Maintenance operations](../../client-api/operations/what-are-operations.mdx#maintenance-operations) + * [Server-maintenance operations](../../client-api/operations/what-are-operations.mdx#server-maintenance-operations) + * [Manage lengthy operations](../../client-api/operations/what-are-operations.mdx#manage-lengthy-operations) + * [Wait for completion](../../client-api/operations/what-are-operations.mdx#wait-for-completion) + * [Kill operation](../../client-api/operations/what-are-operations.mdx#kill-operation) + + +## Why use operations + +* Operations provide **management functionality** that is not available in the context of the session, for example: + * Create/delete a database + * Execute administrative tasks + * Assign permissions + * Change server configuration, and more. + +* The operations are executed on the DocumentStore and are not part of the session transaction. + +* There are some client tasks, such as patching documents, that can be carried out either via the Session + ([session.advanced.patch()](../../client-api/operations/patching/single-document.mdx#array-manipulation)) + or via an Operation on the DocumentStore ([PatchOperation](../../client-api/operations/patching/single-document.mdx#operations-api)). + + + +## How operations work + +* **Sending the request**: + Each Operation is an encapsulation of a `RavenCommand`. + The RavenCommand creates the HTTP request message to be sent to the relevant server endpoint. + The DocumentStore `OperationExecutor` sends the request and processes the results. +* **Target node**: + By default, the operation will be executed on the server node that is defined by the [client configuration](../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, server-maintenance operations can be executed on a specific node by using the [for_node](../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) method. +* **Target database**: + By default, operations work on the default database defined in the DocumentStore. 
+ However, common operations & maintenance operations can operate on a different database by using the [for_database](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) method. +* **Transaction scope**: + Operations execute as a single-node transaction. + If needed, data will then replicate to the other nodes in the database-group. +* **Background operations**: + Some operations may take a long time to complete and can be awaited for completion. + Learn more [below](../../client-api/operations/what-are-operations.mdx#wait-for-completion). + + + +## Common operations + +* All common operations implement the `IOperation` interface. + The operation is executed within the **database scope**. + Use [for_database](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) to operate on a specific database other than the default defined in the store. + +* These operations include set-based operations such as _PatchOperation_, _CounterBatchOperation_, + document-extensions related operations such as getting/putting an attachment, and more. + See all available operations [below](../../client-api/operations/what-are-operations#the-following-common-operations-are-available). + +* To execute a common operation request, + use the `send` method on the `operations` property of the DocumentStore. + +#### Example: + + + +{`# Define operation, e.g. get all counters info for a document +get_counters_op = GetCountersOperation("products/1-A") + +# Execute the operation by passing the operation to operations.send +all_counters_result = store.operations.send(get_counters_op) + +# Access the operation result +number_of_counters = len(all_counters_result.counters) +`} + + + +##### Syntax: + + + +{`# Available overloads: +def send(self, operation: IOperation[_Operation_T], session_info: SessionInfo = None) -> _Operation_T: ... + +def send_async(self, operation: IOperation[OperationIdResult]) -> Operation: ... + +def send_patch_operation(self, operation: PatchOperation, session_info: SessionInfo) -> PatchStatus: ... + +def send_patch_operation_with_entity_class( + self, entity_class: _T, operation: PatchOperation, session_info: Optional[SessionInfo] = None +) -> PatchOperation.Result[_T]: ... 
+`}
+
+
+
+
+#### The following common operations are available:
+
+* **Attachments**:
+        [PutAttachmentOperation](../../client-api/operations/attachments/put-attachment.mdx)
+        [GetAttachmentOperation](../../client-api/operations/attachments/get-attachment.mdx)
+        [DeleteAttachmentOperation](../../client-api/operations/attachments/delete-attachment.mdx)
+
+* **Counters**:
+        [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx)
+        [GetCountersOperation](../../client-api/operations/counters/get-counters.mdx)
+
+* **Time series**:
+        [TimeSeriesBatchOperation](../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx)
+        [GetMultipleTimeSeriesOperation](../../document-extensions/timeseries/client-api/operations/get.mdx)
+        [GetTimeSeriesOperation](../../document-extensions/timeseries/client-api/operations/get.mdx)
+        GetTimeSeriesStatisticsOperation
+
+* **Revisions**:
+        [GetRevisionsOperation](../../document-extensions/revisions/client-api/operations/get-revisions.mdx)
+
+* **Patching**:
+        [PatchOperation](../../client-api/operations/patching/single-document.mdx)
+        [PatchByQueryOperation](../../client-api/operations/patching/set-based.mdx)
+
+* **Delete by query**:
+        [DeleteByQueryOperation](../../client-api/operations/common/delete-by-query.mdx)
+
+* **Compare-exchange**:
+        PutCompareExchangeValueOperation
+        GetCompareExchangeValueOperation
+        [GetCompareExchangeValuesOperation](../../compare-exchange/get-cmpxchg-items)
+        DeleteCompareExchangeValueOperation
+
+
+
+## Maintenance operations
+
+* All maintenance operations implement the `IMaintenanceOperation` interface.
+  The operation is executed within the **database scope**.
+  Use [for_database](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) to operate on a specific database other than the default defined in the store.
+
+* These operations include database management operations such as setting client configuration,
+  managing indexes & ongoing-tasks operations, getting stats, and more.
+  See all available maintenance operations [below](../../client-api/operations/what-are-operations#the-following-maintenance-operations-are-available).
+
+* To execute a maintenance operation request,
+  use the `send` method on the `maintenance` property in the DocumentStore.
+
+#### Example:
+
+
+
+{`# Define operation, e.g. stop an index
+stop_index_op = StopIndexOperation("Orders/ByCompany")
+
+# Execute the operation by passing the operation to maintenance.send
+store.maintenance.send(stop_index_op)
+
+# This specific operation returns None
+# You can send another operation to verify the index running status
+index_stats_op = GetIndexStatisticsOperation("Orders/ByCompany")
+index_stats = store.maintenance.send(index_stats_op)
+status = index_stats.status # will be "Paused"
+`}
+
+
+
+##### Syntax:
+
+
+
+{`def send(
+    self, operation: Union[VoidMaintenanceOperation, MaintenanceOperation[_Operation_T]]
+) -> Optional[_Operation_T]: ...
+
+def send_async(self, operation: MaintenanceOperation[OperationIdResult]) -> Operation: ...
+`} + + + + + +#### The following maintenance operations are available: + +* **Statistics**: +        [GetStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-database-stats) +        [GetDetailedStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-detailed-database-stats) +        [GetCollectionStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-collection-stats) +        [GetDetailedCollectionStatisticsOperation](../../client-api/operations/maintenance/get-stats.mdx#get-detailed-collection-stats) + +* **Client Configuration**: +        [PutClientConfigurationOperation](../../client-api/operations/maintenance/configuration/put-client-configuration.mdx) +        [GetClientConfigurationOperation](../../client-api/operations/maintenance/configuration/get-client-configuration.mdx) + +* **Indexes**: +        [PutIndexesOperation](../../client-api/operations/maintenance/indexes/put-indexes.mdx) +        [SetIndexesLockOperation](../../client-api/operations/maintenance/indexes/set-index-lock.mdx) +        [SetIndexesPriorityOperation](../../client-api/operations/maintenance/indexes/set-index-priority.mdx) +        [GetIndexErrorsOperation](../../client-api/operations/maintenance/indexes/get-index-errors.mdx) +        [GetIndexOperation](../../client-api/operations/maintenance/indexes/get-index.mdx) +        [GetIndexesOperation](../../client-api/operations/maintenance/indexes/get-indexes.mdx) +        [GetTermsOperation](../../client-api/operations/maintenance/indexes/get-terms.mdx) +        GetIndexPerformanceStatisticsOperation +        GetIndexStatisticsOperation +        GetIndexesStatisticsOperation +        GetIndexingStatusOperation +        GetIndexStalenessOperation +        [GetIndexNamesOperation](../../client-api/operations/maintenance/indexes/get-index-names.mdx) +        [StartIndexOperation](../../client-api/operations/maintenance/indexes/start-index.mdx) +        [StartIndexingOperation](../../client-api/operations/maintenance/indexes/start-indexing.mdx) +        [StopIndexOperation](../../client-api/operations/maintenance/indexes/stop-index.mdx) +        [StopIndexingOperation](../../client-api/operations/maintenance/indexes/stop-indexing.mdx) +        [ResetIndexOperation](../../client-api/operations/maintenance/indexes/reset-index.mdx) +        [DeleteIndexOperation](../../client-api/operations/maintenance/indexes/delete-index.mdx) +        [DeleteIndexErrorsOperation](../../client-api/operations/maintenance/indexes/delete-index-errors.mdx) +        [DisableIndexOperation](../../client-api/operations/maintenance/indexes/disable-index.mdx) +        [EnableIndexOperation](../../client-api/operations/maintenance/indexes/enable-index.mdx) +        [IndexHasChangedOperation](../../client-api/operations/maintenance/indexes/index-has-changed.mdx) + +* **Analyzers**: +        PutAnalyzersOperation +        DeleteAnalyzerOperation + +* **Ongoing tasks**: +        [GetOngoingTaskInfoOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#get-ongoing-task-info) +        [ToggleOngoingTaskStateOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#toggle-ongoing-task-state) +        [DeleteOngoingTaskOperation](../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#delete-ongoing-task) + +* **ETL tasks**: +        AddEtlOperation +        UpdateEtlOperation +        
[ResetEtlOperation](../../client-api/operations/maintenance/etl/reset-etl.mdx) + +* **Replication tasks**: +        PutPullReplicationAsHubOperation +        GetPullReplicationTasksInfoOperation +        GetReplicationHubAccessOperation +        GetReplicationPerformanceStatisticsOperation +        RegisterReplicationHubAccessOperation +        UnregisterReplicationHubAccessOperation +        UpdateExternalReplicationOperation +        UpdatePullReplicationAsSinkOperation + +* **Backup**: +        BackupOperation +        GetPeriodicBackupStatusOperation +        StartBackupOperation +        UpdatePeriodicBackupOperation + +* **Connection strings**: +        [PutConnectionStringOperation](../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx) +        [RemoveConnectionStringOperation](../../client-api/operations/maintenance/connection-strings/remove-connection-string.mdx) +        [GetConnectionStringsOperation](../../client-api/operations/maintenance/connection-strings/get-connection-string.mdx) + +* **Transaction recording**: +        StartTransactionsRecordingOperation +        StopTransactionsRecordingOperation +        ReplayTransactionsRecordingOperation + +* **Database settings**: +        [PutDatabaseSettingsOperation](../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#put-database-settings-operation) +        [GetDatabaseSettingsOperation](../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#get-database-settings-operation) + +* **Identities**: +        [GetIdentitiesOperation](../../client-api/operations/maintenance/identities/get-identities.mdx) +        [NextIdentityForOperation](../../client-api/operations/maintenance/identities/increment-next-identity.mdx) +        [SeedIdentityForOperation](../../client-api/operations/maintenance/identities/seed-identity.mdx) + +* **Time series**: +        ConfigureTimeSeriesOperation +        ConfigureTimeSeriesPolicyOperation +        ConfigureTimeSeriesValueNamesOperation +        RemoveTimeSeriesPolicyOperation + +* **Revisions**: +        [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) + +* **Sorters**: +        [PutSortersOperation](../../client-api/operations/maintenance/sorters/put-sorter.mdx) +        DeleteSorterOperation + +* **Sharding**: +        [AddPrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#add-prefixes-after-database-creation) +        [DeletePrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#removing-prefixes) +        [UpdatePrefixedShardingSettingOperation](../../sharding/administration/sharding-by-prefix.mdx#updating-shard-configurations-for-prefixes) + +* **Misc**: +        ConfigureExpirationOperation +        ConfigureRefreshOperation +        ConfigureDataArchivalOperation +        UpdateDocumentsCompressionConfigurationOperation +        DatabaseHealthCheckOperation +        GetOperationStateOperation +        CreateSampleDataOperation + + + + +## Server-maintenance operations + +* All server-maintenance operations implement the `IServerOperation` interface. + The operation is executed within the **server scope**. + Use [for_node](../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) to operate on a specific node other than the default defined in the client configuration. + +* These operations include server management and configuration operations. 
+ See all available operations [below](../../client-api/operations/what-are-operations#the-following-server-maintenance-operations-are-available).
+
+* To execute a server-maintenance operation request,
+ use the `send` method on the `maintenance.server` property in the DocumentStore.
+
+#### Example:
+
+
+
+{`# Define operation, e.g. get the server build number
+get_build_number_op = GetBuildNumberOperation()
+
+# Execute the operation by passing to maintenance.server.send
+build_number_result = store.maintenance.server.send(get_build_number_op)
+
+# Access the operation result
+version = build_number_result.build_version
+`}
+
+
+
+##### Syntax:
+
+
+
+{`def send(self, operation: ServerOperation[_T_OperationResult]) -> Optional[_T_OperationResult]: ...
+
+def send_async(self, operation: ServerOperation[OperationIdResult]) -> Operation: ...
+`}
+
+
+
+
+
+#### The following server-maintenance operations are available:
+
+* **Client certificates**:
+        [PutClientCertificateOperation](../../client-api/operations/server-wide/certificates/put-client-certificate.mdx)
+        [CreateClientCertificateOperation](../../client-api/operations/server-wide/certificates/create-client-certificate.mdx)
+        [GetCertificatesOperation](../../client-api/operations/server-wide/certificates/get-certificates.mdx)
+        [DeleteCertificateOperation](../../client-api/operations/server-wide/certificates/delete-certificate.mdx)
+        EditClientCertificateOperation
+        GetCertificateMetadataOperation
+        ReplaceClusterCertificateOperation
+
+* **Server-wide client configuration**:
+        [PutServerWideClientConfigurationOperation](../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx)
+        [GetServerWideClientConfigurationOperation](../../client-api/operations/server-wide/configuration/get-serverwide-client-configuration.mdx)
+
+* **Database management**:
+        [CreateDatabaseOperation](../../client-api/operations/server-wide/create-database.mdx)
+        [DeleteDatabasesOperation](../../client-api/operations/server-wide/delete-database.mdx)
+        [ToggleDatabasesStateOperation](../../client-api/operations/server-wide/toggle-databases-state.mdx)
+        [GetDatabaseNamesOperation](../../client-api/operations/server-wide/get-database-names.mdx)
+        [AddDatabaseNodeOperation](../../client-api/operations/server-wide/add-database-node.mdx)
+        [PromoteDatabaseNodeOperation](../../client-api/operations/server-wide/promote-database-node.mdx)
+        [ReorderDatabaseMembersOperation](../../client-api/operations/server-wide/reorder-database-members.mdx)
+        [CompactDatabaseOperation](../../client-api/operations/server-wide/compact-database.mdx)
+        GetDatabaseRecordOperation
+        SetDatabasesLockOperation
+        CreateDatabaseOperationWithoutNameValidation
+        SetDatabaseDynamicDistributionOperation
+        ModifyDatabaseTopologyOperation
+        UpdateDatabaseOperation
+        UpdateUnusedDatabasesOperation
+
+* **Server-wide ongoing tasks**:
+        DeleteServerWideTaskOperation
+        ToggleServerWideTaskStateOperation
+
+* **Server-wide replication tasks**:
+        PutServerWideExternalReplicationOperation
+        GetServerWideExternalReplicationOperation
+        GetServerWideExternalReplicationsOperation
+
+* **Server-wide backup tasks**:
+        PutServerWideBackupConfigurationOperation
+        GetServerWideBackupConfigurationOperation
+        GetServerWideBackupConfigurationsOperation
+        RestoreBackupOperation
+
+* **Server-wide analyzers**:
+        PutServerWideAnalyzersOperation
+        DeleteServerWideAnalyzerOperation
+
+* **Server-wide sorters**:
+        [PutServerWideSortersOperation](../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx)
+        DeleteServerWideSorterOperation
+
+* **Logs & debug**:
+        SetLogsConfigurationOperation
+        GetLogsConfigurationOperation
+        GetClusterDebugInfoPackageOperation
+        [GetBuildNumberOperation](../../client-api/operations/server-wide/get-build-number.mdx)
+        GetServerWideOperationStateOperation
+
+* **Traffic watch**:
+        PutTrafficWatchConfigurationOperation
+        GetTrafficWatchConfigurationOperation
+
+* **Revisions**:
+        [ConfigureRevisionsForConflictsOperation](../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx)
+
+* **Misc**:
+        ModifyConflictSolverOperation
+        OfflineMigrationOperation
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_category_.json b/versioned_docs/version-7.1/client-api/operations/attachments/_category_.json
new file mode 100644
index 0000000000..b2b7ed7266
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 5,
+  "label": "Attachments"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_delete-attachment-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/_delete-attachment-csharp.mdx
new file mode 100644
index 0000000000..b22232e875
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/_delete-attachment-csharp.mdx
@@ -0,0 +1,32 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This operation is used to delete an attachment from a document.
+ +## Syntax + + + +{`public DeleteAttachmentOperation(string documentId, string name, string changeVector = null) +`} + + + +| Parameter | | | +|------------------|--------|-------------------------------------------------------------------------| +| **documentId** | string | ID of a document containing an attachment | +| **name** | string | Name of an attachment | +| **changeVector** | string | Entity changeVector, used for concurrency checks (`null` to skip check) | + +## Example + + + +{`store.Operations.Send(new DeleteAttachmentOperation("orders/1-A", "invoice.pdf")); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_delete-attachment-java.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/_delete-attachment-java.mdx new file mode 100644 index 0000000000..eac3cba4bb --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/attachments/_delete-attachment-java.mdx @@ -0,0 +1,35 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +This operation is used to delete an attachment from a document. + +## Syntax + + + +{`DeleteAttachmentOperation(String documentId, String name) + +DeleteAttachmentOperation(String documentId, String name, String changeVector) +`} + + + +| Parameter | | | +|------------------|--------|-------------------------------------------------------------------------| +| **documentId** | String | ID of a document containing an attachment | +| **name** | String | Name of an attachment | +| **changeVector** | String | Entity changeVector, used for concurrency checks (`null` to skip check) | + +## Example + + + +{`store.operations().send( + new DeleteAttachmentOperation("orders/1-A", "invoice.pdf")); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_delete-attachment-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/_delete-attachment-nodejs.mdx new file mode 100644 index 0000000000..05e8c3117d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/attachments/_delete-attachment-nodejs.mdx @@ -0,0 +1,50 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `DeleteAttachmentOperation` to delete an attachment from a document. 
+
+* In this page:
+
+    * [Delete attachment example](../../../client-api/operations/attachments/delete-attachment.mdx#delete-attachment-example)
+    * [Syntax](../../../client-api/operations/attachments/delete-attachment.mdx#syntax)
+
+
+## Delete attachment example
+
+
+
+{`// Define the delete attachment operation
+const deleteAttachmentOp = new DeleteAttachmentOperation("employees/1-A", "photo.jpg");
+
+// Execute the operation by passing it to operations.send
+await documentStore.operations.send(deleteAttachmentOp);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+const deleteAttachmentOp = new DeleteAttachmentOperation(documentId, name);
+const deleteAttachmentOp = new DeleteAttachmentOperation(documentId, name, changeVector);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|----------|-----------------------------------------------------------------------------------|
+| __documentId__ | `string` | ID of document from which attachment will be removed |
+| __name__ | `string` | Name of attachment to delete |
+| __changeVector__ | `string` | ChangeVector of attachment, used for concurrency checks (`null` to skip check) |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_get-attachment-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/_get-attachment-csharp.mdx
new file mode 100644
index 0000000000..47d869ffda
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/_get-attachment-csharp.mdx
@@ -0,0 +1,71 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This operation is used to get an attachment from a document.
+
+## Syntax
+
+
+
+{`public GetAttachmentOperation(string documentId, string name, AttachmentType type, string changeVector)
+`}
+
+
+
+
+
+{`public class AttachmentResult
+\{
+    public Stream Stream;
+    public AttachmentDetails Details;
+\}
+
+public class AttachmentDetails : AttachmentName
+\{
+    public string ChangeVector;
+    public string DocumentId;
+\}
+
+public class AttachmentName
+\{
+    public string Name;
+    public string Hash;
+    public string ContentType;
+    public long Size;
+\}
+`}
+
+
+
+| Parameter | | |
+|------------------|----------------| ----- |
+| **documentId** | string | ID of the document that contains the attachment |
+| **name** | string | Name of an attachment |
+| **type** | AttachmentType | Specify whether getting an attachment from a document or from a revision. (`Document` or `Revision`). |
+| **changeVector** | string | The ChangeVector of the document or the revision to which the attachment belongs. Mandatory when getting an attachment from a revision. Used for concurrency checks (use `null` to skip the check). |
+
+| Return Value | |
+| ------------- | ----- |
+| **Stream** | Stream containing an attachment |
+| **ChangeVector** | Change vector of document |
+| **DocumentId** | ID of document |
+| **Name** | Name of attachment |
+| **Hash** | Hash of attachment |
+| **ContentType** | MIME content type of an attachment |
+| **Size** | Size of attachment |
+
+## Example
+
+
+
+{`store.Operations.Send(new GetAttachmentOperation("orders/1-A",
+    "invoice.pdf",
+    AttachmentType.Document,
+    changeVector: null));
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_get-attachment-java.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/_get-attachment-java.mdx
new file mode 100644
index 0000000000..5f5c63f118
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/_get-attachment-java.mdx
@@ -0,0 +1,122 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This operation is used to get an attachment from a document.
+
+## Syntax
+
+
+
+{`GetAttachmentOperation(String documentId, String name, AttachmentType type, String changeVector)
+`}
+
+
+
+
+
+{`public class CloseableAttachmentResult implements AutoCloseable \{
+    private AttachmentDetails details;
+    private CloseableHttpResponse response;
+
+    public InputStream getData() throws IOException \{
+        return response.getEntity().getContent();
+    \}
+
+    public AttachmentDetails getDetails() \{
+        return details;
+    \}
+\}
+
+public class AttachmentDetails extends AttachmentName \{
+    private String changeVector;
+    private String documentId;
+
+    public String getChangeVector() \{
+        return changeVector;
+    \}
+
+    public void setChangeVector(String changeVector) \{
+        this.changeVector = changeVector;
+    \}
+
+    public String getDocumentId() \{
+        return documentId;
+    \}
+
+    public void setDocumentId(String documentId) \{
+        this.documentId = documentId;
+    \}
+\}
+
+public class AttachmentName \{
+    private String name;
+    private String hash;
+    private String contentType;
+    private long size;
+
+    public String getName() \{
+        return name;
+    \}
+
+    public void setName(String name) \{
+        this.name = name;
+    \}
+
+    public String getHash() \{
+        return hash;
+    \}
+
+    public void setHash(String hash) \{
+        this.hash = hash;
+    \}
+
+    public String getContentType() \{
+        return contentType;
+    \}
+
+    public void setContentType(String contentType) \{
+        this.contentType = contentType;
+    \}
+
+    public long getSize() \{
+        return size;
+    \}
+
+    public void setSize(long size) \{
+        this.size = size;
+    \}
+\}
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **documentId** | String | ID of the document that contains the attachment |
+| **name** | String | Name of an attachment |
+| **type** | AttachmentType | Specify whether getting an attachment from a document or from a revision. (`DOCUMENT` or `REVISION`). |
+| **changeVector** | String | The ChangeVector of the document or the revision to which the attachment belongs. Mandatory when getting an attachment from a revision. Used for concurrency checks (use `null` to skip the check). |
+
+| Return Value | |
+| ------------- | ----- |
+| **Stream** | InputStream containing an attachment |
+| **ChangeVector** | Change vector of document |
+| **DocumentId** | ID of document |
+| **Name** | Name of attachment |
+| **Hash** | Hash of attachment |
+| **ContentType** | MIME content type of an attachment |
+| **Size** | Size of attachment |
+
+## Example
+
+
+
+{`store.operations().send(
+    new GetAttachmentOperation("orders/1-A", "invoice.pdf", AttachmentType.DOCUMENT, null));
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_get-attachment-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/_get-attachment-nodejs.mdx
new file mode 100644
index 0000000000..7a4b8ed098
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/_get-attachment-nodejs.mdx
@@ -0,0 +1,99 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `GetAttachmentOperation` to retrieve an attachment from a document.
+
+* In this page:
+
+    * [Get attachment example](../../../client-api/operations/attachments/get-attachment.mdx#get-attachment-example)
+    * [Syntax](../../../client-api/operations/attachments/get-attachment.mdx#syntax)
+
+
+## Get attachment example
+
+
+
+{`// Define the get attachment operation
+const getAttachmentOp = new GetAttachmentOperation("employees/1-A", "attachmentName.txt", "Document", null);
+
+// Execute the operation by passing it to operations.send
+const attachmentResult = await documentStore.operations.send(getAttachmentOp);
+
+// Retrieve attachment content:
+attachmentResult.data
+    .pipe(fs.createWriteStream("attachment"))
+    .on("finish", () => \{
+        fs.readFile("attachment", "utf8", (err, data) => \{
+            if (err) \{
+                console.error("Error reading file:", err);
+                return;
+            \}
+            console.log("Content of attachment:", data);
+        \});
+    \});
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`const getAttachmentOp = new GetAttachmentOperation(documentId, name, type, changeVector);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|----------|-----------------------------------------------------------------------------------------------------|
+| __documentId__ | `string` | Document ID that contains the attachment. |
+| __name__ | `string` | Name of attachment to get. |
+| __type__ | `string` | Specify whether getting an attachment from a document or from a revision. (`"Document"` or `"Revision"`). |
+| __changeVector__ | `string` | The ChangeVector of the document or the revision to which the attachment belongs. Mandatory when getting an attachment from a revision. Used for concurrency checks (use `null` to skip the check). |
+
+| Return Value of `store.operations.send(getAttachmentOp)` | |
+|-----------------------------------------------------------|-----------------------------------------|
+| `AttachmentResult` | An instance of class `AttachmentResult` |
+
+
+
+{`class AttachmentResult \{
+    data;    // Stream containing the attachment content
+    details; // The AttachmentDetails object
+\}
+
+// The AttachmentDetails object:
+// =============================
+\{
+    // Change vector of the document that contains the attachment
+    changeVector; // string
+
+    // ID of the document that contains the attachment
+    documentId?; // string
+
+    // Name of attachment
+    name; // string;
+
+    // Hash of attachment
+    hash; // string;
+
+    // Content type of attachment
+    contentType; // string
+
+    // Size of attachment
+    size; // number
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_put-attachment-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/_put-attachment-csharp.mdx
new file mode 100644
index 0000000000..8e86ea993d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/_put-attachment-csharp.mdx
@@ -0,0 +1,71 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This operation is used to put an attachment to a document.
+
+## Syntax
+
+
+
+{`public PutAttachmentOperation(string documentId,
+    string name,
+    Stream stream,
+    string contentType = null,
+    string changeVector = null)
+`}
+
+
+
+
+
+{`public class AttachmentDetails : AttachmentName
+\{
+    public string ChangeVector;
+    public string DocumentId;
+\}
+
+public class AttachmentName
+\{
+    public string Name;
+    public string Hash;
+    public string ContentType;
+    public long Size;
+\}
+`}
+
+
+
+| Parameter | | |
+|------------------|--------|-------------------------------------------------------------------------|
+| **documentId** | string | ID of the document that will contain the attachment |
+| **name** | string | Name of an attachment |
+| **stream** | Stream | Stream containing the raw bytes of the attachment |
+| **contentType** | string | MIME type of attachment |
+| **changeVector** | string | Entity changeVector, used for concurrency checks (`null` to skip check) |
+
+| Return Value | |
+|------------------|-------------------------------------|
+| **ChangeVector** | Change vector of created attachment |
+| **DocumentId** | ID of document |
+| **Name** | Name of created attachment |
+| **Hash** | Hash of created attachment |
+| **ContentType** | MIME content type of attachment |
+| **Size** | Size of attachment |
+
+## Example
+
+
+
+{`AttachmentDetails attachmentDetails =
+    store.Operations.Send(
+        new PutAttachmentOperation("orders/1-A",
+            "invoice.pdf",
+            stream,
+            "application/pdf"));
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_put-attachment-java.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/_put-attachment-java.mdx
new file mode 100644
index 0000000000..4bfe0db7f3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/_put-attachment-java.mdx
@@ -0,0 +1,116 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This operation is used to put an attachment to a document.
+
+## Syntax
+
+
+
+{`PutAttachmentOperation(String documentId, String name, InputStream stream)
+
+PutAttachmentOperation(String documentId, String name, InputStream stream, String contentType)
+
+PutAttachmentOperation(String documentId, String name, InputStream stream, String contentType, String changeVector)
+`}
+
+
+
+
+
+{`public class AttachmentDetails extends AttachmentName \{
+    private String changeVector;
+    private String documentId;
+
+    public String getChangeVector() \{
+        return changeVector;
+    \}
+
+    public void setChangeVector(String changeVector) \{
+        this.changeVector = changeVector;
+    \}
+
+    public String getDocumentId() \{
+        return documentId;
+    \}
+
+    public void setDocumentId(String documentId) \{
+        this.documentId = documentId;
+    \}
+\}
+
+public class AttachmentName \{
+    private String name;
+    private String hash;
+    private String contentType;
+    private long size;
+
+    public String getName() \{
+        return name;
+    \}
+
+    public void setName(String name) \{
+        this.name = name;
+    \}
+
+    public String getHash() \{
+        return hash;
+    \}
+
+    public void setHash(String hash) \{
+        this.hash = hash;
+    \}
+
+    public String getContentType() \{
+        return contentType;
+    \}
+
+    public void setContentType(String contentType) \{
+        this.contentType = contentType;
+    \}
+
+    public long getSize() \{
+        return size;
+    \}
+
+    public void setSize(long size) \{
+        this.size = size;
+    \}
+\}
+`}
+
+
+
+| Parameter | | |
+|------------------| ------------- | ----- |
+| **documentId** | String | ID of the document that will contain the attachment |
+| **name** | String | Name of an attachment |
+| **stream** | InputStream | Stream containing the raw bytes of the attachment |
+| **contentType** | String | MIME type of attachment |
+| **changeVector** | String | Entity changeVector, used for concurrency checks (`null` to skip check) |
+
+| Return Value | |
+| ------------- | ----- |
+| **ChangeVector** | Change vector of created attachment |
+| **DocumentId** | ID of document |
+| **Name** | Name of created attachment |
+| **Hash** | Hash of created attachment |
+| **ContentType** | MIME content type of attachment |
+| **Size** | Size of attachment |
+
+## Example
+
+
+
+{`AttachmentDetails attachmentDetails = store
+    .operations().send(new PutAttachmentOperation("orders/1-A",
+        "invoice.pdf",
+        stream,
+        "application/pdf"));
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/_put-attachment-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/_put-attachment-nodejs.mdx
new file mode 100644
index 0000000000..b81b0f1ab2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/_put-attachment-nodejs.mdx
@@ -0,0 +1,89 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `PutAttachmentOperation` to add an attachment to a document.
+
+* In this page:
+
+    * [Put attachment example](../../../client-api/operations/attachments/put-attachment.mdx#put-attachment-example)
+    * [Syntax](../../../client-api/operations/attachments/put-attachment.mdx#syntax)
+
+
+## Put attachment example
+
+
+
+{`// Prepare content to attach
+const text = "Some content...";
+const byteArray = Buffer.from(text);
+
+// Define the put attachment operation
+const putAttachmentOp = new PutAttachmentOperation(
+    "employees/1-A", "attachmentName.txt", byteArray, "text/plain");
+
+// Execute the operation by passing it to operations.send
+const attachmentDetails = await documentStore.operations.send(putAttachmentOp);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+const putAttachmentOp = new PutAttachmentOperation(documentId, name, stream);
+const putAttachmentOp = new PutAttachmentOperation(documentId, name, stream, contentType);
+const putAttachmentOp = new PutAttachmentOperation(documentId, name, stream, contentType, changeVector);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|------------------------------|-----------------------------------------------------------------------------------|
+| __documentId__ | `string` | Document ID to which the attachment will be added |
+| __name__ | `string` | Name of attachment to put |
+| __stream__ | `stream.Readable` / `Buffer` | A stream that contains the raw bytes of the attachment |
+| __contentType__ | `string` | Content type of attachment |
+| __changeVector__ | `string` | ChangeVector of attachment, used for concurrency checks (`null` to skip check) |
+
+| Return Value of `store.operations.send(putAttachmentOp)` | |
+|----------------------------------------------------------|---------------------------------------------|
+| `object` | An object with the new attachment's details |
+
+
+
+{`// The AttachmentDetails object:
+// =============================
+\{
+    // Change vector of attachment
+    changeVector; // string
+
+    // ID of the document that contains the attachment
+    documentId?; // string
+
+    // Name of attachment
+    name; // string;
+
+    // Hash of attachment
+    hash; // string;
+
+    // Content type of attachment
+    contentType; // string
+
+    // Size of attachment
+    size; // number
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/delete-attachment.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/delete-attachment.mdx
new file mode 100644
index 0000000000..b2e2842ac0
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/delete-attachment.mdx
@@ -0,0 +1,39 @@
+---
+title: "Delete Attachment Operation"
+hide_table_of_contents: true
+sidebar_label: Delete Attachment
+sidebar_position: 2
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import DeleteAttachmentCsharp from './_delete-attachment-csharp.mdx';
+import DeleteAttachmentJava from './_delete-attachment-java.mdx';
+import DeleteAttachmentNodejs from './_delete-attachment-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/get-attachment.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/get-attachment.mdx
new file mode 100644
index 0000000000..e7f476f546
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/get-attachment.mdx
@@ -0,0 +1,39 @@
+---
+title: "Get Attachment Operation"
+hide_table_of_contents: true
+sidebar_label: Get Attachment
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GetAttachmentCsharp from './_get-attachment-csharp.mdx';
+import GetAttachmentJava from './_get-attachment-java.mdx';
+import GetAttachmentNodejs from './_get-attachment-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/attachments/put-attachment.mdx b/versioned_docs/version-7.1/client-api/operations/attachments/put-attachment.mdx
new file mode 100644
index 0000000000..91e8871301
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/attachments/put-attachment.mdx
@@ -0,0 +1,39 @@
+---
+title: "Put Attachment Operation"
+hide_table_of_contents: true
+sidebar_label: Put Attachment
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import PutAttachmentCsharp from './_put-attachment-csharp.mdx';
+import PutAttachmentJava from './_put-attachment-java.mdx';
+import PutAttachmentNodejs from './_put-attachment-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/common/_category_.json b/versioned_docs/version-7.1/client-api/operations/common/_category_.json
new file mode 100644
index 0000000000..8ab6dded8e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/common/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 2,
+  "label": "Common Operations"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-csharp.mdx
new file mode 100644
index 0000000000..90c216fc7e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-csharp.mdx
@@ -0,0 +1,386 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `DeleteByQueryOperation` to delete a large number of documents that match the provided query in a single server call.
+
+* **Dynamic behavior**:
+ The deletion of documents matching the specified query is performed in batches of size 1024.
+ During the deletion process, documents that are added/modified **after** the delete operation has started
+ may also be deleted if they match the query criteria.
+
+* **Background operation**:
+ This operation is performed in the background on the server.
+ If needed, you can wait for the operation to complete. See: [Wait for completion](../../../client-api/operations/what-are-operations.mdx#wait-for-completion).
+
+* **Operation scope**:
+ `DeleteByQueryOperation` runs as a single-node transaction, not a cluster-wide transaction. As a result,
+ if you use this operation to delete documents that were originally created using a cluster-wide transaction,
+ their associated [Atomic guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx) will Not be deleted.
+
+    * To avoid issues when recreating such documents using a cluster-wide session,
+      see [Best practice when storing a document](../../../client-api/session/cluster-transaction/atomic-guards.mdx#best-practice-when-storing-a-document-in-a-cluster-wide-transaction).
+    * To learn more about the differences between transaction types,
+      see [Cluster-wide transaction vs. Single-node transaction](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction).
+
+* In this article:
+    * [Delete by dynamic query](../../../client-api/operations/common/delete-by-query.mdx#delete-by-dynamic-query)
+    * [Delete by index query](../../../client-api/operations/common/delete-by-query.mdx#delete-by-index-query)
+    * [Syntax](../../../client-api/operations/common/delete-by-query.mdx#syntax)
+
+
+
+## Delete by dynamic query
+
+
+
+##### Delete all documents in a collection
+
+
+
+
+{`// Define the delete by query operation, pass an RQL querying a collection
+var deleteByQueryOp = new DeleteByQueryOperation("from 'Orders'");
+
+// Execute the operation by passing it to Operations.Send
+var operation = store.Operations.Send(deleteByQueryOp);
+
+// All documents in collection 'Orders' will be deleted from the server.
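+
+// The Send call returns an Operation object. If needed, the deletion can be
+// awaited on the server side, as demonstrated in the "Delete with options"
+// example further below:
+// operation.WaitForCompletion(TimeSpan.FromSeconds(15));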
+`}
+
+
+
+
+{`// Define the delete by query operation, pass an RQL querying a collection
+var deleteByQueryOp = new DeleteByQueryOperation("from 'Orders'");
+
+// Execute the operation by passing it to Operations.SendAsync
+var result = await store.Operations.SendAsync(deleteByQueryOp);
+
+// All documents in collection 'Orders' will be deleted from the server.
+`}
+
+
+
+
+{`from "Orders"
+`}
+
+
+
+
+
+
+##### Delete with filtering
+
+
+
+
+{`// Define the delete by query operation, pass an RQL querying a collection
+var deleteByQueryOp = new DeleteByQueryOperation("from 'Orders' where Freight > 30");
+
+// Execute the operation by passing it to Operations.Send
+var operation = store.Operations.Send(deleteByQueryOp);
+
+// * All documents matching the specified RQL will be deleted from the server.
+
+// * Since the dynamic query was made with a filtering condition,
+//   an auto-index is generated (if no other matching auto-index already exists).
+`}
+
+
+
+
+{`// Define the delete by query operation, pass an RQL querying a collection
+var deleteByQueryOp = new DeleteByQueryOperation("from 'Orders' where Freight > 30");
+
+// Execute the operation by passing it to Operations.SendAsync
+var result = await store.Operations.SendAsync(deleteByQueryOp);
+
+// * All documents matching the provided RQL will be deleted from the server.
+
+// * Since a dynamic query was made with a filtering condition,
+//   an auto-index is generated (if no other matching auto-index already exists).
+`}
+
+
+
+
+{`from "Orders" where Freight > 30
+`}
+
+
+
+
+
+
+## Delete by index query
+
+* `DeleteByQueryOperation` can only be performed on a **Map-index**.
+  An exception is thrown when executing the operation on a Map-Reduce index.
+
+* A few overloads are available, see the following examples:
+
+
+##### A sample Map-index
+
+
+
+{`// The index definition:
+// =====================
+
+public class Products_ByPrice : AbstractIndexCreationTask<Product>
+\{
+    public class IndexEntry
+    \{
+        public decimal Price \{ get; set; \}
+    \}
+
+    public Products_ByPrice()
+    \{
+        Map = products => from product in products
+                          select new IndexEntry
+                          \{
+                              Price = product.PricePerUnit
+                          \};
+    \}
+\}
+`}
+
+
+
+
+
+##### Delete documents via an index query
+
+
+
+
+{`// Define the delete by query operation, pass an RQL querying the index
+var deleteByQueryOp =
+    new DeleteByQueryOperation("from index 'Products/ByPrice' where Price > 10");
+
+// Execute the operation by passing it to Operations.Send
+var operation = store.Operations.Send(deleteByQueryOp);
+
+// All documents with document-field PricePerUnit > 10 will be deleted from the server.
+`}
+
+
+
+
+{`// Define the delete by query operation
+var deleteByQueryOp = new DeleteByQueryOperation(new IndexQuery
+{
+    // Provide an RQL querying the index
+    Query = "from index 'Products/ByPrice' where Price > 10"
+});
+
+// Execute the operation by passing it to Operations.Send
+var operation = store.Operations.Send(deleteByQueryOp);
+
+// All documents with document-field PricePerUnit > 10 will be deleted from the server.
+`}
+
+
+
+
+{`// Define the delete by query operation
+var deleteByQueryOp =
+    // Pass parameters:
+    // * The index name
+    // * A filtering expression on the index-field
+    new DeleteByQueryOperation<Products_ByPrice.IndexEntry>("Products/ByPrice",
+        x => x.Price > 10);
+
+// Execute the operation by passing it to Operations.Send
+var operation = store.Operations.Send(deleteByQueryOp);
+
+// All documents with document-field PricePerUnit > 10 will be deleted from the server.
+`}
+
+
+
+
+{`// Define the delete by query operation
+var deleteByQueryOp =
+    // Pass param:
+    // * A filtering expression on the index-field
+    new DeleteByQueryOperation<Products_ByPrice.IndexEntry, Products_ByPrice>(
+        x => x.Price > 10);
+
+// Execute the operation by passing it to Operations.Send
+var operation = store.Operations.Send(deleteByQueryOp);
+
+// All documents with document-field PricePerUnit > 10 will be deleted from the server.
+`}
+
+
+
+
+{`from index "Products/ByPrice" where Price > 10
+`}
+
+
+
+
+
+
+##### Delete with options
+
+
+
+
+{`// Define the delete by query operation
+var deleteByQueryOp = new DeleteByQueryOperation(
+    // QUERY: Specify the query
+    new IndexQuery
+    {
+        Query = "from index 'Products/ByPrice' where Price > 10"
+    },
+    // OPTIONS: Specify the options for the operation
+    // (See all other available options in the Syntax section below)
+    new QueryOperationOptions
+    {
+        // Allow the operation to operate even if index is stale
+        AllowStale = true,
+        // Get info in the operation result about documents that were deleted
+        RetrieveDetails = true
+    });
+
+// Execute the operation by passing it to Operations.Send
+Operation operation = store.Operations.Send(deleteByQueryOp);
+
+// Wait for operation to complete
+var result = operation.WaitForCompletion<BulkOperationResult>(TimeSpan.FromSeconds(15));
+
+// * All documents with document-field PricePerUnit > 10 will be deleted from the server.
+
+// * Details about deleted documents are available:
+var details = result.Details;
+var documentIdThatWasDeleted = details[0].ToJson()["Id"];
+`}
+
+
+
+
+{`// Define the delete by query operation
+var deleteByQueryOp = new DeleteByQueryOperation(
+    // QUERY: Specify the query
+    new IndexQuery
+    {
+        Query = "from index 'Products/ByPrice' where Price > 10"
+    },
+    // OPTIONS: Specify the options for the operation
+    // (See all other available options in the Syntax section below)
+    new QueryOperationOptions
+    {
+        // Allow the operation to operate even if index is stale
+        AllowStale = true,
+        // Get info in the operation result about documents that were deleted
+        RetrieveDetails = true
+    });
+
+// Execute the operation by passing it to Operations.SendAsync
+Operation operation = await store.Operations.SendAsync(deleteByQueryOp);
+
+// Wait for operation to complete
+BulkOperationResult result =
+    await operation.WaitForCompletionAsync<BulkOperationResult>(TimeSpan.FromSeconds(15))
+        .ConfigureAwait(false);
+
+// * All documents with document-field PricePerUnit > 10 will be deleted from the server.
+
+// * Details about deleted documents are available:
+var details = result.Details;
+var documentIdThatWasDeleted = details[0].ToJson()["Id"];
+`}
+
+
+
+
+{`from index "Products/ByPrice" where Price > 10
+`}
+
+
+
+
+* Specifying `QueryOperationOptions` is also supported by the other overload methods, see the Syntax section below.
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+// ====================
+
+DeleteByQueryOperation DeleteByQueryOperation(
+    string queryToDelete);
+
+DeleteByQueryOperation DeleteByQueryOperation(
+    IndexQuery queryToDelete,
+    QueryOperationOptions options = null);
+
+DeleteByQueryOperation DeleteByQueryOperation<T>(
+    string indexName,
+    Expression<Func<T, bool>> expression,
+    QueryOperationOptions options = null);
+
+DeleteByQueryOperation DeleteByQueryOperation<T, TIndexCreator>(
+    Expression<Func<T, bool>> expression,
+    QueryOperationOptions options = null)
+    where TIndexCreator : AbstractIndexCreationTask, new();
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------|-----------------------------|------------------------------------------------------------|
+| **queryToDelete** | string | The RQL query to perform |
+| **queryToDelete** | `IndexQuery` | Holds all the information required to query an index |
+| **indexName** | string | The name of the index queried |
+| **expression** | `Expression<Func<T, bool>>` | The expression that defines the query criteria |
+| **options** | `QueryOperationOptions` | Object holding different setting options for the operation |
+
+
+
+{`public class QueryOperationOptions
+\{
+    // Indicates whether operations are allowed on stale indexes.
+    // DEFAULT: false
+    public bool AllowStale \{ get; set; \}
+
+    // If AllowStale is set to false and index is stale,
+    // then this is the maximum timeout to wait for index to become non-stale.
+    // If timeout is exceeded then exception is thrown.
+    // DEFAULT: null (if index is stale then exception is thrown immediately)
+    public TimeSpan? StaleTimeout \{ get; set; \}
+
+    // Limits the number of base operations per second allowed.
+    // DEFAULT: no limit
+    public int? MaxOpsPerSecond \{ get; set; \}
+
+    // Determines whether operation details about each document should be returned by server.
+    // DEFAULT: false
+    public bool RetrieveDetails \{ get; set; \}
+
+    // Ignore the maximum number of statements a script can execute.
+    // Note: this is only relevant for the PatchByQueryOperation.
+    public bool IgnoreMaxStepsForScript \{ get; set; \}
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-java.mdx b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-java.mdx
new file mode 100644
index 0000000000..0797865693
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-java.mdx
@@ -0,0 +1,129 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+`DeleteByQueryOperation` gives you the ability to delete a large number of documents with a single query.
+This operation is performed in the background on the server.
+
+## Syntax
+
+
+
+{`public DeleteByQueryOperation(IndexQuery queryToDelete)
+
+public DeleteByQueryOperation(IndexQuery queryToDelete, QueryOperationOptions options)
+`}
+
+
+
+| Parameters | Type | Description |
+| ------------- | ------------- | ----- |
+| **queryToDelete** | IndexQuery | Holds all the information required to query an index |
+| **options** | QueryOperationOptions | Holds different setting options for base operations |
+
+## Example I
+
+
+
+
+{`// remove all documents from the server where name == 'Bob', using a dynamic query on the Persons collection
+store
+    .operations()
+    .send(new DeleteByQueryOperation(new IndexQuery("from Persons where name = 'Bob'")));
+`}
+
+
+
+
+{`from Persons where name = 'Bob'
+`}
+
+
+
+
+
+## Example II
+
+
+
+
+{`// remove all documents from the server where age < 35, using the Person/ByAge index
+store
+    .operations()
+    .send(new DeleteByQueryOperation(new IndexQuery("from index 'Person/ByAge' where age < 35")));
+`}
+
+
+
+
+{`from index 'Person/ByAge' where age < 35
+`}
+
+
+
+
+## Example III
+
+
+
+
+{`// delete multiple docs with specific ids in a single run without loading them into the session
+Operation operation = store
+    .operations()
+    .sendAsync(new DeleteByQueryOperation(new IndexQuery(
+        "from People u where id(u) in ('people/1-A', 'people/3-A')"
+    )));
+`}
+
+
+
+
+{`from People u where id(u) in ('people/1-A', 'people/3-A')
+`}
+
+
+
+
+
+`DeleteByQueryOperation` is performed in the background on the server.
+You have the option to **wait** for it using `waitForCompletion`.
+
+
+
+
+{`// remove all documents from the server where Name == 'Bob' and Age >= 29, querying the People collection
+Operation operation = store.operations()
+    .sendAsync(new DeleteByQueryOperation(new IndexQuery(
+        "from People where Name = 'Bob' and Age >= 29"
+    )));
+
+operation.waitForCompletion();
+`}
+
+
+
+
+{`from People where Name = 'Bob' and Age >= 29
+`}
+
+
+
+
+
+## Remarks
+
+
+`DeleteByQueryOperation` can only be performed on a map index. Executing it on a map-reduce index will lead to an exception.
+
+
+
+
+The deletion of documents matching a specified query is run in batches of size 1024. RavenDB doesn't perform concurrency checks during the operation,
+so it is possible that a document is updated or deleted while the operation is running.
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-nodejs.mdx
new file mode 100644
index 0000000000..2995c81584
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-nodejs.mdx
@@ -0,0 +1,253 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `DeleteByQueryOperation` to delete a large number of documents that match the provided query in a single server call.
+
+* **Dynamic behavior**:
+ The deletion of documents matching the specified query is performed in batches of size 1024.
+ During the deletion process, documents that are added/modified **after** the delete operation has started
+ may also be deleted if they match the query criteria.
+
+* **Background operation**:
+ This operation is performed in the background on the server.
+ If needed, you can wait for the operation to complete. See: [Wait for completion](../../../client-api/operations/what-are-operations.mdx#wait-for-completion).
+ +* **Operation scope**: + `DeleteByQueryOperation` runs as a single-node transaction, not a cluster-wide transaction. As a result, + if you use this operation to delete documents that were originally created using a cluster-wide transaction, + their associated [Atomic guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx) will Not be deleted. + + * To avoid issues when recreating such documents using a cluster-wide session, + see [Best practice when storing a document](../../../client-api/session/cluster-transaction/atomic-guards.mdx#best-practice-when-storing-a-document-in-a-cluster-wide-transaction). + * To learn more about the differences between transaction types, + see [Cluster-wide transaction vs. Single-node transaction](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction). +* In this article: + * [Delete by dynamic query](../../../client-api/operations/common/delete-by-query.mdx#delete-by-dynamic-query) + * [Delete by index query](../../../client-api/operations/common/delete-by-query.mdx#delete-by-index-query) + * [Syntax](../../../client-api/operations/common/delete-by-query.mdx#syntax) + + + +## Delete by dynamic query + + + +##### Delete all documents in collection + + + + +{`// Define the delete by query operation, pass an RQL querying a collection +const deleteByQueryOp = new DeleteByQueryOperation("from 'Orders'"); + +// Execute the operation by passing it to operations.send +const operation = await store.operations.send(deleteByQueryOp); + +// All documents in collection 'Orders' will be deleted from the server. +`} + + + + +{`from "Orders" +`} + + + + + + + +##### Delete with filtering + + + + +{`// Define the delete by query operation, pass an RQL querying a collection +const deleteByQueryOp = new DeleteByQueryOperation("from 'Orders' where Freight > 30"); + +// Execute the operation by passing it to operations.send +const operation = await store.operations.send(deleteByQueryOp); + +// * All documents matching the specified RQL will be deleted from the server. + +// * Since the dynamic query was made with a filtering condition, +// an auto-index is generated (if no other matching auto-index already exists). +`} + + + + +{`from "Orders" where Freight > 30 +`} + + + + + + + +## Delete by index query + +* `DeleteByQueryOperation` can only be performed on a **Map-index**. + An exception is thrown when executing the operation on a Map-Reduce index. + +* A few overloads are available, see the following examples: + + +##### A sample Map-index + + + +{`// The index definition: +// ===================== + +class Products_ByPrice extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + this.map("products", product => \{ + return \{ + Price: product.PricePerUnit + \}; + \}); + \} +\} +`} + + + + + + +##### Delete documents via an index query + + + + +{`// Define the delete by query operation, pass an RQL querying the index +const deleteByQueryOp = + new DeleteByQueryOperation("from index 'Products/ByPrice' where Price > 10"); + +// Execute the operation by passing it to operations.send +const operation = await store.operations.send(deleteByQueryOp); + +// All documents with document-field PricePerUnit > 10 will be deleted from the server. 
+`}
+
+
+
+
+{`// Define the index query, provide an RQL querying the index
+const indexQuery = new IndexQuery();
+indexQuery.query = "from index 'Products/ByPrice' where Price > 10";
+
+// Define the delete by query operation
+const deleteByQueryOp = new DeleteByQueryOperation(indexQuery);
+
+// Execute the operation by passing it to operations.send
+const operation = await store.operations.send(deleteByQueryOp);
+
+// All documents with document-field PricePerUnit > 10 will be deleted from the server.
+`}
+
+
+
+
+{`from index "Products/ByPrice" where Price > 10
+`}
+
+
+
+
+
+
+##### Delete with options
+
+
+
+
+{`// QUERY: Define the index query, provide an RQL querying the index
+const indexQuery = new IndexQuery();
+indexQuery.query = "from index 'Products/ByPrice' where Price > 10";
+
+// OPTIONS: Define the operation options
+// (See all available options in the Syntax section below)
+const options = {
+    // Allow the operation to operate even if index is stale
+    allowStale: true,
+    // Limit the number of base operations per second allowed.
+    maxOpsPerSecond: 500
+}
+
+// Define the delete by query operation
+const deleteByQueryOp = new DeleteByQueryOperation(indexQuery, options);
+
+// Execute the operation by passing it to operations.send
+const operation = await store.operations.send(deleteByQueryOp);
+
+// All documents with document-field PricePerUnit > 10 will be deleted from the server.
+`}
+
+
+
+
+{`from index "Products/ByPrice" where Price > 10
+`}
+
+
+
+
+* Specifying `options` is also supported by the other overload methods, see the Syntax section below.
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+// ====================
+const deleteByQueryOp = new DeleteByQueryOperation(indexQuery);
+const deleteByQueryOp = new DeleteByQueryOperation(indexQuery, options);
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------|--------------|------------------------------------------------------------|
+| **queryToDelete** | `string` | The RQL query to perform |
+| **queryToDelete** | `IndexQuery` | Holds all the information required to query an index |
+| **options** | `object` | Object holding different setting options for the operation |
+
+
+
+{`// options object
+\{
+    // Indicates whether operations are allowed on stale indexes.
+    // DEFAULT: false
+    allowStale, // boolean
+
+    // If AllowStale is set to false and index is stale,
+    // then this is the maximum timeout to wait for index to become non-stale.
+    // If timeout is exceeded then exception is thrown.
+    // DEFAULT: null (if index is stale then exception is thrown immediately)
+    staleTimeout, // number
+
+    // Limits the number of base operations per second allowed.
+    // DEFAULT: null (no limit)
+    maxOpsPerSecond, // number
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-php.mdx b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-php.mdx
new file mode 100644
index 0000000000..e45b7da850
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-php.mdx
@@ -0,0 +1,294 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `DeleteByQueryOperation` to delete a large number of documents that match the provided query in a single server call.
+
+* **Dynamic behavior**:
+ The deletion of documents matching the specified query is performed in batches of size 1024.
+ During the deletion process, documents that are added/modified **after** the delete operation has started + may also be deleted if they match the query criteria. + +* **Background operation**: + This operation is performed in the background on the server. + If needed, you can wait for the operation to complete. See: [Wait for completion](../../../client-api/operations/what-are-operations.mdx#wait-for-completion). + +* **Operation scope**: + `DeleteByQueryOperation` runs as a single-node transaction, not a cluster-wide transaction. As a result, + if you use this operation to delete documents that were originally created using a cluster-wide transaction, + their associated [Atomic guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx) will Not be deleted. + + * To avoid issues when recreating such documents using a cluster-wide session, + see [Best practice when storing a document](../../../client-api/session/cluster-transaction/atomic-guards.mdx#best-practice-when-storing-a-document-in-a-cluster-wide-transaction). + * To learn more about the differences between transaction types, + see [Cluster-wide transaction vs. Single-node transaction](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction). +* In this article: + * [Delete by dynamic query](../../../client-api/operations/common/delete-by-query.mdx#delete-by-dynamic-query) + * [Delete by index query](../../../client-api/operations/common/delete-by-query.mdx#delete-by-index-query) + * [Syntax](../../../client-api/operations/common/delete-by-query.mdx#syntax) + + + +## Delete by dynamic query + + + +##### Delete all documents in a collection + + + + +{`// Define the delete by query operation, pass an RQL querying a collection +$deleteByQueryOp = new DeleteByQueryOperation("from 'Orders'"); + +// Execute the operation by passing it to Operations.Send +$operation = $store->operations()->send($deleteByQueryOp); + +// All documents in collection 'Orders' will be deleted from the server. +`} + + + + +{`from "Orders" +`} + + + + + + + +##### Delete with filtering + + + + +{`// Define the delete by query operation, pass an RQL querying a collection +$deleteByQueryOp = new DeleteByQueryOperation("from 'Orders' where Freight > 30"); + +// Execute the operation by passing it to Operations.Send +$operation = $store->operations()->send($deleteByQueryOp); + +// * All documents matching the specified RQL will be deleted from the server. + +// * Since the dynamic query was made with a filtering condition, +// an auto-index is generated (if no other matching auto-index already exists). +`} + + + + +{`from "Orders" where Freight > 30 +`} + + + + + + + +## Delete by index query + +* `DeleteByQueryOperation` can only be performed on a **Map-index**. + An exception is thrown when executing the operation on a Map-Reduce index. 
+ +* A few overloads are available, see the following examples: + + +##### A sample Map-index + + + +{`// The index definition: +// ===================== + +class IndexEntry +\{ + public float $price; + + public function getPrice(): float + \{ + return $this->price; + \} + + public function setPrice(float $price): void + \{ + $this->price = $price; + \} +\} + +class Products_ByPrice extends AbstractIndexCreationTask +\{ + public function __construct() + \{ + parent::__construct(); + + $this->map = "from product in products select new \{price = product.PricePerUnit\}"; + \} +\} +`} + + + + + + +##### Delete documents via an index query + + + + +{`// Define the delete by query operation, pass an RQL querying the index +$deleteByQueryOp = new DeleteByQueryOperation("from index 'Products/ByPrice' where Price > 10"); + +// Execute the operation by passing it to Operations.Send +$operation = $store->operations()->send($deleteByQueryOp); + + +// All documents with document-field PricePerUnit > 10 will be deleted from the server. +`} + + + + +{`// Define the delete by query operation +$deleteByQueryOp = new DeleteByQueryOperation( + // Provide an RQL querying the index + new IndexQuery("from index 'Products/ByPrice' where Price > 10") +); + +// Execute the operation by passing it to Operations.Send +$operation = $store->operations()->send($deleteByQueryOp); + +// All documents with document-field PricePerUnit > 10 will be deleted from the server. +`} + + + + +{`from index "Products/ByPrice" where Price > 10 +`} + + + + + + + +##### Delete with options + + + + +{`// OPTIONS: Specify the options for the operation +// (See all other available options in the Syntax section below) +$options = new QueryOperationOptions(); +// Allow the operation to operate even if index is stale +$options->setAllowStale(true); +// Get info in the operation result about documents that were deleted +$options->setRetrieveDetails(true); + +// Define the delete by query operation +$deleteByQueryOp = new DeleteByQueryOperation( + new IndexQuery("from index 'Products/ByPrice' where Price > 10"), // QUERY: Specify the query + $options // OPTIONS: +); + +// Execute the operation by passing it to Operations.Send +/** @var Operation $operation */ +$operation = $store->operations()->sendAsync($deleteByQueryOp); + +// Wait for operation to complete +/** @var BulkOperationResult $result */ +$result = $operation->waitForCompletion(Duration::ofSeconds(15)); + +// * All documents with document-field PricePerUnit > 10 will be deleted from the server. + +// * Details about deleted documents are available: +$details = $result->getDetails(); +$documentIdThatWasDeleted = $details[0]->getId(); +`} + + + + +{`from index "Products/ByPrice" where Price > 10 +`} + + + + + + + +## Syntax + + + +{`class DeleteByQueryOperation implements OperationInterface +\{ + /** + * Usage: + * - new DeleteByQueryOperation("from 'Orders'") + * - new DeleteByQueryOperation("from 'Orders'", $options) + * + * - new DeleteByQueryOperation(new IndexQuery("from 'Orders'")) + * - new DeleteByQueryOperation(new IndexQuery("from 'Orders'"), $options) + * + * @param IndexQuery|string|null $queryToDelete + * @param QueryOperationOptions|null $options + */ + public function __construct(IndexQuery|string|null $queryToDelete, ?QueryOperationOptions $options = null) \{ + // ... + \} + + // ... 
+\} +`} + + + +| Parameter | Type | Description | +|--------------------|--------------------------|------------------------------------------------------------| +| **$queryToDelete** | `string` | The RQL query to perform | +| **$queryToDelete** | `IndexQuery` | Holds all the information required to query an index | +| **$options** | `?QueryOperationOptions` | Object holding different setting options for the operation | + + + +{`class QueryOperationOptions +\{ + // Indicates whether operations are allowed on stale indexes. + private bool $allowStale = false; + + // Limits the number of base operations per second allowed. + // DEFAULT: no limit + private ?int $maxOpsPerSecond = null; + + // If AllowStale is set to false and index is stale, + // then this is the maximum timeout to wait for index to become non-stale. + // If timeout is exceeded then exception is thrown. + // DEFAULT: null (if index is stale then exception is thrown immediately) + private ?Duration $staleTimeout = null; + + // Determines whether operation details about each document should be returned by server. + private bool $retrieveDetails = false; + + // Ignore the maximum number of statements a script can execute. + // Note: this is only relevant for the patchByQueryOperation. + private bool $ignoreMaxStepsForScript = false; + + // getters and setters +\} +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-python.mdx b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-python.mdx new file mode 100644 index 0000000000..2a6827af39 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/common/_delete-by-query-python.mdx @@ -0,0 +1,204 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteByQueryOperation` to delete a large number of documents that match the provided query in a single server call. + +* **Dynamic behavior**: + The deletion of documents matching the specified query is performed in batches of size 1024. + During the deletion process, documents that are added/modified **after** the delete operation has started + may also be deleted if they match the query criteria. + +* **Background operation**: + This operation is performed in the background on the server. + +* **Operation scope**: + `DeleteByQueryOperation` runs as a single-node transaction, not a cluster-wide transaction. As a result, + if you use this operation to delete documents that were originally created using a cluster-wide transaction, + their associated [Atomic guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx) will Not be deleted. + + * To avoid issues when recreating such documents using a cluster-wide session, + see [Best practice when storing a document](../../../client-api/session/cluster-transaction/atomic-guards.mdx#best-practice-when-storing-a-document-in-a-cluster-wide-transaction). + * To learn more about the differences between transaction types, + see [Cluster-wide transaction vs. Single-node transaction](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction). 
+* In this article: + * [Delete by dynamic query](../../../client-api/operations/common/delete-by-query.mdx#delete-by-dynamic-query) + * [Delete by index query](../../../client-api/operations/common/delete-by-query.mdx#delete-by-index-query) + + + +## Delete by dynamic query + + + +##### Delete all documents in a collection + + + + +{`# Define the delete by query operation, pass an RQL querying a collection +delete_by_query_op = DeleteByQueryOperation("from 'Orders'") + +# Execute the operation by passing it to Operation.send_async +operation = store.operations.send_async(delete_by_query_op) + +# All documents in collection 'Orders' will be deleted from the server +`} + + + + +{`from "Orders" +`} + + + + + + + +##### Delete with filtering + + + + +{`# Define the delete by query operation, pass an RQL querying a collection +delete_by_query_op = DeleteByQueryOperation("from 'Orders' where Freight > 30") + +# Execute the operation by passing it to Operation.send_async +operation = store.operations.send_async(delete_by_query_op) + +# * All documents matching the specified RQL will be deleted from the server. +# +# * Since the dynamic query was made with a filtering condition, +# an auto-index is generated (if no other matching auto-index already exists). +`} + + + + +{`from "Orders" where Freight > 30 +`} + + + + + + + +## Delete by index query + +* `DeleteByQueryOperation` can only be performed on a **Map-index**. + An exception is thrown when executing the operation on a Map-Reduce index. + +* A few overloads are available, see the following examples: + + +##### A sample Map-index + + + +{`# The index definition: +# ===================== +class ProductsByPrice(AbstractIndexCreationTask): + class IndexEntry: + def __init__(self, price: int): + self.price = price + + def __init__(self): + super().__init__() + self.map = "from product in products select new \{price = product.PricePerUnit\}" +`} + + + + + + +##### Delete documents via an index query + + + + +{`# Define the delete by query operation, pass an RQL querying the index +delete_by_query_op = DeleteByQueryOperation("from index 'Products/ByPrice' where Price > 10") + +# Execute the operation by passing it to Operation.send_async +operation = store.operations.send_async(delete_by_query_op) + +# All documents with document-field PricePerUnit > 10 will be deleted from the server. +`} + + + + +{`# Define the delete by query operation +delete_by_query_op = DeleteByQueryOperation( + IndexQuery(query="from index 'Products/ByPrice' where Price > 10") +) + +# Execute the operation by passing it to Operation.send_async +operation = store.operations.send_async(delete_by_query_op) + +# All documents with document-field PricePerUnit > 10 will be deleted from the server. 
+`}
+
+
+
+
+{`from index "Products/ByPrice" where Price > 10
+`}
+
+
+
+
+
+
+
+##### Delete with options
+
+
+
+
+{`# Define the delete by query operation
+delete_by_query_op = DeleteByQueryOperation(
+    # QUERY: Specify the query
+    IndexQuery(query="from index 'Products/ByPrice' where Price > 10"),
+    # OPTIONS: Specify the options for the operation
+    # (See all other available options in the QueryOperationOptions class)
+    QueryOperationOptions(
+        # Allow the operation to operate even if index is stale
+        allow_stale=True,
+        # Get info in the operation result about documents that were deleted
+        retrieve_details=True,
+    ),
+)
+
+# Execute the operation by passing it to operations.send_async
+operation = store.operations.send_async(delete_by_query_op)
+
+# Wait for the operation to complete and get its result
+result = operation.wait_for_completion()
+
+# * All documents with document-field PricePerUnit > 10 will be deleted from the server
+
+# * Details about deleted documents are available:
+details = result.details
+document_id_that_was_deleted = details[0]["Id"]
+`}
+
+
+
+
+{`from index "Products/ByPrice" where Price > 10
+`}
+
+
+
+
+* Specifying `QueryOperationOptions` is also supported by the other overload methods shown above.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/common/delete-by-query.mdx b/versioned_docs/version-7.1/client-api/operations/common/delete-by-query.mdx
new file mode 100644
index 0000000000..f242bdf5c7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/common/delete-by-query.mdx
@@ -0,0 +1,55 @@
+---
+title: "Delete by Query Operation"
+hide_table_of_contents: true
+sidebar_label: Delete by Query
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import DeleteByQueryCsharp from './_delete-by-query-csharp.mdx';
+import DeleteByQueryJava from './_delete-by-query-java.mdx';
+import DeleteByQueryPython from './_delete-by-query-python.mdx';
+import DeleteByQueryPhp from './_delete-by-query-php.mdx';
+import DeleteByQueryNodejs from './_delete-by-query-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/counters/_category_.json b/versioned_docs/version-7.1/client-api/operations/counters/_category_.json
new file mode 100644
index 0000000000..1c2a845242
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/counters/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 6,
+  "label": "Counters"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-csharp.mdx
new file mode 100644
index 0000000000..37dea336d3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-csharp.mdx
@@ -0,0 +1,444 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+*CounterBatchOperation* allows you to operate on multiple counters (`Increment`, `Get`, `Delete`) of different documents in a **single request**.
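+
+For a quick overview, here is a minimal sketch, assuming an initialized `DocumentStore` named `store` and an existing document "users/1"; the Syntax and Examples sections below cover each building block in detail:
+
+
+
+{`// A minimal sketch, assuming an initialized DocumentStore named 'store'
+// and an existing document "users/1"
+var batch = new CounterBatch
+\{
+    Documents = new List<DocumentCountersOperation>
+    \{
+        new DocumentCountersOperation
+        \{
+            DocumentId = "users/1",
+            Operations = new List<CounterOperation>
+            \{
+                new CounterOperation
+                \{
+                    Type = CounterOperationType.Increment,
+                    CounterName = "likes",
+                    Delta = 10
+                \}
+            \}
+        \}
+    \}
+\};
+
+// A single server call executes all the operations in the batch
+CountersDetail result = store.Operations.Send(new CounterBatchOperation(batch));
+`}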
+ +## Syntax + + + +{`public CounterBatchOperation(CounterBatch counterBatch) +`} + + + +| Parameter | | | +|------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **counterBatch** | `CounterBatch` | An object that holds a list of `DocumentCountersOperation`.
Each element in the list describes the counter operations to perform for a specific document | + + + +{`public class CounterBatch +\{ + public bool ReplyWithAllNodesValues; // A flag that indicates if the results should include a + // dictionary of counter values per database node + public List Documents = new List(); +\} +`} + + + +#### DocumentCountersOperation + + + +{`public class DocumentCountersOperation +\{ + public string DocumentId; // Id of the document that holds the counters + public List Operations; // A list of counter operations to perform +\} +`} + + + +#### CounterOperation + + + +{`public class CounterOperation +\{ + public CounterOperationType Type; + public string CounterName; + public long Delta; // the value to increment by +\} +`} + + + +#### CounterOperationType + + + +{`public enum CounterOperationType +\{ + Increment, + Delete, + Get +\} +`} + + + + +A document that has counters holds all its counter names in the `metadata`. +Therefore, when creating a new counter, the parent document is modified, as the counter's name needs to be added to the metadata. +Deleting a counter also modifies the parent document, as the counter's name needs to be removed from the metadata. +Incrementing an existing counter will not modify the parent document. + +Even if a `DocumentCountersOperation` contains several `CounterOperation` items that affect the document's metadata (create, delete), +the parent document will be modified **only once**, after all the `CounterOperation` items in this `DocumentCountersOperation` have been processed. +If `DocumentCountersOperation` doesn't contain any `CounterOperation` that affects the metadata, the parent document won't be modified. + + + + + +## Return Value + +* *CounterBatchOperation* returns a `CountersDetail` object, which holds a list of `CounterDetail` objects. + +* If a `CounterOperationType` is `Increment` or `Get`, a `CounterDetail` object will be added to the result. + `Delete` operations will not be included in the result. 
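+
+Since `Delete` operations are omitted from the result, and a `Get` for a counter that does not exist yields a `null` entry (see Example #4 below), it is safest to null-check each item when consuming the result. A short sketch, assuming `result` holds the `CountersDetail` returned by `store.Operations.Send` (both classes are shown below):
+
+
+
+{`// A short sketch, assuming 'result' holds the returned CountersDetail
+foreach (CounterDetail counter in result.Counters)
+\{
+    // Entries can be null, e.g. a 'Get' for a counter that does not exist
+    if (counter == null)
+        continue;
+
+    Console.WriteLine(counter.DocumentId + " / " + counter.CounterName + " = " + counter.TotalValue);
+\}
+`}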
+ + + +{`public class CountersDetail +\{ + public List Counters; +\} +`} + + + + + +{`public class CounterDetail +\{ + public string DocumentId; // ID of the document that holds the counter + public string CounterName; // The counter name + public long TotalValue; // Total counter value + public Dictionary CounterValues; // A dictionary of counter values per database node + public long Etag; // Counter Etag + public string ChangeVector; // Change vector of the counter +\} +`} + + + + + +## Examples + +Assume we have two documents, *"users/1"* and *"users/2"*, that hold 3 counters each - +*"likes"*, *"dislikes"* and *"downloads"* - with values 10, 20 and 30 (respectively) +### Example #1 : Increment Multiple Counters in a Batch + + + +{`var operationResult = store.Operations.Send(new CounterBatchOperation(new CounterBatch +\{ + Documents = new List + \{ + new DocumentCountersOperation + \{ + DocumentId = "users/1", + Operations = new List + \{ + new CounterOperation + \{ + Type = CounterOperationType.Increment, + CounterName = "likes", + Delta = 5 + \}, + new CounterOperation + \{ + // No Delta specified, value will be incremented by 1 + // (From RavenDB 6.2 on, the default Delta is 1) + + Type = CounterOperationType.Increment, + CounterName = "dislikes" + \} + \} + \}, + new DocumentCountersOperation + \{ + DocumentId = "users/2", + Operations = new List + \{ + new CounterOperation + \{ + Type = CounterOperationType.Increment, + CounterName = "likes", + Delta = 100 + \}, + new CounterOperation + \{ + // this will create a new counter "score", with initial value 50 + // "score" will be added to counter-names in "users/2" metadata + + Type = CounterOperationType.Increment, + CounterName = "score", + Delta = 50 + \} + \} + \} + \} +\})); +`} + + + +#### Result: + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 15, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "dislikes", + "TotalValue" : 20, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "likes", + "TotalValue" : 110, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "score", + "TotalValue" : 50, + "CounterValues" : null + \} + ] +\} +`} + + +### Example #2 : Get Multiple Counters in a Batch + + + +{`var operationResult = store.Operations.Send(new CounterBatchOperation(new CounterBatch +\{ + Documents = new List + \{ + new DocumentCountersOperation + \{ + DocumentId = "users/1", + Operations = new List + \{ + new CounterOperation + \{ + Type = CounterOperationType.Get, + CounterName = "likes" + \}, + new CounterOperation + \{ + Type = CounterOperationType.Get, + CounterName = "downloads" + \} + \} + \}, + new DocumentCountersOperation + \{ + DocumentId = "users/2", + Operations = new List + \{ + new CounterOperation + \{ + Type = CounterOperationType.Get, + CounterName = "likes" + \}, + new CounterOperation + \{ + Type = CounterOperationType.Get, + CounterName = "score" + \} + \} + \} + \} +\})); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 15, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "downloads", + "TotalValue" : 30, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "likes", + "TotalValue" : 110, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "score", + "TotalValue" : 50, + "CounterValues" : null 
+ \} + ] +\} +`} + + +### Example #3 : Delete Multiple Counters in a Batch + + + +{`var operationResult = store.Operations.Send(new CounterBatchOperation(new CounterBatch +\{ + Documents = new List + \{ + new DocumentCountersOperation + \{ + DocumentId = "users/1", + Operations = new List + \{ + // "likes" and "dislikes" will be removed from counter-names in "users/1" metadata + new CounterOperation + \{ + Type = CounterOperationType.Delete, + CounterName = "likes" + \}, + new CounterOperation + \{ + Type = CounterOperationType.Delete, + CounterName = "dislikes" + \} + \} + \}, + new DocumentCountersOperation + \{ + DocumentId = "users/2", + Operations = new List + \{ + // "downloads" will be removed from counter-names in "users/2" metadata + + new CounterOperation + \{ + Type = CounterOperationType.Delete, + CounterName = "downloads" + \} + \} + \} + \} +\})); +`} + + + +#### Result: + + + +{`\{ + "Counters": [] +\} +`} + + +### Example #4 : Mix Different Types of CounterOperations in a Batch + + + +{`var operationResult = store.Operations.Send(new CounterBatchOperation(new CounterBatch +\{ + Documents = new List + \{ + new DocumentCountersOperation + \{ + DocumentId = "users/1", + Operations = new List + \{ + new CounterOperation + \{ + Type = CounterOperationType.Increment, + CounterName = "likes", + Delta = 30 + \}, + new CounterOperation + \{ + // The results will include null for this 'Get' + // since we deleted the "dislikes" counter in the previous example flow + Type = CounterOperationType.Get, + CounterName = "dislikes" + \}, + new CounterOperation + \{ + Type = CounterOperationType.Delete, + CounterName = "downloads" + \} + \} + \}, + new DocumentCountersOperation + \{ + DocumentId = "users/2", + Operations = new List + \{ + new CounterOperation + \{ + Type = CounterOperationType.Get, + CounterName = "likes" + \}, + new CounterOperation + \{ + Type = CounterOperationType.Delete, + CounterName = "dislikes" + \} + \} + \} + \} +\})); +`} + + + +#### Result: + +* Note: The `Delete` operations are Not included in the results. + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 30, + "CounterValues" : null + \}, + null, + \{ + "DocumentId" : "users/2", + "CounterName" : "likes", + "TotalValue" : 110, + "CounterValues" : null + \} + ] +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-java.mdx b/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-java.mdx new file mode 100644 index 0000000000..735c58cfa0 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-java.mdx @@ -0,0 +1,352 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +*CounterBatchOperation* allows you to operate on multiple counters (`INCREMENT`, `GET`, `DELETE`) of different documents in a **single request**. + +## Syntax + + + +{`public CounterBatchOperation(CounterBatch counterBatch) +`} + + + +| Parameter | | | +|------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **counterBatch** | `CounterBatch` | An object that holds a list of `DocumentCountersOperation`.
Each element in the list describes the counter operations to perform for a specific document | + + + +{`public class CounterBatch \{ + private boolean replyWithAllNodesValues; + private List documents = new ArrayList<>(); + + // getters and setters +\} +`} + + + +#### DocumentCountersOperation + + + +{`public class DocumentCountersOperation \{ + private List operations; + private String documentId; + + // getters and setters +\} +`} + + + +#### CounterOperation + + + +{`public static class CounterOperation \{ + private CounterOperationType type; + private String counterName; + private long delta; // the value to increment by + + // getters and setters +\} +`} + + + +#### CounterOperationType + + + +{`public enum CounterOperationType \{ + NONE, + INCREMENT, + DELETE, + GET, + PUT +\} +`} + + + + +A document that has counters holds all its counter names in the `metadata`. +Therefore, when creating a new counter, the parent document is modified, as the counter's name needs to be added to the metadata. +Deleting a counter also modifies the parent document, as the counter's name needs to be removed from the metadata. +Incrementing an existing counter will not modify the parent document. + +Even if a `DocumentCountersOperation` contains several `CounterOperation` items that affect the document's metadata (create, delete), +the parent document will be modified **only once**, after all the `CounterOperation` items in this `DocumentCountersOperation` have been processed. +If `DocumentCountersOperation` doesn't contain any `CounterOperation` that affects the metadata, the parent document won't be modified. + + + + + +## Return Value + +* *CounterBatchOperation* returns a `CountersDetail` object, which holds a list of `CounterDetail` objects. + +* If a `CounterOperationType` is `INCREMENT` or `GET`, a `CounterDetail` object will be added to the result. + `DELETE` operations will not be included in the result. 
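+
+Since `DELETE` operations are omitted from the result, and a `GET` for a counter that does not exist yields a `null` entry (see Example #4 below), it is safest to null-check each item when consuming the result. A short sketch, assuming `result` holds the `CountersDetail` returned by `store.operations().send(...)` and the conventional getters of the classes shown below:
+
+
+
+{`// A short sketch, assuming 'result' holds the returned CountersDetail
+for (CounterDetail counter : result.getCounters()) \{
+    // Entries can be null, e.g. a GET for a counter that does not exist
+    if (counter == null) \{
+        continue;
+    \}
+
+    System.out.println(counter.getDocumentId() + " / " + counter.getCounterName()
+        + " = " + counter.getTotalValue());
+\}
+`}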
+ + + +{`public class CountersDetail \{ + + private List counters; + + // getters and setters +\} +`} + + + + + +{`public class CounterDetail \{ + private String documentId; // ID of the document that holds the counter + private String counterName; // The counter name + private long totalValue; // Total counter value + private long etag; // Counter Etag + private Map counterValues; // A map of counter values per database node + + private String changeVector; // Change vector of the counter + + // getters and setters +\} +`} + + + + + +## Examples + +Assume we have two documents, *"users/1"* and *"users/2"*, that hold 3 counters each - +*"likes"*, *"dislikes"* and *"downloads"* - with values 10, 20 and 30 (respectively) +### Example #1 : Increment Multiple Counters in a Batch + + + +{`DocumentCountersOperation operation1 = new DocumentCountersOperation(); +operation1.setDocumentId("users/1"); +operation1.setOperations(Arrays.asList( + CounterOperation.create("likes", CounterOperationType.INCREMENT, 5), + CounterOperation.create("dislikes", CounterOperationType.INCREMENT) // No delta specified, value will stay the same +)); + +DocumentCountersOperation operation2 = new DocumentCountersOperation(); +operation2.setDocumentId("users/2"); +operation2.setOperations(Arrays.asList( + CounterOperation.create("likes", CounterOperationType.INCREMENT, 100), + + // this will create a new counter "score", with initial value 50 + // "score" will be added to counter-names in "users/2" metadata + CounterOperation.create("score", CounterOperationType.INCREMENT, 50) +)); + +CounterBatch counterBatch = new CounterBatch(); +counterBatch.setDocuments(Arrays.asList(operation1, operation2)); +store.operations().send(new CounterBatchOperation(counterBatch)); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 15, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "dislikes", + "TotalValue" : 20, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "likes", + "TotalValue" : 110, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "score", + "TotalValue" : 50, + "CounterValues" : null + \} + ] +\} +`} + + +### Example #2 : Get Multiple Counters in a Batch + + + +{`DocumentCountersOperation operation1 = new DocumentCountersOperation(); +operation1.setDocumentId("users/1"); +operation1.setOperations(Arrays.asList( + CounterOperation.create("likes", CounterOperationType.GET), + CounterOperation.create("downloads", CounterOperationType.GET) +)); + +DocumentCountersOperation operation2 = new DocumentCountersOperation(); +operation2.setDocumentId("users/2"); +operation2.setOperations(Arrays.asList( + CounterOperation.create("likes", CounterOperationType.GET), + CounterOperation.create("score", CounterOperationType.GET) +)); + +CounterBatch counterBatch = new CounterBatch(); +counterBatch.setDocuments(Arrays.asList(operation1, operation2)); + +store.operations().send(new CounterBatchOperation(counterBatch)); +`} + + + +#### Result: + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 15, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "downloads", + "TotalValue" : 30, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "likes", + "TotalValue" : 110, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "score", + 
"TotalValue" : 50, + "CounterValues" : null + \} + ] +\} +`} + + +### Example #3 : Delete Multiple Counters in a Batch + + + +{`DocumentCountersOperation operation1 = new DocumentCountersOperation(); +operation1.setDocumentId("users/1"); +operation1.setOperations(Arrays.asList( + // "likes" and "dislikes" will be removed from counter-names in "users/1" metadata + CounterOperation.create("likes", CounterOperationType.DELETE), + CounterOperation.create("dislikes", CounterOperationType.DELETE) +)); + +DocumentCountersOperation operation2 = new DocumentCountersOperation(); +operation2.setDocumentId("users/2"); +operation2.setOperations(Arrays.asList( + // "downloads" will be removed from counter-names in "users/2" metadata + CounterOperation.create("downloads", CounterOperationType.DELETE) +)); + +CounterBatch counterBatch = new CounterBatch(); +counterBatch.setDocuments(Arrays.asList(operation1, operation2)); +store.operations().send(new CounterBatchOperation(counterBatch)); +`} + + + +#### Result: + + + +{`\{ + "Counters": [] +\} +`} + + +### Example #4 : Mix Different Types of CounterOperations in a Batch + + + +{`DocumentCountersOperation operation1 = new DocumentCountersOperation(); +operation1.setDocumentId("users/1"); +operation1.setOperations(Arrays.asList( + CounterOperation.create("likes", CounterOperationType.INCREMENT, 30), + // The results will include null for this 'Get' + // since we deleted the "dislikes" counter in the previous example flow + CounterOperation.create("dislikes", CounterOperationType.GET), + CounterOperation.create("downloads", CounterOperationType.DELETE) +)); + +DocumentCountersOperation operation2 = new DocumentCountersOperation(); +operation2.setDocumentId("users/2"); +operation2.setOperations(Arrays.asList( + CounterOperation.create("likes", CounterOperationType.GET), + CounterOperation.create("dislikes", CounterOperationType.DELETE) +)); + +CounterBatch counterBatch = new CounterBatch(); +counterBatch.setDocuments(Arrays.asList(operation1, operation2)); +store.operations().send(new CounterBatchOperation(counterBatch)); +`} + + + +#### Result: + +* Note: The `Delete` operations are Not included in the result. + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 30, + "CounterValues" : null + \}, + null, + \{ + "DocumentId" : "users/2", + "CounterName" : "likes", + "TotalValue" : 110, + "CounterValues" : null + \} + ] +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-nodejs.mdx new file mode 100644 index 0000000000..58cf903b70 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-nodejs.mdx @@ -0,0 +1,399 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +*CounterBatchOperation* allows you to operate on multiple counters (`Increment`, `Get`, `Delete`) of different documents in a **single request**. + +## Syntax + + + +{`const counterBatchOp = new CounterBatchOperation(counterBatch); +`} + + + +| Parameter | | | +|------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **counterBatch** | `CounterBatch` | An object that holds a list of `DocumentCountersOperation`.
Each element in the list describes the counter operations to perform for a specific document | + + + +{`// The CounterBatch object: +// ======================== +\{ + // A list of "DocumentCountersOperation" objects + documents; + // A flag indicating if results should include a dictionary of counter values per database node + replyWithAllNodesValues; +\} +`} + + + + + +{`// The DocumentCountersOperation object: +// ===================================== +\{ + // Id of the document that holds the counters + documentId; + // A list of "CounterOperation" objects to perform + operations; +\} +`} + + + + + +{`// The CounterOperation object: +// ============================ +\{ + // The operation type: "Increment" | "Delete" | "Get" + type; + // The counter name + counterName; + // The value to increment by + delta; +\} +`} + + + + +A document that has counters holds all its counter names in the `metadata`. +Therefore, when creating a new counter, the parent document is modified, as the counter's name needs to be added to the metadata. +Deleting a counter also modifies the parent document, as the counter's name needs to be removed from the metadata. +Incrementing an existing counter will not modify the parent document. + +Even if a `DocumentCountersOperation` contains several `CounterOperation` items that affect the document's metadata (create, delete), +the parent document will be modified **only once**, after all the `CounterOperation` items in this `DocumentCountersOperation` have been processed. +If `DocumentCountersOperation` doesn't contain any `CounterOperation` that affects the metadata, the parent document won't be modified. + + + + + +## Return Value + +* *CounterBatchOperation* returns a `CountersDetail` object, which holds a list of `CounterDetail` objects. + +* If the type is `Increment` or `Get`, a `CounterDetail` object will be added to the result. + `Delete` operations will Not be included in the result. 
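+
+Since `Delete` operations are omitted from the result, and a `Get` for a counter that does not exist yields a `null` entry (see Example #4 below), it is safest to null-check each item when consuming the result. A short sketch, assuming `result` holds the object returned by `store.operations.send(counterBatchOp)` (the result objects are described below):
+
+
+
+{`// A short sketch, assuming 'result' holds the returned counters detail object
+for (const counter of result.counters) \{
+    // Entries can be null, e.g. a 'Get' for a counter that does not exist
+    if (!counter) \{
+        continue;
+    \}
+
+    console.log(counter.documentId + " / " + counter.counterName + " = " + counter.totalValue);
+\}
+`}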
+ + + +{`// The CounterDetails object: +// ========================== +\{ + // A list of "CounterDetail" objects; + counters; +\} +`} + + + + + +{`// The CounterDetail object: +// ========================= +\{ + // ID of the document that holds the counter; + documentId; // string + + // The counter name + counterName; //string + + // Total counter value + totalValue; // number + + // A dictionary of counter values per database node + counterValues?; + + // Etag of counter + etag?; // number; + + // Change vector of counter + changeVector?; // string +\} +`} + + + + + +## Examples + +Assume we have two documents, `users/1` and `users/2`, that hold 3 counters each - +_"Likes"_, _"Dislikes"_ and _"Downloads"_ - with values 10, 20 and 30 (respectively) +### Example #1 : Increment Multiple Counters in a Batch + + + +{`// Define the counter actions you want to make per document: +// ========================================================= + +const counterActions1 = new DocumentCountersOperation(); +counterActions1.documentId = "users/1"; +counterActions1.operations = [ + CounterOperation.create("Likes", "Increment", 5), // Increment "Likes" by 5 + CounterOperation.create("Dislikes", "Increment") // No delta specified, value will stay the same +]; + +const counterActions2 = new DocumentCountersOperation(); +counterActions2.documentId = "users/2"; +counterActions2.operations = [ + CounterOperation.create("Likes", "Increment", 100), // Increment "Likes" by 100 + CounterOperation.create("Score", "Increment", 50) // Create a new counter "Score" with value 50 +]; + +// Define the batch: +// ================= +const batch = new CounterBatch(); +batch.documents = [counterActions1, counterActions2]; + +// Define the counter batch operation, pass the batch: +// =================================================== +const counterBatchOp = new CounterBatchOperation(batch); + +// Execute the operation by passing it to operations.send: +// ======================================================= +const result = await documentStore.operations.send(counterBatchOp); +const counters = result.counters; +`} + + + +#### Result: + + + +{`\{ + "counters": + [ + \{ + "documentId" : "users/1", + "counterName" : "Likes", + "totalValue" : 15, + "counterValues" : null + \}, + \{ + "documentId" : "users/1", + "counterName" : "Dislikes", + "totalValue" : 20, + "counterValues" : null + \}, + \{ + "documentId" : "users/2", + "counterName" : "Likes", + "totalValue" : 110, + "counterValues" : null + \}, + \{ + "documentId" : "users/2", + "counterName" : "score", + "totalValue" : 50, + "counterValues" : null + \} + ] +\} +`} + + +### Example #2 : Get Multiple Counters in a Batch + + + +{`// Define the counter actions you want to make per document: +// ========================================================= + +const counterActions1 = new DocumentCountersOperation(); +counterActions1.documentId = "users/1"; +counterActions1.operations = [ + CounterOperation.create("Likes", "Get"), + CounterOperation.create("Downloads", "Get") +]; + +const counterActions2 = new DocumentCountersOperation(); +counterActions2.documentId = "users/2"; +counterActions2.operations = [ + CounterOperation.create("Likes", "Get"), + CounterOperation.create("Score", "Get") +]; + +// Define the batch: +// ================= +const batch = new CounterBatch(); +batch.documents = [counterActions1, counterActions2]; + +// Define the counter batch operation, pass the batch: +// =================================================== +const counterBatchOp = new 
CounterBatchOperation(batch); + +// Execute the operation by passing it to operations.send: +// ======================================================= +const result = await documentStore.operations.send(counterBatchOp); +const counters = result.counters; +`} + + + +#### Result: + + + +{`\{ + "counters": + [ + \{ + "documentId" : "users/1", + "counterName" : "Likes", + "totalValue" : 15, + "counterValues" : null + \}, + \{ + "documentId" : "users/1", + "counterName" : "Downloads", + "totalValue" : 30, + "counterValues" : null + \}, + \{ + "documentId" : "users/2", + "counterName" : "Likes", + "totalValue" : 110, + "counterValues" : null + \}, + \{ + "documentId" : "users/2", + "counterName" : "Score", + "totalValue" : 50, + "counterValues" : null + \} + ] +\} +`} + + +### Example #3 : Delete Multiple Counters in a Batch + + + +{`// Define the counter actions you want to make per document: +// ========================================================= + +const counterActions1 = new DocumentCountersOperation(); +counterActions1.documentId = "users/1"; +counterActions1.operations = [ + // "Likes" and "Dislikes" will be removed from counter-names in "users/1" metadata + CounterOperation.create("Likes", "Delete"), + CounterOperation.create("Dislikes", "Delete") +]; + +const counterActions2 = new DocumentCountersOperation(); +counterActions2.documentId = "users/2"; +counterActions2.operations = [ + // "Downloads" will be removed from counter-names in "users/2" metadata + CounterOperation.create("Downloads", "Delete") +]; + +// Define the batch: +// ================= +const batch = new CounterBatch(); +batch.documents = [counterActions1, counterActions2]; + +// Define the counter batch operation, pass the batch: +// =================================================== +const counterBatchOp = new CounterBatchOperation(batch); + +// Execute the operation by passing it to operations.send: +// ======================================================= +const result = await documentStore.operations.send(counterBatchOp); +const counters = result.counters; +`} + + + +#### Result: + + + +{`\{ + "counters": [] +\} +`} + + +### Example #4 : Mix Different Types of CounterOperations in a Batch + + + +{`// Define the counter actions you want to make per document: +// ========================================================= + +const counterActions1 = new DocumentCountersOperation(); +counterActions1.documentId = "users/1"; +counterActions1.operations = [ + CounterOperation.create("Likes", "Increment", 30), + // The results will include null for this 'Get' + // since we deleted the "Dislikes" counter in the previous example flow + CounterOperation.create("Dislikes", "Get"), + CounterOperation.create("Downloads", "Delete") +]; + +const counterActions2 = new DocumentCountersOperation(); +counterActions2.documentId = "users/2"; +counterActions2.operations = [ + CounterOperation.create("Likes", "Get"), + CounterOperation.create("Dislikes", "Delete") +]; + +// Define the batch: +// ================= +const batch = new CounterBatch(); +batch.documents = [counterActions1, counterActions2]; + +// Define the counter batch operation, pass the batch: +// =================================================== +const counterBatchOp = new CounterBatchOperation(batch); + +// Execute the operation by passing it to operations.send: +// ======================================================= +const result = await documentStore.operations.send(counterBatchOp); +const counters = result.counters; +`} + + + +#### Result: + +* Note: The 
`Delete` operations are Not included in the result. + + + +{`\{ + "counters": + [ + \{ + "documentId" : "users/1", + "counterName" : "Likes", + "totalValue" : 30, + "counterValues" : null + \}, + null, + \{ + "documentId" : "users/2", + "counterName" : "Likes", + "totalValue" : 110, + "counterValues" : null + \} + ] +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-php.mdx b/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-php.mdx new file mode 100644 index 0000000000..781d0c0112 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/counters/_counter-batch-php.mdx @@ -0,0 +1,374 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +*CounterBatchOperation* allows you to operate on multiple counters (`Increment`, `Get`, `Delete`) of different documents in a **single request**. + +## Syntax + + + +{`class CounterBatchOperation +\{ + public function __construct(CounterBatch $counterBatch) \{ ... \} +\} +`} + + + +| Parameter | | | +|------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **counterBatch** | `CounterBatch` | An object that holds a list of `DocumentCountersOperation`.
Each element in the list describes the counter operations to perform for a specific document |
+
+
+
+{`class CounterBatch
+\{
+    private bool $replyWithAllNodesValues = false; // A flag that indicates if the results should include a
+                                                   // dictionary of counter values per database node
+
+    private ?DocumentCountersOperationList $documents = null;
+
+    private bool $fromEtl = false;
+
+    // ... getters and setters
+\}
+`}
+
+
+
+#### DocumentCountersOperation
+
+
+
+{`class DocumentCountersOperation
+\{
+    private ?CounterOperationList $operations = null; // A list of counter operations to perform
+    private ?string $documentId = null; // Id of the document that holds the counters
+\}
+`}
+
+
+
+#### CounterOperation
+
+
+
+{`class CounterOperation
+\{
+    private ?CounterOperationType $type = null;
+    private ?string $counterName = null;
+    private ?int $delta = null; // the value to increment by
+\}
+`}
+
+
+
+#### CounterOperationType
+
+
+
+{`class CounterOperationType
+\{
+    public function isIncrement(): bool;
+    public static function increment(): CounterOperationType;
+
+    public function isDelete(): bool;
+    public static function delete(): CounterOperationType;
+
+    public function isGet(): bool;
+    public static function get(): CounterOperationType;
+
+    public function isPut(): bool;
+    public static function put(): CounterOperationType;
+\}
+`}
+
+
+
+
+A document that has counters holds all its counter names in the `metadata`.
+Therefore, when creating a new counter, the parent document is modified, as the counter's name needs to be added to the metadata.
+Deleting a counter also modifies the parent document, as the counter's name needs to be removed from the metadata.
+Incrementing an existing counter will not modify the parent document.
+
+Even if a `DocumentCountersOperation` contains several `CounterOperation` items that affect the document's metadata (create, delete),
+the parent document will be modified **only once**, after all the `CounterOperation` items in this `DocumentCountersOperation` have been processed.
+If `DocumentCountersOperation` doesn't contain any `CounterOperation` that affects the metadata, the parent document won't be modified.
+
+
+
+
+
+## Return Value
+
+* *CounterBatchOperation* returns a `CountersDetail` object, which holds a list of `CounterDetail` objects.
+
+* If a `CounterOperationType` is `Increment` or `Get`, a `CounterDetail` object will be added to the result.
+  `Delete` operations will not be included in the result.
+
+
+
+{`class CountersDetail
+\{
+    private ?CounterDetailList $counters = null;
+\}
+`}
+
+
+
+
+
+{`class CounterDetail
+\{
+    private ?string $documentId = null; // ID of the document that holds the counter
+    private ?string $counterName = null; // The counter name
+    private ?int $totalValue = null; // Total counter value
+    private ?int $etag = null; // Counter Etag
+    private ?array $counterValues = []; // A dictionary of counter values per database node
+
+    private ?string $changeVector = null; // Change vector of the counter
+
+    // ... 
getters and setters +\} + +class CounterDetailList extends TypedList +\{ + public function __construct() + \{ + parent::__construct(CounterDetail::class); + $this->setNullAllowed(true); + \} +\} +`} + + + + + +## Examples + +Assume we have two documents, `users/1` and `users/2`, that hold 3 counters each: +`likes`, `dislikes` and `downloads` - with values 10, 20 and 30 (respectively) +### Example #1 : Increment Multiple Counters in a Batch + + + +{`$operation1 = new DocumentCountersOperation(); +$operation1->setDocumentId("users/1"); +$operation1->setOperations([ + CounterOperation::create("likes", CounterOperationType::increment(), 5), + CounterOperation::create("dislikes", CounterOperationType::increment()) // No delta specified, value will stay the same +]); + +$operation2 = new DocumentCountersOperation(); +$operation2->setDocumentId("users/2"); +$operation2->setOperations([ + CounterOperation::create("likes", CounterOperationType::increment(), 100), + + // this will create a new counter "score", with initial value 50 + // "score" will be added to counter-names in "users/2" metadata + CounterOperation::create("score", CounterOperationType::increment(), 50) +]); + +$counterBatch = new CounterBatch(); +$counterBatch->setDocuments([$operation1, $operation2]); +$store->operations()->send(new CounterBatchOperation($counterBatch)); +`} + + + +#### Result: + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 15, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "dislikes", + "TotalValue" : 20, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "likes", + "TotalValue" : 110, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "score", + "TotalValue" : 50, + "CounterValues" : null + \} + ] +\} +`} + + +### Example #2 : Get Multiple Counters in a Batch + + + +{`$operation1 = new DocumentCountersOperation(); +$operation1->setDocumentId("users/1"); +$operation1->setOperations([ + CounterOperation::create("likes", CounterOperationType::get()), + CounterOperation::create("downloads", CounterOperationType::get()) +]); + +$operation2 = new DocumentCountersOperation(); +$operation2->setDocumentId("users/2"); +$operation2->setOperations([ + CounterOperation::create("likes", CounterOperationType::get()), + CounterOperation::create("score", CounterOperationType::get()) +]); + +$counterBatch = new CounterBatch(); +$counterBatch->setDocuments([$operation1, $operation2]); + +$store->operations()->send(new CounterBatchOperation($counterBatch)); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 15, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "downloads", + "TotalValue" : 30, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "likes", + "TotalValue" : 110, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/2", + "CounterName" : "score", + "TotalValue" : 50, + "CounterValues" : null + \} + ] +\} +`} + + +### Example #3 : Delete Multiple Counters in a Batch + + + +{`$operation1 = new DocumentCountersOperation(); +$operation1->setDocumentId("users/1"); +$operation1->setOperations([ + // "likes" and "dislikes" will be removed from counter-names in "users/1" metadata + CounterOperation::create("likes", CounterOperationType::delete()), + CounterOperation::create("dislikes", CounterOperationType::delete()) +]); + 
+$operation2 = new DocumentCountersOperation(); +$operation2->setDocumentId("users/2"); +$operation2->setOperations([ + // "downloads" will be removed from counter-names in "users/2" metadata + CounterOperation::create("downloads", CounterOperationType::delete()) +]); + +$counterBatch = new CounterBatch(); +$counterBatch->setDocuments([$operation1, $operation2]); +$store->operations()->send(new CounterBatchOperation($counterBatch)); +`} + + + +#### Result: + + + +{`\{ + "Counters": [] +\} +`} + + +### Example #4 : Mix Different Types of CounterOperations in a Batch + + + +{`$operation1 = new DocumentCountersOperation(); +$operation1->setDocumentId("users/1"); +$operation1->setOperations([ + CounterOperation::create("likes", CounterOperationType::increment(), 30), + // The results will include null for this 'Get' + // since we deleted the "dislikes" counter in the previous example flow + CounterOperation::create("dislikes", CounterOperationType::get()), + CounterOperation::create("downloads", CounterOperationType::delete()) +]); + +$operation2 = new DocumentCountersOperation(); +$operation2->setDocumentId("users/2"); +$operation2->setOperations([ + CounterOperation::create("likes", CounterOperationType::get()), + CounterOperation::create("dislikes", CounterOperationType::delete()) +]); + +$counterBatch = new CounterBatch(); +$counterBatch->setDocuments([$operation1, $operation2]); +$store->operations()->send(new CounterBatchOperation($counterBatch)); +`} + + + +#### Result: + +* Note: The `Delete` operations are Not included in the results. + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 30, + "CounterValues" : null + \}, + null, + \{ + "DocumentId" : "users/2", + "CounterName" : "likes", + "TotalValue" : 110, + "CounterValues" : null + \} + ] +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-csharp.mdx new file mode 100644 index 0000000000..4159ee013b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-csharp.mdx @@ -0,0 +1,245 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +This operation is used to get counters' values for a specific document. +It can be used to get the value of a single counter, multiple counters' values, or all counters' values. + +## Syntax + +#### Get Single Counter + + + +{`public GetCountersOperation(string docId, string counter, bool returnFullResults = false) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **docId** | string | The ID of the document that holds the counters | +| **counter** | string | The name of the counter to get | +| **returnFullResults** | bool | A flag which indicates if the operation should include a dictionary of counter values per database node in the result | + + + +**Return Full Results flag:** + +If RavenDB is running in a distributed cluster, and the database resides on several nodes, +a counter can have a different *local* value on each database node, and the total counter value is the +sum of all the local values of this counter from each node. 
+In order to get the counter values per database node, set the `returnFullResults` flag to `true` + + +#### Get Multiple Counters + + + +{`public GetCountersOperation(string docId, string[] counters, bool returnFullResults = false) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **docId** | string | The ID of the document that holds the counters | +| **counters** | string[] | The names of the counters to get | +| **returnFullResults** | bool | A flag which indicates if the operation should include a dictionary of counter values per database node in the result | +#### Get All Counters of a Document + + + +{`public GetCountersOperation(string docId, bool returnFullResults = false) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **docId** | string | The ID of the document that holds the counters | +| **returnFullResults** | bool | A flag which indicates if the operation should include a dictionary of counter values per database node in the result | + + + +## Return Value + +The operation returns a `CountersDetail` object, which holds a list of `CounterDetail` objects + + + +{`public class CountersDetail +\{ + public List Counters; +\} +`} + + + + + +{`public class CounterDetail +\{ + public string DocumentId; // ID of the document that holds the counter + public string CounterName; // The counter name + public long TotalValue; // Total counter value + public Dictionary CounterValues; // A dictionary of counter values per database node + public long Etag; // Counter Etag + public string ChangeVector; // Change vector of the counter +\} +`} + + + + + +## Examples + +Assume we have a `users/1` document that holds 3 counters: +`likes`, `dislikes` and `downloads` - with values 10, 20 and 30 (respectively) + +### Example #1 : Get single counter + + + +{`var operationResult = store.Operations + .Send(new GetCountersOperation("users/1", "likes")); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : null + \} + ] +\} +`} + + + +### Example #2 : Get multiple counters + + + +{`var operationResult = store.Operations + .Send(new GetCountersOperation("users/1", new []\{"likes", "dislikes" \})); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "dislikes", + "TotalValue" : 20, + "CounterValues" : null + \} + ] +\} +`} + + + +### Example #3 : Get all counters + + + +{`var operationResult = store.Operations + .Send(new GetCountersOperation("users/1")); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "dislikes", + "TotalValue" : 20, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "downloads", + "TotalValue" : 30, + "CounterValues" : null + \} + ] +\} +`} + + + +### Example #4 : Include full values in the result + + + +{`var operationResult = store.Operations + .Send(new GetCountersOperation("users/1", "likes", true)); +`} + + + +#### Result: + +Assuming a 3-node cluster, the distribution of the counter's value to nodes A, B, and C could be as follows: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : + \{ + 
"A:35-UuCp420vs0u+URADcGVURA" : 5, + "B:83-SeCFU29daUOxfjUcAlLiJw" : 3, + "C:27-7i7GP8bOOkGYLNflO/rSeg" : 2, + \} + \} + ] +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-java.mdx b/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-java.mdx new file mode 100644 index 0000000000..4fed461c61 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-java.mdx @@ -0,0 +1,252 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +This operation is used to get counters' values for a specific document. +It can be used to get the value of a single counter, multiple counters' values, or all counters' values. + +## Syntax + +#### Get Single Counter + + + +{`public GetCountersOperation(String docId, String counter) +public GetCountersOperation(String docId, String counter, boolean returnFullResults) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **docId** | String | The ID of the document that holds the counters | +| **counter** | String | The name of the counter to get | +| **returnFullResults** | boolean | A flag which indicates if the operation should include a dictionary of counter values per database node in the result | + + + +**Return Full Results flag**: + +If RavenDB is running in a distributed cluster, and the database resides on several nodes, +a counter can have a different *local* value on each database node, and the total counter value is the +sum of all the local values of this counter from each node. +In order to get the counter values per database node, set the `returnFullResults` flag to `true` + + +#### Get Multiple Counters + + + +{`public GetCountersOperation(String docId, String[] counters) +public GetCountersOperation(String docId, String[] counters, boolean returnFullResults) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **docId** | String | The ID of the document that holds the counters | +| **counters** | String[] | The names of the counters to get | +| **returnFullResults** | boolean | A flag which indicates if the operation should include a dictionary of counter values per database node in the result | +#### Get All Counters of a Document + + + +{`public GetCountersOperation(String docId) +public GetCountersOperation(String docId, boolean returnFullResults) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **docId** | String | The ID of the document that holds the counters | +| **returnFullResults** | boolean | A flag which indicates if the operation should include a dictionary of counter values per database node in the result | + + + +## Return Value + +The operation returns a `CountersDetail` object, which holds a list of `CounterDetail` objects + + + +{`public class CountersDetail \{ + + private List counters; + + // getters and setters +\} +`} + + + + + +{`public class CounterDetail \{ + private String documentId; // ID of the document that holds the counter + private String counterName; // The counter name + private long totalValue; // Total counter value + private long etag; // Counter Etag + private Map counterValues; // A map of counter values per database node + + private String changeVector; // Change vector of the counter + + // getters and setters +\} +`} + + + + + +## Examples + +Assume we have a `users/1` document that holds 3 counters: +`likes`, `dislikes` 
and `downloads` - with values 10, 20 and 30 (respectively) + +### Example #1 : Get single counter + + + +{`CountersDetail operationResult = store.operations() + .send(new GetCountersOperation("users/1", "likes")); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : null + \} + ] +\} +`} + + + +### Example #2 : Get multiple counters + + + +{`CountersDetail operationResult = store.operations() + .send(new GetCountersOperation("users/1", new String[]\{ "likes", "dislikes" \})); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "dislikes", + "TotalValue" : 20, + "CounterValues" : null + \} + ] +\} +`} + + + +### Example #3 : Get all counters + + + +{`CountersDetail operationResult = store.operations() + .send(new GetCountersOperation("users/1")); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "dislikes", + "TotalValue" : 20, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "downloads", + "TotalValue" : 30, + "CounterValues" : null + \} + ] +\} +`} + + + +### Example #4 : Include full values in the result + + + +{`CountersDetail operationResult = store.operations() + .send(new GetCountersOperation("users/1", "likes", true)); +`} + + + +#### Result: + +Assuming a 3-node cluster, the distribution of the counter's value to nodes A, B, and C could be as follows: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : + \{ + "A:35-UuCp420vs0u+URADcGVURA" : 5, + "B:83-SeCFU29daUOxfjUcAlLiJw" : 3, + "C:27-7i7GP8bOOkGYLNflO/rSeg" : 2, + \} + \} + ] +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-nodejs.mdx new file mode 100644 index 0000000000..df12c024d6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-nodejs.mdx @@ -0,0 +1,259 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +This operation is used to get counters' values for a specific document. +It can be used to get the value of a single counter, multiple counters' values, or all counters' values. 
+
+## Syntax
+
+
+
+{`// Get single counter
+const getCountersOp = new GetCountersOperation(docId, counter);
+const getCountersOp = new GetCountersOperation(docId, counter, returnFullResults = false);
+`}
+
+
+
+
+
+{`// Get multiple counters
+const getCountersOp = new GetCountersOperation(docId, counters);
+const getCountersOp = new GetCountersOperation(docId, counters, returnFullResults = false);
+`}
+
+
+
+
+
+{`// Get all counters of a document
+const getCountersOp = new GetCountersOperation(docId);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------------|----------|-----------------------------------------------------------------------------------------------------------------------|
+| **docId** | string | The ID of the document that holds the counters |
+| **counter** | string | The name of the counter to get |
+| **counters** | string[] | The list of counter names to get |
+| **returnFullResults** | boolean | A flag which indicates if the operation should include a dictionary of counter values per database node in the result |
+
+
+
+**The full results flag:**
+
+If RavenDB is running in a distributed cluster, and the database resides on several nodes,
+then a counter can have a different *local* value on each database node.
+The total counter value is the sum of all the local values of this counter from each node.
+To get the counter values per database node, set the `returnFullResults` flag to `true`.
+
+
+
+
+
+## Return Value
+
+The operation returns a `CountersDetail` object, which holds a list of `CounterDetail` objects.
+
+
+
+{`// The CountersDetail object:
+// ==========================
+\{
+    // A list of "CounterDetail" objects;
+    counters;
+\}
+`}
+
+
+
+
+
+{`// The CounterDetail object:
+// =========================
+\{
+    // ID of the document that holds the counter;
+    documentId; // string
+
+    // The counter name
+    counterName; // string
+
+    // Total counter value
+    totalValue; // number
+
+    // A dictionary of counter values per database node
+    counterValues?;
+
+    // Etag of counter
+    etag?; // number
+
+    // Change vector of counter
+    changeVector?; // string
+\}
+`}
+
+
+
+
+
+## Examples
+
+Assume we have a `users/1` document that holds 3 counters:
+`Likes`, `Dislikes` and `Downloads` - with values 10, 20 and 30 (respectively)
+
+### Example #1 : Get single counter
+
+
+
+{`// Define the get counters operation
+const getCountersOp = new GetCountersOperation("users/1", "Likes");
+
+// Execute the operation by passing it to operations.send
+const result = await documentStore.operations.send(getCountersOp);
+const counters = result.counters;
+`}
+
+
+
+#### Result:
+
+
+
+{`\{
+    "counters":
+    [
+        \{
+            "documentId" : "users/1",
+            "counterName" : "Likes",
+            "totalValue" : 10,
+            "counterValues" : null
+        \}
+    ]
+\}
+`}
+
+
+
+### Example #2 : Get multiple counters
+
+
+
+{`const getCountersOp = new GetCountersOperation("users/1", ["Likes", "Dislikes"]);
+
+const result = await documentStore.operations.send(getCountersOp);
+const counters = result.counters;
+`}
+
+
+
+#### Result:
+
+
+
+{`\{
+    "counters":
+    [
+        \{
+            "documentId" : "users/1",
+            "counterName" : "Likes",
+            "totalValue" : 10,
+            "counterValues" : null
+        \},
+        \{
+            "documentId" : "users/1",
+            "counterName" : "Dislikes",
+            "totalValue" : 20,
+            "counterValues" : null
+        \}
+    ]
+\}
+`}
+
+
+
+### Example #3 : Get all counters
+
+
+
+{`const getCountersOp = new GetCountersOperation("users/1");
+
+const result = await documentStore.operations.send(getCountersOp);
+const counters = result.counters;
+`}
+
+
+#### Result:
+
+
+
+{`\{
+    "counters":
+    [
+        \{
+            "documentId" : "users/1",
+            "counterName" : "Likes",
+            "totalValue" : 10,
+            "counterValues" : null
+        \},
+        \{
+            "documentId" : "users/1",
+            "counterName" : "Dislikes",
+            "totalValue" : 20,
+            "counterValues" : null
+        \},
+        \{
+            "documentId" : "users/1",
+            "counterName" : "Downloads",
+            "totalValue" : 30,
+            "counterValues" : null
+        \}
+    ]
+\}
+`}
+
+
+
+### Example #4 : Include full values in the result
+
+
+
+{`const getCountersOp = new GetCountersOperation("users/1", "Likes", true);
+
+const result = await documentStore.operations.send(getCountersOp);
+const counters = result.counters;
+`}
+
+
+
+#### Result:
+
+Assuming a 3-node cluster, the distribution of the counter's value to nodes A, B, and C could be as follows:
+
+
+
+{`\{
+    "counters":
+    [
+        \{
+            "documentId" : "users/1",
+            "counterName" : "Likes",
+            "totalValue" : 10,
+            "counterValues" :
+            \{
+                "A:35-UuCp420vs0u+URADcGVURA" : 5,
+                "B:83-SeCFU29daUOxfjUcAlLiJw" : 3,
+                "C:27-7i7GP8bOOkGYLNflO/rSeg" : 2,
+            \}
+        \}
+    ]
+\}
+`}
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-php.mdx b/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-php.mdx new file mode 100644 index 0000000000..3f7b215106 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/counters/_get-counters-php.mdx @@ -0,0 +1,294 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This operation is used to get counters' values for a specific document.
+It can be used to get the value of a single counter, multiple counters' values, or all counters' values.
+
+## Syntax
+
+#### `GetCountersOperation`
+
+Use `GetCountersOperation` to get counters.
+Find usage examples below for getting a single counter, multiple counters, or all document counters.
+
+
+{`class GetCountersOperation \{
+    public function __construct(?string $docId, string|StringArray|array|null $counters =
+        null, bool $returnFullResults = false) \{ ... \}
+\}
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **docId** | string | The ID of the document that holds the counters |
+| **counters** | `string` or `StringArray` or `array` or `null` | A single counter name, an array of counter names,
or `null` for all document counters |
+| **returnFullResults** | bool | A flag which indicates if the operation should include a dictionary of counter values per database node in the result |
+
+#### Get Single Counter
+
+
+
+{`$docId = "users/1";
+$counter = "likes";
+$returnFullResults = false;
+
+$operation = new GetCountersOperation($docId, $counter, $returnFullResults);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **docId** | string | The ID of the document that holds the counters |
+| **counter** | string | The name of the counter to get |
+| **returnFullResults** | bool | A flag which indicates if the operation should include a dictionary of counter values per database node in the result |
+
+
+
+**Return Full Results flag:**
+
+If RavenDB is running in a distributed cluster, and the database resides on several nodes,
+a counter can have a different *local* value on each database node, and the total counter value is the
+sum of all the local values of this counter from each node.
+In order to get the counter values per database node, set the `returnFullResults` flag to `true`.
+
+
+#### Get Multiple Counters
+
+
+
+{`$docId = "users/1";
+$counters = ["likes", "score"];
+$returnFullResults = false;
+
+$operation = new GetCountersOperation($docId, $counters, $returnFullResults);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **docId** | string | The ID of the document that holds the counters |
+| **counters** | `StringArray` or `array` | The names of the counters to get |
+| **returnFullResults** | bool | A flag which indicates if the operation should include a dictionary of counter values per database node in the result |
+
+#### Get All Counters of a Document
+
+
+
+{`$docId = "users/1";
+$returnFullResults = false;
+
+$operation = new GetCountersOperation($docId, null, $returnFullResults);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **docId** | string | The ID of the document that holds the counters |
+| **returnFullResults** | bool | A flag which indicates if the operation should include a dictionary of counter values per database node in the result |
+
+
+
+## Return Value
+
+The operation returns a `CountersDetail` object, which holds a list of `CounterDetail` objects.
+
+
+
+{`class CountersDetail
+\{
+    private ?CounterDetailList $counters = null;
+\}
+`}
+
+
+
+
+
+{`class CounterDetail
+\{
+    private ?string $documentId = null; // ID of the document that holds the counter
+    private ?string $counterName = null; // The counter name
+    private ?int $totalValue = null; // Total counter value
+    private ?int $etag = null; // Counter Etag
+    private ?array $counterValues = []; // A dictionary of counter values per database node
+
+    private ?string $changeVector = null; // Change vector of the counter
+
+    // ...
getters and setters +\} + +class CounterDetailList extends TypedList +\{ + public function __construct() + \{ + parent::__construct(CounterDetail::class); + $this->setNullAllowed(true); + \} +\} +`} + + + + + +## Examples + +Assume we have a `users/1` document that holds 3 counters: +`likes`, `dislikes` and `downloads` - with values 10, 20 and 30 (respectively) + +### Example #1 : Get single counter + + + +{`/** @var CountersDetail $operationResult */ +$operationResult = $store + ->operations() + ->send(new GetCountersOperation("users/1", "likes")); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : null + \} + ] +\} +`} + + + +### Example #2 : Get multiple counters + + + +{`/** @var CountersDetail $operationResult */ +$operationResult = $store + ->operations() + ->send(new GetCountersOperation("users/1", [ "likes", "dislikes" ])); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "dislikes", + "TotalValue" : 20, + "CounterValues" : null + \} + ] +\} +`} + + + +### Example #3 : Get all counters + + + +{`/** @var CountersDetail $operationResult */ +$operationResult = $store->operations() + ->send(new GetCountersOperation("users/1")); +`} + + + +#### Result: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "dislikes", + "TotalValue" : 20, + "CounterValues" : null + \}, + \{ + "DocumentId" : "users/1", + "CounterName" : "downloads", + "TotalValue" : 30, + "CounterValues" : null + \} + ] +\} +`} + + + +### Example #4 : Include full values in the result + + + +{`/** @var CountersDetail $operationResult */ +$operationResult = $store + ->operations() + ->send(new GetCountersOperation("users/1", "likes", true)); +`} + + + +#### Result: + +Assuming a 3-node cluster, the distribution of the counter's value to nodes A, B, and C could be as follows: + + + +{`\{ + "Counters": + [ + \{ + "DocumentId" : "users/1", + "CounterName" : "likes", + "TotalValue" : 10, + "CounterValues" : + \{ + "A:35-UuCp420vs0u+URADcGVURA" : 5, + "B:83-SeCFU29daUOxfjUcAlLiJw" : 3, + "C:27-7i7GP8bOOkGYLNflO/rSeg" : 2, + \} + \} + ] +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/counters/counter-batch.mdx b/versioned_docs/version-7.1/client-api/operations/counters/counter-batch.mdx new file mode 100644 index 0000000000..a235199acf --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/counters/counter-batch.mdx @@ -0,0 +1,43 @@ +--- +title: "Counters Batch Operation" +hide_table_of_contents: true +sidebar_label: Counters Batch +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CounterBatchCsharp from './_counter-batch-csharp.mdx'; +import CounterBatchJava from './_counter-batch-java.mdx'; +import CounterBatchPhp from './_counter-batch-php.mdx'; +import CounterBatchNodejs from './_counter-batch-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/counters/get-counters.mdx 
b/versioned_docs/version-7.1/client-api/operations/counters/get-counters.mdx new file mode 100644 index 0000000000..fa9914a891 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/counters/get-counters.mdx @@ -0,0 +1,43 @@
+---
+title: "Get Counters Operation"
+hide_table_of_contents: true
+sidebar_label: Get Counters
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GetCountersCsharp from './_get-counters-csharp.mdx';
+import GetCountersJava from './_get-counters-java.mdx';
+import GetCountersPhp from './_get-counters-php.mdx';
+import GetCountersNodejs from './_get-counters-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/_category_.json b/versioned_docs/version-7.1/client-api/operations/how-to/_category_.json new file mode 100644 index 0000000000..61a11ebe76 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/_category_.json @@ -0,0 +1,4 @@
+{
+  "position": 1,
+  "label": "How to..."
+} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-csharp.mdx new file mode 100644 index 0000000000..3baf4d6ed1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-csharp.mdx @@ -0,0 +1,113 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, all operations work on the default database defined in the [Document Store](../../../client-api/creating-document-store.mdx).
+
+* **To operate on a different database**, use the `ForDatabase` method.
+  If the requested database doesn't exist on the server, an exception will be thrown.
+
+* In this page:
+    * [Common operation: `Operations.ForDatabase`](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx#common-operation:-operationsfordatabase)
+    * [Maintenance operation: `Maintenance.ForDatabase`](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx#maintenance-operation:-maintenancefordatabase)
+
+## Common operation: `Operations.ForDatabase`
+
+* For reference, all common operations are listed [here](../../../client-api/operations/what-are-operations.mdx#common-operations).
+
+
+
+{`// Define default database on the store
+var documentStore = new DocumentStore
+\{
+    Urls = new[] \{ "yourServerURL" \},
+    Database = "DefaultDB"
+\}.Initialize();
+
+using (documentStore)
+\{
+    // Use 'ForDatabase', get operation executor for another database
+    OperationExecutor opExecutor = documentStore.Operations.ForDatabase("AnotherDB");
+
+    // Send the operation, e.g.
'GetRevisionsOperation' will be executed on "AnotherDB"
+    var revisionsInAnotherDB =
+        opExecutor.Send(new GetRevisionsOperation("Orders/1-A"));
+
+    // Without 'ForDatabase', the operation is executed on "DefaultDB"
+    var revisionsInDefaultDB =
+        documentStore.Operations.Send(new GetRevisionsOperation("Company/1-A"));
+\}
+`}
+
+
+**Syntax**:
+
+
+
+{`OperationExecutor ForDatabase(string databaseName);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **databaseName** | `string` | Name of the database to operate on |
+
+| Return Value | Description |
+| - | - |
+| `OperationExecutor` | New instance of Operation Executor that is scoped to the requested database |
+
+
+
+## Maintenance operation: `Maintenance.ForDatabase`
+
+* For reference, all maintenance operations are listed [here](../../../client-api/operations/what-are-operations.mdx#maintenance-operations).
+
+
+
+{`// Define default database on the store
+var documentStore = new DocumentStore
+\{
+    Urls = new[] \{ "yourServerURL" \},
+    Database = "DefaultDB"
+\}.Initialize();
+
+using (documentStore)
+\{
+    // Use 'ForDatabase', get maintenance operation executor for another database
+    MaintenanceOperationExecutor opExecutor = documentStore.Maintenance.ForDatabase("AnotherDB");
+
+    // Send the maintenance operation, e.g. get database stats for "AnotherDB"
+    var statsForAnotherDB =
+        opExecutor.Send(new GetStatisticsOperation());
+
+    // Without 'ForDatabase', the stats are retrieved for "DefaultDB"
+    var statsForDefaultDB =
+        documentStore.Maintenance.Send(new GetStatisticsOperation());
+\}
+`}
+
+
+**Syntax**:
+
+
+
+{`MaintenanceOperationExecutor ForDatabase(string databaseName);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **databaseName** | `string` | Name of the database to operate on |
+
+| Return Value | Description |
+| - | - |
+| `MaintenanceOperationExecutor` | New instance of Maintenance Operation Executor that is scoped to the requested database |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-java.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-java.mdx new file mode 100644 index 0000000000..41693de669 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-java.mdx @@ -0,0 +1,68 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+By default, the operations available directly in the store work on the default database that was set up for that store. To switch operations to a different database that is available on that server, use the **forDatabase** method.
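+
+For instance, a minimal end-to-end sketch (assuming the server also hosts a database named "otherDatabase" that contains a document "users/1"):
+
+
+
+{`// Get an operation executor scoped to "otherDatabase"
+OperationExecutor otherDbOperations = documentStore.operations().forDatabase("otherDatabase");
+
+// Operations sent through this executor run against "otherDatabase"
+CountersDetail allCounters = otherDbOperations.send(new GetCountersOperation("users/1"));
+`}
+
+
+The sections below detail each of the two **forDatabase** methods.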
+
+## Operations.forDatabase
+
+
+
+{`OperationExecutor forDatabase(String databaseName);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **databaseName** | String | Name of a database for which you want to get new Operations |
+
+| Return Value | |
+| ------------- | ----- |
+| OperationExecutor | New instance of Operations that is scoped to the requested database |
+
+### Example
+
+
+
+{`OperationExecutor operations = documentStore.operations().forDatabase("otherDatabase");
+`}
+
+
+
+
+
+## How to Switch Maintenance Operations to a Different Database
+
+As with `operations`, by default the `maintenance` operations available directly in the store work on the default database that was set up for that store. To switch maintenance operations to a different database, use the **forDatabase** method.
+
+## Maintenance.forDatabase
+
+
+
+{`MaintenanceOperationExecutor forDatabase(String databaseName);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **databaseName** | String | Name of a database for which you want to get new maintenance operations |
+
+| Return Value | |
+| ------------- | ----- |
+| MaintenanceOperationExecutor | New instance of maintenance that is scoped to the requested database |
+
+### Example
+
+
+
+{`MaintenanceOperationExecutor maintenanceOperations = documentStore.maintenance().forDatabase("otherDatabase");
+`}
+
+
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-nodejs.mdx new file mode 100644 index 0000000000..873d48e936 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-nodejs.mdx @@ -0,0 +1,101 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, all operations work on the default database defined in the [document store](../../../client-api/creating-document-store.mdx).
+
+* **To operate on a different database**, use the `forDatabase` method.
+  If the requested database doesn't exist on the server, an exception will be thrown.
+
+* In this page:
+    * [Common operation: `operations.forDatabase`](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx#common-operation:-operationsfordatabase)
+    * [Maintenance operation: `maintenance.forDatabase`](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx#maintenance-operation:-maintenancefordatabase)
+
+## Common operation: `operations.forDatabase`
+
+* For reference, all common operations are listed [here](../../../client-api/operations/what-are-operations.mdx#common-operations).
+
+
+
+{`// Define default database on the store
+const documentStore = new DocumentStore("yourServerURL", "DefaultDB");
+documentStore.initialize();
+
+// Use 'forDatabase', get operation executor for another database
+const opExecutor = documentStore.operations.forDatabase("AnotherDB");
+
+// Send the operation, e.g.
'GetRevisionsOperation' will be executed on "AnotherDB"
+const revisionsInAnotherDB =
+    await opExecutor.send(new GetRevisionsOperation("Orders/1-A"));
+
+// Without 'forDatabase', the operation is executed on "DefaultDB"
+const revisionsInDefaultDB =
+    await documentStore.operations.send(new GetRevisionsOperation("Company/1-A"));
+`}
+
+
+**Syntax**:
+
+
+
+{`store.operations.forDatabase(databaseName);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **databaseName** | `string` | Name of the database to operate on |
+
+| Return Value | Description |
+| - | - |
+| `OperationExecutor` | New instance of Operation Executor that is scoped to the requested database |
+
+
+
+## Maintenance operation: `maintenance.forDatabase`
+
+* For reference, all maintenance operations are listed [here](../../../client-api/operations/what-are-operations.mdx#maintenance-operations).
+
+
+
+{`// Define default database on the store
+const documentStore = new DocumentStore("yourServerURL", "DefaultDB");
+documentStore.initialize();
+
+// Use 'forDatabase', get maintenance operation executor for another database
+const opExecutor = documentStore.maintenance.forDatabase("AnotherDB");
+
+// Send the maintenance operation, e.g. get database stats for "AnotherDB"
+const statsForAnotherDB =
+    await opExecutor.send(new GetStatisticsOperation());
+
+// Without 'forDatabase', the stats are retrieved for "DefaultDB"
+const statsForDefaultDB =
+    await documentStore.maintenance.send(new GetStatisticsOperation());
+`}
+
+
+**Syntax**:
+
+
+
+{`store.maintenance.forDatabase(databaseName);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **databaseName** | `string` | Name of the database to operate on |
+
+| Return Value | Description |
+| - | - |
+| `MaintenanceOperationExecutor` | New instance of Maintenance Operation Executor that is scoped to the requested database |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-php.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-php.mdx new file mode 100644 index 0000000000..69e7a47830 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-php.mdx @@ -0,0 +1,113 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, all operations work on the default database defined in the [Document Store](../../../client-api/creating-document-store.mdx).
+
+* **To operate on a different database**, use the `forDatabase` method.
+  If the requested database doesn't exist on the server, an exception will be thrown.
+
+* In this page:
+    * [Common operation: `forDatabase`](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx#common-operation:-fordatabase)
+    * [Maintenance operation: `maintenance.forDatabase`](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx#maintenance-operation:-maintenancefordatabase)
+
+## Common operation: `forDatabase`
+
+* For reference, all common operations are listed [here](../../../client-api/operations/what-are-operations.mdx#common-operations).
+
+
+
+{`// Define default database on the store
+$documentStore = new DocumentStore(
+    ["yourServerURL"],
+    "DefaultDB"
+);
+$documentStore->initialize();
+
+try \{
+    // Use 'forDatabase', get operation executor for another database
+    /** @var OperationExecutor $opExecutor */
+    $opExecutor = $documentStore->operations()->forDatabase("AnotherDB");
+
+    // Send the operation, e.g. 'GetRevisionsOperation' will be executed on "AnotherDB"
+    $revisionsInAnotherDB = $opExecutor->send(new GetRevisionsOperation(Order::class, "Orders/1-A"));
+
+    // Without 'forDatabase', the operation is executed on "DefaultDB"
+    $revisionsInDefaultDB = $documentStore->operations()->send(new GetRevisionsOperation(Company::class, "Company/1-A"));
+\} finally \{
+    $documentStore->close();
+\}
+`}
+
+
+**Syntax**:
+
+
+
+{`public function forDatabase(?string $databaseName): OperationExecutor;
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **$databaseName** | `?string` | Name of the database to operate on |
+
+| Return Value | Description |
+| - | - |
+| `OperationExecutor` | New instance of Operation Executor that is scoped to the requested database |
+
+
+
+## Maintenance operation: `maintenance.forDatabase`
+
+* For reference, all maintenance operations are listed [here](../../../client-api/operations/what-are-operations.mdx#maintenance-operations).
+
+
+
+{`// Define default database on the store
+$documentStore = new DocumentStore(
+    [ "yourServerURL" ],
+    "DefaultDB"
+);
+$documentStore->initialize();
+
+try \{
+    // Use 'forDatabase', get maintenance operation executor for another database
+    /** @var MaintenanceOperationExecutor $opExecutor */
+    $opExecutor = $documentStore->maintenance()->forDatabase("AnotherDB");
+
+    // Send the maintenance operation, e.g. get database stats for "AnotherDB"
+    $statsForAnotherDB = $opExecutor->send(new GetStatisticsOperation());
+
+    // Without 'forDatabase', the stats are retrieved for "DefaultDB"
+    $statsForDefaultDB = $documentStore->maintenance()->send(new GetStatisticsOperation());
+\} finally \{
+    $documentStore->close();
+\}
+`}
+
+
+**Syntax**:
+
+
+
+{`public function forDatabase(?string $databaseName): MaintenanceOperationExecutor;
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **$databaseName** | `?string` | Name of the database to operate on |
+
+| Return Value | Description |
+| - | - |
+| `MaintenanceOperationExecutor` | New instance of Maintenance Operation Executor that is scoped to the requested database |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-python.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-python.mdx new file mode 100644 index 0000000000..e511d26de1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-database-python.mdx @@ -0,0 +1,97 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, all operations work on the default database defined in the [Document Store](../../../client-api/creating-document-store.mdx).
+
+* **To operate on a different database**, use the `for_database` method.
+  If the requested database doesn't exist on the server, an exception will be thrown.
+
+* In this page:
+    * [Common operation: `operations.for_database`](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx#common-operation:-operationsfor_database)
+    * [Maintenance operation: `maintenance.for_database`](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx#maintenance-operation:-maintenancefor_database)
+
+## Common operation: `operations.for_database`
+
+* For reference, all common operations are listed [here](../../../client-api/operations/what-are-operations.mdx#common-operations).
+
+
+
+{`# Define default database on the store
+document_store = DocumentStore(urls=["yourServerURL"], database="DefaultDB")
+document_store.initialize()
+
+with document_store:
+    # Use 'for_database', get operation executor for another database
+    op_executor = document_store.operations.for_database("AnotherDB")
+
+    # Send the operation, e.g. 'GetRevisionsOperation' will be executed on "AnotherDB"
+    revisions_in_another_db = op_executor.send(GetRevisionsOperation("Orders/1-A", Order))
+
+    # Without 'for_database', the operation is executed on "DefaultDB"
+    revisions_in_default_db = document_store.operations.send(GetRevisionsOperation("Company/1-A", Company))
+`}
+
+
+**Syntax**:
+
+
+
+{`def for_database(self, database_name: str) -> OperationExecutor: ...
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **database_name** | `str` | Name of the database to operate on |
+
+| Return Value | Description |
+| - | - |
+| `OperationExecutor` | New instance of Operation Executor that is scoped to the requested database |
+
+
+
+## Maintenance operation: `maintenance.for_database`
+
+* For reference, all maintenance operations are listed [here](../../../client-api/operations/what-are-operations.mdx#maintenance-operations).
+
+
+
+{`# Define default database on the store
+document_store = DocumentStore(urls=["yourServerURL"], database="DefaultDB")
+document_store.initialize()
+
+with document_store:
+    # Use 'for_database', get maintenance operation executor for another database
+    op_executor = document_store.maintenance.for_database("AnotherDB")
+
+    # Send the maintenance operation, e.g. get database stats for "AnotherDB"
+    stats_for_another_db = op_executor.send(GetStatisticsOperation())
+
+    # Without 'for_database', the stats are retrieved for "DefaultDB"
+    stats_for_default_db = document_store.maintenance.send(GetStatisticsOperation())
+`}
+
+
+**Syntax**:
+
+
+
+{`def for_database(self, database_name: str) -> MaintenanceOperationExecutor: ...
+`} + + + +| Parameters | Type | Description | +| - | - | - | +| **database_name** | `str` | Name of the database to operate on | + +| Return Value | Description | +| - | - | +| `MaintenanceOperationExecutor` | New instance of Maintenance Operation Executor that is scoped to the requested database | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-node-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-node-csharp.mdx new file mode 100644 index 0000000000..53d9460da2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-node-csharp.mdx @@ -0,0 +1,69 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, when working with multiple nodes, + all client requests will access the server node that is defined by the client configuration. + (Learn more in: [Load balancing client requests](../../../client-api/configuration/load-balance/overview.mdx)). + +* However, **server maintenance operations** can be executed on a specific node by using the `ForNode` method. + (An exception is thrown if that node is not available). + +* In this page: + * [Server-maintenance operations - ForNode](../../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx#server-maintenance-operations---fornode) + +## Server maintenance operations - ForNode + +* For reference, all server maintenance operations are listed [here](../../../client-api/operations/what-are-operations.mdx#server-maintenance-operations). + + + +{`// Default node access can be defined on the store +var documentStore = new DocumentStore +\{ + Urls = new[] \{ "ServerURL_1", "ServerURL_2", "..." 
\}, + Database = "DefaultDB", + Conventions = new DocumentConventions + \{ + // For example: + // With ReadBalanceBehavior set to: 'FastestNode': + // Client READ requests will address the fastest node + // Client WRITE requests will address the preferred node + ReadBalanceBehavior = ReadBalanceBehavior.FastestNode + \} +\}.Initialize(); + +using (documentStore) +\{ + // Use 'ForNode' to override the default node configuration + // The Maintenance.Server operation will be executed on the specified node + var dbNames = documentStore.Maintenance.Server.ForNode("C") + .Send(new GetDatabaseNamesOperation(0, 25)); +\} +`} + + + +**Syntax**: + + + +{`ServerOperationExecutor ForNode(string nodeTag); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **nodeTag** | string | The tag of the node to operate on | + +| Return Value | | +| - | - | +| `ServerOperationExecutor` | New instance of Server Operation Executor that is scoped to the requested node | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-node-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-node-nodejs.mdx new file mode 100644 index 0000000000..6289c58606 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-node-nodejs.mdx @@ -0,0 +1,63 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, when working with multiple nodes, + all client requests will access the server node that is defined by the client configuration. + (Learn more in: [Load balancing client requests](../../../client-api/configuration/load-balance/overview.mdx)). + +* However, **server maintenance operations** can be executed on a specific node by using the `forNode` method. + (An exception is thrown if that node is not available). + +* In this page: + * [Server maintenance operations - forNode](../../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx#server-maintenance-operations---fornode) + +## Server maintenance operations - forNode + +* For reference, all server maintenance operations are listed [here](../../../client-api/operations/what-are-operations.mdx#server-maintenance-operations). + + + +{`// Default node access can be defined on the store +const documentStore = new DocumentStore(["serverUrl_1", "serverUrl_2", "..."], "DefaultDB"); + +// For example: +// With readBalanceBehavior set to: 'FastestNode': +// Client READ requests will address the fastest node +// Client WRITE requests will address the preferred node +documentStore.conventions.readBalanceBehavior = "FastestNode"; +documentStore.initialize(); + +// Use 'forNode' to override the default node configuration +// Get a server operation executor for a specific node +const serverOpExecutor = await documentStore.maintenance.server.forNode("C"); + +// The maintenance.server operation will be executed on the specified node 'C' +const dbNames = await serverOpExecutor.send(new GetDatabaseNamesOperation(0, 25)); +`} + + + +**Syntax**: + + + +{`await store.maintenance.server.forNode(nodeTag); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **nodeTag** | string | The tag of the node to operate on | + +| Return Value | | +| - | - | +| `Promise` | A promise that returns a new instance of Server Operation Executor
scoped to the requested node | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-node-php.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-node-php.mdx new file mode 100644 index 0000000000..a15c6d6dac --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/_switch-operations-to-a-different-node-php.mdx @@ -0,0 +1,72 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, when working with multiple nodes, + all client requests will access the server node that is defined by the client configuration. + (Learn more in: [Load balancing client requests](../../../client-api/configuration/load-balance/overview.mdx)). + +* However, **server maintenance operations** can be executed on a specific node by using the `forNode` method. + (An exception is thrown if that node is not available). + +* In this page: + * [Server-maintenance operations - ForNode](../../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx#server-maintenance-operations---fornode) + +## Server maintenance operations - ForNode + +* For reference, all server maintenance operations are listed [here](../../../client-api/operations/what-are-operations.mdx#server-maintenance-operations). + + + +{`// Default node access can be defined on the store +$documentStore = new DocumentStore( + ["ServerURL_1", "ServerURL_2", "..."], + "DefaultDB" +); + +$conventions = new DocumentConventions(); + +// For example: +// With ReadBalanceBehavior set to: 'FastestNode': +// Client READ requests will address the fastest node +// Client WRITE requests will address the preferred node +$conventions->setReadBalanceBehavior(ReadBalanceBehavior::fastestNode()); +$documentStore->setConventions($conventions); + +$documentStore->initialize(); + +try \{ + // Use 'ForNode' to override the default node configuration + // The Maintenance.Server operation will be executed on the specified node + $dbNames = $documentStore->maintenance()->server()->forNode("C") + ->send(new GetDatabaseNamesOperation(0, 25)); +\} finally \{ + $documentStore->close(); +\} +`} + + + +**Syntax**: + + + +{`public function forNode(string $nodeTag): ServerOperationExecutor +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **$nodeTag** | `string` | The tag of the node to operate on | + +| Return Value | | +| - | - | +| `ServerOperationExecutor` | New instance of Server Operation Executor that is scoped to the requested node | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/switch-operations-to-a-different-database.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/switch-operations-to-a-different-database.mdx new file mode 100644 index 0000000000..ba863f86ad --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/switch-operations-to-a-different-database.mdx @@ -0,0 +1,47 @@ +--- +title: "Switch Operations to a Different Database" +hide_table_of_contents: true +sidebar_label: Switch operations to different database +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SwitchOperationsToADifferentDatabaseCsharp from './_switch-operations-to-a-different-database-csharp.mdx'; +import SwitchOperationsToADifferentDatabaseJava from 
'./_switch-operations-to-a-different-database-java.mdx';
+import SwitchOperationsToADifferentDatabasePython from './_switch-operations-to-a-different-database-python.mdx';
+import SwitchOperationsToADifferentDatabasePhp from './_switch-operations-to-a-different-database-php.mdx';
+import SwitchOperationsToADifferentDatabaseNodejs from './_switch-operations-to-a-different-database-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/how-to/switch-operations-to-a-different-node.mdx b/versioned_docs/version-7.1/client-api/operations/how-to/switch-operations-to-a-different-node.mdx new file mode 100644 index 0000000000..b9ff34e726 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/how-to/switch-operations-to-a-different-node.mdx @@ -0,0 +1,37 @@
+---
+title: "Switch Operations to a Different Node"
+hide_table_of_contents: true
+sidebar_label: Switch operations to different node
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import SwitchOperationsToADifferentNodeCsharp from './_switch-operations-to-a-different-node-csharp.mdx';
+import SwitchOperationsToADifferentNodePhp from './_switch-operations-to-a-different-node-php.mdx';
+import SwitchOperationsToADifferentNodeNodejs from './_switch-operations-to-a-different-node-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/_category_.json b/versioned_docs/version-7.1/client-api/operations/maintenance/_category_.json new file mode 100644 index 0000000000..3df66cfce3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/_category_.json @@ -0,0 +1,4 @@
+{
+  "position": 3,
+  "label": "Maintenance Operations"
+} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-csharp.mdx new file mode 100644 index 0000000000..9c5e83cea3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-csharp.mdx @@ -0,0 +1,193 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Statistics can be retrieved for the database and for collections.
+
+* By default, statistics are retrieved for the database defined in the Document Store.
+  To get database and collection statistics for another database use [ForDatabase](../../../client-api/operations/maintenance/get-stats.mdx#get-statistics-for-another-database).
+
+* In this page:
+    * [Get collection statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-collection-statistics)
+    * [Get detailed collection statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-collection-statistics)
+    * [Get database statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-database-statistics)
+    * [Get detailed database statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-database-statistics)
+    * [Get statistics for another database](../../../client-api/operations/maintenance/get-stats.mdx#get-statistics-for-another-database)
+
+## Get collection statistics
+
+To get **collection statistics**, use `GetCollectionStatisticsOperation`:
+
+
+{`// Pass an instance of class \`GetCollectionStatisticsOperation\` to the store
+CollectionStatistics stats =
+    store.Maintenance.Send(new GetCollectionStatisticsOperation());
+`}
+
+
+Statistics are returned in the `CollectionStatistics` object.
+
+
+{`// Collection stats results:
+public class CollectionStatistics
+\{
+    // Total # of documents in all collections
+    public long CountOfDocuments \{ get; set; \}
+    // Total # of conflicts
+    public long CountOfConflicts \{ get; set; \}
+    // Total # of documents per collection
+    public Dictionary<string, long> Collections \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## Get detailed collection statistics
+
+To get **detailed collection statistics**, use `GetDetailedCollectionStatisticsOperation`:
+
+
+{`// Pass an instance of class \`GetDetailedCollectionStatisticsOperation\` to the store
+DetailedCollectionStatistics stats =
+    store.Maintenance.Send(new GetDetailedCollectionStatisticsOperation());
+`}
+
+
+Statistics are returned in the `DetailedCollectionStatistics` object.
+
+
+{`// Detailed collection stats results:
+public class DetailedCollectionStatistics
+\{
+    // Total # of documents in all collections
+    public long CountOfDocuments \{ get; set; \}
+    // Total # of conflicts
+    public long CountOfConflicts \{ get; set; \}
+    // Collection details per collection
+    public Dictionary<string, CollectionDetails> Collections \{ get; set; \}
+\}
+
+// Details per collection
+public class CollectionDetails
+\{
+    public string Name \{ get; set; \}
+    public long CountOfDocuments \{ get; set; \}
+    public Size Size \{ get; set; \}
+    public Size DocumentsSize \{ get; set; \}
+    public Size TombstonesSize \{ get; set; \}
+    public Size RevisionsSize \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## Get database statistics
+
+To get **database statistics**, use `GetStatisticsOperation`:
+
+
+{`// Pass an instance of class \`GetStatisticsOperation\` to the store
+DatabaseStatistics stats =
+    store.Maintenance.Send(new GetStatisticsOperation());
+`}
+
+
+Statistics are returned in the `DatabaseStatistics` object.
+
+
+{`// Database stats results:
+public class DatabaseStatistics
+\{
+    public long? LastDocEtag \{ get; set; \} // Last document etag in database
+    public long?
LastDatabaseEtag \{ get; set; \} // Last database etag + + public int CountOfIndexes \{ get; set; \} // Total # of indexes in database + public long CountOfDocuments \{ get; set; \} // Total # of documents in database + public long CountOfRevisionDocuments \{ get; set; \} // Total # of revision documents in database + public long CountOfDocumentsConflicts \{ get; set; \} // Total # of documents conflicts in database + public long CountOfTombstones \{ get; set; \} // Total # of tombstones in database + public long CountOfConflicts \{ get; set; \} // Total # of conflicts in database + public long CountOfAttachments \{ get; set; \} // Total # of attachments in database + public long CountOfUniqueAttachments \{ get; set; \} // Total # of unique attachments in database + public long CountOfCounterEntries \{ get; set; \} // Total # of counter-group entries in database + public long CountOfTimeSeriesSegments \{ get; set; \} // Total # of time-series segments in database + + // List of stale index names in database + public string[] StaleIndexes => Indexes?.Where(x => x.IsStale).Select(x => x.Name).ToArray(); + // Statistics for each index in database + public IndexInformation[] Indexes \{ get; set; \} + + public string DatabaseChangeVector \{ get; set; \} // Global change vector of the database + public string DatabaseId \{ get; set; \} // Database identifier + public bool Is64Bit \{ get; set; \} // Indicates if process is 64-bit + public string Pager \{ get; set; \} // Component handling the memory-mapped files + public DateTime? LastIndexingTime \{ get; set; \} // Last time of indexing an item + public Size SizeOnDisk \{ get; set; \} // Database size on disk + public Size TempBuffersSizeOnDisk \{ get; set; \} // Temp buffers size on disk + public int NumberOfTransactionMergerQueueOperations \{ get; set; \} +\} +`} + + + + + +## Get detailed database statistics + +To get **detailed database statistics**, use `GetDetailedStatisticsOperation`: + + +{`// Pass an instance of class \`GetDetailedStatisticsOperation\` to the store +DetailedDatabaseStatistics stats = + store.Maintenance.Send(new GetDetailedStatisticsOperation()); +`} + + +Statistics are returned in the `DetailedDatabaseStatistics` object. + + +{`// Detailed database stats results: +public class DetailedDatabaseStatistics : DatabaseStatistics +\{ + // Total # of identities in database + public long CountOfIdentities \{ get; set; \} + // Total # of compare-exchange items in database + public long CountOfCompareExchange \{ get; set; \} + // Total # of cmpXchg tombstones in database + public long CountOfCompareExchangeTombstones \{ get; set; \} + // Total # of TS deleted ranges values in database + public long CountOfTimeSeriesDeletedRanges \{ get; set; \} +\} +`} + + + + + +## Get statistics for another database + +* By default, you get statistics for the database defined in your Document Store. +* Use `ForDatabase` to get database and collection statistics for another database. +* `ForDatabase` can be used with **any** of the above statistics options. + + + +{`// Get stats for 'AnotherDatabase': +DatabaseStatistics stats = + store.Maintenance.ForDatabase("AnotherDatabase").Send(new GetStatisticsOperation()); +`} + + + +* Learn more about switching operations to another database [here](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). 
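+
+For example, a minimal sketch that reads a few of the returned properties (assuming a database named "AnotherDatabase" exists):
+
+
+
+{`DatabaseStatistics stats = store.Maintenance
+    .ForDatabase("AnotherDatabase")
+    .Send(new GetStatisticsOperation());
+
+// Read some of the properties listed above
+Console.WriteLine($"Documents: \{stats.CountOfDocuments\}");
+Console.WriteLine($"Indexes: \{stats.CountOfIndexes\}");
+
+// 'StaleIndexes' is derived from 'Indexes' (see the DatabaseStatistics class above)
+foreach (string staleIndex in stats.StaleIndexes)
+\{
+    Console.WriteLine($"Stale index: \{staleIndex\}");
+\}
+`}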
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-java.mdx new file mode 100644 index 0000000000..be3f5037de --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-java.mdx @@ -0,0 +1,195 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Statistics can be retrieved for the database and for collections.
+
+* By default, statistics are retrieved for the database defined in the Document Store.
+  To get database and collection statistics for another database use [forDatabase](../../../client-api/operations/maintenance/get-stats.mdx#get-stats-for-another-database).
+
+* In this page:
+    * [Get collection stats](../../../client-api/operations/maintenance/get-stats.mdx#get-collection-stats)
+    * [Get detailed collection stats](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-collection-stats)
+    * [Get database stats](../../../client-api/operations/maintenance/get-stats.mdx#get-database-stats)
+    * [Get detailed database stats](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-database-stats)
+    * [Get stats for another database](../../../client-api/operations/maintenance/get-stats.mdx#get-stats-for-another-database)
+
+## Get collection stats
+
+Use `GetCollectionStatisticsOperation` to get **collection stats**.
+
+
+{`// Pass an instance of class \`GetCollectionStatisticsOperation\` to the store
+CollectionStatistics stats =
+    store.maintenance().send(new GetCollectionStatisticsOperation());
+`}
+
+
+
+
+Stats are returned in the `CollectionStatistics` object.
+
+
+{`// Collection stats results:
+public class CollectionStatistics \{
+    // Total # of documents in all collections
+    int CountOfDocuments;
+    // Total # of conflicts
+    int CountOfConflicts;
+    // Total # of documents per collection
+    Map<String, Long> Collections;
+\}
+`}
+
+
+
+
+
+## Get detailed collection stats
+
+Use `GetDetailedCollectionStatisticsOperation` to get **detailed collection stats**.
+
+
+{`// Pass an instance of class \`GetDetailedCollectionStatisticsOperation\` to the store
+DetailedCollectionStatistics stats =
+    store.maintenance().send(new GetDetailedCollectionStatisticsOperation());
+`}
+
+
+
+
+Stats are returned in the `DetailedCollectionStatistics` object.
+
+
+{`// Detailed collection stats results:
+public class DetailedCollectionStatistics \{
+    // Total # of documents in all collections
+    long CountOfDocuments;
+    // Total # of conflicts
+    long CountOfConflicts;
+    // Collection details per collection
+    Map<String, CollectionDetails> Collections;
+\}
+
+// Details per collection
+public class CollectionDetails \{
+    String Name;
+    long CountOfDocuments;
+    Size Size;
+    Size DocumentsSize;
+    Size TombstonesSize;
+    Size RevisionsSize;
+\}
+`}
+
+
+
+
+
+## Get database stats
+
+Use `GetStatisticsOperation` to get **database stats**.
+
+
+{`// Pass an instance of class \`GetStatisticsOperation\` to the store
+DatabaseStatistics stats =
+    store.maintenance().send(new GetStatisticsOperation());
+`}
+
+
+
+
+Stats are returned in the `DatabaseStatistics` object.
+ + +{`// Database stats results: +public class DatabaseStatistics \{ + Long LastDocEtag; // Last document etag in database + Long LastDatabaseEtag; // Last database etag + + int CountOfIndexes; // Total # of indexes in database + long CountOfDocuments; // Total # of documents in database + long CountOfRevisionDocuments; // Total # of revision documents in database + long CountOfDocumentsConflicts; // Total # of documents conflicts in database + long CountOfTombstones; // Total # of tombstones in database + long CountOfConflicts; // Total # of conflicts in database + long CountOfAttachments; // Total # of attachments in database + long CountOfUniqueAttachments; // Total # of unique attachments in database + long CountOfCounterEntries; // Total # of counter-group entries in database + long CountOfTimeSeriesSegments; // Total # of time-series segments in database + + IndexInformation[] Indexes; // Statistics for each index in database + + String DatabaseChangeVector; // Global change vector of the database + String DatabaseId; // Database identifier + boolean Is64Bit; // Indicates if process is 64-bit + String Pager; // Component handling the memory-mapped files + Date LastIndexingTime; // Last time of indexing an item + Size SizeOnDisk; // Database size on disk + Size TempBuffersSizeOnDisk; // Temp buffers size on disk + int NumberOfTransactionMergerQueueOperations; +\} +`} + + + + + +## Get detailed database stats + +Use `GetDetailedStatisticsOperation` to get **detailed database stats**. + + +{`// Pass an instance of class \`GetDetailedStatisticsOperation\` to the store +DetailedDatabaseStatistics stats = + store.maintenance().send(new GetDetailedStatisticsOperation()); +`} + + + + +Stats are returned in the `DetailedDatabaseStatistics` object. + + +{`// Detailed database stats results: +public class DetailedDatabaseStatistics extends DatabaseStatistics \{ + // Total # of identities in database + long CountOfIdentities; + // Total # of compare-exchange items in database + long CountOfCompareExchange; + // Total # of cmpXchg tombstones in database + long CountOfCompareExchangeTombstones; + // Total # of TS deleted ranges values in database + long CountOfTimeSeriesDeletedRanges; +\} +`} + + + + + +## Get stats for another database + + +* By default, you get stats for the database defined in your Document Store. +* Use `forDatabase` to get database & collection stats for another database. +* 'ForDatabase' can be used with **any** of the above stats options. + + + +{`// Get stats for 'AnotherDatabase': +DatabaseStatistics stats = + store.maintenance().forDatabase("AnotherDatabase").send(new GetStatisticsOperation()); +`} + + + +* Learn more about switching operations to another database [here](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-nodejs.mdx new file mode 100644 index 0000000000..6e68a6b1e9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-nodejs.mdx @@ -0,0 +1,200 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Statistics can be retrieved for the database and for collections. + +* By default, statistics are retrieved for the database defined in the Document Store. 
+  To get database and collection statistics for another database use [forDatabase](../../../client-api/operations/maintenance/get-stats.mdx#get-stats-for-another-database).
+
+* In this page:
+    * [Get collection statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-collection-statistics)
+    * [Get detailed collection statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-collection-statistics)
+    * [Get database statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-database-statistics)
+    * [Get detailed database statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-database-statistics)
+    * [Get statistics for another database](../../../client-api/operations/maintenance/get-stats.mdx#get-statistics-for-another-database)
+
+## Get collection statistics
+
+To get **collection statistics**, use `GetCollectionStatisticsOperation`:
+
+
+{`// Pass an instance of class \`GetCollectionStatisticsOperation\` to the store
+const stats = await store.maintenance.send(new GetCollectionStatisticsOperation());
+`}
+
+
+Statistics are returned in the `CollectionStatistics` object.
+
+
+{`// Object with following props is returned:
+\{
+    // Total # of documents in all collections
+    countOfDocuments,
+    // Total # of conflicts
+    countOfConflicts,
+    // Dictionary with total # of documents per collection
+    collections
+\}
+`}
+
+
+
+
+## Get detailed collection statistics
+
+To get **detailed collection statistics**, use `GetDetailedCollectionStatisticsOperation`:
+
+
+{`// Pass an instance of class \`GetDetailedCollectionStatisticsOperation\` to the store
+const stats = await store.maintenance.send(new GetDetailedCollectionStatisticsOperation());
+`}
+
+
+Statistics are returned in the `DetailedCollectionStatistics` object.
+
+
+{`// Object with following props is returned:
+\{
+    // Total # of documents in all collections
+    countOfDocuments,
+    // Total # of conflicts
+    countOfConflicts,
+    // Dictionary with 'collection details per collection'
+    collections,
+\}
+
+// 'Collection details per collection' object props:
+\{
+    name,
+    countOfDocuments,
+    size,
+    documentsSize,
+    tombstonesSize,
+    revisionsSize
+\}
+`}
+
+
+
+
+## Get database statistics
+
+To get **database statistics**, use `GetStatisticsOperation`:
+
+
+{`// Pass an instance of class \`GetStatisticsOperation\` to the store
+const stats = await store.maintenance.send(new GetStatisticsOperation());
+`}
+
+
+Statistics are returned in the `DatabaseStatistics` object.
+ + +{`// Object with following props is returned: +\{ + lastDocEtag, // Last document etag in database + lastDatabaseEtag, // Last database etag + + countOfIndexes, // Total # of indexes in database + countOfDocuments, // Total # of documents in database + countOfRevisionDocuments, // Total # of revision documents in database + countOfDocumentsConflicts, // Total # of documents conflicts in database + countOfTombstones, // Total # of tombstones in database + countOfConflicts, // Total # of conflicts in database + countOfAttachments, // Total # of attachments in database + countOfUniqueAttachments, // Total # of unique attachments in database + countOfCounterEntries, // Total # of counter-group entries in database + countOfTimeSeriesSegments, // Total # of time-series segments in database + + indexes, // Statistics for each index in database (array of IndexInformation) + + databaseChangeVector, // Global change vector of the database + databaseId, // Database identifier + is64Bit, // Indicates if process is 64-bit + pager, // Component handling the memory-mapped files + lastIndexingTime, // Last time of indexing an item + sizeOnDisk, // Database size on disk + tempBuffersSizeOnDisk, // Temp buffers size on disk + numberOfTransactionMergerQueueOperations +\} +`} + + + + + +## Get detailed database statistics + +To get **detailed database statistics**, use `GetDetailedStatisticsOperation`: + + +{`// Pass an instance of class \`GetDetailedStatisticsOperation\` to the store +const stats = await store.maintenance.send(new GetDetailedStatisticsOperation()); +`} + + +Statistics are returned in the `DetailedDatabaseStatistics` object. + + +{`// Resulting object contains all database stats props from above and the following in addition: +\{ + // Total # of identities in database + countOfIdentities, + // Total # of compare-exchange items in database + countOfCompareExchange, + // Total # of cmpXchg tombstones in database + countOfCompareExchangeTombstones, + // Total # of TS deleted ranges values in database + countOfTimeSeriesDeletedRanges +\} +`} + + + + + +## Get statistics for another database + +* By default, you get statistics for the database defined in your Document Store. +* Use `forDatabase` to get database and collection statistics for another database. +* `forDatabase` can be used with **any** of the above statistics options. + + + +{`// Get stats for 'AnotherDatabase': +const stats = + await store.maintenance.forDatabase("AnotherDatabase").send(new GetStatisticsOperation()); +`} + + + +* Learn more about switching operations to another database [here](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-php.mdx new file mode 100644 index 0000000000..ac19ad0831 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-php.mdx @@ -0,0 +1,211 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Statistics can be retrieved for the database and for collections. + +* By default, statistics are retrieved for the database defined in the Document Store. + To get database and collection statistics for another database use [forDatabase](../../../client-api/operations/maintenance/get-stats.mdx#get-stats-for-another-database). 
+
+* In this page:
+    * [Get collection statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-collection-statistics)
+    * [Get detailed collection statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-collection-statistics)
+    * [Get database statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-database-statistics)
+    * [Get detailed database statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-database-statistics)
+    * [Get statistics for another database](../../../client-api/operations/maintenance/get-stats.mdx#get-statistics-for-another-database)
+
+## Get collection statistics
+
+To get **collection statistics**, use `GetCollectionStatisticsOperation`:
+
+
+{`// Pass an instance of class \`GetCollectionStatisticsOperation\` to the store
+/** @var CollectionStatistics $stats */
+$stats = $store->maintenance()->send(new GetCollectionStatisticsOperation());
+`}
+
+
+Statistics are returned in the `CollectionStatistics` object.
+
+
+{`// Collection stats results:
+class CollectionStatistics
+\{
+    // Total # of documents in all collections
+    private ?int $countOfDocuments = null;
+    // Total # of conflicts
+    private ?int $countOfConflicts = null;
+    // Total # of documents per collection
+    private array $collections = [];
+
+    // ... getters and setters
+\}
+`}
+
+
+
+
+## Get detailed collection statistics
+
+To get **detailed collection statistics**, use `GetDetailedCollectionStatisticsOperation`:
+
+
+{`// Pass an instance of class \`GetDetailedCollectionStatisticsOperation\` to the store
+/** @var DetailedCollectionStatistics $stats */
+$stats = $store->maintenance()->send(new GetDetailedCollectionStatisticsOperation());
+`}
+
+
+Statistics are returned in the `DetailedCollectionStatistics` object.
+
+
+{`// Detailed collection stats results:
+class DetailedCollectionStatistics
+\{
+    // Total # of documents in all collections
+    private ?int $countOfDocuments = null;
+    // Total # of conflicts
+    private ?int $countOfConflicts = null;
+    // Collection details per collection (map of collection name => CollectionDetails)
+    private ?array $collections = null;
+
+    // ... getters and setters
+\}
+
+// Details per collection
+class CollectionDetails
+\{
+    private ?string $name = null;
+    private ?int $countOfDocuments = null;
+    private ?Size $size = null;
+    private ?Size $documentsSize = null;
+    private ?Size $tombstonesSize = null;
+    private ?Size $revisionsSize = null;
+
+    // ... getters and setters
+\}
+`}
+
+
+
+
+## Get database statistics
+
+To get **database statistics**, use `GetStatisticsOperation`:
+
+
+{`// Pass an instance of class \`GetStatisticsOperation\` to the store
+/** @var DatabaseStatistics $stats */
+$stats = $store->maintenance()->send(new GetStatisticsOperation());
+`}
+
+
+Statistics are returned in the `DatabaseStatistics` object.
+ + +{`// Database stats results: +class DatabaseStatistics implements ResultInterface +\{ + private ?int $lastDocEtag = null; // Last document etag in database + private ?int $lastDatabaseEtag = null; // Last database etag + + private ?int $countOfIndexes = null; // Total # of indexes in database + private ?int $countOfDocuments = null; // Total # of documents in database + private ?int $countOfRevisionDocuments = null; // Total # of revision documents in database + private ?int $countOfDocumentsConflicts = null; // Total # of documents conflicts in database + private ?int $countOfTombstones = null; // Total # of tombstones in database + private ?int $countOfConflicts = null; // Total # of conflicts in database + private ?int $countOfAttachments = null; // Total # of attachments in database + private ?int $countOfUniqueAttachments = null; // Total # of unique attachments in database + private ?int $countOfCounterEntries = null; // Total # of counter-group entries in database + private ?int $countOfTimeSeriesSegments = null; // Total # of time-series segments in database + + // List of stale index names in database + public function getStaleIndexes(): IndexInformationArray + \{ + return IndexInformationArray::fromArray( + array_map( + function (IndexInformation $index) \{ + return $index->isStale(); + \}, + $this->indexes->getArrayCopy()) + ); + \} + + // Statistics for each index in database + private ?IndexInformationArray $indexes = null; + + private ?string $databaseChangeVector = null; // Global change vector of the database + private ?string $databaseId = null; // Database identifier + private bool $is64Bit = false; // Indicates if process is 64-bit + private ?string $pager = null; // Component handling the memory-mapped files + private ?DateTimeInterface $lastIndexingTime = null; // Last time of indexing an item + private ?Size $sizeOnDisk = null; // Database size on disk + private ?Size $tempBuffersSizeOnDisk = null; // Temp buffers size on disk + private ?int $numberOfTransactionMergerQueueOperations = null; + + // ... getters and setters +\} +`} + + + + + +## Get detailed database statistics + +To get **detailed database statistics**, use `GetDetailedStatisticsOperation`: + + +{`// Pass an instance of class \`GetDetailedStatisticsOperation\` to the store +/** @var DetailedDatabaseStatistics $stats */ +$stats = $store->maintenance()->send(new GetDetailedStatisticsOperation()); +`} + + +Statistics are returned in the `DetailedDatabaseStatistics` object. + + +{`// Detailed database stats results: +class DetailedDatabaseStatistics extends DatabaseStatistics implements ResultInterface +\{ + // Total # of identities in database + private ?int $countOfIdentities = null; + // Total # of compare-exchange items in database + private ?int $countOfCompareExchange = null; + // Total # of cmpXchg tombstones in database + private ?int $countOfCompareExchangeTombstones = null; + // Total # of TS deleted ranges values in database + private ?int $countOfTimeSeriesDeletedRanges = null; + + // ... getters and setters +\} +`} + + + + + +## Get statistics for another database + +* By default, you get statistics for the database defined in your Document Store. +* Use `forDatabase` to get database and collection statistics for another database. +* `forDatabase` can be used with **any** of the above statistics options. 
+ + + +{`// Get stats for 'AnotherDatabase': +/** @var DatabaseStatistics $stats */ +$stats = $store->maintenance()->forDatabase("AnotherDatabase")->send(new GetStatisticsOperation()); +`} + + + +* Learn more about switching operations to another database [here](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-python.mdx new file mode 100644 index 0000000000..38b373cdd3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/_get-stats-python.mdx @@ -0,0 +1,195 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Statistics can be retrieved for the database and for collections. + +* By default, statistics are retrieved for the database defined in the Document Store. + To get database and collection statistics for another database use [for_database](../../../client-api/operations/maintenance/get-stats.mdx#get-stats-for-another-database). + +* In this page: + * [Get collection statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-collection-statistics) + * [Get detailed collection statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-collection-statistics) + * [Get database statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-database-statistics) + * [Get detailed database statistics](../../../client-api/operations/maintenance/get-stats.mdx#get-detailed-database-statistics) + * [Get statistics for another database](../../../client-api/operations/maintenance/get-stats.mdx#get-statistics-for-another-database) + +## Get collection statistics + +To get **collection statistics**, use `GetCollectionStatisticsOperation`: + + +{`# Pass an instance of class 'GetCollectionStatisticsOperation' to the store +stats = store.maintenance.send(GetCollectionStatisticsOperation()) +`} + + +Statistics are returned in the `CollectionStatistics` object. + + +{`class CollectionStatistics: + def __init__( + self, + count_of_documents: Optional[int] = None, + count_of_conflicts: Optional[int] = None, + collections: Optional[Dict[str, int]] = None, + ): ... +`} + + + + + +## Get detailed collection statistics + +To get **detailed collection statistics**, use `GetDetailedCollectionStatisticsOperation`: + + +{`# Pass an instance of class 'GetDetailedCollectionStatisticsOperation' to the store +stats = store.maintenance.send(GetDetailedCollectionStatisticsOperation()) +`} + + +Statistics are returned in the `DetailedCollectionStatistics` object. + + +{`class Size: + def __init__(self, size_in_bytes: int = None, human_size: str = None): ... + +class CollectionDetails: + def __init__( + self, + name: str = None, + count_of_documents: int = None, + size: Size = None, + documents_size: Size = None, + tombstones_size: Size = None, + revisions_size: Size = None, + ): ... + +class DetailedCollectionStatistics: + def __init__( + self, + count_of_documents: int = None, + count_of_conflicts: int = None, + collections: Dict[str, CollectionDetails] = None, + ) -> None: ... 
+`} + + + + + +## Get database statistics + +To get **database statistics**, use `GetStatisticsOperation`: + + +{`# Pass an instance of class 'GetStatisticsOperation' to the store +stats = store.maintenance.send(GetStatisticsOperation()) +`} + + +Statistics are returned in the `DatabaseStatistics` object. + + +{`class DatabaseStatistics: + def __init__( + self, + last_doc_etag: int = None, + last_database_etag: int = None, + count_of_indexes: int = None, + count_of_documents: int = None, + count_of_revision_documents: int = None, + count_of_documents_conflicts: int = None, + count_of_tombstones: int = None, + count_of_conflicts: int = None, + count_of_attachments: int = None, + count_of_unique_attachments: int = None, + count_of_counter_entries: int = None, + count_of_time_series_segments: int = None, + indexes: List[IndexInformation] = None, + database_change_vector: str = None, + database_id: str = None, + is_64_bit: bool = None, + pager: str = None, + last_indexing_time: datetime.datetime = None, + size_on_disk: Size = None, + temp_buffers_size_on_disk: Size = None, + number_of_transaction_merger_queue_operations: int = None, + ): ... +`} + + + + + +## Get detailed database statistics + +To get **detailed database statistics**, use `GetDetailedStatisticsOperation`: + + +{`# Pass an instance of class 'GetDetailedStatisticsOperation' to the store +stats = store.maintenance.send(GetDetailedStatisticsOperation()) +`} + + +Statistics are returned in the `DetailedDatabaseStatistics` object. + + +{`class DetailedDatabaseStatistics(DatabaseStatistics): + def __init__( + self, + last_doc_etag: int = None, + last_database_etag: int = None, + count_of_indexes: int = None, + count_of_documents: int = None, + count_of_revision_documents: int = None, + count_of_documents_conflicts: int = None, + count_of_tombstones: int = None, + count_of_conflicts: int = None, + count_of_attachments: int = None, + count_of_unique_attachments: int = None, + count_of_counter_entries: int = None, + count_of_time_series_segments: int = None, + indexes: List[IndexInformation] = None, + database_change_vector: str = None, + database_id: str = None, + is_64_bit: bool = None, + pager: str = None, + last_indexing_time: datetime.datetime = None, + size_on_disk: Size = None, + temp_buffers_size_on_disk: Size = None, + number_of_transaction_merger_queue_operations: int = None, + count_of_identities: int = None, # Total # of identities in database + count_of_compare_exchange: int = None, # Total # of compare-exchange items in database + count_of_compare_exchange_tombstones: int = None, # Total # of cmpXchg tombstones in database + ): ... +`} + + + + + +## Get statistics for another database + +* By default, you get statistics for the database defined in your Document Store. +* Use `for_database` to get database and collection statistics for another database. +* `for_database` can be used with **any** of the above statistics options. + + + +{`# Get stats for 'AnotherDatabase' +stats = store.maintenance.for_database("AnotherDatabase").send(GetStatisticsOperation()) +`} + + + +* Learn more about switching operations to another database [here](../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). 
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/assets/clean-change-vector-after.png b/versioned_docs/version-7.1/client-api/operations/maintenance/assets/clean-change-vector-after.png
new file mode 100644
index 0000000000..5ed0688eb5
Binary files /dev/null and b/versioned_docs/version-7.1/client-api/operations/maintenance/assets/clean-change-vector-after.png differ
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/assets/clean-change-vector.png b/versioned_docs/version-7.1/client-api/operations/maintenance/assets/clean-change-vector.png
new file mode 100644
index 0000000000..9962658885
Binary files /dev/null and b/versioned_docs/version-7.1/client-api/operations/maintenance/assets/clean-change-vector.png differ
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/backup/_category_.json b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/_category_.json
new file mode 100644
index 0000000000..578bdcc591
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 6,
+    "label": "Backup"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/backup/backup-overview.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/backup-overview.mdx
new file mode 100644
index 0000000000..d0252ef9ca
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/backup-overview.mdx
@@ -0,0 +1,598 @@
+---
+title: "Backup"
+hide_table_of_contents: true
+sidebar_label: Backup
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Backup
+
+
+* Create a backup of your data to secure it or to preserve a copy of it in its current state for future reference.
+
+* RavenDB's Backup task is an [Ongoing-Task](../../../../studio/database/tasks/ongoing-tasks/general-info.mdx)
+  designed to run periodically on a pre-defined schedule.
+  You can run it as a one-time operation as well, by using [Export](../../../../client-api/smuggler/what-is-smuggler.mdx#export)
+  or executing a backup task [immediately](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#initiate-immediate-backup-execution).
+
+* On a [sharded](../../../../sharding/overview.mdx) database, a single backup task
+  is defined by the user for all shards, and RavenDB automatically defines
+  sub-tasks that create backups per shard.
+  Read about backups on a sharded database [in the section dedicated to it](../../../../sharding/backup-and-restore/backup.mdx).
+ +* In this page: + * [Backup Types](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#backup-types) + * [Logical-Backup](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#logical-backup) + * [Snapshot](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#snapshot) + * [Backup Scope](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#backup-scope) + * [Full Backup](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#full-backup) + * [Incremental Backup](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#incremental-backup) + * [Backup to Local and Remote Destinations](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#backup-to-local-and-remote-destinations) + * [Backup Retention Policy](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#backup-retention-policy) + * [Server-Wide Backup](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#server-wide-backup) + * [Initiate Immediate Backup Execution](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#initiate-immediate-backup-execution) + * [Delay Backup Execution](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#delay-backup-execution) + * [Recommended Precautions](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#recommended-precautions) + + +## Backup Types + +#### Logical-Backup + +* Data, index definitions, and ongoing tasks are backed-up in [compressed](../../../../server/ongoing-tasks/backup-overview.mdx#compression) + JSON files. + +* During the restoration, RavenDB - + * Re-inserts all data into the database. + * Inserts the saved index definitions. To save space, Logical Backup stores index definitions only. + After restoration, the dataset is scanned and indexed according to the definitions. + +* See [backup contents](../../../../server/ongoing-tasks/backup-overview.mdx#backup-contents). + +* Restoration time is, therefore, **slower** than when restoring from a Snapshot. + +* The backup file size is **significantly smaller** than that of a Snapshot. + +* In addition to full data backup, Logical Backups can be defined as **incremental**, + saving any changes made since the previous backup. + +* The following code sample defines a full-backup task that would be executed every 3 hours: + + +{`var config = new PeriodicBackupConfiguration +\{ + LocalSettings = new LocalSettings + \{ + // Local path for storing the backup + FolderPath = @"E:\\RavenBackups" + \}, + + // Full Backup period (Cron expression for a 3-hours period) + FullBackupFrequency = "0 */3 * * *", + + // Set backup type to Logical-Backup + BackupType = BackupType.Backup, + + // Task Name + Name = "fullBackupTask", +\}; +var operation = new UpdatePeriodicBackupOperation(config); +var result = await docStore.Maintenance.SendAsync(operation); +`} + + + Note the usage of [Cron scheduling](https://en.wikipedia.org/wiki/Cron) when setting backup frequency. +#### Snapshot + +* A Snapshot is a compressed binary duplication of the full database structure. + This includes the data file and the journals at a given point in time. + Therefore it includes fully built indexes and ongoing tasks. + See [file structure](../../../../server/storage/directory-structure.mdx#storage--directory-structure) for more info. + +* Snapshot backups are available only for **Enterprise subscribers**. 
+ +* During restoration - + * Re-inserting data into the database is not required. + * Re-indexing is not required. + +* See [backup contents](../../../../server/ongoing-tasks/backup-overview.mdx#backup-contents). + +* Restoration is typically **faster** than that of a logical backup. + +* Snapshot size is typically **larger** than that of a logical backup. + +* If Incremental backups are created for a Snapshot-type backup: + * The first backup will be a full Snapshot. + * The following backups will be Incremental. + * [Incremental backups](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#incremental-backup) + have different storage contents than Snapshots. + +* Code Sample: + + +{`// Set backup type to Snapshot +BackupType = BackupType.Snapshot, +`} + + +#### Basic Comparison Between a Logical-Backup and a Snapshot: + + | Backup Type | Stored Format | Restoration speed | Size | + | ------ | ------ | --- | --- | + | Snapshot | Compressed Binary Image | Fast | Larger than a logical-backup | + | Logical backup | Compressed Textual Data - JSON | Slow | Smaller than a Snapshot | + + +Verify that RavenDB is allowed to store files in the path set in `LocalSettings.FolderPath`. + + + + + + +## Backup Scope + +As described in [the overview](../../../../server/ongoing-tasks/backup-overview.mdx#backing-up-and-restoring-a-database), a backup task can create **full** and **incremental** backups. + +* A Backup Task can be defined to create either a full data backup or an incremental backup. + In both cases, the backup task adds a single new backup file to the backup folder each time it runs, + leaving the existing backup files untouched. +#### Full-Backup + + +* **File Format** + A full-backup is a **compressed JSON file** if it is a logical + backup, or a **compressed binary file** if it is a snapshot. + +* **Task Ownership** + There are no preliminary conditions for creating a full-backup. + Any node can perform this task. + +* **To run a full-backup** + Set `FullBackupFrequency`. + + +{`// A full-backup will run every 6-hours (Cron expression) +FullBackupFrequency = "0 */6 * * *", +`} + + +#### Incremental-Backup + +* **File Format and Notes About Contents** + * An incremental-backup file is **always in JSON format**. + It is so even when the full-backup it is associated with is a binary snapshot. + * An incremental backup stores index definitions (not full indexes). + After the backup is restored, the dataset is re-indexed according to the index definitions. + + This initial re-indexing can be time-consuming on large datasets. + + * An incremental backup doesn't store [change vectors](../../../../server/clustering/replication/change-vector.mdx). + + +* **Task Ownership** + The ownership of an incremental-backup task is granted dynamically by the cluster. + An incremental-backup can be executed only by the same node that currently owns the backup task. + A node can run an incremental-backup, only after running full-backup at least once. + +* **To run an incremental-backup** + Set `IncrementalBackupFrequency`. 
+ + + +{`// An incremental-backup will run every 20 minutes (Cron expression) +IncrementalBackupFrequency = "*/20 * * * *", +`} + + + + + +## Backup to Local and Remote Destinations + +* Backups can be made **locally**, as well as to a set of **remote locations** including - + * A network path + * An FTP/SFTP target + * Azure Storage + * Amazon S3 + * Amazon Glacier + * Google Cloud + +* RavenDB will store data in a local folder first, and transfer it to the remote + destination from the local one. + * If a local folder hasn't been specified, RavenDB will use the + temp folder defined in its [Storage.TempPath](../../../../server/configuration/storage-configuration.mdx#storagetemppath) setting. + If _Storage.TempPath_ is not defined, the temporary files + will be created at the same location as the data file. + In either case, the folder will be used as temporary storage + and the local files deleted from it when the transfer is completed. + * If a local folder **has** been specified, RavenDB will use it both + for the transfer and as its permanent local backup location. + +* Local and Remote Destinations Settings Code Sample: + + +{`var config = new PeriodicBackupConfiguration +\{ + LocalSettings = new LocalSettings + \{ + FolderPath = @"E:\\RavenBackups" + \}, + + // FTP Backup settings + FtpSettings = new FtpSettings + \{ + Url = "192.168.10.4:8080", + UserName = "John", + Password = "JohnDoe38" + \}, + + // Azure Backup settings + AzureSettings = new AzureSettings + \{ + StorageContainer = "storageContainer", + RemoteFolderName = "remoteFolder", + AccountName = "JohnAccount", + AccountKey = "key" + \}, + + // Amazon S3 bucket settings. + S3Settings = new S3Settings + \{ + AwsAccessKey = "your access key here", + AwsSecretKey = "your secret key here", + AwsRegionName = "OPTIONAL", + BucketName = "john-bucket" + \}, + + // Amazon Glacier settings. + GlacierSettings = new GlacierSettings + \{ + AwsAccessKey = "your access key here", + AwsSecretKey = "your secret key here", + AwsRegionName = "OPTIONAL", + VaultName = "john-glacier", + RemoteFolderName = "john/backups" + \}, + + // Google Cloud Backup settings + GoogleCloudSettings = new GoogleCloudSettings + \{ + BucketName = "RavenBucket", + RemoteFolderName = "BackupFolder", + GoogleCredentialsJson = "GoogleCredentialsJson" + \} + +\}; +var operation = new UpdatePeriodicBackupOperation(config); +var result = await docStore.Maintenance.SendAsync(operation); +`} + + + + + Use AWS [IAM](https://aws.amazon.com/iam/) (Identity and Access Management) + to restrict users access while they create backups. + E.g. - + + +{`\{ + "Version": "2012-10-17", + "Statement": [ + \{ + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::BUCKET_NAME/*" + \}, + \{ + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "s3:ListBucket", + "s3:GetBucketAcl", + "s3:GetBucketLocation" + ], + "Resource": "arn:aws:s3:::BUCKET_NAME" + \} + ] +\} +`} + + + + + +## Backup Retention Policy + +By default, backups are stored indefinitely. The backup retention policy sets +a retention period, at the end of which backups are deleted. Deletion occurs +during the next scheduled backup task after the end of the retention period. + +Full backups and their corresponding incremental backups are deleted together. +Before a full backup can be deleted, all of its incremental backups must be older +than the retention period as well. 
+ +The retention policy is a property of `PeriodicBackupConfiguration`: + + + +{`public class RetentionPolicy +\{ + public bool Disabled \{ get; set; \} + public TimeSpan? MinimumBackupAgeToKeep \{ get; set; \} +\} +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **Disabled** | `bool` | If set to `true`, backups will be retained indefinitely, and not deleted. Default: false | +| **MinimumBackupAgeToKeep** | `TimeSpan` | The minimum amount of time to retain a backup. Once a backup is older than this time span, it will be deleted during the next scheduled backup task. | + +#### Example + + + +{`var config = new PeriodicBackupConfiguration +\{ + RetentionPolicy = new RetentionPolicy + \{ + Disabled = false, // False is the default value + MinimumBackupAgeToKeep = TimeSpan.FromDays(100) + \} +\}; +`} + + + + + +## Server-Wide Backup + +You can create a Server-Wide Backup task to back-up **all the databases in your cluster** at a scheduled time. +Individual databases can be excluded from the backup. Learn more in [Studio: Server-Wide Backup](../../../../studio/server/server-wide-backup.mdx). + +Backups can be made locally, as well as to a [set of remote locations](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#backup-to-local-and-remote-destinations). + +#### Examples + +A server-wide backup configuration that sets multiple destinations: + + + +{`var putConfiguration = new ServerWideBackupConfiguration +\{ + Disabled = true, + FullBackupFrequency = "0 2 * * 0", + IncrementalBackupFrequency = "0 2 * * 1", + + //Backups are stored in this folder first, and sent from it to remote destinations (if defined). + LocalSettings = new LocalSettings + \{ + FolderPath = "localFolderPath" + \}, + + //FTP settings + FtpSettings = new FtpSettings + \{ + Url = "ftps://localhost/john/backups" + \}, + + //Microsoft Azure settings. + AzureSettings = new AzureSettings + \{ + AccountKey = "Azure Account Key", + AccountName = "Azure Account Name", + RemoteFolderName = "john/backups" + \}, + + //Amazon S3 bucket settings. + S3Settings = new S3Settings + \{ + AwsAccessKey = "Amazon S3 Access Key", + AwsSecretKey = "Amazon S3 Secret Key", + AwsRegionName = "Amazon S3 Region Name", + BucketName = "john-bucket", + RemoteFolderName = "john/backups" + \}, + + //Amazon Glacier settings. 
GlacierSettings = new GlacierSettings
+    \{
+        AwsAccessKey = "Amazon Glacier Access Key",
+        AwsSecretKey = "Amazon Glacier Secret Key",
+        AwsRegionName = "Amazon Glacier Region Name",
+        VaultName = "john-glacier",
+        RemoteFolderName = "john/backups"
+    \},
+
+    //Google Cloud Backup settings
+    GoogleCloudSettings = new GoogleCloudSettings
+    \{
+        BucketName = "Google Cloud Bucket",
+        RemoteFolderName = "BackupFolder",
+        GoogleCredentialsJson = "GoogleCredentialsJson"
+    \}
+\};
+
+var result = await store.Maintenance.Server.SendAsync(new PutServerWideBackupConfigurationOperation(putConfiguration));
+var serverWideConfiguration = await store.Maintenance.Server.SendAsync(new GetServerWideBackupConfigurationOperation(result.Name));
+`}
+
+
+
+A server-wide backup configuration that excludes several databases:
+
+
+
+{`var DBExcludeConfiguration = new ServerWideBackupConfiguration
+\{
+    Disabled = true,
+    FullBackupFrequency = "0 2 * * 0",
+    LocalSettings = new LocalSettings
+    \{
+        FolderPath = "localFolderPath"
+    \},
+    ExcludedDatabases = new []
+    \{
+        "DB1",
+        "DB2",
+        "DB5",
+        "NorthWind",
+        "DB2_Jun_2018_Backup"
+    \}
+\};
+
+var result = await store.Maintenance.Server.SendAsync(new PutServerWideBackupConfigurationOperation(DBExcludeConfiguration));
+`}
+
+
+
+## Initiate Immediate Backup Execution
+
+The Backup task is [executed periodically](../../../../server/ongoing-tasks/backup-overview.mdx#backup--restore-overview) on its predefined schedule.
+If needed, it can also be executed immediately.
+
+* To execute an existing backup task immediately, use the `StartBackupOperation` method.
+
+
+{`// Create a new backup task
+var operation = new UpdatePeriodicBackupOperation(config);
+var result = await docStore.Maintenance.SendAsync(operation);
+
+// Run the backup task immediately
+await docStore.Maintenance.SendAsync(new StartBackupOperation(true, result.TaskId));
+`}
+
+
+  * Definition:
+
+
+{`public StartBackupOperation(bool isFullBackup, long taskId)
+`}
+
+
+  * Parameters:
+
+    | Parameter | Type | Functionality |
+    | ------ | ------ | ------ |
+    | isFullBackup | bool | true: full-backup <br/> false: incremental-backup |
+    | taskId | long | The existing backup task ID |
+
+
+* To verify the execution results, use the `GetPeriodicBackupStatusOperation` method.
+
+
+{`// Pass the ongoing backup task ID to GetPeriodicBackupStatusOperation
+var backupStatus = new GetPeriodicBackupStatusOperation(result.TaskId);
+`}
+
+
+  * Return Value:
+    The **PeriodicBackupStatus** object returned from **GetPeriodicBackupStatusOperation** is filled with the previously configured backup parameters and with the execution results.
+
+
+{`public class PeriodicBackupStatus : IDatabaseTaskStatus
+\{
+    public long TaskId \{ get; set; \}
+    public BackupType BackupType \{ get; set; \}
+    public bool IsFull \{ get; set; \}
+    public string NodeTag \{ get; set; \}
+    public DateTime? LastFullBackup \{ get; set; \}
+    public DateTime? LastIncrementalBackup \{ get; set; \}
+    public DateTime? LastFullBackupInternal \{ get; set; \}
+    public DateTime? LastIncrementalBackupInternal \{ get; set; \}
+    public LocalBackup LocalBackup \{ get; set; \}
+    public UploadToS3 UploadToS3;
+    public UploadToGlacier UploadToGlacier;
+    public UploadToAzure UploadToAzure;
+    public UploadToFtp UploadToFtp;
+    public long? LastEtag \{ get; set; \}
+    public LastRaftIndex LastRaftIndex \{ get; set; \}
+    public string FolderName \{ get; set; \}
+    public long? DurationInMs \{ get; set; \}
+    public long Version \{ get; set; \}
+    public Error Error \{ get; set; \}
+    public long? LastOperationId \{ get; set; \}
+\}
+`}
+
+
+
+## Delay Backup Execution
+
+The execution of a periodic backup task can be **delayed** for a given time period
+via [Studio](../../../../studio/database/tasks/backup-task.mdx#delaying-a-running-backup-task)
+or using the `DelayBackupOperation` store operation.
+
+* Definition:
+
+
+{`public DelayBackupOperation(long runningBackupTaskId, TimeSpan duration)
+`}
+
+
+* Parameters:
+
+    | Parameter | Type | Functionality |
+    | ------ | ------ | ------ |
+    | runningBackupTaskId | `long` | Backup task ID |
+    | duration | `TimeSpan` | Delay Duration |
+
+* Example:
+  To delay the execution of a running backup task, pass `DelayBackupOperation`
+  the task's ID and the delay duration.
+
+
+{`// Get backup operation info
+var taskBackupInfo = await docStore.Maintenance.SendAsync(
+    new GetOngoingTaskInfoOperation(taskId, OngoingTaskType.Backup)) as OngoingTaskBackup;
+
+// Set delay duration to 10 minutes from now
+var delayDuration = TimeSpan.FromMinutes(10);
+var delayUntil = DateTime.Now + delayDuration;
+
+// Delay backup operation
+await docStore.Maintenance.SendAsync(
+    new DelayBackupOperation(taskBackupInfo.OnGoingBackup.RunningBackupTaskId, delayDuration));
+`}
+
+
+
+## Recommended Precautions
+
+
+* **Don't substitute RavenDB's backup procedures with simply copying the database folder yourself**.
+  The official backup procedure satisfies needs that simply copying the database folder does not. E.g. -
+  * A reliable point-in-time freeze of backed-up data.
+  * ACID compliance of backed-up data, preserving its consistency during restoration.
+
+* **Remove old backup files regularly**.
+  Set the [backup retention policy](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#backup-retention-policy)
+  to remove unneeded backup files so that they don't build up.
+  While setting how many days to keep your backups, consider how much of a recent database history you would like to have access to.
+
+* **Store backup files in a location other than your database's**.
+ Note that backup files are always stored in a local folder first (even when the final backup destination is remote). + Make sure that this local folder is not where your database is stored, as a precaution to keep vacant database storage space. + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/backup/encrypted-backup.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/encrypted-backup.mdx new file mode 100644 index 0000000000..46d3955f03 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/encrypted-backup.mdx @@ -0,0 +1,336 @@ +--- +title: "Backup Encryption" +hide_table_of_contents: true +sidebar_label: Encryption +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Backup Encryption + + +* When a database is **encrypted**, you can generate the following backup types for it: + * An *encrypted Snapshot* (using the database encryption key) + * An *encrypted Logical-Backup* (using the database encryption key, or any key of your choice) + * An *un-encrypted Logical-Backup* + +* When a database is **not encrypted**, you can generate the following backup types for it: + * An *un-encrypted Snapshot* + * An *encrypted Logical-Backup* (providing an encryption key of your choice) + * An *un-encrypted* Logical-Backup + +* **Incremental backups** of encrypted logical-backups and snapshots are encrypted as well, + using the same encryption key provided for the full backup. + +* In this page: + * [RavenDB's Security Approach](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#ravendb) + * [Secure Client-Server Communication](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#secure-client-server-communication) + * [Database Encryption](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#database-encryption) + * [Backup-Encryption Overview](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#backup-encryption-overview) + * [Prerequisites to Encrypting Backups](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#prerequisites-to-encrypting-backups) + * [Choosing Encryption Mode & Key](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#choosing-encryption-mode--key) + * [Creating an Encrypted Logical-Backup](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#creating-an-encrypted-logical-backup) + * [For a Non-Encrypted Database](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#for-a-non-encrypted-database) + * [For an Encrypted Database](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#for-an-encrypted-database) + * [Creating an Encrypted Snapshot](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#creating-an-encrypted-snapshot) + * [Restoring an Encrypted Backup](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#restoring-an-encrypted-backup) + * [Restoring an encrypted Logical-Backup](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#restoring-an-encrypted-logical-backup) + * [Restoring a 
Snapshot](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#restoring-a-snapshot) + +## RavenDB's Security Approach + +RavenDB's comprehensive security approach includes - + +* **Authentication** and **Certification** + to secure your data while it is **transferred between client and server**. +* **Database Encryption** + to secure your data while **stored in the database**. +* **Backup-Files Encryption** + to secure your data while **stored for safe-keeping**. +#### Secure Client-Server Communication + +To prevent unauthorized access to your data during transfer, apply the following: + +* **Enable secure communication** in advance, during the server setup. + Secure communication can be enabled either [manually](../../../../server/security/authentication/certificate-configuration.mdx) + or [using the setup-wizard](../../../../start/installation/setup-wizard.mdx). +* **Authenticate with the server**. + Secure communication requires clients to **certify themselves** in order to access RavenDB. + Client authentication code sample: + + +{` +// path to the certificate you received during the server setup +var cert = new X509Certificate2(@"C:\\Users\\RavenDB\\authentication_key\\admin.client.certificate.RavenDBdom.pfx"); + +using (var docStore = new DocumentStore +\{ + Urls = new[] \{ "https://a.RavenDBdom.development.run" \}, + Database = "encryptedDatabase", + Certificate = cert +\}.Initialize()) +\{ + // Backup & Restore procedures here +\} +`} + + +#### Database Encryption + +Secure the data stored on the server by +[encrypting your database](../../../../server/security/encryption/database-encryption.mdx). + +* **Secure communication to enable database encryption.** + An encrypted database can only be created when the + [client-server communication is secure](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#secure-client-server-communication). + + + +## Backup-Encryption Overview + +#### Prerequisites to Encrypting Backups + +* **Logical-Backup** + There are no prerequisites to encrypting a Logical-Backup. + An encrypted logical-backup can be generated for an **encrypted database** and + for a **non-encrypted database**. + The encryption key used to generate an encrypted logical-backup of an encrypted database + can be different than the original database encryption key. + +* **Snapshot** + A [snapshot](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#snapshot) is an exact image of your database. + If the database is **not encrypted**, its snapshot wouldn't be either. + If the database is **encrypted**, its snapshot would also be encrypted using the database encryption key. + If you want your snapshot to be encrypted, simply take the snapshot of an + [encrypted database](../../../../server/security/encryption/database-encryption.mdx#creating-an-encrypted-database-using-the-rest-api-and-the-client-api). + +#### Choosing Encryption Mode & Key + +Use the same [Backup](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#backup) and [Restore](../../../../client-api/operations/maintenance/backup/restore.mdx) methods that are used to create and restore **un**-encrypted backups. +Specify whether encryption is used, and with which encryption key, +in the **BackupEncryptionSettings** structure defined within the +[PeriodicBackupConfiguration](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#backup-to-local-and-remote-destinations) object. 
+
+`BackupEncryptionSettings` definition:
+
+
+
+{`public class BackupEncryptionSettings
+\{
+    public EncryptionMode EncryptionMode \{ get; set; \}
+    public string Key \{ get; set; \}
+
+    public BackupEncryptionSettings()
+    \{
+        Key = null;
+        EncryptionMode = EncryptionMode.None;
+    \}
+\}
+`}
+
+
+
+`BackupEncryptionSettings` properties:
+
+| Property | Type | Functionality |
+|--------------------|--------|---------------|
+| **EncryptionMode** | enum | Sets the encryption mode. <br/> `None` - Use **no encryption** (default mode). <br/> `UseDatabaseKey` - Use **the same key the DB is encrypted with** (Logical-Backups & Snapshots). <br/> `UseProvidedKey` - Provide **your own encryption key** (Logical-Backups only). |
+| **Key** | string | Pass **your own encryption key** using this parameter (Logical-Backup only), e.g.: <br/> `EncryptionMode = EncryptionMode.UseProvidedKey,` <br/> `Key = "OI7Vll7DroXdUORtc6Uo64wdAk1W0Db9ExXXgcg5IUs="` <br/> **Note**: When Key is provided and `EncryptionMode` is set to `UseDatabaseKey`, the **database key** is used (and not the provided key). |
+
+`EncryptionMode` definition:
+
+
+
+{`public enum EncryptionMode
+\{
+    None,
+    UseDatabaseKey,
+    UseProvidedKey
+\}
+`}
+
+
+
+## Creating an Encrypted Logical-Backup
+
+An encrypted logical-backup can be created for both **encrypted** and **non-encrypted** databases.
+
+#### For a Non-Encrypted Database
+
+1. To create a **non-encrypted logical-backup** -
+   **Set** `EncryptionMode = EncryptionMode.None`
+   Or
+   **Don't set** EncryptionMode & Key at all - Default value is: `EncryptionMode.None`
+
+2. To create an **encrypted logical-backup**, set:
+
+
+{`EncryptionMode = EncryptionMode.UseProvidedKey,
+Key = "a_key_of_your_choice"
+`}
+
+
+#### For an Encrypted Database
+
+1. To create a non-encrypted logical-backup -
+   Set `EncryptionMode = EncryptionMode.None`
+
+2. To create an encrypted logical-backup using the database key:
+   **Set** `EncryptionMode = EncryptionMode.UseDatabaseKey`
+   Or
+   **Don't set** EncryptionMode & Key at all - Default value is: `EncryptionMode.UseDatabaseKey`
+
+
+{`//Encrypting a logical-backup using the database encryption key
+var config = new PeriodicBackupConfiguration
+\{
+    //Additional settings here..
+    //..
+
+    //Set backup type to logical-backup
+    BackupType = BackupType.Backup,
+
+    BackupEncryptionSettings = new BackupEncryptionSettings
+    \{
+        //Use the same encryption key as the database
+        EncryptionMode = EncryptionMode.UseDatabaseKey
+    \}
+\};
+var operation = new UpdatePeriodicBackupOperation(config);
+var result = await docStore.Maintenance.SendAsync(operation);
+`}
+
+
+3. To create an encrypted logical-backup using your own key, set:
+
+
+{`EncryptionMode = EncryptionMode.UseProvidedKey,
+Key = "a_key_of_your_choice"
+`}
+
+
+
+
+## Creating an Encrypted Snapshot
+
+An encrypted Snapshot can only be created for an encrypted database.
+
+* To create a **Non-Encrypted Snapshot** (for a non-encrypted database) -
+  **Set** `EncryptionMode = EncryptionMode.None`
+  Or
+  **Don't set** EncryptionMode & Key at all - Default value is: `EncryptionMode.None`
+
+* To create an **Encrypted Snapshot** (for an encrypted database) -
+  **Set** `EncryptionMode = EncryptionMode.UseDatabaseKey`
+  Or
+  **Don't set** EncryptionMode & Key at all - Default value is: `EncryptionMode.UseDatabaseKey`
+
+
+{`var config = new PeriodicBackupConfiguration
+\{
+    //Additional settings here..
+    //..
+
+    //Set backup type to snapshot.
+    //If the database is encrypted, its snapshot will be encrypted as well.
+    BackupType = BackupType.Snapshot,
+
+    BackupEncryptionSettings = new BackupEncryptionSettings
+    \{
+        //To encrypt a snapshot, EncryptionMode must be set to EncryptionMode.UseDatabaseKey.
+        //Setting it to other values will generate an InvalidOperationException.
+        EncryptionMode = EncryptionMode.UseDatabaseKey
+    \}
+\};
+var operation = new UpdatePeriodicBackupOperation(config);
+var result = await docStore.Maintenance.SendAsync(operation);
+`}
+
+
+
+
+## Restoring an Encrypted Backup
+
+To [restore](../../../../client-api/operations/maintenance/backup/restore.mdx#configuration-and-execution)
+an encrypted backup you must provide the **key** that was used to encrypt it.
+Pass the key to `RestoreBackupOperation` via `restoreConfiguration.BackupEncryptionSettings`.
+
+
+{`// restore encrypted database
+
+var restoreConfiguration = new RestoreBackupConfiguration();
+
+//New database name
+restoreConfiguration.DatabaseName = "newEncryptedDatabase";
+
+//Backup-file location
+var backupPath = @"C:\\Users\\RavenDB\\2019-01-06-11-11.ravendb-encryptedDatabase-A-snapshot";
+restoreConfiguration.BackupLocation = backupPath;
+
+restoreConfiguration.BackupEncryptionSettings = new BackupEncryptionSettings
+\{
+    Key = "OI7Vll7DroXdUORtc6Uo64wdAk1W0Db9ExXXgcg5IUs="
+\};
+
+var restoreBackupTask = new RestoreBackupOperation(restoreConfiguration);
+docStore.Maintenance.Server.Send(restoreBackupTask);
+`}
+
+
+#### Restoring an encrypted Logical-Backup
+
+A database is [restored](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#restoring-an-encrypted-backup) from a logical-backup
+to its **unencrypted** form.
+To restore a database and encrypt its contents, you must request the encryption explicitly.
+
+* **To encrypt the restored database**:
+  To encrypt the database, pass `RestoreBackupOperation` an encryption key via `restoreConfiguration.EncryptionKey`.
+  Note: This key can be different than the key that was used to encrypt the logical-backup.
+
+
+{`//Restore the database using the key you encrypted it with
+restoreConfiguration.BackupEncryptionSettings = new BackupEncryptionSettings
+\{
+    Key = "OI7Vll7DroXdUORtc6Uo64wdAk1W0Db9ExXXgcg5IUs="
+\};
+
+//Encrypt the restored database using this key
+restoreConfiguration.EncryptionKey = "1F0K2R/KkcwbkK7n4kYlv5eqisy/pMnSuJvZ2sJ/EKo=";
+
+var restoreBackupTask = new RestoreBackupOperation(restoreConfiguration);
+docStore.Maintenance.Server.Send(restoreBackupTask);
+`}
+
+
+* To restore an **unencrypted** logical-backup:
+  Either provide **no encryption key** to activate the default value (`EncryptionMode.None`), or
+  set `EncryptionMode.None` explicitly.
+
+
+{`restoreConfiguration.BackupEncryptionSettings = new BackupEncryptionSettings
+\{
+    //No encryption
+    EncryptionMode = EncryptionMode.None
+\};
+`}
+
+
+#### Restoring a Snapshot
+
+Restore a snapshot as specified in [Restoring an Encrypted Database](../../../../client-api/operations/maintenance/backup/encrypted-backup.mdx#restoring-an-encrypted-backup).
+
+* The database of an un-encrypted snapshot is restored to its un-encrypted form.
+* The database of an encrypted snapshot is restored to its encrypted form.
+  You must provide the database key that was used to encrypt the snapshot.
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/backup/faq.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/faq.mdx
new file mode 100644
index 0000000000..22f0e61b61
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/faq.mdx
@@ -0,0 +1,113 @@
+---
+title: "Backup & Restore: Frequently Asked Questions"
+hide_table_of_contents: true
+sidebar_label: FAQ
+sidebar_position: 3
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Backup & Restore: Frequently Asked Questions
+
+
+* In this page:
+    * [Is there a one-time backup?](../../../../client-api/operations/maintenance/backup/faq.mdx#is-there-a-one-time-backup)
+    * [How do I create a backup of my cluster, not just one database?](../../../../client-api/operations/maintenance/backup/faq.mdx#how-do-i-create-a-backup-of-my-cluster-not-just-one-database)
+    * [How should the servers' time be set in a multi-node cluster?](../../../../client-api/operations/maintenance/backup/faq.mdx#how-should-the-servers-time-be-set-in-a-multi-node-cluster)
+    * [Is an External Replication a good substitute for a backup task?](../../../../client-api/operations/maintenance/backup/faq.mdx#is-an-external-replication-task-a-good-substitute-for-a-backup-task)
+    * [Can I simply copy the database folder contents whenever I need to create a backup?](../../../../client-api/operations/maintenance/backup/faq.mdx#can-i-simply-copy-the-database-folder-contents-whenever-i-need-to-create-a-backup)
+    * [Does RavenDB automatically delete old backups?](../../../../client-api/operations/maintenance/backup/faq.mdx#does-ravendb-automatically-delete-old-backups)
+    * [Are there any locations that backup files should NOT be stored at?](../../../../client-api/operations/maintenance/backup/faq.mdx#are-there-any-locations-that-backup-files-should-not-be-stored-at)
+    * [What happens when a backup process fails before it is completed?](../../../../client-api/operations/maintenance/backup/faq.mdx#what-happens-when-a-backup-process-fails-before-completion)
+
+
+## FAQ
+
+### Is there a one-time backup?
+
+Yes. Although [backup is a vital ongoing task](../../../../studio/database/tasks/backup-task.mdx#periodic-backup-creation) and is meant to back your data up continuously,
+you can also use [one-time manual backups](../../../../studio/database/tasks/backup-task.mdx#manually-creating-one-time-backups)
+(e.g. before upgrading or other maintenance).
+
+* You can also use [Smuggler](../../../../client-api/smuggler/what-is-smuggler.mdx#what-is-smuggler) as an equivalent of a full backup for a single [export](../../../../client-api/smuggler/what-is-smuggler.mdx#export) operation.
+
+### How do I create a backup of my cluster, not just one database?
+
+You can run a [server-wide ongoing backup](../../../../studio/server/server-wide-backup.mdx)
+which backs up each of the databases in your cluster.
+What does it back up? Both the binary "Snapshot" and the JSON "Backup" types of backup tasks
+save the entire [database record](../../../../studio/database/settings/database-record.mdx) including:
+
+* Database contents
+* Document extensions (attachments, counters, and time-series)
+* Indexes (JSON Backup saves only the index definitions, while Snapshot saves fully built indexes)
+* Revisions
+* Conflict configurations
+* Identities
+* Compare-exchange items
+* Ongoing tasks (Ongoing backup, ETL, Subscription, and Replication tasks)
+
+**Cluster configuration and nodes setup** can be [re-created](../../../../start/getting-started.mdx#installation--setup)
+and databases can be [restored from backup](../../../../studio/database/create-new-database/from-backup.mdx).
+
+**To prevent downtime while rebuilding**, you can [replicate your database](../../../../studio/database/tasks/ongoing-tasks/hub-sink-replication/overview.mdx)
+so that there is a live version available to distribute the workload and act as a failover.
+### How should the servers' time be set in a multi-node cluster?
+
+The backup task runs on schedule according to the executing server's local time.
+It is recommended that you set all nodes to the same time. This way, backup files'
+time-signatures are consistent even when the backups are created by different nodes.
+
+### Is an External Replication task a good substitute for a backup task?
+
+Although [External Replication](../../../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx)
+and [Backup](../../../../client-api/operations/maintenance/backup/backup-overview.mdx)
+are both ongoing tasks that create a copy of your data, they have different aims and behavior.
+
+For example, replication tasks don't allow you to retrieve data from a history/restore point after mistakes,
+but they do create a live copy that can be used as a failover and they can distribute the workload.
+See [Backup Task -vs- External Replication Task](../../../../studio/database/tasks/backup-task.mdx#backup-task--vs--replication-task).
+
+### Can I simply copy the database folder contents whenever I need to create a backup?
+
+Simply copying the database folder of a live database will probably create corrupted data in the backup.
+Creating an [ongoing backup task](../../../../client-api/operations/maintenance/backup/backup-overview.mdx) is a one-time operation,
+so there really is no reason to create backups manually again and again. Properly backing up provides:
+
+* **Up-to-date backups** by incrementally and frequently updating changes in the data.
+* **The creation of a reliable point-in-time freeze** of backed-up data that can be used in case of mistaken deletes or patches.
+* **The assurance of ACID compliance** for backed-up data during interactions with the file system.
+
+### Does RavenDB automatically delete old backups?
+
+You can configure RavenDB to delete old backups with the `RetentionPolicy` feature.
+If you enable it, RavenDB will delete backups after the `TimeSpan` that you set.
+By default, `RetentionPolicy` is disabled.
+
+Learn how to change the [Retention Policy via the RavenDB Studio](../../../../studio/database/tasks/backup-task.mdx#retention-policy).
+Learn how to change the [Retention Policy via API](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#backup-retention-policy).
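+
+As a minimal sketch (C#), enabling a retention policy on a periodic-backup task could look
+as follows; the folder path, schedule, and 30-day age are illustrative values only:
+
+
+{`var config = new PeriodicBackupConfiguration
+\{
+    LocalSettings = new LocalSettings \{ FolderPath = @"E:\\Backups" \},
+    FullBackupFrequency = "0 2 * * *",   // full backup daily at 02:00
+
+    // Delete each backup once it is older than 30 days
+    // and a more recent backup exists
+    RetentionPolicy = new RetentionPolicy
+    \{
+        Disabled = false,
+        MinimumBackupAgeToKeep = TimeSpan.FromDays(30)
+    \}
+\};
+
+docStore.Maintenance.Send(new UpdatePeriodicBackupOperation(config));
+`}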
+### Are there any locations that backup files should NOT be stored at?
+
+It is recommended **not to store backups on the same drive as your database** data files,
+since both the database and the backups would be exposed to the same risks.
+
+* Disk space can run low as backups start piling up unless you [set your retention policy for backups](../../../../client-api/operations/maintenance/backup/faq.mdx#does-ravendb-automatically-delete-old-backups).
+* There are many [options for backup locations](../../../../studio/database/tasks/backup-task.mdx#destination).
+* We recommend creating ongoing backups in two different types of locations (cloud and local machine).
+  You can store your backups in multiple locations by setting up one [ongoing backup task](../../../../studio/database/tasks/backup-task.mdx)
+  with multiple destinations.
+
+### What happens when a backup process fails before completion?
+
+While in progress, the backup content is written to an `.in-progress` file on disk.
+
+* Once the **backup is complete**, the file is renamed to its correct final name.
+* If the backup process **fails before completion**, the `.in-progress` file remains on disk.
+  This file will not be used in any future Restore processes.
+  If the failed process was an incremental-backup task, any future incremental backups will
+  continue from the last successfully backed-up point, so that the backup remains consistent with the source.
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/backup/restore.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/restore.mdx
new file mode 100644
index 0000000000..c241753cf1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/backup/restore.mdx
@@ -0,0 +1,230 @@
+---
+title: "Restore"
+hide_table_of_contents: true
+sidebar_label: Restore
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Restore
+
+
+* A backed-up database can be restored to a new database, either
+  by client API methods or through the Studio.
+
+* On a [sharded](../../../../sharding/overview.mdx) database, restore
+  is performed per shard, using the backups created by the shards.
+  Read about restore on a sharded database [in the section dedicated to it](../../../../sharding/backup-and-restore/restore.mdx).
+
+* In this page:
+    * [Restoring a Database: Configuration and Execution](../../../../client-api/operations/maintenance/backup/restore.mdx#restoring-a-database:-configuration-and-execution)
+    * [Optional Settings](../../../../client-api/operations/maintenance/backup/restore.mdx#optional-settings)
+    * [Restore Database to a Single Node](../../../../client-api/operations/maintenance/backup/restore.mdx#restore-database-to-a-single-node)
+    * [Restore Database to Multiple Nodes](../../../../client-api/operations/maintenance/backup/restore.mdx#restore-database-to-multiple-nodes)
+    * [Restore to a Single Node & Replicate to Other Nodes](../../../../client-api/operations/maintenance/backup/restore.mdx#restore-database-to-a-single-node--replicate-it-to-other-nodes)
+    * [Restore to Multiple Nodes Simultaneously](../../../../client-api/operations/maintenance/backup/restore.mdx#restore-database-to-multiple-nodes-simultaneously)
+    * [Recommended Precautions](../../../../client-api/operations/maintenance/backup/restore.mdx#recommended-precautions)
+
+
+## Restoring a Database: Configuration and Execution
+
+To restore a database, create and configure a `RestoreBackupConfiguration` instance and pass
+it to `RestoreBackupOperation` for execution.
+### `RestoreBackupOperation`
+
+
+{`public RestoreBackupOperation(RestoreBackupConfiguration restoreConfiguration)
+`}
+
+
+### `RestoreBackupConfiguration`
+
+
+{`public class RestoreBackupConfiguration
+\{
+    public string DatabaseName \{ get; set; \}
+    public string BackupLocation \{ get; set; \}
+    public string LastFileNameToRestore \{ get; set; \}
+    public string DataDirectory \{ get; set; \}
+    public string EncryptionKey \{ get; set; \}
+    public bool DisableOngoingTasks \{ get; set; \}
+    public bool SkipIndexes \{ get; set; \}
+\}
+`}
+
+
+* Parameters:
+
+    | Parameter | Value | Functionality |
+    | ------------- | ------------- | ----- |
+    | **DatabaseName** | string | Name for the new database. |
+    | **BackupLocation** | string | Local path of the backup file to be restored.<br/>Path **must be local** for the restoration to continue. |
+    | **LastFileNameToRestore**<br/>(Optional - omit for default) | string | [Last incremental backup file](../../../../server/ongoing-tasks/backup-overview.mdx#restoration-procedure) to restore.<br/>**Default behavior: Restore all backup files in the folder.** |
+    | **DataDirectory**<br/>(Optional - omit for default) | string | The new database data directory.<br/>**Default folder: Under the "Databases" folder, in a folder that carries the restored database's name.** |
+    | **EncryptionKey**<br/>(Optional - omit for default) | string | A key for an encrypted database.<br/>**Default behavior: Try to restore as if the DB is unencrypted.** |
+    | **DisableOngoingTasks**<br/>(Optional - omit for default) | boolean | `true` - disable ongoing tasks when Restore is complete.<br/>`false` - enable ongoing tasks when Restore is complete.<br/>**Default: `false` (ongoing tasks will run when Restore is complete).** |
+    | **SkipIndexes**<br/>(Optional - omit for default) | boolean | `true` to disable indexes import,<br/>`false` to enable indexes import.<br/>**Default: `false` (restore all indexes).** |
+
+
+    * Verify that RavenDB has full access to the backup-files and database folders.
+    * Make sure your server has permissions to read from `BackupLocation` and write to `DataDirectory`.
+
+
+
+
+## Optional Settings
+
+### `LastFileNameToRestore`
+
+Restore incremental backup files up to (including) the selected file, and stop restoring there.
+
+* For example:
+    * These are the files in your backup folder:
+      2018-12-26-09-00.ravendb-full-backup
+      2018-12-26-12-00.ravendb-incremental-backup
+      2018-12-26-15-00.ravendb-incremental-backup
+      2018-12-26-18-00.ravendb-incremental-backup
+    * Feed **LastFileNameToRestore** with the 2018-12-26-12-00 incremental-backup file name:
+
+
+{`// Last incremental backup file to restore from
+restoreConfiguration.LastFileNameToRestore = @"2018-12-26-12-00.ravendb-incremental-backup";
+`}
+
+
+    * The full-backup and 12:00 incremental-backup files **will** be restored.
+      The 15:00 and 18:00 files will **not** be restored.
+
+### `DataDirectory`
+
+Specify the directory into which the database will be restored.
+
+
+{`// Restore to the specified directory path
+var dataPath = @"C:\\Users\\RavenDB\\backups\\2018-12-26-16-17.ravendb-Products-A-backup\\restoredDatabaseLocation";
+restoreConfiguration.DataDirectory = dataPath;
+`}
+
+
+### `EncryptionKey`
+
+This is where you need to provide your encryption key if your backup is encrypted.
+
+
+{`restoreConfiguration.EncryptionKey = "your_encryption_key";
+`}
+
+
+### `DisableOngoingTasks`
+
+Set **DisableOngoingTasks** to **true** to disable the execution of ongoing tasks after restoration.
+See [Recommended Precautions](../../../../client-api/operations/maintenance/backup/restore.mdx#recommended-precautions).
+
+
+{`// Do or do not run ongoing tasks after restoration.
+// Default setting is FALSE, to allow tasks' execution when the backup is restored.
+restoreConfiguration.DisableOngoingTasks = true;
+`}
+
+
+
+
+
+## Restore Database to a Single Node
+
+* **Configuration**
+    * Set `DatabaseName` with the **new database name**.
+    * Set `BackupLocation` with a **local path for the backup files**.
+
+* **Execution**
+    * Pass the configured `RestoreBackupConfiguration` to `RestoreBackupOperation`.
+    * Send the restore-backup operation to the server to start the restoration execution.
+
+* **Code Sample**:
+
+
+{`var restoreConfiguration = new RestoreBackupConfiguration();
+
+// New database name
+restoreConfiguration.DatabaseName = "newProductsDatabase";
+
+// Local path with a backup file
+var backupPath = @"C:\\Users\\RavenDB\\backups\\2018-12-26-16-17.ravendb-Products-A-backup";
+restoreConfiguration.BackupLocation = backupPath;
+
+var restoreBackupTask = new RestoreBackupOperation(restoreConfiguration);
+docStore.Maintenance.Server.Send(restoreBackupTask);
+`}
+
+
+
+
+## Restore Database to Multiple Nodes
+
+### Restore Database to a Single Node & Replicate it to Other Nodes
+
+The common approach to restoring a database that should reside on multiple nodes is to restore the backed-up
+database to a single server and then expand the database group to additional nodes, allowing normal replication.
+
+* Verify relevant nodes exist in your cluster. [Add nodes](../../../../server/clustering/cluster-api.mdx#add-node-to-the-cluster) as needed.
+* Manage the database-group topology:
+  add a node to the database-group using the [Studio](../../../../studio/database/settings/manage-database-group.mdx)
+  or from your [code](../../../../client-api/operations/server-wide/add-database-node.mdx), to replicate the database
+  to the other nodes, as shown in the sketch below.
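+
+As a minimal sketch (C#), expanding the database group from code could look as follows;
+the database name and node tags are illustrative values only:
+
+
+{`// Expand the restored database's group to nodes B and C.
+// Each call adds one node; the server then replicates the database to it.
+docStore.Maintenance.Server.Send(new AddDatabaseNodeOperation("newProductsDatabase", "B"));
+docStore.Maintenance.Server.Send(new AddDatabaseNodeOperation("newProductsDatabase", "C"));
+`}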
+
+### Restore Database to Multiple Nodes Simultaneously
+
+You can create the cluster in advance and restore the database to multiple nodes simultaneously.
+
+
+
+* When a [logical-backup](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#logical-backup)
+  is restored, each document receives a new change-vector according to the node it resides on.
+  When the database instances synchronize, this change-vector will be updated and be composed of all database nodes' tags.
+
+* When a [snapshot](../../../../client-api/operations/maintenance/backup/backup-overview.mdx#snapshot) is restored,
+  documents are **not** assigned a new change-vector because the databases kept by all nodes are considered identical.
+  Each document retains the original change-vector it had during backup.
+  When the database instances synchronize, documents' change-vectors do **not** change.
+
+
+
+* On the first node, restore the database using its original name.
+* On other nodes, restore the database using different names.
+* Wait for the restoration to complete on all nodes.
+* **Soft-delete** the additional databases (those with altered names) from the cluster.
+  [Soft-delete](../../../../client-api/operations/server-wide/delete-database.mdx#operations--server--how-to-delete-a-database)
+  the databases by setting `HardDelete` to `false`, to retain the data files on disk.
+* Rename the database folder on all nodes to the original database name.
+* [Expand](../../../../server/clustering/rachis/cluster-topology.mdx#modifying-the-topology) the database group to all relevant nodes.
+
+
+
+## Recommended Precautions
+
+
+When restoring a backed-up database, you may be interested only in the restored data
+and not in any ongoing tasks that may have existed during backup.
+
+* E.g., an ETL ongoing task from a production cluster may have unwanted results in a testing environment.
+
+In such cases, **disable** ongoing tasks using the [DisableOngoingTasks](../../../../client-api/operations/maintenance/backup/restore.mdx#disableongoingtasks) flag.
+
+* Code Sample:
+
+
+{`// Do or do not run ongoing tasks after restoration.
+// Default setting is FALSE, to allow tasks' execution when the backup is restored.
+restoreConfiguration.DisableOngoingTasks = true;
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/clean-change-vector.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/clean-change-vector.mdx
new file mode 100644
index 0000000000..46611cf113
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/clean-change-vector.mdx
@@ -0,0 +1,90 @@
+---
+title: "Clean Change Vector"
+hide_table_of_contents: true
+sidebar_label: Clean Change Vector
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Clean Change Vector
+
+
+* A database's [change vector](../../../server/clustering/replication/change-vector.mdx) contains entries from each instance of the database
+in the database group. However, even when an instance no longer exists (because it was removed or replaced), its entry will remain in the
+database change vector. These entries can build up over time, leading to longer change vectors that take up unnecessary space.
+
+* **`UpdateUnusedDatabasesOperation`** lets you specify the IDs of database instances that no longer exist so that their entries can be
+removed from the database change vector.
+
+* This operation does not affect any documents' _current_ change vectors, but from now on, when documents are modified or created, their
+change vector will not include the obsolete entries.
+
+
+## Syntax
+
+
+
+{`public UpdateUnusedDatabasesOperation(string database, HashSet<string> unusedDatabaseIds)
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ----- | ---- |
+| **database** | `string` | Name of the database |
+| **unusedDatabaseIds** | `HashSet<string>` | The database IDs to be removed from the change vector |
+
+
+
+## Example
+
+In the 'General Stats' view in the [management studio](../../../studio/overview.mdx), you can see your database's current change vector (it's
+the same as the change vector of the database's most recently updated/created document).
+
+Below we see the change vector of an [example database](../../../start/about-examples.mdx) "NorthWind". It includes three entries: one of the
+NorthWind instance currently housed on cluster node A (whose ID begins with `N79J...`), and two of instances that were also previously
+housed on node A but which no longer exist.
+
+![Fig. 1](./assets/clean-change-vector.png)
+
+This code removes the obsolete entries specified by their database instance IDs:
+
+
+
+
+{`documentStore.Maintenance.Server.Send(
+    new UpdateUnusedDatabasesOperation(documentStore.Database, new HashSet<string>
+{
+    "0N64iiIdYUKcO+yq1V0cPA",
+    "xwmnvG1KBkSNXfl7/0yJ1A"
+}));
+`}
+
+
+
+
+{`await documentStore.Maintenance.Server.SendAsync(
+    new UpdateUnusedDatabasesOperation(documentStore.Database, new HashSet<string>
+{
+    "0N64iiIdYUKcO+yq1V0cPA",
+    "xwmnvG1KBkSNXfl7/0yJ1A"
+}));
+`}
+
+
+
+
+
+
+Next time a document is modified, you will see that the database change vector has been cleaned.
+
+![Fig. 2](./assets/clean-change-vector-after.png)
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_category_.json b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_category_.json
new file mode 100644
index 0000000000..9986c022fe
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 2,
+    "label": "Configuration"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_database-settings-operation-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_database-settings-operation-csharp.mdx
new file mode 100644
index 0000000000..fe295f9187
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_database-settings-operation-csharp.mdx
@@ -0,0 +1,182 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The default database configuration settings can be customized:
+
+    * From the Client API - as described in this article.
+
+    * From Studio - via the [Database Settings](../../../../studio/database/settings/database-settings.mdx#database-settings) view.
+
+* In this page:
+
+    * [Put database settings operation](../../../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#put-database-settings-operation)
+
+    * [Get database settings operation](../../../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#get-database-settings-operation)
+
+
+Do not modify the database settings unless you are an expert and know what you're doing.
+
+
+
+
+## Put database settings operation
+
+* Use `PutDatabaseSettingsOperation` to modify the default database configuration.
+
+* Only **database-level** settings can be customized using this operation.
+  See the [Configuration overview](../../../../server/configuration/configuration-options.mdx) article to learn how to customize the **server-level** settings.
+
+* Note: for the changes to take effect, the database must be **reloaded**.
+  Reloading is accomplished by disabling and enabling the database using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx).
+  See the following example:
+
+
+
+
+{`// 1. Modify the database settings:
+// ================================
+
+// Define the settings dictionary with the key-value pairs to set, for example:
+var settings = new Dictionary<string, string>
+{
+    ["Databases.QueryTimeoutInSec"] = "350",
+    ["Indexing.Static.DeploymentMode"] = "Rolling"
+};
+
+// Define the put database settings operation,
+// specify the database name & pass the settings dictionary
+var putDatabaseSettingsOp = new PutDatabaseSettingsOperation(documentStore.Database, settings);
+
+// Execute the operation by passing it to Maintenance.Send
+documentStore.Maintenance.Send(putDatabaseSettingsOp);
+
+// 2. RELOAD the database for the change to take effect:
+// =====================================================
+
+// Disable database
+var disableDatabaseOp = new ToggleDatabasesStateOperation(documentStore.Database, true);
+documentStore.Maintenance.Server.Send(disableDatabaseOp);
+
+// Enable database
+var enableDatabaseOp = new ToggleDatabasesStateOperation(documentStore.Database, false);
+documentStore.Maintenance.Server.Send(enableDatabaseOp);
+`}
+
+
+
+{`// 1. Modify the database settings:
+// ================================
+
+// Define the settings dictionary with the key-value pairs to set, for example:
+var settings = new Dictionary<string, string>
+{
+    ["Databases.QueryTimeoutInSec"] = "350",
+    ["Indexing.Static.DeploymentMode"] = "Rolling"
+};
+
+// Define the put database settings operation,
+// specify the database name & pass the settings dictionary
+var putDatabaseSettingsOp = new PutDatabaseSettingsOperation(documentStore.Database, settings);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+await documentStore.Maintenance.SendAsync(putDatabaseSettingsOp);
+
+// 2. RELOAD the database for the change to take effect:
+// =====================================================
+
+// Disable database
+var disableDatabaseOp = new ToggleDatabasesStateOperation(documentStore.Database, true);
+await documentStore.Maintenance.Server.SendAsync(disableDatabaseOp);
+
+// Enable database
+var enableDatabaseOp = new ToggleDatabasesStateOperation(documentStore.Database, false);
+await documentStore.Maintenance.Server.SendAsync(enableDatabaseOp);
+`}
+
+
+
+**Syntax**:
+
+
+
+{`PutDatabaseSettingsOperation(string databaseName, Dictionary<string, string> configurationSettings)
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------------|------------------------------|----------------------------------------------------|
+| databaseName | `string` | Name of database for which to change the settings. |
+| configurationSettings | `Dictionary<string, string>` | The configuration settings to set. |
+
+
+
+
+## Get database settings operation
+
+* Use `GetDatabaseSettingsOperation` to get the configuration settings that were customized for the database.
+
+* Only settings that have been changed will be retrieved.
+
+
+
+
+{`// Define the get database settings operation, specify the database name
+var getDatabaseSettingsOp = new GetDatabaseSettingsOperation(documentStore.Database);
+
+// Execute the operation by passing it to Maintenance.Send
+var customizedSettings = documentStore.Maintenance.Send(getDatabaseSettingsOp);
+
+// Get the customized value
+var customizedValue = customizedSettings.Settings["Databases.QueryTimeoutInSec"];
+`}
+
+
+
+
+{`// Define the get database settings operation, specify the database name
+var getDatabaseSettingsOp = new GetDatabaseSettingsOperation(documentStore.Database);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+var customizedSettings = await documentStore.Maintenance.SendAsync(getDatabaseSettingsOp);
+
+// Get the customized value
+var customizedValue = customizedSettings.Settings["Databases.QueryTimeoutInSec"];
+`}
+
+
+
+**Syntax**:
+
+
+
+{`GetDatabaseSettingsOperation(string databaseName)
+`}
+
+
+
+| Parameter | Type | Description |
+|--------------|----------|-------------------------------------------------------------|
+| databaseName | `string` | The database name for which to get the customized settings. |
+
+
+
+
+{`// Executing the operation returns the following object:
+public class DatabaseSettings
+\{
+    // Configuration settings that have been customized
+    public Dictionary<string, string> Settings \{ get; set; \}
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_database-settings-operation-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_database-settings-operation-nodejs.mdx
new file mode 100644
index 0000000000..f4faebeeac
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_database-settings-operation-nodejs.mdx
@@ -0,0 +1,130 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The default database configuration settings can be customized:
+
+    * From the Client API - as described in this article.
+
+    * From Studio - via the [Database Settings](../../../../studio/database/settings/database-settings.mdx#database-settings) view.
+ +* In this page: + + * [Put database settings operation](../../../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#put-database-settings-operation) + + * [Get database settings operation](../../../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#get-database-settings-operation) + + +Do not modify the database settings unless you are an expert and know what you're doing. + + + + +## Put database settings operation + +* Use `PutDatabaseSettingsOperation` to modify the default database configuration. + +* Only **database-level** settings can be customized using this operation. + See the [Configuration overview](../../../../server/configuration/configuration-options.mdx) article to learn how to customize the **server-level** settings. + +* Note: for the changes to take effect, the database must be **reloaded**. + Reloading is accomplished by disabling and enabling the database using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx). + See the following example: + + + +{`// 1. Modify the database settings: +// ================================ + +// Define a settings object with key-value pairs to set, for example: +const settings = \{ + "Databases.QueryTimeoutInSec": "350", + "Indexing.Static.DeploymentMode": "Rolling" +\}; + +// Define the put database settings operation, +// specify the database name & pass the settings dictionary +const putDatabaseSettingsOp = new PutDatabaseSettingsOperation(documentStore.database, settings) + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(putDatabaseSettingsOp); + +// 2. RELOAD the database for the change to take effect: +// ===================================================== + +// Disable database +const disableDatabaseOp = new ToggleDatabasesStateOperation(documentStore.database, true); +await documentStore.maintenance.server.send(disableDatabaseOp); + +// Enable database +const enableDatabaseOp = new ToggleDatabasesStateOperation(documentStore.database, false); +await documentStore.maintenance.server.send(enableDatabaseOp); +`} + + +**Syntax**: + + + +{`const putDatabaseSettingsOp = new PutDatabaseSettingsOperation(databaseName, configurationSettings) +`} + + + +| Parameter | Type | Description | +|-----------------------|-----------|----------------------------------------------------| +| databaseName | `string` | Name of database for which to change the settings. | +| configurationSettings | `object` | The configuration settings to set. | + + + + +## Get database settings operation + +* Use `GetDatabaseSettingsOperation` to get the configuration settings that were customized for the database. + +* Only settings that have been changed will be retrieved. 
+ + + +{`// Define the get database settings operation, specify the database name +const getDatabaseSettingsOp = new GetDatabaseSettingsOperation(documentStore.database); + +// Execute the operation by passing it to maintenance.send +const customizedSettings = await documentStore.maintenance.send(getDatabaseSettingsOp); + +// Get the customized value +const customizedValue = customizedSettings.settings["Databases.QueryTimeoutInSec"]; +`} + + +**Syntax**: + + + +{`const getDatabaseSettingsOp = new GetDatabaseSettingsOperation(databaseName); +`} + + + +| Parameter | Type | Description | +|--------------|----------|-------------------------------------------------------------| +| databaseName | `string` | The database name for which to get the customized settings. | + + + + +{`// Executing the operation returns the following object: +\{ + settings // An object with key-value configuration pairs +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_database-settings-operation-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_database-settings-operation-php.mdx new file mode 100644 index 0000000000..c220393c6c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_database-settings-operation-php.mdx @@ -0,0 +1,134 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The default database configuration settings can be customized: + + * From the Client API - as described in this article. + + * From Studio - via the [Database Settings](../../../../studio/database/settings/database-settings.mdx#database-settings) view. + +* In this page: + + * [Put database settings operation](../../../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#put-database-settings-operation) + + * [Get database settings operation](../../../../client-api/operations/maintenance/configuration/database-settings-operation.mdx#get-database-settings-operation) + + +Do not modify the database settings unless you are an expert and know what you're doing. + + + + +## Put database settings operation + +* Use `PutDatabaseSettingsOperation` to modify the default database configuration. + +* Only **database-level** settings can be customized using this operation. + See the [Configuration overview](../../../../server/configuration/configuration-options.mdx) article to learn how to customize the **server-level** settings. + +* Note: for the changes to take effect, the database must be **reloaded**. + Reloading is accomplished by disabling and enabling the database using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx). + See the following example: + + + +{`// 1. Modify the database settings: +// ================================ + +// Define the settings dictionary with the key-value pairs to set, for example: +$settings = [ + "Databases.QueryTimeoutInSec" => "350", + "Indexing.Static.DeploymentMode" => "Rolling" +]; + +// Define the put database settings operation, +// specify the database name & pass the settings dictionary +$putDatabaseSettingsOp = new PutDatabaseSettingsOperation($documentStore->getDatabase(), $settings); + +// Execute the operation by passing it to Maintenance.Send +$documentStore->maintenance()->send($putDatabaseSettingsOp); + +// 2. 
RELOAD the database for the change to take effect: +// ===================================================== + +// Disable database +$disableDatabaseOp = new ToggleDatabasesStateOperation($documentStore->getDatabase(), true); +$documentStore->maintenance()->server()->send($disableDatabaseOp); + +// Enable database +$enableDatabaseOp = new ToggleDatabasesStateOperation($documentStore->getDatabase(), false); +$documentStore->maintenance()->server()->send($enableDatabaseOp); +`} + + +**Syntax**: + + + +{`PutDatabaseSettingsOperation(?string $databaseName, StringMap|array|null $configurationSettings) +`} + + + +| Parameter | Type | Description | +|-----------------------|------------|--------------------------------------------------| +| $databaseName | `?string` | Name of the database to change the settings for. | +| $configurationSettings | `StringMap`
`array`
`null` | The configuration settings to set. | + + + + +## Get database settings operation + +* Use `GetDatabaseSettingsOperation` to get the configuration settings that were customized for the database. + +* Only settings that have been changed will be retrieved. + + + +{`// Define the get database settings operation, specify the database name +$getDatabaseSettingsOp = new GetDatabaseSettingsOperation($documentStore->getDatabase()); + +// Execute the operation by passing it to Maintenance.Send +/** @var DatabaseSettings $customizedSettings */ +$customizedSettings = $documentStore->maintenance()->send($getDatabaseSettingsOp); + +// Get the customized value +$customizedValue = $customizedSettings->getSettings()["Databases.QueryTimeoutInSec"]; +`} + + +**Syntax**: + + + +{`GetDatabaseSettingsOperation(?string $databaseName); +`} + + + +| Parameter | Type | Description | +|--------------|-----------|--------------------------------------------------------| +| $databaseName | `?string` | The database name to get the customized settings for. | + + + + +{`// Executing the operation returns the following object: +class DatabaseSettings +\{ +// Configuration settings that have been customized + private ?StringMap $settings = null; + // ...getter and setter +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-csharp.mdx new file mode 100644 index 0000000000..483dbc9e08 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-csharp.mdx @@ -0,0 +1,75 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* It is recommended to first refer to the **client-configuration description** in the [put client-configuration](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx) article. + +* Use `GetClientConfigurationOperation` to get the current client-configuration set on the server for the database. 
+
+* In this page:
+    * [Get client-configuration](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#get-client-configuration)
+    * [Syntax](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#syntax)
+
+
+## Get client-configuration
+
+
+
+
+{`// Define the get client-configuration operation
+var getClientConfigOp = new GetClientConfigurationOperation();
+
+// Execute the operation by passing it to Maintenance.Send
+GetClientConfigurationOperation.Result result = store.Maintenance.Send(getClientConfigOp);
+
+ClientConfiguration clientConfiguration = result.Configuration;
+`}
+
+
+
+
+{`// Define the get client-configuration operation
+var getClientConfigOp = new GetClientConfigurationOperation();
+
+// Execute the operation by passing it to Maintenance.SendAsync
+GetClientConfigurationOperation.Result config =
+    await store.Maintenance.SendAsync(getClientConfigOp);
+
+ClientConfiguration clientConfiguration = config.Configuration;
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public GetClientConfigurationOperation()
+`}
+
+
+
+
+
+{`// Executing the operation returns the following object:
+public class Result
+\{
+    // The configuration Etag
+    public long Etag \{ get; set; \}
+
+    // The current client-configuration deployed on the server for the database
+    public ClientConfiguration Configuration \{ get; set; \}
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-java.mdx
new file mode 100644
index 0000000000..f6bac44b09
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-java.mdx
@@ -0,0 +1,59 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+**GetClientConfigurationOperation** is used to return the client configuration, which is saved on the server and overrides client behavior.
+
+## Syntax
+
+
+
+{`GetClientConfigurationOperation()
+`}
+
+
+
+
+{`public static class Result \{
+    private long etag;
+    private ClientConfiguration configuration;
+
+    public long getEtag() \{
+        return etag;
+    \}
+
+    public void setEtag(long etag) \{
+        this.etag = etag;
+    \}
+
+    public ClientConfiguration getConfiguration() \{
+        return configuration;
+    \}
+
+    public void setConfiguration(ClientConfiguration configuration) \{
+        this.configuration = configuration;
+    \}
+\}
+`}
+
+
+
+| Return Value | Type | Description |
+| ------------- | ----- | ---- |
+| **Etag** | `long` | Etag of the configuration |
+| **Configuration** | `ClientConfiguration` | The configuration that will be used by the Client API |
+
+## Example
+
+
+
+{`GetClientConfigurationOperation.Result config
+    = store.maintenance().send(new GetClientConfigurationOperation());
+ClientConfiguration clientConfiguration = config.getConfiguration();
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-nodejs.mdx
new file mode 100644
index 0000000000..4bb6096ee2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-nodejs.mdx
@@ -0,0 +1,67 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* It is recommended to first refer to the **client-configuration description** in the [put client-configuration](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx) article.
+
+* Use `GetClientConfigurationOperation` to get the current client-configuration set on the server for the database.
+
+* In this page:
+    * [Get client-configuration](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#get-client-configuration)
+    * [Syntax](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#syntax)
+
+
+## Get client-configuration
+
+
+
+{`// Define the get client-configuration operation
+const getClientConfigOp = new GetClientConfigurationOperation();
+
+// Execute the operation by passing it to maintenance.send
+const result = await store.maintenance.send(getClientConfigOp);
+
+const configuration = result.configuration;
+`}
+
+
+
+
+## Syntax
+
+
+
+{`const getClientConfigOp = new GetClientConfigurationOperation();
+`}
+
+
+
+
+
+{`// Object returned from store.maintenance.send(getClientConfigOp):
+\{
+    etag,
+    configuration // The configuration object
+\}
+
+// The configuration object:
+\{
+    identityPartsSeparator,
+    etag,
+    disabled,
+    maxNumberOfRequestsPerSession,
+    readBalanceBehavior,
+    loadBalanceBehavior,
+    loadBalancerContextSeed
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-php.mdx
new file mode 100644
index 0000000000..523ea48a7e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-php.mdx
@@ -0,0 +1,59 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* It is recommended to first refer to the **client-configuration description** in the [put client-configuration](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx) article.
+
+* Use `GetClientConfigurationOperation` to get the current client-configuration set on the server for the database.
+
+* In this page:
+    * [Get client-configuration](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#get-client-configuration)
+    * [Syntax](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#syntax)
+
+
+## Get client-configuration
+
+
+
+{`// Define the get client-configuration operation
+$getClientConfigOp = new GetClientConfigurationOperation();
+
+// Execute the operation by passing it to Maintenance.Send
+/** @var GetClientConfigurationResult $result */
+$result = $store->maintenance()->send($getClientConfigOp);
+
+$clientConfiguration = $result->getConfiguration();
+`}
+
+
+
+
+## Syntax
+
+
+
+{`public GetClientConfigurationOperation()
+`}
+
+
+
+
+
+{`// Executing the operation returns the following object:
+class GetClientConfigurationResult implements ResultInterface
+\{
+    private ?int $etag = null;
+    private ?ClientConfiguration $configuration;
+
+    // ... getters and setters
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-python.mdx
new file mode 100644
index 0000000000..10d5210139
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_get-client-configuration-python.mdx
@@ -0,0 +1,60 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* It is recommended to first refer to the **client-configuration description** in the [put client-configuration](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx) article.
+
+* Use `GetClientConfigurationOperation` to get the current client-configuration set on the server for the database.
+
+* In this page:
+    * [Get client-configuration](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#get-client-configuration)
+    * [Syntax](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#syntax)
+
+
+## Get client-configuration
+
+
+
+{`# Define the get client-configuration operation
+get_client_config_op = GetClientConfigurationOperation()
+
+# Execute the operation by passing it to maintenance.send
+result = store.maintenance.send(get_client_config_op)
+
+client_configuration = result.configuration
+`}
+
+
+
+
+## Syntax
+
+
+
+{`class GetClientConfigurationOperation(MaintenanceOperation): ...
+
+# no __init__ (default)
+`}
+
+
+
+
+
+{`# Executing the operation returns the following object:
+class Result:
+    def __init__(self, etag: int, configuration: ClientConfiguration):
+        # The configuration Etag
+        self.etag = etag
+        # The current client-configuration deployed on the server for the database
+        self.configuration = configuration
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-csharp.mdx
new file mode 100644
index 0000000000..df765bc1fa
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-csharp.mdx
@@ -0,0 +1,158 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The **client configuration** is a set of configuration options applied during
+  client-server communication.
+* The initial client configuration can be set by the client when creating the Document Store.
+* A database administrator can modify the current client configuration on the server using the
+  `PutClientConfigurationOperation` operation or via Studio, to gain dynamic control over
+  client-server communication.
+  The client will be updated with the modified configuration the next time it sends a request to the database.
+* In this page: + + * [Client configuration overview and modification](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#client-configuration-overview-and-modification) + * [What can be configured](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured) + * [Put client configuration (for database)](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#put-client-configuration-(for-database)) + * [Syntax](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#syntax) + + +## Client configuration overview and modification + +* **What is the client configuration**: + The client configuration is a set of configuration options that apply to the client when communicating with the database. + See [what can be configured](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured) below. + +* **Initializing the client configuration** (on the client): + This configuration can be initially customized from the client code when creating the Document Store via the [Conventions](../../../../client-api/configuration/conventions.mdx). + +* **Overriding the initial client configuration for the database** (on the server): + + * From the client code: + Use `PutClientConfigurationOperation` to set the client configuration options on the server. + See the example below. + + * From the Studio: + Set the client configuration from the [Client Configuration view](../../../../studio/database/settings/client-configuration-per-database.mdx). + +* **Updating the running client**: + + * Once the client configuration is modified on the server, the running client will [receive the updated settings](../../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date) + the next time it makes a request to the database. + + * Setting the client configuration on the server enables administrators to dynamically control + the client behavior after it has started running. + e.g. manage load balancing of client requests on the fly in response to changing system demands. + +* The client configuration set for the database level **overrides** the + [server-wide client configuration](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx). + + + +## What can be configured + +The following client configuration options are available: + +* **Identity parts separator**: + Set the separator used for automatically generated document IDs (default is `/`). + Applies only to [Identity IDs](../../../../server/kb/document-identifier-generation.mdx#identity-id) and [HiLo IDs](../../../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id). + +* **Maximum number of requests per session**: + Set this number to restrict the number of requests (Reads & Writes) per session in the client API. + +* **Read balance behavior**: + Set the Read balance method the client will use when accessing a node with Read requests. + Learn more in [Balancing client requests - overview](../../../../client-api/configuration/load-balance/overview.mdx) and [Read balance behavior](../../../../client-api/configuration/load-balance/read-balance-behavior.mdx). + +* **Load balance behavior**: + Set the Load balance method for Read & Write requests. + Learn more in [Load balance behavior](../../../../client-api/configuration/load-balance/load-balance-behavior.mdx). 
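+
+As a minimal sketch (C#), the four options above map to a single `ClientConfiguration`
+object; all of the values below are illustrative only:
+
+
+{`var configuration = new ClientConfiguration
+\{
+    // Use '-' instead of the default '/' in automatically generated document IDs
+    IdentityPartsSeparator = '-',
+
+    // Fail any session that issues more than 50 requests
+    MaxNumberOfRequestsPerSession = 50,
+
+    // Spread Read requests across the cluster nodes
+    ReadBalanceBehavior = ReadBalanceBehavior.RoundRobin,
+
+    // Balance Read & Write requests according to the session context
+    LoadBalanceBehavior = LoadBalanceBehavior.UseSessionContext
+\};
+`}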
+ + + +## Put client configuration (for database) + + + +{`// You can customize the client-configuration options in the client +// when creating the Document Store (this is optional): +// ================================================================= + +var documentStore = new DocumentStore +\{ + Urls = new[] \{ "ServerURL_1", "ServerURL_2", "..." \}, + Database = "DefaultDB", + Conventions = new DocumentConventions + \{ + // Initialize some client-configuration options: + MaxNumberOfRequestsPerSession = 100, + IdentityPartsSeparator = '$' + // ... + \} +\}.Initialize(); +`} + + + + + +{`// Override the initial client-configuration in the server using the put operation: +// ================================================================================ + +using (documentStore) +\{ + // Define the client-configuration object + ClientConfiguration clientConfiguration = new ClientConfiguration + \{ + MaxNumberOfRequestsPerSession = 200, + ReadBalanceBehavior = ReadBalanceBehavior.FastestNode + // ... + \}; + + // Define the put client-configuration operation, pass the configuration + var putClientConfigOp = new PutClientConfigurationOperation(clientConfiguration); + + // Execute the operation by passing it to Maintenance.Send + documentStore.Maintenance.Send(putClientConfigOp); +\} +`} + + + + + +## Syntax + + + +{`public PutClientConfigurationOperation(ClientConfiguration configuration) +`} + + + +| Parameter | Type | Description | +|-------------------|-----------------------|------------------------------------------------------------------------| +| **configuration** | `ClientConfiguration` | Client configuration that will be set on the server (for the database) | + + + +{`public class ClientConfiguration +\{ + public long Etag \{ get; set; \} + public bool Disabled \{ get; set; \} + public int? MaxNumberOfRequestsPerSession \{ get; set; \} + public ReadBalanceBehavior? ReadBalanceBehavior \{ get; set; \} + public LoadBalanceBehavior? LoadBalanceBehavior \{ get; set; \} + public int? LoadBalancerContextSeed \{ get; set; \} + public char? IdentityPartsSeparator; // can be any character except '|' +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-java.mdx new file mode 100644 index 0000000000..1c58d31237 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-java.mdx @@ -0,0 +1,35 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**PutClientConfigurationOperation** is used to save a client configuration on the server. It allows you to override client's settings remotely. 
+ +## Syntax + + + +{`PutClientConfigurationCommand(ClientConfiguration configuration) +`} + + + +| Return Value | | | +| ------------- | ----- | ---- | +| **configuration** | `ClientConfiguration` | configuration which will be used by client API | + +## Example + + + +{`ClientConfiguration clientConfiguration = new ClientConfiguration(); +clientConfiguration.setMaxNumberOfRequestsPerSession(100); +clientConfiguration.setReadBalanceBehavior(ReadBalanceBehavior.FASTEST_NODE); + +store.maintenance().send( + new PutClientConfigurationOperation(clientConfiguration)); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-nodejs.mdx new file mode 100644 index 0000000000..081a49d831 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-nodejs.mdx @@ -0,0 +1,149 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The **client configuration** is a set of configuration options applied during + client-server communication. +* The initial client configuration can be set by the client when creating the Document Store. +* A database administrator can modify the current client configuration on the server using the + `PutClientConfigurationOperation` operation or via Studio, to gain dynamic control over + client-server communication. + The client will be updated with the modified configuration the next time it sends a request to the database. +* In this page: + + * [Client configuration overview and modification](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#client-configuration-overview-and-modification) + * [What can be configured](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured) + * [Put client configuration (for database)](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#put-client-configuration-(for-database)) + * [Syntax](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#syntax) + + +## Client configuration overview and modification + +* **What is the client configuration**: + The client configuration is a set of configuration options that apply to the client when communicating with the database. + See [what can be configured](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured) below. + +* **Initializing the client configuration** (on the client): + This configuration can be initially customized from the client code when creating the Document Store via the [Conventions](../../../../client-api/configuration/conventions.mdx). + +* **Overriding the initial client configuration for the database** (on the server): + + * From the client code: + Use `PutClientConfigurationOperation` to set the client configuration options on the server. + See the example below. + + * From the Studio: + Set the client configuration from the [Client Configuration](../../../../studio/database/settings/client-configuration-per-database.mdx) view. 
+ +* **Updating the running client**: + + * Once the client configuration is modified on the server, the running client will [receive the updated settings](../../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date) + the next time it makes a request to the database. + + * Setting the client configuration on the server enables administrators to dynamically control + the client behavior after it has started running. + e.g. manage load balancing of client requests on the fly in response to changing system demands. + +* The client configuration set for the database level **overrides** the + [server-wide client configuration](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx). + + + +## What can be configured + +The following client configuration options are available: + +* **Identity parts separator**: + Set the separator used for automatically generated document IDs (default is `/`). + Applies only to [Identity IDs](../../../../server/kb/document-identifier-generation.mdx#identity-id) and [HiLo IDs](../../../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id). + +* **Maximum number of requests per session**: + Set this number to restrict the number of requests (Reads & Writes) per session in the client API. + +* **Read balance behavior**: + Set the Read balance method the client will use when accessing a node with Read requests. + Learn more in [Balancing client requests - overview](../../../../client-api/configuration/load-balance/overview.mdx) and [Read balance behavior](../../../../client-api/configuration/load-balance/read-balance-behavior.mdx). + +* **Load balance behavior**: + Set the Load balance method for Read & Write requests. + Learn more in [Load balance behavior](../../../../client-api/configuration/load-balance/load-balance-behavior.mdx). + + + +## Put client-configuration (for-database) + + + +{`// You can customize the client-configuration options in the client +// when creating the Document Store (this is optional): +// ================================================================= + +const documentStore = new DocumentStore(["serverUrl_1", "serverUrl_2", "..."], "DefaultDB"); + +documentStore.conventions.maxNumberOfRequestsPerSession = 100; +documentStore.conventions.identityPartsSeparator = '$'; +// ... + +documentStore.initialize(); +`} + + + + + +{`// Override the initial client-configuration in the server using the put operation: +// ================================================================================ + +// Define the client-configuration object +const clientConfiguration = \{ + maxNumberOfRequestsPerSession: 200, + readBalanceBehavior: "FastestNode", + // ... 
+\}; + +// Define the put client-configuration operation, pass the configuration +const putClientConfigOp = new PutClientConfigurationOperation(clientConfiguration); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(putClientConfigOp); +`} + + + + + +## Syntax + + + +{`const putClientConfigOp = new PutClientConfigurationOperation(configuration); +`} + + + +| Parameter | Type | Description | +|-------------------|----------|------------------------------------------------------------------------| +| **configuration** | `object` | Client configuration that will be set on the server (for the database) | + + + +{`// The client-configuration object +\{ + identityPartsSeparator, + etag, + disabled, + maxNumberOfRequestsPerSession, + readBalanceBehavior, + loadBalanceBehavior, + loadBalancerContextSeed +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-php.mdx new file mode 100644 index 0000000000..5064006839 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-php.mdx @@ -0,0 +1,159 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The **client configuration** is a set of configuration options applied during + client-server communication. +* The initial client configuration can be set by the client when creating the Document Store. +* A database administrator can modify the current client configuration on the server using the + `PutClientConfigurationOperation` operation or via Studio, to gain dynamic control over + client-server communication. + The client will be updated with the modified configuration the next time it sends a request to the database. +* In this page: + + * [Client configuration overview and modification](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#client-configuration-overview-and-modification) + * [What can be configured](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured) + * [Put client configuration (for database)](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#put-client-configuration-(for-database)) + * [Syntax](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#syntax) + + +## Client configuration overview and modification + +* **What is the client configuration**: + The client configuration is a set of configuration options that apply to the client when communicating with the database. + See [what can be configured](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured) below. + +* **Initializing the client configuration** (on the client): + This configuration can be initially customized from the client code when creating the Document Store via the [Conventions](../../../../client-api/configuration/conventions.mdx). + +* **Overriding the initial client configuration for the database** (on the server): + + * From the client code: + Use `PutClientConfigurationOperation` to set the client configuration options on the server. + See the example below. 
+ + * From Studio: + Set the client configuration from the [Client Configuration](../../../../studio/database/settings/client-configuration-per-database.mdx) view. + +* **Updating the running client**: + + * Once the client configuration is modified on the server, the running client will [receive the updated settings](../../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date) + the next time it makes a request to the database. + + * Setting the client configuration on the server enables administrators to dynamically control + the client behavior after it has started running. + e.g. manage load balancing of client requests on the fly in response to changing system demands. + +* The client configuration set for the database level **overrides** the + [server-wide client configuration](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx). + + + +## What can be configured + +The following client configuration options are available: + +* **Identity parts separator**: + Set the separator used for automatically generated document IDs (default is `/`). + Applies only to [Identity IDs](../../../../server/kb/document-identifier-generation.mdx#identity-id) and [HiLo IDs](../../../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id). + +* **Maximum number of requests per session**: + Set this number to restrict the number of requests (Reads & Writes) per session in the client API. + +* **Read balance behavior**: + Set the Read balance method the client will use when accessing a node with Read requests. + Learn more in [Balancing client requests - overview](../../../../client-api/configuration/load-balance/overview.mdx) and [Read balance behavior](../../../../client-api/configuration/load-balance/read-balance-behavior.mdx). + +* **Load balance behavior**: + Set the Load balance method for Read & Write requests. + Learn more in [Load balance behavior](../../../../client-api/configuration/load-balance/load-balance-behavior.mdx). + + + +## Put client configuration (for database) + + + +{`// You can customize the client-configuration options in the client +// when creating the Document Store (this is optional): +// ================================================================= + +$urls = ["ServerURL_1", "ServerURL_2", "..."]; +$database = "DefaultDB"; + +$documentStore = new DocumentStore($urls, $database); + +$conventions = new DocumentConventions(); +$conventions->setMaxNumberOfRequestsPerSession(100); +$conventions->setIdentityPartsSeparator('$'); +// .... + +$documentStore->setConventions($conventions); + +$documentStore->initialize(); +`} + + + + + +{`// Override the initial client-configuration in the server using the put operation: +// ================================================================================ +try \{ + // Define the client-configuration object + $clientConfiguration = new ClientConfiguration(); + $clientConfiguration->setMaxNumberOfRequestsPerSession(200); + $clientConfiguration->setReadBalanceBehavior(ReadBalanceBehavior::fastestNode()); + // ... 
+ + // Define the put client-configuration operation, pass the configuration + $putClientConfigOp = new PutClientConfigurationOperation($clientConfiguration); + + // Execute the operation by passing it to Maintenance.Send + $documentStore->maintenance()->send($putClientConfigOp); +\} finally \{ + $documentStore->close(); +\} +`} + + + + + +## Syntax + + + +{`PutClientConfigurationOperation(?ClientConfiguration $configuration) +`} + + + +| Parameter | Type | Description | +|-------------------|-----------------------|------------------------------------------------------------------------| +| **$configuration** | `?ClientConfiguration` | Client configuration that will be set on the server (for the database) | + + + +{`class ClientConfiguration +\{ + private ?string $identityPartsSeparator = null; + private ?int $etag = null; + private bool $disabled = false; + private ?int $maxNumberOfRequestsPerSession = null; + private ?ReadBalanceBehavior $readBalanceBehavior = null; + private ?LoadBalanceBehavior $loadBalanceBehavior = null; + private ?int $loadBalancerContextSeed = null; + + // ... getters and setters +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-python.mdx new file mode 100644 index 0000000000..d20b190479 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/_put-client-configuration-python.mdx @@ -0,0 +1,149 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The **client configuration** is a set of configuration options applied during + client-server communication. +* The initial client configuration can be set by the client when creating the Document Store. +* A database administrator can modify the current client configuration on the server using the + `PutClientConfigurationOperation` operation or via Studio, to gain dynamic control over + client-server communication. + The client will be updated with the modified configuration the next time it sends a request to the database. +* In this page: + + * [Client configuration overview and modification](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#client-configuration-overview-and-modification) + * [What can be configured](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured) + * [Put client configuration (for database)](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#put-client-configuration-(for-database)) + * [Syntax](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#syntax) + + +## Client configuration overview and modification + +* **What is the client configuration**: + The client configuration is a set of configuration options that apply to the client when communicating with the database. + See [what can be configured](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured) below. + +* **Initializing the client configuration** (on the client): + This configuration can be initially customized from the client code when creating the Document Store via the [Conventions](../../../../client-api/configuration/conventions.mdx). 
+ +* **Overriding the initial client configuration for the database** (on the server): + + * From the client code: + Use `PutClientConfigurationOperation` to set the client configuration options on the server. + See the example below. + + * From the Studio: + Set the client configuration from the [Client Configuration](../../../../studio/database/settings/client-configuration-per-database.mdx) view. + +* **Updating the running client**: + + * Once the client configuration is modified on the server, the running client will [receive the updated settings](../../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date) + the next time it makes a request to the database. + + * Setting the client configuration on the server enables administrators to dynamically control + the client behavior after it has started running. + e.g. manage load balancing of client requests on the fly in response to changing system demands. + +* The client configuration set for the database level **overrides** the + [server-wide client configuration](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx). + + + +## What can be configured + +The following client configuration options are available: + +* **Identity parts separator**: + Set the separator used for automatically generated document IDs (default is `/`). + Applies only to [Identity IDs](../../../../server/kb/document-identifier-generation.mdx#identity-id) and [HiLo IDs](../../../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id). + +* **Maximum number of requests per session**: + Set this number to restrict the number of requests (Reads & Writes) per session in the client API. + +* **Read balance behavior**: + Set the Read balance method the client will use when accessing a node with Read requests. + Learn more in [Balancing client requests - overview](../../../../client-api/configuration/load-balance/overview.mdx) and [Read balance behavior](../../../../client-api/configuration/load-balance/read-balance-behavior.mdx). + +* **Load balance behavior**: + Set the Load balance method for Read & Write requests. + Learn more in [Load balance behavior](../../../../client-api/configuration/load-balance/load-balance-behavior.mdx). + + + +## Put client configuration (for database) + + + +{`# You can customize the client-configuration options in the client +# when creating the Document Store (this is optional): +# ================================================================= +document_store = DocumentStore(urls=["ServerURL_1", "ServerURL_2", "..."], database="DefaultDB") +document_store.conventions = DocumentConventions() + +# Initialize some client-configuration options: +document_store.conventions.max_number_of_requests_per_session = 100 +document_store.conventions.identity_parts_separator = "$" +# ... + +document_store.initialize() +`} + + + + + +{`# Override the initial client-configuration in the server using the put operation: +# ================================================================================ +with document_store: + # Define the client-configuration object + client_configuration = ClientConfiguration() + client_configuration.max_number_of_requests_per_session = 200 + client_configuration.read_balance_behavior = ReadBalanceBehavior.FASTEST_NODE + # ... 
+
+    # Define the put client-configuration operation, pass the configuration
+    put_client_config_op = PutClientConfigurationOperation(client_configuration)
+
+    # Execute the operation by passing it to maintenance.send
+    document_store.maintenance.send(put_client_config_op)
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`class PutClientConfigurationOperation(VoidMaintenanceOperation):
+    def __init__(self, config: ClientConfiguration): ...
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------|-----------------------|------------------------------------------------------------------------|
+| **config** | `ClientConfiguration` | Client configuration that will be set on the server (for the database) |
+
+
+
+{`class ClientConfiguration:
+    def __init__(self):
+        self.__identity_parts_separator: Union[None, str] = None
+        self.etag: int = 0
+        self.disabled: bool = False
+        self.max_number_of_requests_per_session: Optional[int] = None
+        self.read_balance_behavior: Optional[ReadBalanceBehavior] = None
+        self.load_balance_behavior: Optional[LoadBalanceBehavior] = None
+        self.load_balancer_context_seed: Optional[int] = None
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/database-settings-operation.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/database-settings-operation.mdx
new file mode 100644
index 0000000000..87875c0d6e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/database-settings-operation.mdx
@@ -0,0 +1,43 @@
+---
+title: "Database Settings Operations"
+hide_table_of_contents: true
+sidebar_label: Database Settings Operations
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import DatabaseSettingsOperationCsharp from './_database-settings-operation-csharp.mdx';
+import DatabaseSettingsOperationPhp from './_database-settings-operation-php.mdx';
+import DatabaseSettingsOperationNodejs from './_database-settings-operation-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/get-client-configuration.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/get-client-configuration.mdx
new file mode 100644
index 0000000000..5a2ba7e631
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/get-client-configuration.mdx
@@ -0,0 +1,53 @@
+---
+title: "Get Client Configuration Operation (for database)"
+hide_table_of_contents: true
+sidebar_label: Get Client Configuration
+sidebar_position: 2
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GetClientConfigurationCsharp from './_get-client-configuration-csharp.mdx';
+import GetClientConfigurationJava from './_get-client-configuration-java.mdx';
+import GetClientConfigurationPython from './_get-client-configuration-python.mdx';
+import GetClientConfigurationPhp from './_get-client-configuration-php.mdx';
+import GetClientConfigurationNodejs from './_get-client-configuration-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git
a/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/put-client-configuration.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/put-client-configuration.mdx new file mode 100644 index 0000000000..fbc0d6fa0f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/configuration/put-client-configuration.mdx @@ -0,0 +1,57 @@ +--- +title: "Put Client Configuration Operation (for database)" +hide_table_of_contents: true +sidebar_label: Put Client Configuration +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PutClientConfigurationCsharp from './_put-client-configuration-csharp.mdx'; +import PutClientConfigurationJava from './_put-client-configuration-java.mdx'; +import PutClientConfigurationPython from './_put-client-configuration-python.mdx'; +import PutClientConfigurationPhp from './_put-client-configuration-php.mdx'; +import PutClientConfigurationNodejs from './_put-client-configuration-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_add-connection-string-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_add-connection-string-csharp.mdx new file mode 100644 index 0000000000..182fb4d22a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_add-connection-string-csharp.mdx @@ -0,0 +1,357 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the [PutConnectionStringOperation](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#the%c2%a0putconnectionstringoperation%c2%a0method) method to define a connection string in your database. 
+
+* In this page:
+  * [Add a RavenDB connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-ravendb-connection-string)
+  * [Add an SQL connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-sql-connection-string)
+  * [Add a Snowflake connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-snowflake-connection-string)
+  * [Add an OLAP connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-olap-connection-string)
+  * [Add an Elasticsearch connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-elasticsearch-connection-string)
+  * [Add a Kafka connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-kafka-connection-string)
+  * [Add a RabbitMQ connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-rabbitmq-connection-string)
+  * [Add an Azure Queue Storage connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-azure-queue-storage-connection-string)
+  * [Add an Amazon SQS connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-amazon-sqs-connection-string)
+  * [The PutConnectionStringOperation method](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#the%c2%a0putconnectionstringoperation%c2%a0method)
+
+
+## Add a RavenDB connection string
+
+RavenDB connection strings are used by [RavenDB ETL tasks](../../../../server/ongoing-tasks/etl/raven.mdx).
+
+#### Example:
+
+
+{`// Define a connection string to a RavenDB database destination
+// ============================================================
+var ravenDBConStr = new RavenConnectionString
+\{
+    Name = "ravendb-connection-string-name",
+    Database = "target-database-name",
+    TopologyDiscoveryUrls = new[] \{ "https://rvn2:8080" \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation
+// ======================================================================================
+var PutConnectionStringOp = new PutConnectionStringOperation<RavenConnectionString>(ravenDBConStr);
+PutConnectionStringResult connectionStringResult = store.Maintenance.Send(PutConnectionStringOp);
+`}
+
+
+
+#### Syntax:
+
+
+{`public class RavenConnectionString : ConnectionString
+\{
+    public override ConnectionStringType Type => ConnectionStringType.Raven;
+
+    public string Database \{ get; set; \} // Target database name
+    public string[] TopologyDiscoveryUrls; // List of server urls in the target RavenDB cluster
+\}
+`}
+
+
+
+
+
+**Secure servers**
+
+To [connect to secure RavenDB servers](../../../../server/security/authentication/certificate-management.mdx#enabling-communication-between-servers:-importing-and-exporting-certificates)
+you need to:
+1. Export the server certificate from the source server.
+2. Install it as a client certificate on the destination server.
+
+This can be done from the Studio [Certificates view](../../../../server/security/authentication/certificate-management.mdx#studio-certificates-management-view).
+
+
+
+## Add an SQL connection string
+
+SQL connection strings are used by RavenDB [SQL ETL Tasks](../../../../server/ongoing-tasks/etl/sql.mdx).
+
+#### Example:
+
+
+{`// Define a connection string to a SQL database destination
+// ========================================================
+var sqlConStr = new SqlConnectionString
+\{
+    Name = "sql-connection-string-name",
+
+    // Define destination factory name
+    FactoryName = "MySql.Data.MySqlClient",
+
+    // Define the destination database
+    // May also need to define authentication and encryption parameters
+    // By default, encrypted databases are sent over encrypted channels
+    ConnectionString = "host=127.0.0.1;user=root;database=Northwind"
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation
+// ======================================================================================
+var PutConnectionStringOp = new PutConnectionStringOperation<SqlConnectionString>(sqlConStr);
+PutConnectionStringResult connectionStringResult = store.Maintenance.Send(PutConnectionStringOp);
+`}
+
+
+
+#### Syntax:
+
+
+{`public class SqlConnectionString : ConnectionString
+\{
+    public override ConnectionStringType Type => ConnectionStringType.Sql;
+
+    public string ConnectionString \{ get; set; \}
+    public string FactoryName \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## Add a Snowflake connection string
+
+[Snowflake connection strings](https://github.com/snowflakedb/snowflake-connector-net/blob/master/doc/Connecting.md)
+are used by RavenDB [Snowflake ETL Tasks](../../../../server/ongoing-tasks/etl/snowflake.mdx).
+
+#### Example:
+
+
+{`// Define a connection string to a Snowflake warehouse database
+// ============================================================
+var snowflakeConStr = new SnowflakeConnectionString
+\{
+    Name = "snowflake-connection-string-name",
+    ConnectionString = "ACCOUNT = " + SnowflakeAccount + "; USER = " + SnowflakeUser + "; PASSWORD = " + SnowflakePassword
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation
+// ======================================================================================
+var PutConnectionStringOp =
+    new PutConnectionStringOperation<SnowflakeConnectionString>(snowflakeConStr);
+PutConnectionStringResult connectionStringResult = store.Maintenance.Send(PutConnectionStringOp);
+`}
+
+
+
+
+
+## Add an OLAP connection string
+
+OLAP connection strings are used by RavenDB [OLAP ETL Tasks](../../../../server/ongoing-tasks/etl/olap.mdx).
+
+#### Example: To a local machine
+
+
+{`// Define a connection string to a local OLAP destination
+// ======================================================
+OlapConnectionString olapConStr = new OlapConnectionString
+\{
+    Name = "olap-connection-string-name",
+    LocalSettings = new LocalSettings
+    \{
+        FolderPath = "path-to-local-folder"
+    \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation
+// ======================================================================================
+var PutConnectionStringOp = new PutConnectionStringOperation<OlapConnectionString>(olapConStr);
+PutConnectionStringResult connectionStringResult = store.Maintenance.Send(PutConnectionStringOp);
+`}
+
+
+
+#### Example: To a cloud-based server
+
+* The following example shows a connection string to Amazon AWS (S3).
+* Adjust the parameters as needed if you are using other destinations (e.g. Google Cloud, Azure, Glacier, FTP).
+* The available parameters are listed in [ETL destination settings](../../../../server/ongoing-tasks/etl/olap.mdx#etl-destination-settings).
+
+
+
+{`// Define a connection string to an AWS OLAP destination
+// =====================================================
+var olapConStr = new OlapConnectionString
+\{
+    Name = "myOlapConnectionStringName",
+    S3Settings = new S3Settings
+    \{
+        BucketName = "myBucket",
+        RemoteFolderName = "my/folder/name",
+        AwsAccessKey = "myAccessKey",
+        AwsSecretKey = "myPassword",
+        AwsRegionName = "us-east-1"
+    \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation
+// ======================================================================================
+var PutConnectionStringOp = new PutConnectionStringOperation<OlapConnectionString>(olapConStr);
+PutConnectionStringResult connectionStringResult = store.Maintenance.Send(PutConnectionStringOp);
+`}
+
+
+
+#### Syntax:
+
+
+{`public class OlapConnectionString : ConnectionString
+\{
+    public override ConnectionStringType Type => ConnectionStringType.Olap;
+
+    public LocalSettings LocalSettings \{ get; set; \}
+    public S3Settings S3Settings \{ get; set; \}
+    public AzureSettings AzureSettings \{ get; set; \}
+    public GlacierSettings GlacierSettings \{ get; set; \}
+    public GoogleCloudSettings GoogleCloudSettings \{ get; set; \}
+    public FtpSettings FtpSettings \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## Add an Elasticsearch connection string
+
+Elasticsearch connection strings are used by RavenDB [Elasticsearch ETL Tasks](../../../../server/ongoing-tasks/etl/elasticsearch.mdx).
+
+#### Example:
+
+
+{`// Define a connection string to an Elasticsearch destination
+// ==========================================================
+var elasticSearchConStr = new ElasticSearchConnectionString
+\{
+    Name = "elasticsearch-connection-string-name",
+
+    // Elasticsearch Nodes URLs
+    Nodes = new[] \{ "http://localhost:9200" \},
+
+    // Authentication Method
+    Authentication = new Raven.Client.Documents.Operations.ETL.ElasticSearch.Authentication
+    \{
+        Basic = new BasicAuthentication
+        \{
+            Username = "John",
+            Password = "32n4j5kp8"
+        \}
+    \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation
+// ======================================================================================
+var PutConnectionStringOp =
+    new PutConnectionStringOperation<ElasticSearchConnectionString>(elasticSearchConStr);
+PutConnectionStringResult connectionStringResult = store.Maintenance.Send(PutConnectionStringOp);
+`}
+
+
+
+#### Syntax:
+
+
+{`public class ElasticSearchConnectionString : ConnectionString
+\{
+    public override ConnectionStringType Type => ConnectionStringType.ElasticSearch;
+
+    // Elasticsearch Nodes URLs
+    public string[] Nodes \{ get; set; \}
+
+    // Authentication method
+    public Authentication Authentication \{ get; set; \}
+\}
+
+public class Authentication
+\{
+    public BasicAuthentication Basic \{ get; set; \}
+\}
+
+public class BasicAuthentication
+\{
+    public string Username \{ get; set; \}
+    public string Password \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## Add a Kafka connection string
+
+Kafka connection strings are used by RavenDB [Kafka Queue ETL Tasks](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx).
+Learn how to add a Kafka connection string in the [Add a Kafka connection string](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#add-a-kafka-connection-string) section.
+
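+For a quick orientation, here is a minimal sketch of such a definition, assuming the `QueueConnectionString`
+and `KafkaConnectionSettings` types used by Queue ETL tasks; refer to the linked section above for the
+authoritative example:
+
+
+{`// A minimal sketch (assumed API) - define a Kafka connection string
+// =================================================================
+var kafkaConStr = new QueueConnectionString
+\{
+    Name = "kafka-connection-string-name",
+
+    // The broker type marks this as a Kafka connection string
+    BrokerType = QueueBrokerType.Kafka,
+
+    KafkaConnectionSettings = new KafkaConnectionSettings
+    \{
+        // Comma-separated list of Kafka bootstrap servers (placeholder URL)
+        BootstrapServers = "localhost:9092"
+    \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation
+store.Maintenance.Send(new PutConnectionStringOperation<QueueConnectionString>(kafkaConStr));
+`}
+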
+
+
+## Add a RabbitMQ connection string
+
+RabbitMQ connection strings are used by RavenDB [RabbitMQ Queue ETL Tasks](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx).
+Learn how to add a RabbitMQ connection string in the [Add a RabbitMQ connection string](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#add-a-rabbitmq-connection-string) section.
+
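+As with Kafka above, here is a minimal sketch, assuming the `QueueConnectionString` and
+`RabbitMqConnectionSettings` types used by Queue ETL tasks; refer to the linked section above for the
+authoritative example:
+
+
+{`// A minimal sketch (assumed API) - define a RabbitMQ connection string
+// ====================================================================
+var rabbitMqConStr = new QueueConnectionString
+\{
+    Name = "rabbitmq-connection-string-name",
+
+    // The broker type marks this as a RabbitMQ connection string
+    BrokerType = QueueBrokerType.RabbitMq,
+
+    RabbitMqConnectionSettings = new RabbitMqConnectionSettings
+    \{
+        // A standard AMQP connection string (placeholder credentials)
+        ConnectionString = "amqp://guest:guest@localhost:5672/"
+    \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation
+store.Maintenance.Send(new PutConnectionStringOperation<QueueConnectionString>(rabbitMqConStr));
+`}
+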
+
+
+## Add an Azure Queue Storage connection string
+
+Azure Queue Storage connection strings are used by RavenDB [Azure Queue Storage ETL Tasks](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx).
+Learn to add an Azure Queue Storage connection string in the [Add an Azure Queue Storage connection string](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#add-an-azure-queue-storage-connection-string) section.
+
+
+
+## Add an Amazon SQS connection string
+
+Amazon SQS connection strings are used by RavenDB [Amazon SQS ETL Tasks](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx).
+Learn to add an SQS connection string in [this section](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#add-an-amazon-sqs-connection-string).
+
+
+
+## The `PutConnectionStringOperation` method
+
+
+
+{`public PutConnectionStringOperation(T connectionString)
+`}
+
+
+
+| Parameters | Type | Description |
+|----------------------|---------------------------------|----------------------------------------------------|
+| **connectionString** | `RavenConnectionString` | Object that defines the RavenDB connection string. |
+| **connectionString** | `SqlConnectionString` | Object that defines the SQL connection string. |
+| **connectionString** | `SnowflakeConnectionString` | Object that defines the Snowflake connection string. |
+| **connectionString** | `OlapConnectionString` | Object that defines the OLAP connection string. |
+| **connectionString** | `ElasticSearchConnectionString` | Object that defines the Elasticsearch connection string. |
+| **connectionString** | `QueueConnectionString` | Object that defines the connection string for the Queue ETL tasks (Kafka, RabbitMQ, Azure Queue Storage, and Amazon SQS). |
+
+
+
+{`// All the connection string class types inherit from this abstract ConnectionString class:
+// ========================================================================================
+
+public abstract class ConnectionString
+\{
+    // A name for the connection string
+    public string Name \{ get; set; \}
+
+    // The connection string type
+    public abstract ConnectionStringType Type \{ get; \}
+\}
+
+public enum ConnectionStringType
+\{
+    None,
+    Raven,
+    Sql,
+    Olap,
+    ElasticSearch,
+    Queue,
+    Snowflake
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_category_.json b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_category_.json
new file mode 100644
index 0000000000..3f9c806e15
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 8,
+  "label": "Connection strings"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_get-connection-string-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_get-connection-string-csharp.mdx
new file mode 100644
index 0000000000..91aec46192
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_get-connection-string-csharp.mdx
@@ -0,0 +1,147 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `GetConnectionStringsOperation` to retrieve properties for a specific connection string
+  or for all connection strings defined in the database.
+
+* To learn how to create a new connection string, see [Add Connection String Operation](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx).
+
+* In this page:
+  * [Get connection string by name and type](../../../../client-api/operations/maintenance/connection-strings/get-connection-string.mdx#get-connection-string-by-name-and-type)
+  * [Get all connection strings](../../../../client-api/operations/maintenance/connection-strings/get-connection-string.mdx#get-all-connection-strings)
+  * [Syntax](../../../../client-api/operations/maintenance/connection-strings/get-connection-string.mdx#syntax)
+
+
+## Get connection string by name and type
+
+The following example retrieves a RavenDB connection string:
+
+
+
+{`using (var store = new DocumentStore())
+\{
+    // Request to get a specific connection string, pass its name and type:
+    // ====================================================================
+    var getRavenConStrOp =
+        new GetConnectionStringsOperation("ravendb-connection-string-name", ConnectionStringType.Raven);
+
+    GetConnectionStringsResult connectionStrings = store.Maintenance.Send(getRavenConStrOp);
+
+    // Access results:
+    // ===============
+    Dictionary<string, RavenConnectionString> ravenConnectionStrings =
+        connectionStrings.RavenConnectionStrings;
+
+    var numberOfRavenConnectionStrings = ravenConnectionStrings.Count;
+    var ravenConStr = ravenConnectionStrings["ravendb-connection-string-name"];
+
+    var targetUrls = ravenConStr.TopologyDiscoveryUrls;
+    var targetDatabase = ravenConStr.Database;
+\}
+`}
+
+
+
+
+
+## Get all connection strings
+
+
+
+{`using (var store = new DocumentStore())
+\{
+    // Get all connection strings:
+    // ===========================
+    var getAllConStrOp = new GetConnectionStringsOperation();
+    GetConnectionStringsResult allConnectionStrings = store.Maintenance.Send(getAllConStrOp);
+
+    // Access results:
+    // ===============
+
+    // RavenDB
+    Dictionary<string, RavenConnectionString> ravenConnectionStrings =
+        allConnectionStrings.RavenConnectionStrings;
+
+    // SQL
+    Dictionary<string, SqlConnectionString> sqlConnectionStrings =
+        allConnectionStrings.SqlConnectionStrings;
+
+    // OLAP
+    Dictionary<string, OlapConnectionString> olapConnectionStrings =
+        allConnectionStrings.OlapConnectionStrings;
+
+    // Elasticsearch
+    Dictionary<string, ElasticSearchConnectionString> elasticsearchConnectionStrings =
+        allConnectionStrings.ElasticSearchConnectionStrings;
+
+    // Access the Queue ETL connection strings in a similar manner:
+    // ============================================================
+    Dictionary<string, QueueConnectionString> queueConnectionStrings =
+        allConnectionStrings.QueueConnectionStrings;
+
+    var kafkaConStr = queueConnectionStrings["kafka-connection-string-name"];
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public GetConnectionStringsOperation()
+public GetConnectionStringsOperation(string connectionStringName, ConnectionStringType type)
+`}
+
+
+
+| Parameter | Type | Description |
+|--------------------------|------------------------|--------------------------------------------------------------------------------|
+| **connectionStringName** | `string` | Connection string name |
+| **type** | `ConnectionStringType` | Connection string type:<br/>`Raven`, `Sql`, `Olap`, `ElasticSearch`, `Queue`, or `Snowflake` |
+
+
+
+{`public enum ConnectionStringType
+\{
+    Raven,
+    Sql,
+    Olap,
+    ElasticSearch,
+    Queue,
+    Snowflake
+\}
+`}
+
+
+
+| Return value of `store.Maintenance.Send(GetConnectionStringsOperation)` | |
+|--------------------------------------------------------------------------|---------------------------------------------------------------|
+| `GetConnectionStringsResult` | Holds all connection strings defined on the database |
+
+
+
+{`public class GetConnectionStringsResult
+\{
+    public Dictionary<string, RavenConnectionString> RavenConnectionStrings \{ get; set; \}
+    public Dictionary<string, SqlConnectionString> SqlConnectionStrings \{ get; set; \}
+    public Dictionary<string, OlapConnectionString> OlapConnectionStrings \{ get; set; \}
+    public Dictionary<string, ElasticSearchConnectionString> ElasticSearchConnectionStrings \{ get; set; \}
+    public Dictionary<string, QueueConnectionString> QueueConnectionStrings \{ get; set; \}
+\}
+`}
+
+
+
+
+A detailed syntax for each connection string type is available in the [Add connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx) article.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_remove-connection-string-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_remove-connection-string-csharp.mdx
new file mode 100644
index 0000000000..9336081ac3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/_remove-connection-string-csharp.mdx
@@ -0,0 +1,57 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `RemoveConnectionStringOperation` to remove a connection string definition from the database.
+
+* In this page:
+  * [Remove connection string](../../../../client-api/operations/maintenance/connection-strings/remove-connection-string.mdx#remove-connection-string)
+  * [Syntax](../../../../client-api/operations/maintenance/connection-strings/remove-connection-string.mdx#syntax)
+
+
+## Remove connection string
+
+The following example removes a RavenDB connection string.
+
+
+
+{`var ravenConnectionString = new RavenConnectionString()
+\{
+    // Note:
+    // Only the 'Name' property of the connection string is needed for the remove operation.
+    // Other properties are not considered.
+    Name = "ravendb-connection-string-name"
+\};
+
+// Define the remove connection string operation,
+// pass the connection string to be removed.
+var removeConStrOp
+    = new RemoveConnectionStringOperation<RavenConnectionString>(ravenConnectionString);
+
+// Execute the operation by passing it to Maintenance.Send
+store.Maintenance.Send(removeConStrOp);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public RemoveConnectionStringOperation(T connectionString)
+`}
+
+
+
+| Parameter | Type | Description |
+|----------------------|-------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **connectionString** | `T` | Connection string to remove:<br/>`RavenConnectionString`<br/>`SqlConnectionString`<br/>`SnowflakeConnectionString`<br/>`OlapConnectionString`<br/>`ElasticSearchConnectionString`<br/>`QueueConnectionString` |
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/add-connection-string.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/add-connection-string.mdx
new file mode 100644
index 0000000000..1c01bc98f7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/add-connection-string.mdx
@@ -0,0 +1,36 @@
+---
+title: "Add Connection String Operation"
+hide_table_of_contents: true
+sidebar_label: Add Connection String
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import AddConnectionStringCsharp from './_add-connection-string-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/get-connection-string.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/get-connection-string.mdx
new file mode 100644
index 0000000000..7c11b2c6bc
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/get-connection-string.mdx
@@ -0,0 +1,28 @@
+---
+title: "Get Connection String Operation"
+hide_table_of_contents: true
+sidebar_label: Get Connection String
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GetConnectionStringCsharp from './_get-connection-string-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/remove-connection-string.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/remove-connection-string.mdx
new file mode 100644
index 0000000000..97c9634ccf
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/connection-strings/remove-connection-string.mdx
@@ -0,0 +1,28 @@
+---
+title: "Remove Connection String Operation"
+hide_table_of_contents: true
+sidebar_label: Remove Connection String
+sidebar_position: 2
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import RemoveConnectionStringCsharp from './_remove-connection-string-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_add-etl-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_add-etl-csharp.mdx
new file mode 100644
index 0000000000..8e95b81eb1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_add-etl-csharp.mdx
@@ -0,0 +1,378 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `AddEtlOperation` method to add a new ongoing ETL task to your database.
+
+* To learn about ETL (Extract, Transform, Load) ongoing tasks, see the [ETL Basics](../../../../server/ongoing-tasks/etl/basics.mdx) article.
+  To learn how to manage ETL tasks from Studio, see [Ongoing tasks - overview](../../../../studio/database/tasks/ongoing-tasks/general-info.mdx).
+
+* In this page:
+
+  * [Add RavenDB ETL task](../../../../client-api/operations/maintenance/etl/add-etl.mdx#add-ravendb-etl-task)
+  * [Add SQL ETL task](../../../../client-api/operations/maintenance/etl/add-etl.mdx#add-sql-etl-task)
+  * [Add Snowflake ETL task](../../../../client-api/operations/maintenance/etl/add-etl.mdx#add-snowflake-etl-task)
+  * [Add OLAP ETL task](../../../../client-api/operations/maintenance/etl/add-etl.mdx#add-olap-etl-task)
+  * [Add Elasticsearch ETL task](../../../../client-api/operations/maintenance/etl/add-etl.mdx#add-elasticsearch-etl-task)
+  * [Add Kafka ETL task](../../../../client-api/operations/maintenance/etl/add-etl.mdx#add-kafka-etl-task)
+  * [Add RabbitMQ ETL task](../../../../client-api/operations/maintenance/etl/add-etl.mdx#add-rabbitmq-etl-task)
+  * [Add Azure Queue Storage ETL task](../../../../client-api/operations/maintenance/etl/add-etl.mdx#add-azure-queue-storage-etl-task)
+  * [Add Amazon SQS ETL task](../../../../client-api/operations/maintenance/etl/add-etl.mdx#add-amazon-sqs-etl-task)
+  * [Syntax](../../../../client-api/operations/maintenance/etl/add-etl.mdx#syntax)
+
+
+## Add RavenDB ETL task
+
+* Learn about the RavenDB ETL task in the **[RavenDB ETL task](../../../../server/ongoing-tasks/etl/raven.mdx)** article.
+* Learn how to define a connection string for the RavenDB ETL task here: **[Add a RavenDB connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-ravendb-connection-string)**
+* To manage the RavenDB ETL task from Studio, see **[Studio: RavenDB ETL task](../../../../studio/database/tasks/ongoing-tasks/ravendb-etl-task.mdx)**.
+
+The following example adds a RavenDB ETL task:
+
+
+
+{`// Define the RavenDB ETL task configuration object
+// ================================================
+var ravenEtlConfig = new RavenEtlConfiguration
+\{
+    Name = "task-name",
+    ConnectionStringName = "raven-connection-string-name",
+    Transforms =
+    \{
+        new Transformation
+        \{
+            // The script name
+            Name = "script-name",
+
+            // RavenDB collections the script uses
+            Collections = \{ "Employees" \},
+
+            // The transformation script
+            Script = @"loadToEmployees (\{
+                Name: this.FirstName + ' ' + this.LastName,
+                Title: this.Title
+            \});"
+        \}
+    \},
+
+    // Do not prevent task failover to another node (optional)
+    PinToMentorNode = false
+\};
+
+// Define the AddEtlOperation
+// ==========================
+var operation = new AddEtlOperation<RavenConnectionString>(ravenEtlConfig);
+
+// Execute the operation by passing it to Maintenance.Send
+// =======================================================
+AddEtlOperationResult result = store.Maintenance.Send(operation);
+`}
+
+
+
+
+
+## Add SQL ETL task
+
+* Learn about the SQL ETL task in the **[SQL ETL task](../../../../server/ongoing-tasks/etl/sql.mdx)** article.
+* Learn how to define a connection string for the SQL ETL task here: **[Add an SQL connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-sql-connection-string)**
+
+The following example adds an SQL ETL task:
+
+
+
+{`// Define the SQL ETL task configuration object
+// ============================================
+var sqlEtlConfig = new SqlEtlConfiguration
+\{
+    Name = "task-name",
+    ConnectionStringName = "sql-connection-string-name",
+    SqlTables =
+    \{
+        new SqlEtlTable \{TableName = "Orders", DocumentIdColumn = "Id", InsertOnlyMode = false\},
+        new SqlEtlTable \{TableName = "OrderLines", DocumentIdColumn = "OrderId", InsertOnlyMode = false\},
+    \},
+    Transforms =
+    \{
+        new Transformation
+        \{
+            Name = "script-name",
+            Collections = \{ "Orders" \},
+            Script = @"var orderData = \{
+                Id: id(this),
+                OrderLinesCount: this.Lines.length,
+                TotalCost: 0
+            \};
+
+            for (var i = 0; i < this.Lines.length; i++) \{
+                var line = this.Lines[i];
+                orderData.TotalCost += line.PricePerUnit;
+
+                // Load to SQL table 'OrderLines'
+                loadToOrderLines(\{
+                    OrderId: id(this),
+                    Qty: line.Quantity,
+                    Product: line.Product,
+                    Cost: line.PricePerUnit
+                \});
+            \}
+            orderData.TotalCost = Math.round(orderData.TotalCost * 100) / 100;
+
+            // Load to SQL table 'Orders'
+            loadToOrders(orderData)"
+        \}
+    \},
+
+    // Do not prevent task failover to another node (optional)
+    PinToMentorNode = false
+\};
+
+// Define the AddEtlOperation
+// ===========================
+var operation = new AddEtlOperation<SqlConnectionString>(sqlEtlConfig);
+
+// Execute the operation by passing it to Maintenance.Send
+// =======================================================
+AddEtlOperationResult result = store.Maintenance.Send(operation);
+`}
+
+
+
+
+
+## Add Snowflake ETL task
+
+* Learn about the Snowflake ETL task in the **[Snowflake ETL task](../../../../server/ongoing-tasks/etl/snowflake.mdx)** article.
+* Learn how to define a connection string for the Snowflake ETL task here: **[Add a Snowflake connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-snowflake-connection-string)**
+
+The following example adds a Snowflake ETL task:
+
+
+
+{`// Define the Snowflake ETL task configuration object
+// ==================================================
+var snowflakeEtlConfig = new SnowflakeEtlConfiguration
+\{
+    Name = "task-name",
+    ConnectionStringName = "snowflake-connection-string-name",
+    SnowflakeTables =
+    \{
+        new SnowflakeEtlTable \{TableName = "Orders", DocumentIdColumn = "Id", InsertOnlyMode = false\},
+        new SnowflakeEtlTable \{TableName = "OrderLines", DocumentIdColumn = "OrderId", InsertOnlyMode = false\},
+    \},
+    Transforms =
+    \{
+        new Transformation
+        \{
+            Name = "script-name",
+            Collections = \{ "Orders" \},
+            Script = @"var orderData = \{
+                Id: id(this),
+                OrderLinesCount: this.Lines.length,
+                TotalCost: 0
+            \};
+
+            for (var i = 0; i < this.Lines.length; i++) \{
+                var line = this.Lines[i];
+                orderData.TotalCost += line.PricePerUnit;
+
+                // Load to Snowflake table 'OrderLines'
+                loadToOrderLines(\{
+                    OrderId: id(this),
+                    Qty: line.Quantity,
+                    Product: line.Product,
+                    Cost: line.PricePerUnit
+                \});
+            \}
+            orderData.TotalCost = Math.round(orderData.TotalCost * 100) / 100;
+
+            // Load to Snowflake table 'Orders'
+            loadToOrders(orderData)"
+        \}
+    \},
+
+    // Do not prevent task failover to another node (optional)
+    PinToMentorNode = false
+\};
+
+// Define the AddEtlOperation
+// ===========================
+var operation = new AddEtlOperation<SnowflakeConnectionString>(snowflakeEtlConfig);
+
+// Execute the operation by passing it to Maintenance.Send
+// =======================================================
+AddEtlOperationResult result = store.Maintenance.Send(operation);
+`}
+
+
+
+
+
+## Add OLAP ETL task
+
+* Learn about the OLAP ETL task in the **[OLAP ETL task](../../../../server/ongoing-tasks/etl/olap.mdx)** article.
+* Learn how to define a connection string for the OLAP ETL task here: **[Add an OLAP connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-olap-connection-string)**
+* To manage the OLAP ETL task from Studio, see **[Studio: OLAP ETL task](../../../../studio/database/tasks/ongoing-tasks/olap-etl-task.mdx)**.
+
+The following example adds an OLAP ETL task:
+
+
+
+{`// Define the OLAP ETL task configuration object
+// =============================================
+var olapEtlConfig = new OlapEtlConfiguration
+\{
+    Name = "task-name",
+    ConnectionStringName = "olap-connection-string-name",
+    Transforms =
+    \{
+        new Transformation
+        \{
+            Name = "script-name",
+            Collections = \{"Orders"\},
+            Script = @"var orderDate = new Date(this.OrderedAt);
+                var year = orderDate.getFullYear();
+                var month = orderDate.getMonth();
+                var key = new Date(year, month);
+                loadToOrders(key, \{
+                    Company : this.Company,
+                    ShipVia : this.ShipVia
+                \})"
+        \}
+    \}
+\};
+
+// Define the AddEtlOperation
+// ==========================
+var operation = new AddEtlOperation<OlapConnectionString>(olapEtlConfig);
+
+// Execute the operation by passing it to Maintenance.Send
+// =======================================================
+AddEtlOperationResult result = store.Maintenance.Send(operation);
+`}
+
+
+
+
+
+## Add Elasticsearch ETL task
+
+* Learn about the Elasticsearch ETL task in the **[Elasticsearch ETL task](../../../../server/ongoing-tasks/etl/elasticsearch.mdx)** article.
+* Learn how to define a connection string for the Elasticsearch ETL task here: **[Add an Elasticsearch connection string](../../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-elasticsearch-connection-string)**
+* To manage the Elasticsearch ETL task from Studio, see **[Studio: Elasticsearch ETL task](../../../../studio/database/tasks/ongoing-tasks/elasticsearch-etl-task.mdx)**.
+
+The following example adds an Elasticsearch ETL task:
+
+
+
+{`// Define the Elasticsearch ETL task configuration object
+// ======================================================
+var elasticsearchEtlConfig = new ElasticSearchEtlConfiguration
+\{
+    Name = "task-name",
+    ConnectionStringName = "elasticsearch-connection-string-name",
+    ElasticIndexes =
+    \{
+        // Define Elasticsearch Indexes
+        new ElasticSearchIndex
+        \{
+            // Elasticsearch Index name
+            IndexName = "orders",
+            // The Elasticsearch document property that will contain the source RavenDB document id.
+            // Make sure this property is also defined inside the transform script.
+            DocumentIdProperty = "DocId",
+            InsertOnlyMode = false
+        \},
+        new ElasticSearchIndex
+        \{
+            IndexName = "lines",
+            DocumentIdProperty = "OrderLinesCount",
+            // If true, don't send _delete_by_query before appending docs
+            InsertOnlyMode = true
+        \}
+    \},
+    Transforms =
+    \{
+        new Transformation()
+        \{
+            Collections = \{ "Orders" \},
+            Script = @"var orderData = \{
+                DocId: id(this),
+                OrderLinesCount: this.Lines.length,
+                TotalCost: 0
+            \};
+
+            // Write the \`orderData\` as a document to the Elasticsearch 'orders' index
+            loadToOrders(orderData);",
+
+            Name = "script-name"
+        \}
+    \}
+\};
+
+// Define the AddEtlOperation
+// ==========================
+var operation = new AddEtlOperation<ElasticSearchConnectionString>(elasticsearchEtlConfig);
+
+// Execute the operation by passing it to Maintenance.Send
+// =======================================================
+store.Maintenance.Send(operation);
+`}
+
+
+
+
+
+## Add Kafka ETL task
+
+* Learn about the Kafka ETL task in the **[Kafka ETL task](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx)** article.
+* Learn how to define a connection string for the Kafka ETL task here: **[Add a Kafka connection string](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#add-a-kafka-connection-string)**
+* To manage the Kafka ETL task from Studio, see **[Studio: Kafka ETL task](../../../../studio/database/tasks/ongoing-tasks/kafka-etl-task.mdx)**.
+* Examples showing how to add a Kafka ETL task are available in the **[Add a Kafka ETL task](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#add-a-kafka-etl-task)** section; a minimal sketch follows below.
+
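+For orientation, here is a minimal hedged sketch, assuming the `QueueEtlConfiguration` type used by
+Queue ETL tasks and a simplified transform script; see the linked sections above for the authoritative
+examples:
+
+
+{`// A minimal sketch (assumed API) - define and add a Kafka ETL task
+// ================================================================
+var kafkaEtlConfig = new QueueEtlConfiguration
+\{
+    Name = "task-name",
+
+    // The name of an existing Kafka connection string
+    ConnectionStringName = "kafka-connection-string-name",
+
+    // The broker type marks this as a Kafka ETL task
+    BrokerType = QueueBrokerType.Kafka,
+
+    Transforms =
+    \{
+        new Transformation
+        \{
+            Name = "script-name",
+            Collections = \{ "Orders" \},
+
+            // Load a projection of each Orders document to the 'Orders' topic
+            Script = @"var orderData = \{
+                Id: id(this),
+                Company: this.Company
+            \};
+            loadToOrders(orderData);"
+        \}
+    \}
+\};
+
+// Execute the operation by passing it to Maintenance.Send
+var operation = new AddEtlOperation<QueueConnectionString>(kafkaEtlConfig);
+store.Maintenance.Send(operation);
+`}
+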
+
+
+## Add RabbitMQ ETL task
+
+* Learn about the RabbitMQ ETL task in the **[RabbitMQ ETL task](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx)** article.
+* Learn how to define a connection string for the RabbitMQ ETL task here: **[Add a RabbitMQ connection string](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#add-a-rabbitmq-connection-string)**
+* To manage the RabbitMQ ETL task from Studio, see **[Studio: RabbitMQ ETL task](../../../../studio/database/tasks/ongoing-tasks/rabbitmq-etl-task.mdx)**.
+* Examples showing how to add a RabbitMQ ETL task are available in the **[Add a RabbitMQ ETL task](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#add-a-rabbitmq-etl-task)** section.
+
+
+
+## Add Azure Queue Storage ETL task
+
+* Learn about the Azure Queue Storage ETL task in the **[Azure Queue Storage ETL task](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx)** article.
+* Learn how to define a connection string for the Azure Queue Storage ETL task here:
+  **[Add an Azure Queue Storage connection string](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#add-an-azure-queue-storage-connection-string)**
+* To manage the Azure Queue Storage ETL task from Studio, see **[Studio: Azure Queue Storage ETL task](../../../../studio/database/tasks/ongoing-tasks/azure-queue-storage-etl.mdx)**.
+* Examples showing how to add an Azure Queue Storage ETL task are available in the **[Add an Azure Queue Storage ETL task](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#add-an-azure-queue-storage-etl-task)** section.
+
+
+
+## Add Amazon SQS ETL task
+
+* Learn about the AWS SQS ETL task in the **[Amazon SQS ETL task](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx)** article.
+    * [This section](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#add-an-amazon-sqs-connection-string)
+      shows how to define a connection string to the SQS destination.
+    * [This section](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#add-an-amazon-sqs-etl-task)
+      shows how to run an ETL task that uses the defined connection string.
+* To learn how to manage the task from Studio, see **[Studio: Amazon SQS ETL Task](../../../../studio/database/tasks/ongoing-tasks/amazon-sqs-etl.mdx)**.
+
+
+
+## Syntax
+
+
+
+{`public AddEtlOperation(EtlConfiguration<T> configuration)
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------|-----------------------|----------------------------------------------------------------------|
+| **configuration** | `EtlConfiguration<T>` | The ETL configuration object where `T` is the connection string type |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_add-etl-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_add-etl-java.mdx
new file mode 100644
index 0000000000..722c67c680
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_add-etl-java.mdx
@@ -0,0 +1,159 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `AddEtlOperation` method to add a new ongoing ETL task to your database.
+
+* To learn about ETL (Extract, Transform, Load) ongoing tasks, see the [ETL Basics](../../../../server/ongoing-tasks/etl/basics.mdx) article.
+  To learn how to manage ETL tasks from the Studio, see [Ongoing tasks - overview](../../../../studio/database/tasks/ongoing-tasks/general-info.mdx).
+ +* In this page: + * [Example - add Raven ETL](../../../../client-api/operations/maintenance/etl/add-etl.mdx#example---add-raven-etl) + * [Example - add SQL ETL](../../../../client-api/operations/maintenance/etl/add-etl.mdx#example---add-sql-etl) + * [Example - add OLAP ETL](../../../../client-api/operations/maintenance/etl/add-etl.mdx#example---add-olap-etl) + * [Syntax](../../../../client-api/operations/maintenance/etl/add-etl.mdx#syntax) + + +## Example - add Raven ETL + + + +{`RavenEtlConfiguration configuration = new RavenEtlConfiguration(); +configuration.setName("Employees ETL"); +Transformation transformation = new Transformation(); +transformation.setName("Script #1"); +transformation.setScript("loadToEmployees (\{\\n" + + " Name: this.FirstName + ' ' + this.LastName,\\n" + + " Title: this.Title\\n" + + "\});"); + +configuration.setTransforms(Arrays.asList(transformation)); +AddEtlOperation operation = new AddEtlOperation<>(configuration); +AddEtlOperationResult result = store.maintenance().send(operation); +`} + + + + + +**Secure servers**: + +To [connect secure RavenDB servers](../../../../server/security/authentication/certificate-management.mdx#enabling-communication-between-servers:-importing-and-exporting-certificates) +you need to + +1. Export the server certificate from the source server. +2. Install it as a client certificate on the destination server. + +This can be done in the RavenDB Studio -> Server Management -> [Certificates view](../../../../server/security/authentication/certificate-management.mdx#studio-certificates-management-view). + + + + +## Example - add SQL ETL + + + +{`SqlEtlConfiguration configuration = new SqlEtlConfiguration(); +SqlEtlTable table1 = new SqlEtlTable(); +table1.setTableName("Orders"); +table1.setDocumentIdColumn("Id"); +table1.setInsertOnlyMode(false); + +SqlEtlTable table2 = new SqlEtlTable(); +table2.setTableName("OrderLines"); +table2.setDocumentIdColumn("OrderId"); +table2.setInsertOnlyMode(false); + +configuration.setSqlTables(Arrays.asList(table1, table2)); +configuration.setName("Order to SQL"); +configuration.setConnectionStringName("sql-connection-string-name"); + +Transformation transformation = new Transformation(); +transformation.setName("Script #1"); +transformation.setCollections(Arrays.asList("Orders")); +transformation.setScript("var orderData = \{\\n" + + " Id: id(this),\\n" + + " OrderLinesCount: this.Lines.length,\\n" + + " TotalCost: 0\\n" + + "\};\\n" + + "\\n" + + " for (var i = 0; i < this.Lines.length; i++) \{\\n" + + " var line = this.Lines[i];\\n" + + " orderData.TotalCost += line.PricePerUnit;\\n" + + "\\n" + + " // Load to SQL table 'OrderLines'\\n" + + " loadToOrderLines(\{\\n" + + " OrderId: id(this),\\n" + + " Qty: line.Quantity,\\n" + + " Product: line.Product,\\n" + + " Cost: line.PricePerUnit\\n" + + " \});\\n" + + " \}\\n" + + " orderData.TotalCost = Math.round(orderData.TotalCost * 100) / 100;\\n" + + "\\n" + + " // Load to SQL table 'Orders'\\n" + + " loadToOrders(orderData)"); + +configuration.setTransforms(Arrays.asList(transformation)); + +AddEtlOperation operation = new AddEtlOperation<>(configuration); + +AddEtlOperationResult result = store.maintenance().send(operation); +`} + + + + + +## Example - add OLAP ETL + + + +{`OlapEtlConfiguration configuration = new OlapEtlConfiguration(); + +configuration.setName("Orders ETL"); +configuration.setConnectionStringName("olap-connection-string-name"); + +Transformation transformation = new Transformation(); +transformation.setName("Script #1"); 
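+// The script below extracts the year and month from each order's OrderedAt field
+// and uses them as the key when loading the order into the OLAP destination,
+// so the output is partitioned by month: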
+transformation.setCollections(Arrays.asList("Orders"));
+transformation.setScript("var orderDate = new Date(this.OrderedAt);\\n"+
+    "var year = orderDate.getFullYear();\\n"+
+    "var month = orderDate.getMonth();\\n"+
+    "var key = new Date(year, month);\\n"+
+    "loadToOrders(key, \{\\n"+
+    "    Company : this.Company,\\n"+
+    "    ShipVia : this.ShipVia\\n"+
+    "\})"
+);
+
+configuration.setTransforms(Arrays.asList(transformation));
+
+AddEtlOperation operation = new AddEtlOperation<>(configuration);
+
+AddEtlOperationResult result = store.maintenance().send(operation);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public AddEtlOperation(EtlConfiguration<T> configuration);
+`}
+
+
+
+| Parameter         | Type                  | Description                                                |
+|-------------------|-----------------------|------------------------------------------------------------|
+| **configuration** | `EtlConfiguration<T>` | The ETL configuration where `T` is the connection string type |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_add-etl-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_add-etl-nodejs.mdx
new file mode 100644
index 0000000000..df3c02b283
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_add-etl-nodejs.mdx
@@ -0,0 +1,130 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `AddEtlOperation` method to add a new ongoing ETL task to your database.
+
+* To learn about ETL (Extract, Transform, Load) ongoing tasks, see the article [ETL Basics](../../../../server/ongoing-tasks/etl/basics.mdx).
+  To learn how to manage ETL tasks from the Studio, see [Ongoing tasks - overview](../../../../studio/database/tasks/ongoing-tasks/general-info.mdx). 
+
+* In this page:
+    * [Example - add Raven ETL](../../../../client-api/operations/maintenance/etl/add-etl.mdx#example---add-raven-etl)
+    * [Example - add SQL ETL](../../../../client-api/operations/maintenance/etl/add-etl.mdx#example---add-sql-etl)
+    * [Example - add OLAP ETL](../../../../client-api/operations/maintenance/etl/add-etl.mdx#example---add-olap-etl)
+    * [Syntax](../../../../client-api/operations/maintenance/etl/add-etl.mdx#syntax)
+
+
+## Example - add Raven ETL
+
+
+
+{`const etlConfigurationRvn = Object.assign(new RavenEtlConfiguration(), \{
+    connectionStringName: "raven-connection-string-name",
+    disabled: false,
+    name: "etlRvn"
+\});
+
+const transformationRvn = \{
+    applyToAllDocuments: true,
+    name: "Script #1"
+\};
+
+etlConfigurationRvn.transforms = [transformationRvn];
+
+const operationRvn = new AddEtlOperation(etlConfigurationRvn);
+const etlResultRvn = await store.maintenance.send(operationRvn);
+`}
+
+
+
+
+
+## Example - add SQL ETL
+
+
+
+{`const transformation = \{
+    applyToAllDocuments: true,
+    name: "Script #1"
+\};
+
+const table1 = \{
+    documentIdColumn: "Id",
+    insertOnlyMode: false,
+    tableName: "Users"
+\};
+
+const etlConfigurationSql = Object.assign(new SqlEtlConfiguration(), \{
+    connectionStringName: "sql-connection-string-name",
+    disabled: false,
+    name: "etlSql",
+    transforms: [transformation],
+    sqlTables: [table1]
+\});
+
+const operationSql = new AddEtlOperation(etlConfigurationSql);
+const etlResult = await store.maintenance.send(operationSql);
+`}
+
+
+
+
+
+## Example - add OLAP ETL
+
+
+
+{`const transformationOlap = \{
+    applyToAllDocuments: true,
+    name: "Script #1"
+\};
+
+const etlConfigurationOlap = Object.assign(new OlapEtlConfiguration(), \{
+    connectionStringName: "olap-connection-string-name",
+    disabled: false,
+    name: "etlOlap",
+    transforms: [transformationOlap],
+\});
+
+const operationOlap = new AddEtlOperation(etlConfigurationOlap);
+const etlResultOlap = await store.maintenance.send(operationOlap);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`const operation = new AddEtlOperation(etlConfiguration);
+`}
+
+
+
+| Parameter         | Type                      | Description                       |
+|-------------------|---------------------------|-----------------------------------|
+| **configuration** | `EtlConfiguration` object | The ETL task configuration to add |
+
+
+
+{`class EtlConfiguration \{
+    taskId?; // number
+    name; // string
+    mentorNode?; // string
+    connectionStringName; // string
+    transforms; // Transformation[]
+    disabled?; // boolean
+    allowEtlOnNonEncryptedChannel?; // boolean
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_category_.json b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_category_.json
new file mode 100644
index 0000000000..281dfffa47
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 7,
+    "label": "ETL"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_reset-etl-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_reset-etl-csharp.mdx
new file mode 100644
index 0000000000..a7e8977040
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_reset-etl-csharp.mdx
@@ -0,0 +1,32 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+An ETL task processes documents from the point where 
the last batch finished. To restart processing from the very beginning, you can reset the ETL task by using **ResetEtlOperation**.
+
+## Syntax
+
+
+
+{`public ResetEtlOperation(string configurationName, string transformationName)
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ----- | ---- |
+| **configurationName** | string | ETL configuration name |
+| **transformationName** | string | Name of the ETL transformation |
+
+## Example
+
+
+
+{`ResetEtlOperation operation = new ResetEtlOperation("OrdersExport", "script1");
+store.Maintenance.Send(operation);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_reset-etl-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_reset-etl-java.mdx
new file mode 100644
index 0000000000..fd6345e25d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_reset-etl-java.mdx
@@ -0,0 +1,32 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+An ETL task processes documents from the point where the last batch finished. To restart processing from the very beginning, you can reset the ETL task by using **ResetEtlOperation**.
+
+## Syntax
+
+
+
+{`public ResetEtlOperation(String configurationName, String transformationName);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ----- | ---- |
+| **configurationName** | String | ETL configuration name |
+| **transformationName** | String | Name of the ETL transformation |
+
+## Example
+
+
+
+{`ResetEtlOperation resetEtlOperation = new ResetEtlOperation("OrdersExport", "script1");
+store.maintenance().send(resetEtlOperation);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_update-etl-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_update-etl-csharp.mdx
new file mode 100644
index 0000000000..30c5921f0e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_update-etl-csharp.mdx
@@ -0,0 +1,56 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+You can modify an existing ETL task by using **UpdateEtlOperation**.
+
+## Syntax
+
+
+
+{`public UpdateEtlOperation(long taskId, EtlConfiguration<T> configuration)
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ----- | ---- |
+| **taskId** | long | The current ETL task ID |
+| **configuration** | `EtlConfiguration<T>` | The ETL configuration where `T` is the connection string type |
+
+## Example
+
+
+
+{`// AddEtlOperationResult addEtlResult = store.Maintenance.Send(new AddEtlOperation() \{ ... 
\});
+
+UpdateEtlOperation<RavenConnectionString> operation = new UpdateEtlOperation<RavenConnectionString>(
+    addEtlResult.TaskId,
+    new RavenEtlConfiguration
+    \{
+        ConnectionStringName = "raven-connection-string-name",
+        Name = "Employees ETL",
+        Transforms =
+        \{
+            new Transformation
+            \{
+                Name = "Script #1",
+                Collections =
+                \{
+                    "Employees"
+                \},
+                Script = @"loadToEmployees (\{
+                    Name: this.FirstName + ' ' + this.LastName,
+                    Title: this.Title
+                \});"
+            \}
+        \}
+    \});
+
+UpdateEtlOperationResult result = store.Maintenance.Send(operation);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_update-etl-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_update-etl-java.mdx
new file mode 100644
index 0000000000..a68277bbf3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/_update-etl-java.mdx
@@ -0,0 +1,48 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+You can modify an existing ETL task by using **UpdateEtlOperation**.
+
+## Syntax
+
+
+
+{`public UpdateEtlOperation(long taskId, EtlConfiguration<T> configuration);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ----- | ---- |
+| **taskId** | Long | The current ETL task ID |
+| **configuration** | `EtlConfiguration<T>` | The ETL configuration where `T` is the connection string type |
+
+## Example
+
+
+
+{`// AddEtlOperationResult addEtlResult = store.maintenance().send(new AddEtlOperation<>(...));
+
+RavenEtlConfiguration etlConfiguration = new RavenEtlConfiguration();
+etlConfiguration.setConnectionStringName("raven-connection-string-name");
+etlConfiguration.setName("Employees ETL");
+Transformation transformation = new Transformation();
+transformation.setName("Script #1");
+transformation.setCollections(Arrays.asList("Employees"));
+transformation.setScript("loadToEmployees (\{\\n" +
+    " Name: this.FirstName + ' ' + this.LastName,\\n" +
+    " Title: this.Title\\n" +
+    " \});");
+
+etlConfiguration.setTransforms(Arrays.asList(transformation));
+
+UpdateEtlOperation<RavenConnectionString> operation = new UpdateEtlOperation<>(
+    addEtlResult.getTaskId(), etlConfiguration);
+UpdateEtlOperationResult result = store.maintenance().send(operation);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/add-etl.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/add-etl.mdx
new file mode 100644
index 0000000000..42a54ed2cf
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/add-etl.mdx
@@ -0,0 +1,63 @@
+---
+title: "Add ETL Operation"
+hide_table_of_contents: true
+sidebar_label: Add ETL
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import AddEtlCsharp from './_add-etl-csharp.mdx';
+import AddEtlJava from './_add-etl-java.mdx';
+import AddEtlNodejs from './_add-etl-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/reset-etl.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/reset-etl.mdx
new file mode 100644
index 0000000000..627efe0e7a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/reset-etl.mdx
@@ -0,0 +1,37 @@
+---
+title: "Operations: How to Reset ETL"
+hide_table_of_contents: true
+sidebar_label: Reset ETL
+sidebar_position: 
2
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import ResetEtlCsharp from './_reset-etl-csharp.mdx';
+import ResetEtlJava from './_reset-etl-java.mdx';
+
+export const supportedLanguages = ["csharp", "java"];
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/etl/update-etl.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/update-etl.mdx
new file mode 100644
index 0000000000..3a6762c538
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/etl/update-etl.mdx
@@ -0,0 +1,37 @@
+---
+title: "Operations: How to Update ETL"
+hide_table_of_contents: true
+sidebar_label: Update ETL
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import UpdateEtlCsharp from './_update-etl-csharp.mdx';
+import UpdateEtlJava from './_update-etl-java.mdx';
+
+export const supportedLanguages = ["csharp", "java"];
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/get-stats.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/get-stats.mdx
new file mode 100644
index 0000000000..05bac5ccf7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/get-stats.mdx
@@ -0,0 +1,54 @@
+---
+title: "Get Statistics"
+hide_table_of_contents: true
+sidebar_label: Get Statistics
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GetStatsCsharp from './_get-stats-csharp.mdx';
+import GetStatsJava from './_get-stats-java.mdx';
+import GetStatsPython from './_get-stats-python.mdx';
+import GetStatsPhp from './_get-stats-php.mdx';
+import GetStatsNodejs from './_get-stats-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_category_.json b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_category_.json
new file mode 100644
index 0000000000..b3c0bbf97b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 3,
+    "label": "Identities"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-csharp.mdx
new file mode 100644
index 0000000000..e1b4e99233
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-csharp.mdx
@@ -0,0 +1,115 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Upon document creation, providing a collection name with a pipe symbol (`|`)
+  will cause the server to generate an ID for the new document called an **identity**.
+  E.g. `companies|`
+
+* The identity document ID is unique across the entire cluster within the database scope. 
+  It is composed of the provided collection name and an integer value that is continuously incremented.
+
+* Identity values can also be managed from the Studio [identities](../../../../studio/database/documents/identities-view.mdx) view.
+
+* Use `GetIdentitiesOperation` to get the dictionary that maps collection names to their corresponding latest identity values.
+
+
+Learn more about identities in:
+
+* [Document identifier generation - Identity ID](../../../../server/kb/document-identifier-generation.mdx#identity-id)
+* [Working with document identifiers](../../../../client-api/document-identifiers/working-with-document-identifiers.mdx#identities)
+
+
+
+* In this page:
+
+    * [Get identities operation](../../../../client-api/operations/maintenance/identities/get-identities.mdx#get-identities-operation)
+    * [Syntax](../../../../client-api/operations/maintenance/identities/get-identities.mdx#syntax)
+
+
+## Get identities operation
+
+
+
+
+{`// Create a document with an identity ID:
+// ======================================
+
+using (var session = store.OpenSession())
+{
+    // Request the server to generate an identity ID for the new document. Pass:
+    // * The entity to store
+    // * The collection name with a pipe (|) postfix
+    session.Store(new Company { Name = "RavenDB" }, "companies|");
+
+    // If this is the first identity created for this collection,
+    // and if the identity value was not customized
+    // then a document with an identity ID "companies/1" will be created
+    session.SaveChanges();
+}
+
+// Get identities information:
+// ===========================
+
+// Define the get identities operation
+var getIdentitiesOp = new GetIdentitiesOperation();
+
+// Execute the operation by passing it to Maintenance.Send
+Dictionary<string, long> identities = store.Maintenance.Send(getIdentitiesOp);
+
+// Results
+var latestIdentityValue = identities["companies|"]; // => value will be 1
+`}
+
+
+
+{`// Create a document with an identity ID:
+// ======================================
+
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Request the server to generate an identity ID for the new document. Pass:
+    // * The entity to store
+    // * The collection name with a pipe (|) postfix
+    await asyncSession.StoreAsync(new Company { Name = "RavenDB" }, "companies|");
+
+    // If this is the first identity created for this collection,
+    // and if the identity value was not customized
+    // then a document with an identity ID "companies/1" will be created
+    await asyncSession.SaveChangesAsync();
+}
+
+// Get identities information:
+// ===========================
+
+// Define the get identities operation
+var getIdentitiesOp = new GetIdentitiesOperation();
+
+// Execute the operation by passing it to Maintenance.SendAsync
+Dictionary<string, long> identities = await store.Maintenance.SendAsync(getIdentitiesOp);
+
+// Results
+var latestIdentityValue = identities["companies|"]; // => value will be 1
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public GetIdentitiesOperation();
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-java.mdx
new file mode 100644
index 0000000000..722f0b914f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-java.mdx
@@ -0,0 +1,27 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+**GetIdentitiesOperation** returns a dictionary that maps each collection name to its latest identity value.
+
+## Syntax
+
+
+
+{`public GetIdentitiesOperation()
+`}
+
+
+
+## Example
+
+
+
+{`Map<String, Long> identities
+    = store.maintenance().send(new GetIdentitiesOperation());
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-nodejs.mdx
new file mode 100644
index 0000000000..801b42934c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-nodejs.mdx
@@ -0,0 +1,79 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Upon document creation, providing a collection name with a pipe symbol (`|`)
+  will cause the server to generate an ID for the new document called an **identity**.
+  E.g. `companies|`
+
+* The identity document ID is unique across the entire cluster within the database scope.
+  It is composed of the provided collection name and an integer value that is continuously incremented.
+
+* Identity values can also be managed from the Studio [identities](../../../../studio/database/documents/identities-view.mdx) view.
+
+* Use `GetIdentitiesOperation` to get the dictionary that maps collection names to their corresponding latest identity values. 
+ + +Learn more about identities in: +[Document identifier generation - Identity ID](../../../../server/kb/document-identifier-generation.mdx#identity-id) + + +* In this page: + + * [Get identities operation](../../../../client-api/operations/maintenance/identities/get-identities.mdx#get-identities-operation) + * [Syntax](../../../../client-api/operations/maintenance/identities/get-identities.mdx#syntax) + + +## Get identities operation + + + +{`// Create a document with an identity ID: +// ====================================== + +const session = documentStore.openSession(); +const company = new Company(); +company.name = "RavenDB"; + +// Request the server to generate an identity ID for the new document. Pass: +// * The entity to store +// * The collection name with a pipe (|) postfix +await session.store(company, "companies|"); + +// If this is the first identity created for this collection, +// and if the identity value was not customized +// then a document with an identity ID "companies/1" will be created +await session.saveChanges(); + +// Get identities information: +// =========================== + +// Define the get identities operation +const getIdentitiesOp = new GetIdentitiesOperation(); + +// Execute the operation by passing it to maintenance.send +const identities = await store.maintenance.send(getIdentitiesOp); + +// Results +const latestIdentityValue = identities["companies|"]; // => value will be 1 +`} + + + + + +## Syntax + + + +{`const getIdentitiesOp = new GetIdentitiesOperation(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-php.mdx new file mode 100644 index 0000000000..a06083f2e1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-php.mdx @@ -0,0 +1,86 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Upon document creation, providing a collection name with a pipe symbol (`|`) + will cause the server to generate an ID for the new document called an **identity**. + E.g. `companies|` + +* The identity document ID is unique across the entire cluster within the database scope. + It is composed of the provided collection name and an integer value that is continuously incremented. + +* Identity values can also be managed from the Studio [identities](../../../../studio/database/documents/identities-view.mdx) view. + +* Use `GetIdentitiesOperation` to get the dictionary that maps collection names to their corresponding latest identity values. + + +Learn more about identities in: + +* [Document identifier generation - Identity ID](../../../../server/kb/document-identifier-generation.mdx#identity-id) +* [Working with document identifiers](../../../../client-api/document-identifiers/working-with-document-identifiers.mdx#identities) + + + +* In this page: + + * [Get identities operation](../../../../client-api/operations/maintenance/identities/get-identities.mdx#get-identities-operation) + * [Syntax](../../../../client-api/operations/maintenance/identities/get-identities.mdx#syntax) + + +## Get identities operation + + + +{`// Create a document with an identity ID: +// ====================================== + +$session = $store->openSession(); +try \{ + // Request the server to generate an identity ID for the new document. 
Pass: + // * The entity to store + // * The collection name with a pipe (|) postfix + $company = new Company(); + $company->setName("RavenDB"); + $session->store($company, "companies|"); + + // If this is the first identity created for this collection, + // and if the identity value was not customized + // then a document with an identity ID "companies/1" will be created + $session->saveChanges(); +\} finally \{ + $session->close(); +\} + +// Get identities information: +// =========================== + +// Define the get identities operation +$getIdentitiesOp = new GetIdentitiesOperation(); + +// Execute the operation by passing it to Maintenance.Send +/** @var array $identities */ +$identities = $store->maintenance()->send($getIdentitiesOp); + +// Results +$latestIdentityValue = $identities["companies|"]; // => value will be 1 +`} + + + + + +## Syntax + + + +{`GetIdentitiesOperation(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-python.mdx new file mode 100644 index 0000000000..0887420d06 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_get-identities-python.mdx @@ -0,0 +1,75 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Upon document creation, providing a collection name with a pipe symbol (`|`) + will cause the server to generate an ID for the new document called an **identity**. + E.g. `companies|` + +* The identity document ID is unique across the entire cluster within the database scope. + It is composed of the provided collection name and an integer value that is continuously incremented. + +* Identity values can also be managed from the Studio [identities](../../../../studio/database/documents/identities-view.mdx) view. + +* Use `GetIdentitiesOperation` to get the dictionary that maps collection names to their corresponding latest identity values. + +* Learn more about identities in: + + * [Document identifier generation - Identity ID](../../../../server/kb/document-identifier-generation.mdx#identity-id) + * [Working with document identifiers](../../../../client-api/document-identifiers/working-with-document-identifiers.mdx#identities) + +* In this page: + + * [Get identities operation](../../../../client-api/operations/maintenance/identities/get-identities.mdx#get-identities-operation) + * [Syntax](../../../../client-api/operations/maintenance/identities/get-identities.mdx#syntax) + + +## Get identities operation + + + +{`# Create a document with an identity ID: +# ====================================== +with store.open_session() as session: + # Request the server to generate an identity ID for the new document. 
Pass:
+    # * The entity to store
+    # * The collection name with a pipe (|) postfix
+    session.store(Company(name="RavenDB"), "companies|")
+
+    # If this is the first identity created for this collection,
+    # and if the identity value was not customized
+    # then a document with an identity ID "companies/1" will be created
+    session.save_changes()
+
+# Get identities information:
+# ===========================
+
+# Define the get identities operation
+get_identities_op = GetIdentitiesOperation()
+
+# Execute the operation by passing it to maintenance.send
+identities = store.maintenance.send(get_identities_op)
+
+# Results
+latest_identity_value = identities["companies|"] # => value will be 1
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`class GetIdentitiesOperation(MaintenanceOperation[Dict[str, int]]): ...
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-csharp.mdx
new file mode 100644
index 0000000000..9965d90b1a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-csharp.mdx
@@ -0,0 +1,114 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `NextIdentityForOperation` to increment the latest identity value set on the server for the specified collection in the database.
+
+* The next document that will be created using an identity for the collection will receive the consecutive integer value.
+
+* In this page:
+
+    * [Increment the next identity value](../../../../client-api/operations/maintenance/identities/increment-next-identity.mdx#increment-the-next-identity-value)
+    * [Syntax](../../../../client-api/operations/maintenance/identities/increment-next-identity.mdx#syntax)
+
+
+## Increment the next identity value
+
+
+
+
+{`// Create a document with an identity ID:
+// ======================================
+
+using (var session = store.OpenSession())
+{
+    // Pass a collection name that ends with a pipe '|' to create an identity ID
+    session.Store(new Company { Name = "RavenDB" }, "companies|");
+    session.SaveChanges();
+    // => Document "companies/1" will be created
+}
+
+// Increment the identity value on the server:
+// ===========================================
+
+// Define the next identity operation
+// Pass the collection name (can be with or without a pipe)
+var nextIdentityOp = new NextIdentityForOperation("companies|");
+
+// Execute the operation by passing it to Maintenance.Send
+// The latest value will be incremented to "2"
+// and the next document created with an identity will be assigned "3"
+long incrementedValue = store.Maintenance.Send(nextIdentityOp);
+
+// Create another document with an identity ID:
+// ============================================
+
+using (var session = store.OpenSession())
+{
+    session.Store(new Company { Name = "RavenDB" }, "companies|");
+    session.SaveChanges();
+    // => Document "companies/3" will be created
+}
+`}
+
+
+
+{`// Create a document with an identity ID:
+// ======================================
+
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Pass a collection name that ends with a pipe '|' to create an identity ID
+    await asyncSession.StoreAsync(new Company { Name = "RavenDB" }, "companies|");
+    await asyncSession.SaveChangesAsync();
+    // => Document "companies/1" will be created
+}
+
+// Increment the identity value on the server:
+// ===========================================
+
+// Define the next identity operation
+// Pass the collection name (can be with or without a pipe)
+var nextIdentityOp = new NextIdentityForOperation("companies|");
+
+// Execute the operation by passing it to Maintenance.SendAsync
+// The latest value will be incremented to "2"
+// and the next document created with an identity will be assigned "3"
+long incrementedValue = await store.Maintenance.SendAsync(nextIdentityOp);
+
+// Create another document with an identity ID:
+// ============================================
+
+using (var asyncSession = store.OpenAsyncSession())
+{
+    await asyncSession.StoreAsync(new Company { Name = "AnotherCompany" }, "companies|");
+    await asyncSession.SaveChangesAsync();
+    // => Document "companies/3" will be created
+}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public NextIdentityForOperation(string name);
+`}
+
+
+
+| Parameter | Type   | Description                                                                                                                                      |
+|-----------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------|
+| **name**  | string | The collection name for which to increment the identity value.
Can be with or without a pipe at the end (e.g. "companies" or "companies\|"). |
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-nodejs.mdx
new file mode 100644
index 0000000000..80e1db1084
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-nodejs.mdx
@@ -0,0 +1,77 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `NextIdentityForOperation` to increment the latest identity value set on the server for the specified collection in the database.
+
+* The next document that will be created using an identity for the collection will receive the consecutive integer value.
+
+* In this page:
+
+    * [Increment the next identity value](../../../../client-api/operations/maintenance/identities/increment-next-identity.mdx#increment-the-next-identity-value)
+    * [Syntax](../../../../client-api/operations/maintenance/identities/increment-next-identity.mdx#syntax)
+
+
+## Increment the next identity value
+
+
+
+{`// Create a document with an identity ID:
+// ======================================
+
+const session = documentStore.openSession();
+const company = new Company();
+company.name = "RavenDB";
+
+// Pass a collection name that ends with a pipe '|' to create an identity ID
+await session.store(company, "companies|");
+
+await session.saveChanges();
+// => Document "companies/1" will be created
+
+// Increment the identity value on the server:
+// ===========================================
+
+// Define the next identity operation
+// Pass the collection name (can be with or without a pipe)
+const nextIdentityOp = new NextIdentityForOperation("companies|");
+
+// Execute the operation by passing it to maintenance.send
+// The latest value will be incremented to "2"
+// and the next document created with an identity will be assigned "3"
+const incrementedValue = await store.maintenance.send(nextIdentityOp);
+
+// Create another document with an identity ID:
+// ============================================
+
+const company2 = new Company();
+company2.name = "AnotherCompany";
+
+await session.store(company2, "companies|");
+await session.saveChanges();
+// => Document "companies/3" will be created
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`const nextIdentityOp = new NextIdentityForOperation(name);
+`}
+
+
+
+| Parameter | Type   | Description                                                                                                                                      |
+|-----------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------|
+| **name**  | string | The collection name for which to increment the identity value.
Can be with or without a pipe at the end (e.g. "companies" or "companies\|"). |
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-php.mdx
new file mode 100644
index 0000000000..a22eef236f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-php.mdx
@@ -0,0 +1,83 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `NextIdentityForOperation` to increment the latest identity value set on the server for the specified collection in the database.
+
+* The next document that will be created using an identity for the collection will receive the consecutive integer value.
+
+* In this page:
+
+    * [Increment the next identity value](../../../../client-api/operations/maintenance/identities/increment-next-identity.mdx#increment-the-next-identity-value)
+    * [Syntax](../../../../client-api/operations/maintenance/identities/increment-next-identity.mdx#syntax)
+
+
+## Increment the next identity value
+
+
+
+{`// Create a document with an identity ID:
+// ======================================
+
+$session = $store->openSession();
+try \{
+    // Pass a collection name that ends with a pipe '|' to create an identity ID
+    $company = new Company();
+    $company->setName("RavenDB");
+    $session->store($company, "companies|");
+    $session->saveChanges();
+    // => Document "companies/1" will be created
+\} finally \{
+    $session->close();
+\}
+
+// Increment the identity value on the server:
+// ===========================================
+
+// Define the next identity operation
+// Pass the collection name (can be with or without a pipe)
+$nextIdentityOp = new NextIdentityForOperation("companies|");
+
+// Execute the operation by passing it to Maintenance.Send
+// The latest value will be incremented to "2"
+// and the next document created with an identity will be assigned "3"
+$incrementedValue = $store->maintenance()->send($nextIdentityOp);
+
+// Create another document with an identity ID:
+// ============================================
+
+$session = $store->openSession();
+try \{
+    $company = new Company();
+    $company->setName("RavenDB");
+    $session->store($company, "companies|");
+    $session->saveChanges();
+    // => Document "companies/3" will be created
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`NextIdentityForOperation(?string $name);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------|--------|-------------------------------------------------|
+| **$name** | `?string` | The name of the collection to increment the identity value for |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-python.mdx
new file mode 100644
index 0000000000..27abeda1d5
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_increment-next-identity-python.mdx
@@ -0,0 +1,71 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `NextIdentityForOperation` to increment the latest identity value set on the server for the specified 
collection in the database.
+
+* The next document that will be created using an identity for the collection will receive the consecutive integer value.
+
+* In this page:
+
+    * [Increment the next identity value](../../../../client-api/operations/maintenance/identities/increment-next-identity.mdx#increment-the-next-identity-value)
+    * [Syntax](../../../../client-api/operations/maintenance/identities/increment-next-identity.mdx#syntax)
+
+
+## Increment the next identity value
+
+
+
+{`# Create a document with an identity ID:
+# ======================================
+
+with store.open_session() as session:
+    # Pass a collection name that ends with a pipe '|' to create an identity ID
+    session.store(Company(name="RavenDB"), "companies|")
+    session.save_changes()
+    # => Document "companies/1" will be created
+
+# Increment the identity value on the server:
+# ===========================================
+
+# Define the next identity operation
+# Pass the collection name (can be with or without a pipe)
+next_identity_op = NextIdentityForOperation("companies|")
+
+# Execute the operation by passing it to maintenance.send
+# The latest value will be incremented to "2"
+# and the next document created with an identity will be assigned "3"
+incremented_value = store.maintenance.send(next_identity_op)
+
+# Create another document with an identity ID:
+# ============================================
+
+with store.open_session() as session:
+    session.store(Company(name="RavenDB"), "companies|")
+    session.save_changes()
+    # => Document "companies/3" will be created
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`class NextIdentityForOperation(MaintenanceOperation[int]):
+    def __init__(self, name: str): ...
+`}
+
+
+
+| Parameter | Type  | Description                                                    |
+|-----------|-------|----------------------------------------------------------------|
+| **name**  | `str` | The collection name for which to increment the identity value |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-csharp.mdx
new file mode 100644
index 0000000000..689a772b0d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-csharp.mdx
@@ -0,0 +1,168 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `SeedIdentityForOperation` to set the latest identity value for the specified collection.
+
+* The next document that will be created using an identity for the collection will receive the consecutive integer value.
+
+* Identity values can also be managed from the Studio [identities view](../../../../studio/database/documents/identities-view.mdx).
+
+* In this page:
+    * [Set a higher identity value](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#set-a-higher-identity-value)
+    * [Force a lower identity value](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#force-a-lower-identity-value)
+    * [Syntax](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#syntax)
+
+
+## Set a higher identity value
+
+You can replace the latest identity value on the server with a new, **higher** number.
+
+
+
+
+{`// Seed a higher identity value on the server:
+// ===========================================
+
+// Define the seed identity operation. 
Pass:
+// * The collection name (can be with or without a pipe)
+// * The new value to set
+var seedIdentityOp = new SeedIdentityForOperation("companies|", 23);
+
+// Execute the operation by passing it to Maintenance.Send
+// The latest value on the server will be incremented to "23"
+// and the next document created with an identity will be assigned "24"
+long seededValue = store.Maintenance.Send(seedIdentityOp);
+
+// Create a document with an identity ID:
+// ======================================
+
+using (var session = store.OpenSession())
+{
+    session.Store(new Company { Name = "RavenDB" }, "companies|");
+    session.SaveChanges();
+    // => Document "companies/24" will be created
+}
+`}
+
+
+
+{`// Seed the identity value on the server:
+// ======================================
+
+// Define the seed identity operation. Pass:
+// * The collection name (can be with or without a pipe)
+// * The new value to set
+var seedIdentityOp = new SeedIdentityForOperation("companies|", 23);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+// The latest value on the server will be incremented to "23"
+// and the next document created with an identity will be assigned "24"
+long seededValue = await store.Maintenance.SendAsync(seedIdentityOp);
+
+// Create a document with an identity ID:
+// ======================================
+
+using (var asyncSession = store.OpenAsyncSession())
+{
+    await asyncSession.StoreAsync(new Company { Name = "RavenDB" }, "companies|");
+    await asyncSession.SaveChangesAsync();
+    // => Document "companies/24" will be created
+}
+`}
+
+
+
+
+
+## Force a lower identity value
+
+* You can set the latest identity value to a number that is **lower** than the current latest value.
+
+* Before proceeding, first ensure that documents with an identity value higher than the new number do not exist.
+
+
+
+
+{`// Force a smaller identity value on the server:
+// =============================================
+
+// Define the seed identity operation. Pass:
+// * The collection name (can be with or without a pipe)
+// * The new value to set
+// * Set 'forceUpdate' to true
+var seedIdentityOp = new SeedIdentityForOperation("companies|", 5, forceUpdate: true);
+
+// Execute the operation by passing it to Maintenance.Send
+// The latest value on the server will be decremented to "5"
+// and the next document created with an identity will be assigned "6"
+long seededValue = store.Maintenance.Send(seedIdentityOp);
+
+// Create a document with an identity ID:
+// ======================================
+
+using (var session = store.OpenSession())
+{
+    session.Store(new Company { Name = "RavenDB" }, "companies|");
+    session.SaveChanges();
+    // => Document "companies/6" will be created
+}
+`}
+
+
+
+{`// Force a smaller identity value on the server:
+// =============================================
+
+// Define the seed identity operation. 
Pass:
+// * The collection name (can be with or without a pipe)
+// * The new value to set
+// * Set 'forceUpdate' to true
+var seedIdentityOp = new SeedIdentityForOperation("companies|", 5, forceUpdate: true);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+// The latest value on the server will be decremented to "5"
+// and the next document created with an identity will be assigned "6"
+long seededValue = await store.Maintenance.SendAsync(seedIdentityOp);
+
+// Create a document with an identity ID:
+// ======================================
+
+using (var asyncSession = store.OpenAsyncSession())
+{
+    await asyncSession.StoreAsync(new Company { Name = "RavenDB" }, "companies|");
+    await asyncSession.SaveChangesAsync();
+    // => Document "companies/6" will be created
+}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public SeedIdentityForOperation(string name, long value, bool forceUpdate = false);
+`}
+
+
+
+| Parameter       | Type     | Description                                                                                                                      |
+|-----------------|----------|--------------------------------------------------------------------------------------------------------------------------------|
+| **name**        | `string` | The collection name to seed the identity value for.
Can end with or without a pipe (e.g. "companies" or "companies\|"). |
+| **value**       | `long`   | The number to set as the latest identity value. |
+| **forceUpdate** | `bool`   | `true` - force a new value that is lower than the latest.
`false` - only a higher value can be set. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-nodejs.mdx new file mode 100644 index 0000000000..035b520d01 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-nodejs.mdx @@ -0,0 +1,108 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `SeedIdentityForOperation` to set the latest identity value for the specified collection. + +* The next document that will be created using an identity for the collection will receive the consecutive integer value. + +* Identity values can also be managed from the Studio [identities view](../../../../studio/database/documents/identities-view.mdx). + +* In this page: + * [Set a higher identity value](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#set-a-higher-identity-value) + * [Force a lower identity value](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#force-a-lower-identity-value) + * [Syntax](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#syntax) + + +## Set a higher identity value + +You can replace the latest identity value on the server with a new, **higher** number. + + + +{`// Seed a higher identity value on the server: +// =========================================== + +// Define the seed identity operation. Pass: +// * The collection name (can be with or without a pipe) +// * The new value to set +const seedIdentityOp = new SeedIdentityForOperation("companies|", 23); + +// Execute the operation by passing it to maintenance.send +// The latest value on the server will be incremented to "23" +// and the next document created with an identity will be assigned "24" +const seededValue = await store.maintenance.send(seedIdentityOp); + +// Create a document with an identity ID: +// ====================================== + +const company = new Company(); +company.name = "RavenDB"; + +await session.store(company, "companies|"); +await session.saveChanges(); +// => Document "companies/24" will be created +`} + + + + + +## Force a lower identity value + +* You can set the latest identity value to a number that is **lower** than the current latest value. + +* Before proceeding, first ensure that documents with an identity value higher than the new number do not exist. + + + +{`// Force a smaller identity value on the server: +// ============================================= + +// Define the seed identity operation. 
Pass: +// * The collection name (can be with or without a pipe) +// * The new value to set +// * Pass 'true' to force the update +const seedIdentityOp = new SeedIdentityForOperation("companies|", 5, true); + +// Execute the operation by passing it to maintenance.send +// The latest value on the server will be decremented to "5" +// and the next document created with an identity will be assigned "6" +const seededValue = await store.maintenance.send(seedIdentityOp); + +// Create a document with an identity ID: +// ====================================== + +const company = new Company(); +company.name = "RavenDB"; + +await session.store(company, "companies|"); +await session.saveChanges(); +// => Document "companies/6" will be created +`} + + + + + +## Syntax + + + +{`const seedIdentityOp = new SeedIdentityForOperation(name, value, forceUpdate); +`} + + + +| Parameter | Type | Description | +|-----------------|-----------|--------------------------------------------------------------------------------------------------------------------------------| +| **name** | `string` | The collection name to seed the identity value for.
Can end with or without a pipe (e.g. "companies" or "companies\|"). |
+| **value**       | `number`  | The number to set as the latest identity value. |
+| **forceUpdate** | `boolean` | `true` - force a new value that is lower than the latest.
`false` - only a higher value can be set. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-php.mdx new file mode 100644 index 0000000000..601c5e7f14 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-php.mdx @@ -0,0 +1,117 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `SeedIdentityForOperation` to set the latest identity value for the specified collection. + +* The next document that will be created using an identity for the collection will receive the consecutive integer value. + +* Identity values can also be managed from the Studio [identities view](../../../../studio/database/documents/identities-view.mdx). + +* In this page: + * [Set a higher identity value](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#set-a-higher-identity-value) + * [Force a lower identity value](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#force-a-lower-identity-value) + * [Syntax](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#syntax) + + +## Set a higher identity value + +You can replace the latest identity value on the server with a new, **higher** number. + + + + +{`// Seed a higher identity value on the server: +// =========================================== + +// Define the seed identity operation. Pass: +// * The collection name (can be with or without a pipe) +// * The new value to set +$seedIdentityOp = new SeedIdentityForOperation("companies|", 23); + +// Execute the operation by passing it to Maintenance.Send +// The latest value on the server will be incremented to "23" +// and the next document created with an identity will be assigned "24" +$seededValue = $store->maintenance()->send($seedIdentityOp); + +// Create a document with an identity ID: +// ====================================== + +$session = $store->openSession(); +try \{ + $company = new Company(); + $company->setName("RavenDB"); + $session->store($company, "companies|"); + $session->saveChanges(); + // => Document "companies/24" will be created +\} finally \{ + $session->close(); +\} +`} + + + + + +## Force a lower identity value + +* You can set the latest identity value to a number that is **lower** than the current latest value. + +* Before proceeding, first ensure that documents with an identity value higher than the new number do not exist. + + + +{`// Force a smaller identity value on the server: +// ============================================= + +// Define the seed identity operation. 
Pass: +// * The collection name (can be with or without a pipe) +// * The new value to set +// * Set 'forceUpdate' to true +$seedIdentityOp = new SeedIdentityForOperation("companies|", 5, forceUpdate: true); + +// Execute the operation by passing it to Maintenance.Send +// The latest value on the server will be decremented to "5" +// and the next document created with an identity will be assigned "6" +$seededValue = $store->maintenance()->send($seedIdentityOp); + +// Create a document with an identity ID: +// ====================================== + +$session = $store->openSession(); +try \{ + $company = new Company(); + $company->setName("RavenDB"); + $session->store($company, "companies|"); + $session->saveChanges(); + // => Document "companies/6" will be created +\} finally \{ + $session->close(); +\} +`} + + + + + +## Syntax + + + +{`SeedIdentityForOperation(string $name, int $value, bool $forceUpdate = false) +`} + + + +| Parameter | Type | Description | +|------------------|-----------|--------------------------------------------------------------------------------------------------------------------------------| +| **$name** | `string ` | The collection name to seed the identity value for.
Can be specified with or without a trailing pipe (e.g. "companies" or "companies\|"). | + | **$value** | `int` | The number to set as the latest identity value. | + | **$forceUpdate** | `bool` | `True` - force a new value that is lower than the latest.
`False` - only a higher value can be set. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-python.mdx new file mode 100644 index 0000000000..dd1a357402 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/_seed-identity-python.mdx @@ -0,0 +1,106 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `SeedIdentityForOperation` to set the latest identity value for the specified collection. + +* The next document that will be created using an identity for the collection will receive the consecutive integer value. + +* Identity values can also be managed from the Studio [identities view](../../../../studio/database/documents/identities-view.mdx). + +* In this page: + * [Set a higher identity value](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#set-a-higher-identity-value) + * [Force a lower identity value](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#force-a-lower-identity-value) + * [Syntax](../../../../client-api/operations/maintenance/identities/seed-identity.mdx#syntax) + + +## Set a higher identity value + +You can replace the latest identity value on the server with a new, **higher** number. + + + + +{`# Seed a higher identity value on the server: +# =========================================== + +# Define the seed identity operation. Pass: +# * The collection name (can be with or without a pipe) +# * The new value to set +seed_identity_op = SeedIdentityForOperation("companies|", 23) + +# Execute the operation by passing it to maintenance.send +# The latest value on the server will be incremented to "23" +# and the next document created with an identity will be assigned "24" +seeded_value = store.maintenance.send(seed_identity_op) + +# Create a document with an identity ID: +# ====================================== + +with store.open_session() as session: + session.store(Company(name="RavenDB"), "companies|") + session.save_changes() + # => Document "companies/24" will be created +`} + + + + + +## Force a lower identity value + +* You can set the latest identity value to a number that is **lower** than the current latest value. + +* Before proceeding, first ensure that documents with an identity value higher than the new number do not exist. + + + +{`# Force a smaller identity value on the server: +# ============================================= + +# Define the seed identity operation. 
Pass: +# * The collection name (can be with or without a pipe) +# * The new value to set +# * Set 'force_update' to True +seed_identity_op = SeedIdentityForOperation("companies|", 5, force_update=True) + +# Execute the operation by passing it to maintenance.send +# The latest value on the server will be decremented to "5" +# and the next document created with an identity will be assigned "6" +seeded_value = store.maintenance.send(seed_identity_op) + +# Create a document with an identity ID: +# ====================================== + +with store.open_session() as session: + session.store(Company(name="RavenDB"), "companies|") + session.save_changes() + # => Document "companies/6" will be created +`} + + + + + +## Syntax + + + +{`class SeedIdentityForOperation(MaintenanceOperation[int]): + def __init__(self, name: str, value: int, force_update: bool = False): ... +`} + + + +| Parameter | Type | Description | +|------------------|--------|--------------------------------------------------------------------------------------------------------------------------------| +| **name** | `str` | The collection name to seed the identity value for.
Can be specified with or without a trailing pipe (e.g. "companies" or "companies\|"). | + | **value** | `int` | The number to set as the latest identity value. | + | **force_update** | `bool` | `True` - force a new value that is lower than the latest.
`False` - only a higher value can be set. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/get-identities.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/get-identities.mdx new file mode 100644 index 0000000000..7ae209fe63 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/get-identities.mdx @@ -0,0 +1,58 @@ +--- +title: "Get Identities Operation" +hide_table_of_contents: true +sidebar_label: Get Identities +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetIdentitiesCsharp from './_get-identities-csharp.mdx'; +import GetIdentitiesJava from './_get-identities-java.mdx'; +import GetIdentitiesPython from './_get-identities-python.mdx'; +import GetIdentitiesPhp from './_get-identities-php.mdx'; +import GetIdentitiesNodejs from './_get-identities-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/increment-next-identity.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/increment-next-identity.mdx new file mode 100644 index 0000000000..487d374e5c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/increment-next-identity.mdx @@ -0,0 +1,52 @@ +--- +title: "Increment Next Identity Operation" +hide_table_of_contents: true +sidebar_label: Increment Next Identity +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IncrementNextIdentityCsharp from './_increment-next-identity-csharp.mdx'; +import IncrementNextIdentityPython from './_increment-next-identity-python.mdx'; +import IncrementNextIdentityPhp from './_increment-next-identity-php.mdx'; +import IncrementNextIdentityNodejs from './_increment-next-identity-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/identities/seed-identity.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/seed-identity.mdx new file mode 100644 index 0000000000..2ee5cf8548 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/identities/seed-identity.mdx @@ -0,0 +1,52 @@ +--- +title: "Seed Identity Operation" +hide_table_of_contents: true +sidebar_label: Seed Identity +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SeedIdentityCsharp from './_seed-identity-csharp.mdx'; +import SeedIdentityPython from './_seed-identity-python.mdx'; +import SeedIdentityPhp from './_seed-identity-php.mdx'; +import SeedIdentityNodejs from './_seed-identity-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_category_.json b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_category_.json 
new file mode 100644 index 0000000000..e8c599ce5a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 4, + "label": "Indexes" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-csharp.mdx new file mode 100644 index 0000000000..f49ea78a8e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-csharp.mdx @@ -0,0 +1,59 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteIndexOperation` to remove an index from the database. + +* The index will be deleted from all the database-group nodes. + +* In this page: + * [Delete index example](../../../../client-api/operations/maintenance/indexes/delete-index.mdx#delete-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/delete-index.mdx#syntax) + + +## Delete index example + + + + +{`// Define the delete index operation, specify the index name
var deleteIndexOp = new DeleteIndexOperation("Orders/Totals");

// Execute the operation by passing it to Maintenance.Send
store.Maintenance.Send(deleteIndexOp);
`} + + + + +{`// Define the delete index operation, specify the index name
var deleteIndexOp = new DeleteIndexOperation("Orders/Totals");

// Execute the operation by passing it to Maintenance.SendAsync
await store.Maintenance.SendAsync(deleteIndexOp);
`} + + + + + + +## Syntax + + + +{`public DeleteIndexOperation(string indexName)
`} + + + +| Parameter | Type | Description | +|---------------|----------|-------------------------| +| **indexName** | `string` | Name of index to delete | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-csharp.mdx new file mode 100644 index 0000000000..590fc502e5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-csharp.mdx @@ -0,0 +1,104 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteIndexErrorsOperation` to delete indexing errors. + +* The operation will be executed only on the server node that is defined by the current [client-configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* Deleting the errors will only **clear the index errors**. + An index with an 'Error state' will Not be set back to 'Normal state'. + +* To just get index errors see [get index errors](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx). 
+ +* In this page: + * [Delete errors from all indexes](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#delete-errors-from-all-indexes) + * [Delete errors from specific indexes](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#delete-errors-from-specific-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#syntax) + + +## Delete errors from all indexes + + + + +{`// Define the delete index errors operation +var deleteIndexErrorsOp = new DeleteIndexErrorsOperation(); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(deleteIndexErrorsOp); + +// All errors from ALL indexes are deleted +`} + + + + +{`// Define the delete index errors operation +var deleteIndexErrorsOp = new DeleteIndexErrorsOperation(); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(deleteIndexErrorsOp); + +// All errors from ALL indexes are deleted +`} + + + + + + +## Delete errors from specific indexes + + + + +{`// Define the delete index errors operation from specific indexes +var deleteIndexErrorsOp = new DeleteIndexErrorsOperation(new[] { "Orders/Totals" }); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if any of the specified indexes do not exist +store.Maintenance.Send(deleteIndexErrorsOp); + +// Only errors from index "Orders/Totals" are deleted +`} + + + + +{`// Define the delete index errors operation from specific indexes +var deleteIndexErrorsOp = new DeleteIndexErrorsOperation(new[] { "Orders/Totals" }); + +// Execute the operation by passing it to Maintenance.SendAsync +// An exception will be thrown if any of the specified indexes do not exist +await store.Maintenance.SendAsync(deleteIndexErrorsOp); + +// Only errors from index "Orders/Totals" are deleted +`} + + + + + + +## Syntax + + + +{`// Available overloads: +public DeleteIndexErrorsOperation() // Delete errors from all indexes +public DeleteIndexErrorsOperation(string[] indexNames) // Delete errors from specific indexes +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexNames** | `string[]` | List of index names to delete errors from.
An exception is thrown if any of the specified indexes does not exist. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-nodejs.mdx new file mode 100644 index 0000000000..b959949b05 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-nodejs.mdx @@ -0,0 +1,75 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteIndexErrorsOperation` to delete indexing errors. + +* The operation will be executed only on the server node that is defined by the current [client-configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* Deleting the errors will only **clear the index errors**. + An index with an 'Error state' will Not be set back to 'Normal state'. + +* To just get index errors see [get index errors](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx). + +* In this page: + * [Delete errors from all indexes](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#delete-errors-from-all-indexes) + * [Delete errors from specific indexes](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#delete-errors-from-specific-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#syntax) + + +## Delete errors from all indexes + + + +{`// Define the delete index errors operation +const deleteIndexErrorsOp = new DeleteIndexErrorsOperation(); + +// Execute the operation by passing it to maintenance.send +await store.maintenance.send(deleteIndexErrorsOp); + +// All errors from ALL indexes are deleted +`} + + + + + +## Delete errors from specific indexes + + + +{`// Define the delete index errors operation from specific indexes +const deleteIndexErrorsOp = new DeleteIndexErrorsOperation(["Orders/Totals"]); + +// Execute the operation by passing it to maintenance.send +// An exception will be thrown if any of the specified indexes do not exist +await store.maintenance.send(deleteIndexErrorsOp); + +// Only errors from index "Orders/Totals" are deleted +`} + + + + + +## Syntax + + + +{`// Available overloads: +const deleteIndexErrorsOp = new DeleteIndexErrorsOperation(); // Delete errors from all indexes +const deleteIndexErrorsOp = new DeleteIndexErrorsOperation(indexNames); // Delete errors from specific indexes +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexNames** | `string[]` | List of index names to delete errors from.
An exception is thrown if any of the specified indexes does not exist. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-php.mdx new file mode 100644 index 0000000000..dfcc7fc261 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-php.mdx @@ -0,0 +1,75 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteIndexErrorsOperation` to delete indexing errors. + +* The operation will be executed only on the server node that is defined by the current [client-configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* Deleting the errors will only **clear the index errors**. + An index with an 'Error state' will Not be set back to 'Normal state'. + +* To just get index errors see [get index errors](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx). + +* In this page: + * [Delete errors from all indexes](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#delete-errors-from-all-indexes) + * [Delete errors from specific indexes](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#delete-errors-from-specific-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#syntax) + + +## Delete errors from all indexes + + + +{`// Define the delete index errors operation +$deleteIndexErrorsOp = new DeleteIndexErrorsOperation(); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($deleteIndexErrorsOp); + +// All errors from ALL indexes are deleted +`} + + + + + +## Delete errors from specific indexes + + + +{`// Define the delete index errors operation from specific indexes +$deleteIndexErrorsOp = new DeleteIndexErrorsOperation([ "Orders/Totals" ]); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if any of the specified indexes do not exist +$store->maintenance()->send($deleteIndexErrorsOp); + +// Only errors from index "Orders/Totals" are deleted +`} + + + + + +## Syntax + + + +{`// Available overloads: +DeleteIndexErrorsOperation() // Delete errors from all indexes +DeleteIndexErrorsOperation(StringArray|array|string $indexNames) // Delete errors from specific indexes +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **$indexNames** | `StringArray`
`array`
`string` | List of index names to delete errors from.
An exception is thrown if any of the specified indexes does not exist. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-python.mdx new file mode 100644 index 0000000000..2b29180f6a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-errors-python.mdx @@ -0,0 +1,74 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteIndexErrorsOperation` to delete indexing errors. + +* The operation will be executed only on the server node that is defined by the current [client-configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* Deleting the errors will only **clear the index errors**. + An index with an 'Error state' will Not be set back to 'Normal state'. + +* To just get index errors see [get index errors](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx). + +* In this page: + * [Delete errors from all indexes](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#delete-errors-from-all-indexes) + * [Delete errors from specific indexes](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#delete-errors-from-specific-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx#syntax) + + +## Delete errors from all indexes + + + +{`# Define the delete index errors operation +delete_index_errors_op = DeleteIndexErrorsOperation() + +# Execute the operation by passing it to maintenance.send +store.maintenance.send(delete_index_errors_op) + +# All errors from ALL indexes are deleted +`} + + + + + +## Delete errors from specific indexes + + + +{`# Define the delete index errors operation from specific indexes +delete_index_errors_op = DeleteIndexErrorsOperation(["Orders/Totals"]) + +# Execute the operation by passing it to maintenance.send +# An exception will be thrown if any of the specified indexes do not exist +store.maintenance.send(delete_index_errors_op) + +# Only errors from index "Orders/Totals" are deleted +`} + + + + + +## Syntax + + + +{`class DeleteIndexErrorsOperation(VoidMaintenanceOperation): + def __init__(self, index_names: List[str] = None): ... +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **index_names** | `List[str]` | List of index names to delete errors from.
An exception is thrown if any of the specified indexes does not exist. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-java.mdx new file mode 100644 index 0000000000..171b5da458 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-java.mdx @@ -0,0 +1,30 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**DeleteIndexOperation** is used to remove an index from a database. + +## Syntax + + + +{`public DeleteIndexOperation(String indexName) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **indexName** | String | name of an index to delete | + +## Example + + + +{`store.maintenance().send(new DeleteIndexOperation("Orders/Totals")); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-nodejs.mdx new file mode 100644 index 0000000000..7b376410f2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-nodejs.mdx @@ -0,0 +1,47 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteIndexOperation` to remove an index from the database. + +* The index will be deleted from all the database-group nodes. + +* In this page: + * [Delete index example](../../../../client-api/operations/maintenance/indexes/delete-index.mdx#delete-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/delete-index.mdx#syntax) + + +## Delete index example + + + +{`// Define the delete index operation, specify the index name +const deleteIndexOp = new DeleteIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to maintenance.send +await store.maintenance.send(deleteIndexOp); +`} + + + + + +## Syntax + + + +{`const deleteIndexOp = new DeleteIndexOperation(indexName); +`} + + + +| Parameters | Type | Description | +|- | - | - | +| **indexName** | `string` | Name of index to delete | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-php.mdx new file mode 100644 index 0000000000..c78c6613fd --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-php.mdx @@ -0,0 +1,47 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteIndexOperation` to remove an index from the database. + +* The index will be deleted from all the database-group nodes. 
+ + * In this page: + * [Delete index example](../../../../client-api/operations/maintenance/indexes/delete-index.mdx#delete-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/delete-index.mdx#syntax) + + +## Delete index example + + + +{`// Define the delete index operation, specify the index name
$deleteIndexOp = new DeleteIndexOperation("Orders/Totals");

// Execute the operation by passing it to Maintenance.Send
$store->maintenance()->send($deleteIndexOp);
`} + + + + + +## Syntax + + + +{`DeleteIndexOperation(?string $indexName)
`} + + + +| Parameters | Type | Description | +|- | - | - | +| **$indexName** | `?string` | Name of index to delete | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-python.mdx new file mode 100644 index 0000000000..3317174ee4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_delete-index-python.mdx @@ -0,0 +1,48 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteIndexOperation` to remove an index from the database. + +* The index will be deleted from all the database-group nodes. + +* In this page: + * [Delete index example](../../../../client-api/operations/maintenance/indexes/delete-index.mdx#delete-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/delete-index.mdx#syntax) + + +## Delete index example + + + +{`# Define the delete index operation, specify the index name
delete_index_op = DeleteIndexOperation("Orders/Totals")

# Execute the operation by passing it to maintenance.send
store.maintenance.send(delete_index_op)
`} + + + + + +## Syntax + + + +{`class DeleteIndexOperation(VoidMaintenanceOperation):
    def __init__(self, index_name: str): ...
`} + + + +| Parameters | Type | Description | +|- | - | - | +| **index_name** | `str` | Name of index to delete | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-csharp.mdx new file mode 100644 index 0000000000..4175b2ec49 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-csharp.mdx @@ -0,0 +1,169 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can **disable a specific index** by any of the following: + * From the Client API - using `DisableIndexOperation` + * From Studio - see [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) + * Via the file system + +* To learn how to enable a disabled index, see [Enable index operation](../../../../client-api/operations/maintenance/indexes/enable-index.mdx). 
+ +* In this page: + + * [Overview](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#overview) + * [Which node is the index disabled on?](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#which-node-is-the-index-disabled-on) + * [What happens when the index is disabled?](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#what-happens-when-the-index-is-disabled) + + * [Disable index from the Client API](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-from-the-client-api) + * [Disable index - single node](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index---single-node) + * [Disable index - cluster wide](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index---cluster-wide) + * [Syntax](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#syntax) + + * [Disable index manually via the file system](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-manually-via-the-file-system) + + +## Overview + +#### Which node is the index disabled on? + +* The index can be disabled either: + * On a single node, or + * Cluster wide - on all database-group nodes. + +* When disabling the index from the **client API** on a single node: + The index will be disabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When disabling an index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index will be disabled on the local node the browser is opened on, even if it is Not the preferred node. + +* When disabling the index [manually](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-via-the-file-system): + The index will be disabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. +#### What happens when the index is disabled? + +* No indexing will be done by a disabled index on the node where index is disabled. + However, new data will be indexed by the index on other database-group nodes where it is not disabled. + +* You can still query the index, + but results may be stale when querying a node on which the index was disabled. + +* Disabling an index is a **persistent operation**: + * The index will remain disabled even after restarting the server or after [disabling/enabling](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) the database. + * To only pause the index and resume after a restart see: [pause index operation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx). 
+ + + +## Disable index from the Client API + +#### Disable index - single node: + + + + +{`// Define the disable index operation +// Use this overload to disable on a single node +var disableIndexOp = new DisableIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(disableIndexOp); + +// At this point, the index is disabled only on the 'preferred node' +// New data will not be indexed on this node only +`} + + + + +{`// Define the disable index operation +// Use this overload to disable on a single node +var disableIndexOp = new DisableIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(disableIndexOp); + +// At this point, the index is disabled only on the 'preferred node' +// New data will not be indexed on this node only +`} + + + +#### Disable index - cluster wide: + + + + +{`// Define the disable index operation +// Pass 'true' to disable the index on all nodes in the database-group +var disableIndexOp = new DisableIndexOperation("Orders/Totals", true); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(disableIndexOp); + +// At this point, the index is disabled on ALL nodes +// New data will not be indexed +`} + + + + +{`// Define the disable index operation +// Pass 'true' to disable the index on all nodes in the database-group +var disableIndexOp = new DisableIndexOperation("Orders/Totals", true); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(disableIndexOp); + +// At this point, the index is disabled on ALL nodes +// New data will not be indexed +`} + + + +#### Syntax: + + + +{`// Available overloads: +public DisableIndexOperation(string indexName) +public DisableIndexOperation(string indexName, bool clusterWide) +`} + + + +| Parameter | Type | Description | +|-----------------|----------|--------------------------------------------------------------------------------------------------------------------------| +| **indexName** | `string` | Name of index to disable | +| **clusterWide** | `bool` | `true` - Disable index on all database-group nodes
`false` - Disable index only on a single node (the preferred node) | + + + +## Disable index manually via the file system + +* It may sometimes be useful to disable an index manually, through the file system. + For example, a faulty index may load before [DisableIndexOperation](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disableindexoperation) gets a chance to disable it. + Manually disabling the index will ensure that the index is not loaded. + +* To **manually disable** an index: + + * Place a file named `disable.marker` in the [index directory](../../../../server/storage/directory-structure.mdx). + Indexes are kept under the database directory, each index in a directory whose name is similar to the index's. + * The `disable.marker` file can be empty, + and can be created by any available method, e.g. using the File Explorer, a terminal, or code. + +* Attempting to use a manually disabled index will generate the following exception: + + Unable to open index: '{IndexName}', + it has been manually disabled via the file: '{disableMarkerPath}'. + To re-enable, remove the disable.marker file and enable indexing. + +* To **enable** a manually disabled index: + + * First, remove the `disable.marker` file from the index directory. + * Then, enable the index by any of the options described in: [How to enable an index](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#how-to-enable-an-index). + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-java.mdx new file mode 100644 index 0000000000..dade7167e1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-java.mdx @@ -0,0 +1,37 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The **DisableIndexOperation** is used to turn off indexing for a given index. Querying a `disabled` index is allowed, but it may return stale results. + + +Unlike [StopIndex](../../../../client-api/operations/maintenance/indexes/stop-index.mdx) or [StopIndexing](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx), disabling an index is a persistent operation, so the index remains disabled even after a server restart. 
+ + + +## Syntax + + + +{`public DisableIndexOperation(String indexName)
`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **indexName** | String | name of the index to disable | + +## Example + + + +{`store.maintenance().send(new DisableIndexOperation("Orders/Totals"));
// index is disabled at this point, new data won't be indexed
// but you can still query on this index
`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-nodejs.mdx new file mode 100644 index 0000000000..32e0687c21 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-nodejs.mdx @@ -0,0 +1,135 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can **disable a specific index** by any of the following: + * From the Client API - using `DisableIndexOperation` + * From Studio - see [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) + * Via the file system + +* To learn how to enable a disabled index, see [Enable index operation](../../../../client-api/operations/maintenance/indexes/enable-index.mdx). + +* In this page: + + * [Overview](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#overview) + * [Which node is the index disabled on?](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#which-node-is-the-index-disabled-on) + * [What happens when the index is disabled?](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#what-happens-when-the-index-is-disabled) + + * [Disable index from the Client API](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-from-the-client-api) + * [Disable index - single node](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index---single-node) + * [Disable index - cluster wide](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index---cluster-wide) + * [Syntax](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#syntax) + + * [Disable index manually via the file system](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-manually-via-the-file-system) + + +## Overview + +#### Which node is the index disabled on? + +* The index can be disabled either: + * On a single node, or + * Cluster wide - on all database-group nodes. + +* When disabling the index from the **client API** on a single node: + The index will be disabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When disabling an index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index will be disabled on the local node the browser is opened on, even if it is Not the preferred node. + +* When disabling the index [manually](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-via-the-file-system): + The index will be disabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. 
+#### What happens when the index is disabled? + +* No indexing will be done by a disabled index on the node where index is disabled. + However, new data will be indexed by the index on other database-group nodes where it is not disabled. + +* You can still query the index, + but results may be stale when querying a node on which the index was disabled. + +* Disabling an index is a **persistent operation**: + * The index will remain disabled even after restarting the server or after [disabling/enabling](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) the database. + * To only pause the index and resume after a restart see: [pause index operation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx). + + + +## Disable index from the Client API + +#### Disable index - single node: + + + +{`// Define the disable index operation +// Use this overload to disable on a single node +const disableIndexOp = new DisableIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(disableIndexOp); + +// At this point, the index is disabled only on the 'preferred node' +// New data will not be indexed on this node only +`} + + +#### Disable index - cluster wide: + + + +{`// Define the disable index operation +// Pass 'true' to disable the index on all nodes in the database-group +const disableIndexOp = new DisableIndexOperation("Orders/Totals", true); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(disableIndexOp); + +// At this point, the index is disabled on ALL nodes +// New data will not be indexed +`} + + +#### Syntax: + + + +{`const disableIndexOp = new DisableIndexOperation(indexName, clusterWide = false); +`} + + + +| Parameter | Type | Description | +|-----------------|-----------|--------------------------------------------------------------------------------------------------------------------------| +| **indexName** | `string` | Name of index to disable | +| **clusterWide** | `boolean` | `true` - Disable index on all database-group nodes
`false` - Disable index only on a single node (the preferred node) | + + + +## Disable index manually via the file system + +* It may sometimes be useful to disable an index manually, through the file system. + For example, a faulty index may load before [DisableIndexOperation](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disableindexoperation) gets a chance to disable it. + Manually disabling the index will ensure that the index is not loaded. + +* To **manually disable** an index: + + * Place a file named `disable.marker` in the [index directory](../../../../server/storage/directory-structure.mdx). + Indexes are kept under the database directory, each index in a directory whose name is similar to the index's. + * The `disable.marker` file can be empty, + and can be created by any available method, e.g. using the File Explorer, a terminal, or code. + +* Attempting to use a manually disabled index will generate the following exception: + + Unable to open index: '{IndexName}', + it has been manually disabled via the file: '{disableMarkerPath}'. + To re-enable, remove the disable.marker file and enable indexing. + +* To **enable** a manually disabled index: + + * First, remove the `disable.marker` file from the index directory. + * Then, enable the index by any of the options described in: [How to enable an index](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#how-to-enable-an-index). + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-php.mdx new file mode 100644 index 0000000000..2fd7f208e2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-php.mdx @@ -0,0 +1,135 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can **disable a specific index** by any of the following: + * From the Client API - using `DisableIndexOperation` + * From Studio - see [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) + * Via the file system + +* To learn how to enable a disabled index, see [Enable index operation](../../../../client-api/operations/maintenance/indexes/enable-index.mdx). 
+ +* In this page: + + * [Overview](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#overview) + * [Which node is the index disabled on?](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#which-node-is-the-index-disabled-on) + * [What happens when the index is disabled?](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#what-happens-when-the-index-is-disabled) + + * [Disable index from the Client API](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-from-the-client-api) + * [Disable index - single node](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index---single-node) + * [Disable index - cluster wide](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index---cluster-wide) + * [Syntax](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#syntax) + + * [Disable index manually via the file system](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-manually-via-the-file-system) + + +## Overview + +#### Which node is the index disabled on? + +* The index can be disabled either: + * On a single node, or + * Cluster wide - on all database-group nodes. + +* When disabling the index from the **client API** on a single node: + The index will be disabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When disabling an index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index will be disabled on the local node the browser is opened on, even if it is Not the preferred node. + +* When disabling the index [manually](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-via-the-file-system): + The index will be disabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. +#### What happens when the index is disabled? + +* No indexing will be done by a disabled index on the node where index is disabled. + However, new data will be indexed by the index on other database-group nodes where it is not disabled. + +* You can still query the index, + but results may be stale when querying a node on which the index was disabled. + +* Disabling an index is a **persistent operation**: + * The index will remain disabled even after restarting the server or after [disabling/enabling](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) the database. + * To only pause the index and resume after a restart see: [pause index operation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx). 
+ + + +## Disable index from the Client API + +#### Disable index - single node: + + + +{`// Define the disable index operation +// Use this overload to disable on a single node +$disableIndexOp = new DisableIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($disableIndexOp); + +// At this point, the index is disabled only on the 'preferred node' +// New data will not be indexed on this node only +`} + + +#### Disable index - cluster wide: + + + +{`// Define the disable index operation +// Pass 'true' to disable the index on all nodes in the database-group +$disableIndexOp = new DisableIndexOperation("Orders/Totals", true); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($disableIndexOp); + +// At this point, the index is disabled on ALL nodes +// New data will not be indexed +`} + + +#### Syntax: + + + +{`DisableIndexOperation(?string $indexName, bool $clusterWide = false) +`} + + + +| Parameter | Type | Description | +|------------------|--------|--------------------------------------------------------------------------------------------------------------------------| +| **$indexName** | `?string` | Name of index to disable | +| **$clusterWide** | `bool` | `true` - Disable index on all database-group nodes
`false` - Disable index only on a single node (the preferred node) | + + + +## Disable index manually via the file system + +* It may sometimes be useful to disable an index manually, through the file system. + For example, a faulty index may load before [DisableIndexOperation](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disableindexoperation) gets a chance to disable it. + Manually disabling the index will ensure that the index is not loaded. + +* To **manually disable** an index: + + * Place a file named `disable.marker` in the [index directory](../../../../server/storage/directory-structure.mdx). + Indexes are kept under the database directory, each index in a directory whose name is similar to the index's. + * The `disable.marker` file can be empty, + and can be created by any available method, e.g. using the File Explorer, a terminal, or code. + +* Attempting to use a manually disabled index will generate the following exception: + + Unable to open index: '{IndexName}', + it has been manually disabled via the file: '{disableMarkerPath}'. + To re-enable, remove the disable.marker file and enable indexing. + +* To **enable** a manually disabled index: + + * First, remove the `disable.marker` file from the index directory. + * Then, enable the index by any of the options described in: [How to enable an index](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#how-to-enable-an-index). + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-python.mdx new file mode 100644 index 0000000000..76bd9e8128 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_disable-index-python.mdx @@ -0,0 +1,136 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can **disable a specific index** by any of the following: + * From the Client API - using `DisableIndexOperation` + * From Studio - see [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) + * Via the file system + +* To learn how to enable a disabled index, see [Enable index operation](../../../../client-api/operations/maintenance/indexes/enable-index.mdx). 
+ +* In this page: + + * [Overview](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#overview) + * [Which node is the index disabled on?](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#which-node-is-the-index-disabled-on) + * [What happens when the index is disabled?](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#what-happens-when-the-index-is-disabled) + + * [Disable index from the Client API](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-from-the-client-api) + * [Disable index - single node](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index---single-node) + * [Disable index - cluster wide](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index---cluster-wide) + * [Syntax](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#syntax) + + * [Disable index manually via the file system](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-manually-via-the-file-system) + + +## Overview + +#### Which node is the index disabled on? + +* The index can be disabled either: + * On a single node, or + * Cluster wide - on all database-group nodes. + +* When disabling the index from the **client API** on a single node: + The index will be disabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When disabling an index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index will be disabled on the local node the browser is opened on, even if it is Not the preferred node. + +* When disabling the index [manually](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-via-the-file-system): + The index will be disabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. +#### What happens when the index is disabled? + +* No indexing will be done by a disabled index on the node where index is disabled. + However, new data will be indexed by the index on other database-group nodes where it is not disabled. + +* You can still query the index, + but results may be stale when querying a node on which the index was disabled. + +* Disabling an index is a **persistent operation**: + * The index will remain disabled even after restarting the server or after [disabling/enabling](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) the database. + * To only pause the index and resume after a restart see: [pause index operation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx). 
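The manual file-system option referenced above (and detailed at the end of this page) comes down to placing an empty `disable.marker` file in the index directory. A minimal sketch of that step, using only the Python standard library; the directory path is a placeholder and depends on your server's storage configuration:

```python
from pathlib import Path

# Placeholder path - index directories live under the database directory,
# each named after its index (adjust to your storage layout)
index_dir = Path("/var/lib/ravendb/Databases/Northwind/Indexes/Orders_Totals")

# An empty 'disable.marker' file in the index directory
# prevents the server from loading the index
(index_dir / "disable.marker").touch()
```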
+ + + +## Disable index from the Client API + +#### Disable index - single node: + + + +{`# Define the disable index operation +# Use this args set to disable on a single node +disable_index_op = DisableIndexOperation("Orders/Totals") + +# Execute the operation by passing it to maintenance.send +store.maintenance.send(disable_index_op) + +# At this point, the index is disabled only on the 'preferred node' +# New data will not be indexed on this node only +`} + + +#### Disable index - cluster wide: + + + +{`# Define the disable index operation +# Pass 'True' to disable the index on all nodes in the database-group +disable_index_op = DisableIndexOperation("Orders/Totals", True) + +# Execute the operation by passing it to maintenance.send +store.maintenance.send(disable_index_op) + +# At this point, the index is disabled on ALL nodes +# New data will not be indexed +`} + + +#### Syntax: + + + +{`class DisableIndexOperation(VoidMaintenanceOperation): + def __init__(self, index_name: str, cluster_wide: bool = False): ... +`} + + + +| Parameter | Type | Description | +|------------------|--------|--------------------------------------------------------------------------------------------------------------------------| +| **index_name** | `str` | Name of index to disable | +| **cluster_wide** | `bool` | `True` - Disable index on all database-group nodes
`False` - Disable index only on a single node (the preferred node) | + + + +## Disable index manually via the file system + +* It may sometimes be useful to disable an index manually, through the file system. + For example, a faulty index may load before [DisableIndexOperation](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disableindexoperation) gets a chance to disable it. + Manually disabling the index will ensure that the index is not loaded. + +* To **manually disable** an index: + + * Place a file named `disable.marker` in the [index directory](../../../../server/storage/directory-structure.mdx). + Indexes are kept under the database directory, each index in a directory whose name is similar to the index's. + * The `disable.marker` file can be empty, + and can be created by any available method, e.g. using the File Explorer, a terminal, or code. + +* Attempting to use a manually disabled index will generate the following exception: + + Unable to open index: '{IndexName}', + it has been manually disabled via the file: '{disableMarkerPath}'. + To re-enable, remove the disable.marker file and enable indexing. + +* To **enable** a manually disabled index: + + * First, remove the `disable.marker` file from the index directory. + * Then, enable the index by any of the options described in: [How to enable an index](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#how-to-enable-an-index). + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-csharp.mdx new file mode 100644 index 0000000000..b1f14e24d2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-csharp.mdx @@ -0,0 +1,134 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When an index is enabled, indexing will take place, and new data will be indexed. + +* To learn how to disable an index, see [disable index](../../../../client-api/operations/maintenance/indexes/disable-index.mdx). + +* In this page: + * [How to enable an index](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#how-to-enable-an-index) + * [Enable index from the Client API](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index-from-the-client-api) + * [Enable index - single node](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index---single-node) + * [Enable index - cluster wide](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index---cluster-wide) + * [Syntax](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#syntax) + + +## How to enable an index + +* **From the Client API**: + Use `EnableIndexOperation` to enable the index from the Client API. + The index can be enabled: + * On a single node. + * Cluster wide, on all database-group nodes. + +* **From Studio**: + To enable the index from Studio, go to the [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions). + +* **Reset index**: + [Resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) a disabled index will re-enable the index + locally, on the node that the reset operation was performed on. 
+ +* **Modify index definition**: + Modifying the index definition will also re-enable the normal operation of the index. + +* The above methods can also be used to enable an index that was + [disabled via the file system](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-manually-via-the-file-system), + after removing the `disable.marker` file. + + + +## Enable index from the Client API + +#### Enable index - single node: + +* With this option, the index will be enabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only. + The preferred node is simply the first node in the [database group topology](../../../../studio/database/settings/manage-database-group.mdx). + +* Note: When enabling an index from [Studio](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions), + the index will be enabled on the local node the browser is opened on, even if it is Not the preferred node. + + + + +{`// Define the enable index operation +// Use this overload to enable on a single node +var enableIndexOp = new EnableIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(enableIndexOp); + +// At this point, the index is enabled on the 'preferred node' +// New data will be indexed on this node +`} + + + + +{`// Define the enable index operation +// Use this overload to enable on a single node +var enableIndexOp = new EnableIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(enableIndexOp); + +// At this point, the index is enabled on the 'preferred node' +// New data will be indexed on this node +`} + + + +#### Enable index - cluster wide: + + + + +{`// Define the enable index operation +// Pass 'true' to enable the index on all nodes in the database-group +var enableIndexOp = new EnableIndexOperation("Orders/Totals", true); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(enableIndexOp); + +// At this point, the index is enabled on ALL nodes +// New data will be indexed +`} + + + + +{`// Define the enable index operation +// Pass 'true' to enable the index on all nodes in the database-group +var enableIndexOp = new EnableIndexOperation("Orders/Totals", true); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(enableIndexOp); + +// At this point, the index is enabled on ALL nodes +// New data will be indexed +`} + + + +#### Syntax: + + + +{`// Available overloads: +public EnableIndexOperation(string indexName) +public EnableIndexOperation(string indexName, bool clusterWide) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexName** | `string` | Name of index to enable | +| **clusterWide** | `bool` | `true` - Enable index on all database-group nodes
`false` - Enable index only on a single node (the preferred node) | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-java.mdx new file mode 100644 index 0000000000..0d8dad2ba3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-java.mdx @@ -0,0 +1,31 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The **EnableIndexOperation** is used to turn on the indexing for a given index. + + +## Syntax + + + +{`public EnableIndexOperation(String indexName) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **indexName** | String | name of an index to enable indexing | + +## Example + + + +{`store.maintenance().send(new EnableIndexOperation("Orders/Totals")); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-nodejs.mdx new file mode 100644 index 0000000000..2606b0e7a1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-nodejs.mdx @@ -0,0 +1,100 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When an index is enabled, indexing will take place, and new data will be indexed. + +* To learn how to disable an index, see [disable index](../../../../client-api/operations/maintenance/indexes/disable-index.mdx). + +* In this page: + * [How to enable an index](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#how-to-enable-an-index) + * [Enable index from the Client API](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index-from-the-client-api) + * [Enable index - single node](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index---single-node) + * [Enable index - cluster wide](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index---cluster-wide) + * [Syntax](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#syntax) + + +## How to enable an index + +* **From the Client API**: + Use `EnableIndexOperation` to enable the index from the Client API. + The index can be enabled: + * On a single node. + * Cluster wide, on all database-group nodes. + +* **From Studio**: + To enable the index from Studio go to the [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions). + +* **Reset index**: + [Resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) a disabled index will re-enable the index + locally, on the node that the reset operation was performed on. + +* **Modify index definition**: + Modifying the index definition will also re-enable the normal operation of the index. + +* The above methods can also be used to enable an index that was + [disabled via the file system](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-manually-via-the-file-system), + after removing the `disable.marker` file. 
+ + + +## Enable index from the Client API + +#### Enable index - single node: + +* With this option, the index will be enabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only. + The preferred node is simply the first node in the [database group topology](../../../../studio/database/settings/manage-database-group.mdx). + +* Note: When enabling an index from [Studio](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions), + the index will be enabled on the local node the browser is opened on, even if it is Not the preferred node. + + + +{`// Define the enable index operation +// Use this overload to enable on a single node +const enableIndexOp = new EnableIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(enableIndexOp); + +// At this point, the index is enabled on the 'preferred node' +// New data will be indexed on this node +`} + + +#### Enable index - cluster wide: + + + +{`// Define the enable index operation +// Pass 'true' to enable the index on all nodes in the database-group +const enableIndexOp = new EnableIndexOperation("Orders/Totals", true); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(enableIndexOp); + +// At this point, the index is enabled on ALL nodes +// New data will be indexed +`} + + +#### Syntax: + + + +{`const enableIndexOp = new EnableIndexOperation(indexName, clusterWide = false); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexName** | `string` | Name of index to enable | +| **clusterWide** | `bool` | `true` - Enable index on all database-group nodes
`false` - Enable index only on a single node (the preferred node) | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-php.mdx new file mode 100644 index 0000000000..9f584de502 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-php.mdx @@ -0,0 +1,101 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When an index is enabled, indexing will take place, and new data will be indexed. + +* To learn how to disable an index, see [disable index](../../../../client-api/operations/maintenance/indexes/disable-index.mdx). + +* In this page: + * [How to enable an index](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#how-to-enable-an-index) + * [Enable index from the Client API](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index-from-the-client-api) + * [Enable index - single node](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index---single-node) + * [Enable index - cluster wide](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index---cluster-wide) + * [Syntax](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#syntax) + + +## How to enable an index + +* **From the Client API**: + Use `EnableIndexOperation` to enable the index from the Client API. + The index can be enabled: + * On a single node. + * Cluster wide, on all database-group nodes. + +* **From Studio**: + To enable the index from Studio go to the [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions). + +* **Reset index**: + [Resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) a disabled index will re-enable the index + locally, on the node that the reset operation was performed on. + +* **Modify index definition**: + Modifying the index definition will also re-enable the normal operation of the index. + +* The above methods can also be used to enable an index that was + [disabled via the file system](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-manually-via-the-file-system), + after removing the `disable.marker` file. + + + +## Enable index from the Client API + +#### Enable index - single node: + +* With this option, the index will be enabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only. + The preferred node is simply the first node in the [database group topology](../../../../studio/database/settings/manage-database-group.mdx). + +* Note: When enabling an index from [Studio](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions), + the index will be enabled on the local node the browser is opened on, even if it is Not the preferred node. 
+ + + +{`// Define the enable index operation +// Use this overload to enable on a single node +$enableIndexOp = new EnableIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($enableIndexOp); + +// At this point, the index is enabled on the 'preferred node' +// New data will be indexed on this node +`} + + +#### Enable index - cluster wide: + + + +{`// Define the enable index operation +// Pass 'true' to enable the index on all nodes in the database-group +$enableIndexOp = new EnableIndexOperation("Orders/Totals", true); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($enableIndexOp); + +// At this point, the index is enabled on ALL nodes +// New data will be indexed +`} + + +#### Syntax: + + + +{`// Available overloads: +EnableIndexOperation(?string $indexName, bool clusterWide = false) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **$indexName** | `?string` | Name of index to enable | +| **$clusterWide** | `bool` | `true` - Enable index on all database-group nodes
`false` - Enable index only on a single node (the preferred node) | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-python.mdx new file mode 100644 index 0000000000..f82f14199e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_enable-index-python.mdx @@ -0,0 +1,101 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When an index is enabled, indexing will take place, and new data will be indexed. + +* To learn how to disable an index, see [disable index](../../../../client-api/operations/maintenance/indexes/disable-index.mdx). + +* In this page: + * [How to enable an index](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#how-to-enable-an-index) + * [Enable index from the Client API](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index-from-the-client-api) + * [Enable index - single node](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index---single-node) + * [Enable index - cluster wide](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#enable-index---cluster-wide) + * [Syntax](../../../../client-api/operations/maintenance/indexes/enable-index.mdx#syntax) + + +## How to enable an index + +* **From the Client API**: + Use `EnableIndexOperation` to enable the index from the Client API. + The index can be enabled: + * On a single node. + * Cluster wide, on all database-group nodes. + +* **From Studio**: + To enable the index from Studio go to the [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions). + +* **Reset index**: + [Resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) a disabled index will re-enable the index + locally, on the node that the reset operation was performed on. + +* **Modify index definition**: + Modifying the index definition will also re-enable the normal operation of the index. + +* The above methods can also be used to enable an index that was + [disabled via the file system](../../../../client-api/operations/maintenance/indexes/disable-index.mdx#disable-index-manually-via-the-file-system), + after removing the `disable.marker` file. + + + +## Enable index from the Client API + +#### Enable index - single node: + +* With this option, the index will be enabled on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only. + The preferred node is simply the first node in the [database group topology](../../../../studio/database/settings/manage-database-group.mdx). + +* Note: When enabling an index from [Studio](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions), + the index will be enabled on the local node the browser is opened on, even if it is Not the preferred node. 
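+
+* For illustration, below is a minimal sketch that enables an index on a single node and then
+  reads back the index state from the node the client is talking to, using
+  `GetIndexStatisticsOperation` (which, as noted in the get-index articles, reports the state on the local node).
+  The server URL, database name, and import paths are assumptions - adjust them to your setup.
+
+
+
+{`from ravendb import DocumentStore
+# Import paths may vary between client versions - verify against your client
+from ravendb.documents.operations.indexes import (
+    EnableIndexOperation,
+    GetIndexStatisticsOperation,
+)
+
+# Assumed connection details - replace with your own
+store = DocumentStore(urls=["http://localhost:8080"], database="Northwind")
+store.initialize()
+
+# Enable the index on the preferred node only
+store.maintenance.send(EnableIndexOperation("Orders/Totals"))
+
+# Read the index statistics from the node the client is connected to;
+# the returned stats expose the current index state
+# (exact attribute names may vary between client versions)
+stats = store.maintenance.send(GetIndexStatisticsOperation("Orders/Totals"))
+print(stats.state)
+`}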
+
+
+
+{`# Define the enable index operation
+# Use this argument set to enable the index on a single node
+enable_index_op = EnableIndexOperation("Orders/Totals")
+
+# Execute the operation by passing it to maintenance.send
+store.maintenance.send(enable_index_op)
+
+# At this point, the index is enabled on the 'preferred node'
+# New data will be indexed on this node
+`}
+
+
+#### Enable index - cluster wide:
+
+
+
+{`# Define the enable index operation
+# Pass 'True' to enable the index on all nodes in the database-group
+enable_index_op = EnableIndexOperation("Orders/Totals", True)
+
+# Execute the operation by passing it to maintenance.send
+store.maintenance.send(enable_index_op)
+
+# At this point, the index is enabled on ALL nodes
+# New data will be indexed
+`}
+
+
+#### Syntax:
+
+
+
+{`class EnableIndexOperation(VoidMaintenanceOperation):
+    def __init__(self, index_name: str, cluster_wide: bool = False): ...
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **index_name** | `str` | Name of index to enable |
+| **cluster_wide** | `bool` | `True` - Enable index on all database-group nodes<br/>`False` - Enable index only on a single node (the preferred node) |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-csharp.mdx
new file mode 100644
index 0000000000..b9156e0f96
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-csharp.mdx
@@ -0,0 +1,80 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `GetIndexOperation` to retrieve an index definition from the database.
+
+* The operation will execute on the node defined by the [client configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node).
+  However, the index definition returned is taken from the database record,
+  which is common to all the database-group nodes.
+  i.e., an index state change done only on a local node is not reflected.
+
+* To get the index state on the local node use `GetIndexStatisticsOperation`.
+
+* In this page:
+  * [Get Index example](../../../../client-api/operations/maintenance/indexes/get-index.mdx#get-index-example)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index.mdx#syntax)
+
+
+## Get Index example
+
+
+
+
+{`// Define the get index operation, pass the index name
+var getIndexOp = new GetIndexOperation("Orders/Totals");
+
+// Execute the operation by passing it to Maintenance.Send
+IndexDefinition index = store.Maintenance.Send(getIndexOp);
+
+// Access the index definition
+var state = index.State;
+var lockMode = index.LockMode;
+var deploymentMode = index.DeploymentMode;
+// etc.
+`}
+
+
+
+
+{`// Define the get index operation, pass the index name
+var getIndexOp = new GetIndexOperation("Orders/Totals");
+
+// Execute the operation by passing it to Maintenance.SendAsync
+IndexDefinition index = await store.Maintenance.SendAsync(getIndexOp);
+
+// Access the index definition
+var state = index.State;
+var lockMode = index.LockMode;
+var deploymentMode = index.DeploymentMode;
+// etc.
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public GetIndexOperation(string indexName)
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **indexName** | `string` | Name of index to get |
+
+| Return value of `store.Maintenance.Send(getIndexOp)` | Description |
+| - | - |
+| `IndexDefinition` | An instance of class [IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition) |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-csharp.mdx
new file mode 100644
index 0000000000..8e9ec1804f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-csharp.mdx
@@ -0,0 +1,138 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `GetIndexErrorsOperation` to get errors encountered during indexing.
+
+* The index errors will be retrieved only from the server node defined by the current [client-configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node).
+ +* To learn about clearing index errors, see [delete index errors](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx). + +* In this page: + * [Get errors for all indexes](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#get-errors-for-all-indexes) + * [Get errors for specific indexes](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#get-errors-for-specific-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#syntax) + + +## Get errors for all indexes + + + + +{`// Define the get index errors operation +var getIndexErrorsOp = new GetIndexErrorsOperation(); + +// Execute the operation by passing it to Maintenance.Send +IndexErrors[] indexErrors = store.Maintenance.Send(getIndexErrorsOp); + +// indexErrors will contain errors for ALL indexes +`} + + + + +{`// Define the get index errors operation +var getIndexErrorsOp = new GetIndexErrorsOperation(); + +// Execute the operation by passing it to Maintenance.SendAsync +IndexErrors[] indexErrors = await store.Maintenance.SendAsync(getIndexErrorsOp); + +// indexErrors will contain errors for ALL indexes +`} + + + + + + +## Get errors for specific indexes + + + + +{`// Define the get index errors operation for specific indexes +var getIndexErrorsOp = new GetIndexErrorsOperation(new[] { "Orders/Totals" }); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if any of the specified indexes do not exist +IndexErrors[] indexErrors = store.Maintenance.Send(getIndexErrorsOp); + +// indexErrors will contain errors only for index "Orders/Totals" +`} + + + + +{`// Define the get index errors operation for specific indexes +var getIndexErrorsOp = new GetIndexErrorsOperation(new[] { "Orders/Totals" }); + +// Execute the operation by passing it to Maintenance.SendAsync +// An exception will be thrown if any of the specified indexes do not exist +IndexErrors[] indexErrors = await store.Maintenance.SendAsync(getIndexErrorsOp); + +// indexErrors will contain errors only for index "Orders/Totals" +`} + + + + + + +## Syntax + + + +{`// Available overloads: +public GetIndexErrorsOperation() // Get errors for all indexes +public GetIndexErrorsOperation(string[] indexNames) // Get errors for specific indexes +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexNames** | `string[]` | List of index names to get errors for | + +| Return value of
`store.Maintenance.Send(getIndexErrorsOp)` | Description |
+| - | - |
+| `IndexErrors[]` | List of `IndexErrors` classes - see definition below.
An exception is thrown if any of the specified indexes doesn't exist. | + + + +{`public class IndexErrors +\{ + public string Name \{ get; set; \} // Index name + public IndexingError[] Errors \{ get; set; \} // List of errors for this index +\} +`} + + + + + +{`public class IndexingError +\{ + // The error message + public string Error \{ get; set; \} + + // Time of error + public DateTime Timestamp \{ get; set; \} + + // If Action is 'Map' - field will contain the document ID + // If Action is 'Reduce' - field will contain the Reduce key value + // For all other Actions - field will be null + public string Document \{ get; set; \} + + // Area where error has occurred, e.g. Map/Reduce/Analyzer/Memory/etc. + public string Action \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-java.mdx new file mode 100644 index 0000000000..d1a2ec291d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-java.mdx @@ -0,0 +1,122 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**GetIndexErrorsOperation** is used to return errors encountered during document indexing. + +## Syntax + + + +{`public GetIndexErrorsOperation() + +public GetIndexErrorsOperation(String[] indexNames) +`} + + + + + +{`public class IndexErrors \{ + private String name; + private IndexingError[] errors; + + public IndexErrors() \{ + errors = new IndexingError[0]; + \} + + public String getName() \{ + return name; + \} + + public void setName(String name) \{ + this.name = name; + \} + + public IndexingError[] getErrors() \{ + return errors; + \} + + public void setErrors(IndexingError[] errors) \{ + this.errors = errors; + \} +\} +`} + + + + + +{`public class IndexingError \{ + + private String error; + private Date timestamp; + private String document; + private String action; + + public String getError() \{ + return error; + \} + + public void setError(String error) \{ + this.error = error; + \} + + public Date getTimestamp() \{ + return timestamp; + \} + + public void setTimestamp(Date timestamp) \{ + this.timestamp = timestamp; + \} + + public String getDocument() \{ + return document; + \} + + public void setDocument(String document) \{ + this.document = document; + \} + + public String getAction() \{ + return action; + \} + + public void setAction(String action) \{ + this.action = action; + \} +\} +`} + + + +| Return Value | | | +| ------------- | ----- | ---- | +| **Name** | String | Index name | +| **Errors** | IndexingError\[\] | List of indexing errors | + +## Example I + + + +{`// gets errors for all indexes +IndexErrors[] indexErrors + = store.maintenance().send(new GetIndexErrorsOperation()); +`} + + + +## Example II + + + +{`// gets errors only for 'Orders/Totals' index +IndexErrors[] indexErrors + = store.maintenance() + .send(new GetIndexErrorsOperation(new String[]\{"Orders/Totals"\})); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-nodejs.mdx new file mode 100644 index 0000000000..72d3c66457 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-nodejs.mdx @@ 
-0,0 +1,110 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexErrorsOperation` to get errors encountered during indexing. + +* The index errors will be retrieved only from the server node defined by the current [client-configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* To learn about clearing index errors, see [delete index errors](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx). + +* In this page: + * [Get errors for all indexes](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#get-errors-for-all-indexes) + * [Get errors for specific indexes](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#get-errors-for-specific-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#syntax) + + +## Get errors for all indexes + + + +{`// Define the get index errors operation +const getIndexErrorsOp = new GetIndexErrorsOperation(); + +// Execute the operation by passing it to maintenance.send +const indexErrors = await store.maintenance.send(getIndexErrorsOp); + +// indexErrors will contain errors for ALL indexes +`} + + + + + +## Get errors for specific indexes + + + +{`// Define the get index errors operation for specific indexes +const getIndexErrorsOp = new GetIndexErrorsOperation(["Orders/Totals"]); + +// Execute the operation by passing it to maintenance.send +// An exception will be thrown if any of the specified indexes do not exist +const indexErrors = await store.maintenance.send(getIndexErrorsOp); + +// indexErrors will contain errors only for index "Orders/Totals" +`} + + + + + +## Syntax + + + +{`// Available overloads: +const getIndexErrorsOp = new GetIndexErrorsOperation(); // Get errors for all indexes +const getIndexErrorsOp = new GetIndexErrorsOperation(indexNames); // Get errors for specific indexes +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexNames** | `string[]` | List of index names to get errors for | + +| Return value of
`store.maintenance.send(getIndexErrorsOp)` | Description |
+| - | - |
+| `object[]` | List of 'index errors' objects - see definition below.
An exception is thrown if any of the specified indexes doesn't exist. | + + + + +{`// An 'index errors' object: +\{ + name, // Index name + errors // List of 'error objects' for this index +\} +`} + + + + +{`// An 'error object': +\{ + // The error message + error, + + // Time of error + timestamp, + + // If Action is 'Map' - field will contain the document ID + // If Action is 'Reduce' - field will contain the Reduce key value + // For all other Actions - field will be null + document, + + // Area where error has occurred, e.g. Map/Reduce/Analyzer/Memory/etc. + action +\} +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-php.mdx new file mode 100644 index 0000000000..29fbe0e3e1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-php.mdx @@ -0,0 +1,117 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexErrorsOperation` to get errors encountered during indexing. + +* The index errors will be retrieved only from the server node defined by the current [client-configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* To learn about clearing index errors, see [delete index errors](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx). + +* In this page: + * [Get errors for all indexes](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#get-errors-for-all-indexes) + * [Get errors for specific indexes](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#get-errors-for-specific-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#syntax) + + +## Get errors for all indexes + + + +{`// Define the get index errors operation +$getIndexErrorsOp = new GetIndexErrorsOperation(); + +// Execute the operation by passing it to maintenance.send +/** @var IndexErrorsArray $indexErrors */ +$indexErrors = $store->maintenance()->send($getIndexErrorsOp); + +// indexErrors will contain errors for ALL indexes +`} + + + + + +## Get errors for specific indexes + + + +{`// Define the get index errors operation for specific indexes +$getIndexErrorsOp = new GetIndexErrorsOperation([ "Orders/Totals" ]); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if any of the specified indexes do not exist +/** @var IndexErrorsArray $indexErrors */ +$indexErrors = $store->maintenance()->send($getIndexErrorsOp); + +// indexErrors will contain errors only for index "Orders/Totals" +`} + + + + + +## Syntax + + + +{`// Available overloads: +GetIndexErrorsOperation() // Get errors for all indexes +GetIndexErrorsOperation(array $indexNames) // Get errors for specific indexes +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **$indexNames** | `array` | List of index names to get errors for | + +| `$getIndexErrorsOp` operation Return value | Description | +| - | - | +| `?IndexingErrorArray` | List of `IndexingError` classes - see definition below.
An exception is thrown if any of the specified indexes doesn't exist. | + + + + + +{`public class IndexErrors +\{ + private ?string $name = null; // Index name + private ?IndexingErrorArray $errors = null; // List of errors for this index + + // ... getters and setters +\} +`} + + + + + +{`public class IndexingError +\{ + // The error message + private ?string $error = null; + + // Time of error + private ?DateTimeInterface $timestamp = null; + + // If Action is 'Map' - field will contain the document ID + // If Action is 'Reduce' - field will contain the Reduce key value + // For all other Actions - field will be null + private ?string $document = null; + + // Area where error has occurred, e.g. Map/Reduce/Analyzer/Memory/etc. + private ?string $action = null; + + // ... getters and setters +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-python.mdx new file mode 100644 index 0000000000..8dce945c5f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-errors-python.mdx @@ -0,0 +1,115 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexErrorsOperation` to get errors encountered during indexing. + +* The index errors will be retrieved only from the server node defined by the current [client-configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* To learn about clearing index errors, see [delete index errors](../../../../client-api/operations/maintenance/indexes/delete-index-errors.mdx). + +* In this page: + * [Get errors for all indexes](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#get-errors-for-all-indexes) + * [Get errors for specific indexes](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#get-errors-for-specific-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index-errors.mdx#syntax) + + +## Get errors for all indexes + + + +{`# Define the get index errors operation +get_index_errors_op = GetIndexErrorsOperation() + +# Execute the operation by passing it to maintenance.send +index_errors = store.maintenance.send(get_index_errors_op) + +# index_errors will contain errors for ALL indexes +`} + + + + + +## Get errors for specific indexes + + + +{`# Define the get index errors operation for specific indexes +get_index_errors_op = GetIndexErrorsOperation("Orders/Totals") + +# Execute the operation by passing it to maintenance.send +# An exception will be thrown if any of the specified indexes do not exist +index_errors = store.maintenance.send(get_index_errors_op) + +# index_errors will contain errors only for index "Orders/Totals" +`} + + + + + +## Syntax + + + +{`class GetIndexErrorsOperation(MaintenanceOperation[List[IndexErrors]]): + def __init__(self, *index_names: str): # If no index_names provided, get errors for all indexes + ... +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **\*index_names** | `str` | List of index names to get errors for | + +| Return value of
`store.maintenance.send(GetIndexErrorsOperation)` | Description | +| - | - | +| `List[IndexErrors]` | List of `IndexErrors` classes - see definition below.
An exception is thrown if any of the specified indexes doesn't exist. | + + + + + +{`class IndexErrors: + def __init__(self, name: Optional[str] = None, errors: Optional[List[IndexingError]] = None): + self.name = name # Index name + self.errors = errors # List of errors for this index +`} + + + + + +{`class IndexingError: + def __init__( + self, + error: Optional[str] = None, + timestamp: Optional[datetime.datetime] = None, + document: Optional[str] = None, + action: Optional[str] = None, + ): + # Error message + self.error = error + + # Time of error + self.timestamp = timestamp + + # If action is 'Map' - field will contain the document ID + # If action is 'Reduce' - field will contain the Reduce key value + # For all other actions - field will be None + self.document = document + + # Area where error has occurred, e.g. Map/Reduce/Analyzer/Memory/etc. + self.action = action +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-java.mdx new file mode 100644 index 0000000000..3522b23556 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-java.mdx @@ -0,0 +1,36 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**GetIndexOperation** is used to retrieve an index definition from a database. + +### Syntax + + + +{`public GetIndexOperation(String indexName) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **indexName** | String | name of an index | + +| Return Value | | +| ------------- | ----- | +| `IndexDefinition` | Instance of IndexDefinition representing index. | + +### Example + + + +{`IndexDefinition index + = store.maintenance() + .send(new GetIndexOperation("Orders/Totals")); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-csharp.mdx new file mode 100644 index 0000000000..898c14d18a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-csharp.mdx @@ -0,0 +1,68 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexNamesOperation` to retrieve multiple **index names** from the database. 
+ +* In this page: + * [Get index names example](../../../../client-api/operations/maintenance/indexes/get-index-names.mdx#get-index-names-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index-names.mdx#syntax) + + +## Get index names example + + + + +{`// Define the get index names operation +// Pass number of indexes to skip & number of indexes to retrieve +var getIndexNamesOp = new GetIndexNamesOperation(0, 10); + +// Execute the operation by passing it to Maintenance.Send +string[] indexNames = store.Maintenance.Send(getIndexNamesOp); + +// indexNames will contain the first 10 indexes, alphabetically ordered +`} + + + + +{`// Define the get index names operation +// Pass number of indexes to skip & number of indexes to retrieve +var getIndexNamesOp = new GetIndexNamesOperation(0, 10); + +// Execute the operation by passing it to Maintenance.SendAsync +string[] indexNames = await store.Maintenance.SendAsync(getIndexNamesOp); + +// indexNames will contain the first 10 indexes, alphabetically ordered +`} + + + + + + +## Syntax + + + +{`public GetIndexNamesOperation(int start, int pageSize) +`} + + + +| Parameters | Type | Description | +| - |- | - | +| **start** | `int` | Number of index names to skip | +| **pageSize** | `int` | Number of index names to retrieve | + +| Return Value of
`store.Maintenance.Send(getIndexNamesOp)` | Description | +| - | - | +| `string[]` | A list of index names.
Alphabetically ordered. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-java.mdx new file mode 100644 index 0000000000..bfb8903171 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-java.mdx @@ -0,0 +1,38 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**GetIndexNamesOperation** is used to retrieve multiple index names from a database. + +### Syntax + + + +{`public GetIndexNamesOperation(int start, int pageSize) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **start** | int | Number of index names that should be skipped | +| **pageSize** | int | Maximum number of index names that will be retrieved | + +| Return Value | | +| ------------- | ----- | +| String[] | This method returns an array of index **name** as a result. | + +### Example + + + +{`String[] indexNames + = store.maintenance() + .send(new GetIndexNamesOperation(0, 10)); +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-nodejs.mdx new file mode 100644 index 0000000000..c298d48c36 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-nodejs.mdx @@ -0,0 +1,53 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexNamesOperation` to retrieve multiple **index names** from the database. + +* In this page: + * [Get index names example](../../../../client-api/operations/maintenance/indexes/get-index-names.mdx#get-index-names-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index-names.mdx#syntax) + + +## Get index names example + + + +{`// Define the get index names operation +// Pass number of indexes to skip & number of indexes to retrieve +const getIndexNamesOp = new GetIndexNamesOperation(0, 10); + +// Execute the operation by passing it to maintenance.send +const indexNames = await store.maintenance.send(getIndexNamesOp); + +// indexNames will contain the first 10 indexes, alphabetically ordered +`} + + + + + +## Syntax + + + +{`const getIndexNamesOp = new GetIndexNamesOperation(start, pageSize); +`} + + + +| Parameters | Type | Description | +| - |- | - | +| **start** | `number` | Number of index names to skip | +| **pageSize** | `number` | Number of index names to retrieve | + +| Return Value of
`store.maintenance.send(getIndexNamesOp)` | Description | +| - | - | +| `string[]` | A list of index names.
Alphabetically ordered. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-php.mdx new file mode 100644 index 0000000000..f5c1de5b89 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-php.mdx @@ -0,0 +1,33 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexNamesOperation` to retrieve multiple **index names** from the database. + +* In this page: + * [Get index names example](../../../../client-api/operations/maintenance/indexes/get-index-names.mdx#get-index-names-example) + + +## Get index names example + + + +{`// Define the get index names operation +// Pass number of indexes to skip & number of indexes to retrieve +$getIndexNamesOp = new GetIndexNamesOperation(0, 10); + +// Execute the operation by passing it to Maintenance.Send +/** @var StringArrayResult $indexNames */ +$indexNames = $store->maintenance()->send($getIndexNamesOp); + +// indexNames will contain the first 10 indexes, alphabetically ordered +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-python.mdx new file mode 100644 index 0000000000..6307640a28 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-names-python.mdx @@ -0,0 +1,54 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexNamesOperation` to retrieve multiple **index names** from the database. + +* In this page: + * [Get index names example](../../../../client-api/operations/maintenance/indexes/get-index-names.mdx#get-index-names-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index-names.mdx#syntax) + + +## Get index names example + + + +{`# Define the get index names operation +# Pass number of indexes to skip & number of indexes to retrieve +get_index_names_op = GetIndexNamesOperation(0, 10) + +# Execute the operation by passing it to maintenance.send +index_names = store.maintenance.send(get_index_names_op) + +# index_names will contain the first 10 indexes, alphabetically ordered +`} + + + + + +## Syntax + + + +{`class GetIndexNamesOperation(MaintenanceOperation): + def __init__(self, start: int, page_size: int): ... +`} + + + +| Parameters | Type | Description | +| - |- | - | +| **start** | `int` | Number of index names to skip | +| **page_size** | `int` | Number of index names to retrieve | + +| Return Value of
`store.maintenance.send(GetIndexNamesOperation)` | Description | +| - | - | +| `str[]` | A list of index names.
Alphabetically ordered. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-nodejs.mdx new file mode 100644 index 0000000000..7f5b7d5b2f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-nodejs.mdx @@ -0,0 +1,62 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexOperation` to retrieve the **index definition** from the database. + +* The operation will execute on the node defined by the [client configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, the index definition returned is taken from the database record, + which is common to all the database-group nodes. + i.e., an index state change done only on a local node is not reflected. + +* To get the index state on the local node use `GetIndexStatisticsOperation`. + +* In this page: + * [Get Index example](../../../../client-api/operations/maintenance/indexes/get-index.mdx#get-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index.mdx#syntax) + + +## Get Index example + + + +{`// Define the get index operation, pass the index name +const getIndexOp = new GetIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to maintenance.send +const indexDefinition = await store.maintenance.send(getIndexOp); + +// Access the index definition +const state = indexDefinition.state; +const lockMode = indexDefinition.lockMode; +const deploymentMode = indexDefinition.deploymentMode; +// etc. +`} + + + + + +## Syntax + + + +{`const getIndexOp = new GetIndexOperation(indexName); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexName** | `string` | Name of index to get | + +| Return value of `store.maintenance.send(getIndexOp)` | Description | +|- | - | +| `IndexDefinition` | An instance of class [IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition) | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-php.mdx new file mode 100644 index 0000000000..ed48747e22 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-php.mdx @@ -0,0 +1,57 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexOperation` to retrieve an index definition from the database. + +* The operation will execute on the node defined by the [client configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, the index definition returned is taken from the database record, + which is common to all the database-group nodes. + i.e., an index state change done only on a local node is not reflected. + +* To get the index state on the local node use `GetIndexStatisticsOperation`. 
+ +* In this page: + * [Get Index example](../../../../client-api/operations/maintenance/indexes/get-index.mdx#get-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index.mdx#syntax) + + +## Get Index example + + + +{`// Define the get index operation, pass the index name +$getIndexOp = new GetIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.Send +/** @var IndexDefinition $index */ +$index = $store->maintenance()->send($getIndexOp); + +// Access the index definition +$state = $index->getState(); +$lockMode = $index->getLockMode(); +$deploymentMode = $index->getDeploymentMode(); +// etc. +`} + + + +### Syntax + + + +{`GetIndexOperation(?string $indexName) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **$indexName** | `?string` | Name of index to get | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-python.mdx new file mode 100644 index 0000000000..ca8d6b442f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-index-python.mdx @@ -0,0 +1,63 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexOperation` to retrieve an index definition from the database. + +* The operation will execute on the node defined by the [client configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, the index definition returned is taken from the database record, + which is common to all the database-group nodes. + i.e., an index state change done only on a local node is not reflected. + +* To get the index state on the local node use `GetIndexStatisticsOperation`. + +* In this page: + * [Get Index example](../../../../client-api/operations/maintenance/indexes/get-index.mdx#get-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-index.mdx#syntax) + + +## Get Index example + + + +{`# Define the get index operation, pass the index name +get_index_op = GetIndexOperation("Orders/Totals") + +# Execute the operation by passing it to maintenance.send +index = store.maintenance.send(get_index_op) + +# Access the index definition +state = index.state +lock_mode = index.lock_mode +deployment_mode = index.deployment_mode +# etc. +`} + + + + + +## Syntax + + + +{`class GetIndexOperation(MaintenanceOperation[IndexDefinition]): + def __init__(self, index_name: str): ... 
+`} + + + +| Parameters | Type | Description | +| - | - | - | +| **index_name** | `str` | Name of index to get | + +| Return value of `store.maintenance.send(GetIndexOperation)` | Description | +|- | - | +| `IndexDefinition` | An instance of class [IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition) | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-csharp.mdx new file mode 100644 index 0000000000..447218fbf8 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-csharp.mdx @@ -0,0 +1,87 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexesOperation` to retrieve multiple **index definitions** from the database. + +* The operation will execute on the node defined by the [client configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, the index definitions returned are taken from the database record, + which is common to all the database-group nodes. + i.e., an index state change done only on a local node is not reflected. + +* To get a specific index state on a local node use `GetIndexStatisticsOperation`. + +* In this page: + * [Get Indexes example](../../../../client-api/operations/maintenance/indexes/get-indexes.mdx#get-indexes-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-indexes.mdx#syntax) + + +## Get Indexes example + + + + +{`// Define the get indexes operation +// Pass number of indexes to skip & number of indexes to retrieve +var getIndexesOp = new GetIndexesOperation(0, 10); + +// Execute the operation by passing it to Maintenance.Send +IndexDefinition[] indexes = store.Maintenance.Send(getIndexesOp); + +// indexes will contain the first 10 indexes, alphabetically ordered by index name +// Access an index definition from the resulting list: +var name = indexes[0].Name; +var state = indexes[0].State; +var lockMode = indexes[0].LockMode; +var deploymentMode = indexes[0].DeploymentMode; +// etc. +`} + + + + +{`// Define the get indexes operation +// Pass number of indexes to skip & number of indexes to retrieve +var getIndexesOp = new GetIndexesOperation(0, 10); + +// Execute the operation by passing it to Maintenance.SendAsync +IndexDefinition[] indexes = await store.Maintenance.SendAsync(getIndexesOp); + +// indexes will contain the first 10 indexes, alphabetically ordered by index name +// Access an index definition from the resulting list: +var name = indexes[0].Name; +var state = indexes[0].State; +var lockMode = indexes[0].LockMode; +var deploymentMode = indexes[0].DeploymentMode; +// etc. +`} + + + + + + +## Syntax + + + +{`public GetIndexesOperation(int start, int pageSize) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **start** | `int` | Number of indexes to skip | +| **pageSize** | `int` | Number of indexes to retrieve | + +| Return value of `store.Maintenance.Send(getIndexesOp)` | Description | +| - | - | +| `IndexDefinition[]` | A list of [IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition) classes,
ordered alphabetically by index name. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-java.mdx new file mode 100644 index 0000000000..976106d376 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-java.mdx @@ -0,0 +1,37 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**GetIndexesOperation** is used to retrieve multiple index definitions from a database. + +### Syntax + + + +{`public GetIndexesOperation(int start, int pageSize) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **start** | int | Number of indexes that should be skipped | +| **pageSize** | int | Maximum number of indexes that will be retrieved | + +| Return Value | | +| ------------- | ----- | +| `IndexDefinition` | Instance of IndexDefinition representing index. | + +### Example + + + +{`IndexDefinition[] indexes + = store.maintenance() + .send(new GetIndexesOperation(0, 10)); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-nodejs.mdx new file mode 100644 index 0000000000..11d6b925d1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-nodejs.mdx @@ -0,0 +1,66 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexesOperation` to retrieve multiple **index definitions** from the database. + +* The operation will execute on the node defined by the [client configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, the index definitions returned are taken from the database record, + which is common to all the database-group nodes. + i.e., an index state change done only on a local node is not reflected. + +* To get a specific index state on a local node use `GetIndexStatisticsOperation`. + +* In this page: + * [Get Indexes example](../../../../client-api/operations/maintenance/indexes/get-indexes.mdx#get-indexes-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-indexes.mdx#syntax) + + +## Get Indexes example + + + +{`// Define the get indexes operation +// Pass number of indexes to skip & number of indexes to retrieve +const getIndexesOp = new GetIndexesOperation(0, 10); + +// Execute the operation by passing it to maintenance.send +const indexes = await store.maintenance.send(getIndexesOp); + +// indexes will contain the first 10 indexes, alphabetically ordered by index name +// Access an index definition from the resulting list: +const name = indexes[0].name; +const state = indexes[0].state; +const lockMode = indexes[0].lockMode; +const deploymentMode = indexes[0].deploymentMode; +// etc. 
+`} + + + + + +## Syntax + + + +{`const getIndexesOp = new GetIndexesOperation(start, pageSize); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **start** | `number` | Number of indexes to skip | +| **pageSize** | `number` | Number of indexes to retrieve | + +| Return value of `store.maintenance.send(getIndexesOp)` | Description | +| - | - | +| `IndexDefinition[]` | A list of [IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition),
ordered alphabetically by index name. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-php.mdx new file mode 100644 index 0000000000..f7704d71a3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-php.mdx @@ -0,0 +1,61 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexesOperation` to retrieve multiple **index definitions** from the database. + +* The operation will execute on the node defined by the [client configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, the index definitions returned are taken from the database record, + which is common to all the database-group nodes. + i.e., an index state change done only on a local node is not reflected. + +* To get a specific index state on a local node use `GetIndexStatisticsOperation`. + +* In this page: + * [Get Indexes example](../../../../client-api/operations/maintenance/indexes/get-indexes.mdx#get-indexes-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/get-indexes.mdx#syntax) + + +## Get Indexes example + + + +{`// Define the get indexes operation +// Pass number of indexes to skip & number of indexes to retrieve +$getIndexesOp = new GetIndexesOperation(0, 10); + +// Execute the operation by passing it to Maintenance.Send +/** @var IndexDefinitionArray $indexes */ +$indexes = $store->maintenance()->send($getIndexesOp); + +// indexes will contain the first 10 indexes, alphabetically ordered by index name +// Access an index definition from the resulting list: +$name = $indexes[0]->getName(); +$state = $indexes[0]->getState(); +$lockMode = $indexes[0]->getLockMode(); +$deploymentMode = $indexes[0]->getDeploymentMode(); +// etc. +`} + + + +#### Syntax + + + +{`GetIndexesOperation(int $start, int $pageSize) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **$start** | `int` | Number of indexes to skip | +| **$pageSize** | `int` | Number of indexes to retrieve | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-python.mdx new file mode 100644 index 0000000000..3b0748f054 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-indexes-python.mdx @@ -0,0 +1,67 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetIndexesOperation` to retrieve multiple **index definitions** from the database. + +* The operation will execute on the node defined by the [client configuration](../../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + However, the index definitions returned are taken from the database record, + which is common to all the database-group nodes. + i.e., an index state change done only on a local node is not reflected. + +* To get a specific index state on a local node use `GetIndexStatisticsOperation`. 
+
+* In this page:
+  * [Get Indexes example](../../../../client-api/operations/maintenance/indexes/get-indexes.mdx#get-indexes-example)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/get-indexes.mdx#syntax)
+
+
+## Get Indexes example
+
+
+
+{`# Define the get indexes operation
+# Pass number of indexes to skip & number of indexes to retrieve
+get_index_op = GetIndexesOperation(0, 10)
+
+# Execute the operation by passing it to maintenance.send
+indexes = store.maintenance.send(get_index_op)
+
+# indexes will contain the first 10 indexes, alphabetically ordered by index name
+# Access an index definition from the resulting list:
+name = indexes[0].name
+state = indexes[0].state
+lock_mode = indexes[0].lock_mode
+deployment_mode = indexes[0].deployment_mode
+# etc.
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`class GetIndexesOperation(MaintenanceOperation[List[IndexDefinition]]):
+    def __init__(self, start: int, page_size: int): ...
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **start** | `int` | Number of indexes to skip |
+| **page_size** | `int` | Number of indexes to retrieve |
+
+| Return value of `store.maintenance.send(get_index_op)` | Description |
+| - | - |
+| `List[IndexDefinition]` | A list of [IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition) classes,
ordered alphabetically by index name. |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-csharp.mdx new file mode 100644 index 0000000000..63080e64a2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-csharp.mdx @@ -0,0 +1,70 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `GetTermsOperation` to retrieve the **terms of an index-field**.
+
+* In this page:
+  * [Get Terms example](../../../../client-api/operations/maintenance/indexes/get-terms.mdx#get-terms-example)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/get-terms.mdx#syntax)
+
+
+## Get Terms example
+
+
+
+
+{`// Define the get terms operation
+// Pass the requested index-name, index-field, start value & page size
+var getTermsOp = new GetTermsOperation("Orders/Totals", "Employee", "employees/5-a", 10);
+
+// Execute the operation by passing it to Maintenance.Send
+string[] fieldTerms = store.Maintenance.Send(getTermsOp);
+
+// fieldTerms will contain all the terms that come after term 'employees/5-a' for index-field 'Employee'
+`}
+
+
+
+
+{`// Define the get terms operation
+// Pass the requested index-name, index-field, start value & page size
+var getTermsOp = new GetTermsOperation("Orders/Totals", "Employee", "employees/5-a", 10);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+string[] fieldTerms = await store.Maintenance.SendAsync(getTermsOp);
+
+// fieldTerms will contain all the terms that come after term 'employees/5-a' for index-field 'Employee'
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public GetTermsOperation(string indexName, string field, string fromValue, int? pageSize = null)
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **indexName** | `string` | Name of an index to get terms for |
+| **field** | `string` | Name of index-field to get terms for |
+| **fromValue** | `string` | The starting term from which to return results.
This term is not included in the results.
`null` - start from first term. | +| **pageSize** | `int?` | Number of terms to get.
`null` - return all terms. | + +| Return value of `store.Maintenance.Send(getTermsOp)` | Description | +| - |- | +| string[] | List of terms for the requested index-field.
Alphabetically ordered. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-java.mdx new file mode 100644 index 0000000000..67c7e22419 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-java.mdx @@ -0,0 +1,43 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The **GetTermsOperation** will retrieve stored terms for a field of an index. + +## Syntax + + + +{`public GetTermsOperation(String indexName, String field, String fromValue) + +public GetTermsOperation(String indexName, String field, String fromValue, Integer pageSize) +`} + + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **indexName** | String | Name of an index to get terms for | +| **field** | String | Name of field to get terms for | +| **fromValue** | String | The starting term from which to return results | +| **pageSize** | Integer | Number of terms to get | + +| Return Value | | +| ------------- | ----- | +| String[] | List of terms for the requested index-field.
Alphabetically ordered. |
+
+## Example
+
+
+
+{`String[] terms = store
+    .maintenance()
+    .send(
+        new GetTermsOperation("Orders/Totals", "Employee", null));
+`}
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-nodejs.mdx new file mode 100644 index 0000000000..6861ee636e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-nodejs.mdx @@ -0,0 +1,57 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `GetTermsOperation` to retrieve the **terms of an index-field**.
+
+* In this page:
+  * [Get Terms example](../../../../client-api/operations/maintenance/indexes/get-terms.mdx#get-terms-example)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/get-terms.mdx#syntax)
+
+
+## Get Terms example
+
+
+
+{`// Define the get terms operation
+// Pass the requested index-name, index-field, start value & page size
+const getTermsOp = new GetTermsOperation("Orders/Totals", "Employee", "employees/5-a", 10);
+
+// Execute the operation by passing it to maintenance.send
+const fieldTerms = await store.maintenance.send(getTermsOp);
+
+// fieldTerms will contain all the terms that come after term 'employees/5-a' for index-field 'Employee'
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+const getTermsOp = new GetTermsOperation(indexName, field, fromValue);
+const getTermsOp = new GetTermsOperation(indexName, field, fromValue, pageSize);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **indexName** | `string` | Name of an index to get terms for |
+| **field** | `string` | Name of index-field to get terms for |
+| **fromValue** | `string` | The starting term from which to return results.
This term is not included in the results.
`null` - start from first term. | +| **pageSize** | `number` | Number of terms to get.
`undefined/null` - return all terms. | + +| Return value of `store.maintenance.send(getTermsOp)` | Description | +| - |- | +| `string[]` | List of terms for the requested index-field.
Alphabetically ordered. |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-php.mdx new file mode 100644 index 0000000000..347151f4d6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-php.mdx @@ -0,0 +1,52 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `GetTermsOperation` to retrieve the **terms of an index-field**.
+
+* In this page:
+  * [Get Terms example](../../../../client-api/operations/maintenance/indexes/get-terms.mdx#get-terms-example)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/get-terms.mdx#syntax)
+
+
+## Get Terms example
+
+
+
+{`// Define the get terms operation
+// Pass the requested index-name, index-field, start value & page size
+$getTermsOp = new GetTermsOperation("Orders/Totals", "Employee", "employees/5-a", 10);
+
+// Execute the operation by passing it to Maintenance.Send
+/** @var StringArrayResult $fieldTerms */
+$fieldTerms = $store->maintenance()->send($getTermsOp);
+
+// fieldTerms will contain all the terms that come after term 'employees/5-a' for index-field 'Employee'
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`GetTermsOperation(?string $indexName, ?string $field, ?string $fromValue, ?int $pageSize = null)
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **$indexName** | `?string` | Name of an index to get terms for |
+| **$field** | `?string` | Name of index-field to get terms for |
+| **$fromValue** | `?string` | The starting term from which to return results.
This term is not included in the results.
`null` - start from first term. |
+| **$pageSize** | `?int` | Number of terms to get.
`null` - return all terms. |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-python.mdx new file mode 100644 index 0000000000..4d70ed6705 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_get-terms-python.mdx @@ -0,0 +1,56 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `GetTermsOperation` to retrieve the **terms of an index-field**.
+
+* In this page:
+  * [Get Terms example](../../../../client-api/operations/maintenance/indexes/get-terms.mdx#get-terms-example)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/get-terms.mdx#syntax)
+
+
+## Get Terms example
+
+
+
+{`# Define the get terms operation
+# Pass the requested index-name, index-field, start value & page size
+get_terms_op = GetTermsOperation("Orders/Totals", "Employee", "employees/5-A", 10)
+
+# Execute the operation by passing it to maintenance.send
+field_terms = store.maintenance.send(get_terms_op)
+
+# field_terms will contain all the terms that come after term 'employees/5-A' for index-field 'Employee'
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`class GetTermsOperation(MaintenanceOperation[List[str]]):
+    def __init__(self, index_name: str, field: str, from_value: Optional[str], page_size: int = None): ...
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **index_name** | `str` | Name of an index to get terms for |
+| **field** | `str` | Name of index-field to get terms for |
+| **from_value** | `str` (optional) | The starting term from which to return results.
This term is not included in the results.
`None` - start from first term. | +| **page_size** | `int` | Number of terms to get.
`None` - return all terms. | + +| Return value of `store.maintenance.send(GetTermsOperation)` | Description | +| - |- | +| `List[str]` | List of terms for the requested index-field.
Alphabetically ordered. |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-csharp.mdx new file mode 100644 index 0000000000..bd1c75649c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-csharp.mdx @@ -0,0 +1,95 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **When deploying an index**:
+  * If the new index definition is **different** from the current index definition on the server,
+    the current index will be overwritten and data will be re-indexed according to the new index definition.
+  * If the new index definition is the **same** as the one currently deployed on the server,
+    it will not be overwritten and re-indexing will not occur upon deploying the index.
+
+* **Prior to deploying an index**:
+  * Use `IndexHasChangedOperation` to check if the new index definition differs from the one
+    on the server to avoid any unwanted changes to the existing indexed data.
+
+* In this page:
+  * [Check if index has changed](../../../../client-api/operations/maintenance/indexes/index-has-changed.mdx#check-if-index-has-changed)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/index-has-changed.mdx#syntax)
+
+
+## Check if index has changed
+
+
+
+
+
+{`// Some index definition
+var indexDefinition = new IndexDefinition
+{
+    Name = "UsersByName",
+    Maps = { "from user in docs.Users select new { user.Name }"}
+};
+
+// Define the has-changed operation, pass the index definition
+var indexHasChangedOp = new IndexHasChangedOperation(indexDefinition);
+
+// Execute the operation by passing it to Maintenance.Send
+bool indexHasChanged = store.Maintenance.Send(indexHasChangedOp);
+
+// Return values:
+// false: The definition of the index passed is the SAME as the one deployed on the server
+// true: The definition of the index passed is DIFFERENT than the one deployed on the server
+// Or - index does not exist
+`}
+
+
+
+
+{`// Some index definition
+var indexDefinition = new IndexDefinition
+{
+    Name = "UsersByName",
+    Maps = { "from user in docs.Users select new { user.Name }"}
+};
+
+// Define the has-changed operation, pass the index definition
+var indexHasChangedOp = new IndexHasChangedOperation(indexDefinition);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+bool indexHasChanged = await store.Maintenance.SendAsync(indexHasChangedOp);
+
+// Return values:
+// false: The definition of the index passed is the SAME as the one deployed on the server
+// true: The definition of the index passed is DIFFERENT than the one deployed on the server
+// Or - index does not exist
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public IndexHasChangedOperation(IndexDefinition definition)
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **definition** | [IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition) | The index definition to check |
+
+| Return Value | Description |
+| - | - |
+| `true` | When the index **does not exist** on the server
or -
When the index definition **is different** from the one deployed on the server |
+| `false` | When the index definition is **the same** as the one deployed on the server |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-java.mdx new file mode 100644 index 0000000000..3419bf22e8 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-java.mdx @@ -0,0 +1,37 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+**IndexHasChangedOperation** will let you check whether a given index definition differs from the one on the server. This is useful prior to index deployment, when you want to know whether the existing index would be overwritten and its indexed data lost.
+
+## Syntax
+
+
+
+{`public IndexHasChangedOperation(IndexDefinition definition)
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **definition** | `IndexDefinition` | index definition |
+
+| Return Value | |
+| ------------- | ----- |
+| true | if an index **does not exist** on a server |
+| true | if an index definition **does not match** the one from the **definition** parameter |
+| false | if there are no differences between an index definition on the server and the one from the **definition** parameter |
+
+## Example
+
+
+
+{`Boolean ordersIndexHasChanged =
+    store.maintenance().send(new IndexHasChangedOperation(ordersIndexDefinition));
+`}
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-nodejs.mdx new file mode 100644 index 0000000000..1163f91954 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-nodejs.mdx @@ -0,0 +1,68 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **When deploying an index**:
+  * If the new index definition is **different** from the current index definition on the server,
+    the current index will be overwritten and data will be re-indexed according to the new index definition.
+  * If the new index definition is the **same** as the one currently deployed on the server,
+    it will not be overwritten and re-indexing will not occur upon deploying the index.
+
+* **Prior to deploying an index**:
+  * Use `IndexHasChangedOperation` to check if the new index definition differs from the one
+    on the server to avoid any unwanted changes to the existing indexed data.
+ +* In this page: + * [Check if index has changed](../../../../client-api/operations/maintenance/indexes/index-has-changed.mdx#check-if-index-has-changed) + * [Syntax](../../../../client-api/operations/maintenance/indexes/index-has-changed.mdx#syntax) + + +## Check if index has changed + + + +{`// Some index definition +const indexDefinition = new IndexDefinition(); +indexDefinition.name = "UsersByName"; +indexDefinition.maps = new Set([ \`from user in docs.Users select new \{ user.Name \}\` ]); + +// Define the has-changed operation, pass the index definition +const indexHasChangedOp = new IndexHasChangedOperation(indexDefinition); + +// Execute the operation by passing it to maintenance.send +const indexHasChanged = await documentStore.maintenance.send(indexHasChangedOp); + +// Return values: +// false: The definition of the index passed is the SAME as the one deployed on the server +// true: The definition of the index passed is DIFFERENT than the one deployed on the server +// Or - index does not exist +`} + + + + + +## Syntax + + + +{`const indexHasChangedOp = new IndexHasChangedOperation(definition); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **definition** | [IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition) | The index definition to check | + +| Return Value | Description | +| - | - | +| `true` | When the index **does not exist** on the server
or -
When the index definition **is different** from the one deployed on the server |
+| `false` | When the index definition is **the same** as the one deployed on the server |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-php.mdx new file mode 100644 index 0000000000..414881a6e5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-php.mdx @@ -0,0 +1,68 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **When deploying an index**:
+  * If the new index definition is **different** from the current index definition on the server,
+    the current index will be overwritten and data will be re-indexed according to the new index definition.
+  * If the new index definition is the **same** as the one currently deployed on the server,
+    it will not be overwritten and re-indexing will not occur upon deploying the index.
+
+* **Prior to deploying an index**:
+  * Use `IndexHasChangedOperation` to check if the new index definition differs from the one
+    on the server to avoid any unwanted changes to the existing indexed data.
+
+* In this page:
+  * [Check if index has changed](../../../../client-api/operations/maintenance/indexes/index-has-changed.mdx#check-if-index-has-changed)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/index-has-changed.mdx#syntax)
+
+
+## Check if index has changed
+
+
+
+{`// Some index definition
+$indexDefinition = new IndexDefinition();
+$indexDefinition->setName("UsersByName");
+$indexDefinition->setMaps(["from user in docs.Users select new \{ user.Name \}"]);
+
+// Define the has-changed operation, pass the index definition
+$indexHasChangedOp = new IndexHasChangedOperation($indexDefinition);
+
+// Execute the operation by passing it to Maintenance.Send
+$indexHasChanged = $store->maintenance()->send($indexHasChangedOp);
+
+// Return values:
+// false: The definition of the index passed is the SAME as the one deployed on the server
+// true: The definition of the index passed is DIFFERENT than the one deployed on the server
+// Or - index does not exist
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`IndexHasChangedOperation(?IndexDefinition $definition)
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **$definition** | [?IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition) | The index definition to check |
+
+| Return Value | Description |
+| - | - |
+| `true` | When the index **does not exist** on the server
or -
When the index definition **is different** from the one deployed on the server |
+| `false` | When the index definition is **the same** as the one deployed on the server |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-python.mdx new file mode 100644 index 0000000000..8993660adf --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_index-has-changed-python.mdx @@ -0,0 +1,69 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **When deploying an index**:
+  * If the new index definition is **different** from the current index definition on the server,
+    the current index will be overwritten and data will be re-indexed according to the new index definition.
+  * If the new index definition is the **same** as the one currently deployed on the server,
+    it will not be overwritten and re-indexing will not occur upon deploying the index.
+
+* **Prior to deploying an index**:
+  * Use `IndexHasChangedOperation` to check if the new index definition differs from the one
+    on the server to avoid any unwanted changes to the existing indexed data.
+
+* In this page:
+  * [Check if index has changed](../../../../client-api/operations/maintenance/indexes/index-has-changed.mdx#check-if-index-has-changed)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/index-has-changed.mdx#syntax)
+
+
+## Check if index has changed
+
+
+
+{`# Some index definition
+index_definition = IndexDefinition(
+    name="UsersByName", maps=\{"from user in docs.Users select new \{ user.Name \}"\}
+)
+
+# Define the has-changed operation, pass the index definition
+index_has_changed_op = IndexHasChangedOperation(index_definition)
+
+# Execute the operation by passing it to maintenance.send
+index_has_changed = store.maintenance.send(index_has_changed_op)
+
+# Return values:
+# False: The definition of the index passed is the SAME as the one deployed on the server
+# True: The definition of the index passed is DIFFERENT from the one deployed on the server
+# Or - index does not exist
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`class IndexHasChangedOperation(MaintenanceOperation[bool]):
+    def __init__(self, index: IndexDefinition): ...
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **index** | [IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#indexdefinition) | The index definition to check |
+
+| Return Value | Description |
+| - | - |
+| `True` | When the index **does not exist** on the server
or -
When the index definition **is different** from the one deployed on the server | +| `False` | When the index definition is **the same** as the one deployed on the server | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-csharp.mdx new file mode 100644 index 0000000000..b3e3f2ea25 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-csharp.mdx @@ -0,0 +1,341 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* There are a few ways to create and deploy indexes in a database. + +* This page describes deploying a **static-index** using the `PutIndexesOperation` Operation. + For a general description of Operations see [what are operations](../../../../client-api/operations/what-are-operations.mdx). + +* In this page: + * [Ways to deploy indexes - short summary](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#ways-to-deploy-indexes---short-summary) + * [Put indexes operation with IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinition) + * [Put indexes operation with IndexDefinitionBuilder](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinitionbuilder) + * [Syntax](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#syntax) + + +## Ways to deploy indexes - short summary + + + +##### Static-indexes: + +There are a few ways to deploy a static-index from the Client API: + +* The following methods are explained in section [Deploy a static-index](../../../../indexes/creating-and-deploying.mdx#deploy-a-static-index): + * Call `Execute()` on a specific index instance. + * Call `ExecuteIndex()` or `ExecuteIndexes()` on your _DocumentStore_ object. + * Call `IndexCreation.CreateIndexes()`. + +* Alternatively, you can execute the `PutIndexesOperation` maintenance operation on the _DocumentStore_, **as explained below**. + + + + +##### Auto-indexes: + + * An auto-index is created by the server when making a filtering query that doesn't specify which index to use. + Learn more in [Creating auto indexes](../../../../indexes/creating-and-deploying.mdx#auto-indexes). + + + + +## Put indexes operation with IndexDefinition + +Using `PutIndexesOperation` with **IndexDefinition** allows you to: + + * Choose any name for the index. + This string-based name is specified when querying the index. + * Set low-level properties available in _IndexDefinition_. 
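+A note on usage (not part of the original examples): once deployed, the index is addressed by this string-based name at query time.
+A minimal sketch, assuming the `OrdersByTotal` index from the examples below and an `Order` entity class:
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Query the deployed static-index by its string-based name
+    List<Order> orders = session.Query<Order>("OrdersByTotal").ToList();
+}
+`}
+
+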
+
+
+
+
+{`// Create an index definition
+var indexDefinition = new IndexDefinition
+{
+    // Name is mandatory, can use any string
+    Name = "OrdersByTotal",
+
+    // Define the index Map functions, string format
+    // A single string for a map-index, multiple strings for a multi-map-index
+    Maps = new HashSet<string>
+    {
+        @"
+        // Define the collection that will be indexed:
+        from order in docs.Orders
+
+        // Define the index-entry:
+        select new
+        {
+            // Define the index-fields within each index-entry:
+            Employee = order.Employee,
+            Company = order.Company,
+            Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount))
+        }"
+    },
+
+    // Reduce = ...,
+
+    // Can provide other index definitions available on the IndexDefinition class
+    // Override the default values, e.g.:
+    DeploymentMode = IndexDeploymentMode.Rolling,
+    Priority = IndexPriority.High,
+    Configuration = new IndexConfiguration
+    {
+        { "Indexing.IndexMissingFieldsAsNull", "true" }
+    }
+    // See all available properties in syntax below
+};
+
+// Define the put indexes operation, pass the index definition
+// Note: multiple index definitions can be passed, see syntax below
+IMaintenanceOperation<PutIndexResult[]> putIndexesOp = new PutIndexesOperation(indexDefinition);
+
+// Execute the operation by passing it to Maintenance.Send
+store.Maintenance.Send(putIndexesOp);
+`}
+
+
+
+
+{`// Create an index definition
+var indexDefinition = new IndexDefinition
+{
+    // Name is mandatory, can use any string
+    Name = "OrdersByTotal",
+
+    // Define the index Map functions, string format
+    // A single string for a map-index, multiple strings for a multi-map-index
+    Maps = new HashSet<string>
+    {
+        @"
+        // Define the collection that will be indexed:
+        from order in docs.Orders
+
+        // Define the index-entry:
+        select new
+        {
+            // Define the index-fields within each index-entry:
+            Employee = order.Employee,
+            Company = order.Company,
+            Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount))
+        }"
+    },
+
+    // Reduce = ...,
+
+    // Can provide other index definitions available on the IndexDefinition class
+    // Override the default values, e.g.:
+    DeploymentMode = IndexDeploymentMode.Rolling,
+    Priority = IndexPriority.High,
+    Configuration = new IndexConfiguration
+    {
+        { "Indexing.IndexMissingFieldsAsNull", "true" }
+    }
+    // See all available properties in syntax below
+};
+
+// Define the put indexes operation, pass the index definition
+// Note: multiple index definitions can be passed, see syntax below
+IMaintenanceOperation<PutIndexResult[]> putIndexesOp = new PutIndexesOperation(indexDefinition);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+await store.Maintenance.SendAsync(putIndexesOp);
+`}
+
+
+
+
+{`// Create an index definition
+var indexDefinition = new IndexDefinition
+{
+    // Name is mandatory, can use any string
+    Name = "OrdersByTotal",
+
+    // Define the index Map functions, string format
+    // A single string for a map-index, multiple strings for a multi-map-index
+    Maps = new HashSet<string>
+    {
+        @"map('Orders', function(order) {
+            return {
+                Employee: order.Employee,
+                Company: order.Company,
+                Total: order.Lines.reduce(function(sum, l) {
+                    return sum + (l.Quantity * l.PricePerUnit) * (1 - l.Discount);
+                }, 0)
+            };
+        });"
+    },
+
+    // Reduce = ...,
+
+    // Can provide other index definitions available on the IndexDefinition class
+    // Override the default values, e.g.:
+    DeploymentMode = IndexDeploymentMode.Rolling,
+    Priority = IndexPriority.High,
+    Configuration = new IndexConfiguration
+    {
+        { "Indexing.IndexMissingFieldsAsNull", "true" }
+    }
+    // See all available properties in syntax below
+};
+
+// Define the put indexes operation, pass the index definition
+// Note: multiple index definitions can be passed, see syntax below
+IMaintenanceOperation<PutIndexResult[]> putIndexesOp = new PutIndexesOperation(indexDefinition);
+
+// Execute the operation by passing it to Maintenance.Send
+store.Maintenance.Send(putIndexesOp);
+`}
+
+
+
+
+
+
+## Put indexes operation with IndexDefinitionBuilder
+
+Using `PutIndexesOperation` with an IndexDefinition created from an **IndexDefinitionBuilder** allows:
+
+  * Creating an index definition using a strongly typed LINQ syntax.
+  * Setting low-level properties available in _IndexDefinitionBuilder_.
+  * Note:
+    Only map or map-reduce indexes can be generated by the _IndexDefinitionBuilder_.
+    To generate multi-map indexes use the above _IndexDefinition_ option.
+
+
+
+
+{`// Create an index definition builder
+var builder = new IndexDefinitionBuilder<Order>
+{
+    // Define the map function, strongly typed LINQ format
+    Map =
+        // Define the collection that will be indexed:
+        orders => from order in orders
+            // Define the index-entry:
+            select new
+            {
+                // Define the index-fields within each index-entry:
+                Employee = order.Employee,
+                Company = order.Company,
+                Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount))
+            },
+
+    // Can provide other properties available on the IndexDefinitionBuilder class, e.g.:
+    DeploymentMode = IndexDeploymentMode.Rolling,
+    Priority = IndexPriority.High,
+    // Reduce = ..., etc.
+};
+
+// Generate index definition from builder
+// Pass the conventions, needed for building the Maps property
+var indexDefinition = builder.ToIndexDefinition(store.Conventions);
+
+// Optionally, set the index name, can use any string
+// If not provided then default name from builder is used, e.g.: "IndexDefinitionBuildersOfOrders"
+indexDefinition.Name = "OrdersByTotal";
+
+// Define the put indexes operation, pass the index definition
+// Note: multiple index definitions can be passed, see syntax below
+IMaintenanceOperation<PutIndexResult[]> putIndexesOp = new PutIndexesOperation(indexDefinition);
+
+// Execute the operation by passing it to Maintenance.Send
+store.Maintenance.Send(putIndexesOp);
+`}
+
+
+
+
+{`// Create an index definition builder
+var builder = new IndexDefinitionBuilder<Order>
+{
+    // Define the map function, strongly typed LINQ format
+    Map =
+        // Define the collection that will be indexed:
+        orders => from order in orders
+            // Define the index-entry:
+            select new
+            {
+                // Define the index-fields within each index-entry:
+                Employee = order.Employee,
+                Company = order.Company,
+                Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount))
+            },
+
+    // Can provide other properties available on the IndexDefinitionBuilder class, e.g.:
+    DeploymentMode = IndexDeploymentMode.Rolling,
+    Priority = IndexPriority.High,
+    // Reduce = ..., etc.
+};
+
+// Generate index definition from builder
+// Pass the conventions, needed for building the Maps property
+var indexDefinition = builder.ToIndexDefinition(store.Conventions);
+
+// Optionally, set the index name, can use any string
+// If not provided then default name from builder is used, e.g.: "IndexDefinitionBuildersOfOrders"
+indexDefinition.Name = "OrdersByTotal";
+
+// Define the put indexes operation, pass the index definition
+// Note: multiple index definitions can be passed, see syntax below
+IMaintenanceOperation<PutIndexResult[]> putIndexesOp = new PutIndexesOperation(indexDefinition);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+await store.Maintenance.SendAsync(putIndexesOp);
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public PutIndexesOperation(params IndexDefinition[] indexesToAdd)
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|----------------------------|----------------------------------|
+| **indexesToAdd** | `params IndexDefinition[]` | Definitions of indexes to deploy |
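+
+Since the parameter is a `params` array, several index definitions can be deployed in a single call.
+A minimal sketch (the two definition variables are hypothetical placeholders, not defined on this page):
+
+
+
+{`// Deploy several indexes in one operation
+var putManyOp = new PutIndexesOperation(ordersIndexDefinition, employeesIndexDefinition);
+PutIndexResult[] results = store.Maintenance.Send(putManyOp);
+`}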
+ +| `IndexDefinition` parameter | Type | Description | +|----------------------------------------------|-----------------------------------------|---------------------------------------------------------------------------------------------------------------------------| +| Name | `string` | Name of the index, a unique identifier | +| Maps | `HashSet` | All the map functions for the index | +| Reduce | `string` | The index reduce function | +| DeploymentMode | `IndexDeploymentMode?` | Deployment mode
(Parallel, Rolling) | +| State | `IndexState?` | State of index
(Normal, Disabled, Idle, Error) | +| Priority | `IndexPriority?` | Priority of index
(Low, Normal, High) | +| LockMode | `IndexLockMode?` | Lock mode of index
(Unlock, LockedIgnore, LockedError) |
+| Fields | `Dictionary<string, IndexFieldOptions>` | _IndexFieldOptions_ per index field |
+| AdditionalSources | `Dictionary<string, string>` | Additional code files to be compiled with this index |
+| AdditionalAssemblies | `HashSet<AdditionalAssembly>` | Additional assemblies that are referenced |
+| Configuration | `IndexConfiguration` | Can override [indexing configuration](../../../../server/configuration/indexing-configuration.mdx) by setting this dictionary |
+| OutputReduceToCollection | `string` | A collection name for saving the reduce results as documents |
+| ReduceOutputIndex | `long?` | This number will be part of the reduce results documents IDs |
+| PatternForOutputReduceToCollectionReferences | `string` | Pattern for documents IDs which reference IDs of reduce results documents |
+| PatternReferencesCollectionName | `string` | A collection name for the reference documents created based on provided pattern |
+
+| `store.Maintenance.Send(putIndexesOp)` return value | Description |
+|-------------------------------------------------------|------------------------------------|
+| `PutIndexResult[]` | List of _PutIndexResult_ per index |
+
+| `PutIndexResult` parameter | Type | Description |
+|-----------------------------|----------|-----------------------------------------|
+| Index | `string` | Name of the index that was added |
+| RaftCommandIndex | `long` | Index of raft command that was executed |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-java.mdx new file mode 100644 index 0000000000..39ad127c02 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-java.mdx @@ -0,0 +1,59 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+**PutIndexesOperation** is used to insert indexes into a database.
+
+### Syntax
+
+
+
+{`PutIndexesOperation(IndexDefinition...
indexToAdd) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **indexToAdd** | `IndexDefinition...` | Definitions of indexes | + +| Return Value | | +| ------------- | ----- | +| PutIndexResult[] | List of created indexes | + +### Example I + + + +{`IndexDefinition indexDefinition = new IndexDefinition(); +indexDefinition.setMaps(Collections.singleton("from order in docs.Orders select new \{ " + + " order.Employee," + + " order.Company," + + " Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount))" + + "\}")); + +store.maintenance().send(new PutIndexesOperation(indexDefinition)); +`} + + + +### Example II + + + +{`IndexDefinitionBuilder builder = new IndexDefinitionBuilder(); +builder.setMap("from order in docs.Orders select new \{ " + + " order.Employee," + + " order.Company," + + " Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount))" + + "\}"); + +IndexDefinition definition = builder.toIndexDefinition(store.getConventions()); +store.maintenance() + .send(new PutIndexesOperation(definition)); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-nodejs.mdx new file mode 100644 index 0000000000..bf8450cb65 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-nodejs.mdx @@ -0,0 +1,188 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* There are a few ways to create and deploy indexes in a database. + +* This page describes deploying a **static-index** using the `PutIndexesOperation` Operation. + For a general description of Operations see [what are operations](../../../../client-api/operations/what-are-operations.mdx). + +* In this page: + * [Ways to deploy indexes - short summary](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#ways-to-deploy-indexes---short-summary) + * [Put indexes operation with IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinition) + * [Syntax](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#syntax) + + +## Ways to deploy indexes - short summary + + + +##### Static-indexes: + +There are a few ways to deploy a static-index from the Client API: + +* The following methods are explained in section [Deploy a static-index](../../../../indexes/creating-and-deploying.mdx#deploy-a-static-index): + * Call `execute()` on a specific index instance. + * Call `executeIndex()` or `executeIndexes()` on your _DocumentStore_ object. + * Call `IndexCreation.createIndexes()`. + +* Alternatively, you can execute the `PutIndexesOperation` maintenance operation on the _DocumentStore_, **as explained below**. + + + + +##### Auto-indexes: + +* An auto-index is created by the server when making a filtering query that doesn't specify which index to use. + Learn more in [Creating auto indexes](../../../../indexes/creating-and-deploying.mdx#auto-indexes). + + + + +## Put indexes operation with IndexDefinition + +Using `PutIndexesOperation` with **IndexDefinition** allows you to: + + * Choose any name for the index. + This string-based name is specified when querying the index. + * Set low-level properties available in _IndexDefinition_. 
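+
+A note on usage (not part of the original examples): once deployed, the index is addressed by this string-based name at query time.
+A minimal sketch, assuming the `OrdersByTotal` index from the examples below:
+
+
+
+{`const session = documentStore.openSession();
+
+// Query the deployed static-index by its string-based name
+const orders = await session.query({ indexName: "OrdersByTotal" }).all();
+`}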
+ + + + +{`// Create an index definition +const indexDefinition = new IndexDefinition(); + +// Name is mandatory, can use any string +indexDefinition.name = "OrdersByTotal"; + +// Define the index map functions, string format +// A single string for a map-index, multiple strings for a multi-map-index +indexDefinition.maps = new Set([\` + // Define the collection that will be indexed: + from order in docs.Orders + + // Define the index-entry: + select new + { + // Define the index-fields within each index-entry: + Employee = order.Employee, + Company = order.Company, + Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount)) + }\` +]); + + // indexDefinition.reduce = ... + +// Can provide other index definitions available on the IndexDefinition class +// Override the default values, e.g.: +indexDefinition.deploymentMode = "Rolling"; +indexDefinition.priority = "High"; +indexDefinition.configuration = { + "Indexing.IndexMissingFieldsAsNull": "true" +}; +// See all available properties in syntax below + +// Define the put indexes operation, pass the index definition +// Note: multiple index definitions can be passed, see syntax below +const putIndexesOp = new PutIndexesOperation(indexDefinition); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(putIndexesOp); +`} + + + + +{`// Create an index definition +const indexDefinition = new IndexDefinition(); + +// Name is mandatory, can use any string +indexDefinition.name = "OrdersByTotal"; + +// Define the index map functions, string format +// A single string for a map-index, multiple strings for a multi-map-index +indexDefinition.maps = new Set([\` + map('Orders', function(order) { + return { + Employee: order.Employee, + Company: order.Company, + Total: order.Lines.reduce(function(sum, l) { + return sum + (l.Quantity * l.PricePerUnit) * (1 - l.Discount); + }, 0) + }; + });\` +]); + +// indexDefinition.reduce = ... + +// Can provide other index definitions available on the IndexDefinition class +// Override the default values, e.g.: +indexDefinition.deploymentMode = "Rolling"; +indexDefinition.priority = "High"; +indexDefinition.configuration = { + "Indexing.IndexMissingFieldsAsNull": "true" +}; +// See all available properties in syntax below + +// Define the put indexes operation, pass the index definition +// Note: multiple index definitions can be passed, see syntax below +const putIndexesOp = new PutIndexesOperation(indexDefinition); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(putIndexesOp); +`} + + + + + + +## Syntax + + + +{`const putIndexesOperation = new PutIndexesOperation(indexesToAdd); +`} + + + +| Parameter | Type | Description | +|------------------|------------------------|----------------------------------| +| **indexesToAdd** | `...IndexDefinition[]` | Definitions of indexes to deploy | + +
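+
+Since the parameter is a rest parameter, several index definitions can be deployed in a single call.
+A minimal sketch (the two definition variables are hypothetical placeholders, not defined on this page):
+
+
+
+{`// Deploy several indexes in one operation
+const putManyOp = new PutIndexesOperation(ordersIndexDefinition, employeesIndexDefinition);
+const results = await documentStore.maintenance.send(putManyOp);
+`}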
+ +| `IndexDefinition` parameter | Type | Description | +|----------------------------------------------|--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------| +| name | `string` | Name of the index, a unique identifier | +| maps | `Set` | All the map functions for the index | +| reduce | `string` | The index reduce function | +| deploymentMode | `object` | Deployment mode
(Parallel, Rolling) | +| state | `object` | State of index
(Normal, Disabled, Idle, Error) | +| priority | `object` | Priority of index
(Low, Normal, High) | +| lockMode | `object` | Lock mode of index
(Unlock, LockedIgnore, LockedError) |
+| fields | `Record<string, IndexFieldOptions>` | _IndexFieldOptions_ per index field |
+| additionalSources | `Record<string, string>` | Additional code files to be compiled with this index |
+| additionalAssemblies | `object[]` | Additional assemblies that are referenced |
+| configuration | `object` | Can override [indexing configuration](../../../../server/configuration/indexing-configuration.mdx) by setting this Record<string, string> |
+| outputReduceToCollection | `string` | A collection name for saving the reduce results as documents |
+| reduceOutputIndex | `number` | This number will be part of the reduce results documents IDs |
+| patternForOutputReduceToCollectionReferences | `string` | Pattern for documents IDs which reference IDs of reduce results documents |
+| patternReferencesCollectionName | `string` | A collection name for the reference documents created based on provided pattern |
+
+| `store.maintenance.send(putIndexesOp)` return value | Description |
+|------------------------------------------------------|----------------------------|
+| `object[]` | Operation result per index |
+
+| Operation result per index | Type | Description |
+|-----------------------------|----------|-----------------------------------------|
+| index | `string` | Name of the index that was added |
+| raftCommandIndex | `number` | Index of raft command that was executed |
+
+
+
+ diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-php.mdx new file mode 100644 index 0000000000..164976e0f2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-php.mdx @@ -0,0 +1,226 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* There are a few ways to create and deploy indexes in a database.
+
+* This page describes deploying a **static-index** using the `PutIndexesOperation` Operation.
+  For a general description of Operations see [what are operations](../../../../client-api/operations/what-are-operations.mdx).
+ +* In this page: + * [Ways to deploy indexes - short summary](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#ways-to-deploy-indexes---short-summary) + * [Put indexes operation with IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinition) + * [Put indexes operation with IndexDefinitionBuilder](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinitionbuilder) + * [Syntax](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#syntax) + + +## Ways to deploy indexes - short summary + +#### Static index: + +There are a few ways to deploy a static-index from the Client API: + + * Call `execute()` on a specific index instance + * Call `IndexCreation.create_indexes()` to deploy multiple indexes + * Execute `PutIndexesOperation` maintenance operation on the Document Store - see below + * Learn more in [static indexes](../../../../indexes/creating-and-deploying.mdx#static-indexes) + +#### Auto index: + + * An auto-index is created by the server when making a filtering query that doesn't specify which index to use + * Learn more in [auto indexes](../../../../indexes/creating-and-deploying.mdx#auto-indexes) + + + +## Put indexes operation with IndexDefinition + +Using `PutIndexesOperation` with **IndexDefinition** allows the following: + + * Choosing any name for the index. + * Setting low-level properties available in _IndexDefinition_. + + + + +{`// Create an index definition +$indexDefinition = new IndexDefinition(); + +// Name is mandatory, can use any string +$indexDefinition->setName("OrdersByTotal"); + +// Define the index Map functions, string format +// A single string for a map-index, multiple strings for a multi-map-index +$indexDefinition->setMaps([ + "// Define the collection that will be indexed:" . + "from order in docs.Orders" . + " // Define the index-entry:" . + " select new" . + " {" . + " // Define the index-fields within each index-entry:" . + " Employee = order.Employee," . + " Company = order.Company," . + " Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount))" . + " }" +]); + +// $indexDefinition->setReduce(...); + +// Can provide other index definitions available on the IndexDefinition class +// Override the default values, e.g.: +$indexDefinition->setDeploymentMode(IndexDeploymentMode::rolling()); +$indexDefinition->setPriority(IndexPriority::high()); + +$configuration = new IndexConfiguration(); +$configuration->offsetSet("Indexing.IndexMissingFieldsAsNull", "true"); +$indexDefinition->setConfiguration($configuration); + +// See all available properties in syntax below + +// Define the put indexes operation, pass the index definition +// Note: multiple index definitions can be passed, see syntax below +$putIndexesOp = new PutIndexesOperation($indexDefinition); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($putIndexesOp); +`} + + + + +{`// Create an index definition +$indexDefinition = new IndexDefinition(); + +// Name is mandatory, can use any string +$indexDefinition->setName("OrdersByTotal"); + +// Define the index Map functions, string format +// A single string for a map-index, multiple strings for a multi-map-index +$indexDefinition->setMaps([ + "map('Orders', function(order) {" . + " return {" . + " Employee: order.Employee," . + " Company: order.Company," . + " Total: order.Lines.reduce(function(sum, l) {" . 
+ " return sum + (l.Quantity * l.PricePerUnit) * (1 - l.Discount);" . + " }, 0)" . + " };" . + "});" +]); + +// $indexDefinition->setReduce(...); + +// Can provide other index definitions available on the IndexDefinition class +// Override the default values, e.g.: + +$indexDefinition->setDeploymentMode(IndexDeploymentMode::rolling()); +$indexDefinition->setPriority(IndexPriority::high()); + +$configuration = new IndexConfiguration(); +$configuration->offsetSet("Indexing.IndexMissingFieldsAsNull", "true"); +$indexDefinition->setConfiguration($configuration); +// See all available properties in syntax below + +// Define the put indexes operation, pass the index definition +// Note: multiple index definitions can be passed, see syntax below +$putIndexesOp = new PutIndexesOperation($indexDefinition); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($putIndexesOp); +`} + + + + + + +## Put indexes operation with IndexDefinitionBuilder + +* Using `PutIndexesOperation` with an IndexDefinition created from an **IndexDefinitionBuilder** + allows setting low-level properties available in _IndexDefinitionBuilder_. + +* Note that only map or map-reduce indexes can be generated by the _IndexDefinitionBuilder_. + To generate multi-map indexes, use the above _IndexDefinition_ option. + + + +{`// Create an index definition builder +$builder = new IndexDefinitionBuilder(); +$builder->setMap( + "// Define the collection that will be indexed:" . + " from order in docs.Orders" . + " // Define the index-entry:" . + " select new" . + " \{" . + " // Define the index-fields within each index-entry:" . + " Employee = order.Employee," . + " Company = order.Company," . + " Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount))" . + " \} " +); + +// Can provide other properties available on the IndexDefinitionBuilder class, e.g.: +$builder->setDeploymentMode(IndexDeploymentMode::rolling()); +$builder->setPriority(IndexPriority::high()); +// $builder->setReduce(...); + +// Generate index definition from builder +// Pass the conventions, needed for building the Maps property +$indexDefinition = $builder->toIndexDefinition($store->getConventions()); + +// Optionally, set the index name, can use any string +// If not provided then default name from builder is used, e.g.: "IndexDefinitionBuildersOfOrders" +$indexDefinition->setName("OrdersByTotal"); + +// Define the put indexes operation, pass the index definition +// Note: multiple index definitions can be passed, see syntax below +$putIndexesOp = new PutIndexesOperation($indexDefinition); + +// Execute the operation by passing it to maintenance.send +$store->maintenance()->send($putIndexesOp); +`} + + + + + +## Syntax + + + +{`PutIndexesOperation(IndexDefinition|IndexDefinitionArray|array ...$indexToAdd) +`} + + + +| Parameters | Type | Description | +| - |- | - | +| **$indexToAdd** | `IndexDefinition`
`IndexDefinitionArray`
`array`| Definitions of indexes to deploy | + +
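+For context, a minimal sketch of passing several definitions in one call - the variadic
+parameter accepts any number of index definitions. The sketch is shown in Python for
+brevity (the PHP call shape is the one documented above); constructor arguments follow
+this article, while any import paths are assumptions:
+
+{`# A hedged sketch - deploy two index definitions in a single operation
+orders_by_company = IndexDefinition(
+    name="Orders/ByCompany",
+    maps={"from order in docs.Orders select new { Company = order.Company }"})
+
+orders_by_employee = IndexDefinition(
+    name="Orders/ByEmployee",
+    maps={"from order in docs.Orders select new { Employee = order.Employee }"})
+
+# One round-trip deploys both definitions
+store.maintenance.send(PutIndexesOperation(orders_by_company, orders_by_employee))
+`}
+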
+ +| `IndexDefinition` parameter| Type | Description | +| - |- | - | +| **$name** | `?string` | Name of the index, a unique identifier | +| **$state** | `?IndexState` | State of index
(NORMAL, DISABLED, IDLE, ERROR) | +| **$priority** | `?IndexPriority` | Priority of index
(LOW, NORMAL, HIGH) | +| **$maps** | `?StringSet` | All the map functions for the index | +| **$reduce** | `?string` | The index reduce function | +| **$deploymentMode** | `?IndexDeploymentMode` | Deployment mode
(`parallel`, `rolling`) | +| **$lockMode** | `?IndexLockMode` | Lock mode of index
(`Unlock`, `LockedIgnore`, `LockedError`) | +| **$fields** | `?IndexFieldOptionsArray` | _IndexFieldOptions_ per index field | +| **$additionalSources** | `?AdditionalSourcesArray` | Additional code files to be compiled with this index | +| **$additionalAssemblies** | `?AdditionalAssemblySet` | Additional assemblies that are referenced | +| **$configuration** | `?IndexConfiguration` | Can override [indexing configuration](../../../../server/configuration/indexing-configuration.mdx) by setting this dictionary | +| **$outputReduceToCollection** | `?string` | A collection name for saving the reduce results as documents | +| **$reduceOutputIndex** | `?int` | This number will be part of the reduce results documents IDs | +| **$patternForOutputReduceToCollectionReferences** | `?string` | Pattern for documents IDs which reference IDs of reduce results documents | +| **$patternReferencesCollectionName** | `?string` | A collection name for the reference documents created based on provided pattern | +| **$sourceType** | `?IndexSourceType` | Index source type
(`None`, `Documents`, `TimeSeries`, `Counters`) | +| **$type** | `?IndexType` | Index type
(`None`, `AutoMap`, `AutoMapReduce`, `Map`, `MapReduce`, `Faulty`, `JavaScriptMap`, `JavaScriptMapReduce`) | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-python.mdx new file mode 100644 index 0000000000..71163415df --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_put-indexes-python.mdx @@ -0,0 +1,224 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* There are a few ways to create and deploy indexes in a database. + +* This page describes deploying a **static-index** using the `PutIndexesOperation` Operation. + For a general description of Operations see [what are operations](../../../../client-api/operations/what-are-operations.mdx). + +* In this page: + * [Ways to deploy indexes - short summary](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#ways-to-deploy-indexes---short-summary) + * [Put indexes operation with IndexDefinition](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinition) + * [Put indexes operation with IndexDefinitionBuilder](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinitionbuilder) + * [Syntax](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx#syntax) + + +## Ways to deploy indexes - short summary + +#### Static index: + +There are a few ways to deploy a static-index from the Client API: + + * Call `execute()` on a specific index instance + * Call `IndexCreation.create_indexes()` to deploy multiple indexes + * Execute `PutIndexesOperation` maintenance operation on the Document Store - see below + * Learn more in [static indexes](../../../../indexes/creating-and-deploying.mdx#static-indexes) + +#### Auto index: + + * An auto-index is created by the server when making a filtering query that doesn't specify which index to use + * Learn more in [auto indexes](../../../../indexes/creating-and-deploying.mdx#auto-indexes) + + + +## Put indexes operation with IndexDefinition + +Using `PutIndexesOperation` with **IndexDefinition** allows the following: + + * Choosing any name for the index. + * Setting low-level properties available in _IndexDefinition_. + + + + +{`# Create an index definition +index_definition = IndexDefinition( + # Name is mandatory, can use any string + name="OrdersByTotal", + # Define the index Map functions, string format + # A single string for a map-index, multiple strings for a multi-map-index + maps={ + """ + // Define the collection that will be indexed: + from order in docs.Orders + + // Define the index-entry: + select new + { + // Define the index-fields within each index-entry: + Employee = order.Employee, + Company = order.Company, + Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount)) + } + """ + }, + # reduce = ... 
+ # Can provide other index definitions available on the IndexDefinition class + # Override the default values, e.g.: + deployment_mode=IndexDeploymentMode.ROLLING, + priority=IndexPriority.HIGH, + configuration={"Indexing.IndexMissingFieldsAsNull": "true"}, + # See all available properties in syntax below +) + +# Define the put indexes operation, pass the index definition +# Note: multiple index definitions can be passed, see syntax below +put_indexes_op = PutIndexesOperation(index_definition) + +# Execute the operation by passing it to maintenance.send +store.maintenance.send(put_indexes_op) +`} + + + + +{`# Create an index definition +index_definition = IndexDefinition( + # Name is mandatory, can use any string + name="OrdersByTotal", + # Define the index map functions, string format + # A single string for a map-index, multiple strings for a multimap index + maps={ + """ + map('Orders', function(order) { + return { + Employee: order.Employee, + Company: order.Company, + Total: order.Lines.reduce(function(sum, l) { + return sum + (l.Quantity * l.PricePerUnit) * (1 - l.Discount); + }, 0) + }; + }); + """ + }, + # reduce = ..., + # Can provide other index definitions available on the IndexDefinition class + # Override the default values, e.g.: + deployment_mode=IndexDeploymentMode.ROLLING, + priority=IndexPriority.HIGH, + configuration={"Indexing.IndexMissingFieldsAsNull": "true"}, + # See all available properties in syntax below +) +# Define the put indexes operation, pass the index definition +# Note: multiple index definitions can be passed, see syntax below +put_indexes_op = PutIndexesOperation(index_definition) + +# Execute the operation by passing it to Maintenance.Send +store.maintenance.send(put_indexes_op) +`} + + + + + + +## Put indexes operation with IndexDefinitionBuilder + +* Using `PutIndexesOperation` with an IndexDefinition created from an **IndexDefinitionBuilder** + allows setting low-level properties available in _IndexDefinitionBuilder_. + +* Note that only map or map-reduce indexes can be generated by the _IndexDefinitionBuilder_. + To generate multi-map indexes, use the above _IndexDefinition_ option. + + + +{`# Create an index definition builder +builder = IndexDefinitionBuilder() +builder.map = """ + // Define the collection that will be indexed: + from order in docs.Orders + + // Define the index-entry: + select new + \{ + // Define the index-fields within each index-entry: + Employee = order.Employee, + Company = order.Company, + Total = order.Lines.Sum(l => (l.Quantity * l.PricePerUnit) * (1 - l.Discount)) + \} + """ +# Can provide other properties available on the IndexDefinitionBuilder class, e.g.: +builder.deployment_mode = IndexDeploymentMode.ROLLING +builder.priority = IndexPriority.HIGH +# builder.reduce = ..., etc. 
+
+# Generate the index definition from the builder
+# Pass the conventions, needed for building the maps property
+# Note: assign the returned definition - to_index_definition does not mutate in place
+index_definition = builder.to_index_definition(store.conventions)
+
+# Optionally, set the index name, can use any string
+# If not provided, the default name derived from the builder is used, e.g.: "IndexDefinitionBuildersOfOrders"
+index_definition.name = "OrdersByTotal"
+
+# Define the put indexes operation, pass the index definition
+# Note: multiple index definitions can be passed, see syntax below
+put_indexes_op = PutIndexesOperation(index_definition)
+
+# Execute the operation by passing it to maintenance.send
+store.maintenance.send(put_indexes_op)
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`class PutIndexesOperation(MaintenanceOperation):
+    def __init__(self, *indexes_to_add: IndexDefinition): ...
+`}
+
+
+
+| Parameters | Type | Description |
+| - |- | - |
+| **\*indexes_to_add** | `IndexDefinition` | Definitions of indexes to deploy |
+
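+A short, hedged sketch of using the return value - maintenance.send returns one result
+entry per deployed definition, with the fields listed in the PutIndexResult table below
+(definition_1 and definition_2 stand for hypothetical IndexDefinition instances):
+
+{`# Deploy two definitions in one call and inspect the results
+results = store.maintenance.send(PutIndexesOperation(definition_1, definition_2))
+
+for result in results:
+    # Each entry names the deployed index and the raft command that applied it
+    print(result.index, result.raft_command_index)
+`}
+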
+ +| `IndexDefinition` parameter| Type | Description | +| - |- | - | +| **name** | `str` | Name of the index, a unique identifier | +| **maps** | `Set[str]` | All the map functions for the index | +| **reduce** | `str` | The index reduce function | +| **deployment_mode** | `IndexDeploymentMode` | Deployment mode
(PARALLEL, ROLLING) | +| **state** | `IndexState` | State of index
(NORMAL, DISABLED, IDLE, ERROR) | +| **priority** | `IndexPriority` | Priority of index
(LOW, NORMAL, HIGH) | +| **lock_mode** | `IndexLockMode` | Lock mode of index
(UNLOCK, LOCKED_IGNORE, LOCKED_ERROR) |
+| **fields** | `Dict[str, IndexFieldOptions]` | _IndexFieldOptions_ per index field |
+| **additional_sources** | `Dict[str, str]` | Additional code files to be compiled with this index |
+| **additional_assemblies** | `Set[AdditionalAssembly]` | Additional assemblies that are referenced |
+| **configuration** | `IndexConfiguration` | Can override [indexing configuration](../../../../server/configuration/indexing-configuration.mdx) by setting this dictionary |
+| **output_reduce_to_collection** | `str` | A collection name for saving the reduce results as documents |
+| **reduce_output_index** | `int` | This number will be part of the reduce results documents IDs |
+| **pattern_for_output_reduce_to_collection_references** | `str` | Pattern for documents IDs which reference IDs of reduce results documents |
+| **pattern_references_collection_name** | `str` | A collection name for the reference documents created based on provided pattern |
+
+| `store.maintenance.send(put_indexes_op)` return value | Description |
+| - | - |
+| `List[PutIndexResult]` | List of _PutIndexResult_ per index |
+
+| `PutIndexResult` parameter | Type | Description |
+| - | - | - |
+| **index** | `str` | Name of the index that was added |
+| **raft_command_index** | `int` | Index of raft command that was executed |
+
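+The examples above leave the reduce function commented out. For completeness, a hedged
+sketch of a map-reduce definition using the same constructor arguments (the map and
+reduce strings are illustrative):
+
+{`# A hedged sketch of a map-reduce IndexDefinition
+index_definition = IndexDefinition(
+    name="Orders/CountByCompany",
+    maps={
+        """
+        from order in docs.Orders
+        select new { Company = order.Company, Count = 1 }
+        """
+    },
+    # The reduce function groups the map entries by Company:
+    reduce="""
+    from result in results
+    group result by result.Company into g
+    select new { Company = g.Key, Count = g.Sum(x => x.Count) }
+    """,
+)
+
+store.maintenance.send(PutIndexesOperation(index_definition))
+`}
+
+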
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-csharp.mdx
new file mode 100644
index 0000000000..b4faca553c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-csharp.mdx
@@ -0,0 +1,74 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `ResetIndexOperation` to rebuild an index:
+  * All existing indexed data will be removed.
+  * All items matched by the index definition will be re-indexed.
+
+* **Indexes scope**:
+  * Both static and auto indexes can be reset.
+
+* **Nodes scope**:
+  * When resetting an index from the **client**:
+    The index is reset on the preferred node only, and Not on all the database-group nodes.
+  * When resetting an index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view:
+    The index is reset on the local node the browser is opened on, even if it is Not the preferred node.
+
+* If the index is [disabled](../../../../client-api/operations/maintenance/indexes/disable-index.mdx)
+  or [paused](../../../../client-api/operations/maintenance/indexes/stop-index.mdx), resetting the index
+  will put it back to the **normal** running state on the local node where the action was performed.
+
+* In this page:
+  * [Reset index](../../../../client-api/operations/maintenance/indexes/reset-index.mdx#reset-index)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/reset-index.mdx#syntax)
+
+
+## Reset index
+
+
+
+
+{`// Define the reset index operation, pass index name
+var resetIndexOp = new ResetIndexOperation("Orders/Totals");
+
+// Execute the operation by passing it to Maintenance.Send
+// An exception will be thrown if index does not exist
+store.Maintenance.Send(resetIndexOp);
+`}
+
+
+
+
+{`// Define the reset index operation, pass index name
+var resetIndexOp = new ResetIndexOperation("Orders/Totals");
+
+// Execute the operation by passing it to Maintenance.SendAsync
+// An exception will be thrown if index does not exist
+await store.Maintenance.SendAsync(resetIndexOp);
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public ResetIndexOperation(string indexName);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **indexName** | `string` | Name of an index to reset |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-java.mdx
new file mode 100644
index 0000000000..f9d3604916
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-java.mdx
@@ -0,0 +1,32 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+**ResetIndexOperation** removes all indexing data from the server for a given index so that indexing can start from scratch for that index.
+
+## Syntax
+
+
+
+{`public ResetIndexOperation(String indexName)
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **indexName** | String | name of an index to reset |
+
+
+## Example
+
+
+
+{`store.maintenance()
+    .send(new ResetIndexOperation("Orders/Totals"));
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-nodejs.mdx
new file mode 100644
index 0000000000..b0a26d5002
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-nodejs.mdx
@@ -0,0 +1,61 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `ResetIndexOperation` to rebuild an index:
+  * All existing indexed data will be removed.
+  * All items matched by the index definition will be re-indexed.
+
+* **Indexes scope**:
+  * Both static and auto indexes can be reset.
+
+* **Nodes scope**:
+  * When resetting an index from the **client**:
+    The index is reset on the preferred node only, and Not on all the database-group nodes.
+  * When resetting an index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view:
+    The index is reset on the local node the browser is opened on, even if it is Not the preferred node.
+
+* If the index is [disabled](../../../../client-api/operations/maintenance/indexes/disable-index.mdx)
+  or [paused](../../../../client-api/operations/maintenance/indexes/stop-index.mdx), resetting the index
+  will put it back to the **normal** running state on the local node where the action was performed.
+
+* In this page:
+  * [Reset index](../../../../client-api/operations/maintenance/indexes/reset-index.mdx#reset-index)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/reset-index.mdx#syntax)
+
+
+## Reset index
+
+
+
+{`// Define the reset index operation, pass index name
+const resetIndexOp = new ResetIndexOperation("Orders/Totals");
+
+// Execute the operation by passing it to maintenance.send
+// An exception will be thrown if index does not exist
+await store.maintenance.send(resetIndexOp);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`const resetIndexOp = new ResetIndexOperation(indexName);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **indexName** | `string` | Name of an index to reset |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-php.mdx
new file mode 100644
index 0000000000..6ebf10ead6
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-php.mdx
@@ -0,0 +1,61 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `ResetIndexOperation` to rebuild an index:
+  * All existing indexed data will be removed.
+  * All items matched by the index definition will be re-indexed.
+
+* **Indexes scope**:
+  * Both static and auto indexes can be reset.
+
+* **Nodes scope**:
+  * When resetting an index from the **client**:
+    The index is reset on the preferred node only, and Not on all the database-group nodes.
+  * When resetting an index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view:
+    The index is reset on the local node the browser is opened on, even if it is Not the preferred node.
+
+* If the index is [disabled](../../../../client-api/operations/maintenance/indexes/disable-index.mdx)
+  or [paused](../../../../client-api/operations/maintenance/indexes/stop-index.mdx), resetting the index
+  will put it back to the **normal** running state on the local node where the action was performed.
+
+* In this page:
+  * [Reset index](../../../../client-api/operations/maintenance/indexes/reset-index.mdx#reset-index)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/reset-index.mdx#syntax)
+
+
+## Reset index
+
+
+
+{`// Define the reset index operation, pass index name
+$resetIndexOp = new ResetIndexOperation("Orders/Totals");
+
+// Execute the operation by passing it to maintenance()->send
+// An exception will be thrown if index does not exist
+$store->maintenance()->send($resetIndexOp);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public ResetIndexOperation(?string $indexName);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **$indexName** | `?string` | Name of an index to reset |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-python.mdx
new file mode 100644
index 0000000000..1d3a205e79
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_reset-index-python.mdx
@@ -0,0 +1,62 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `ResetIndexOperation` to rebuild an index:
+  * All existing indexed data will be removed.
+  * All items matched by the index definition will be re-indexed.
+
+* **Indexes scope**:
+  * Both static and auto indexes can be reset.
+
+* **Nodes scope**:
+  * When resetting an index from the **client**:
+    The index is reset on the preferred node only, and Not on all the database-group nodes.
+  * When resetting an index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view:
+    The index is reset on the local node the browser is opened on, even if it is Not the preferred node.
+
+* If the index is [disabled](../../../../client-api/operations/maintenance/indexes/disable-index.mdx)
+  or [paused](../../../../client-api/operations/maintenance/indexes/stop-index.mdx), resetting the index
+  will put it back to the **normal** running state on the local node where the action was performed.
+
+* In this page:
+  * [Reset index](../../../../client-api/operations/maintenance/indexes/reset-index.mdx#reset-index)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/reset-index.mdx#syntax)
+
+
+## Reset index
+
+
+
+{`# Define the reset index operation, pass index name
+reset_index_op = ResetIndexOperation("Orders/Totals")
+
+# Execute the operation by passing it to maintenance.send
+# An exception will be thrown if index does not exist
+store.maintenance.send(reset_index_op)
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`class ResetIndexOperation(VoidMaintenanceOperation):
+    def __init__(self, index_name: str): ...
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **index_name** | `str` | Name of an index to reset |
+
+
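+Since ResetIndexOperation takes a single index name, rebuilding several indexes means
+sending one operation per index. A minimal hedged sketch (the index names are examples):
+
+{`# Rebuild several indexes; the client raises if an index does not exist (see above)
+for index_name in ["Orders/Totals", "Orders/ByCompany"]:
+    try:
+        store.maintenance.send(ResetIndexOperation(index_name))
+    except Exception as e:
+        print("Could not reset", index_name, ":", e)
+`}
+
+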
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-csharp.mdx
new file mode 100644
index 0000000000..a0ad8d7fda
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-csharp.mdx
@@ -0,0 +1,201 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The lock mode controls the behavior of index modifications.
+  Use `SetIndexesLockOperation` to modify the **lock mode** for a single index or multiple indexes.
+
+* **Indexes scope**:
+  The lock mode can be set only for static-indexes, not for auto-indexes.
+
+* **Nodes scope**:
+  The lock mode will be updated on all nodes in the database group.
+
+* Setting the lock mode can also be done in the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view.
+  Locking an index is not a security measure; the index can be unlocked at any time.
+
+* In this page:
+  * [Lock modes](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#lock-modes)
+  * [Sample usage flow](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#sample-usage-flow)
+  * [Set lock mode - single index](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#set-lock-mode---single-index)
+  * [Set lock mode - multiple indexes](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#set-lock-mode---multiple-indexes)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#syntax)
+
+
+## Lock modes
+
+* **Unlocked** - when lock mode is set to `Unlock`:
+  * Any change to the index definition will be applied.
+  * If the new index definition differs from the one stored on the server,
+    the index will be updated and the data will be re-indexed using the new index definition.
+  * The index can be deleted.
+
+* **Locked (ignore)** - when lock mode is set to `LockedIgnore`:
+  * Index definition changes will Not be applied.
+  * Modifying the index definition will return successfully and no error will be raised,
+    however, no change will be made to the index definition on the server.
+  * Trying to delete the index will not remove it from the server, and no error will be raised.
+
+* **Locked (error)** - when lock mode is set to `LockedError`:
+  * Index definition changes will Not be applied.
+  * An exception will be thrown upon trying to modify the index.
+  * The index cannot be deleted. Attempting to do so will result in an exception.
+
+
+
+## Sample usage flow
+
+Consider the following scenario:
+
+* Your client application defines and [deploys a static-index](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx) upon application startup.
+
+* After the application has started, you make a change to your index definition and re-indexing occurs.
+  However, if the index lock mode is _'Unlock'_, the next time your application starts,
+  it will reset the index definition back to the original version.
+
+* Locking the index allows you to make changes to the running index and prevents the application
+  from setting it back to the previous definition upon startup.
See the following steps: +
+ + 1. Run your application + 2. Modify the index definition on the server (from Studio, or from another application), + and then set this index lock mode to `LockedIgnore`. + 3. A side-by-side replacement index is created on the server. + It will index your dataset according to the **new** definition. + 4. At this point, if any instance of your original application is started, + the code that defines and deploys the index upon startup will have no effect + since the index is 'locked'. + 5. Once the replacement index is done indexing, it will replace the original index. + + + +## Set lock mode - single index + + + + +{`// Define the set lock mode operation +// Pass index name & lock mode +var setLockModeOp = new SetIndexesLockOperation("Orders/Totals", IndexLockMode.LockedIgnore); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if index does not exist +store.Maintenance.Send(setLockModeOp); + +// Lock mode is now set to 'LockedIgnore' +// Any modifications done now to the index will Not be applied, and will Not throw +`} + + + + +{`// Define the set lock mode operation +// Pass index name & lock mode +var setLockModeOp = new SetIndexesLockOperation("Orders/Totals", IndexLockMode.LockedIgnore); + +// Execute the operation by passing it to Maintenance.SendAsync +// An exception will be thrown if index does not exist +await store.Maintenance.SendAsync(setLockModeOp); + +// Lock mode is now set to 'LockedIgnore' +// Any modifications done now to the index will Not be applied, and will Not throw +`} + + + + + + +## Set lock mode - multiple indexes + + + + +{`// Define the index list and the new lock mode: +var parameters = new SetIndexesLockOperation.Parameters { + IndexNames = new[] {"Orders/Totals", "Orders/ByCompany"}, + Mode = IndexLockMode.LockedError +}; + +// Define the set lock mode operation, pass the parameters +var setLockModeOp = new SetIndexesLockOperation(parameters); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if any of the specified indexes do not exist +store.Maintenance.Send(setLockModeOp); + +// Lock mode is now set to 'LockedError' on both indexes +// Any modifications done now to either index will throw +`} + + + + +{`// Define the index list and the new lock mode: +var parameters = new SetIndexesLockOperation.Parameters { + IndexNames = new[] {"Orders/Totals", "Orders/ByCompany"}, + Mode = IndexLockMode.LockedError +}; + +// Define the set lock mode operation, pass the parameters +var setLockModeOp = new SetIndexesLockOperation(parameters); + +// Execute the operation by passing it to Maintenance.SendAsync +// An exception will be thrown if any of the specified indexes do not exist +await store.Maintenance.SendAsync(setLockModeOp); + +// Lock mode is now set to 'LockedError' on both indexes +// Any modifications done now to either index will throw +`} + + + + + + +## Syntax + + + +{`// Available overloads: +public SetIndexesLockOperation(string indexName, IndexLockMode mode); +public SetIndexesLockOperation(Parameters parameters); +`} + + + +| Parameters | Type | Description | +|- | - | - | +| **indexName** | string | Index name for which to set lock mode | +| **mode** | `IndexLockMode` | Lock mode to set | +| **parameters** | `SetIndexesLockOperation.Parameters` | List of indexes + Lock mode to set.
An exception is thrown if any of the specified indexes do not exist. | + + + +{`public enum IndexLockMode +\{ + Unlock, + LockedIgnore, + LockedError +\} +`} + + + + + +{`public class Parameters +\{ + public string[] IndexNames \{ get; set; \} + public IndexLockMode Mode \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-java.mdx new file mode 100644 index 0000000000..467df95a03 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-java.mdx @@ -0,0 +1,83 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**SetIndexesLockOperation** allows you to change index lock mode for a given index or indexes. + +## Syntax + + + +{`public SetIndexesLockOperation(String indexName, IndexLockMode mode) +public SetIndexesLockOperation(SetIndexesLockOperation.Parameters parameters) +`} + + + + + +{`public enum IndexLockMode \{ + UNLOCK, + LOCKED_IGNORE, + LOCKED_ERROR +\} +`} + + + + + +{`public static class Parameters \{ + private String[] indexNames; + private IndexLockMode mode; + + public String[] getIndexNames() \{ + return indexNames; + \} + + public void setIndexNames(String[] indexNames) \{ + this.indexNames = indexNames; + \} + + public IndexLockMode getMode() \{ + return mode; + \} + + public void setMode(IndexLockMode mode) \{ + this.mode = mode; + \} +\} +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **name** | String | name of an index to change lock mode for | +| **lockMode** | IndexLockMode | new index lock mode | +| **parameters** | SetIndexesLockOperation.Parameters | list of indexes + new index lock mode | + +## Example I + + + +{`store.maintenance().send(new SetIndexesLockOperation("Orders/Totals", IndexLockMode.LOCKED_IGNORE)); +`} + + + +## Example II + + + +{`SetIndexesLockOperation.Parameters parameters = new SetIndexesLockOperation.Parameters(); +parameters.setIndexNames(new String[]\{ "Orders/Totals", "Orders/ByCompany" \}); +parameters.setMode(IndexLockMode.LOCKED_IGNORE); + +store.maintenance().send(new SetIndexesLockOperation(parameters)); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-nodejs.mdx new file mode 100644 index 0000000000..5c1684f5ab --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-nodejs.mdx @@ -0,0 +1,151 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The lock mode controls the behavior of index modifications. + Use `SetIndexesLockOperation` to modify the **lock mode** for a single index or multiple indexes. + +* **Indexes scope**: + The lock mode can be set only for static-indexes, not for auto-indexes. + +* **Nodes scope**: + The lock mode will be updated on all nodes in the database group. + +* Setting the lock mode can also be done in the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view. + Locking an index is not a security measure, the index can be unlocked at any time. 
+
+
+* In this page:
+  * [Lock modes](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#lock-modes)
+  * [Sample usage flow](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#sample-usage-flow)
+  * [Set lock mode - single index](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#set-lock-mode---single-index)
+  * [Set lock mode - multiple indexes](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#set-lock-mode---multiple-indexes)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#syntax)
+
+
+## Lock modes
+
+* **Unlocked** - when lock mode is set to `Unlock`:
+  * Any change to the index definition will be applied.
+  * If the new index definition differs from the one stored on the server,
+    the index will be updated and the data will be re-indexed using the new index definition.
+  * The index can be deleted.
+
+* **Locked (ignore)** - when lock mode is set to `LockedIgnore`:
+  * Index definition changes will Not be applied.
+  * Modifying the index definition will return successfully and no error will be raised,
+    however, no change will be made to the index definition on the server.
+  * Trying to delete the index will not remove it from the server, and no error will be raised.
+
+* **Locked (error)** - when lock mode is set to `LockedError`:
+  * Index definition changes will Not be applied.
+  * An exception will be thrown upon trying to modify the index.
+  * The index cannot be deleted. Attempting to do so will result in an exception.
+
+
+
+## Sample usage flow
+
+Consider the following scenario:
+
+* Your client application defines and [deploys a static-index](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx) upon application startup.
+
+* After the application has started, you make a change to your index definition and re-indexing occurs.
+  However, if the index lock mode is _'Unlock'_, the next time your application starts,
+  it will reset the index definition back to the original version.
+
+* Locking the index allows you to make changes to the running index and prevents the application
+  from setting it back to the previous definition upon startup. See the following steps:
+
+ + 1. Run your application + 2. Modify the index definition on the server (from Studio, or from another application), + and then set this index lock mode to `LockedIgnore`. + 3. A side-by-side replacement index is created on the server. + It will index your dataset according to the **new** definition. + 4. At this point, if any instance of your original application is started, + the code that defines and deploys the index upon startup will have no effect + since the index is 'locked'. + 5. Once the replacement index is done indexing, it will replace the original index. + + + +## Set lock mode - single index + + + +{`// Define the set lock mode operation +// Pass index name & lock mode +const setLockModeOp = new SetIndexesLockOperation("Orders/Totals", "LockedIgnore"); + +// Execute the operation by passing it to maintenance.send +// An exception will be thrown if index does not exist +await store.maintenance.send(setLockModeOp); + +// Lock mode is now set to 'LockedIgnore' +// Any modifications done now to the index will Not be applied, and will Not throw +`} + + + + + +## Set lock mode - multiple indexes + + + +{`// Define the index list and the new lock mode: +const parameters = \{ + indexNames: ["Orders/Totals", "Orders/ByCompany"], + mode: "LockedError" +\} + +// Define the set lock mode operation, pass the parameters +const setLockModeOp = new SetIndexesLockOperation(parameters); + +// Execute the operation by passing it to maintenance.send +// An exception will be thrown if any of the specified indexes do not exist +await store.maintenance.send(setLockModeOp); + +// Lock mode is now set to 'LockedError' on both indexes +// Any modifications done now to either index will throw +`} + + + + + +## Syntax + + + +{`// Available overloads: +const setLockModeOp = new SetIndexesLockOperation(indexName, mode); +const setLockModeOp = new SetIndexesLockOperation(parameters); +`} + + + +| Parameters | Type | Description | +|- | - | - | +| **indexName** | string | Index name for which to set lock mode | +| **mode** | `"Unlock"` /
`"LockedIgnore"` /
`"LockedError"` | Lock mode to set | +| **parameters** | parameters object | List of indexes + lock mode to set.
An exception is thrown if any of the specified indexes do not exist. |
+
+
+
+{`// parameters object
+\{
+    indexNames, // string[], list of index names
+    mode // Lock mode to set
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-php.mdx
new file mode 100644
index 0000000000..5a4059b043
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-php.mdx
@@ -0,0 +1,155 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The lock mode controls the behavior of index modifications.
+  Use `SetIndexesLockOperation` to modify the **lock mode** for a single index or multiple indexes.
+
+* **Indexes scope**:
+  The lock mode can be set only for static-indexes, not for auto-indexes.
+
+* **Nodes scope**:
+  The lock mode will be updated on all nodes in the database group.
+
+* Setting the lock mode can also be done in the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view.
+  Locking an index is not a security measure; the index can be unlocked at any time.
+
+* In this page:
+  * [Lock modes](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#lock-modes)
+  * [Sample usage flow](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#sample-usage-flow)
+  * [Set lock mode - single index](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#set-lock-mode---single-index)
+  * [Set lock mode - multiple indexes](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#set-lock-mode---multiple-indexes)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#syntax)
+
+
+## Lock modes
+
+* **Unlocked** - when lock mode is set using `unlock()`:
+  * Any change to the index definition will be applied.
+  * If the new index definition differs from the one stored on the server,
+    the index will be updated and the data will be re-indexed using the new index definition.
+  * The index can be deleted.
+
+* **Locked (ignore)** - when lock mode is set using `lockedIgnore()`:
+  * Index definition changes will Not be applied.
+  * Modifying the index definition will return successfully and no error will be raised,
+    however, no change will be made to the index definition on the server.
+  * Trying to delete the index will not remove it from the server, and no error will be raised.
+
+* **Locked (error)** - when lock mode is set using `lockedError()`:
+  * Index definition changes will Not be applied.
+  * An exception will be thrown upon trying to modify the index.
+  * The index cannot be deleted. Attempting to do so will result in an exception.
+
+
+
+## Sample usage flow
+
+Consider the following scenario:
+
+* Your client application defines and [deploys a static-index](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx) upon application startup.
+
+* After the application has started, you make a change to your index definition and re-indexing occurs.
+  However, if the index lock mode is `unlock`, the next time your application starts,
+  it will reset the index definition back to the original version.
+
+* Locking the index allows you to make changes to the running index and prevents the application
+  from setting it back to the previous definition upon startup. See the following steps:
+
+
+  1. Run your application.
+  2. Modify the index definition on the server (from Studio, or from another application),
+     and then set this index lock mode to `lockedIgnore`.
+  3. A side-by-side replacement index is created on the server.
+     It will index your dataset according to the **new** definition.
+  4. At this point, if any instance of your original application is started,
+     the code that defines and deploys the index upon startup will have no effect
+     since the index is locked.
+  5. Once the replacement index is done indexing, it will replace the original index.
+
+
+
+## Set lock mode - single index
+
+
+
+{`// Define the set lock mode operation
+// Pass index name & lock mode
+$setLockModeOp = new SetIndexesLockOperation("Orders/Totals", IndexLockMode::lockedIgnore());
+
+// Execute the operation by passing it to maintenance()->send
+// An exception will be thrown if index does not exist
+$store->maintenance()->send($setLockModeOp);
+
+// Lock mode is now set to 'LockedIgnore'
+// Any modifications done now to the index will Not be applied, and will Not throw
+`}
+
+
+
+
+
+## Set lock mode - multiple indexes
+
+
+
+{`// Define the index list and the new lock mode:
+$parameters = new IndexLockParameters();
+$parameters->setIndexNames([ "Orders/Totals", "Orders/ByCompany" ]);
+$parameters->setMode(IndexLockMode::lockedError());
+
+// Define the set lock mode operation, pass the parameters
+$setLockModeOp = new SetIndexesLockOperation($parameters);
+
+// Execute the operation by passing it to maintenance()->send
+// An exception will be thrown if any of the specified indexes do not exist
+$store->maintenance()->send($setLockModeOp);
+
+// Lock mode is now set to 'LockedError' on both indexes
+// Any modifications done now to either index will throw
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+SetIndexesLockOperation(?string $indexName, ?IndexLockMode $mode);
+SetIndexesLockOperation(?Parameters $parameters);
+`}
+
+
+
+| Parameters | Type | Description |
+|- | - | - |
+| **$mode** | `?IndexLockMode` | Lock mode to set |
+| **$indexName** | `?string` | Index name to set lock mode for |
+| **$parameters** | `?Parameters` | Index lock parameters |
+
+
+
+{`class IndexLockMode
+\{
+    public static function unlock(): IndexLockMode;
+    public static function lockedIgnore(): IndexLockMode;
+    public static function lockedError(): IndexLockMode;
+
+    public function isUnlock(): bool;
+    public function isLockedIgnore(): bool;
+    public function isLockedError(): bool;
+\}
+`}
+
+
+
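+To make the "no error will be raised" behavior of the ignore mode concrete, a hedged
+sketch (shown in Python for brevity - the equivalent PHP calls are the ones above;
+modified_definition stands for a hypothetical updated index definition):
+
+{`# Lock the index, then try to redeploy a changed definition
+store.maintenance.send(SetIndexesLockOperation(IndexLockMode.LOCKED_IGNORE, "Orders/Totals"))
+
+# This call returns successfully, but the server keeps the old definition -
+# no exception is raised and no change is applied
+store.maintenance.send(PutIndexesOperation(modified_definition))
+`}
+
+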
diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-python.mdx
new file mode 100644
index 0000000000..bc594b1f55
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-lock-python.mdx
@@ -0,0 +1,145 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The lock mode controls the behavior of index modifications.
+  Use `SetIndexesLockOperation` to modify the **lock mode** for a single index or multiple indexes.
+
+* **Indexes scope**:
+  The lock mode can be set only for static-indexes, not for auto-indexes.
+
+* **Nodes scope**:
+  The lock mode will be updated on all nodes in the database group.
+
+* Setting the lock mode can also be done in the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view.
+  Locking an index is not a security measure; the index can be unlocked at any time.
+
+* In this page:
+  * [Lock modes](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#lock-modes)
+  * [Sample usage flow](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#sample-usage-flow)
+  * [Set lock mode - single index](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#set-lock-mode---single-index)
+  * [Set lock mode - multiple indexes](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#set-lock-mode---multiple-indexes)
+  * [Syntax](../../../../client-api/operations/maintenance/indexes/set-index-lock.mdx#syntax)
+
+
+## Lock modes
+
+* **Unlocked** - when lock mode is set to `UNLOCK`:
+  * Any change to the index definition will be applied.
+  * If the new index definition differs from the one stored on the server,
+    the index will be updated and the data will be re-indexed using the new index definition.
+  * The index can be deleted.
+
+* **Locked (ignore)** - when lock mode is set to `LOCKED_IGNORE`:
+  * Index definition changes will Not be applied.
+  * Modifying the index definition will return successfully and no error will be raised,
+    however, no change will be made to the index definition on the server.
+  * Trying to delete the index will not remove it from the server, and no error will be raised.
+
+* **Locked (error)** - when lock mode is set to `LOCKED_ERROR`:
+  * Index definition changes will Not be applied.
+  * An exception will be thrown upon trying to modify the index.
+  * The index cannot be deleted. Attempting to do so will result in an exception.
+
+
+
+## Sample usage flow
+
+Consider the following scenario:
+
+* Your client application defines and [deploys a static-index](../../../../client-api/operations/maintenance/indexes/put-indexes.mdx) upon application startup.
+
+* After the application has started, you make a change to your index definition and re-indexing occurs.
+  However, if the index lock mode is `UNLOCK`, the next time your application starts,
+  it will reset the index definition back to the original version.
+
+* Locking the index allows you to make changes to the running index and prevents the application
+  from setting it back to the previous definition upon startup. See the following steps:
+
+ + 1. Run your application + 2. Modify the index definition on the server (from Studio, or from another application), + and then set this index lock mode to `LOCKED_IGNORE`. + 3. A side-by-side replacement index is created on the server. + It will index your dataset according to the **new** definition. + 4. At this point, if any instance of your original application is started, + the code that defines and deploys the index upon startup will have no effect + since the index is `LOCKED`. + 5. Once the replacement index is done indexing, it will replace the original index. + + + +## Set lock mode - single index + + + +{`# Define the set lock mode operation +# Pass index name & lock mode +set_lock_mode_op = SetIndexesLockOperation(IndexLockMode.LOCKED_IGNORE, "Orders/Totals") + +# Execute the operation by passing it to maintenance.send +# An exception will be thrown if index does not exist +store.maintenance.send(set_lock_mode_op) + +# Lock mode is now set to 'LockedIgnore' +# Any modification done now to the index will Not be applied, and will Not throw +`} + + + + + +## Set lock mode - multiple indexes + + + +{`# Define the set lock mode operation, pass the parameters +set_lock_mode_op = SetIndexesLockOperation(IndexLockMode.LOCKED_ERROR, "Orders/Totals", "Orders/ByCompany") + +# Execute the operation by passing it to maintenance.send +# An exception will be thrown if any of the specified indexes does not exist +store.maintenance.send(set_lock_mode_op) + +# Lock mode is now set to 'LockedError' on both indexes +# Any modifications done now to either index will throw +`} + + + + + +## Syntax + + + +{`class SetIndexesLockOperation(VoidMaintenanceOperation): + def __init__(self, mode: IndexLockMode, *index_names: str): ... +`} + + + +| Parameters | Type | Description | +|- | - | - | +| **mode** | `IndexLockMode` | Lock mode to set | +| **\*index_names** | `str` | Index names to set lock mode for | + + + +{`class IndexLockMode(Enum): + UNLOCK = "Unlock" + LOCKED_IGNORE = "LockedIgnore" + LOCKED_ERROR = "LockedError" + + def __str__(self): + return self.value +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-csharp.mdx new file mode 100644 index 0000000000..fa87a5070b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-csharp.mdx @@ -0,0 +1,156 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In RavenDB, each index has its own dedicated thread for all indexing work. + By default, RavenDB prioritizes processing requests over indexing, + so indexing threads start with a lower priority than request-processing threads. + +* Use `SetIndexesPriorityOperation` to raise or lower the index thread priority. + +* **Indexes scope**: + Index priority can be set for both static and auto indexes. + +* **Nodes scope**: + The priority will be updated on all nodes in the database group. + +* Setting the priority can also be done from the [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) in the Studio. 
+ +* In this page: + * [Index priority](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#index-priority) + * [Set priority - single index](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#set-priority---single-index) + * [Set priority - multiple indexes](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#set-priority---multiple-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#syntax) + + + +## Index priority + +Setting the priority will affect the indexing thread priority at the operating system level: + +| Priority value | Indexing thread priority
at OS level | Description | +|-----------------------|------------------------------------------|-------------| +| **Low** | Lowest |
  • Having the `Lowest` priority at the OS level, indexes will run only when there's spare capacity, i.e., when the system is not occupied with higher-priority tasks.
  • Requests to the database will complete faster.
    Use when querying the server is more important to you than indexing.
| +| **Normal** (default) | Below normal |
  • Requests to the database are still preferred over the indexing process.
  • The indexing thread priority at the OS level is `Below normal`,
    while request-processing threads have `Normal` priority.
| +| **High** | Normal |
  • Requests and indexing will have the same priority at the OS level.
| + +## Set priority - single index + + + + +{`// Define the set priority operation +// Pass index name & priority +var setPriorityOp = new SetIndexesPriorityOperation("Orders/Totals", IndexPriority.High); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if index does not exist +store.Maintenance.Send(setPriorityOp); +`} + + + + +{`// Define the set priority operation +// Pass index name & priority +var setPriorityOp = new SetIndexesPriorityOperation("Orders/Totals", IndexPriority.High); + +// Execute the operation by passing it to Maintenance.SendAsync +// An exception will be thrown if index does not exist +await store.Maintenance.SendAsync(setPriorityOp); +`} + + + + + + +## Set priority - multiple indexes + + + + +{`// Define the index list and the new priority: +var parameters = new SetIndexesPriorityOperation.Parameters +{ + IndexNames = new[] {"Orders/Totals", "Orders/ByCompany"}, + Priority = IndexPriority.Low +}; + +// Define the set priority operation, pass the parameters +var setPriorityOp = new SetIndexesPriorityOperation(parameters); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if any of the specified indexes do not exist +store.Maintenance.Send(setPriorityOp); +`} + + + + +{`// Define the index list and the new priority: +var parameters = new SetIndexesPriorityOperation.Parameters +{ + IndexNames = new[] {"Orders/Totals", "Orders/ByCompany"}, + Priority = IndexPriority.Low +}; + +// Define the set priority operation, pass the parameters +var setPriorityOp = new SetIndexesPriorityOperation(parameters); + +// Execute the operation by passing it to Maintenance.SendAsync +// An exception will be thrown if any of the specified indexes do not exist +await store.Maintenance.SendAsync(setPriorityOp); +`} + + + + + + +## Syntax + + + +{`// Available overloads: +public SetIndexesPriorityOperation(string indexName, IndexPriority priority); +public SetIndexesPriorityOperation(Parameters parameters); +`} + + + +| Parameters | | | +| - | - | - | +| **indexName** | `string` | Index name for which to change priority | +| **priority** | `IndexingPriority` | Priority to set | +| **parameters** | `SetIndexesPriorityOperation.Parameters` | List of indexes + Priority to set.
An exception is thrown if any of the specified indexes doesn't exist. | + + + +{`public enum IndexPriority +\{ + Low, + Normal, + High +\} +`} + + + + + +{`public class Parameters +\{ + public string[] IndexNames \{ get; set; \} + public IndexPriority Priority \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-java.mdx new file mode 100644 index 0000000000..eccca1d57a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-java.mdx @@ -0,0 +1,92 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**SetIndexesPriorityOperation** allows you to change an index priority for a given index or indexes. + +Setting the priority will affect the indexing thread priority at the operating system level: + +| Priority value | Indexing thread priority
at OS level | Description | +|-----------------------|------------------------------------------|-------------| +| **Low** | Lowest |
  • Having the `Lowest` priority at the OS level, indexes will run only when there's spare capacity, i.e., when the system is not occupied with higher-priority tasks.
  • Requests to the database will complete faster.
    Use when querying the server is more important to you than indexing.
| +| **Normal** (default) | Below normal |
  • Requests to the database are still preferred over the indexing process.
  • The indexing thread priority at the OS level is `Below normal`,
    while request-processing threads have `Normal` priority.
| +| **High** | Normal |
  • Requests and indexing will have the same priority at the OS level.
| + +## Syntax + + + +{`public SetIndexesPriorityOperation(String indexName, IndexPriority priority) \{ +public SetIndexesPriorityOperation(SetIndexesPriorityOperation.Parameters parameters) +`} + + + + + +{`public enum IndexPriority \{ + LOW, + NORMAL, + HIGH +\} +`} + + + + + +{`public static class Parameters \{ + private String[] indexNames; + private IndexPriority priority; + + public String[] getIndexNames() \{ + return indexNames; + \} + + public void setIndexNames(String[] indexNames) \{ + this.indexNames = indexNames; + \} + + public IndexPriority getPriority() \{ + return priority; + \} + + public void setPriority(IndexPriority priority) \{ + this.priority = priority; + \} +\} +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **name** | String | name of an index to change priority for | +| **priority** | IndexingPriority | new index priority | +| **parameters** | SetIndexesPriorityOperation.Parameters | list of indexes + new index priority | + +## Example I + + + +{`store.maintenance().send( + new SetIndexesPriorityOperation("Orders/Totals", IndexPriority.HIGH)); +`} + + + +## Example II + + + +{`SetIndexesPriorityOperation.Parameters parameters = new SetIndexesPriorityOperation.Parameters(); +parameters.setIndexNames(new String[]\{ "Orders/Totals", "Orders/ByCompany" \}); +parameters.setPriority(IndexPriority.LOW); + +store.maintenance().send(new SetIndexesPriorityOperation(parameters)); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-nodejs.mdx new file mode 100644 index 0000000000..6b39f94496 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-nodejs.mdx @@ -0,0 +1,109 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In RavenDB, each index has its own dedicated thread for all indexing work. + By default, RavenDB prioritizes processing requests over indexing, + so indexing threads start with a lower priority than request-processing threads. + +* Use `SetIndexesPriorityOperation` to raise or lower the index thread priority. + +* **Indexes scope**: + Index priority can be set for both static and auto indexes. + +* **Nodes scope**: + The priority will be updated on all nodes in the database group. + +* Setting the priority can also be done from the [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) in the Studio. + +* In this page: + * [Index priority](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#index-priority) + * [Set priority - single index](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#set-priority---single-index) + * [Set priority - multiple indexes](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#set-priority---multiple-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#syntax) + + + +## Index priority + +Setting the priority will affect the indexing thread priority at the operating system level: + +| Priority value | Indexing thread priority
at OS level | Description | +|-----------------------|------------------------------------------|-------------| +| **Low** | Lowest |
  • Having the `Lowest` priority at the OS level, indexes will run only when the system has spare capacity and is not occupied with higher-priority tasks.
  • Requests to the database will complete faster.
    Use when querying the server is more important to you than indexing.
| +| **Normal** (default) | Below normal |
  • Requests to the database are still preferred over the indexing process.
  • The indexing thread priority at the OS level is `Below normal`,
    while request-processing threads have `Normal` priority.
| +| **High** | Normal |
  • Requests and indexing will have the same priority at the OS level.
| + +## Set priority - single index + + + +{`// Define the set priority operation +// Pass index name & priority +const setPriorityOp = new SetIndexesPriorityOperation("Orders/Totals", "High"); + +// Execute the operation by passing it to maintenance.send +// An exception will be thrown if index does not exist +await store.maintenance.send(setPriorityOp); +`} + + + + + +## Set priority - multiple indexes + + + +{`// Define the index list and the new priority: +const parameters = \{ + indexNames: ["Orders/Totals", "Orders/ByCompany"], + priority: "Low" +\} + +// Define the set priority operation, pass the parameters +const setPriorityOp = new SetIndexesPriorityOperation(parameters); + +// Execute the operation by passing it to maintenance.send +// An exception will be thrown if any of the specified indexes do not exist +await store.maintenance.send(setPriorityOp); +`} + + + + + +## Syntax + + + +{`// Available overloads: +const setPriorityOp = new SetIndexesPriorityOperation(indexName, priority); +const setPriorityOp = new SetIndexesPriorityOperation(parameters); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexName** | `string` | Index name for which to change priority | +| **priority** | `"Low"` /
`"Normal"` /
`"High"` | Priority to set | +| **parameters** | parameters object | List of indexes + Priority to set.
An exception is thrown if any of the specified indexes doesn't exist. | + + + +{`// parameters object +\{ + indexNames, // string[], list of index names + priority // Priority to set +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-php.mdx new file mode 100644 index 0000000000..b1db277387 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-php.mdx @@ -0,0 +1,113 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In RavenDB, each index has its own dedicated thread for all indexing work. + By default, RavenDB prioritizes processing requests over indexing, + so indexing threads start with a lower priority than request-processing threads. + +* Use `SetIndexesPriorityOperation` to raise or lower the index thread priority. + +* **Indexes scope**: + Index priority can be set for both static and auto indexes. + +* **Nodes scope**: + The priority will be updated on all nodes in the database group. + +* Setting the priority can also be done from the [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) in the Studio. + +* In this page: + * [Index priority](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#index-priority) + * [Set priority - single index](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#set-priority---single-index) + * [Set priority - multiple indexes](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#set-priority---multiple-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#syntax) + + + +## Index priority + +Setting the priority will affect the indexing thread priority at the operating system level: + +| Priority value | Indexing thread priority
at OS level | Description | +|--------------------------------|------------------------------------------|-------------| +| Set using `low()` | Lowest |
  • Having the `Lowest` priority at the OS level, indexes will run only when the system has spare capacity and is not occupied with higher-priority tasks.
  • Requests to the database will complete faster.
    Use when querying the server is more important to you than indexing.
| +| Set using `normal()` (default) | Below normal |
  • Requests to the database are still preferred over the indexing process.
  • The indexing thread priority at the OS level is `Below normal`,
    while request-processing threads have `Normal` priority.
| +| Set using `high()` | Normal |
  • Requests and indexing will have the same priority at the OS level.
| + +## Set priority - single index + + + +{`// Define the set priority operation +// Pass index name & priority +$setPriorityOp = new SetIndexesPriorityOperation("Orders/Totals", IndexPriority::high()); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if index does not exist +$store->maintenance()->send($setPriorityOp); +`} + + + + + +## Set priority - multiple indexes + + + +{`// Define the index list and the new priority: +$parameters = new IndexPriorityParameters(); +$parameters->setIndexNames(["Orders/Totals", "Orders/ByCompany"]); +$parameters->setPriority(IndexPriority::low()); + +// Define the set priority operation, pass the parameters +$setPriorityOp = new SetIndexesPriorityOperation($parameters); + +// Execute the operation by passing it to Maintenance.Send +// An exception will be thrown if any of the specified indexes do not exist +$store->maintenance()->send($setPriorityOp); +`} + + + + + +## Syntax + + + +{`// Available overloads: +SetIndexesPriorityOperation(?string $indexName, ?IndexPriority $priority); +SetIndexesPriorityOperation(?Parameters $parameters); +`} + + + +| Parameters | | | +| - | - | - | +| **$indexName** | `?string` | Index name for which to change priority | +| **$priority** | `?IndexPriority` | Priority to set | +| **$parameters** | `?Parameters` | Index priority parameters | + + + +{`class IndexPriority +\{ + public static function low(): IndexPriority; + public static function normal(): IndexPriority; + public static function high(): IndexPriority; + + public function isLow(): bool; + public function isNormal(): bool; + public function isHigh(): bool; +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-python.mdx new file mode 100644 index 0000000000..0feff233fe --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_set-index-priority-python.mdx @@ -0,0 +1,100 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In RavenDB, each index has its own dedicated thread for all indexing work. + By default, RavenDB prioritizes processing requests over indexing, + so indexing threads start with a lower priority than request-processing threads. + +* Use `SetIndexesPriorityOperation` to raise or lower the index thread priority. + +* **Indexes scope**: + Index priority can be set for both static and auto indexes. + +* **Nodes scope**: + The priority will be updated on all nodes in the database group. + +* Setting the priority can also be done from the [indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) in the Studio. 
+ +* In this page: + * [Index priority](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#index-priority) + * [Set priority - single index](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#set-priority---single-index) + * [Set priority - multiple indexes](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#set-priority---multiple-indexes) + * [Syntax](../../../../client-api/operations/maintenance/indexes/set-index-priority.mdx#syntax) + + + +## Index priority + +Setting the priority will affect the indexing thread priority at the operating system level: + +| Priority value | Indexing thread priority
at OS level | Description | +|-----------------------|------------------------------------------|-------------| +| **LOW** | Lowest |
  • Having the `Lowest` priority at the OS level, indexes will run only when the system has spare capacity and is not occupied with higher-priority tasks.
  • Requests to the database will complete faster.
    Use when querying the server is more important to you than indexing.
| +| **NORMAL** (default) | Below normal |
  • Requests to the database are still preferred over the indexing process.
  • The indexing thread priority at the OS level is `Below normal`,
    while request-processing threads have `Normal` priority.
| +| **HIGH** | Normal |
  • Requests and indexing will have the same priority at the OS level.
| + +## Set priority - single index + + + +{`# Define the set priority operation +# Pass index name & priority +set_priority_op = SetIndexesPriorityOperation(IndexPriority.HIGH, "Orders/Totals") + +# Execute the operation by passing it to maintenance.send +# An exception will be thrown if index does not exist +store.maintenance.send(set_priority_op) +`} + + + + + +## Set priority - multiple indexes + + + +{`# Define the set priority operation, pass multiple index names +set_priority_op = SetIndexesPriorityOperation(IndexPriority.LOW, "Orders/Totals", "Orders/ByCompany") + +# Execute the operation by passing it to maintenance.send +# An exception will be thrown if any of the specified indexes do not exist +store.maintenance.send(set_priority_op) +`} + + + + + +## Syntax + + + +{`class SetIndexesPriorityOperation(VoidMaintenanceOperation): + def __init__(self, priority: IndexPriority, *index_names: str): ... +`} + + + +| Parameters | | | +| - | - | - | +| **\*index_names** | `str` | Name(s) of the index(es) for which to change priority | +| **priority** | `IndexPriority` | Priority to set | + + + +{`class IndexPriority(Enum): + LOW = "Low" + NORMAL = "Normal" + HIGH = "High" +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-csharp.mdx new file mode 100644 index 0000000000..6f367e2f64 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-csharp.mdx @@ -0,0 +1,83 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* After an index has been paused using [StopIndexOperation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx), + use `StartIndexOperation` to **resume the index**. + +* When resuming the index from the **client**: + The index is resumed on the preferred node only, and Not on all the database-group nodes. + +* When resuming the index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index is resumed on the local node the browser is opened on, even if it is Not the preferred node. 
+ +* In this page: + * [Resume index example](../../../../client-api/operations/maintenance/indexes/start-index.mdx#resume-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/start-index.mdx#syntax) + + +## Resume index example + + + + +{`// Define the resume index operation, pass the index name +var resumeIndexOp = new StartIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(resumeIndexOp); + +// At this point: +// Index 'Orders/Totals' is resumed on the preferred node + +// Can verify the index status on the preferred node by sending GetIndexingStatusOperation +var indexingStatus = store.Maintenance.Send(new GetIndexingStatusOperation()); + +var index = indexingStatus.Indexes.FirstOrDefault(x => x.Name == "Orders/Totals"); +Assert.Equal(IndexRunningStatus.Running, index.Status); +`} + + + + +{`// Define the resume index operation, pass the index name +var resumeIndexOp = new StartIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(resumeIndexOp); + +// At this point: +// Index 'Orders/Totals' is resumed on the preferred node + +// Can verify the index status on the preferred node by sending GetIndexingStatusOperation +var indexingStatus = await store.Maintenance.SendAsync(new GetIndexingStatusOperation()); + +var index = indexingStatus.Indexes.FirstOrDefault(x => x.Name == "Orders/Totals"); +Assert.Equal(IndexRunningStatus.Running, index.Status); +`} + + + + + + +## Syntax + + + +{`// class name has "Start", but this is ok, this is the "Resume" operation +public StartIndexOperation(string indexName) +`} + + + +| Parameters | Type | Description | +| - | - |-| +| **indexName** | `string` | Name of an index to resume | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-java.mdx new file mode 100644 index 0000000000..204f3c5ba3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-java.mdx @@ -0,0 +1,30 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The **StartIndexOperation** is used to resume indexing for an index. + +### Syntax + + + +{`public StartIndexOperation(String indexName) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **indexName** | String | name of an index to start indexing | + +### Example + + + +{`store.maintenance().send(new StartIndexOperation("Orders/Totals")); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-nodejs.mdx new file mode 100644 index 0000000000..a33cab9563 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-nodejs.mdx @@ -0,0 +1,62 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* After an index has been paused using [StopIndexOperation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx), + use `StartIndexOperation` to **resume the index**. 
+ +* When resuming the index from the **client**: + The index is resumed on the preferred node only, and Not on all the database-group nodes. + +* When resuming the index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index is resumed on the local node the browser is opened on, even if it is Not the preferred node. + +* In this page: + * [Resume index example](../../../../client-api/operations/maintenance/indexes/start-index.mdx#resume-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/start-index.mdx#syntax) + + +## Resume index example + + + +{`// Define the resume index operation, pass the index name +const resumeIndexOp = new StartIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to maintenance.send +await store.maintenance.send(resumeIndexOp); + +// At this point: +// Index 'Orders/Totals' is resumed on the preferred node + +// Can verify the index status on the preferred node by sending GetIndexingStatusOperation +const indexingStatus = await store.maintenance.send(new GetIndexingStatusOperation()); + +const index = indexingStatus.indexes.find(x => x.name === "Orders/Totals") +assert.strictEqual(index.status, "Running"); +`} + + + + + +## Syntax + + + +{`// class name has "Start", but this is ok, this is the "Resume" operation +const resumeIndexOp = new StartIndexOperation(indexName); +`} + + + +| Parameters | Type | Description | +| - | - |-| +| **indexName** | `string` | Name of an index to resume | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-php.mdx new file mode 100644 index 0000000000..7940c9d520 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-php.mdx @@ -0,0 +1,68 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* After an index has been paused using [StopIndexOperation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx), + use `StartIndexOperation` to **resume the index**. + +* When resuming the index from the **client**: + The index is resumed on the preferred node only, and Not on all the database-group nodes. + +* When resuming the index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index is resumed on the local node the browser is opened on, even if it is Not the preferred node. 
+ +* In this page: + * [Resume index example](../../../../client-api/operations/maintenance/indexes/start-index.mdx#resume-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/start-index.mdx#syntax) + + +## Resume index example + + + +{`// Define the resume index operation, pass the index name +$resumeIndexOp = new StartIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($resumeIndexOp); + +// At this point: +// Index 'Orders/Totals' is resumed on the preferred node + +// Can verify the index status on the preferred node by sending GetIndexingStatusOperation +/** @var IndexingStatus $indexingStatus */ +$indexingStatus = $store->maintenance()->send(new GetIndexingStatusOperation()); + +$indexes = array_filter($indexingStatus->getIndexes()->getArrayCopy(), function ($v, $k) \{ + return $v->getName() == "Orders/Totals"; +\}); +/** @var IndexingStatus $index */ +$index = $indexes[0]; + +$this->assertTrue($index->getStatus()->isRunning()); +`} + + + + + +## Syntax + + + +{`// class name begins with "Start" but this is still the "Resume" operation +StartIndexOperation(?string $indexName) +`} + + + +| Parameters | Type | Description | +| - | - |-| +| **$indexName** | `?string` | Name of an index to resume | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-python.mdx new file mode 100644 index 0000000000..7d9bda9ee6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-index-python.mdx @@ -0,0 +1,62 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* After an index has been paused using [StopIndexOperation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx), + use `StartIndexOperation` to **resume the index**. + +* When resuming the index from the **client**: + The index is resumed on the preferred node only, and Not on all the database-group nodes. + +* When resuming the index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index is resumed on the local node the browser is opened on, even if it is Not the preferred node. + +* In this page: + * [Resume index example](../../../../client-api/operations/maintenance/indexes/start-index.mdx#resume-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/start-index.mdx#syntax) + + +## Resume index example + + + +{`# Define the resume index operation, pass the index name +resume_index_op = StartIndexOperation("Orders/Totals") + +# Execute the operation by passing it to maintenance.send +store.maintenance.send(resume_index_op) + +# At this point: +# Index 'Orders/Totals' is resumed on the preferred node + +# Can verify the index status on the preferred node by sending GetIndexingStatusOperation +indexing_status = store.maintenance.send(GetIndexingStatusOperation()) + +index = [x for x in indexing_status.indexes if x.name == "Orders/Totals"][0] +self.assertEqual(index.status, IndexRunningStatus.RUNNING) +`} + + + + + +## Syntax + + + +{`class StartIndexOperation(VoidMaintenanceOperation): + def __init__(self, index_name: str): ... 
+`} + + + +| Parameters | Type | Description | +| - | - |-| +| **index_name** | `str` | Name of an index to resume | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-csharp.mdx new file mode 100644 index 0000000000..d51c8dd10b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-csharp.mdx @@ -0,0 +1,78 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* After indexing has been paused using [StopIndexingOperation](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx), + use `StartIndexingOperation` to **resume indexing** for ALL indexes in the database. + + Calling `StartIndexingOperation` on a single index will have no effect. + + +* When resuming indexing from the **client**: + Indexing is resumed on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When resuming indexing from the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#more-actions) view: + Indexing is resumed on the local node the browser is opened on, even if it is Not the preferred node. + +* In this page: + * [Resume indexing example](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx#resume-indexing-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx#syntax) + + +## Resume indexing example + + + + +{`// Define the resume indexing operation +var resumeIndexingOp = new StartIndexingOperation(); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(resumeIndexingOp); + +// At this point, +// you can be sure that all indexes on the preferred node are 'running' + +// Can verify indexing status on the preferred node by sending GetIndexingStatusOperation +var indexingStatus = store.Maintenance.Send(new GetIndexingStatusOperation()); +Assert.Equal(IndexRunningStatus.Running, indexingStatus.Status); +`} + + + + +{`// Define the resume indexing operation +var resumeIndexingOp = new StartIndexingOperation(); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(resumeIndexingOp); + +// At this point, +// you can be sure that all indexes on the preferred node are 'running' + +// Can verify indexing status on the preferred node by sending GetIndexingStatusOperation +var indexingStatus = await store.Maintenance.SendAsync(new GetIndexingStatusOperation()); +Assert.Equal(IndexRunningStatus.Running, indexingStatus.Status); +`} + + + + + + +## Syntax + + + +{`// class name has "Start", but this is ok, this is the "Resume" operation +public StartIndexingOperation() +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-java.mdx new file mode 100644 index 0000000000..04c6bfce47 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-java.mdx @@ -0,0 +1,26 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + 
+**StartIndexingOperation** is used to resume indexing for entire database. + +### Syntax + + + +{`public StartIndexingOperation() +`} + + + +### Example + + + +{`store.maintenance().send(new StartIndexingOperation()); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-nodejs.mdx new file mode 100644 index 0000000000..e233e9dfe2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-nodejs.mdx @@ -0,0 +1,59 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* After indexing has been paused using [StopIndexingOperation](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx), + use `StartIndexingOperation` to **resume indexing** for ALL indexes in the database. + + Calling `StartIndexingOperation` on a single index will have no effect. + + +* When resuming indexing from the **client**: + Indexing is resumed on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When resuming indexing from the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#more-actions) view: + Indexing is resumed on the local node the browser is opened on, even if it is Not the preferred node. + +* In this page: + * [Resume indexing example](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx#resume-indexing-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx#syntax) + + +## Resume indexing example + + + +{`// Define the resume indexing operation +const resumeIndexingOp = new StartIndexingOperation(); + +// Execute the operation by passing it to maintenance.send +await store.maintenance.send(resumeIndexingOp); + +// At this point, +// you can be sure that all indexes on the preferred node are 'running' + +// Can verify indexing status on the preferred node by sending GetIndexingStatusOperation +const indexingStatus = await store.maintenance.send(new GetIndexingStatusOperation()); +assert.strictEqual(indexingStatus.status, "Running"); +`} + + + + + +## Syntax + + + +{`// class name has "Start", but this is ok, this is the "Resume" operation +const resumeIndexingOp = new StartIndexingOperation(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-php.mdx new file mode 100644 index 0000000000..49c8cc4328 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-php.mdx @@ -0,0 +1,60 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* After indexing has been paused using [StopIndexingOperation](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx), + use `StartIndexingOperation` to **resume indexing** for ALL indexes in the database. + + Calling `StartIndexingOperation` on a single index will have no effect. 
+ + +* When resuming indexing from the **client**: + Indexing is resumed on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When resuming indexing from the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#more-actions) view: + Indexing is resumed on the local node the browser is opened on, even if it is Not the preferred node. + +* In this page: + * [Resume indexing example](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx#resume-indexing-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx#syntax) + + +## Resume indexing example + + + +{`// Define the resume indexing operation +$resumeIndexingOp = new StartIndexingOperation(); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($resumeIndexingOp); + +// At this point, +// you can be sure that all indexes on the preferred node are 'running' + +// Can verify indexing status on the preferred node by sending GetIndexingStatusOperation +/** @var IndexingStatus $indexingStatus */ +$indexingStatus = $store->maintenance()->send(new GetIndexingStatusOperation()); +$this->assertTrue($indexingStatus->getStatus()->isRunning()); +`} + + + + + +## Syntax + + + +{`// class name prefix is "Start", but this is still the "Resume" operation +public StartIndexingOperation() +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-python.mdx new file mode 100644 index 0000000000..6ce957652e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_start-indexing-python.mdx @@ -0,0 +1,60 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* After indexing has been paused using [StopIndexingOperation](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx), + use `StartIndexingOperation` to **resume indexing** for ALL indexes in the database. + + Calling `StartIndexingOperation` on a single index will have no effect. + + +* When resuming indexing from the **client**: + Indexing is resumed on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When resuming indexing from the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#more-actions) view: + Indexing is resumed on the local node the browser is opened on, even if it is Not the preferred node. 
+ +* In this page: + * [Resume indexing example](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx#resume-indexing-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx#syntax) + + +## Resume indexing example + + + +{`# Define the resume indexing operation +resume_index_op = StartIndexingOperation() + +# Execute the operation by passing it to maintenance.send +store.maintenance.send(resume_index_op) + +# At this point: +# you can be sure that all indexes on the preferred node are 'running' + +# Can verify the index status on the preferred node by sending GetIndexingStatusOperation +indexing_status = store.maintenance.send(GetIndexingStatusOperation()) + +self.assertEqual(indexing_status.status, IndexRunningStatus.RUNNING) +`} + + + + + +## Syntax + + + +{`class StartIndexingOperation(VoidMaintenanceOperation): + def __init__(self): ... +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-csharp.mdx new file mode 100644 index 0000000000..c7123beac9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-csharp.mdx @@ -0,0 +1,116 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `StopIndexOperation` to **pause a single index** in the database. + +* To pause indexing for ALL indexes in the database use [StopIndexingOperation](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx). + +* In this page: + * [Overview](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#overview) + * [Pause index example](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#pause-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#syntax) + + +## Overview + +#### Which node is the index paused for? + +* When pausing the index from the **client**: + The index will be paused for the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, + Not for all database-group nodes. + +* When pausing the index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index will be paused for the local node the browser is opened on, even if it is Not the preferred node. +#### What happens when an index is paused for a node? + +* A paused index performs no indexing for the node it is paused for. + New data **is** indexed by the index on database-group nodes that the index is not paused for. + +* A paused index **can** be queried, but results may be stale when querying the node that the index is paused for. +#### Resuming the index: + +* Learn how to resume an index by a client here: [Resume index](../../../../client-api/operations/maintenance/indexes/start-index.mdx) + +* Learn to resume an index from **Studio** here: [Indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) + +* Pausing the index is **Not a persistent operation**. + This means the paused index will resume upon either of the following: + * The server is restarted. + * The database is re-loaded (by disabling and then enabling it). 
+ Toggling the database state can be done using the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#database-actions) view, + or using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) by the client. + +* [Resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) a paused index will resume the normal operation of the index + on the local node where the reset action was performed. + +* Modifying the index definition will resume the normal operation of the index + on all the nodes for which it is paused. + + + +## Pause index example + + + + +{`// Define the pause index operation, pass the index name +var pauseIndexOp = new StopIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(pauseIndexOp); + +// At this point: +// Index 'Orders/Totals' is paused on the preferred node + +// Can verify the index status on the preferred node by sending GetIndexingStatusOperation +var indexingStatus = store.Maintenance.Send(new GetIndexingStatusOperation()); + +var index = indexingStatus.Indexes.FirstOrDefault(x => x.Name == "Orders/Totals"); +Assert.Equal(IndexRunningStatus.Paused, index.Status); +`} + + + + +{`// Define the pause index operation, pass the index name +var pauseIndexOp = new StopIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(pauseIndexOp); + +// At this point: +// Index 'Orders/Totals' is paused on the preferred node + +// Can verify the index status on the preferred node by sending GetIndexingStatusOperation +var indexingStatus = await store.Maintenance.SendAsync(new GetIndexingStatusOperation()); + +var index = indexingStatus.Indexes.FirstOrDefault(x => x.Name == "Orders/Totals"); +Assert.Equal(IndexRunningStatus.Paused, index.Status); +`} + + + + + + +## Syntax + + + +{`// class name has "Stop", but this is ok, this is the "Pause" operation +public StopIndexOperation(string indexName) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexName** | string | Name of an index to pause | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-java.mdx new file mode 100644 index 0000000000..e5c947a2e5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-java.mdx @@ -0,0 +1,34 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The **StopIndexOperation** is used to pause indexing of a single index. + + +Indexing will be resumed automatically after server restart. 
+ + +### Syntax + + + +{`public StopIndexOperation(String indexName) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **indexName** | String | name of an index to stop indexing | + +### Example + + + +{`store.maintenance().send(new StopIndexOperation("Orders/Totals")); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-nodejs.mdx new file mode 100644 index 0000000000..623f6b9f7b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-nodejs.mdx @@ -0,0 +1,95 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `StopIndexOperation` to **pause a single index** in the database. + +* To pause indexing for ALL indexes in the database use [StopIndexingOperation](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx). + +* In this page: + * [Overview](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#overview) + * [Pause index example](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#pause-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#syntax) + + +## Overview + +#### Which node is the index paused for? + +* When pausing the index from the **client**: + The index will be paused for the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, + Not for all database-group nodes. + +* When pausing the index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index will be paused for the local node the browser is opened on, even if it is Not the preferred node. +#### What happens when an index is paused for a node? + +* A paused index performs no indexing for the node it is paused for. + New data **is** indexed by the index on database-group nodes that the index is not paused for. + +* A paused index **can** be queried, but results may be stale when querying the node that the index is paused for. +#### Resuming the index: + +* Learn how to resume an index by a client here: [Resume index](../../../../client-api/operations/maintenance/indexes/start-index.mdx) + +* Learn to resume an index from **Studio** here: [Indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) + +* Pausing the index is **Not a persistent operation**. + This means the paused index will resume upon either of the following: + * The server is restarted. + * The database is re-loaded (by disabling and then enabling it). + Toggling the database state can be done using the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#database-actions) view, + or using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) by the client. + +* [Resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) a paused index will resume the normal operation of the index + on the local node where the reset action was performed. + +* Modifying the index definition will resume the normal operation of the index + on all the nodes for which it is paused. 
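+ +As referenced above, reloading the database from the client is done with `ToggleDatabasesStateOperation`. A minimal sketch of such a reload (assumes the operation is imported from the `ravendb` package; the database name "Northwind" is illustrative): + + +{`// Disable and then re-enable the database to force a reload. +// Any paused index will then resume its normal operation. +await store.maintenance.server.send(new ToggleDatabasesStateOperation("Northwind", true)); // disable +await store.maintenance.server.send(new ToggleDatabasesStateOperation("Northwind", false)); // enable +`} +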
+ + + +## Pause index example + + + +{`// Define the pause index operation, pass the index name +const pauseIndexOp = new StopIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to maintenance.send +await store.maintenance.send(pauseIndexOp); + +// At this point: +// Index 'Orders/Totals' is paused on the preferred node + +// Can verify the index status on the preferred node by sending GetIndexingStatusOperation +const indexingStatus = await store.maintenance.send(new GetIndexingStatusOperation()); + +const index = indexingStatus.indexes.find(x => x.name === "Orders/Totals") +assert.strictEqual(index.status, "Paused"); +`} + + + + + +## Syntax + + + +{`// class name has "Stop", but this is ok, this is the "Pause" operation +const pauseIndexOp = new StopIndexOperation(indexName); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **indexName** | string | Name of an index to pause | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-php.mdx new file mode 100644 index 0000000000..7d4b7ece13 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-php.mdx @@ -0,0 +1,101 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `StopIndexOperation` to **pause a single index** in the database. + +* To pause indexing for ALL indexes in the database use [StopIndexingOperation](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx). + +* In this page: + * [Overview](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#overview) + * [Pause index example](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#pause-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#syntax) + + +## Overview + +#### Which node is the index paused for? + +* When pausing the index from the **client**: + The index will be paused for the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, + Not for all database-group nodes. + +* When pausing the index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index will be paused for the local node the browser is opened on, even if it is Not the preferred node. +#### What happens when an index is paused for a node? + +* A paused index performs no indexing for the node it is paused for. + New data **is** indexed by the index on database-group nodes that the index is not paused for. + +* A paused index **can** be queried, but results may be stale when querying the node that the index is paused for. +#### Resuming the index: + +* Learn how to resume an index by a client here: [Resume index](../../../../client-api/operations/maintenance/indexes/start-index.mdx) + +* Learn to resume an index from **Studio** here: [Indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) + +* Pausing the index is **Not a persistent operation**. + This means the paused index will resume upon either of the following: + * The server is restarted. + * The database is re-loaded (by disabling and then enabling it). 
Toggling the database state can be done using the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#database-actions) view, + or using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) by the client. + +* [Resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) a paused index will resume the normal operation of the index + on the local node where the reset action was performed. + +* Modifying the index definition will resume the normal operation of the index + on all the nodes for which it is paused. + + + +## Pause index example + + + +{`// Define the pause index operation, pass the index name +$pauseIndexOp = new StopIndexOperation("Orders/Totals"); + +// Execute the operation by passing it to Maintenance.Send +$store->maintenance()->send($pauseIndexOp); + +// At this point: +// Index 'Orders/Totals' is paused on the preferred node + +// Can verify the index status on the preferred node by sending GetIndexingStatusOperation +/** @var IndexingStatus $indexingStatus */ +$indexingStatus = $store->maintenance()->send(new GetIndexingStatusOperation()); + +$indexes = array_filter($indexingStatus->getIndexes()->getArrayCopy(), function ($v, $k) \{ + return $v->getName() == "Orders/Totals"; +\}); +/** @var IndexingStatus $index */ +$index = $indexes[0]; + +$this->assertTrue($index->getStatus()->isPaused()); +`} + + + + + +## Syntax + + + +{`// class name has "Stop", but this is ok, this is the "Pause" operation +public StopIndexOperation(?string $indexName) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **$indexName** | `?string` | Name of an index to pause | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-python.mdx new file mode 100644 index 0000000000..16a56339e8 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-index-python.mdx @@ -0,0 +1,96 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `StopIndexOperation` to **pause a single index** in the database. + +* To pause indexing for ALL indexes in the database use [StopIndexingOperation](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx). + +* In this page: + * [Overview](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#overview) + * [Pause index example](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#pause-index-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/stop-index.mdx#syntax) + + +## Overview + +#### Which node is the index paused for? + +* When pausing the index from the **client**: + The index will be paused for the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, + Not for all database-group nodes. + +* When pausing the index from the **Studio** [indexes list](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) view: + The index will be paused for the local node the browser is opened on, even if it is Not the preferred node. +#### What happens when an index is paused for a node? + +* A paused index performs no indexing for the node it is paused for. 
New data **is** indexed by the index on database-group nodes that the index is not paused for. + +* A paused index **can** be queried, but results may be stale when querying the node that the index is paused for. +#### Resuming the index: + +* Learn how to resume an index by a client here: [Resume index](../../../../client-api/operations/maintenance/indexes/start-index.mdx) + +* Learn to resume an index from **Studio** here: [Indexes list view](../../../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions) + +* Pausing the index is **Not a persistent operation**. + This means the paused index will resume upon either of the following: + * The server is restarted. + * The database is re-loaded (by disabling and then enabling it). + Toggling the database state can be done using the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#database-actions) view, + or using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) by the client. + +* [Resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) a paused index will resume the normal operation of the index + on the local node where the reset action was performed. + +* Modifying the index definition will resume the normal operation of the index + on all the nodes for which it is paused. + + + +## Pause index example + + + +{`# Define the pause index operation, pass the index name +pause_index_op = StopIndexOperation("Orders/Totals") + +# Execute the operation by passing it to maintenance.send +store.maintenance.send(pause_index_op) + +# At this point: +# Index 'Orders/Totals' is paused on the preferred node + +# Can verify the index status on the preferred node by sending GetIndexingStatusOperation +indexing_status = store.maintenance.send(GetIndexingStatusOperation()) + +index = [x for x in indexing_status.indexes if x.name == "Orders/Totals"][0] +self.assertEqual(index.status, IndexRunningStatus.PAUSED) +`} + + + + + +## Syntax + + + +{`class StopIndexOperation(VoidMaintenanceOperation): + # class name has "Stop", but this is ok, this is the "Pause" operation + def __init__(self, index_name: str): ... +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **index_name** | `str` | Name of an index to pause | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-csharp.mdx new file mode 100644 index 0000000000..faed0ce2cc --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-csharp.mdx @@ -0,0 +1,110 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `StopIndexingOperation` to **pause indexing** for ALL indexes in the database. + +* To pause only a specific index use the [StopIndexOperation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx). + +* In this page: + * [Overview](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#overview) + * [Pause indexing example](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#pause-indexing-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#syntax) + + +## Overview + +#### Which node is indexing paused for? 
+ +* When pausing indexing from the **client**: + Indexing will be paused on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When pausing indexing from the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#more-actions) view: + Indexing will be paused on the local node the browser is opened on, even if it is Not the preferred node. +#### What happens when indexing is paused for a node? + +* No indexing takes place on a node that indexing is paused for. + New data **is** indexed on database-group nodes that indexing is not paused for. + +* All indexes, including paused ones, can be queried, + but results may be stale when querying nodes that indexing has been paused for. + +* New indexes **can** be created for the database. + However, the new indexes will also be paused on any node that indexing is paused for, + until indexing is resumed for that node. + +* When [resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) indexes + or editing index definitions, re-indexing on a node that indexing has been paused for will + only be triggered when indexing is resumed on that node. +#### Resuming indexing: + +* Learn to resume indexing for all indexes by a client, here: [resume indexing](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx) + +* Learn to resume indexing for all indexes via **Studio**, here: [database list view](../../../../studio/database/databases-list-view.mdx#more-actions) + +* Pausing indexing is **Not a persistent operation**. + This means that all paused indexes will resume upon either of the following: + * The server is restarted. + * The database is re-loaded (by disabling and then enabling it). + Toggling the database state can be done using the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#database-actions) view, + or using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) by the client. 
+ + + +## Pause indexing example + + + + +{`// Define the pause indexing operation +var pauseIndexingOp = new StopIndexingOperation(); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(pauseIndexingOp); + +// At this point: +// All indexes in the default database will be 'paused' on the preferred node + +// Can verify indexing status on the preferred node by sending GetIndexingStatusOperation +var indexingStatus = store.Maintenance.Send(new GetIndexingStatusOperation()); +Assert.Equal(IndexRunningStatus.Paused, indexingStatus.Status); +`} + + + + +{`// Define the pause indexing operation +var pauseIndexingOp = new StopIndexingOperation(); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(pauseIndexingOp); + +// At this point: +// All indexes in the default database will be 'paused' on the preferred node + +// Can verify indexing status on the preferred node by sending GetIndexingStatusOperation +var indexingStatus = await store.Maintenance.SendAsync(new GetIndexingStatusOperation()); +Assert.Equal(IndexRunningStatus.Paused, indexingStatus.Status); +`} + + + + + + +## Syntax + + + +{`// class name has "Stop", but this is ok, this is the "Pause" operation +public StopIndexingOperation() +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-java.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-java.mdx new file mode 100644 index 0000000000..54afe97f9b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-java.mdx @@ -0,0 +1,32 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**StopIndexingOperation** is used to pause indexing for the entire database. + +Use [StopIndexOperation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx) to stop single index. + + +Indexing will be resumed automatically after a server restart or after using [start indexing operation](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx). + + +### Syntax + + + +{`public StopIndexingOperation() +`} + + + +### Example + + + +{`store.maintenance().send(new StopIndexingOperation()); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-nodejs.mdx new file mode 100644 index 0000000000..790e2ac915 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-nodejs.mdx @@ -0,0 +1,91 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `StopIndexingOperation` to **pause indexing** for ALL indexes in the database. + +* To pause only a specific index use the [StopIndexOperation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx). 
+ +* In this page: + * [Overview](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#overview) + * [Pause indexing example](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#pause-indexing-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#syntax) + + +## Overview + +#### Which node is indexing paused for? + +* When pausing indexing from the **client**: + Indexing will be paused on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When pausing indexing from the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#more-actions) view: + Indexing will be paused on the local node the browser is opened on, even if it is Not the preferred node. +#### What happens when indexing is paused for a node? + +* No indexing takes place on a node that indexing is paused for. + New data **is** indexed on database-group nodes that indexing is not paused for. + +* All indexes, including paused ones, can be queried, + but results may be stale when querying nodes that indexing has been paused for. + +* New indexes **can** be created for the database. + However, the new indexes will also be paused on any node that indexing is paused for, + until indexing is resumed for that node. + +* When [resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) indexes + or editing index definitions, re-indexing on a node that indexing has been paused for will + only be triggered when indexing is resumed on that node. +#### Resuming indexing: + +* Learn to resume indexing for all indexes by a client, here: [resume indexing](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx) + +* Learn to resume indexing for all indexes via **Studio**, here: [database list view](../../../../studio/database/databases-list-view.mdx#more-actions) + +* Pausing indexing is **Not a persistent operation**. + This means that all paused indexes will resume upon either of the following: + * The server is restarted. + * The database is re-loaded (by disabling and then enabling it). + Toggling the database state can be done using the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#database-actions) view, + or using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) by the client. 
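+ +Since pausing is per-node and not persistent, a common pattern is to pause indexing only for the duration of a heavy write burst and then resume it explicitly. A minimal sketch using the two operations discussed on this page (the bulk-load step is just a placeholder): + + +{`// Pause indexing on the preferred node +await store.maintenance.send(new StopIndexingOperation()); + +try \{ + // ... bulk-insert documents here ... +\} finally \{ + // Resume indexing - the pause would otherwise last until + // the server restarts or the database is reloaded + await store.maintenance.send(new StartIndexingOperation()); +\} +`} +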
+ + + +## Pause indexing example + + + +{`// Define the pause indexing operation +const pauseIndexingOp = new StopIndexingOperation(); + +// Execute the operation by passing it to maintenance.send +await store.maintenance.send(pauseIndexingOp); + +// At this point: +// All indexes in the default database will be 'paused' on the preferred node + +// Can verify indexing status on the preferred node by sending GetIndexingStatusOperation +const indexingStatus = await store.maintenance.send(new GetIndexingStatusOperation()); +assert.strictEqual(indexingStatus.status, "Paused"); +`} + + + + + +## Syntax + + + +{`// Note: although the class name contains "Stop", this is the "Pause" operation +const pauseIndexingOp = new StopIndexingOperation(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-php.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-php.mdx new file mode 100644 index 0000000000..c9a0e93d81 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-php.mdx @@ -0,0 +1,92 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `StopIndexingOperation` to **pause indexing** for ALL indexes in the database. + +* To pause only a specific index use the [StopIndexOperation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx). + +* In this page: + * [Overview](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#overview) + * [Pause indexing example](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#pause-indexing-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#syntax) + + +## Overview + +#### Which node is indexing paused for? + +* When pausing indexing from the **client**: + Indexing will be paused on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When pausing indexing from the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#more-actions) view: + Indexing will be paused on the local node the browser is opened on, even if it is Not the preferred node. +#### What happens when indexing is paused for a node? + +* No indexing takes place on a node that indexing is paused for. + New data **is** indexed on database-group nodes that indexing is not paused for. + +* All indexes, including paused ones, can be queried, + but results may be stale when querying nodes that indexing has been paused for. + +* New indexes **can** be created for the database. + However, the new indexes will also be paused on any node that indexing is paused for, + until indexing is resumed for that node. + +* When [resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) indexes + or editing index definitions, re-indexing on a node that indexing has been paused for will + only be triggered when indexing is resumed on that node. 
+#### Resuming indexing: + +* Learn to resume indexing for all indexes by a client, here: [resume indexing](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx) + +* Learn to resume indexing for all indexes via **Studio**, here: [database list view](../../../../studio/database/databases-list-view.mdx#more-actions) + +* Pausing indexing is **Not a persistent operation**. + This means that all paused indexes will resume upon either of the following: + * The server is restarted. + * The database is re-loaded (by disabling and then enabling it). + Toggling the database state can be done using the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#database-actions) view, + or using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) by the client. + + + +## Pause indexing example + + + +{`// Define the pause indexing operation +$pauseIndexingOp = new StopIndexingOperation(); + +// Execute the operation by passing it to maintenance->send +$store->maintenance()->send($pauseIndexingOp); + +// At this point: +// All indexes in the default database will be 'paused' on the preferred node + +// Can verify indexing status on the preferred node by sending GetIndexingStatusOperation +/** @var IndexingStatus $indexingStatus */ +$indexingStatus = $store->maintenance()->send(new GetIndexingStatusOperation()); +$this->assertTrue($indexingStatus->getStatus()->isPaused()); +`} + + + + + +## Syntax + + + +{`// class name begins with "Stop" but this is still the "Pause" operation +StopIndexingOperation() +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-python.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-python.mdx new file mode 100644 index 0000000000..04a8f83777 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/_stop-indexing-python.mdx @@ -0,0 +1,92 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `StopIndexingOperation` to **pause indexing** for ALL indexes in the database. + +* To pause only a specific index use the [StopIndexOperation](../../../../client-api/operations/maintenance/indexes/stop-index.mdx). + +* In this page: + * [Overview](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#overview) + * [Pause indexing example](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#pause-indexing-example) + * [Syntax](../../../../client-api/operations/maintenance/indexes/stop-indexing.mdx#syntax) + + +## Overview + +#### Which node is indexing paused for? + +* When pausing indexing from the **client**: + Indexing will be paused on the [preferred node](../../../../client-api/configuration/load-balance/overview.mdx#the-preferred-node) only, and Not on all the database-group nodes. + +* When pausing indexing from the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#more-actions) view: + Indexing will be paused on the local node the browser is opened on, even if it is Not the preferred node. +#### What happens when indexing is paused for a node? + +* No indexing takes place on a node that indexing is paused for. + New data **is** indexed on database-group nodes that indexing is not paused for. 
+ +* All indexes, including paused ones, can be queried, + but results may be stale when querying nodes that indexing has been paused for. + +* New indexes **can** be created for the database. + However, the new indexes will also be paused on any node that indexing is paused for, + until indexing is resumed for that node. + +* When [resetting](../../../../client-api/operations/maintenance/indexes/reset-index.mdx) indexes + or editing index definitions, re-indexing on a node that indexing has been paused for will + only be triggered when indexing is resumed on that node. +#### Resuming indexing: + +* Learn to resume indexing for all indexes by a client, here: [resume indexing](../../../../client-api/operations/maintenance/indexes/start-indexing.mdx) + +* Learn to resume indexing for all indexes via **Studio**, here: [database list view](../../../../studio/database/databases-list-view.mdx#more-actions) + +* Pausing indexing is **Not a persistent operation**. + This means that all paused indexes will resume upon either of the following: + * The server is restarted. + * The database is re-loaded (by disabling and then enabling it). + Toggling the database state can be done using the **Studio** [database list](../../../../studio/database/databases-list-view.mdx#database-actions) view, + or using [ToggleDatabasesStateOperation](../../../../client-api/operations/server-wide/toggle-databases-state.mdx) by the client. + + + +## Pause indexing example + + + +{`# Define the pause indexing operation +pause_indexing_op = StopIndexingOperation() + +# Execute the operation by passing it to maintenance.send +store.maintenance.send(pause_indexing_op) + +# At this point: +# All indexes in the default database will be 'paused' on the preferred node + +# Can verify indexing status on the preferred node by sending GetIndexingStatusOperation +indexing_status = store.maintenance.send(GetIndexingStatusOperation()) +self.assertEqual(indexing_status.status, IndexRunningStatus.PAUSED) +`} + + + + + +## Syntax + + + +{`# Note: although the class name contains "Stop", this is the "Pause" operation +class StopIndexingOperation(VoidMaintenanceOperation): + def __init__(self): ... 
+`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/delete-index-errors.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/delete-index-errors.mdx new file mode 100644 index 0000000000..286dfdb070 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/delete-index-errors.mdx @@ -0,0 +1,49 @@ +--- +title: "Delete Index Errors Operation" +hide_table_of_contents: true +sidebar_label: Delete Index Errors +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeleteIndexErrorsCsharp from './_delete-index-errors-csharp.mdx'; +import DeleteIndexErrorsPython from './_delete-index-errors-python.mdx'; +import DeleteIndexErrorsPhp from './_delete-index-errors-php.mdx'; +import DeleteIndexErrorsNodejs from './_delete-index-errors-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/delete-index.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/delete-index.mdx new file mode 100644 index 0000000000..71cacc4daf --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/delete-index.mdx @@ -0,0 +1,53 @@ +--- +title: "Delete Index Operation" +hide_table_of_contents: true +sidebar_label: Delete Index +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeleteIndexCsharp from './_delete-index-csharp.mdx'; +import DeleteIndexJava from './_delete-index-java.mdx'; +import DeleteIndexPython from './_delete-index-python.mdx'; +import DeleteIndexPhp from './_delete-index-php.mdx'; +import DeleteIndexNodejs from './_delete-index-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/disable-index.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/disable-index.mdx new file mode 100644 index 0000000000..546efeb1e3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/disable-index.mdx @@ -0,0 +1,55 @@ +--- +title: "Disable Index" +hide_table_of_contents: true +sidebar_label: Disable Index +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DisableIndexCsharp from './_disable-index-csharp.mdx'; +import DisableIndexJava from './_disable-index-java.mdx'; +import DisableIndexPython from './_disable-index-python.mdx'; +import DisableIndexPhp from './_disable-index-php.mdx'; +import DisableIndexNodejs from './_disable-index-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/enable-index.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/enable-index.mdx new file mode 100644 index 0000000000..48c6ace76e --- /dev/null +++ 
b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/enable-index.mdx @@ -0,0 +1,55 @@ +--- +title: "Enable Index Operation" +hide_table_of_contents: true +sidebar_label: Enable Index +sidebar_position: 7 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import EnableIndexCsharp from './_enable-index-csharp.mdx'; +import EnableIndexJava from './_enable-index-java.mdx'; +import EnableIndexPython from './_enable-index-python.mdx'; +import EnableIndexPhp from './_enable-index-php.mdx'; +import EnableIndexNodejs from './_enable-index-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-index-errors.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-index-errors.mdx new file mode 100644 index 0000000000..b85ca61d59 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-index-errors.mdx @@ -0,0 +1,54 @@ +--- +title: "Get Index Errors Operation" +hide_table_of_contents: true +sidebar_label: Get Index Errors +sidebar_position: 14 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetIndexErrorsCsharp from './_get-index-errors-csharp.mdx'; +import GetIndexErrorsJava from './_get-index-errors-java.mdx'; +import GetIndexErrorsPython from './_get-index-errors-python.mdx'; +import GetIndexErrorsPhp from './_get-index-errors-php.mdx'; +import GetIndexErrorsNodejs from './_get-index-errors-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-index-names.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-index-names.mdx new file mode 100644 index 0000000000..c729d93ffd --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-index-names.mdx @@ -0,0 +1,54 @@ +--- +title: "Get Index Names Operation" +hide_table_of_contents: true +sidebar_label: Get Index Names +sidebar_position: 15 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetIndexNamesCsharp from './_get-index-names-csharp.mdx'; +import GetIndexNamesJava from './_get-index-names-java.mdx'; +import GetIndexNamesPython from './_get-index-names-python.mdx'; +import GetIndexNamesPhp from './_get-index-names-php.mdx'; +import GetIndexNamesNodejs from './_get-index-names-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-index.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-index.mdx new file mode 100644 index 0000000000..265f20a539 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-index.mdx @@ -0,0 +1,54 @@ +--- +title: "Get Index Operation" +hide_table_of_contents: true +sidebar_label: Get Index +sidebar_position: 
12 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetIndexCsharp from './_get-index-csharp.mdx'; +import GetIndexJava from './_get-index-java.mdx'; +import GetIndexPython from './_get-index-python.mdx'; +import GetIndexPhp from './_get-index-php.mdx'; +import GetIndexNodejs from './_get-index-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-indexes.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-indexes.mdx new file mode 100644 index 0000000000..75c7ba547d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-indexes.mdx @@ -0,0 +1,55 @@ +--- +title: "Get Indexes Operation" +hide_table_of_contents: true +sidebar_label: Get Indexes +sidebar_position: 13 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetIndexesCsharp from './_get-indexes-csharp.mdx'; +import GetIndexesJava from './_get-indexes-java.mdx'; +import GetIndexesPython from './_get-indexes-python.mdx'; +import GetIndexesPhp from './_get-indexes-php.mdx'; +import GetIndexesNodejs from './_get-indexes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-terms.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-terms.mdx new file mode 100644 index 0000000000..e9e2bcf4f3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/get-terms.mdx @@ -0,0 +1,49 @@ +--- +title: "Get Index Terms Operation" +hide_table_of_contents: true +sidebar_label: Get Index Terms +sidebar_position: 16 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetTermsCsharp from './_get-terms-csharp.mdx'; +import GetTermsJava from './_get-terms-java.mdx'; +import GetTermsPython from './_get-terms-python.mdx'; +import GetTermsPhp from './_get-terms-php.mdx'; +import GetTermsNodejs from './_get-terms-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/index-has-changed.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/index-has-changed.mdx new file mode 100644 index 0000000000..1c9dfc2c04 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/index-has-changed.mdx @@ -0,0 +1,54 @@ +--- +title: "Index has Changed Operation" +hide_table_of_contents: true +sidebar_label: Index Has Changed +sidebar_position: 17 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexHasChangedCsharp from './_index-has-changed-csharp.mdx'; +import IndexHasChangedJava from './_index-has-changed-java.mdx'; +import IndexHasChangedPython from 
'./_index-has-changed-python.mdx'; +import IndexHasChangedPhp from './_index-has-changed-php.mdx'; +import IndexHasChangedNodejs from './_index-has-changed-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/put-indexes.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/put-indexes.mdx new file mode 100644 index 0000000000..8a8c0cbe17 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/put-indexes.mdx @@ -0,0 +1,55 @@ +--- +title: "Put Indexes Operation" +hide_table_of_contents: true +sidebar_label: Put Indexes +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PutIndexesCsharp from './_put-indexes-csharp.mdx'; +import PutIndexesJava from './_put-indexes-java.mdx'; +import PutIndexesPython from './_put-indexes-python.mdx'; +import PutIndexesPhp from './_put-indexes-php.mdx'; +import PutIndexesNodejs from './_put-indexes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/reset-index.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/reset-index.mdx new file mode 100644 index 0000000000..09f323f319 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/reset-index.mdx @@ -0,0 +1,56 @@ +--- +title: "Reset Index Operation" +hide_table_of_contents: true +sidebar_label: Reset Index +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ResetIndexCsharp from './_reset-index-csharp.mdx'; +import ResetIndexJava from './_reset-index-java.mdx'; +import ResetIndexPython from './_reset-index-python.mdx'; +import ResetIndexPhp from './_reset-index-php.mdx'; +import ResetIndexNodejs from './_reset-index-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/set-index-lock.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/set-index-lock.mdx new file mode 100644 index 0000000000..fe2e234f2e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/set-index-lock.mdx @@ -0,0 +1,54 @@ +--- +title: "Set Index Lock Mode Operation" +hide_table_of_contents: true +sidebar_label: Set Index Lock Mode +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SetIndexLockCsharp from './_set-index-lock-csharp.mdx'; +import SetIndexLockJava from './_set-index-lock-java.mdx'; +import SetIndexLockPython from './_set-index-lock-python.mdx'; +import SetIndexLockPhp from './_set-index-lock-php.mdx'; +import SetIndexLockNodejs from './_set-index-lock-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + 
+ + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/set-index-priority.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/set-index-priority.mdx new file mode 100644 index 0000000000..fbcabc1cc4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/set-index-priority.mdx @@ -0,0 +1,54 @@ +--- +title: "Set Index Priority Operation" +hide_table_of_contents: true +sidebar_label: Set Index Priority +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SetIndexPriorityCsharp from './_set-index-priority-csharp.mdx'; +import SetIndexPriorityJava from './_set-index-priority-java.mdx'; +import SetIndexPriorityPython from './_set-index-priority-python.mdx'; +import SetIndexPriorityPhp from './_set-index-priority-php.mdx'; +import SetIndexPriorityNodejs from './_set-index-priority-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/start-index.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/start-index.mdx new file mode 100644 index 0000000000..7512ec5671 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/start-index.mdx @@ -0,0 +1,56 @@ +--- +title: "Resume Index Operation" +hide_table_of_contents: true +sidebar_label: Resume Index +sidebar_position: 10 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import StartIndexCsharp from './_start-index-csharp.mdx'; +import StartIndexJava from './_start-index-java.mdx'; +import StartIndexPython from './_start-index-python.mdx'; +import StartIndexPhp from './_start-index-php.mdx'; +import StartIndexNodejs from './_start-index-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/start-indexing.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/start-indexing.mdx new file mode 100644 index 0000000000..0bcbcbb8a0 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/start-indexing.mdx @@ -0,0 +1,57 @@ +--- +title: "Resume Indexing Operation" +hide_table_of_contents: true +sidebar_label: Resume Indexing +sidebar_position: 11 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import StartIndexingCsharp from './_start-indexing-csharp.mdx'; +import StartIndexingJava from './_start-indexing-java.mdx'; +import StartIndexingPython from './_start-indexing-python.mdx'; +import StartIndexingPhp from './_start-indexing-php.mdx'; +import StartIndexingNodejs from './_start-indexing-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/stop-index.mdx 
b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/stop-index.mdx new file mode 100644 index 0000000000..f7461b6435 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/stop-index.mdx @@ -0,0 +1,60 @@ +--- +title: "Pause Index Operation" +hide_table_of_contents: true +sidebar_label: Pause Index +sidebar_position: 8 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import StopIndexCsharp from './_stop-index-csharp.mdx'; +import StopIndexJava from './_stop-index-java.mdx'; +import StopIndexPython from './_stop-index-python.mdx'; +import StopIndexPhp from './_stop-index-php.mdx'; +import StopIndexNodejs from './_stop-index-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/stop-indexing.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/stop-indexing.mdx new file mode 100644 index 0000000000..805e9319b3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/indexes/stop-indexing.mdx @@ -0,0 +1,58 @@ +--- +title: "Pause Indexing Operation" +hide_table_of_contents: true +sidebar_label: Pause Indexing +sidebar_position: 9 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import StopIndexingCsharp from './_stop-indexing-csharp.mdx'; +import StopIndexingJava from './_stop-indexing-java.mdx'; +import StopIndexingPython from './_stop-indexing-python.mdx'; +import StopIndexingPhp from './_stop-indexing-php.mdx'; +import StopIndexingNodejs from './_stop-indexing-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/ongoing-tasks/_category_.json b/versioned_docs/version-7.1/client-api/operations/maintenance/ongoing-tasks/_category_.json new file mode 100644 index 0000000000..25fbf693c1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/ongoing-tasks/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 5, + "label": "Ongoing Tasks" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/ongoing-tasks/_ongoing-task-operations-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/ongoing-tasks/_ongoing-task-operations-csharp.mdx new file mode 100644 index 0000000000..8197a44643 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/ongoing-tasks/_ongoing-task-operations-csharp.mdx @@ -0,0 +1,213 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Once an ongoing task is created, it can be managed via the Client API [Operations](../../../../client-api/operations/what-are-operations.mdx). + You can get task info, toggle the task state (enable, disable), or delete the task. + +* Ongoing tasks can also be managed via the [Tasks list view](../../../../studio/database/tasks/ongoing-tasks/general-info.mdx#ongoing-tasks---view) in the Studio. 
+ +* In this page: + * [Get ongoing task info](../../../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#get-ongoing-task-info) + * [Delete ongoing task](../../../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#delete-ongoing-task) + * [Toggle ongoing task state](../../../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#toggle-ongoing-task-state) + * [Syntax](../../../../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx#syntax) + + + +## Get ongoing task info + +For the examples in this article, let's create a simple external replication ongoing task: + + + +{`// Define a simple External Replication task +var taskDefinition = new ExternalReplication +\{ + Name = "MyExtRepTask", + ConnectionStringName = "MyConnectionStringName" +\}; + +// Deploy the task to the server +var taskOp = new UpdateExternalReplicationOperation(taskDefinition); +var sendResult = store.Maintenance.Send(taskOp); + +// The task ID is available in the send result +var taskId = sendResult.TaskId; +`} + + +Use `GetOngoingTaskInfoOperation` to get information about an ongoing task. + + + + +{`// Define the get task operation, pass: +// * The ongoing task ID or the task name +// * The task type +var getTaskInfoOp = new GetOngoingTaskInfoOperation(taskId, OngoingTaskType.Replication); + +// Execute the operation by passing it to Maintenance.Send +var taskInfo = (OngoingTaskReplication)store.Maintenance.Send(getTaskInfoOp); + +// Access the task info +var taskState = taskInfo.TaskState; +var taskDelayTime = taskInfo.DelayReplicationFor; +var destinationUrls = taskInfo.TopologyDiscoveryUrls; +// ... +`} + + + + +{`var getTaskInfoOp = new GetOngoingTaskInfoOperation(taskId, OngoingTaskType.Replication); +var taskInfo = (OngoingTaskReplication) await store.Maintenance.SendAsync(getTaskInfoOp); + +var taskState = taskInfo.TaskState; +var taskDelayTime = taskInfo.DelayReplicationFor; +var destinationUrls = taskInfo.TopologyDiscoveryUrls; +// ... +`} + + + + + + +## Delete ongoing task + +Use `DeleteOngoingTaskOperation` to remove an ongoing task from the list of tasks assigned to the database. + + + + +{`// Define the delete task operation, pass: +// * The ongoing task ID +// * The task type +var deleteTaskOp = new DeleteOngoingTaskOperation(taskId, OngoingTaskType.Replication); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(deleteTaskOp); +`} + + + + +{`var deleteTaskOp = new DeleteOngoingTaskOperation(taskId, OngoingTaskType.Replication); +await store.Maintenance.SendAsync(deleteTaskOp); +`} + + + + + + +## Toggle ongoing task state + +Use `ToggleOngoingTaskStateOperation` to enable/disable the task state. 
+ + + +{`// Define the toggle task operation, pass: +// * The ongoing task ID +// * The task type +// * A boolean value: true to disable the task, false to enable it +var toggleTaskOp = new ToggleOngoingTaskStateOperation(taskId, OngoingTaskType.Replication, true); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(toggleTaskOp); +`} + + + + +{`var toggleTaskOp = new ToggleOngoingTaskStateOperation(taskId, OngoingTaskType.Replication, true); +await store.Maintenance.SendAsync(toggleTaskOp); +`} + + + + + + +## Syntax + + + +{`// Get +public GetOngoingTaskInfoOperation(long taskId, OngoingTaskType type); +public GetOngoingTaskInfoOperation(string taskName, OngoingTaskType type); +`} + + + + + +{`// Delete +public DeleteOngoingTaskOperation(long taskId, OngoingTaskType taskType); +`} + + + + + +{`// Toggle +public ToggleOngoingTaskStateOperation(long taskId, OngoingTaskType type, bool disable); +`} + + + +| Parameter | Type | Description | +|--------------|-------------------|--------------------------------------------------------| +| **taskId** | `long` | Task ID | +| **taskName** | `string` | Task name | +| **taskType** | `OngoingTaskType` | Task type | +| **disable** | `bool` | `true` - disable the task; `false` - enable the task | + + + +{`public enum OngoingTaskType +\{ + Replication, + RavenEtl, + SqlEtl, + OlapEtl, + ElasticSearchEtl, + QueueEtl, + Backup, + Subscription, + PullReplicationAsHub, + PullReplicationAsSink, + QueueSink, +\} +`} + + + +
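+ +Note that `GetOngoingTaskInfoOperation` can also be constructed with the task **name** instead of the task ID, as the constructors above show. A minimal sketch, reusing the task name defined in the example at the top of this page: + +{`// Get task info by task name instead of task ID +// ("MyExtRepTask" is the task name defined in the example above) +var getTaskInfoByNameOp = new GetOngoingTaskInfoOperation("MyExtRepTask", OngoingTaskType.Replication); +var taskInfo = (OngoingTaskReplication)store.Maintenance.Send(getTaskInfoByNameOp); +`} + +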
+ +| Return value of `store.Maintenance.Send(GetOngoingTaskInfoOperation)` | | +|-------------------------------------------------------------------------|----------------------------------------| +| `OngoingTaskReplication` | Object with information about the task | + + + +{`public sealed class OngoingTaskReplication : OngoingTask +\{ + public OngoingTaskReplication() => this.TaskType = OngoingTaskType.Replication; + public string DestinationUrl \{ get; set; \} + public string[] TopologyDiscoveryUrls \{ get; set; \} + public string DestinationDatabase \{ get; set; \} + public string ConnectionStringName \{ get; set; \} + public TimeSpan DelayReplicationFor \{ get; set; \} +\} +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx new file mode 100644 index 0000000000..17ad1464e1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx @@ -0,0 +1,24 @@ +--- +title: "Ongoing Task Operations" +hide_table_of_contents: true +sidebar_label: Ongoing Task Operations +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OngoingTaskOperationsCsharp from './_ongoing-task-operations-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/sorters/_put-sorter-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/sorters/_put-sorter-csharp.mdx new file mode 100644 index 0000000000..35b4e1a8d2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/sorters/_put-sorter-csharp.mdx @@ -0,0 +1,115 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Lucene indexing engine allows you to create your own __Custom Sorters__ + where you can define how query results will be ordered based on your specific requirements. + +* Use `PutSortersOperation` to deploy a custom sorter to the RavenDB server. + Once deployed, you can use it to sort query results for all queries made on the __database__ + that is scoped to your [Document Store](../../../../client-api/setting-up-default-database.mdx). + +* To deploy a custom sorter that will apply cluster-wide, to all databases, see [put server-wide sorter](../../../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx). + +* A custom sorter can also be uploaded to the server from the [Studio](../../../../studio/database/settings/custom-sorters.mdx). + +* In this page: + * [Put custom sorter](../../../../client-api/operations/maintenance/sorters/put-sorter.mdx#put-custom-sorter) + * [Syntax](../../../../client-api/operations/maintenance/sorters/put-sorter.mdx#syntax) + + +## Put custom sorter + +* First, create your own sorter class that inherits from the Lucene class [Lucene.Net.Search.FieldComparator](https://lucenenet.apache.org/docs/3.0.3/df/d91/class_lucene_1_1_net_1_1_search_1_1_field_comparator.html). + +* Then, send the custom sorter to the server using the `PutSortersOperation`. 
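+ +For illustration, here is a minimal sketch of what such a sorter class could look like. The member signatures follow the Lucene.Net 3.0.3 `FieldComparator` class linked above, and the class name matches the `Name` used in the deployment example below; the constructor parameters and the stubbed comparison logic are assumptions shown for shape only, not a working sorter: + +{`using System; +using Lucene.Net.Index; +using Lucene.Net.Search; + +// Illustrative skeleton only - replace the stubs with real comparison logic +public class MySorter : FieldComparator +\{ + public MySorter(string fieldName, int numHits, int sortPos, bool reversed) + \{ + // Store whatever arguments your comparison logic needs + \} + + // Compare the values stored in two result slots + public override int Compare(int slot1, int slot2) => 0; + + // Track the entry currently at the bottom of the priority queue + public override void SetBottom(int slot) \{ \} + + // Compare a document against the bottom entry + public override int CompareBottom(int doc) => 0; + + // Copy the sort value of a document into a slot + public override void Copy(int slot, int doc) \{ \} + + // Called when the search advances to the next index segment + public override void SetNextReader(IndexReader reader, int docBase) \{ \} + + // Return the sort value stored in a slot + public override IComparable this[int slot] => null; +\} +`} + +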
+ + + + +{`// Assign the code of your custom sorter as a \`string\` +string mySorterCode = ""; + +// Create the \`SorterDefinition\` object +var customSorterDefinition = new SorterDefinition +{ + // The sorter Name must be the same as the sorter's class name in your code + Name = "MySorter", + // The Code must be compilable and include all necessary using statements + Code = mySorterCode +}; + +// Define the put sorters operation, pass the sorter definition +// Note: multiple sorters can be passed, see syntax below +var putSortersOp = new PutSortersOperation(customSorterDefinition); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(putSortersOp); +`} + + + + +{`// Assign the code of your custom sorter as a \`string\` +string mySorterCode = ""; + +// Create the \`SorterDefinition\` object +var customSorterDefinition = new SorterDefinition +{ + // The sorter Name must be the same as the sorter's class name in your code + Name = "MySorter", + // The Code must be compilable and include all necessary using statements + Code = mySorterCode +}; + +// Define the put sorters operation, pass the sorter definition +// Note: multiple sorters can be passed, see syntax below +var putSortersOp = new PutSortersOperation(customSorterDefinition); + +// Execute the operation by passing it to Maintenance.SendAsync +await store.Maintenance.SendAsync(putSortersOp); +`} + + + + + + +You can now order your query results using the custom sorter. +A query example is available [here](../../../../client-api/session/querying/sort-query-results.mdx#custom-sorters). + + + + + +## Syntax + + + +{`public PutSortersOperation(params SorterDefinition[] sortersToAdd) +`} + + + +| Parameter | Type | Description | +|-------------------|----------------------|------------------------------------------------------| +| __sortersToAdd__ | `SorterDefinition[]` | One or more Sorter Definitions to send to the server | + + + + +{`public class SorterDefinition +\{ + public string Name \{ get; set; \} + public string Code \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/sorters/_put-sorter-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/sorters/_put-sorter-nodejs.mdx new file mode 100644 index 0000000000..434d8e8eda --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/sorters/_put-sorter-nodejs.mdx @@ -0,0 +1,85 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Lucene indexing engine allows you to create your own __Custom Sorters__ + where you can define how query results will be ordered based on your specific requirements. + +* Use `PutSortersOperation` to deploy a custom sorter to the RavenDB server. + Once deployed, you can use it to sort query results for all queries made on the __database__ + that is scoped to your [Document Store](../../../../client-api/setting-up-default-database.mdx). + +* To deploy a custom sorter that will apply cluster-wide, to all databases, see [put server-wide sorter](../../../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx). + +* A custom sorter can also be uploaded to the server from the [Studio](../../../../studio/database/settings/custom-sorters.mdx). 
+ +* In this page: + * [Put custom sorter](../../../../client-api/operations/maintenance/sorters/put-sorter.mdx#put-custom-sorter) + * [Syntax](../../../../client-api/operations/maintenance/sorters/put-sorter.mdx#syntax) + + +## Put custom sorter + +* First, create your own sorter class that inherits from the Lucene class [Lucene.Net.Search.FieldComparator](https://lucenenet.apache.org/docs/3.0.3/df/d91/class_lucene_1_1_net_1_1_search_1_1_field_comparator.html). + +* Then, send the custom sorter to the server using the `PutSortersOperation`. + + + +{`// Create the sorter definition object +const sorterDefinition = \{ + // The sorter name must be the same as the sorter's class name in your code + name: "MySorter", + // The code must be compilable and include all necessary using statements (C# code) + code: "" +\}; + +// Define the put sorters operation, pass the sorter definition +const putSorterOp = new PutSortersOperation(sorterDefinition); + +// Execute the operation by passing it to maintenance.send +await documentStore.maintenance.send(putSorterOp); +`} + + + + + +You can now order your query results using the custom sorter. +A query example is available [here](../../../../client-api/session/querying/sort-query-results.mdx#custom-sorters). + + + + + +## Syntax + + + +{`const putSorterOp = new PutSortersOperation(sortersToAdd); +`} + + + +| Parameter | Type | Description | +|-------------------|---------------|-------------------------------------------------------------| +| __sortersToAdd__ | `...object[]` | One or more Sorter Definition objects to send to the server | + + + + +{`// The sorter definition object +\{ + name: string; + code: string; +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/maintenance/sorters/put-sorter.mdx b/versioned_docs/version-7.1/client-api/operations/maintenance/sorters/put-sorter.mdx new file mode 100644 index 0000000000..b1ffdf268f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/maintenance/sorters/put-sorter.mdx @@ -0,0 +1,39 @@ +--- +title: "Put Custom Sorter Operation" +hide_table_of_contents: true +sidebar_label: Put Custom Sorter +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PutSorterCsharp from './_put-sorter-csharp.mdx'; +import PutSorterNodejs from './_put-sorter-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/patching/_category_.json b/versioned_docs/version-7.1/client-api/operations/patching/_category_.json new file mode 100644 index 0000000000..f0be7deba4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/patching/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 7, + "label": "Patching" +} diff --git a/versioned_docs/version-7.1/client-api/operations/patching/_set-based-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/patching/_set-based-csharp.mdx new file mode 100644 index 0000000000..3b228ced53 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/patching/_set-based-csharp.mdx @@ -0,0 +1,326 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + +Sometimes we need to update a large number of documents matching certain criteria. 
A simple SQL query doing that would look like this: + +`UPDATE Users SET IsActive = 0 WHERE LastLogin < '2020-01-01'` + +This is usually not the case for NoSQL databases, where set-based operations are often not supported. RavenDB does support them: you pass it a query and an operation definition, and it will run the query and perform that operation on the results. + +The same queries and indexes that are used for data retrieval are used for set-based operations. The syntax defining which documents to work on is exactly the same as the syntax you'd use to pull those documents from the store. + +In this page: +[Syntax overview](../../../client-api/operations/patching/set-based.mdx#syntax-overview) +[Examples](../../../client-api/operations/patching/set-based.mdx#examples) +[Additional notes](../../../client-api/operations/patching/set-based.mdx#additional-notes) + + + +## Syntax overview + +### Sending a Patch Request + + + +{`Operation Send(PatchByQueryOperation operation); +`} + + + +| Parameter | | | +| ------------- | ------------- | ----- | +| **operation** | `PatchByQueryOperation` | PatchByQueryOperation object, describing the query and the patch that will be performed | + +| Return Value | | +| ------------- | ----- | +| `Operation` | Object that allows waiting for the operation to complete. It may also return information about the performed patch; see the examples below. | + +### PatchByQueryOperation + + + +{`public PatchByQueryOperation(string queryToUpdate) +`} + + + + + +{`public PatchByQueryOperation(IndexQuery queryToUpdate, QueryOperationOptions options = null) +`} + + + +| Parameter | Type | Description | +|-------------------|-------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **queryToUpdate** | `string` | The query & patch definition.
The RQL query starts as any other RQL query with a "from" statement.
It continues with an "update" clause that contains the JavaScript patching code. | +| **queryToUpdate** | `IndexQuery` | Object containing the query & the patching string,
with the option to use parameters. | +| **options** | `QueryOperationOptions` | Options defining how the operation will be performed and various constraints on it.
Default: `null` | + +## Examples + +### Update whole collection + + +{`// increase the Freight field by 10 in all orders +var operation = store + .Operations + .Send(new PatchByQueryOperation(@"from Orders as o + update + \{ + o.Freight += 10; + \}")); +// Wait for the operation to complete on the server side. +// Not waiting for completion will not harm the patch process and it will continue running to completion. +operation.WaitForCompletion(); +`} + + + +### Update by dynamic query + + +{`// set a discount on all orders that were processed by a specific employee +var operation = store + .Operations + .Send(new PatchByQueryOperation(@"from Orders as o + where o.Employee = 'employees/4-A' + update + \{ + o.Lines.forEach(line => line.Discount = 0.3); + \}")); +operation.WaitForCompletion(); +`} + + + +### Update by static index query result + + +{`// replace supplier 'suppliers/12-A' with 'suppliers/13-A' on all products +var operation = store + .Operations + .Send(new PatchByQueryOperation(new IndexQuery + \{ + Query = @"from index 'Product/Search' as p + where p.Supplier = 'suppliers/12-A' + update + \{ + p.Supplier = 'suppliers/13-A' + \}" + \})); + +operation.WaitForCompletion(); +`} + + + +### Updating a collection name + + +{`// delete the document before recreating it with a different collection name +var operation = store + .Operations + .Send(new PatchByQueryOperation(new IndexQuery + \{ + Query = @"from Orders as c + update + \{ + del(id(c)); + this[""@metadata""][""@collection""] = ""New_Orders""; + put(id(c), this); + \}" + \})); + +operation.WaitForCompletion(); +`} + + + +### Updating by document ID + + +{`// perform a patch by document ID +var operation = store + .Operations + .Send(new PatchByQueryOperation(new IndexQuery + \{ + Query = @"from @all_docs as d + where id() in ('orders/1-A', 'companies/1-A') + update + \{ + d.Updated = true; + \}" + \})); + +operation.WaitForCompletion(); +`} + + + +### Updating by document ID using parameters + + +{`// perform a patch by document ID +var operation = store + .Operations + .Send(new PatchByQueryOperation(new IndexQuery + \{ + QueryParameters = new Parameters + \{ + \{"ids", new[] \{"orders/1-A", "companies/1-A"\}\} + \}, + Query = @"from @all_docs as d + where id() in ($ids) + update + \{ + d.Updated = true; + \}" + \})); + +operation.WaitForCompletion(); +`} + + + +### Updating all documents + + +{`// perform a patch on all documents using the @all_docs keyword +var operation = store + .Operations + .Send(new PatchByQueryOperation(new IndexQuery + \{ + Query = @"from @all_docs + update + \{ + this.Updated = true; + \}" + \})); + +operation.WaitForCompletion(); +`} + + + +### Patch on stale results + + +{`// patch on stale results +var operation = store + .Operations + .Send(new PatchByQueryOperation(new IndexQuery + \{ + Query = @"from Orders as o + where o.Company = 'companies/12-A' + update + \{ + o.Company = 'companies/13-A' + \}" + \}, + new QueryOperationOptions + \{ + AllowStale = true + \})); + +operation.WaitForCompletion(); +`} + + + +### Report progress on patch + + +{`// report progress during patch processing +var operation = store + .Operations + .Send(new PatchByQueryOperation(new IndexQuery + \{ + Query = @"from Orders as o + where o.Company = 'companies/12-A' + update + \{ + o.Company = 'companies/13-A' + \}" + \}, + new QueryOperationOptions + \{ + AllowStale = true + \})); + +operation.OnProgressChanged += (sender, x) => +\{ + var det = (DeterminateProgress)x; + Console.WriteLine($"Processed: \{det.Processed\}; 
Total: \{det.Total\}"); +\}; + +operation.WaitForCompletion(); +`} + + + +### Process patch results details + + +{`// perform a patch and create a summary of processing statuses +var operation = store + .Operations + .Send(new PatchByQueryOperation(new IndexQuery + \{ + Query = @"from Orders as o + where o.Company = 'companies/12-A' + update + \{ + o.Company = 'companies/13-A' + \}" + \}, + new QueryOperationOptions + \{ + RetrieveDetails = true + \})); + +var result = operation.WaitForCompletion(); +var formattedResults = + result.Details + .Select(x => (BulkOperationResult.PatchDetails)x) + .GroupBy(x => x.Status) + .Select(x => $"\{x.Key\}: \{x.Count()\}").ToList(); + +formattedResults.ForEach(Console.WriteLine); +`} + + + + + +## Additional notes + + + +By default, set-based operations will **not work** on indexes that are stale. The operations will **only succeed** if the specified **index is not stale**. This is to make sure you only modify what you intended to modify. + +For indexes that are updated all the time, you can set the `AllowStale` field of `QueryOperationOptions` to `true` if you want to patch on stale results. + + + + + +The patching of documents matching a specified query is run in batches of size 1024. RavenDB doesn't do concurrency checks during the operation, so it can happen that a document has been updated or deleted in the meantime. + + + + + +The patching of documents matching a specified query is run in batches of size 1024. +Each batch is handled in a separate write transaction. + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/patching/_set-based-java.mdx b/versioned_docs/version-7.1/client-api/operations/patching/_set-based-java.mdx new file mode 100644 index 0000000000..05609e94de --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/patching/_set-based-java.mdx @@ -0,0 +1,257 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + +Sometimes we need to update a large number of documents matching certain criteria. A simple SQL query doing that would look like this: + +`UPDATE Users SET IsActive = 0 WHERE LastLogin < '2020-01-01'` + +This is usually not the case for NoSQL databases, where set-based operations are often not supported. RavenDB does support them: you pass it a query and an operation definition, and it will run the query and perform that operation on the results. + +The same queries and indexes that are used for data retrieval are used for set-based operations. The syntax defining which documents to work on is exactly the same as the syntax you'd use to pull those documents from the store. + +In this page: +[Syntax overview](../../../client-api/operations/patching/set-based.mdx#syntax-overview) +[Examples](../../../client-api/operations/patching/set-based.mdx#examples) +[Additional notes](../../../client-api/operations/patching/set-based.mdx#additional-notes) + + + +## Syntax overview + +### Sending a Patch Request + + + +{`Operation sendAsync(PatchByQueryOperation operation); +`} + + + +| Parameter | | | +| ------------- | ------------- | ----- | +| **operation** | `PatchByQueryOperation` | PatchByQueryOperation object, describing the query and the patch that will be performed | + +| Return Value | | +| ------------- | ----- | +| `Operation` | Object that allows waiting for the operation to complete. It may also return information about the performed patch; see the examples below. 
| + +### PatchByQueryOperation + + + +{`public PatchByQueryOperation(String queryToUpdate) +`} + + + + + +{`public PatchByQueryOperation(IndexQuery queryToUpdate); + +public PatchByQueryOperation(IndexQuery queryToUpdate, QueryOperationOptions options); +`} + + + +| Parameter | Type | Description | +|-------------------|--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **queryToUpdate** | `String` or `IndexQuery` | RQL query defining the update operation. The RQL query starts as any other RQL query, with a "from" statement, and continues with an "update" clause in which you describe the JavaScript patch code. | +| **options** | `QueryOperationOptions` | Options defining how the operation will be performed and various constraints on it. | + +## Examples + +### Update whole collection + + +{`// increase the Freight field by 10 in all orders +Operation operation = store + .operations() + .sendAsync(new PatchByQueryOperation("from Orders as o update \{" + + " o.Freight += 10;" + + "\}")); + +// Wait for the operation to complete on the server side. +// Not waiting for completion will not harm the patch process and it will continue running to completion. +operation.waitForCompletion(); +`} + + + +### Update by dynamic query + + +{`Operation operation = store + .operations() + .sendAsync(new PatchByQueryOperation("from Orders as o" + + " where o.Employee = 'employees/1-A'" + + " update " + + "\{ " + + " o.Lines.forEach(line => line.Discount = 0.3);" + + "\}")); + +operation.waitForCompletion(); +`} + + + +### Update by static index query result + + +{`// replace supplier 'suppliers/12-A' with 'suppliers/13-A' on all products +Operation operation = store + .operations() + .sendAsync(new PatchByQueryOperation(new IndexQuery("" + + "from index 'Product/Search' as p " + + " where p.Supplier = 'suppliers/12-A'" + + " update \{" + + " p.Supplier = 'suppliers/13-A'" + + "\}"))); + + +operation.waitForCompletion(); +`} + + + +### Updating a collection name + + +{`// delete the document before recreating it with a different collection name + +Operation operation = store + .operations() + .sendAsync(new PatchByQueryOperation(new IndexQuery( + "from Orders as c " + + "update \{" + + " del(id(c));" + + " this['@metadata']['@collection'] = 'New_Orders'; " + + " put(id(c), this); " + + "\}" + ))); + +operation.waitForCompletion(); +`} + + + +### Updating by document ID + + +{`// perform a patch by document ID + +Operation operation = store + .operations() + .sendAsync(new PatchByQueryOperation(new IndexQuery( + "from @all_docs as d " + + " where id() in ('orders/1-A', 'companies/1-A')" + + " update " + + "\{" + + " d.Updated = true; " + + "\} " + ))); + +operation.waitForCompletion(); +`} + + + +### Updating by document ID using parameters + + +{`// perform a patch by document ID +IndexQuery indexQuery = new IndexQuery( + "from @all_docs as d " + + " where id() in ($ids)" + + " update " + + " \{" + + " d.Updated = true; " + + "\} " +); +Parameters parameters = new Parameters(); +parameters.put("ids", new String[]\{"orders/1-A", "companies/1-A"\}); +indexQuery.setQueryParameters(parameters); +Operation operation = store + .operations() + .sendAsync(new PatchByQueryOperation(indexQuery)); + +operation.waitForCompletion(); +`} + + + +### Updating all documents + + +{`// perform a
patch on all documents using @all_docs keyword + +Operation operation = store + .operations() + .sendAsync(new PatchByQueryOperation(new IndexQuery( + "from @all_docs " + + " update " + + "\{ " + + " this.Updated = true;" + + "\}" + ))); + +operation.waitForCompletion(); +`} + + + +### Patch on stale results + + +{`// patch on stale results + +QueryOperationOptions options = new QueryOperationOptions(); +options.setAllowStale(true); + +Operation operation = store + .operations() + .sendAsync(new PatchByQueryOperation(new IndexQuery( + "from Orders as o " + + "where o.Company = 'companies/12-A' " + + "update " + + "\{ " + + " o.Company = 'companies/13-A';" + + "\} " + ), options)); + + +operation.waitForCompletion(); +`} + + + + + +## Additional notes + + + +By default, set based operations will **not work** on indexes that are stale. The operations will **only succeed** if the specified **index is not stale**. This is to make sure you only delete what you intended to delete. + +For indexes that are updated all the time, you can set the AllowStale field of QueryOperationOptions to true if you want to patch on stale results. + + + + + +The patching of documents matching a specified query is run in batches of size 1024. RavenDB doesn't do concurrency checks during the operation so it can happen than a document has been updated or deleted meanwhile. + + + + + +The patching of documents matching a specified query is run in batches of size 1024. +Each batch is handled in a separate write transaction. + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/patching/_set-based-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/patching/_set-based-nodejs.mdx new file mode 100644 index 0000000000..66cbf27c7d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/patching/_set-based-nodejs.mdx @@ -0,0 +1,402 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Set-based patch operations allow you to apply changes to a set of documents that match specific criteria instead of separately targeting each document. + +* To perform patch operations on a single document see [Single Document Patch Operations](../../../client-api/operations/patching/single-document.mdx). + Set-based patching can also be done from the [Studio](../../../studio/database/documents/patch-view.mdx). 
+ +* In this page: + * [Overview](../../../client-api/operations/patching/set-based.mdx#overview) + * [Defining set-based patching](../../../client-api/operations/patching/set-based.mdx#defining-set-based-patching) + * [Important characteristics](../../../client-api/operations/patching/set-based.mdx#important-characteristics) + * [Examples](../../../client-api/operations/patching/set-based.mdx#examples) + * [Update by collection query](../../../client-api/operations/patching/set-based.mdx#update-by-collection-query) + * [Update by collection query - access metadata](../../../client-api/operations/patching/set-based.mdx#update-by-collection-query---access-metadata) + * [Update by dynamic query](../../../client-api/operations/patching/set-based.mdx#update-by-dynamic-query) + * [Update by static index query](../../../client-api/operations/patching/set-based.mdx#update-by-static-index-query) + * [Update all documents](../../../client-api/operations/patching/set-based.mdx#update-all-documents) + * [Update by document ID](../../../client-api/operations/patching/set-based.mdx#update-by-document-id) + * [Update by document ID using parameters](../../../client-api/operations/patching/set-based.mdx#update-by-document-id-using-parameters) + * [Allow updating stale results](../../../client-api/operations/patching/set-based.mdx#allow-updating-stale-results) + * [Syntax](../../../client-api/operations/patching/set-based.mdx#syntax) + * [Send syntax](../../../client-api/operations/patching/set-based.mdx#send-syntax) + * [PatchByQueryOperation syntax](../../../client-api/operations/patching/set-based.mdx#syntax) + + +## Overview + + + +
__Defining set-based patching__:
+  * In other databases, a simple SQL query that updates a set of documents can look like this:
+    `UPDATE Users SET IsActive = 0 WHERE LastLogin < '2020-01-01'`
+
+  * To achieve that in RavenDB, define the following two components within a `PatchByQueryOperation`:
+
+    1. __The query__:
+       An [RQL](../../../client-api/session/querying/what-is-rql.mdx) query that defines the set of documents to update.
+       Use the exact same syntax as you would when querying the database/indexes for usual data retrieval.
+
+    2. __The update__:
+       A JavaScript clause that defines the updates to perform on the documents resulting from the query.
+
+  * When the `PatchByQueryOperation` is sent, the server will run the query and perform the requested update on the query results.
+
+
+{`// A "query & update" sample
+// Update the set of documents from the Orders collection that match the query criteria:
+// =====================================================================================
+
+// The RQL part:
+from Orders where Freight < 10
+
+// The UPDATE part:
+update \{
+    this.Freight += 10;
+\}
+`}
+
+
+
+
+
+ __Important characteristics__:
+* __Transactional batches__:
+  The patching of documents matching a specified query is run in batches of size 1024.
+  Each batch is handled in a separate write transaction.
+
+* __Dynamic behavior__:
+  During the patching process, documents that are added/modified after the patching operation has started
+  may also be patched if they match the query criteria.
+
+* __Concurrency__:
+  RavenDB doesn't perform concurrency checks during the patching process, so a document
+  may be modified or deleted while patching is in progress.
+
+* __Patching stale indexes__:
+  By default, set-based patch operations will only succeed if the index is Not [stale](../../../indexes/stale-indexes.mdx).
+  For indexes that are frequently updated, you can explicitly allow patching on stale results if needed.
+  For an example, see [Allow updating stale results](../../../client-api/operations/patching/set-based.mdx#allow-updating-stale-results).
+
+* __Manage lengthy patch operations__:
+  The set-based patch operation (`PatchByQueryOperation`) runs in the server background and may take a long time to complete.
+  Executing the operation via the `send` method returns an object that can be __awaited for completion__ or __aborted__ (killed),
+  as shown in the sketch below.
+  Learn more about this and see dedicated examples in [Manage lengthy operations](../../../client-api/operations/what-are-operations.mdx#manage-lengthy-operations).
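+
+A minimal sketch, assuming the Node.js client API used in the examples below
+(the query, the `Checked` field, and the filter value are illustrative):
+
+{`// Send the set-based patch operation - the server will run it in the background:
+const patchByQueryOp = new PatchByQueryOperation(
+    \`from Orders as o
+     where o.Freight > 50
+     update
+     \{
+         o.Checked = true;
+     \}\`);
+
+const operation = await documentStore.operations.send(patchByQueryOp);
+
+// Wait for the server-side operation to complete:
+await operation.waitForCompletion();
+
+// Alternatively, the returned object can be used to abort (kill) the operation -
+// see the "Manage lengthy operations" article linked above.
+`}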
+ + + + + +## Examples + + + + __Update by collection query__: + + +{`// Update all documents in a collection +// ==================================== + +// Define the Patch by Query Operation, pass the "query & update" string: +const patchByQueryOp = new PatchByQueryOperation( + \`from Orders as o + update + \{ + // Increase the Freight in ALL documents in the Orders collection: + o.Freight += 10; + \}\`); + +// Execute the operation by passing it to operations.send: +const operation = await documentStore.operations.send(patchByQueryOp); +`} + + + + + + + __Update by collection query - access metadata__: + + +{`// Update the collection name for all documents in the collection +// ============================================================== + +// Delete the document before recreating it with a different collection name: +const patchByQueryOp = new PatchByQueryOperation( + \`from Orders as c + update + \{ + del(id(c)); + this["@metadata"]["@collection"] = "New_Orders"; + put(id(c), this); + \}\`); + +const operation = await documentStore.operations.send(patchByQueryOp); +`} + + + + + + + __Update by dynamic query__: + + +{`// Update all documents matching a dynamic query +// ============================================= + +// Update the Discount in all orders that match the dynamic query predicate: +const patchByQueryOp = new PatchByQueryOperation(\`from Orders as o + where o.Employee = 'employees/4-A' + update + \{ + o.Lines.forEach(line=> line.Discount = 0.3); + \}\`); + +const operation = await documentStore.operations.send(patchByQueryOp); + +// Note: An AUTO-INDEX will be created when the dynamic query is executed on the server. +`} + + + + + + + __Update by static index query__: + + + +{`// Update all documents matching a static index query +// ================================================== + +// Modify the Supplier to 'suppliers/13-A' for all products that have 'suppliers/12-A': +const patchByQueryOp = new PatchByQueryOperation(\`from index 'Products/BySupplier' as p + where p.Supplier = 'suppliers/12-A' + update + { + p.Supplier = 'suppliers/13-A' + }\`); + +const operation = await documentStore.operations.send(patchByQueryOp); +`} + + + + +{`class Products_BySupplier extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + // Define the index-fields + this.map("Products", p => ({ + Supplier : e.Supplier + })); + } +} +`} + + + + + + + + __Update all documents__: + + +{`// Update all documents matching an @all_docs query +// ================================================ + +// Patch the 'Updated' field to ALL documents (query is using the @all_docs keyword): +const patchByQueryOp = new PatchByQueryOperation(\`from @all_docs + update + \{ + this.Updated = true; + \}\`); + +const operation = await documentStore.operations.send(patchByQueryOp); +`} + + + + + + + __Update by document ID__: + + +{`// Update all documents matching a query by ID +// =========================================== + +// Patch the 'Updated' field to all documents that have the specified IDs: +const patchByQueryOp = new PatchByQueryOperation(\`from @all_docs as d + where id() in ('orders/1-A', 'companies/1-A') + update + \{ + d.Updated = true; + \}\`); + +const operation = await documentStore.operations.send(patchByQueryOp); +`} + + + + + + + __Update by document ID using parameters__: + + +{`// Update all documents matching a query by ID using query parmeters +// ================================================================= + +// Define an IndexQuery object: +const indexQuery = 
new IndexQuery(); + +// Define the "query & update" string +// Patch the 'Updated' field to all documents that have the specified IDs +// Parameter ($ids) contains the listed IDs: +indexQuery.query = \`from @all_docs as d + where id() in ($ids) + update \{ + d.Updated = true + \}\`; + +// Define the parameters for the script: +indexQuery.queryParameters = \{ + ids: ["orders/830-A", "companies/91-A"] +\}; + +// Pass the indexQuery to the operation definition +const patchByQueryOp = new PatchByQueryOperation(indexQuery); + +// Execute the operation +const operation = await documentStore.operations.send(patchByQueryOp); +`} + + + + + + + __Allow updating stale results__: +* Set `allowStale` to _true_ to allow patching of stale results. + +* The RQL in this example is using an auto-index. + Use _allowStale_ in exactly the same way when querying a static-index. + + + +{`// Update documents matching a dynamic query even if auot-index is stale +// ===================================================================== + +// Define an IndexQuery object: +const indexQuery = new IndexQuery(); + +// Define the "query & update" string +// Modify company to 'companies/13-A' for all orders that have 'companies/12-A': +indexQuery.query = \`from Orders as o + where o.Company = 'companies/12-A' + update + \{ + o.Company = 'companies/13-A' + \}\`; + +// Define query options: +const queryOptions = \{ + // The query uses an auto-index (index is created if it doesn't exist yet). + // Allow patching on all matching documents even if the auto-index is still stale. + allowStale: true +\}; + +// Pass indexQuery & queryOptions to the operation definition +const patchByQueryOp = new PatchByQueryOperation(indexQuery, queryOptions); + +// Execute the operation +const operation = await documentStore.operations.send(patchByQueryOp); +`} + + + + + + +## Syntax +#### Send syntax + + + +{`await send(operation); +`} + + + +| Parameter | Type | Description | +|---------------|-------------------------|---------------------------------------------------------------------| +| __operation__ | `PatchByQueryOperation` | The operation object describing the query and the patch to perform. | + +| Return value | | +|---------------------------------------|-----------------------------------------------------------------------------------------| +| `Promise` | A promise that resolves to an object that allows waiting for the operation to complete. | +#### PatchByQueryOperation syntax + + + +{`// Available overload: +// =================== +patchByQueryOp = new PatchByQueryOperation(queryToUpdate); +patchByQueryOp = new PatchByQueryOperation(queryToUpdate, options); +`} + + + +| Parameter | Type | Description | +|-------------------|--------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| __queryToUpdate__ | `string` | The query & patch definition.
The RQL query starts as any other RQL query with a "from" statement.
It continues with an "update" clause that contains the JavaScript patching code. |
+| __queryToUpdate__ | `IndexQuery` | Object containing the query & the patching string,
with the option to use parameters. | +| __options__ | `object` | Options for the _PatchByQueryOperation_. | + + + + +{`class IndexQuery \{ + query; // string + queryParameters; // Record +\} +`} + + + + + +{`// Options for 'PatchByQueryOperation' +\{ + // Limit the amount of base operation per second allowed. + maxOpsPerSecond; // number + + // Indicate whether operations are allowed on stale indexes. + allowStale; // boolean + + // If AllowStale is set to false and index is stale, + // then this is the maximum timeout to wait for index to become non-stale. + // If timeout is exceeded then exception is thrown. + staleTimeout; // number + + // Set whether operation details about each document should be returned by server. + retrieveDetails; // boolean +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/patching/_single-document-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/patching/_single-document-csharp.mdx new file mode 100644 index 0000000000..4726b95eb9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/patching/_single-document-csharp.mdx @@ -0,0 +1,1230 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The __Patch__ operation is used to perform _partial_ document updates with __one trip to the server__, + instead of loading, modifying, and saving a full document. + The whole operation is executed on the server-side and is useful as a performance enhancement or for + updating denormalized data in entities. + +* Since the operation is executed in a single request to the database, + the patch command is performed in a single write [transaction](../../../client-api/faq/transaction-support.mdx). + +* The current page covers patch operations on single documents. + +* Patching has three possible interfaces: [Session API](../../../client-api/operations/patching/single-document.mdx#session-api), +[Session API using Defer](../../../client-api/operations/patching/single-document.mdx#session-api-using-defer), +and [Operations API](../../../client-api/operations/patching/single-document.mdx#operations-api). + +* Patching can be done from the [client API](../../../client-api/operations/patching/single-document.mdx#examples) as well as in the [studio](../../../studio/database/documents/patch-view.mdx). 
+ +In this page: + +* [API overview](../../../client-api/operations/patching/single-document.mdx#api-overview) + * [Session API](../../../client-api/operations/patching/single-document.mdx#session-api) + * [Session API using Defer](../../../client-api/operations/patching/single-document.mdx#session-api-using-defer) + * [Operations API](../../../client-api/operations/patching/single-document.mdx#operations-api) + * [List of script methods](../../../client-api/operations/patching/single-document.mdx#list-of-script-methods) +* [Examples](../../../client-api/operations/patching/single-document.mdx#examples) + * [Change value of single field](../../../client-api/operations/patching/single-document.mdx#change-value-of-single-field) + * [Change values of two fields](../../../client-api/operations/patching/single-document.mdx#change-values-of-two-fields) + * [Increment value](../../../client-api/operations/patching/single-document.mdx#increment-value) + * [Add or increment](../../../client-api/operations/patching/single-document.mdx#add-or-increment) + * [Add or patch](../../../client-api/operations/patching/single-document.mdx#add-or-patch) + * [Add or patch to an existing array](../../../client-api/operations/patching/single-document.mdx#add-or-patch-to-an-existing-array) + * [Add item to array](../../../client-api/operations/patching/single-document.mdx#add-item-to-array) + * [Insert item into specific position in array](../../../client-api/operations/patching/single-document.mdx#insert-item-into-specific-position-in-array) + * [Modify item in specific position in array](../../../client-api/operations/patching/single-document.mdx#modify-item-in-specific-position-in-array) + * [Remove items from array](../../../client-api/operations/patching/single-document.mdx#remove-items-from-array) + * [Loading documents in a script](../../../client-api/operations/patching/single-document.mdx#loading-documents-in-a-script) + * [Remove property](../../../client-api/operations/patching/single-document.mdx#remove-property) + * [Rename property](../../../client-api/operations/patching/single-document.mdx#rename-property) + * [Add document](../../../client-api/operations/patching/single-document.mdx#add-document) + * [Clone document](../../../client-api/operations/patching/single-document.mdx#clone-document) + * [Increment counter](../../../client-api/operations/patching/single-document.mdx#increment-counter) + * [Delete counter](../../../client-api/operations/patching/single-document.mdx#delete-counter) + * [Get counter](../../../client-api/operations/patching/single-document.mdx#get-counter) + * [Patching using inline string compilation](../../../client-api/operations/patching/single-document.mdx#patching-using-inline-string-compilation) + + + +## API Overview + +## Session API + +A type-safe session interface that allows performing the most common patch operations. +The patch request will be sent to the server only when calling `SaveChanges`. +This way it's possible to perform multiple operations in one request to the server. + + + +### Increment field value +`Session.Advanced.Increment` + + +{`void Increment(T entity, Expression> fieldPath, U delta); + +void Increment(string id, Expression> fieldPath, U delta); +`} + + + +| Parameters | Type | Description | +| ------------- | ------------- | ----- | +| **T** | `Type` | Entity type | +| **U** | `Type` | Field type, must be of numeric type or a `string` of `char` for string concatenation | +| **entity** | `T` | Entity on which the operation should be performed. 
The entity should be one that was returned by the current session in a `Load` or `Query` operation, this way, the session can track down the entity's ID | +| **entity id** | `string` | Entity ID on which the operation should be performed. | +| **fieldPath** | `Expression>` | Lambda describing the path to the field. | +| **delta** | `U` | Value to be added. | + +* Note how numbers are handled with the [JavaScript engine](../../../server/kb/numbers-in-ravendb.mdx) in RavenDB. +`Session.Advanced.AddOrIncrement` + + +{`void AddOrIncrement(string id, T entity, Expression> path, TU valToAdd); +`} + + + +| Parameters | Type | Description | +| ------------- | ------------- | ----- | +| **T** | `Type` | Entity type | +| **TU** | `Type` | Field type, must be of numeric type or a `string` of `char` for string concatenation | +| **entity** | `T` | Entity on which the operation should be performed. The entity should be one that was returned by the current session in a `Load` or `Query` operation, this way, the session can track down the entity's ID | +| **entity id** | `string` | Entity ID on which the operation should be performed. | +| **path** | `Expression>` | Lambda describing the path to the field. | +| **valToAdd** | `U` | Value to be added. | + + + + + +### Set field value +`Session.Advanced.Patch` + + +{`void Patch(string id, Expression> fieldPath, U value); + +void Patch(T entity, Expression> fieldPath, U value); +`} + + + +| Parameters | Type | Description | +| ------------- | ------------- | ----- | +| **T** | `Type` | Entity type | +| **U** | `Type` | Field type| +| **entity** | `T` | Entity on which the operation should be performed. The entity should be one that was returned by the current session in a `Load` or `Query` operation. This way the session can track down the entity's ID. | +| **entity id** | `string` | Entity ID on which the operation should be performed. | +| **fieldPath** | `Expression>` | Lambda describing the path to the field. | +| **delta** | `U` | Value to set. | +`Session.Advanced.AddOrPatch` + + + +{`void AddOrPatch(string id, T entity, Expression> path, TU value); +`} + + + +| Parameters | Type | Description | +| ------------- | ------------- | ----- | +| **T** | `Type` | Entity type | +| **TU** | `Type` | Field type| +| **entity** | `T` | Entity on which the operation should be performed. The entity should be one that was returned by the current session in a `Load` or `Query` operation. This way the session can track down the entity's ID. | +| **entity id** | `string` | Entity ID on which the operation should be performed. | +| **fieldPath** | `Expression>` | Lambda describing the path to the field. | +| **value** | `U` | Value to set. | + + + + + +### Array manipulation +`Session.Advanced.Patch` + + +{`void Patch(T entity, Expression>> fieldPath, + Expression, object>> arrayModificationLambda); + +void Patch(string id, Expression>> fieldPath, + Expression, object>> arrayModificationLambda); +`} + + + +| Parameters | Type | Description | +|------------------------------| ------------- | ----- | +| **T** | `Type` | Entity type | +| **U** | `Type` | Field type| +| **entity** | `T` | Entity on which the operation should be performed. The entity should be one that was returned by the current session in a `Load` or `Query` operation. This way the session can track down the entity's ID. | +| **entity id** | `string` | Entity ID on which the operation should be performed. | +| **fieldPath** | `Expression>` | Lambda describing the path to the field. 
| +| **arrayModificationLambda** | `Expression, object>>` | Lambda that modifies the array, see `JavaScriptArray` below. | +`Session.Advanced.AddOrPatch` + + +{`void AddOrPatch(string id, T entity, Expression>> path, + Expression, object>> arrayAdder); +`} + + + +| Parameters | Type | Description | +| ------------- | ------------- | ----- | +| **T** | `Type` | Entity type | +| **TU** | `Type` | Field type| +| **entity** | `T` | Entity on which the operation should be performed. The entity should be one that was returned by the current session in a `Load` or `Query` operation. This way the session can track down the entity's ID. | +| **entity id** | `string` | Entity ID on which the operation should be performed. | +| **path** | `Expression>` | Lambda describing the path to the field. | +| **Expression<Func<JavaScriptArray>** | `Expression, object>>` | Lambda that modifies the array, see `JavaScriptArray` below. | +| **arrayAdder** | `Add()` | Values to add to array. | + + + +`JavaScriptArray` allows building lambdas representing array manipulations for patches. + +| Method Signature| Return Type | Description | +|--------|:-----|-------------| +| **Put(T item)** | `JavaScriptArray` | Allows adding `item` to an array. | +| **Put(params T[] items)** | `JavaScriptArray` | Items to be added to the array. | +| **RemoveAt(int index)** | `JavaScriptArray` | Removes item in position `index` in array. | +| **RemoveAll(Func<T, bool> predicate)** | `JavaScriptArray` | Removes all the items in the array that satisfy the given predicate. | + + + + + + + +## Session API using Defer + +The non-typed Session API for patches uses the `Session.Advanced.Defer` function which allows registering one or more commands. +One of the possible commands is the `PatchCommandData`, describing single document patch command. +The patch request will be sent to the server only when calling `SaveChanges`, this way it's possible to perform multiple operations in one request to the server. + +`Session.Advanced.Defer` + + +{`void Defer(ICommandData[] commands); +`} + + + + + +#### PatchCommandData + +| Constructor | Type | Description | +|--------------------|----------------|------------------------------------------------------------------------------------------------------------------------------------------| +| **id** | `string` | ID of the document to be patched. | +| **changeVector** | `string` | [Can be null] Change vector of the document to be patched, used to verify that the document was not changed before the patch reached it. | +| **patch** | `PatchRequest` | Patch request to be performed on the document. | +| **patchIfMissing** | `PatchRequest` | [Can be null] Patch request to be performed if no document with the given ID was found. | + + + + + +#### PatchRequest + +We highly recommend using scripts with parameters. This allows RavenDB to cache scripts and boost performance. +Parameters can be accessed in the script through the `args` object and passed using PatchRequest's "Values" parameter. + +| Property | Type | Description | +|------------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Script** | `string` | The patching script, written in JavaScript. | +| **Values** | `Dictionary` | Parameters to be passed to the script.
The parameters can be accessed using the '$' prefix.
A parameter starting with a '$' will be used as-is, without further concatenation. |
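+
+For example, a minimal sketch of a parameterized patch request (the field name and
+discount value are illustrative):
+
+{`var patch = new PatchRequest
+{
+    // The script references its parameters through the 'args' object:
+    Script = @"this.Discount = args.NewDiscount;",
+    Values =
+    {
+        { "NewDiscount", 0.25 }
+    }
+};
+`}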
+ + + +## Operations API + +An operations interface that exposes the full functionality and allows performing ad-hoc patch operations without creating a session. + +`Raven.Client.Documents.Operations.Send` +`Raven.Client.Documents.Operations.SendAsync` + + + +{`PatchStatus Send(PatchOperation operation); + +Task SendAsync(PatchOperation operation, + SessionInfo sessionInfo = null, + CancellationToken token = default(CancellationToken)); +`} + + + + + +| Constructor | Type | Description | +|-------------------------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **id** | `string` | ID of the document to be patched. | +| **changeVector** | `string` | Change vector of the document to be patched.
Used to verify that the document was not modified before the patch reached it.
Can be `null`. | +| **patch** | `PatchRequest` | Patch request to perform on the document. | +| **patchIfMissing** | `PatchRequest` | Patch request to perform if the specified document is not found.
Will run only if no `changeVector` was passed.
Can be `null`. | +| **skipPatchIfChangeVectorMismatch** | `bool` | `true` - do not patch if the document has been modified.
`false` (Default) - execute the patch even if the document has been modified.

An exception is thrown if:
this parameter is `false`, a `changeVector` was passed, and no document with that ID and change vector was found. |
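+
+A minimal sketch of a conditional patch using the constructor parameters listed above
+(the document ID, script, and the `changeVector` variable are illustrative):
+
+{`// 'changeVector' was captured earlier, e.g. via session.Advanced.GetChangeVectorFor(entity)
+store.Operations.Send(new PatchOperation(
+    id: "employees/1",
+    // Patch only if the document still has this change vector:
+    changeVector: changeVector,
+    patch: new PatchRequest
+    {
+        Script = @"this.Title = args.Title;",
+        Values =
+        {
+            { "Title", "Sales Manager" }
+        }
+    },
+    patchIfMissing: null,
+    // Skip silently (rather than throw) if the document was modified meanwhile:
+    skipPatchIfChangeVectorMismatch: true));
+`}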
+ + + +## List of script methods + +This is a list of a few of the javascript methods that can be used in patch scripts. +See the more comprehensive list at [Knowledge Base: JavaScript Engine](../../../server/kb/javascript-engine.mdx#predefined-javascript-functions). + +| Method | Arguments | Description | +|----------------------|-----------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **load** | `string` or `string[]` | Loads one or more documents into the context of the script by their document IDs | +| **loadPath** | A document and a path to an ID within that document | Loads a related document by the path to its ID | +| **del** | Document ID; change vector | Delete the given document by its ID. If you add the expected change vector and the document's current change vector does not match, the document will _not_ be deleted. | +| **put** | Document ID; document; change vector | Create or overwrite a document with a specified ID and entity. If you try to overwrite an existing document and pass the expected change vector, the put will fail if the specified change vector does not match the document's current change vector. | +| **cmpxchg** | Key | Load a compare exchange value into the context of the script using its key | +| **getMetadata** | Document | Returns the document's metadata | +| **id** | Document | Returns the document's ID | +| **lastModified** | Document | Returns the `DateTime` of the most recent modification made to the given document | +| **counter** | Document; counter name | Returns the value of the specified counter in the specified document | +| **counterRaw** | Document; counter name | Returns the specified counter in the specified document as a key-value pair | +| **incrementCounter** | Document; counter name | Increases the value of the counter by one | +| **deleteCounter** | Document; counter name | Deletes the counter | +| **spatial.distance** | Two points by latitude and longitude; spatial units | Find the distance between to points on the earth | +| **timeseries** | Document; the time series' name | Returns the specified time series object | + + + +## Examples + +### Change value of single field + + + + +{`// change FirstName to Robert +session.Advanced.Patch( + "employees/1", + x => x.FirstName, "Robert"); + +session.SaveChanges(); +`} + + + + +{`// change FirstName to Robert +session.Advanced.Defer(new PatchCommandData( + id: "employees/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"this.FirstName = args.FirstName;", + Values = + { + {"FirstName", "Robert"} + } + }, + patchIfMissing: null)); + +session.SaveChanges(); +`} + + + + +{`// change FirstName to Robert +store.Operations.Send(new PatchOperation( + id: "employees/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"this.FirstName = args.FirstName;", + Values = + { + {"FirstName", "Robert"} + } + }, + patchIfMissing: null)); +`} + + + +### Change values of two fields + + + + +{`// Modify FirstName to Robert and LastName to Carter in single request +// =================================================================== + +// The two Patch operations below are sent via 'SaveChanges()' which complete transactionally, +// as this call generates a single HTTP request to the database. 
+// Either both will succeed or both will be rolled back since they are applied within the same transaction. +// However, on the server side, the two Patch operations are still executed separately. +// To achieve atomicity at the level of a single server-side operation, use 'Defer' or the operations syntax. + +session.Advanced.Patch("employees/1", x => x.FirstName, "Robert"); +session.Advanced.Patch("employees/1", x => x.LastName, "Carter"); + +session.SaveChanges(); +`} + + + + +{`// Change FirstName to Robert and LastName to Carter in single request +// Note that here we do maintain the atomicity of the operation +session.Advanced.Defer(new PatchCommandData( + id: "employees/1", + changeVector: null, + patch: new PatchRequest + { + Script = @" + this.FirstName = args.UserName.FirstName; + this.LastName = args.UserName.LastName;", + Values = + { + { + "UserName", new + { + FirstName = "Robert", + LastName = "Carter" + } + } + } + }, + patchIfMissing: null)); + +session.SaveChanges(); +`} + + + + +{`// Change FirstName to Robert and LastName to Carter in single request +// Note that here we do maintain the atomicity of the operation +store.Operations.Send(new PatchOperation( + id: "employees/1", + changeVector: null, + patch: new PatchRequest + { + Script = @" + this.FirstName = args.UserName.FirstName; + this.LastName = args.UserName.LastName;", + Values = + { + { + "UserName", new + { + FirstName = "Robert", + LastName = "Carter" + } + } + } + }, patchIfMissing: null)); +`} + + + +### Increment value + + + + +{`// increment UnitsInStock property value by 10 +session.Advanced.Increment("products/1-A", x => x.UnitsInStock, 10); + +session.SaveChanges(); +`} + + + + +{`session.Advanced.Defer(new PatchCommandData( + id: "products/1-A", + changeVector: null, + patch: new PatchRequest + { + Script = @"this.UnitsInStock += args.UnitsToAdd;", + Values = + { + {"UnitsToAdd", 10} + } + }, + patchIfMissing: null)); + +session.SaveChanges(); +`} + + + + +{`store.Operations.Send(new PatchOperation( + id: "products/1-A", + changeVector: null, + patch: new PatchRequest + { + Script = @"this.UnitsInStock += args.UnitsToAdd;", + Values = + { + {"UnitsToAdd", 10} + } + }, + patchIfMissing: null)); +`} + + + +### Add or increment + +`AddOrIncrement` increments an existing field or adds a new one in documents where they didn't exist. + + + +{`// While running AddOrIncrement specify +session.Advanced.AddOrIncrement( + + // Specify document id and entity on which the operation should be performed. + id, + new User + \{ + FirstName = "John", + LastName = "Doe", + LoginCount = 1 + + // The path to the field and value to be added. + \}, x => x.LoginCount, 1); + + session.SaveChanges(); +`} + + +### Add or patch + +`AddOrPatch` adds or edits field(s) in a single document. + +If the document doesn't yet exist, this operation adds the document but doesn't patch it. + + + +{`// While running AddOrPatch specify +session.Advanced.AddOrPatch( + +// Specify document id and entity on which the operation should be performed. + id, + new User + \{ + FirstName = "John", + LastName = "Doe", + LastLogin = DateTime.Now + \}, + // The path to the field and value to set. + x => x.LastLogin, new DateTime(2021, 9, 12)); + +session.SaveChanges(); +`} + + +### Add or patch to an existing array + +This sample shows how to patch an existing array or add it to documents where it doesn't yet exist. 
+ + + +{`// While running AddOrPatch specify +session.Advanced.AddOrPatch( + + // Specify document id and entity on which the operation should be performed. + id, + new User + \{ + FirstName = "John", + LastName = "Doe", + LoginTimes = + new List + \{ + DateTime.UtcNow + \} + \}, + // The path to the field + x => x.LoginTimes, + // Modifies the array + u => u.Add(new DateTime(1993, 09, 12), new DateTime(2000, 01, 01))); + +session.SaveChanges(); +`} + + +### Add item to array + + + + +{`// add a new comment to Comments +session.Advanced.Patch("blogposts/1", + x => x.Comments, + comments => comments.Add(new BlogComment + { + Content = "Lore ipsum", + Title = "Some title" + })); + +session.SaveChanges(); +`} + + + + +{`// add a new comment to Comments +session.Advanced.Defer(new PatchCommandData( + id: "blogposts/1", + changeVector: null, + patch: new PatchRequest + { + Script = "this.Comments.push(args.Comment);", + Values = + { + { + "Comment", new BlogComment + { + Content = "Lore ipsum", + Title = "Some title" + } + } + } + + }, + patchIfMissing: null)); + +session.SaveChanges(); +`} + + + + +{`// add a new comment to Comments +store.Operations.Send(new PatchOperation( + id: "blogposts/1", + changeVector: null, + patch: new PatchRequest + { + Script = "this.Comments.push(args.Comment);", + Values = + { + { + "Comment", new BlogComment + { + Content = "Lore ipsum", + Title = "Some title" + } + } + } + + }, + patchIfMissing: null)); +`} + + + +### Insert item into specific position in array + +Inserting item into specific position is supported only by the non-typed APIs. + + + + +{`// insert a new comment at position 1 to Comments +session.Advanced.Defer(new PatchCommandData( + id: "blogposts/1", + changeVector: null, + patch: new PatchRequest + { + Script = "this.Comments.splice(1, 0, args.Comment);", + Values = + { + { + "Comment", new BlogComment + { + Content = "Lore ipsum", + Title = "Some title" + } + } + } + }, + patchIfMissing: null)); + +session.SaveChanges(); +`} + + + + +{`store.Operations.Send(new PatchOperation( + id: "blogposts/1", + changeVector: null, + patch: new PatchRequest + { + Script = "this.Comments.splice(1, 0, args.Comment);", + Values = + { + { + "Comment", new BlogComment + { + Content = "Lore ipsum", + Title = "Some title" + } + } + } + }, + patchIfMissing: null)); +`} + + + +### Modify item in specific position in array + +Inserting item into specific position is supported only by the non-typed APIs. 
+ + + + +{`// modify a comment at position 3 in Comments +session.Advanced.Defer(new PatchCommandData( + id: "blogposts/1", + changeVector: null, + patch: new PatchRequest + { + Script = "this.Comments.splice(3, 1, args.Comment);", + Values = + { + { + "Comment", new BlogComment + { + Content = "Lore ipsum", + Title = "Some title" + } + } + } + }, + patchIfMissing: null)); + +session.SaveChanges(); +`} + + + + +{`// modify a comment at position 3 in Comments +store.Operations.Send(new PatchOperation( + id: "blogposts/1", + changeVector: null, + patch: new PatchRequest + { + Script = "this.Comments.splice(3, 1, args.Comment);", + Values = + { + { + "Comment", new BlogComment + { + Content = "Lore ipsum", + Title = "Some title" + } + } + } + }, + patchIfMissing: null)); +`} + + + +### Remove items from array + + + + +{`// filter out all comments of a blogpost which contains the word "wrong" in their contents +session.Advanced.Patch("blogposts/1", + x => x.Comments, + comments => comments.RemoveAll(y => y.Content.Contains("wrong"))); + +session.SaveChanges(); +`} + + + + +{`// filter out all comments of a blogpost which contains the word "wrong" in their contents +session.Advanced.Defer(new PatchCommandData( + id: "blogposts/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"this.Comments = this.Comments.filter(comment=> + !comment.Content.includes(args.Text));", + Values = + { + {"Text", "wrong"} + } + }, + patchIfMissing: null)); + +session.SaveChanges(); +`} + + + + +{`// filter out all comments of a blogpost which contains the word "wrong" in their contents +store.Operations.Send(new PatchOperation( + id: "blogposts/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"this.Comments = this.Comments.filter(comment=> + !comment.Content.includes(args.Text));", + Values = + { + {"Text", "wrong"} + } + }, + patchIfMissing: null)); +`} + + + +### Loading documents in a script + +Loading documents is supported only by the non-typed APIs. + + + + +{`// update product names in order, according to loaded product documents +session.Advanced.Defer(new PatchCommandData( + id: "orders/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"this.Lines.forEach(line=> { + var productDoc = load(line.Product); + line.ProductName = productDoc.Name; + });" + }, patchIfMissing: null)); + +session.SaveChanges(); +`} + + + + +{`// update product names in order, according to loaded product documents +store.Operations.Send(new PatchOperation( + id: "blogposts/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"this.Lines.forEach(line=> { + var productDoc = load(line.Product); + line.ProductName = productDoc.Name; + });" + }, + patchIfMissing: null)); +`} + + + +### Remove property + +Removing property supported only by the non-typed APIs. + + + + +{`// remove property Age +session.Advanced.Defer(new PatchCommandData( + id: "employees/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"delete this.Age;" + }, + patchIfMissing: null)); +session.SaveChanges(); +`} + + + + +{`// remove property Age +store.Operations.Send(new PatchOperation( + id: "employees/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"delete this.Age;" + }, + patchIfMissing: null)); +`} + + + +### Rename property + +Renaming property supported only by the non-typed APIs. 
+ + + + +{`// rename FirstName to Name +session.Advanced.Defer(new PatchCommandData( + id: "employees/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"var firstName = this[args.Rename.Old]; + delete this[args.Rename.Old]; + this[args.Rename.New] = firstName;", + Values = + { + { + "Rename", new + { + Old = "FirstName", + New = "Name" + } + } + } + }, + patchIfMissing: null)); + +session.SaveChanges(); +`} + + + + +{`store.Operations.Send(new PatchOperation( + id: "employees/1", + changeVector: null, + patch: new PatchRequest + { + Script = @"var firstName = this[args.Rename.Old]; + delete this[args.Rename.Old]; + this[args.Rename.New] = firstName;", + Values = + { + { + "Rename", new + { + Old = "FirstName", + New = "Name" + } + } + } + }, + patchIfMissing: null)); +`} + + + +### Add document + +Adding a new document is supported only by the non-typed APIs. + + + + +{`session.Advanced.Defer(new PatchCommandData("employees/1-A", null, + new PatchRequest + { + Script = "put('orders/', { Employee: id(this) });", + }, null)); + +session.SaveChanges(); +`} + + + + +{`store.Operations.Send(new PatchOperation("employees/1-A", null, new PatchRequest +{ + Script = "put('orders/', { Employee: id(this) });", +})); +`} + + + +### Clone document + +To clone a document via patching, use the `put` method within the patching script as follows: + + + + +{`session.Advanced.Defer(new PatchCommandData("employees/1-A", null, + new PatchRequest + { + Script = "put('employees/', this);", + }, null)); + +session.SaveChanges(); +`} + + + + +{`store.Operations.Send(new PatchOperation("employees/1-A", null, new PatchRequest +{ + Script = "put('employees/', this);", +})); +`} + + + + + + +**Attachments, Counters, Time Series, and Revisions:** + + * When cloning a document via patching, only the document's fields are copied to the new document. + Attachments, counters, time series data, and revisions from the source document will Not be copied automatically. + * To manage time series & counters via patching, you can use the pre-defined JavaScript methods listed here: + [Counters methods](../../../server/kb/javascript-engine.mdx#counter-operations) & [Time series methods](../../../server/kb/javascript-engine.mdx#time-series-operations). + * Note: When [Cloning a document via the Studio](../../../studio/database/documents/create-new-document.mdx#clone-an-existing-document), + attachments, counters, time Series, and revisions will be copied automatically. + +**Archived documents:** + + * If the source document is archived, the cloned document will Not be archived. + To learn more about archived documents, see [Data archival overview](../../../data-archival/overview.mdx). + + +### Increment counter + +In order to increment or create a counter use <code>incrementCounter</code> method as follows: + + + + +{`var order = session.Load("orders/1-A"); +session.CountersFor(order).Increment("Likes", 1); +session.SaveChanges(); +`} + + + + +{`session.Advanced.Defer(new PatchCommandData("orders/1-A", null, + new PatchRequest + { + Script = "incrementCounter(this, args.name, args.val);", + Values = + { + { "name", "Likes" }, + { "val", 20 } + } + }, null)); +session.SaveChanges(); +`} + + + + +{`store.Operations.Send(new PatchOperation("orders/1-A", null, new PatchRequest +{ + Script = "incrementCounter(this, args.name, args.val);", + Values = + { + { "name", "Likes" }, + { "val", -1 } + } +})); +`} + + + + + + +The method can be called by document ID or by document reference and the value can be negative. 
+ + +### Delete counter + +In order to delete a counter use <code>deleteCounter</code> method as follows: + + + + +{`session.CountersFor("orders/1-A").Delete("Likes"); +session.SaveChanges(); +`} + + + + +{`session.Advanced.Defer(new PatchCommandData("products/1-A", null, + new PatchRequest + { + Script = "deleteCounter(this, args.name);", + Values = + { + { "name", "Likes" }, + } + }, null)); +session.SaveChanges(); +`} + + + + +{`store.Operations.Send(new PatchOperation("products/1-A", null, new PatchRequest +{ + Script = "deleteCounter(this, args.name);", + Values = + { + { "name", "Likes" }, + } +})); +`} + + + + + + +The method can be called by document ID or by document reference + + +### Get counter + +In order to get a counter while patching use <code>counter</code> method as follows: + + + + +{`var order = session.Load("orders/1-A"); +var counters = session.Advanced.GetCountersFor(order); +`} + + + + +{`session.Advanced.Defer(new PatchCommandData("orders/1-A", null, + new PatchRequest + { + Script = @"var likes = counter(this.Company, args.name); + put('result/', {company: this.Company, likes: likes});", + Values = + { + { "name", "Likes" }, + } + }, null)); +session.SaveChanges(); +`} + + + + +{`store.Operations.Send(new PatchOperation("orders/1-A", null, new PatchRequest +{ + Script = @"var likes = counter(this.Company, args.name); + put('result/', {company: this.Company, likes: likes});", + Values = + { + { "name", "Likes" }, + } +})); +`} + + + + + + +The method can be called by document ID or by document reference. + + +### Patching using inline string compilation + +* When using a JavaScript script with the _defer_ or _operations_ syntax, + you can apply logic using **inline string compilation**. + +* To enable this, set the [Patching.AllowStringCompilation](../../../server/configuration/patching-configuration.mdx#patchingallowstringcompilation) configuration key to _true_. + + + + +{`// Modify value using inline string compilation +// ============================================ + +session.Advanced.Defer(new PatchCommandData( + id: "products/1-A", + changeVector: null, + patch: new PatchRequest + { + Script = @" + // Give a discount if the product is low in stock: + const functionBody = 'return doc.UnitsInStock < lowStock ? ' + + 'doc.PricePerUnit * discount :' + + 'doc.PricePerUnit;'; + + // Define a function that processes the document and returns the price: + const calcPrice = new Function('doc', 'lowStock', 'discount', functionBody); + + // Update the product's PricePerUnit based on the function: + this.PricePerUnit = calcPrice(this, args.LowStock, args.Discount);", + + Values = { + {"LowStock", "10"}, + {"Discount", "0.8"} + } + }, + patchIfMissing: null)); + +session.SaveChanges(); + +// The same can be applied using the 'operations' syntax. +`} + + + + +{`// Modify value using inline string compilation +// ============================================ + +store.Operations.Send(new PatchOperation("products/1-A", null, new PatchRequest +{ + Script = @" + // Give a discount if the product is low in stock: + const discountExpression = 'this.UnitsInStock < args.LowStock ? ' + + 'this.PricePerUnit * args.Discount :' + + 'this.PricePerUnit'; + + // Call 'eval', pass the string expression that contains your logic: + const price = eval(discountExpression); + + // Update the product's PricePerUnit: + this.PricePerUnit = price;", + + Values = { + {"LowStock", "10"}, + {"Discount", "0.8"} + } +})); + +// The same can be applied using the 'session defer' syntax. 
+`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/patching/_single-document-java.mdx b/versioned_docs/version-7.1/client-api/operations/patching/_single-document-java.mdx new file mode 100644 index 0000000000..896b74154d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/patching/_single-document-java.mdx @@ -0,0 +1,784 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + +The **Patch** operation is used to perform partial document updates without having to load, modify, and save a full document. +The whole operation is executed on the server side and is useful as a performance enhancement or for updating denormalized data in entities. + +The current page deals with patch operations on single documents. + +Patching has three possible interfaces: [Session API](../../../client-api/operations/patching/single-document.mdx#session-api), [Session API using defer](../../../client-api/operations/patching/single-document.mdx#session-api-using-defer), and [Operations API](../../../client-api/operations/patching/single-document.mdx#operations-api). + +Patching can be done from the client as well as in the studio. + +In this page: +[API overview](../../../client-api/operations/patching/single-document.mdx#api-overview) +[Examples](../../../client-api/operations/patching/single-document.mdx#examples) + + +## API overview + +## Session API + +A session interface that allows performing the most common patch operations. +The patch request will be sent to server only when calling `saveChanges`, this way it's possible to perform multiple operations in one request to the server. + +### Increment Field Value +`session.advanced().increment` + + +{` void increment(String id, String path, U valueToAdd); + + void increment(T entity, String path, U valueToAdd); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **T** | `Class` | Entity class | +| **U** | `Class` | Field class, must be of numeric type, or a `String` of `char` for string concatenation | +| **entity** | `T` | Entity on which the operation should be performed. The entity should be one that was returned by the current session in a `load` or `query` operation, this way, the session can track down the entity's ID | +| **entity id** | `String` | Entity ID on which the operation should be performed. | +| **delta** | `U` | Value to be added. | + +* Note how numbers are handled with the [JavaScript engine](../../../server/kb/numbers-in-ravendb.mdx) in RavenDB. + +### Set Field Value +`session.advanced().patch` + + +{` void patch(String id, String path, U value); + + void patch(T entity, String path, U value); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **T** | `Class` | Entity Class | +| **U** | `Class` | Field class | +| **entity** | `T` | Entity on which the operation should be performed. The entity should be one that was returned by the current session in a `load` or `query` operation, this way, the session can track down the entity's ID | +| **entity id** | `String` | Entity ID on which the operation should be performed. | +| **delta** | `U` | Value to set. 
|
+
+### Array Manipulation
+`session.advanced().patch`
+
+
+{`<T, U> void patch(T entity, String pathToArray, Consumer<JavaScriptArray<U>> arrayAdder);
+
+<T, U> void patch(String id, String pathToArray, Consumer<JavaScriptArray<U>> arrayAdder);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **T** | `Class` | Entity class |
+| **U** | `Class` | Field class |
+| **entity** | `T` | Entity on which the operation should be performed. The entity should be one that was returned by the current session in a `load` or `query` operation, so that the session can track down the entity's ID |
+| **id** | `String` | ID of the entity on which the operation should be performed. |
+| **arrayAdder** | `Consumer<JavaScriptArray<U>>` | Lambda that modifies the array, see `JavaScriptArray` below. |
+
+
+`JavaScriptArray` allows building lambdas that represent array manipulations for patches.
+
+| Method Signature| Return Type | Description |
+|--------|:-----|-------------|
+| **add(T item)** | `JavaScriptArray` | Adds `item` to the array. |
+| **add(T... items)** | `JavaScriptArray` | Adds the given items to the array. |
+| **add(Collection<T> items)** | `JavaScriptArray` | Adds the given collection of items to the array. |
+| **removeAt(int index)** | `JavaScriptArray` | Removes the item at position `index` in the array. |
+
+
+
+
+
+## Session API using defer
+The low-level session API for patches uses the `session.advanced().defer` function, which allows registering one or more commands.
+One of the available commands is `PatchCommandData`, which describes a patch on a single document.
+The patch request is sent to the server only when `saveChanges` is called, so it is possible to perform multiple operations in one request to the server.
+
+`session.advanced().defer`
+
+
+{`void defer(ICommandData[] commands);
+`}
+
+
+
+
+
+| Constructor| | |
+|--------|:-----|-------------|
+| **id** | `String` | ID of the document to be patched. |
+| **changeVector** | `String` | [Can be null] Change vector of the document to be patched, used to verify that the document was not changed before the patch reached it. |
+| **patch** | `PatchRequest` | Patch request to be performed on the document. |
+| **patchIfMissing** | `PatchRequest` | [Can be null] Patch request to be performed if no document with the given ID was found. |
+
+
+
+
+
+We highly recommend using scripts with parameters. This allows RavenDB to cache scripts and boost performance. Parameters can be accessed in the script through the `args` object, and passed using the PatchRequest's `Values` field.
+
+| Members | | |
+| ------------- | ------------- | ----- |
+| **Script** | `String` | JavaScript code to be run. |
+| **Values** | `Map<String, Object>` | Parameters to be passed to the script. The parameters can be accessed using the '$' prefix. Parameters starting with a '$' are used as-is, without further concatenation. |
+
+
+
+
+
+
+## Operations API
+An operations interface that exposes the full functionality and allows performing ad-hoc patch operations without creating a session.
+
+
+
+{`PatchStatus send(PatchOperation operation);
+
+PatchStatus send(PatchOperation operation, SessionInfo sessionInfo);
+
+<T> PatchOperation.Result<T> send(Class<T> entityClass, PatchOperation operation);
+
+<T> PatchOperation.Result<T> send(Class<T> entityClass, PatchOperation operation, SessionInfo sessionInfo);
+`}
+
+
+
+
+
+| Constructor| | |
+|--------|:-----|-------------|
+| **id** | `String` | ID of the document to be patched. |
+| **changeVector** | `String` | [Can be null] Change vector of the document to be patched, used to verify that the document was not changed before the patch reached it. |
+| **patch** | `PatchRequest` | Patch request to be performed on the document. |
+| **patchIfMissing** | `PatchRequest` | [Can be null] Patch request to be performed if no document with the given ID was found. Will run only if no `changeVector` was passed. |
+| **skipPatchIfChangeVectorMismatch** | `boolean` | If `false` and `changeVector` has a value, and a document with that ID and change vector was not found, an exception is thrown. |
+
+
+
+
+
+## List of Script Methods
+
+This is a list of a few of the JavaScript methods that can be used in patch scripts. See the
+more comprehensive list at [Knowledge Base: JavaScript Engine](../../../server/kb/javascript-engine.mdx#predefined-javascript-functions).
+
+| Method | Arguments | Description |
+| - | - | - |
+| **load** | `string` or `string[]` | Loads one or more documents into the context of the script by their document IDs |
+| **loadPath** | A document and a path to an ID within that document | Loads a related document by the path to its ID |
+| **del** | Document ID; change vector | Delete the given document by its ID. If you add the expected change vector and the document's current change vector does not match, the document will _not_ be deleted. |
+| **put** | Document ID; document; change vector | Create or overwrite a document with a specified ID and entity. If you try to overwrite an existing document and pass the expected change vector, the put will fail if the specified change vector does not match the document's current change vector. |
+| **cmpxchg** | Key | Load a compare exchange value into the context of the script using its key |
+| **getMetadata** | Document | Returns the document's metadata |
+| **id** | Document | Returns the document's ID |
+| **lastModified** | Document | Returns the `DateTime` of the most recent modification made to the given document |
+| **counter** | Document; counter name | Returns the value of the specified counter in the specified document |
+| **counterRaw** | Document; counter name | Returns the specified counter in the specified document as a key-value pair |
+| **incrementCounter** | Document; counter name; value | Increments the value of the specified counter by the given value |
+| **deleteCounter** | Document; counter name | Deletes the counter |
+
+
+
+## Examples
+
+### Change Field's Value
+
+
+
+
+{`// change FirstName to Robert
+session
+    .advanced()
+    .patch("employees/1", "FirstName", "Robert");
+`}
+
+
+
+
+{`// change FirstName to Robert
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.FirstName = args.firstName");
+patchRequest.setValues(Collections.singletonMap("firstName", "Robert"));
+PatchCommandData patchCommandData = new PatchCommandData("employees/1", null, patchRequest, null);
+session.advanced().defer(patchCommandData);
+
+session.saveChanges();
+`}
+
+
+
+
+{`// change FirstName to Robert
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.FirstName = args.firstName;");
+patchRequest.setValues(Collections.singletonMap("firstName", "Robert"));
+PatchOperation patchOperation = new PatchOperation("employees/1", null, patchRequest);
+store.operations().send(patchOperation);
+`}
+
+
+
+
+### Change Values of Two Fields
+
+
+
+
+{`// Modify FirstName to Robert and LastName to Carter in a single request
+// =====================================================================
+
+// The two Patch operations below are sent via 'saveChanges()' which complete transactionally,
+// as this call generates a single HTTP request to the database.
+// Either both will succeed or both will be rolled back since they are applied within the same transaction.
+// However, on the server side, the two Patch operations are still executed separately.
+// To achieve atomicity at the level of a single server-side operation, use 'defer' or the operations syntax.
+
+session.advanced().patch("employees/1", "FirstName", "Robert");
+session.advanced().patch("employees/1", "LastName", "Carter");
+
+session.saveChanges();
+`}
+
+
+
+
+{`// Change FirstName to Robert and LastName to Carter in a single request
+// Note that here we do maintain the atomicity of the operation
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.FirstName = args.firstName;" +
+    "this.LastName = args.lastName");
+
+Map<String, Object> values = new HashMap<>();
+values.put("firstName", "Robert");
+values.put("lastName", "Carter");
+patchRequest.setValues(values);
+
+session.advanced().defer(new PatchCommandData("employees/1", null, patchRequest, null));
+session.saveChanges();
+`}
+
+
+
+
+{`// Change FirstName to Robert and LastName to Carter in a single request
+// Note that here we do maintain the atomicity of the operation
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.FirstName = args.firstName; " +
+    "this.LastName = args.lastName");
+
+Map<String, Object> values = new HashMap<>();
+values.put("firstName", "Robert");
+values.put("lastName", "Carter");
+patchRequest.setValues(values);
+
+store.operations().send(new PatchOperation("employees/1", null, patchRequest));
+`}
+
+
+
+
+### Increment Value
+
+
+
+
+{`// increment UnitsInStock property value by 10
+session.advanced().increment("products/1-A", "UnitsInStock", 10);
+
+session.saveChanges();
+`}
+
+
+
+
+{`PatchRequest request = new PatchRequest();
+request.setScript("this.UnitsInStock += args.unitsToAdd");
+request.setValues(Collections.singletonMap("unitsToAdd", 10));
+
+session.advanced().defer(
+    new PatchCommandData("products/1-A", null, request, null));
+session.saveChanges();
+`}
+
+
+
+
+{`PatchRequest request = new PatchRequest();
+request.setScript("this.UnitsInStock += args.unitsToAdd");
+request.setValues(Collections.singletonMap("unitsToAdd", 10));
+store.operations().send(new PatchOperation("products/1-A", null, request));
+`}
+
+
+
+
+### Add Item to Array
+
+
+
+
+{`BlogComment comment = new BlogComment();
+comment.setContent("Lorem ipsum");
+comment.setTitle("Some title");
+
+session.advanced()
+    .patch("blogposts/1", "comments", comments -> comments.add(comment));
+
+session.saveChanges();
+`}
+
+
+
+
+{`// add a new comment to comments
+BlogComment comment = new BlogComment();
+comment.setContent("Lorem ipsum");
+comment.setTitle("Some title");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.comments.push(args.comment)");
+patchRequest.setValues(Collections.singletonMap("comment", comment));
+
+session.advanced().defer(new PatchCommandData("blogposts/1", null, patchRequest, null));
+session.saveChanges();
+`}
+
+
+
+
+{`// add a new comment to comments
+BlogComment comment = new BlogComment();
+comment.setContent("Lorem ipsum");
+comment.setTitle("Some title");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.comments.push(args.comment)");
+patchRequest.setValues(Collections.singletonMap("comment", comment));
+
+store.operations().send(new PatchOperation("blogposts/1", null, patchRequest));
+`}
+
+
+
+
+### Insert Item into Specific Position in Array
+
+Inserting an item into a specific position is supported only by the non-typed APIs.
+
+
+
+{`BlogComment comment = new BlogComment();
+comment.setContent("Lorem ipsum");
+comment.setTitle("Some title");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.comments.splice(1, 0, args.comment)");
+patchRequest.setValues(Collections.singletonMap("comment", comment));
+
+session.advanced().defer(new PatchCommandData("blogposts/1", null, patchRequest, null));
+session.saveChanges();
+`}
+
+
+
+
+{`BlogComment comment = new BlogComment();
+comment.setContent("Lorem ipsum");
+comment.setTitle("Some title");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.comments.splice(1, 0, args.comment)");
+patchRequest.setValues(Collections.singletonMap("comment", comment));
+
+store.operations().send(new PatchOperation("blogposts/1", null, patchRequest));
+`}
+
+
+
+
+### Modify Item in Specific Position in Array
+
+Modifying an item at a specific position is supported only by the non-typed APIs.
+
+
+
+{`// modify a comment at position 3 in Comments
+BlogComment comment = new BlogComment();
+comment.setContent("Lorem ipsum");
+comment.setTitle("Some title");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.comments.splice(3, 1, args.comment)");
+patchRequest.setValues(Collections.singletonMap("comment", comment));
+
+session.advanced().defer(new PatchCommandData("blogposts/1", null, patchRequest, null));
+session.saveChanges();
+`}
+
+
+
+
+{`// modify a comment at position 3 in Comments
+BlogComment comment = new BlogComment();
+comment.setContent("Lorem ipsum");
+comment.setTitle("Some title");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.comments.splice(3, 1, args.comment)");
+patchRequest.setValues(Collections.singletonMap("comment", comment));
+
+store.operations().send(new PatchOperation("blogposts/1", null, patchRequest));
+`}
+
+
+
+
+### Remove Items from Array
+
+Filtering items from an array is supported only by the non-typed APIs.
+
+
+
+{`// filter out all comments of a blog post that contain the word "wrong" in their content
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.comments = this.comments.filter(comment " +
+    "=> !comment.content.includes(args.text));");
+patchRequest.setValues(Collections.singletonMap("text", "wrong"));
+
+session.advanced().defer(
+    new PatchCommandData("blogposts/1", null, patchRequest, null));
+session.saveChanges();
+`}
+
+
+
+
+{`// filter out all comments of a blog post that contain the word "wrong" in their content
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.comments = this.comments.filter(comment " +
+    "=> !comment.content.includes(args.text));");
+patchRequest.setValues(Collections.singletonMap("text", "wrong"));
+
+store.operations().send(new PatchOperation("blogposts/1", null, patchRequest));
+`}
+
+
+
+
+### Loading Documents in a Script
+
+Loading documents is supported only by the non-typed APIs.
+
+
+
+{`// update product names in order, according to loaded product documents
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.Lines.forEach(line => {" +
+    " var productDoc = load(line.Product);" +
+    " line.ProductName = productDoc.Name;" +
+    "});");
+
+session.advanced().defer(
+    new PatchCommandData("orders/1", null, patchRequest, null));
+session.saveChanges();
+`}
+
+
+
+
+{`// update product names in order, according to loaded product documents
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("this.Lines.forEach(line => {" +
+    " var productDoc = load(line.Product);" +
+    " line.ProductName = productDoc.Name;" +
+    "});");
+
+store.operations().send(new PatchOperation("orders/1", null, patchRequest));
+`}
+
+
+
+
+### Remove Property
+
+Removing a property is supported only by the non-typed APIs.
+
+
+
+{`// remove the FirstName property
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("delete this[args.propertyName];");
+patchRequest.setValues(Collections.singletonMap("propertyName", "FirstName"));
+
+session.advanced().defer(new PatchCommandData("employees/1", null, patchRequest, null));
+
+session.saveChanges();
+`}
+
+
+
+
+{`// remove the FirstName property
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("delete this[args.propertyName];");
+patchRequest.setValues(Collections.singletonMap("propertyName", "FirstName"));
+
+store.operations().send(new PatchOperation("employees/1", null, patchRequest));
+`}
+
+
+
+
+### Rename Property
+
+Renaming a property is supported only by the non-typed APIs.
+
+
+
+{`// rename FirstName to Name
+
+Map<String, String> value = new HashMap<>();
+value.put("old", "FirstName");
+value.put("new", "Name");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("var firstName = this[args.rename.old];" +
+    "delete this[args.rename.old];" +
+    "this[args.rename.new] = firstName;");
+patchRequest.setValues(Collections.singletonMap("rename", value));
+
+session.advanced().defer(new PatchCommandData("employees/1", null, patchRequest, null));
+
+session.saveChanges();
+`}
+
+
+
+
+{`Map<String, String> value = new HashMap<>();
+value.put("old", "FirstName");
+value.put("new", "Name");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("var firstName = this[args.rename.old];" +
+    "delete this[args.rename.old];" +
+    "this[args.rename.new] = firstName;");
+patchRequest.setValues(Collections.singletonMap("rename", value));
+
+store.operations().send(new PatchOperation("employees/1", null, patchRequest));
+`}
+
+
+
+
+### Add Document
+
+Adding a new document is supported only by the non-typed APIs.
+
+
+
+{`PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("put('orders/', { Employee: id(this) });");
+PatchCommandData commandData =
+    new PatchCommandData("employees/1-A", null, patchRequest, null);
+session.advanced().defer(commandData);
+session.saveChanges();
+`}
+
+
+
+
+{`PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("put('orders/', { Employee: id(this) });");
+
+store.operations().send(new PatchOperation("employees/1-A", null, patchRequest));
+`}
+
+
+
+
+### Clone Document
+
+To clone a document via patching, use the `put` method within the patching script as follows:
+
+
+
+{`PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("put('employees/', this);");
+PatchCommandData commandData =
+    new PatchCommandData("employees/1-A", null, patchRequest, null);
+session.advanced().defer(commandData);
+session.saveChanges();
+`}
+
+
+
+
+{`PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("put('employees/', this);");
+
+store.operations().send(new PatchOperation("employees/1-A", null, patchRequest));
+`}
+
+
+
+
+
+
+**Attachments, Counters, Time Series, and Revisions:**
+
+ * When cloning a document via patching, only the document's fields are copied to the new document.
+   Attachments, counters, time series data, and revisions from the source document will Not be copied automatically.
+ * To manage time series & counters via patching, you can use the pre-defined JavaScript methods listed here:
+   [Counters methods](../../../server/kb/javascript-engine.mdx#counter-operations) & [Time series methods](../../../server/kb/javascript-engine.mdx#time-series-operations).
+ * Note: When [Cloning a document via the Studio](../../../studio/database/documents/create-new-document.mdx#clone-an-existing-document),
+   attachments, counters, time series, and revisions will be copied automatically.
+
+**Archived documents:**
+
+ * If the source document is archived, the cloned document will Not be archived.
+
+
+
+### Increment Counter
+
+To increment or create a counter, use the <code>incrementCounter</code> method as follows:
+
+
+
+{`HashMap<String, Object> scriptValues = new HashMap<>();
+scriptValues.put("name", "likes");
+scriptValues.put("val", 20);
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("incrementCounter(this, args.name, args.val);");
+patchRequest.setValues(scriptValues);
+
+session.advanced().defer(
+    new PatchCommandData("orders/1-A", null, patchRequest, null));
+session.saveChanges();
+`}
+
+
+
+
+{`HashMap<String, Object> scriptValues = new HashMap<>();
+scriptValues.put("name", "likes");
+scriptValues.put("val", -1);
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("incrementCounter(this, args.name, args.val);");
+patchRequest.setValues(scriptValues);
+
+PatchOperation patchOperation = new PatchOperation("orders/1-A", null, patchRequest);
+store.operations().send(patchOperation);
+`}
+
+
+
+
+
+The method can be called by document ID or by document reference, and the value can be negative.
+
+
+### Delete Counter
+
+To delete a counter, use the <code>deleteCounter</code> method as follows:
+
+
+
+{`HashMap<String, Object> scriptValues = new HashMap<>();
+scriptValues.put("name", "Likes");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("deleteCounter(this, args.name);");
+patchRequest.setValues(scriptValues);
+
+session.advanced().defer(
+    new PatchCommandData("products/1-A", null, patchRequest, null));
+session.saveChanges();
+`}
+
+
+
+
+{`HashMap<String, Object> scriptValues = new HashMap<>();
+scriptValues.put("name", "Likes");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("deleteCounter(this, args.name);");
+patchRequest.setValues(scriptValues);
+
+PatchOperation patchOperation = new PatchOperation("products/1-A", null, patchRequest);
+store.operations().send(patchOperation);
+`}
+
+
+
+
+
+The method can be called by document ID or by document reference.
+
+
+### Get Counter
+
+To get a counter's value while patching, use the <code>counter</code> method as follows:
+
+
+
+{`HashMap<String, Object> scriptValues = new HashMap<>();
+scriptValues.put("name", "Likes");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("var likes = counter(this.Company, args.name);\\n" +
+    "put('result/', {company: this.Company, likes: likes});");
+patchRequest.setValues(scriptValues);
+
+session.advanced().defer(
+    new PatchCommandData("orders/1-A", null, patchRequest, null));
+session.saveChanges();
+`}
+
+
+
+
+{`HashMap<String, Object> scriptValues = new HashMap<>();
+scriptValues.put("name", "Likes");
+
+PatchRequest patchRequest = new PatchRequest();
+patchRequest.setScript("var likes = counter(this.Company, args.name);\\n" +
+    "put('result/', {company: this.Company, likes: likes});");
+patchRequest.setValues(scriptValues);
+
+PatchOperation patchOperation = new PatchOperation("orders/1-A", null, patchRequest);
+store.operations().send(patchOperation);
+`}
+
+
+
+
+
+The method can be called by document ID or by document reference.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/patching/_single-document-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/patching/_single-document-nodejs.mdx
new file mode 100644
index 0000000000..826c859b26
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/patching/_single-document-nodejs.mdx
@@ -0,0 +1,1549 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Patching allows **updating only parts of a document** in a single trip to the server,
+  without having to load, modify, and save the entire document back to the database.
+
+* This is particularly efficient for large documents or when only a small portion of the document needs to be changed,
+  reducing the amount of data transferred over the network.
+
+* The patching operation is executed on the server-side within a [Single write transaction](../../../client-api/faq/transaction-support.mdx).
+
+* This article covers patch operations on single documents from the Client API.
+  To patch multiple documents that match certain criteria, see [Set based patching](../../../client-api/operations/patching/set-based.mdx).
+  Patching can also be done from the [Studio](../../../studio/database/documents/patch-view.mdx).
+
+* In this page:
+
+  * [API overview](../../../client-api/operations/patching/single-document.mdx#api-overview)
+
+  * [Examples](../../../client-api/operations/patching/single-document.mdx#examples)
+      * [Modify value of single field](../../../client-api/operations/patching/single-document.mdx#modify-value-of-single-field)
+      * [Modify values of two fields](../../../client-api/operations/patching/single-document.mdx#modify-values-of-two-fields)
+      * [Increment value](../../../client-api/operations/patching/single-document.mdx#increment-value)
+      * [Add or increment](../../../client-api/operations/patching/single-document.mdx#add-or-increment)
+      * [Add or patch](../../../client-api/operations/patching/single-document.mdx#add-or-patch)
+      * [Add item to array](../../../client-api/operations/patching/single-document.mdx#add-item-to-array)
+      * [Add or patch an existing array](../../../client-api/operations/patching/single-document.mdx#add-or-patch-an-existing-array)
+      * [Insert item into specific position in array](../../../client-api/operations/patching/single-document.mdx#insert-item-into-specific-position-in-array)
+      * [Modify item in specific position in array](../../../client-api/operations/patching/single-document.mdx#modify-item-in-specific-position-in-array)
+      * [Remove items from array](../../../client-api/operations/patching/single-document.mdx#remove-items-from-array)
+      * [Load documents in a script](../../../client-api/operations/patching/single-document.mdx#load-documents-in-a-script)
+      * [Remove property](../../../client-api/operations/patching/single-document.mdx#remove-property)
+      * [Rename property](../../../client-api/operations/patching/single-document.mdx#rename-property)
+      * [Add document](../../../client-api/operations/patching/single-document.mdx#add-document)
+      * [Clone document](../../../client-api/operations/patching/single-document.mdx#clone-document)
+      * [Create/Increment counter](../../../client-api/operations/patching/single-document.mdx#createincrement-counter)
+      * [Delete counter](../../../client-api/operations/patching/single-document.mdx#delete-counter)
+      * [Get counter](../../../client-api/operations/patching/single-document.mdx#get-counter)
+      * [Patching using inline string compilation](../../../client-api/operations/patching/single-document.mdx#patching-using-inline-string-compilation)
+
+  * [Syntax](../../../client-api/operations/patching/single-document.mdx#syntax)
+      * [Session API syntax](../../../client-api/operations/patching/single-document.mdx#session-api-syntax)
+      * [Session API using defer syntax](../../../client-api/operations/patching/single-document.mdx#session-api-using-defer-syntax)
+      * [Operations API syntax](../../../client-api/operations/patching/single-document.mdx#operations-api-syntax)
+      * [List of script methods syntax](../../../client-api/operations/patching/single-document.mdx#list-of-script-methods-syntax)
+
+
+## API overview
+
+Patching can be performed using any of the following interfaces (detailed syntax is provided [below](../../../client-api/operations/patching/single-document.mdx#syntax)):
+
+* **Session API**
+* **Session API using defer**
+* **Operations API**
+
+
+#### Session API
+
+* This interface allows performing the most common patch operations.
+
+* Multiple patch methods can be defined on the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx)
+  and are sent to the server for execution in a single batch (along with any other modified documents) only when calling [saveChanges](../../../client-api/session/saving-changes.mdx).
+
+* This API includes the following patching methods (see examples [below](../../../client-api/operations/patching/single-document.mdx#examples)):
+  * `patch`
+  * `addOrPatch`
+  * `increment`
+  * `addOrIncrement`
+  * `patchArray`
+  * `addOrPatchArray`
+
+
+
+
+#### Session API using defer
+
+* Use `defer` to manipulate the patch request directly without wrapper methods.
+  Define the patch request yourself with a **script** and optional variables.
+
+* The patch request constructs the `PatchCommandData` command,
+  which is then added to the session using the `defer` function.
+
+* Similar to the above Session API,
+  all patch requests done via `defer` are sent to the server for execution only when _saveChanges_ is called.
+
+
+
+
+#### Operations API
+
+* [Operations](../../../client-api/operations/what-are-operations.mdx) allow performing ad-hoc requests directly on the document store **without** creating a session.
+
+* Similar to the above _defer_ usage, define the patch request yourself with a script and optional variables.
+
+* The patch request constructs a `PatchOperation`, which is sent to the server for execution as soon as it is passed to `documentStore.operations.send` - no call to _saveChanges_ is needed.
+
+
+
+
+## Examples
+
+
+
+#### Modify value of single field
+
+
+
+{`// Modify FirstName to Robert using the 'patch' method
+// ===================================================
+
+session.advanced.patch("employees/1-A", "FirstName", "Robert");
+await session.saveChanges();
+`}
+
+
+
+
+{`// Modify FirstName to Robert using 'defer' with 'PatchCommandData'
+// ================================================================
+
+const patchRequest = new PatchRequest();
+patchRequest.script = "this.FirstName = args.FirstName;";
+patchRequest.values = { FirstName: "Robert" };
+
+const patchCommand = new PatchCommandData("employees/1-A", null, patchRequest);
+session.advanced.defer(patchCommand);
+
+await session.saveChanges();
+`}
+
+
+
+
+{`// Modify FirstName to Robert via 'PatchOperation' on the documentStore
+// ====================================================================
+
+const patchRequest = new PatchRequest();
+patchRequest.script = "this.FirstName = args.FirstName;";
+patchRequest.values = { FirstName: "Robert" };
+
+const patchOp = new PatchOperation("employees/1-A", null, patchRequest);
+await documentStore.operations.send(patchOp);
+`}
+
+
+
+
+
+
+
+#### Modify values of two fields
+
+
+
+{`// Modify FirstName to Robert and LastName to Carter in single request
+// ===================================================================
+
+// The two Patch operations below are sent via 'saveChanges()' which complete transactionally,
+// as this call generates a single HTTP request to the database.
+// Either both will succeed - or both will be rolled back - since they are applied within the same
+// transaction.
+
+// However, on the server side, the two Patch operations are still executed separately.
+// To achieve atomicity at the level of a single server-side operation, use 'defer' or an 'operation'.
+ +session.advanced.patch("employees/1-A", "FirstName", "Robert"); +session.advanced.patch("employees/1-A", "LastName", "Carter"); + +await session.saveChanges(); +`} + + + + +{`// Modify FirstName to Robert and LastName to Carter in single request +// =================================================================== + +// Note that here we do maintain the operation's atomicity +const patchRequest = new PatchRequest(); +patchRequest.script = \`this.FirstName = args.FirstName; + this.LastName = args.LastName;\`; +patchRequest.values = { + FirstName: "Robert", + LastName: "Carter" +}; + +const patchCommand = new PatchCommandData("employees/1-A", null, patchRequest); +session.advanced.defer(patchCommand); + +await session.saveChanges(); +`} + + + + +{`// Modify FirstName to Robert and LastName to Carter in single request +// =================================================================== + +// Note that here we do maintain the operation's atomicity +const patchRequest = new PatchRequest(); +patchRequest.script = \`this.FirstName = args.FirstName; + this.LastName = args.LastName;\`; +patchRequest.values = { + FirstName: "Robert", + LastName: "Carter" +}; + +const patchOp = new PatchOperation("employees/1-A", null, patchRequest); +await documentStore.operations.send(patchOp); +`} + + + + + + + +#### Increment value + + + +{`// Increment UnitsInStock property value by 10 +// =========================================== + +session.advanced.increment("products/1-A", "UnitsInStock", 10); +await session.saveChanges(); +`} + + + + +{`// Increment UnitsInStock property value by 10 +// =========================================== + +const patchRequest = new PatchRequest(); +patchRequest.script = "this.UnitsInStock += args.UnitsToAdd;"; +patchRequest.values = { + UnitsToAdd: 10 +}; + +const patchCommand = new PatchCommandData("products/1-A", null, patchRequest); +session.advanced.defer(patchCommand); + +await session.saveChanges(); +`} + + + + +{`// Increment UnitsInStock property value by 10 +// =========================================== + +const patchRequest = new PatchRequest(); +patchRequest.script = "this.UnitsInStock += args.UnitsToAdd;"; +patchRequest.values = { + UnitsToAdd: 10 +}; + +const patchOp = new PatchOperation("products/1-A", null, patchRequest); +await documentStore.operations.send(patchOp); +`} + + + + + + + +#### Add or increment +`addOrIncrement` behavior: + +* If document exists + has the specified field => + * A numeric field will be **incremented** by the specified value. + * A string field will be **concatenated** with the specified value. + * The entity passed is disregarded. +* If document exists + does Not contain the specified field => + * The field will be **added** to the document with the specified value. + * The entity passed is disregarded. +* If document does Not exist => + * A new document will be **created** from the provided entity. + * The value to increment by is disregarded. 
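+
+For instance, the following is a hedged sketch of the string branch described above
+(assuming the same `User` class used in the examples below, and that `users/1` holds a string `lastName` field):
+
+
+{`// A sketch of addOrIncrement on a string field.
+// If users/1 exists and lastName is "Doe", it becomes "Doe-Smith".
+// If users/1 does not exist, a new document is created from fallbackUser,
+// and the "-Smith" argument is disregarded.
+const fallbackUser = new User();
+fallbackUser.firstName = "John";
+fallbackUser.lastName = "Doe";
+
+session.advanced.addOrIncrement("users/1", fallbackUser, "lastName", "-Smith");
+
+await session.saveChanges();
+`}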
+ + + + +{`// An entity that will be used in case the specified document is not found: +const newUser = new User(); +newUser.firstName = "John"; +newUser.lastName = "Doe"; +newUser.loginCount = 1; + +session.advanced.addOrIncrement( + // Specify document id on which the operation should be performed + "users/1", + // Specify an entity, + // if the specified document is Not found, a new document will be created from this entity + newUser, + // The field that should be incremented + "loginCount", + // Increment the specified field by this value + 2); + +await session.saveChanges(); +`} + + + + +{`class User { + constructor( + id = null, + firstName = "", + lastName = "", + loginCount = 0, + lastLogin = new Date(), + loginTimes = [] + ) { + Object.assign(this, { + id, + firstName, + lastName, + loginCount, + lastLogin, + loginTimes + }); + } +} +`} + + + + + + + +#### Add or patch +`addOrPatch` behavior: + +* If document exists + has the specified field => + * The field will be **patched**, the specified value will replace the existing value. + * The entity passed is disregarded. +* If document exists + does Not contain the specified field => + * The field will be **added** to the document with the specified value. + * The entity passed is disregarded. +* If document does Not exist => + * A new document will be **created** from the provided entity. + * The value to patch by is disregarded. + + + + +{`// An entity that will be used in case the specified document is not found: +const newUser = new User(); +newUser.firstName = "John"; +newUser.lastName = "Doe"; +newUser.lastLogin = new Date(2024, 0, 1); + +session.advanced.addOrPatch( + // Specify document id on which the operation should be performed + "users/1", + // Specify an entity, + // if the specified document is Not found, a new document will be created from this entity + newUser, + // The field that should be patched + "lastLogin", + // Set the current date and time as the new value for the specified field + new Date()); + +await session.saveChanges(); +`} + + + + +{`class User { + constructor( + id = null, + firstName = "", + lastName = "", + loginCount = 0, + lastLogin = new Date(), + loginTimes = [] + ) { + Object.assign(this, { + id, + firstName, + lastName, + loginCount, + lastLogin, + loginTimes + }); + } +} +`} + + + + + + + +#### Add item to array +`patchArray` behavior: + +* If document exists + has the specified array => + * Item will be **added** to the array. +* If document exists + does Not contain the specified array field => + * No exception is thrown, no patching is done, a new array is Not created. +* If document does Not exist => + * No exception is thrown, no patching is done, a new document is Not created. 
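+
+A single `patchArray` lambda can also combine several `JavaScriptArray` calls
+(a hedged sketch, assuming `blogPosts/1` exists and has a `comments` array):
+
+
+{`// Add two comments and remove the item that was at position 0.
+// As noted above, if the document or the array does not exist,
+// nothing is patched and no exception is thrown.
+session.advanced.patchArray("blogPosts/1", "comments", comments => {
+    comments.push(
+        { title: "First title", content: "First content" },
+        { title: "Second title", content: "Second content" });
+    comments.removeAt(0);
+});
+
+await session.saveChanges();
+`}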
+ + + + +{`// Add a new comment to an array +// ============================= + +// The new comment to add: +const newBlogComment = new BlogComment(); +newBlogComment.content = "Some content"; +newBlogComment.title = "Some title"; + +// Call 'patchArray': +session.advanced.patchArray( + "blogPosts/1", // Document id to patch + "comments", // The array to add the comment to + comments => { // Adding the new comment + comments.push(newBlogComment); + }); + +await session.saveChanges(); +`} + + + + +{`// Add a new comment to an array +// ============================= + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = "this.comments.push(args.comment);"; +patchRequest.values = { + comment: { + title: "Some title", + content: "Some content", + } +}; + +// Define the 'PatchCommandData': +const patchCommand = new PatchCommandData("blogPosts/1", null, patchRequest); +session.advanced.defer(patchCommand); + +await session.saveChanges(); +`} + + + + +{`// Add a new comment to an array +// ============================= + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = "this.comments.push(args.comment);"; +patchRequest.values = { + comment: { + title: "Some title", + content: "Some content", + } +}; + +// Define and send the 'PatchOperation': +const patchOp = new PatchOperation("blogPosts/1", null, patchRequest); +await documentStore.operations.send(patchOp); +`} + + + + +{`class BlogPost { + constructor( + id = null, + title = "", + body = "", + comments = [] + ) { + Object.assign(this, { + id, + title, + body, + comments + }); + } +} + +class BlogComment { + constructor( + title = "", + content = "" + ) { + Object.assign(this, { + title, + content + }); + } +} +`} + + + + + + + +#### Add or patch an existing array +`addOrPatchArray` behavior: + +* If document exists + has the specified array field => + * The specified values will be **added** to the existing array values. + * The entity passed is disregarded. +* If document exists + does Not contain the specified array field => + * The array field is Not added to the document, no patching is done. + * The entity passed is disregarded. +* If document does Not exist => + * A new document will be **created** from the provided entity. + * The value to patch by is disregarded. + + + + +{`// An entity that will be used in case the specified document is not found: +const newUser = new User(); +newUser.firstName = "John"; +newUser.lastName = "Doe"; +newUser.loginTimes = [new Date(2024, 0, 1)]; + +session.advanced.addOrPatchArray( + // Specify document id on which the operation should be performed + "users/1", + // Specify an entity, + // if the specified document is Not found, a new document will be created from this entity + newUser, + // The array field that should be patched + "loginTimes", + // Add values to the list of the specified array field + a => a.push(new Date(2024, 2, 2), new Date(2024, 3, 3))); + +await session.saveChanges(); +`} + + + + +{`class User { + constructor( + id = null, + firstName = "", + lastName = "", + loginCount = 0, + lastLogin = new Date(), + loginTimes = [] + ) { + Object.assign(this, { + id, + firstName, + lastName, + loginCount, + lastLogin, + loginTimes + }); + } +} +`} + + + + + + + +#### Insert item into specific position in array +* Inserting an item in a specific position is supported only by the _defer_ or the _operations_ syntax. +* No exception is thrown if either the document or the specified array does not exist. 
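+
+The scripts in the examples below use JavaScript's standard `Array.prototype.splice`;
+as a reminder of its semantics, `splice(index, 0, item)` inserts while `splice(index, 1, item)` replaces:
+
+
+{`const comments = ["a", "b", "c"];
+
+comments.splice(1, 0, "x"); // insert at position 1, delete nothing
+// comments is now ["a", "x", "b", "c"]
+
+comments.splice(2, 1, "y"); // replace the single item at position 2
+// comments is now ["a", "x", "y", "c"]
+`}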
+
+
+
+{`// Insert a new comment at position 1
+// ==================================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+patchRequest.script = "this.comments.splice(1, 0, args.comment);";
+patchRequest.values = {
+    comment: {
+        title: "Some title",
+        content: "Some content",
+    }
+};
+
+// Define the 'PatchCommandData':
+const patchCommand = new PatchCommandData("blogPosts/1", null, patchRequest);
+session.advanced.defer(patchCommand);
+
+await session.saveChanges();
+`}
+
+
+
+
+{`// Insert a new comment at position 1
+// ==================================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+patchRequest.script = "this.comments.splice(1, 0, args.comment);";
+patchRequest.values = {
+    comment: {
+        title: "Some title",
+        content: "Some content",
+    }
+};
+
+// Define and send the 'PatchOperation':
+const patchOp = new PatchOperation("blogPosts/1", null, patchRequest);
+await documentStore.operations.send(patchOp);
+`}
+
+
+
+
+{`class BlogPost {
+    constructor(
+        id = null,
+        title = "",
+        body = "",
+        comments = []
+    ) {
+        Object.assign(this, {
+            id,
+            title,
+            body,
+            comments
+        });
+    }
+}
+
+class BlogComment {
+    constructor(
+        title = "",
+        content = ""
+    ) {
+        Object.assign(this, {
+            title,
+            content
+        });
+    }
+}
+`}
+
+
+
+
+
+
+#### Modify item in specific position in array
+* Modifying an item at a specific position is supported only by the _defer_ or the _operations_ syntax.
+* No exception is thrown if either the document or the specified array does not exist.
+
+
+
+
+{`// Modify comment at position 3
+// ============================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+patchRequest.script = "this.comments.splice(3, 1, args.comment);";
+patchRequest.values = {
+    comment: {
+        title: "Some title",
+        content: "Some content",
+    }
+};
+
+// Define the 'PatchCommandData':
+const patchCommand = new PatchCommandData("blogPosts/1", null, patchRequest);
+session.advanced.defer(patchCommand);
+
+await session.saveChanges();
+`}
+
+
+
+
+{`// Modify comment at position 3
+// ============================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+patchRequest.script = "this.comments.splice(3, 1, args.comment);";
+patchRequest.values = {
+    comment: {
+        title: "Some title",
+        content: "Some content",
+    }
+};
+
+// Define and send the 'PatchOperation':
+const patchOp = new PatchOperation("blogPosts/1", null, patchRequest);
+await documentStore.operations.send(patchOp);
+`}
+
+
+
+
+{`class BlogPost {
+    constructor(
+        id = null,
+        title = "",
+        body = "",
+        comments = []
+    ) {
+        Object.assign(this, {
+            id,
+            title,
+            body,
+            comments
+        });
+    }
+}
+
+class BlogComment {
+    constructor(
+        title = "",
+        content = ""
+    ) {
+        Object.assign(this, {
+            title,
+            content
+        });
+    }
+}
+`}
+
+
+
+
+
+
+#### Remove items from array
+* Removing all items that match some predicate from an array is supported only by the _defer_ or the _operations_ syntax.
+* No exception is thrown if either the document or the specified array does not exist.
+ + + + +{`// Remove all comments that contain the word "wrong" in their content +// ================================================================== + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = \`this.comments = this.comments.filter(comment => + !comment.content.includes(args.text));\`; +patchRequest.values = { + text: "wrong" +}; + +// Define the 'PatchCommandData': +const patchCommand = new PatchCommandData("blogPosts/1", null, patchRequest); +session.advanced.defer(patchCommand); + +await session.saveChanges(); +`} + + + + +{`// Remove all comments that contain the word "wrong" in their content +// ================================================================== + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = \`this.comments = this.comments.filter(comment => + !comment.content.includes(args.text));\`; +patchRequest.values = { + text: "wrong" +}; + +// Define and send the 'PatchOperation': +const patchOp = new PatchOperation("blogPosts/1", null, patchRequest); +await documentStore.operations.send(patchOp); +`} + + + + +{`class BlogPost { + constructor( + id = null, + title = "", + body = "", + comments = [] + ) { + Object.assign(this, { + id, + title, + body, + comments + }); + } +} + +class BlogComment { + constructor( + title = "", + content = "" + ) { + Object.assign(this, { + title, + content + }); + } +} +`} + + + + + + + +#### Load documents in a script +* Loading documents is supported only by the _defer_ or the _operations_ syntax. + + + + +{`// Load a related document and update a field +// ========================================== + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = \`this.Lines.forEach(line => { + const productDoc = load(line.Product); + line.ProductName = productDoc.Name; + });\`; + +// Define the 'PatchCommandData': +const patchCommand = new PatchCommandData("orders/1-A", null, patchRequest); +session.advanced.defer(patchCommand); + +await session.saveChanges(); +`} + + + + +{`// Load a related document and update a field +// ========================================== + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = \`this.Lines.forEach(line => { + const productDoc = load(line.Product); + line.ProductName = productDoc.Name; + });\`; + +// Define and send the 'PatchOperation': +const patchOp = new PatchOperation("orders/1-A", null, patchRequest); +await documentStore.operations.send(patchOp); +`} + + + + + + + +#### Remove property +* Removing a property is supported only by the _defer_ or the _operations_ syntax. 
+ + + + +{`// Remove a document property +// ========================== + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = \`delete this.Address.PostalCode;\`; + +// Define the 'PatchCommandData': +const patchCommand = new PatchCommandData("employees/1-A", null, patchRequest); +session.advanced.defer(patchCommand); + +await session.saveChanges(); +`} + + + + +{`// Remove a document property +// ========================== + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = \`delete this.Address.PostalCode;\`; + +// Define and send the 'PatchOperation': +const patchOp = new PatchOperation("employees/1-A", null, patchRequest); +await documentStore.operations.send(patchOp); +`} + + + + + + + +#### Rename property +* Renaming a property is supported only by the _defer_ or the _operations_ syntax. + + + + +{`// Rename property Name to ProductName +// =================================== + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = \`const propertyValue = this[args.currentProperty]; + delete this[args.currentProperty]; + this[args.newProperty] = propertyValue;\`; +patchRequest.values = { + currentProperty: "Name", + newProperty: "ProductName" +}; + +// Define the 'PatchCommandData': +const patchCommand = new PatchCommandData("products/1-A", null, patchRequest); +session.advanced.defer(patchCommand); + +await session.saveChanges(); +`} + + + + +{`// Rename property Name to ProductName +// =================================== + +// Define the 'PatchRequest': +const patchRequest = new PatchRequest(); +patchRequest.script = \`const propertyValue = this[args.currentProperty]; + delete this[args.currentProperty]; + this[args.newProperty] = propertyValue;\`; +patchRequest.values = { + currentProperty: "Name", + newProperty: "ProductName" +}; + +// Define and send the 'PatchOperation': +const patchOp = new PatchOperation("products/1-A", null, patchRequest); +await documentStore.operations.send(patchOp); +`} + + + + + + + +#### Add document +* Adding a new document is supported only by the _defer_ or the _operations_ syntax. 
+
+
+
+{`// Add a new document
+// ==================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Add a new document (projects/1) to collection Projects
+// The id of the patched document (employees/1-A) is used as content for the ProjectLeader property
+patchRequest.script = \`put('projects/1', {
+        ProjectLeader: id(this),
+        ProjectDesc: 'Some desc..',
+        '@metadata': { '@collection': 'Projects'}
+    });\`;
+
+// Define the 'PatchCommandData':
+const patchCommand = new PatchCommandData("employees/1-A", null, patchRequest);
+session.advanced.defer(patchCommand);
+
+await session.saveChanges();
+`}
+
+
+
+
+{`// Add a new document
+// ==================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Add a new document (projects/1) to collection Projects
+// The id of the patched document (employees/1-A) is used as content for the ProjectLeader property
+patchRequest.script = \`put('projects/1', {
+        ProjectLeader: id(this),
+        ProjectDesc: 'Some desc..',
+        '@metadata': { '@collection': 'Projects'}
+    });\`;
+
+// Define and send the 'PatchOperation':
+const patchOp = new PatchOperation("employees/1-A", null, patchRequest);
+await documentStore.operations.send(patchOp);
+`}
+
+
+
+
+
+
+#### Clone document
+* Cloning a document is supported only by the _defer_ or the _operations_ syntax.
+
+
+
+
+{`// Clone a document
+// ================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// The new document will be in the same collection as 'employees/1-A'
+// By specifying 'employees/' the server will generate a server-side ID for the new document
+patchRequest.script = \`put('employees/', this);\`;
+
+// Define the 'PatchCommandData':
+const patchCommand = new PatchCommandData("employees/1-A", null, patchRequest);
+session.advanced.defer(patchCommand);
+
+await session.saveChanges();
+`}
+
+
+
+
+{`// Clone a document
+// ================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// The new document will be in the same collection as 'employees/1-A'
+// By specifying 'employees/' the server will generate a server-side ID for the new document
+patchRequest.script = \`put('employees/', this);\`;
+
+// Define and send the 'PatchOperation':
+const patchOp = new PatchOperation("employees/1-A", null, patchRequest);
+await documentStore.operations.send(patchOp);
+`}
+
+
+
+
+
+
+**Attachments, Counters, Time Series, and Revisions:**
+
+ * When cloning a document via patching, only the document's fields are copied to the new document.
+   Attachments, counters, time series data, and revisions from the source document will Not be copied automatically.
+ * To manage time series & counters via patching, you can use the pre-defined JavaScript methods listed here:
+   [Counters methods](../../../server/kb/javascript-engine.mdx#counter-operations) & [Time series methods](../../../server/kb/javascript-engine.mdx#time-series-operations).
+ * Note: When [Cloning a document via the Studio](../../../studio/database/documents/create-new-document.mdx#clone-an-existing-document),
+   attachments, counters, time series, and revisions will be copied automatically.
+
+**Archived documents:**
+
+ * If the source document is archived, the cloned document will Not be archived.
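+
+Since counters are not copied automatically when cloning (see the note above), a patch script can copy
+a known counter to the clone explicitly. The following is a hedged sketch, assuming the source document
+has a counter named "Likes" and that `put` returns the new document's ID:
+
+
+{`// Clone a document and copy one known counter to the clone.
+const patchRequest = new PatchRequest();
+patchRequest.script = \`
+    const cloneId = put('employees/', this);
+    const likes = counter(this, args.counterName);
+    if (likes !== null) {
+        incrementCounter(cloneId, args.counterName, likes);
+    }\`;
+patchRequest.values = { counterName: "Likes" };
+
+const patchOp = new PatchOperation("employees/1-A", null, patchRequest);
+await documentStore.operations.send(patchOp);
+`}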
+
+
+
+
+
+#### Create/Increment counter
+
+
+
+{`// Increment/Create counter
+// ========================
+
+// Increase counter "Likes" by 10, or create it with a value of 10 if it doesn't exist
+session.countersFor("products/1-A").increment("Likes", 10);
+await session.saveChanges();
+`}
+
+
+
+
+{`// Create/Increment counter
+// ========================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Use the 'incrementCounter' method to create/increment a counter
+patchRequest.script = \`incrementCounter(this, args.counterName, args.counterValue);\`;
+patchRequest.values = {
+    counterName: "Likes",
+    counterValue: 10
+};
+
+// Define the 'PatchCommandData':
+const patchCommand = new PatchCommandData("products/1-A", null, patchRequest);
+session.advanced.defer(patchCommand);
+
+await session.saveChanges();
+`}
+
+
+
+
+{`// Create/Increment counter
+// ========================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Use the 'incrementCounter' method to create/increment a counter
+patchRequest.script = \`incrementCounter(this, args.counterName, args.counterValue);\`;
+patchRequest.values = {
+    counterName: "Likes",
+    counterValue: 10
+};
+
+// Define and send the 'PatchOperation':
+const patchOp = new PatchOperation("products/1-A", null, patchRequest);
+await documentStore.operations.send(patchOp);
+`}
+
+
+
+
+
+
+Learn more about Counters in this [Counters Overview](../../../document-extensions/counters/overview.mdx).
+
+
+
+
+
+
+#### Delete counter
+
+
+
+{`// Delete counter
+// ==============
+
+session.countersFor("products/1-A").delete("Likes");
+await session.saveChanges();
+`}
+
+
+
+
+{`// Delete counter
+// ==============
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Use the 'deleteCounter' method to delete a counter
+patchRequest.script = \`deleteCounter(this, args.counterName);\`;
+patchRequest.values = {
+    counterName: "Likes"
+};
+
+// Define the 'PatchCommandData':
+const patchCommand = new PatchCommandData("products/1-A", null, patchRequest);
+session.advanced.defer(patchCommand);
+
+await session.saveChanges();
+`}
+
+
+
+
+{`// Delete counter
+// ==============
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Use the 'deleteCounter' method to delete a counter
+patchRequest.script = \`deleteCounter(this, args.counterName);\`;
+patchRequest.values = {
+    counterName: "Likes"
+};
+
+// Define and send the 'PatchOperation':
+const patchOp = new PatchOperation("products/1-A", null, patchRequest);
+await documentStore.operations.send(patchOp);
+`}
+
+
+
+
+
+
+#### Get counter
+
+
+
+{`// Get counter value
+// =================
+
+const likes = await session.countersFor("products/1-A").get("Likes");
+`}
+
+
+
+
+{`// Get counter value
+// =================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Use the 'counter' method to get the value of the specified counter
+// and then put the results into a new document 'productLikes/'
+patchRequest.script = \`const numberOfLikes = counter(this, args.counterName);
+    put('productLikes/', {ProductName: this.Name, Likes: numberOfLikes});\`;
+
+patchRequest.values = {
+    counterName: "Likes"
+};
+
+// Define the 'PatchCommandData':
+const patchCommand = new PatchCommandData("products/1-A", null, patchRequest);
+session.advanced.defer(patchCommand);
+
+await session.saveChanges();
+`}
+
+
+
+
+{`// Get counter value
+// =================
+
+// Define the
'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Use the 'counter' method to get the value of the specified counter
+// and then put the results into a new document 'productLikes/'
+patchRequest.script = \`const numberOfLikes = counter(this, args.counterName);
+    put('productLikes/', {ProductName: this.Name, Likes: numberOfLikes});\`;
+
+patchRequest.values = {
+    counterName: "Likes"
+};
+
+// Define and send the 'PatchOperation':
+const patchOp = new PatchOperation("products/1-A", null, patchRequest);
+await documentStore.operations.send(patchOp);
+`}
+
+
+
+
+
+
+#### Patching using inline string compilation
+* When using a JavaScript script with the _defer_ or _operations_ syntax,
+  you can apply logic using **inline string compilation**.
+* To enable this, set the [Patching.AllowStringCompilation](../../../server/configuration/patching-configuration.mdx#patchingallowstringcompilation) configuration key to _true_.
+
+
+
+
+{`// Modify value using inline string compilation
+// ============================================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Define the script:
+patchRequest.script = \`
+    // Give a discount if the product is low in stock:
+    const functionBody = "return doc.UnitsInStock < lowStock ? " +
+                         "doc.PricePerUnit * discount :" +
+                         "doc.PricePerUnit;";
+
+    // Define a function that processes the document and returns the price:
+    const calcPrice = new Function("doc", "lowStock", "discount", functionBody);
+
+    // Update the product's PricePerUnit based on the function:
+    this.PricePerUnit = calcPrice(this, args.lowStock, args.discount);\`;
+
+patchRequest.values = {
+    discount: "0.8",
+    lowStock: "10"
+};
+
+// Define the 'PatchCommandData':
+const patchCommand = new PatchCommandData("products/1-A", null, patchRequest);
+session.advanced.defer(patchCommand);
+
+await session.saveChanges();
+
+// The same can be applied using the 'operations' syntax.
+`}
+
+
+
+
+{`// Modify value using inline string compilation
+// ============================================
+
+// Define the 'PatchRequest':
+const patchRequest = new PatchRequest();
+
+// Define the script:
+patchRequest.script = \`
+    // Give a discount if the product is low in stock:
+    const discountExpression = "this.UnitsInStock < args.lowStock ? " +
+                               "this.PricePerUnit * args.discount :" +
+                               "this.PricePerUnit";
+
+    // Call 'eval', pass the string expression that contains your logic:
+    const price = eval(discountExpression);
+
+    // Update the product's PricePerUnit:
+    this.PricePerUnit = price;\`;
+
+patchRequest.values = {
+    discount: "0.8",
+    lowStock: "10"
+};
+
+// Define and send the 'PatchOperation':
+const patchOp = new PatchOperation("products/1-A", null, patchRequest);
+await documentStore.operations.send(patchOp);
+
+// The same can be applied using the 'session defer' syntax.
+`}
+
+
+
+
+
+
+## Syntax
+### Session API syntax
+
+
+
+
+{`patch(id, path, value);
+patch(entity, path, value);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|----------|----------------------------------------------------------------------------------------------------------------------------------------------------|
+| **id** | `string` | Document ID on which patching should be performed. |
+| **entity** | `object` | Entity on which patching should be performed. The entity should be one that was returned by the current session in a `load` or `query` operation.  |
+| **path** | `string` | The path to the field. |
+| **value** | `object` | Value to set. |
+
+
+
+{`addOrPatch(id, entity, pathToObject, value);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|----------|------------------------------------------------------------------------------------------|
+| **id** | `string` | Document ID on which patching should be performed. |
+| **entity** | `object` | If the specified document is Not found, a new document will be created from this entity. |
+| **pathToObject** | `string` | The path to the field. |
+| **value** | `object` | Value to set. |
+
+
+
+{`increment(id, path, valueToAdd);
+increment(entity, path, valueToAdd);
+`}
+
+
+
+| Parameter | Type | Description |
+|----------------|----------|----------------------------------------------------------------------------------------------------------------------------------------------------|
+| **id** | `string` | Document ID on which patching should be performed. |
+| **entity** | `object` | Entity on which patching should be performed. The entity should be one that was returned by the current session in a `load` or `query` operation.  |
+| **path** | `string` | The path to the field. |
+| **valueToAdd** | `object` | Value to increment by.
Note how numbers are handled with the [JavaScript engine](../../../server/kb/numbers-in-ravendb.mdx) in RavenDB. |
+
+
+
+{`addOrIncrement(id, entity, pathToObject, valToAdd);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|----------|------------------------------------------------------------------------------------------|
+| **id** | `string` | Document ID on which patching should be performed. |
+| **entity** | `object` | If the specified document is Not found, a new document will be created from this entity. |
+| **pathToObject** | `string` | The path to the field. |
+| **valToAdd** | `object` | Value to increment by. |
+
+
+
+{`patchArray(id, pathToArray, arrayAdder);
+patchArray(entity, pathToArray, arrayAdder);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------|-----------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
+| **id** | `string` | Document ID on which patching should be performed. |
+| **entity** | `object` | Entity on which patching should be performed. The entity should be one that was returned by the current session in a `load` or `query` operation.  |
+| **pathToArray** | `string` | The path to the array field. |
+| **arrayAdder** | `(JavaScriptArray) => void` | Function that modifies the array. |
+
+
+
+{`addOrPatchArray(id, entity, pathToArray, arrayAdder);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------|-----------------------------|------------------------------------------------------------------------------------------|
+| **id** | `string` | Document ID on which patching should be performed. |
+| **entity** | `object` | If the specified document is Not found, a new document will be created from this entity. |
+| **pathToArray** | `string` | The path to the array field. |
+| **arrayAdder** | `(JavaScriptArray) => void` | Function that modifies the array. |
+
+
+
+{`class JavaScriptArray \{
+    push(...u);      // Adds one or more values to the array
+    removeAt(index); // Removes the item at position 'index' in the array
+\}
+`}
+
+
+### Session API using defer syntax
+
+
+
+{`session.advanced.defer(...commands);
+`}
+
+
+
+| Parameter | Type | Description |
+|--------------|------------|-----------------------------------------------------------------------------------------------------------|
+| **commands** | `object[]` | List of commands that will be executed on the server.
Use the `PatchCommandData` command for patching. |
+
+
+
+{`class PatchCommandData \{
+    // ID of the document to be patched
+    id; // string
+
+    // Change vector of the document to be patched, can be null.
+    // Used to verify that the document was not changed before the patch is executed.
+    changeVector; // string
+
+    // Patch request to be performed on the document
+    patch; // A PatchRequest object
+
+    // Patch request to perform if no document with the specified ID was found
+    patchIfMissing; // A PatchRequest object
+\}
+`}
+
+
+
+
+
+{`class PatchRequest \{
+    // The JavaScript code to be run on the server
+    script; // string
+
+    // Parameters to be passed to the script
+    values; // Dictionary
+
+    // It is highly recommended to use the script with parameters.
+    // This allows RavenDB to cache scripts and boost performance.
+    // The parameters are accessed in the script via the \`args\` object.
+\}
+`}
+
+
+### Operations API syntax
+
+* Learn more about using operations in this [Operations overview](../../../client-api/operations/what-are-operations.mdx).
+
+
+
+{`const patchOperation = new PatchOperation(id, changeVector, patch);
+
+const patchOperation = new PatchOperation(id, changeVector, patch, patchIfMissing,
+                                          skipPatchIfChangeVectorMismatch);
+`}
+
+
+
+| Constructor | Type | Description |
+|--------------------------------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **id** | `string` | ID of the document to be patched. |
+| **changeVector** | `string` | Change vector of the document to be patched.
Used to verify that the document was not modified before the patch is executed. Can be null. | +| **patch** | `PatchRequest` | Patch request to perform on the document. | +| **patchIfMissing** | `PatchRequest` | Patch request to perform if the specified document is not found.
Will run only if no `changeVector` was passed.
Can be null. | +| **skipPatchIfChangeVectorMismatch** | `boolean` | `true` - do not patch if the document has been modified.
`false` (Default) - execute the patch even if the document has been modified.

An exception is thrown if:
this param is `false` + `changeVector` has value + document with that ID and change vector was not found. | +### List of script methods syntax + +* For a complete list of JavaScript methods available in patch scripts, + refer to [Knowledge Base: JavaScript Engine](../../../server/kb/javascript-engine.mdx#predefined-javascript-functions). + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/patching/json-patch-syntax.mdx b/versioned_docs/version-7.1/client-api/operations/patching/json-patch-syntax.mdx new file mode 100644 index 0000000000..0ae772b926 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/patching/json-patch-syntax.mdx @@ -0,0 +1,249 @@ +--- +title: "Patching: JSON Patch Syntax" +hide_table_of_contents: true +sidebar_label: JSON Patch Syntax +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Patching: JSON Patch Syntax + + + +* You can use the **JSON Patch Syntax** from your client to apply changes + to RavenDB documents via JSON objects. + +* A JSON Patch is a document constructed of JSON objects, each containing + the ID of a target (RavenDB) document and a patch operation to be applied + to this document. + +* Since the operation is executed in a single request to a database, the JSON Patch command is performed in a single write [transaction](../../../client-api/faq/transaction-support.mdx). + +* JSON Patch operations include - + * **Add** a document property + * **Remove** a document property + * **Replace** the contents of a document property + * **Copy** the contents of one document property to another + * **Move** the contents of one document property to another + * **Test** whether the patching succeeded + +* In this page: + * [JSON Patches](../../../client-api/operations/patching/json-patch-syntax.mdx#json-patches) + * [Running JSON Patches](../../../client-api/operations/patching/json-patch-syntax.mdx#running-json-patches) + * [Patch Operations](../../../client-api/operations/patching/json-patch-syntax.mdx#patch-operations) + * [Add Document Property](../../../client-api/operations/patching/json-patch-syntax.mdx#add-operation) + * [Remove Document Property](../../../client-api/operations/patching/json-patch-syntax.mdx#remove-document-property) + * [Replace Document Property Contents](../../../client-api/operations/patching/json-patch-syntax.mdx#replace-document-property-contents) + * [Copy Document Property Contents to Another Property](../../../client-api/operations/patching/json-patch-syntax.mdx#copy-document-property-contents-to-another-property) + * [Move Document Property Contents to Another Property](../../../client-api/operations/patching/json-patch-syntax.mdx#move-document-property-contents-to-another-property) + * [Test Patching Operation](../../../client-api/operations/patching/json-patch-syntax.mdx#test-patching-operation) + * [Additional JSON Patching Options](../../../client-api/operations/patching/json-patch-syntax.mdx#additional-json-patching-options) + + +## JSON Patches + +* Similar to other forms of patching, JSON Patches can be used by a client to + swiftly change any number of documents without loading and editing the documents + locally first. 
+ +* A series of JSON objects, each containing a patch operation and a document ID, + is added to an ASP.NET `JsonPatchDocument` object that is sent to the server for + execution. +### When are JSON Patches Used? + +JSON Patches include no RQL or C# code, and offer a limited set of operations +in relation to other patching methods. +Users may still prefer them over other methods when, for example - + + * A client that works with multiple databases from different vendors prefers broadcasting patches + with a common syntax to all of them. + * It is easier for an automated process that builds and applies patches + to send JSON patches. + + + +## Running JSON Patches + +To run JSON patches - + +* Use the `Microsoft.AspNetCore.JsonPatch` namespace from your code. + E.g. `using Microsoft.AspNetCore.JsonPatch;` +* Create a `JsonPatchDocument` instance and append your patches to it. +* Pass your `JsonPatchDocument` to RavenDB's `JsonPatchOperation` operation to run the patches. + * `JsonPatchOperation` Parameters + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | id | `string` | The ID of the document we want to patch | + | jsonPatchDocument | `JsonPatchDocument` | The patches document | + + + + + +## Patch Operations + +### Add Operation + +Use the `Add` operation to add a document property or an array element. + +* **Method Parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | path | `string` | Path to the property we want to add | + | value | `object` | Property value | + +* **Code Sample - Add a document property** + + +{`var patchesDocument = new JsonPatchDocument(); +patchesDocument.Add("/PropertyName", "Contents"); +store.Operations.Send(new JsonPatchOperation(documentId, patchesDocument)); +`} + + +### Remove Document Property + +Use the `Remove` operation to remove a document property or an array element. <br/>
+ +* **Method Parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | path | `string` | Path to the property we want to remove | + +* **Code Sample - Remove a document property** + + +{`patchesDocument = new JsonPatchDocument(); +patchesDocument.Remove("/PropertyName"); +store.Operations.Send(new JsonPatchOperation(documentId, patchesDocument)); +`} + + +### Replace Document Property Contents + +Use the `Replace` operation to replace the contents of a document property or an array element. + +* **Method Parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | path | `string` | Path to the property whose contents we want to replace | + | value | `object` | New contents | + +* **Code Sample - Replace a document property** + + +{`patchesDocument = new JsonPatchDocument(); +// Replace document property contents with a new value +patchesDocument.Replace("/PropertyName", "NewContents"); +store.Operations.Send(new JsonPatchOperation(documentId, patchesDocument)); +`} + + +### Copy Document Property Contents to Another Property + +Use the `Copy` operation to copy the contents of one document property or array element to another. + +* **Method Parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | from | `string` | Path to the property we want to copy | + | path| `string` | Path to the property we want to copy to | + +* **Code Sample - Copy document property contents** + + +{`patchesDocument = new JsonPatchDocument(); +// Copy document property contents to another document property +patchesDocument.Copy("/PropertyName1", "/PropertyName2"); +store.Operations.Send(new JsonPatchOperation(documentId, patchesDocument)); +`} + + +### Move Document Property Contents to Another Property + +Use the `Move` operation to move the contents of one document property or array element to another. + +* **Method Parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | from | `string` | Path to the property whose contents we want to move | + | path| `string` | Path to the property we want to move the contents to | + +* **Code Sample - Move document property contents** + + +{`patchesDocument = new JsonPatchDocument(); +// Move document property contents to another document property +patchesDocument.Move("/PropertyName1", "/PropertyName2"); +store.Operations.Send(new JsonPatchOperation(documentId, patchesDocument)); +`} + + +### Test Patching Operation + +Use the `Test` operation to verify patching operations. +If the test fails, all patching operations included in the patches document will be reverted +and a `RavenException` exception will be thrown. + +* **Method Parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | path | `string` | Path to the property we want to test | + | value | `object` | Value to compare `path` with | + + +* **Code Sample - Test Patching** + + + +{`patchesDocument = new JsonPatchDocument(); +patchesDocument.Test("/PropertyName", "Value"); // Compare property contents with the value + // Revert all patch operations if the test fails +try +\{ + store.Operations.Send(new JsonPatchOperation(documentId, patchesDocument)); +\} +catch (RavenException e) +\{ + // handle the exception +\} +`} + + + + + +## Additional JSON Patching Options + +The samples given above remain simple, showing how to manipulate document properties. <br/>
+Note that JSON Patches have additional options, like the manipulation of array or list elements: + +* **Add an array element** + + +{`patchesDocument = new JsonPatchDocument(); +// Use the path parameter to add an array element +patchesDocument.Add("/ArrayName/12", "Contents"); +store.Operations.Send(new JsonPatchOperation(documentId, patchesDocument)); +`} + + + +You can learn more about additional JSON patching options in the [JSON Patch RFC](https://datatracker.ietf.org/doc/html/rfc6902), +among other resources. + + + diff --git a/versioned_docs/version-7.1/client-api/operations/patching/set-based.mdx b/versioned_docs/version-7.1/client-api/operations/patching/set-based.mdx new file mode 100644 index 0000000000..950d41d7ff --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/patching/set-based.mdx @@ -0,0 +1,44 @@ +--- +title: "Set-Based Patch Operations" +hide_table_of_contents: true +sidebar_label: Set Based +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SetBasedCsharp from './_set-based-csharp.mdx'; +import SetBasedJava from './_set-based-java.mdx'; +import SetBasedNodejs from './_set-based-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/patching/single-document.mdx b/versioned_docs/version-7.1/client-api/operations/patching/single-document.mdx new file mode 100644 index 0000000000..7a0a616752 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/patching/single-document.mdx @@ -0,0 +1,41 @@ +--- +title: "Single Document Patch Operations" +hide_table_of_contents: true +sidebar_label: Single Document +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SingleDocumentCsharp from './_single-document-csharp.mdx'; +import SingleDocumentJava from './_single-document-java.mdx'; +import SingleDocumentNodejs from './_single-document-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-csharp.mdx new file mode 100644 index 0000000000..f406565b19 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-csharp.mdx @@ -0,0 +1,117 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When creating a database, you can specify the number of replicas for that database. + This determines the number of database instances in the database-group. + +* **The number of replicas can be dynamically increased** even after the database is up and running, + by adding more nodes to the database-group. + +* The nodes added must already exist in the [cluster topology](../../../server/clustering/rachis/cluster-topology.mdx). + +* Once a new node is added to the database-group, + the cluster assigns a mentor node (from the existing database-group nodes) to update the new node. 
+ +* In this page: + * [Add database node - random](../../../client-api/operations/server-wide/add-database-node.mdx#add-database-node---random) + * [Add database node - specific](../../../client-api/operations/server-wide/add-database-node.mdx#add-database-node---specific) + * [Syntax](../../../client-api/operations/server-wide/add-database-node.mdx#syntax) + +## Add database node - random + +* Use `AddDatabaseNodeOperation` to add another database-instance to the database-group. +* The node added will be a random node from the existing cluster nodes. + + + + +{`// Create the AddDatabaseNodeOperation +// Add a random node to 'Northwind' database-group +var addDatabaseNodeOp = new AddDatabaseNodeOperation("Northwind"); + +// Execute the operation by passing it to Maintenance.Server.Send +DatabasePutResult result = store.Maintenance.Server.Send(addDatabaseNodeOp); + +// Can access the new topology +var numberOfReplicas = result.Topology.AllNodes.Count(); +`} + + + + +{`// Create the AddDatabaseNodeOperation +// Add a random node to 'Northwind' database-group +var addDatabaseNodeOp = new AddDatabaseNodeOperation("Northwind"); + +// Execute the operation by passing it to Maintenance.Server.SendAsync +DatabasePutResult result = await store.Maintenance.Server.SendAsync(addDatabaseNodeOp); + +// Can access the new topology +var numberOfReplicas = result.Topology.AllNodes.Count(); +`} + + + + + + +## Add database node - specific + +* You can specify the node tag to add. +* This node must already exist in the cluster topology. + + + + +{`// Create the AddDatabaseNodeOperation +// Add node C to 'Northwind' database-group +var addDatabaseNodeOp = new AddDatabaseNodeOperation("Northwind", "C"); + +// Execute the operation by passing it to Maintenance.Server.Send +DatabasePutResult result = store.Maintenance.Server.Send(addDatabaseNodeOp); +`} + + + + +{`// Create the AddDatabaseNodeOperation +// Add node C to 'Northwind' database-group +var addDatabaseNodeOp = new AddDatabaseNodeOperation("Northwind", "C"); + +// Execute the operation by passing it to Maintenance.Server.SendAsync +DatabasePutResult result = await store.Maintenance.Server.SendAsync(addDatabaseNodeOp); +`} + + + + + + +## Syntax + + + +{`public AddDatabaseNodeOperation(string databaseName, string nodeTag = null) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **databaseName** | `string` | Name of a database for which to add the node. | +| **nodeTag** | `string` | Tag of node to add.
Default: a random node from the existing cluster topology will be added. | + +| Object returned by Send operation:
`DatabasePutResult` | Type | Description | +| - | - | - | +| RaftCommandIndex | `long` | Index of the raft command that was executed | +| Name | `string` | Database name | +| Topology | `DatabaseTopology` | The database topology | +| NodesAddedTo | `List<string>` | New nodes added to the cluster topology.<br/>
Will be 0 for this operation. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-nodejs.mdx new file mode 100644 index 0000000000..432c806220 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-nodejs.mdx @@ -0,0 +1,92 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When creating a database, you can specify the number of replicas for that database. + This determines the number of database instances in the database-group. + +* **The number of replicas can be dynamically increased** even after the database is up and running, + by adding more nodes to the database-group. + +* The nodes added must already exist in the [cluster topology](../../../server/clustering/rachis/cluster-topology.mdx). + +* Once a new node is added to the database-group, + the cluster assigns a mentor node (from the existing database-group nodes) to update the new node. + +* In this page: + * [Add database node - random](../../../client-api/operations/server-wide/add-database-node.mdx#add-database-node---random) + * [Add database node - specific](../../../client-api/operations/server-wide/add-database-node.mdx#add-database-node---specific) + * [Syntax](../../../client-api/operations/server-wide/add-database-node.mdx#syntax) + +## Add database node - random + +* Use `AddDatabaseNodeOperation` to add another database-instance to the database-group. +* The node added will be a random node from the existing cluster nodes. + + + +{`// Create the AddDatabaseNodeOperation +// Add a random node to 'Northwind' database-group +const addDatabaseNodeOp = new AddDatabaseNodeOperation("Northwind"); + +// Execute the operation by passing it to maintenance.server.send +const result = await documentStore.maintenance.server.send(addDatabaseNodeOp); + +// Can access the new topology +const numberOfReplicas = getAllNodesFromTopology(result.topology).length; +`} + + + + + +## Add database node - specific + +* You can specify the node tag to add. +* This node must already exist in the cluster topology. + + + +{`// Create the AddDatabaseNodeOperation +// Add node C to 'Northwind' database-group +const addDatabaseNodeOp = new AddDatabaseNodeOperation("Northwind", "C"); + +// Execute the operation by passing it to maintenance.server.send +const result = await documentStore.maintenance.server.send(addDatabaseNodeOp); +`} + + + + + +## Syntax + + + +{`const addDatabaseNodeOp = new AddDatabaseNodeOperation(databaseName, nodeTag?); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **databaseName** | `string` | Name of a database for which to add the node. | +| **nodeTag** | `string` | Tag of node to add.<br/>
Default: If not passed, a random node from the existing cluster topology will be added. | + +| Object returned by send operation has: | Type | Description | +| - | - | - | +| raftCommandIndex | `number` | Index of the raft command that was executed | +| name | `string` | Database name | +| topology | `DatabaseTopology` | The database topology | +| nodesAddedTo | `string[]` | New nodes added to the cluster topology.<br/>
Will be 0 for this operation. | + + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-php.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-php.mdx new file mode 100644 index 0000000000..64e98a4f11 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-php.mdx @@ -0,0 +1,90 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When creating a database, you can specify the number of replicas for that database. + This determines the number of database instances in the database-group. + +* **The number of replicas can be dynamically increased** even after the database is up and running, + by adding more nodes to the database-group. + +* The nodes added must already exist in the [cluster topology](../../../server/clustering/rachis/cluster-topology.mdx). + +* Once a new node is added to the database-group, + the cluster assigns a mentor node (from the existing database-group nodes) to update the new node. + +* In this page: + * [Add database node - random](../../../client-api/operations/server-wide/add-database-node.mdx#add-database-node---random) + * [Add database node - specific](../../../client-api/operations/server-wide/add-database-node.mdx#add-database-node---specific) + * [Syntax](../../../client-api/operations/server-wide/add-database-node.mdx#syntax) + +## Add database node - random + +* Use `AddDatabaseNodeOperation` to add another database-instance to the database-group. +* The node added will be a random node from the existing cluster nodes. + + + +{`// Create the AddDatabaseNodeOperation +// Add a random node to 'Northwind' database-group +$addDatabaseNodeOp = new AddDatabaseNodeOperation("Northwind"); + +// Execute the operation by passing it to Maintenance.Server.Send +/** @var DatabasePutResult $result */ +$result = $store->maintenance()->server()->send($addDatabaseNodeOp); + +// Can access the new topology +$numberOfReplicas = count($result->getTopology()->getMembers()); +`} + + + + + +## Add database node - specific + +* You can specify the node tag to add. +* This node must already exist in the cluster topology. + + + +{`// Create the AddDatabaseNodeOperation +// Add node C to 'Northwind' database-group +$addDatabaseNodeOp = new AddDatabaseNodeOperation("Northwind", "C"); + +// Execute the operation by passing it to Maintenance.Server.Send +/** @var DatabasePutResult $result */ +$result = $store->maintenance()->server()->send($addDatabaseNodeOp); +`} + + + + + +## Syntax + + + +{`AddDatabaseNodeOperation(?string $databaseName, ?string $nodeTag = null) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **$databaseName** | `?string` | Name of a database for which to add the node. | +| **$nodeTag** | `?string` | Tag of node to add.
Default: a random node from the existing cluster topology will be added. | + +| Object returned by Send operation:
`DatabasePutResult` | Type | Description | +| - | - | - | +| $name | `string` | Database name | +| $topology | `DatabaseTopology` | The database topology | +| $nodesAddedTo | `StringArray` | New nodes added to the cluster topology.
Will be 0 for this operation. | +| $raftCommandIndex | `int` | Raft command index | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-python.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-python.mdx new file mode 100644 index 0000000000..cc0517815d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_add-database-node-python.mdx @@ -0,0 +1,89 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When creating a database, you can specify the number of replicas for that database. + This determines the number of database instances in the database-group. + +* **The number of replicas can be dynamically increased** even after the database is up and running, + by adding more nodes to the database-group. + +* The nodes added must already exist in the [cluster topology](../../../server/clustering/rachis/cluster-topology.mdx). + +* Once a new node is added to the database-group, + the cluster assigns a mentor node (from the existing database-group nodes) to update the new node. + +* In this page: + * [Add database node - random](../../../client-api/operations/server-wide/add-database-node.mdx#add-database-node---random) + * [Add database node - specific](../../../client-api/operations/server-wide/add-database-node.mdx#add-database-node---specific) + * [Syntax](../../../client-api/operations/server-wide/add-database-node.mdx#syntax) + +## Add database node - random + +* Use `AddDatabaseNodeOperation` to add another database-instance to the database-group. +* The node added will be a random node from the existing cluster nodes. + + + +{`# Create the AddDatabaseNodeOperation +# Add a random node to 'Northwind' database-group +add_database_node_op = AddDatabaseNodeOperation("Northwind") + +# Execute the operation by passing it to maintenance.server.send +result = store.maintenance.server.send(add_database_node_op) + +# Can access the new topology +number_of_replicas = len(result.topology.all_nodes) +`} + + + + + +## Add database node - specific + +* You can specify the node tag to add. +* This node must already exist in the cluster topology. + + + +{`# Create the AddDatabaseNodeOperation +# Add node C to 'Northwind +add_database_node_op = AddDatabaseNodeOperation("Northwind", "C") + +# Execute the operation by passing it to maintenance.server.send +result = store.maintenance.server.send(add_database_node_op) +`} + + + + + +## Syntax + + + +{`class AddDatabaseNodeOperation(ServerOperation[DatabasePutResult]): + def __init__(self, database_name: str, node_tag: str = None): ... +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **database_name** | `str` | Name of a database for which to add the node. | +| **node_tag** | `str` | Tag of node to add.
Default: a random node from the existing cluster topology will be added. | + +| Object returned by Send operation:
`DatabasePutResult` | Type | Description | +| - | - | - | +| raft_command_index | `int` | Index of the raft command that was executed | +| name | `str` | Database name | +| topology | `DatabaseTopology` | The database topology | +| nodes_added_to | `list` | New nodes added to the cluster topology.
Will be 0 for this operation. | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_category_.json b/versioned_docs/version-7.1/client-api/operations/server-wide/_category_.json new file mode 100644 index 0000000000..581d93d734 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 4, + "label": "Server-Maintenance" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-csharp.mdx new file mode 100644 index 0000000000..6b121e989a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-csharp.mdx @@ -0,0 +1,346 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `CompactDatabaseOperation` compaction operation to **remove empty gaps on disk** + that still occupy space after deletes. + You can choose whether to compact _documents_ and/or _selected indexes_. + +* **During compaction the database will be offline**. + The operation is executed asynchronously as a background operation and can be awaited. + +* The operation will **compact the database on one node**. + To compact all database-group nodes, the command must be sent to each node separately. + +* **Target node**: + By default, the operation will be executed on the server node that is defined by the + [client configuration](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + The operation can be executed on a specific node by using the + [ForNode](../../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) method. + +* **Target database**: + The database to compact is specified in `CompactSettings` (see examples below). + An exception is thrown if the specified database doesn't exist on the server node. + +* In this page: + * [Examples](../../../client-api/operations/server-wide/compact-database.mdx#examples): + * [Compact documents](../../../client-api/operations/server-wide/compact-database.mdx#examples) + * [Compact specific indexes](../../../client-api/operations/server-wide/compact-database.mdx#compact-specific-indexes) + * [Compact all indexes](../../../client-api/operations/server-wide/compact-database.mdx#compact-all-indexes) + * [Compact on other nodes](../../../client-api/operations/server-wide/compact-database.mdx#compact-on-other-nodes) + * [Compaction triggers compression](../../../client-api/operations/server-wide/compact-database.mdx#compaction-triggers-compression) + * [Compact from Studio](../../../client-api/operations/server-wide/compact-database.mdx#compact-from-studio) + * [Syntax](../../../client-api/operations/server-wide/compact-database.mdx#syntax) + + +## Examples + +#### Compact documents: + +The following example will compact only **documents** for the specified database. <br/>
+ + + + +{`// Define the compact settings +CompactSettings settings = new CompactSettings +{ + // Database to compact + DatabaseName = "Northwind", + + // Set 'Documents' to true to compact all documents in database + // Indexes are not set and will not be compacted + Documents = true +}; + +// Define the compact operation, pass the settings +IServerOperation compactOp = new CompactDatabaseOperation(settings); + +// Execute compaction by passing the operation to Maintenance.Server.Send +Operation operation = documentStore.Maintenance.Server.Send(compactOp); + +// Wait for operation to complete, during compaction the database is offline +operation.WaitForCompletion(); +`} + + + + +{`// Define the compact settings +CompactSettings settings = new CompactSettings +{ + // Database to compact + DatabaseName = "Northwind", + + // Set 'Documents' to true to compact all documents in database + // Indexes are not set and will not be compacted + Documents = true +}; + +// Define the compact operation, pass the settings +IServerOperation compactOp = new CompactDatabaseOperation(settings); + +// Execute compaction by passing the operation to Maintenance.Server.SendAsync +Operation operation = await documentStore.Maintenance.Server.SendAsync(compactOp); + +// Wait for operation to complete, during compaction the database is offline +await operation.WaitForCompletionAsync().ConfigureAwait(false); +`} + + + +#### Compact specific indexes: + +The following example will compact only specific indexes. + + + + +{`// Define the compact settings +CompactSettings settings = new CompactSettings +{ + // Database to compact + DatabaseName = "Northwind", + + // Setting 'Documents' to false will compact only the specified indexes + Documents = false, + + // Specify which indexes to compact + Indexes = new[] { "Orders/Totals", "Orders/ByCompany" }, + + // Optimize indexes is Lucene's feature to gain disk space and efficiency + // Set whether to skip this optimization when compacting the indexes + SkipOptimizeIndexes = false +}; + +// Define the compact operation, pass the settings +IServerOperation compactOp = new CompactDatabaseOperation(settings); + +// Execute compaction by passing the operation to Maintenance.Server.Send +Operation operation = documentStore.Maintenance.Server.Send(compactOp); +// Wait for operation to complete +operation.WaitForCompletion(); +`} + + + + +{`// Define the compact settings +CompactSettings settings = new CompactSettings +{ + // Database to compact + DatabaseName = "Northwind", + + // Setting 'Documents' to false will compact only the specified indexes + Documents = false, + + // Specify which indexes to compact + Indexes = new[] { "Orders/Totals", "Orders/ByCompany" }, + + // Optimize indexes is Lucene's feature to gain disk space and efficiency + // Set whether to skip this optimization when compacting the indexes + SkipOptimizeIndexes = false +}; + +// Define the compact operation, pass the settings +IServerOperation compactOp = new CompactDatabaseOperation(settings); + +// Execute compaction by passing the operation to Maintenance.Server.SendAsync +Operation operation = await documentStore.Maintenance.Server.SendAsync(compactOp); +// Wait for operation to complete +await operation.WaitForCompletionAsync().ConfigureAwait(false); +`} + + + +#### Compact all indexes: + +The following example will compact all indexes and documents. 
+ + + + +{`// Get all indexes names in the database using the 'GetIndexNamesOperation' operation +// Use 'ForDatabase' if the target database is different than the default database defined on the store +string[] allIndexNames = + documentStore.Maintenance.ForDatabase("Northwind") + .Send(new GetIndexNamesOperation(0, int.MaxValue)); + +// Define the compact settings +CompactSettings settings = new CompactSettings +{ + DatabaseName = "Northwind", // Database to compact + + Documents = true, // Compact all documents + + Indexes = allIndexNames, // All indexes will be compacted + + SkipOptimizeIndexes = true // Skip Lucene's indexes optimization +}; + +// Define the compact operation, pass the settings +IServerOperation compactOp = new CompactDatabaseOperation(settings); + +// Execute compaction by passing the operation to Maintenance.Server.Send +Operation operation = documentStore.Maintenance.Server.Send(compactOp); +// Wait for operation to complete +operation.WaitForCompletion(); +`} + + + + +{`// Get all indexes names in the database using the 'GetIndexNamesOperation' operation +// Use 'ForDatabase' if the target database is different than the default database defined on the store +string[] allIndexNames = + documentStore.Maintenance.ForDatabase("Northwind") + .Send(new GetIndexNamesOperation(0, int.MaxValue)); + +// Define the compact settings +CompactSettings settings = new CompactSettings +{ + DatabaseName = "Northwind", // Database to compact + + Documents = true, // Compact all documents + + Indexes = allIndexNames, // All indexes will be compacted + + SkipOptimizeIndexes = true // Skip Lucene's indexes optimization +}; + +// Define the compact operation, pass the settings +IServerOperation compactOp = new CompactDatabaseOperation(settings); + +// Execute compaction by passing the operation to Maintenance.Server.SendAsync +Operation operation = await documentStore.Maintenance.Server.SendAsync(compactOp); +// Wait for operation to complete +await operation.WaitForCompletionAsync(); +`} + + + +#### Compact on other nodes: + +* By default, an operation executes on the server node that is defined by the [client configuration](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). +* The following example will compact the database on all [member](../../../server/clustering/rachis/cluster-topology.mdx#nodes-states-and-types) nodes from its database-group topology. + `ForNode` is used to execute the operation on a specific node. 
+ + + + +{`// Get all member nodes in the database-group using the 'GetDatabaseRecordOperation' operation +List allMemberNodes = + documentStore.Maintenance.Server.Send(new GetDatabaseRecordOperation("Northwind")) + .Topology.Members; + +// Define the compact settings as needed +CompactSettings settings = new CompactSettings +{ + // Database to compact + DatabaseName = "Northwind", + + //Compact all documents in database + Documents = true +}; + +// Execute the compact operation on each member node +foreach (string nodeTag in allMemberNodes) +{ + // Define the compact operation, pass the settings + IServerOperation compactOp = new CompactDatabaseOperation(settings); + + // Execute the operation on a specific node + // Use \`ForNode\` to specify the node to operate on + Operation operation = documentStore.Maintenance.Server.ForNode(nodeTag).Send(compactOp); + // Wait for operation to complete + operation.WaitForCompletion(); +} +`} + + + + +{`// Get all member nodes in the database-group using the 'GetDatabaseRecordOperation' operation +List allMemberNodes = + documentStore.Maintenance.Server.Send(new GetDatabaseRecordOperation("Northwind")) + .Topology.Members; + +// Define the compact settings as needed +CompactSettings settings = new CompactSettings +{ + // Database to compact + DatabaseName = "Northwind", + + //Compact all documents in database + Documents = true +}; + +// Execute the compact operation on each member node +foreach (string nodeTag in allMemberNodes) +{ + // Define the compact operation, pass the settings + IServerOperation compactOp = new CompactDatabaseOperation(settings); + + // Execute the operation on a specific node + // Use \`ForNode\` to specify the node to operate on + Operation operation = await documentStore.Maintenance.Server.ForNode(nodeTag).SendAsync(compactOp); + // Wait for operation to complete + await operation.WaitForCompletionAsync(); +} +`} + + + + + + +## Compaction triggers compression + +* When document [compression](../../../server/storage/documents-compression.mdx) is turned on, compression is applied to the documents when: + * **New** documents that are created and saved. + * **Existing** documents that are modified and saved. + +* You can use the [compaction](../../../client-api/operations/server-wide/compact-database.mdx) operation to **compress existing documents without having to modify and save** them. + Executing compaction triggers compression on ALL existing documents for the collections that are configured for compression. + +* Learn more about Compression -vs- Compaction [here](../../../server/storage/documents-compression.mdx#compression--vs--compaction). + + + +## Compact from Studio + +* Compaction can be triggered from the [Storage Report](../../../studio/database/stats/storage-report.mdx) view in the Studio. + The operation will compact the database only on the node being viewed (node info is in the Studio footer). + +* To compact the database on another node, + simply trigger compaction from the Storage Report view in a browser tab opened for that other node. + + + +## Syntax + + + +{`public CompactDatabaseOperation(CompactSettings compactSettings) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **compactSettings** | `CompactSettings` | Settings for the compact operation | + +| `CompactSettings` | Type | Description | +| - | - | - | +| **DatabaseName** | `string` | Name of database to compact. Mandatory param. | +| **Documents** | `bool` | Indicates if documents should be compacted. Optional param. 
| +| **Indexes** | `string[]` | List of index names to compact. Optional param. | +| **SkipOptimizeIndexes** | `bool` | `true` - Skip Lucene's index optimization while compacting
`false` - Lucene's index optimization will take place while compacting | +| | | **Note**: Either _Documents_ or _Indexes_ (or both) must be specified | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-nodejs.mdx new file mode 100644 index 0000000000..b0e37bef04 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-nodejs.mdx @@ -0,0 +1,230 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `CompactDatabaseOperation` compaction operation to **remove empty gaps on disk** + that still occupy space after deletes. + You can choose whether to compact _documents_ and/or _selected indexes_. + +* **During compaction the database will be offline**. + The operation is executed asynchronously as a background operation and can be awaited. + +* The operation will **compact the database on one node**. + To compact all database-group nodes, the command must be sent to each node separately. + +* **Target node**: + By default, the operation will be executed on the server node that is defined by the + [client configuration](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + The operation can be executed on a specific node by using the + [forNode](../../../client-api/operations/how-to/switch-operations-to-a-different-node.mdx) method. + +* **Target database**: + The database to compact is specified in `CompactSettings` (see examples below). + An exception is thrown if the specified database doesn't exist on the server node. + +* In this page: + * [Examples](../../../client-api/operations/server-wide/compact-database.mdx#examples): + * [Compact documents](../../../client-api/operations/server-wide/compact-database.mdx#examples) + * [Compact specific indexes](../../../client-api/operations/server-wide/compact-database.mdx#compact-specific-indexes) + * [Compact all indexes](../../../client-api/operations/server-wide/compact-database.mdx#compact-all-indexes) + * [Compact on other nodes](../../../client-api/operations/server-wide/compact-database.mdx#compact-on-other-nodes) + * [Compaction triggers compression](../../../client-api/operations/server-wide/compact-database.mdx#compaction-triggers-compression) + * [Compact from Studio](../../../client-api/operations/server-wide/compact-database.mdx#compact-from-studio) + * [Syntax](../../../client-api/operations/server-wide/compact-database.mdx#syntax) + + +## Examples + + + +#### Compact documents + +* The following example will compact only **documents** for the specified database. <br/>
+ +{`// Define the compact settings +const compactSettings = \{ + // Database to compact + databaseName: "Northwind", + + // Set 'documents' to true to compact all documents in database + // Indexes are not set and will not be compacted + documents: true +\}; + +// Define the compact operation, pass the settings +const compactOp = new CompactDatabaseOperation(compactSettings); + +// Execute compaction by passing the operation to maintenance.server.send +const asyncOperation = await documentStore.maintenance.server.send(compactOp); + +// Wait for operation to complete, during compaction the database is offline +await asyncOperation.waitForCompletion(); +`} + + + + + + + +#### Compact specific indexes + +* The following example will compact only specific indexes. + + + +{`// Define the compact settings +const compactSettings = \{ + // Database to compact + databaseName: "Northwind", + + // Setting 'documents' to false will compact only the specified indexes + documents: false, + + // Specify which indexes to compact + indexes: ["Orders/Totals", "Orders/ByCompany"] +\}; + +// Define the compact operation, pass the settings +const compactOp = new CompactDatabaseOperation(compactSettings); + +// Execute compaction by passing the operation to maintenance.server.send +const asyncOperation = await documentStore.maintenance.server.send(compactOp); +// Wait for operation to complete +await asyncOperation.waitForCompletion(); +`} + + + + + + + +#### Compact all indexes + +* The following example will compact all indexes and documents. + + + +{`// Get all indexes names in the database using the 'GetIndexNamesOperation' operation +// Use 'forDatabase' if the target database is different than the default database defined on the store +const allIndexNames = await documentStore.maintenance.forDatabase("Northwind") + .send(new GetIndexNamesOperation(0, 50)); + +// Define the compact settings +const compactSettings = \{ + databaseName: "Northwind", // Database to compact + + documents: true, // Compact all documents + + indexes: allIndexNames, // All indexes will be compacted +\}; + +// Define the compact operation, pass the settings +const compactOp = new CompactDatabaseOperation(compactSettings); + +// Execute compaction by passing the operation to maintenance.server.send +const asyncOperation = await documentStore.maintenance.server.send(compactOp); +// Wait for operation to complete +await asyncOperation.waitForCompletion(); +`} + + + + + + + +#### Compact on other nodes + +* By default, an operation executes on the server node that is defined by the [client configuration](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). +* The following example will compact the database on all [member](../../../server/clustering/rachis/cluster-topology.mdx#nodes-states-and-types) nodes from its database-group topology. + `forNode` is used to execute the operation on a specific node. <br/>
+ + + +{`// Get all member nodes in the database-group using the 'GetDatabaseRecordOperation' operation +const databaseRecord = + await documentStore.maintenance.server.send(new GetDatabaseRecordOperation("Northwind")); +const allMemberNodes = databaseRecord.topology.members; + +// Define the compact settings as needed +const compactSettings = \{ + // Database to compact + databaseName: "Northwind", + + //Compact all documents in database + documents: true +\}; + +// Execute the compact operation on each member node +for (let i = 0; i < allMemberNodes.length; i++) \{ + // Define the compact operation, pass the settings + const compactOp = new CompactDatabaseOperation(compactSettings); + + // Execute the operation on a specific node + // Use \`forNode\` to specify the node to operate on + const serverOpExecutor = await documentStore.maintenance.server.forNode(allMemberNodes[i]); + const asyncOperation = await serverOpExecutor.send(compactOp); + + // Wait for operation to complete + await asyncOperation.waitForCompletion(); +\} +`} + + + + + + +## Compaction triggers compression + +* When document [compression](../../../server/storage/documents-compression.mdx) is turned on, compression is applied to the documents when: + * **New** documents that are created and saved. + * **Existing** documents that are modified and saved. + +* You can use the [compaction](../../../client-api/operations/server-wide/compact-database.mdx) operation to **compress existing documents without having to modify and save** them. + Executing compaction triggers compression on ALL existing documents for the collections that are configured for compression. + +* Learn more about Compression -vs- Compaction [here](../../../server/storage/documents-compression.mdx#compression--vs--compaction). + + + +## Compact from Studio + +* Compaction can be triggered from the [Storage Report](../../../studio/database/stats/storage-report.mdx) view in the Studio. + The operation will compact the database only on the node being viewed (node info is in the Studio footer). + +* To compact the database on another node, + simply trigger compaction from the Storage Report view in a browser tab opened for that other node. + + + +## Syntax + + + +{`const compactOperation = new CompactDatabaseOperation(compactSettings); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **compactSettings** | `object` | Settings for the compact operation.
See object fields below. | + +| compactSettings field | Type | Description | +| - | - | - | +| **databaseName** | `string` | Name of database to compact. Mandatory param. | +| **documents** | `boolean` | Indicates if documents should be compacted. Optional param. | +| **indexes** | `string[]` | List of index names to compact. Optional param. | +| | | **Note**: Either _Documents_ or _Indexes_ (or both) must be specified | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-php.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-php.mdx new file mode 100644 index 0000000000..86ad896f67 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-php.mdx @@ -0,0 +1,220 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `CompactDatabaseOperation` compaction operation to **remove empty gaps on disk** + that still occupy space after deletes. + You can choose whether to compact _documents_ and/or _selected indexes_. + +* **During compaction the database will be offline**. + The operation is executed asynchronously as a background operation and can be waited for + using `waitForCompletion()`. + +* The operation will **compact the database on one node**. + To compact all database-group nodes, the command must be sent to each node separately. + +* **Target node**: + By default, the operation will be executed on the server node that is defined by the + [client configuration](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* **Target database**: + The database to compact is specified in `CompactSettings` (see examples below). + An exception is thrown if the specified database doesn't exist on the server node. + +* In this page: + * [Examples](../../../client-api/operations/server-wide/compact-database.mdx#examples): + * [Compact documents](../../../client-api/operations/server-wide/compact-database.mdx#examples) + * [Compact specific indexes](../../../client-api/operations/server-wide/compact-database.mdx#compact-specific-indexes) + * [Compact all indexes](../../../client-api/operations/server-wide/compact-database.mdx#compact-all-indexes) + * [Compact on other nodes](../../../client-api/operations/server-wide/compact-database.mdx#compact-on-other-nodes) + * [Compaction triggers compression](../../../client-api/operations/server-wide/compact-database.mdx#compaction-triggers-compression) + * [Compact from Studio](../../../client-api/operations/server-wide/compact-database.mdx#compact-from-studio) + * [Syntax](../../../client-api/operations/server-wide/compact-database.mdx#syntax) + + +## Examples + +#### Compact documents: + +The following example will compact only **documents** for the specified database. <br/>
+ + + +{`// Define the compact settings +$settings = new CompactSettings(); +$settings->setDatabaseName("Northwind"); +// Set 'Documents' to true to compact all documents in database +// Indexes are not set and will not be compacted +$settings->setDocuments(true); + + +// Define the compact operation, pass the settings +/** @var OperationIdResult $compactOp */ +$compactOp = new CompactDatabaseOperation($settings); + +// Execute compaction by passing the operation to Maintenance.Server.Send +/** @var Operation $operation */ +$operation = $documentStore->maintenance()->server()->send($compactOp); + +// Wait for operation to complete, during compaction the database is offline +$operation->waitForCompletion(); +`} + + +#### Compact specific indexes: + +The following example will compact only specific indexes. + + + +{`// Define the compact settings +$settings = new CompactSettings(); + +// Database to compact +$settings->setDatabaseName("Northwind"); + +// Setting 'Documents' to false will compact only the specified indexes +$settings->setDocuments(false); + +// Specify which indexes to compact +$settings->setIndexes([ "Orders/Totals", "Orders/ByCompany" ]); + +// Optimize indexes is Lucene's feature to gain disk space and efficiency +// Set whether to skip this optimization when compacting the indexes +$settings->setSkipOptimizeIndexes(false); + + +// Define the compact operation, pass the settings +/** @var OperationIdResult $compactOp */ +$compactOp = new CompactDatabaseOperation($settings); + +// Execute compaction by passing the operation to Maintenance.Server.Send +/** @var Operation $operation */ +$operation = $documentStore->maintenance()->server()->send($compactOp); +// Wait for operation to complete +$operation->waitForCompletion(); +`} + + +#### Compact all indexes: + +The following example will compact all indexes and documents. + + + +{`// Get all indexes names in the database using the 'GetIndexNamesOperation' operation +// Use 'ForDatabase' if the target database is different than the default database defined on the store +/** @var StringArrayResult $allIndexNames */ +$allIndexNames = $documentStore->maintenance()->forDatabase("Northwind") + ->send(new GetIndexNamesOperation(0, PhpClient::INT_MAX_VALUE)); + +// Define the compact settings +$settings = new CompactSettings(); +$settings->setDatabaseName("Northwind"); // Database to compact +$settings->setDocuments(true); // Compact all documents +$settings->setIndexes($allIndexNames->getArrayCopy()); // All indexes will be compacted +$settings->setSkipOptimizeIndexes(true); // Skip Lucene's indexes optimization + +// Define the compact operation, pass the settings +/** @var OperationIdResult $compactOp */ +$compactOp = new CompactDatabaseOperation($settings); + +// Execute compaction by passing the operation to Maintenance.Server.Send +/** @var Operation $operation */ +$operation = $documentStore->maintenance()->server()->send($compactOp); + +// Wait for operation to complete +$operation->waitForCompletion(); +`} + + +#### Compact on other nodes: + +* By default, an operation executes on the server node that is defined by the [client configuration](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). +* The following example will compact the database on all [member](../../../server/clustering/rachis/cluster-topology.mdx#nodes-states-and-types) nodes from its database-group topology. + `forNode` is used to execute the operation on a specific node. 
+ + + +{`// Get all member nodes in the database-group using the 'GetDatabaseRecordOperation' operation +/** @var DatabaseRecordWithEtag $databaseRecord */ +$databaseRecord = $documentStore->maintenance()->server()->send(new GetDatabaseRecordOperation("Northwind")); + +$allMemberNodes = $databaseRecord->getTopology()->getMembers(); + +// Define the compact settings as needed +$settings = new CompactSettings(); + +$settings->setDatabaseName("Northwind"); +$settings->setDocuments(true); //Compact all documents in database + +// Execute the compact operation on each member node +foreach ($allMemberNodes as $nodeTag) \{ + // Define the compact operation, pass the settings + /** @var OperationIdResult $compactOp */ + $compactOp = new CompactDatabaseOperation($settings); + + // Execute the operation on a specific node + // Use \`ForNode\` to specify the node to operate on + /** @var Operation $operation */ + $operation = $documentStore->maintenance()->server()->forNode($nodeTag)->send($compactOp); + // Wait for operation to complete + $operation->waitForCompletion(); +\} +`} + + + + + +## Compaction triggers compression + +* When document [compression](../../../server/storage/documents-compression.mdx) is turned on, compression is applied to the documents when: + * **New** documents that are created and saved. + * **Existing** documents that are modified and saved. + +* You can use the [compaction](../../../client-api/operations/server-wide/compact-database.mdx) operation + to **compress existing documents without having to modify and save** them. + Executing compaction triggers compression on ALL existing documents for the collections that are configured for compression. + +* Learn more about Compression -vs- Compaction [here](../../../server/storage/documents-compression.mdx#compression--vs--compaction). + + + +## Compact from Studio + +* Compaction can be triggered from the [Storage Report](../../../studio/database/stats/storage-report.mdx) view in the Studio. + The operation will compact the database only on the node being viewed (node info is in the Studio footer). + +* To compact the database on another node, + simply trigger compaction from the Storage Report view in a browser tab opened for that other node. + + + +## Syntax + + + +{`public CompactDatabaseOperation(CompactSettings compactSettings) +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **$compactSettings** | `?CompactSettings` | Settings for the compact operation | + +| `$compactSettings` class parameters | Type | Description | +| - | - | - | +| **$databaseName** | `?string` | Name of database to compact. Mandatory param. | +| **$documents** | `bool` | Indicates if documents should be compacted. Optional param. | +| **$indexes** | `?StringArray` | List of index names to compact. Optional param. | +| **$skipOptimizeIndexes** | `bool` | `true` - Skip Lucene's index optimization while compacting
`false` - Lucene's index optimization will take place while compacting | +| | | **Note**: Either **$documents** or **$indexes** (or both) must be specified | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-python.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-python.mdx new file mode 100644 index 0000000000..0317aec27a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_compact-database-python.mdx @@ -0,0 +1,149 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `CompactDatabaseOperation` compaction operation to **remove empty gaps on disk** + that still occupy space after deletes. + You can choose whether to compact _documents_ and/or _selected indexes_. + +* **During compaction the database will be offline**. + The operation is executed asynchronously as a background operation and can be waited for + using `wait_for_completion`. + +* The operation will **compact the database on one node**. + To compact all database-group nodes, the command must be sent to each node separately. + +* **Target node**: + By default, the operation will be executed on the server node that is defined by the + [client configuration](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node). + +* **Target database**: + The database to compact is specified in `CompactSettings` (see examples below). + An exception is thrown if the specified database doesn't exist on the server node. + +* In this page: + * [Examples](../../../client-api/operations/server-wide/compact-database.mdx#examples): + * [Compact documents](../../../client-api/operations/server-wide/compact-database.mdx#examples) + * [Compact specific indexes](../../../client-api/operations/server-wide/compact-database.mdx#compact-specific-indexes) + * [Compact all indexes](../../../client-api/operations/server-wide/compact-database.mdx#compact-all-indexes) + * [Compaction triggers compression](../../../client-api/operations/server-wide/compact-database.mdx#compaction-triggers-compression) + * [Compact from Studio](../../../client-api/operations/server-wide/compact-database.mdx#compact-from-studio) + * [Compact on other nodes](../../../client-api/operations/server-wide/compact-database.mdx#compact-on-other-nodes) + + +## Examples + +#### Compact documents: + +The following example will compact only **documents** for the specified database. + + + +{`# Define the compact settings +settings = CompactSettings( + # Database to compact + "Northwind", + # Set 'documents' to True to compact all documents in database + # Indexes are not set and will not be compacted + documents=True, +) + +# Define the compact operation, pass the settings +compact_op = CompactDatabaseOperation(settings) + +# Execute compaction by passing the operation to maintenance.server.send +operation = store.maintenance.server.send_async(compact_op) + +# Wait for operation to complete, during compaction the database is offline +operation.wait_for_completion() +`} + +#### Compact specific indexes: + +The following example will compact only specific indexes. <br/>
+ + + +{`# Define the compact settings +settings = CompactSettings( + # Database to compact + database_name="Northwind", + # Setting 'documents' to False will compact only the specified indexes + documents=False, + # Specify which indexes to compact + indexes=["Orders/Totals", "Orders/ByCompany"], + # Optimize indexes is Lucene's feature to gain disk space and efficiency + # Set whether to skip this optimization when compacting the indexes + skip_optimize_indexes=False, +) +# Define the compact operation, pass the settings +compact_op = CompactDatabaseOperation(settings) + +# Execute compaction by passing the operation to maintenance.server.send +operation = store.maintenance.server.send_async(compact_op) +# Wait for operation to complete +operation.wait_for_completion() +`} + + +#### Compact all indexes: + +The following example will compact all indexes and documents. + + + +{`# Get all indexes names in the database using the 'GetIndexNamesOperation' operation +# Use 'ForDatabase' if the target database is different from the default database defined on the store +all_indexes_names = store.maintenance.for_database("Northwind").send(GetIndexNamesOperation(0, int_max)) + +# Define the compact settings +settings = CompactSettings( + database_name="Northwind", # Database to compact + documents=True, # Compact all documents + indexes=all_indexes_names, # All indexes will be compacted + skip_optimize_indexes=True, # Skip Lucene's indexes optimization +) + +# Define the compact operation, pass the settings +compact_op = CompactDatabaseOperation(settings) + +# Execute compaction by passing the operation to maintenance.server.send +operation = store.maintenance.server.send(compact_op) +# Wait for operation to complete +operation.wait_for_completion() +`} + + + + + +## Compaction triggers compression + +* When document [compression](../../../server/storage/documents-compression.mdx) is turned on, compression is applied to the documents when: + * **New** documents that are created and saved. + * **Existing** documents that are modified and saved. + +* You can use the [compaction](../../../client-api/operations/server-wide/compact-database.mdx) operation + to **compress existing documents without having to modify and save** them. + Executing compaction triggers compression on ALL existing documents for the collections that are configured for compression. + +* Learn more about Compression -vs- Compaction [here](../../../server/storage/documents-compression.mdx#compression--vs--compaction). + + + +## Compact from Studio + +* Compaction can be triggered from the [Storage Report](../../../studio/database/stats/storage-report.mdx) view in the Studio. + The operation will compact the database only on the node being viewed (node info is in the Studio footer). + +* To compact the database on another node, + simply trigger compaction from the Storage Report view in a browser tab opened for that other node. 
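+ +## Compact on other nodes + +* The C#, Node.js, and PHP versions of this page also show how to compact the database on each node of the database-group. + The following is a minimal Python sketch of the same flow, given for completeness. + It is an illustration only - it assumes the Python client exposes `GetDatabaseRecordOperation`, a `topology.members` property on the returned record, and a `for_node` method on `store.maintenance.server` that mirrors the C# `ForNode` method shown for the other clients. + Verify these names against the client version you use. + + + +{`# Sketch only - GetDatabaseRecordOperation, topology.members and for_node +# are assumed here to mirror the C# / Node.js APIs shown on this page + +# Get all member nodes in the database-group +database_record = store.maintenance.server.send(GetDatabaseRecordOperation("Northwind")) +all_member_nodes = database_record.topology.members + +# Define the compact settings as needed +settings = CompactSettings( + # Database to compact + "Northwind", + # Compact all documents in database + documents=True, +) + +# Execute the compact operation on each member node +for node_tag in all_member_nodes: + compact_op = CompactDatabaseOperation(settings) + # Execute the operation on a specific node (for_node is assumed) + operation = store.maintenance.server.for_node(node_tag).send_async(compact_op) + # Wait for operation to complete on this node + operation.wait_for_completion() +`} + 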
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_create-database-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_create-database-csharp.mdx
new file mode 100644
index 0000000000..0fe8e76f04
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_create-database-csharp.mdx
@@ -0,0 +1,453 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `CreateDatabaseOperation` to create a new database from the **Client API**, as described below.
+  To create a new database from the **Studio**, see [Create database](../../../studio/database/create-new-database/general-flow.mdx).
+
+* This operation requires a client certificate with a security clearance of _Operator_ or _ClusterAdmin_.
+  To learn which operations are allowed at each level, see [Security clearance and permissions](../../../server/security/authorization/security-clearance-and-permissions.mdx).
+
+* In this article:
+  * [Create new database](../../../client-api/operations/server-wide/create-database.mdx#create-new-database)
+      * [Example I - Create non-sharded database](../../../client-api/operations/server-wide/create-database.mdx#example-i---create-non-sharded-database)
+      * [Example II - Create sharded database](../../../client-api/operations/server-wide/create-database.mdx#example-ii---create-sharded-database)
+      * [Example III - Ensure database does not exist before creating](../../../client-api/operations/server-wide/create-database.mdx#example-iii---ensure-database-does-not-exist-before-creating)
+  * [Syntax](../../../client-api/operations/server-wide/create-database.mdx#syntax)
+
+
+## Create new database
+
+
+
+##### Example I - Create non-sharded database
+* The following simple example creates a non-sharded database with the default replication factor of 1.
+
+
+
+
+{`// Define the create database operation, pass an instance of DatabaseRecord
+var createDatabaseOp = new CreateDatabaseOperation(new DatabaseRecord("DatabaseName"));
+
+// Execute the operation by passing it to Maintenance.Server.Send
+store.Maintenance.Server.Send(createDatabaseOp);
+`}
+
+
+
+{`// Define the create database operation, pass an instance of DatabaseRecord
+var createDatabaseOp = new CreateDatabaseOperation(new DatabaseRecord("DatabaseName"));
+
+// Execute the operation by passing it to Maintenance.Server.SendAsync
+await store.Maintenance.Server.SendAsync(createDatabaseOp);
+`}
+
+
+
+{`// Define the create database operation
+var createDatabaseOp = new CreateDatabaseOperation(builder => builder
+    // Call 'Regular' to create a non-sharded database
+    .Regular("DatabaseName"));
+
+// Execute the operation by passing it to Maintenance.Server.Send
+store.Maintenance.Server.Send(createDatabaseOp);
+`}
+
+
+
+{`// Define the create database operation
+var createDatabaseOp = new CreateDatabaseOperation(builder => builder
+    // Call 'Regular' to create a non-sharded database
+    .Regular("DatabaseName"));
+
+// Execute the operation by passing it to Maintenance.Server.SendAsync
+await store.Maintenance.Server.SendAsync(createDatabaseOp);
+`}
+
+
+
+
+
+
+
+##### Example II - Create sharded database
+* The following example creates a sharded database with 3 shards, each with a replication factor of 2.
+* In addition, it:
+  * enables revisions
+  * enables document expiration
+  * applies some configuration settings to the database.
+
+
+
+
+{`// Define the database record:
+var databaseRecord = new DatabaseRecord("ShardedDatabaseName") {
+
+    // Configure sharding:
+    Sharding = new ShardingConfiguration()
+    {
+        // Ensure nodes "A", "B", and "C" are available in the cluster
+        // before executing the database creation.
+        Shards = new Dictionary<int, DatabaseTopology>()
+        {
+            {0, new DatabaseTopology { Members = new List<string> { "A", "B" }}},
+            {1, new DatabaseTopology { Members = new List<string> { "A", "C" }}},
+            {2, new DatabaseTopology { Members = new List<string> { "B", "C" }}}
+        }
+    },
+
+    // Enable revisions on all collections:
+    Revisions = new RevisionsConfiguration()
+    {
+        Default = new RevisionsCollectionConfiguration()
+        {
+            Disabled = false, MinimumRevisionsToKeep = 5
+        }
+    },
+
+    // Enable the document expiration feature:
+    Expiration = new ExpirationConfiguration()
+    {
+        Disabled = false
+    },
+
+    // Apply some database configuration settings:
+    Settings = new Dictionary<string, string>()
+    {
+        {"Databases.QueryTimeoutInSec", "500"}
+    }
+};
+
+// Define the create database operation
+var createDatabaseOp = new CreateDatabaseOperation(databaseRecord);
+
+// Execute the operation by passing it to Maintenance.Server.Send
+store.Maintenance.Server.Send(createDatabaseOp);
+`}
+
+
+
+{`// Define the database record:
+var databaseRecord = new DatabaseRecord("ShardedDatabaseName") {
+
+    // Configure sharding:
+    Sharding = new ShardingConfiguration()
+    {
+        // Ensure nodes "A", "B", and "C" are available in the cluster
+        // before executing the database creation.
+        Shards = new Dictionary<int, DatabaseTopology>()
+        {
+            {0, new DatabaseTopology { Members = new List<string> { "A", "B" }}},
+            {1, new DatabaseTopology { Members = new List<string> { "A", "C" }}},
+            {2, new DatabaseTopology { Members = new List<string> { "B", "C" }}}
+        }
+    },
+
+    // Enable revisions on all collections:
+    Revisions = new RevisionsConfiguration()
+    {
+        Default = new RevisionsCollectionConfiguration()
+        {
+            Disabled = false, MinimumRevisionsToKeep = 5
+        }
+    },
+
+    // Enable the document expiration feature:
+    Expiration = new ExpirationConfiguration()
+    {
+        Disabled = false
+    },
+
+    // Apply some database configuration settings:
+    Settings = new Dictionary<string, string>()
+    {
+        {"Databases.QueryTimeoutInSec", "500"}
+    }
+};
+
+// Define the create database operation
+var createDatabaseOp = new CreateDatabaseOperation(databaseRecord);
+
+// Execute the operation by passing it to Maintenance.Server.SendAsync
+await store.Maintenance.Server.SendAsync(createDatabaseOp);
+`}
+
+
+
+{`// Define the create database operation
+var createDatabaseOp = new CreateDatabaseOperation(builder => builder
+
+    // Call 'Sharded' to create a sharded database
+    .Sharded("ShardedDatabaseName", topology => topology
+        // Ensure nodes "A", "B", and "C" are available in the cluster
+        // before executing the database creation.
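+        // Each 'AddShard' call maps a shard number to the DatabaseTopology
+        // that lists the member nodes hosting that shard; the number of
+        // members per shard determines that shard's replication factor.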
+        .AddShard(0, new DatabaseTopology {Members = new List<string> {"A", "B"}})
+        .AddShard(1, new DatabaseTopology {Members = new List<string> {"A", "C"}})
+        .AddShard(2, new DatabaseTopology {Members = new List<string> {"B", "C"}}))
+    // Enable revisions on all collections:
+    .ConfigureRevisions(new RevisionsConfiguration()
+    {
+        Default = new RevisionsCollectionConfiguration()
+        {
+            Disabled = false, MinimumRevisionsToKeep = 5
+        }
+    })
+    // Enable the document expiration feature:
+    .ConfigureExpiration(new ExpirationConfiguration()
+    {
+        Disabled = false
+    })
+    // Apply some database configuration settings:
+    .WithSettings(new Dictionary<string, string>()
+    {
+        { "Databases.QueryTimeoutInSec", "500" }
+    })
+);
+
+// Execute the operation by passing it to Maintenance.Server.Send
+store.Maintenance.Server.Send(createDatabaseOp);
+`}
+
+
+
+{`// Define the create database operation
+var createDatabaseOp = new CreateDatabaseOperation(builder => builder
+
+    // Call 'Sharded' to create a sharded database
+    .Sharded("ShardedDatabaseName", topology => topology
+        // Ensure nodes "A", "B", and "C" are available in the cluster
+        // before executing the database creation.
+        .AddShard(0, new DatabaseTopology {Members = new List<string> {"A", "B"}})
+        .AddShard(1, new DatabaseTopology {Members = new List<string> {"A", "C"}})
+        .AddShard(2, new DatabaseTopology {Members = new List<string> {"B", "C"}}))
+    // Enable revisions on all collections:
+    .ConfigureRevisions(new RevisionsConfiguration()
+    {
+        Default = new RevisionsCollectionConfiguration()
+        {
+            Disabled = false, MinimumRevisionsToKeep = 5
+        }
+    })
+    // Enable the document expiration feature:
+    .ConfigureExpiration(new ExpirationConfiguration()
+    {
+        Disabled = false
+    })
+    // Apply some database configuration settings:
+    .WithSettings(new Dictionary<string, string>()
+    {
+        { "Databases.QueryTimeoutInSec", "500" }
+    })
+);
+
+// Execute the operation by passing it to Maintenance.Server.SendAsync
+await store.Maintenance.Server.SendAsync(createDatabaseOp);
+`}
+
+
+
+
+
+
+
+##### Example III - Ensure database does not exist before creating
+* To ensure the database does not already exist before creating it, follow this example:
+
+
+
+
+{`var databaseName = "MyDatabaseName";
+
+try
+{
+    // Try to fetch database statistics to check if the database exists
+    store.Maintenance.ForDatabase(databaseName)
+        .Send(new GetStatisticsOperation());
+}
+catch (DatabaseDoesNotExistException)
+{
+    try
+    {
+        // The database does not exist, try to create:
+        var createDatabaseOp = new CreateDatabaseOperation(
+            new DatabaseRecord(databaseName));
+
+        store.Maintenance.Server.Send(createDatabaseOp);
+    }
+    catch (ConcurrencyException)
+    {
+        // The database was created by another client before this call completed
+    }
+}
+`}
+
+
+
+{`var databaseName = "MyDatabaseName";
+
+try
+{
+    // Try to fetch database statistics to check if the database exists:
+    await store.Maintenance.ForDatabase(databaseName)
+        .SendAsync(new GetStatisticsOperation());
+}
+catch (DatabaseDoesNotExistException)
+{
+    try
+    {
+        // The database does not exist, try to create:
+        var createDatabaseOp = new CreateDatabaseOperation(
+            new DatabaseRecord(databaseName));
+
+        await store.Maintenance.Server.SendAsync(createDatabaseOp);
+    }
+    catch (ConcurrencyException)
+    {
+        // The database was created by another client before this call completed
+    }
+}
+`}
+
+
+
+
+
+
+
+## Syntax
+
+
+
+{`// CreateDatabaseOperation overloads:
+// ==================================
+public CreateDatabaseOperation(DatabaseRecord databaseRecord) \{\}
+public CreateDatabaseOperation(DatabaseRecord databaseRecord, int replicationFactor) \{\}
+public CreateDatabaseOperation(Action<IDatabaseRecordBuilderInitializer> builder) \{\}
+`}
+
+
+
+| Parameter | Description |
+|-----------------------|-------------|
+| **databaseRecord** | Instance of `DatabaseRecord` containing database configuration.<br />
See [The Database Record](../../../client-api/operations/server-wide/create-database.mdx#the-database-record) below. | +| **replicationFactor** | Number of nodes the database should be replicated to.

If not specified, the value is taken from `databaseRecord.Topology.ReplicationFactor`,
or defaults to **`1`** if that is not set.

If `Topology` is provided, the `replicationFactor` is ignored. | +| **builder** | Callback used to initialize and fluently configure a new DatabaseRecord.
Receives an `IDatabaseRecordBuilderInitializer` on which you invoke builder methods to construct the record. See [The Database Record Builder](../../../client-api/operations/server-wide/create-database.mdx#the-database-record-builder) below. | +### The Database Record: + +The `DatabaseRecord` is a collection of database configurations: + +| DatabaseRecord constructors | Description | +|---------------------------------------|--------------------------------------------------------------------| +| DatabaseRecord() | Initialize a new database record. | +| DatabaseRecord(`string` databaseName) | Initialize a new database record with the specified database name. | + + + +**Note:** + +* Only the properties listed in the table below can be configured in the `DatabaseRecord` object passed to `CreateDatabaseOperation`. +* For example, although ongoing task definitions are public on the _DatabaseRecord_ class, setting them during database creation will result in an exception. + To define ongoing tasks (e.g., backups, ETL, replication), use the appropriate dedicated operation after the database has been created. + + + +| DatabaseRecord properties | Type | Description | +|------------------------------------|-----------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **AiConnectionStrings** | `Dictionary` | Define [Ai Connection Strings](../../../ai-integration/connection-strings/connection-strings-overview.mdx), keyed by name. | +| **Analyzers** | `Dictionary` | A dictionary defining the [Custom Analyzers](../../../indexes/using-analyzers.mdx#creating-custom-analyzers) available to the database. | +| **AutoIndexes** | `Dictionary` | Auto-index definitions for the database. | +| **Client** | `ClientConfiguration` | [Client behavior](../../../studio/server/client-configuration.mdx) configuration. | +| **ConflictSolverConfig** | `ConflictSolver` | Define the strategy used to resolve [Replication conflicts](../../../server/clustering/replication/replication-conflicts.mdx). | +| **DataArchival** | `DataArchivalConfiguration` | [Data Archival](../../../data-archival/overview.mdx) configuration for the database. | +| **DatabaseName** | `string` | The database name. | +| **Disabled** | `bool` | Set the database initial state.
`true` - disable the database.
`false` - (default) the database will be enabled.

This can be modified later via [ToggleDatabasesStateOperation](../../../client-api/operations/server-wide/toggle-databases-state.mdx). | +| **DocumentsCompression** | `DocumentsCompressionConfiguration` | Configuration settings for [Compressing documents](../../../server/storage/documents-compression.mdx). | +| **ElasticSearchConnectionStrings** | `Dictionary` | Define [ElasticSearch Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-elasticsearch-connection-string), keyed by name. | +| **Encrypted** | `bool` | `true` - create an [Encrypted database](../../../server/security/encryption/database-encryption.mdx).

Note: Use `PutSecretKeyCommand` to send your secret key to the server BEFORE creating the database.

`false` - (default) the database will not be encrypted. | +| **Expiration** | `ExpirationConfiguration` | [Expiration](../../../server/extensions/expiration.mdx) configuration for the database. | +| **Indexes** | `Dictionary` | Define [Indexes](../../../client-api/operations/maintenance/indexes/put-indexes.mdx) that will be created with the database - no separate deployment needed. | +| **Integrations** | `IntegrationConfigurations` | Configuration for [Integrations](../../../integrations/postgresql-protocol/overview.mdx),
e.g. `PostgreSqlConfiguration`. | +| **LockMode** | `DatabaseLockMode` | Set the database lock mode.
(default: `Unlock`)

This can be modified later via `SetDatabasesLockOperation`. | +| **OlapConnectionStrings** | `Dictionary` | Define [OLAP Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-olap-connection-string), keyed by name. | +| **QueueConnectionStrings** | `Dictionary` | Define [Queue Connection Strings](../../../server/ongoing-tasks/etl/queue-etl/overview.mdx), keyed by name. | +| **RavenConnectionStrings** | `Dictionary` | Define [Raven Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-ravendb-connection-string), keyed by name. | +| **Refresh** | `RefreshConfiguration` | [Refresh](../../../server/extensions/refresh.mdx) configuration for the database. | +| **Revisions** | `RevisionsConfiguration` | [Revisions](../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) configuration for the database. | +| **RevisionsBin** | `RevisionsBinConfiguration` | Configuration for the [Revisions Bin Cleaner](../../../document-extensions/revisions/revisions-bin-cleaner.mdx). | +| **RevisionsForConflicts** | `RevisionsCollectionConfiguration` | Set the revisions configuration for conflicting documents. | +| **RollingIndexes** | `Dictionary` | Dictionary mapping index names to their deployment configurations. | +| **Settings** | `Dictionary` | [Configuration](../../../server/configuration/configuration-options.mdx) settings for the database. | +| **Sharding** | `ShardingConfiguration` | The sharding configuration. | +| **SnowflakeConnectionStrings** | `Dictionary` | Define [Snowflake Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-snowflake-connection-string), keyed by name. | +| **Sorters** | `Dictionary` | A dictionary defining the [Custom Sorters](../../../studio/database/settings/custom-sorters.mdx) available to the database. | +| **SqlConnectionStrings** | `Dictionary` | Define [SQL Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-sql-connection-string), keyed by name. | +| **Studio** | `StudioConfiguration` | [Studio Configuration](../../../studio/database/settings/studio-configuration.mdx). | +| **TimeSeries** | `TimeSeriesConfiguration` | [Time series](../../../studio/database/settings/time-series-settings.mdx) configuration for the database. | +| **Topology** | `DatabaseTopology` | Optional topology configuration.

Defaults to `null`, in which case the server will determine which nodes to place the database on, based on the specified `ReplicationFactor`. | +| **UnusedDatabaseIds** | `HashSet` | Set database IDs that will be excluded when creating new change vectors. | +### The Database Record Builder: + + + +{`public interface IDatabaseRecordBuilderInitializer +\{ + public IDatabaseRecordBuilder Regular(string databaseName); + public IShardedDatabaseRecordBuilder Sharded(string databaseName, Action builder); + public DatabaseRecord ToDatabaseRecord(); +\} + +public interface IShardedDatabaseRecordBuilder : IDatabaseRecordBuilderBase +\{ +\} + +// Available configurations: +// ========================= + +public interface IDatabaseRecordBuilder : IDatabaseRecordBuilderBase +\{ + public IDatabaseRecordBuilderBase WithTopology(DatabaseTopology topology); + public IDatabaseRecordBuilderBase WithTopology(Action builder); + public IDatabaseRecordBuilderBase WithReplicationFactor(int replicationFactor); +\} + +public interface IDatabaseRecordBuilderBase +\{ + DatabaseRecord ToDatabaseRecord(); + + IDatabaseRecordBuilderBase ConfigureClient(ClientConfiguration configuration); + IDatabaseRecordBuilderBase ConfigureDocumentsCompression(DocumentsCompressionConfiguration configuration); + IDatabaseRecordBuilderBase ConfigureExpiration(ExpirationConfiguration configuration); + IDatabaseRecordBuilderBase ConfigureRefresh(RefreshConfiguration configuration); + IDatabaseRecordBuilderBase ConfigureRevisions(RevisionsConfiguration configuration); + IDatabaseRecordBuilderBase ConfigureStudio(StudioConfiguration configuration); + IDatabaseRecordBuilderBase ConfigureTimeSeries(TimeSeriesConfiguration configuration); + + IDatabaseRecordBuilderBase Disabled(); + IDatabaseRecordBuilderBase Encrypted(); + + IDatabaseRecordBuilderBase WithAnalyzers(params AnalyzerDefinition[] analyzerDefinitions); + IDatabaseRecordBuilderBase WithConnectionStrings(Action builder); + IDatabaseRecordBuilderBase WithIndexes(params IndexDefinition[] indexDefinitions); + IDatabaseRecordBuilderBase WithIntegrations(Action builder); + IDatabaseRecordBuilderBase WithLockMode(DatabaseLockMode lockMode); + IDatabaseRecordBuilderBase WithSettings(Dictionary settings); + IDatabaseRecordBuilderBase WithSettings(Action> builder); + IDatabaseRecordBuilderBase WithSorters(params SorterDefinition[] sorterDefinitions); +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_create-database-java.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_create-database-java.mdx new file mode 100644 index 0000000000..26b0a191ef --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_create-database-java.mdx @@ -0,0 +1,143 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `CreateDatabaseOperation` to create a new database from the **Client API**, as described below. + To create a new database from the **Studio**, see [Create database](../../../studio/database/create-new-database/general-flow.mdx). + +* This operation requires a client certificate with a security clearance of _Operator_ or _ClusterAdmin_. + To learn which operations are allowed at each level, see [Security clearance and permissions](../../../server/security/authorization/security-clearance-and-permissions.mdx). 
+ +* In this article: + * [Create new database](../../../client-api/operations/server-wide/create-database.mdx#create-new-database) + * [Example I - Create database](../../../client-api/operations/server-wide/create-database.mdx#example-i---create-non-sharded-database) + * [Example II - Ensure database does not exist before creating](../../../client-api/operations/server-wide/create-database.mdx#example-ii---ensure-database-does-not-exist-before-creating) + * [Syntax](../../../client-api/operations/server-wide/create-database.mdx#syntax) + + +## Create new database + + + +##### Example I - Create database +* The following simple example creates a non-sharded database with the default replication factor of 1. + + + +{`DatabaseRecord databaseRecord = new DatabaseRecord(); +databaseRecord.setDatabaseName("MyNewDatabase"); +store.maintenance().server().send(new CreateDatabaseOperation(databaseRecord)); +`} + + + + + + +##### Example II - Ensure database does not exist before creating +* To ensure the database does not already exist before creating it, follow this example: + + + +{`public void ensureDatabaseExists(IDocumentStore store, String database, boolean createDatabaseIfNotExists) \{ + database = ObjectUtils.firstNonNull(database, store.getDatabase()); + + if (StringUtils.isBlank(database)) \{ + throw new IllegalArgumentException("Value cannot be null or whitespace"); + \} + + try \{ + store.maintenance().forDatabase(database).send(new GetStatisticsOperation()); + \} catch (DatabaseDoesNotExistException e) \{ + if (!createDatabaseIfNotExists) \{ + throw e; + \} + + try \{ + DatabaseRecord databaseRecord = new DatabaseRecord(); + databaseRecord.setDatabaseName(database); + store.maintenance().server().send(new CreateDatabaseOperation(databaseRecord)); + \} catch (ConcurrencyException ce) \{ + // The database was already created before calling CreateDatabaseOperation + \} + \} +\} +`} + + + + + + +## Syntax + + + +{`public CreateDatabaseOperation(DatabaseRecord databaseRecord) + +public CreateDatabaseOperation(DatabaseRecord databaseRecord, int replicationFactor) +`} + + + +| Parameter | Type | Description | +|-----------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **databaseRecord** | DatabaseRecord | Instance of `DatabaseRecord` containing database configuration. | +| **replicationFactor** | int | Number of nodes the database should be replicated to.

If not specified, the value is taken from `topology.replicationFactor`,
or defaults to **`1`** if that is not set.

If `topology` is provided, the `replicationFactor` is ignored. | + +## DatabaseRecord + +`DatabaseRecord` is a collection of database configurations. + +| constructor | Description | +|---------------------------------------|----------------------------------| +| DatabaseRecord(`string` databaseName) | Initialize a new database record | + + + +**Note:** + +* Only the properties listed in the table below can be configured in the `DatabaseRecord` object passed to `CreateDatabaseOperation`. +* For example, although ongoing task definitions are public on the _DatabaseRecord_ class, setting them during database creation will result in an exception. + To define ongoing tasks (e.g., backups, ETL, replication), use the appropriate dedicated operation after the database has been created. + + + +| Property | Type | Description | +|------------------------------------|----------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **analyzers** | `Map` | A dictionary defining the [Custom Analyzers](../../../indexes/using-analyzers.mdx#creating-custom-analyzers) available to the database. | +| **autoIndexes** | `Map` | Auto-index definitions for the database. | +| **client** | `ClientConfiguration` | [Client behavior](../../../studio/server/client-configuration.mdx) configuration. | +| **conflictSolverConfig** | `ConflictSolver` | Define the strategy used to resolve [Replication conflicts](../../../server/clustering/replication/replication-conflicts.mdx). | +| **dataArchival** | `DataArchivalConfiguration` | [Data Archival](../../../data-archival/overview.mdx) configuration for the database. | +| **databaseName** | `String` | The database name. | +| **disabled** | `boolean` (default: false) | Set the database initial state.
`true` - disable the database.
`false` - (default) the database will be enabled.

This can be modified later via [ToggleDatabasesStateOperation](../../../client-api/operations/server-wide/toggle-databases-state.mdx). | +| **documentsCompression** | `DocumentsCompressionConfiguration` | Configuration settings for [Compressing documents](../../../server/storage/documents-compression.mdx). | +| **elasticSearchConnectionStrings** | `Map` | Define [ElasticSearch Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-elasticsearch-connection-string), keyed by name. | +| **encrypted** | `boolean` (default: false) | `true` - create an [Encrypted database](../../../server/security/encryption/database-encryption.mdx).

Note: Use `PutSecretKeyCommand` to send your secret key to the server BEFORE creating the database.

`false` - (default) the database will not be encrypted. | +| **expiration** | `ExpirationConfiguration` | [Expiration](../../../server/extensions/expiration.mdx) configuration for the database. | +| **indexes** | `Map` | Define [Indexes](../../../client-api/operations/maintenance/indexes/put-indexes.mdx) that will be created with the database -
no separate deployment needed. | +| **integrations** | `IntegrationConfigurations` | Configuration for [Integrations](../../../integrations/postgresql-protocol/overview.mdx),
e.g. `PostgreSqlConfiguration`. | +| **lockMode** | `DatabaseLockMode` | Set the database lock mode.
(default: `Unlock`)

This can be modified later via `SetDatabasesLockOperation`. | +| **olapConnectionStrings** | `Map` | Define [OLAP Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-olap-connection-string), keyed by name. | +| **queueConnectionStrings** | `Map` | Define [Queue Connection Strings](../../../server/ongoing-tasks/etl/queue-etl/overview.mdx), keyed by name. | +| **ravenConnectionStrings** | `Map` | Define [Raven Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-ravendb-connection-string), keyed by name. | +| **refresh** | `RefreshConfiguration` | [Refresh](../../../server/extensions/refresh.mdx) configuration for the database. | +| **revisions** | `RevisionsConfiguration` | [Revisions](../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) configuration for the database. | +| **revisionsForConflicts** | `RevisionsCollectionConfiguration` | Set the revisions configuration for conflicting documents. | +| **rollingIndexes** | `Map` | Dictionary mapping index names to their deployment configurations. | +| **settings** | `Map` | [Configuration](../../../server/configuration/configuration-options.mdx) settings for the database. | +| **sharding** | `ShardingConfiguration` | The sharding configuration. | +| **sorters** | `Map` | A dictionary defining the [Custom Sorters](../../../studio/database/settings/custom-sorters.mdx) available to the database. | +| **sqlConnectionStrings** | `Map` | Define [SQL Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-sql-connection-string), keyed by name. | +| **studio** | `StudioConfiguration` | [Studio Configuration](../../../studio/database/settings/studio-configuration.mdx). | +| **timeSeries** | `TimeSeriesConfiguration` | [Time series](../../../studio/database/settings/time-series-settings.mdx) configuration for the database. | +| **topology** | `DatabaseTopology` | Optional topology configuration.

Defaults to `null`, in which case the server will determine which nodes to place the database on, based on the specified `ReplicationFactor`. |
| **unusedDatabaseIds** | `Set` | Set database IDs that will be excluded when creating new change vectors. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_delete-database-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_delete-database-csharp.mdx
new file mode 100644
index 0000000000..fcb70cb43a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_delete-database-csharp.mdx
@@ -0,0 +1,94 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This operation is used to delete databases from a server, with the option to remove all data from the hard drive.
+
+## Syntax
+
+
+
+{`public DeleteDatabasesOperation(
+    string databaseName,
+    bool hardDelete,
+    string fromNode = null,
+    TimeSpan? timeToWaitForConfirmation = null)
+\{
+\}
+
+public DeleteDatabasesOperation(DeleteDatabasesOperation.Parameters parameters)
+\{
+\}
+
+public class Parameters
+\{
+    public string[] DatabaseNames \{ get; set; \}
+
+    public bool HardDelete \{ get; set; \}
+
+    public string[] FromNodes \{ get; set; \}
+
+    public TimeSpan? TimeToWaitForConfirmation \{ get; set; \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **DatabaseName** | string | Name of a database to delete |
+| **HardDelete** | bool | Should all data be removed (data files, indexing files, etc.) |
+| **FromNode** | string | Remove the database just from a specific node. Default: `null`, which deletes the database from all nodes |
+| **TimeToWaitForConfirmation** | TimeSpan | Time to wait for confirmation. <br />
Default: `null` uses the server default (15 seconds) |
+
+## Example I
+
+
+
+
+{`var parameters = new DeleteDatabasesOperation.Parameters
+{
+    DatabaseNames = new[] { "MyNewDatabase", "OtherDatabaseToDelete" },
+    HardDelete = true,
+    FromNodes = new[] { "A", "C" }, // optional
+    TimeToWaitForConfirmation = TimeSpan.FromSeconds(30) // optional
+};
+store.Maintenance.Server.Send(new DeleteDatabasesOperation(parameters));
+`}
+
+
+
+{`var parameters = new DeleteDatabasesOperation.Parameters
+{
+    DatabaseNames = new[] { "MyNewDatabase", "OtherDatabaseToDelete" },
+    HardDelete = true,
+    FromNodes = new[] { "A", "C" }, // optional
+    TimeToWaitForConfirmation = TimeSpan.FromSeconds(30) // optional
+};
+await store.Maintenance.Server.SendAsync(new DeleteDatabasesOperation(parameters));
+`}
+
+
+
+
+## Example II
+
+In order to delete just one database from a server, you can also use this simplified constructor:
+
+
+
+
+{`store.Maintenance.Server.Send(new DeleteDatabasesOperation("MyNewDatabase", hardDelete: true, fromNode: null, timeToWaitForConfirmation: null));
+`}
+
+
+
+{`await store.Maintenance.Server.SendAsync(new DeleteDatabasesOperation("MyNewDatabase", hardDelete: true, fromNode: null, timeToWaitForConfirmation: null));
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_delete-database-java.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_delete-database-java.mdx
new file mode 100644
index 0000000000..dcff042968
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_delete-database-java.mdx
@@ -0,0 +1,101 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This operation is used to delete databases from a server, with the option to remove all data from the hard drive.
+
+## Syntax
+
+
+
+{`public DeleteDatabasesOperation(String databaseName, boolean hardDelete)
+
+public DeleteDatabasesOperation(String databaseName, boolean hardDelete, String fromNode)
+
+public DeleteDatabasesOperation(String databaseName, boolean hardDelete, String fromNode, Duration timeToWaitForConfirmation)
+
+public DeleteDatabasesOperation(Parameters parameters)
+`}
+
+
+
+
+
+{`public static class Parameters \{
+    private String[] databaseNames;
+    private boolean hardDelete;
+    private String[] fromNodes;
+    private Duration timeToWaitForConfirmation;
+
+    public String[] getDatabaseNames() \{
+        return databaseNames;
+    \}
+
+    public void setDatabaseNames(String[] databaseNames) \{
+        this.databaseNames = databaseNames;
+    \}
+
+    public boolean isHardDelete() \{
+        return hardDelete;
+    \}
+
+    public void setHardDelete(boolean hardDelete) \{
+        this.hardDelete = hardDelete;
+    \}
+
+    public String[] getFromNodes() \{
+        return fromNodes;
+    \}
+
+    public void setFromNodes(String[] fromNodes) \{
+        this.fromNodes = fromNodes;
+    \}
+
+    public Duration getTimeToWaitForConfirmation() \{
+        return timeToWaitForConfirmation;
+    \}
+
+    public void setTimeToWaitForConfirmation(Duration timeToWaitForConfirmation) \{
+        this.timeToWaitForConfirmation = timeToWaitForConfirmation;
+    \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **DatabaseName** | String | Name of a database to delete |
+| **HardDelete** | boolean | Should all data be removed (data files, indexing files, etc.) |
+| **FromNode** | String | Remove the database just from a specific node. <br />
Default: `null`, which deletes the database from all nodes |
+| **TimeToWaitForConfirmation** | Duration | Time to wait for confirmation. Default: `null` uses the server default (15 seconds) |
+
+## Example I
+
+
+
+{`DeleteDatabasesOperation.Parameters parameters = new DeleteDatabasesOperation.Parameters();
+parameters.setDatabaseNames(new String[]\{ "MyNewDatabase", "OtherDatabaseToDelete" \});
+parameters.setHardDelete(true);
+parameters.setFromNodes(new String[]\{ "A", "C" \}); // optional
+parameters.setTimeToWaitForConfirmation(Duration.ofSeconds(30)); // optional
+
+store.maintenance()
+    .server().send(new DeleteDatabasesOperation(parameters));
+`}
+
+
+
+## Example II
+
+In order to delete just one database from a server, you can also use this simplified constructor:
+
+
+
+{`store.maintenance().server().send(
+    new DeleteDatabasesOperation("MyNewDatabase", true, null, null));
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_get-build-number-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_get-build-number-csharp.mdx
new file mode 100644
index 0000000000..db0757d35c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_get-build-number-csharp.mdx
@@ -0,0 +1,62 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To get the server build number, use **GetBuildNumberOperation** from `Maintenance.Server`.
+
+## Syntax
+
+
+
+{`public GetBuildNumberOperation()
+`}
+
+
+
+### Return Value
+
+The result of executing `GetBuildNumberOperation` is a **BuildNumber** object:
+
+
+
+{`public class BuildNumber
+\{
+    public string ProductVersion \{ get; set; \}
+
+    public int BuildVersion \{ get; set; \}
+
+    public string CommitHash \{ get; set; \}
+
+    public string FullVersion \{ get; set; \}
+\}
+`}
+
+
+
+| Property | Description |
+|--------------------|---------------------------------------|
+| **ProductVersion** | Current product version, e.g. "4.0" |
+| **BuildVersion** | Current build version, e.g. 40 |
+| **CommitHash** | Git commit SHA, e.g. "a377982" |
+| **FullVersion** | Full semantic version, e.g. "4.0.0" |
+
+## Example
+
+
+
+
+{`var getBuildNumberResult = documentStore.Maintenance.Server.Send(new GetBuildNumberOperation());
+Console.WriteLine(getBuildNumberResult.BuildVersion);
+`}
+
+
+
+{`var buildNumber = await documentStore.Maintenance.Server.SendAsync(new GetBuildNumberOperation());
+Console.WriteLine(buildNumber.BuildVersion);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_get-database-names-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_get-database-names-csharp.mdx
new file mode 100644
index 0000000000..0a30c21809
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_get-database-names-csharp.mdx
@@ -0,0 +1,35 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To download available database names from a server, use the `GetDatabaseNamesOperation` operation.
+
+## Syntax
+
+
+
+{`public GetDatabaseNamesOperation(int start, int pageSize)
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **start** | int | Number of records that should be skipped |
+| **pageSize** | int | Maximum number of records that will be downloaded |
+
+| Return Value | |
+| ------------- | ----- |
+| string[] | Names of databases on a server |
+
+## Example
+
+
+
+{`var operation = new GetDatabaseNamesOperation(0, 25);
+string[] databaseNames = store.Maintenance.Server.Send(operation);
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_get-database-names-java.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_get-database-names-java.mdx
new file mode 100644
index 0000000000..c76ea5f0b0
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_get-database-names-java.mdx
@@ -0,0 +1,35 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To download available database names from a server, use the `GetDatabaseNamesOperation` operation.
+
+## Syntax
+
+
+
+{`public GetDatabaseNamesOperation(int start, int pageSize)
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **start** | int | Number of records that should be skipped |
+| **pageSize** | int | Maximum number of records that will be downloaded |
+
+| Return Value | |
+| ------------- | ----- |
+| String[] | Names of databases on a server |
+
+## Example
+
+
+
+{`GetDatabaseNamesOperation operation = new GetDatabaseNamesOperation(0, 25);
+String[] databaseNames = store.maintenance().server().send(operation);
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_modify-conflict-solver-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_modify-conflict-solver-csharp.mdx
new file mode 100644
index 0000000000..a4b0759fcc
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_modify-conflict-solver-csharp.mdx
@@ -0,0 +1,81 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The conflict solver allows you to set a conflict resolution script for each collection or resolve conflicts using the latest version.
+
+To modify the solver configuration, use **ModifyConflictSolverOperation**. <br />
+
+## Syntax
+
+
+
+{`public ModifyConflictSolverOperation(
+    string database,
+    Dictionary<string, ScriptResolver> collectionByScript = null,
+    bool resolveToLatest = false)
+`}
+
+
+
+
+{`public class ScriptResolver
+\{
+    public string Script \{ get; set; \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **database** | string | Name of a database |
+| **collectionByScript** | Dictionary&lt;string,ScriptResolver&gt; | Per-collection conflict resolution script |
+| **resolveToLatest** | bool | Indicates if a conflict should be resolved using the latest version |
+
+
+| Return Value | |
+| ------------- | ----- |
+| **Key** | Name of database |
+| **RaftCommandIndex** | RAFT command index |
+| **Solver** | Saved conflict solver configuration |
+
+## Example I
+
+
+
+{`// resolve conflict to latest version
+ModifyConflictSolverOperation operation =
+    new ModifyConflictSolverOperation("Northwind", null, resolveToLatest: true);
+store.Maintenance.Server.Send(operation);
+`}
+
+
+
+
+## Example II
+
+The resolution script receives the conflicting document versions in the `docs` array and returns the single resolved document:
+
+
+
+{`// resolve conflict by finding max value
+string script = @"
+var maxRecord = 0;
+for (var i = 0; i < docs.length; i++) \{
+    maxRecord = Math.max(docs[i].MaxRecord, maxRecord);
+\}
+docs[0].MaxRecord = maxRecord;
+
+return docs[0];";
+
+ModifyConflictSolverOperation operation =
+    new ModifyConflictSolverOperation("Northwind", new Dictionary<string, ScriptResolver>
+    \{
+        \{ "Orders", new ScriptResolver \{ Script = script \} \}
+    \}, resolveToLatest: false);
+store.Maintenance.Server.Send(operation);
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_promote-database-node-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_promote-database-node-csharp.mdx
new file mode 100644
index 0000000000..fe8d82fc4c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_promote-database-node-csharp.mdx
@@ -0,0 +1,33 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This operation is used to promote a database node. After promotion, the node is considered a `Member`.
+
+## Syntax
+
+
+
+{`public PromoteDatabaseNodeOperation(string databaseName, string node)
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **databaseName** | string | Name of a database |
+| **node** | string | Node tag to promote into database group `Member` |
+
+## Example
+
+
+
+{`PromoteDatabaseNodeOperation promoteOperation = new PromoteDatabaseNodeOperation("Northwind", "C");
+store.Maintenance.Server.Send(promoteOperation);
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_reorder-database-members-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_reorder-database-members-csharp.mdx
new file mode 100644
index 0000000000..94189cfb4d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_reorder-database-members-csharp.mdx
@@ -0,0 +1,53 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+**ReorderDatabaseMembersOperation** allows you to change the order of nodes in the [Database Group Topology](../../../studio/database/settings/manage-database-group.mdx).
+The order matters: the first node in the list is the preferred node, which clients contact first. <br />
+
+## Syntax
+
+
+
+{`public ReorderDatabaseMembersOperation(string database, List<string> order)
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **database** | string | Name of a database to operate on |
+| **order** | List\<string> | List of node tags of all existing nodes in the database group, listed in the exact order that you wish to have.<br />
Throws `ArgumentException` if the reordered list doesn't correspond to the existing nodes of the database group |
+
+
+## Example I
+
+
+
+{`// Assume that the current order of database group nodes is: ["A", "B", "C"]
+
+// Change the order of database group nodes to: ["C", "A", "B"]
+
+store.Maintenance.Server.Send(new ReorderDatabaseMembersOperation("Northwind",
+    new List<string>
+    \{
+        "C", "A", "B"
+    \}));
+`}
+
+
+
+## Example II
+
+
+
+{`// Get the current DatabaseTopology from the database record
+var topology = store.Maintenance.Server.Send(new GetDatabaseRecordOperation("Northwind")).Topology;
+
+// Reverse the order of database group nodes
+topology.Members.Reverse();
+store.Maintenance.Server.Send(new ReorderDatabaseMembersOperation("Northwind", topology.Members));
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_restore-backup-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_restore-backup-csharp.mdx
new file mode 100644
index 0000000000..2ce99aa7b8
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_restore-backup-csharp.mdx
@@ -0,0 +1,60 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* To restore a database from its backup, use `RestoreBackupOperation`.
+* A backup can also be restored using [Studio](../../../studio/database/create-new-database/from-backup.mdx).
+
+## Syntax
+
+
+
+{`public RestoreBackupOperation(RestoreBackupConfiguration restoreConfiguration)
+`}
+
+
+
+
+{`public class RestoreBackupConfiguration
+\{
+    public string DatabaseName \{ get; set; \}
+
+    public string BackupLocation \{ get; set; \}
+
+    public string LastFileNameToRestore \{ get; set; \}
+
+    public string DataDirectory \{ get; set; \}
+
+    public string EncryptionKey \{ get; set; \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **DatabaseName** | string | Database name to create during the restore operation |
+| **BackupLocation** | string | Directory containing backup files |
+| **LastFileNameToRestore** | string | Used for partial restore |
+| **DataDirectory** | string | Optional: Database data directory |
+| **EncryptionKey** | string | Encryption key used for restore |
+
+## Example
+
+
+
+{`RestoreBackupConfiguration config = new RestoreBackupConfiguration()
+\{
+    BackupLocation = @"C:\\backups\\Northwind",
+    DatabaseName = "Northwind"
+\};
+RestoreBackupOperation restoreOperation = new RestoreBackupOperation(config);
+store.Maintenance.Server.Send(restoreOperation)
+    .WaitForCompletion();
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_restore-backup-java.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_restore-backup-java.mdx
new file mode 100644
index 0000000000..f6fa3ca910
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_restore-backup-java.mdx
@@ -0,0 +1,106 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* To restore a database from its backup, use `RestoreBackupOperation`.
+* A backup can also be restored using [Studio](../../../studio/database/create-new-database/from-backup.mdx). <br />
+
+## Syntax
+
+
+
+{`public RestoreBackupOperation(RestoreBackupConfigurationBase restoreConfiguration);
+
+public RestoreBackupOperation(RestoreBackupConfigurationBase restoreConfiguration, String nodeTag);
+`}
+
+
+
+
+{`public abstract class RestoreBackupConfigurationBase \{
+
+    public String getDatabaseName() \{
+        return databaseName;
+    \}
+
+    public void setDatabaseName(String databaseName) \{
+        this.databaseName = databaseName;
+    \}
+
+    public String getLastFileNameToRestore() \{
+        return lastFileNameToRestore;
+    \}
+
+    public void setLastFileNameToRestore(String lastFileNameToRestore) \{
+        this.lastFileNameToRestore = lastFileNameToRestore;
+    \}
+
+    public String getDataDirectory() \{
+        return dataDirectory;
+    \}
+
+    public void setDataDirectory(String dataDirectory) \{
+        this.dataDirectory = dataDirectory;
+    \}
+
+    public String getEncryptionKey() \{
+        return encryptionKey;
+    \}
+
+    public void setEncryptionKey(String encryptionKey) \{
+        this.encryptionKey = encryptionKey;
+    \}
+
+    public boolean isDisableOngoingTasks() \{
+        return disableOngoingTasks;
+    \}
+
+    public void setDisableOngoingTasks(boolean disableOngoingTasks) \{
+        this.disableOngoingTasks = disableOngoingTasks;
+    \}
+
+    public boolean isSkipIndexes() \{
+        return skipIndexes;
+    \}
+
+    public void setSkipIndexes(boolean skipIndexes) \{
+        this.skipIndexes = skipIndexes;
+    \}
+
+    public BackupEncryptionSettings getBackupEncryptionSettings() \{
+        return backupEncryptionSettings;
+    \}
+
+    public void setBackupEncryptionSettings(BackupEncryptionSettings backupEncryptionSettings) \{
+        this.backupEncryptionSettings = backupEncryptionSettings;
+    \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **databaseName** | String | Database name to create during the restore operation |
+| **lastFileNameToRestore** | String | Used for partial restore |
+| **dataDirectory** | String | Optional: Database data directory |
+| **encryptionKey** | String | Encryption key used for restore |
+| **disableOngoingTasks** | boolean | Disable ongoing tasks |
+| **skipIndexes** | boolean | Skip the indexes |
+
+## Example
+
+
+
+{`RestoreBackupConfiguration config = new RestoreBackupConfiguration();
+config.setBackupLocation("C:\\\\backups\\\\Northwind");
+config.setDatabaseName("Northwind");
+RestoreBackupOperation restoreOperation = new RestoreBackupOperation(config);
+store.maintenance().server().send(restoreOperation);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_restore-backup-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_restore-backup-nodejs.mdx
new file mode 100644
index 0000000000..7c3520d6b6
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_restore-backup-nodejs.mdx
@@ -0,0 +1,59 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* To restore a database from its backup, use `RestoreBackupOperation`.
+* A backup can also be restored using [Studio](../../../studio/database/create-new-database/from-backup.mdx). <br />
+
+## Syntax
+
+
+
+{`const restoreBackupOperation = new RestoreBackupOperation(restoreConfiguration, "nodeTag");
+`}
+
+
+
+
+{`export interface RestoreBackupConfigurationBase \{
+    databaseName,
+    lastFileNameToRestore,
+    dataDirectory,
+    encryptionKey,
+    disableOngoingTasks,
+    skipIndexes,
+    type,
+    backupEncryptionSettings
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **databaseName** | string | Database name to create during the restore operation |
+| **lastFileNameToRestore** | string | Used for partial restore |
+| **dataDirectory** | string | Optional: Database data directory |
+| **encryptionKey** | string | Encryption key used for restore |
+| **disableOngoingTasks** | boolean | true/false to disable/enable Ongoing Tasks |
+| **skipIndexes** | boolean | true/false to disable/enable indexes import |
+| **type** | RestoreType | The restore source type |
+| **backupEncryptionSettings** | BackupEncryptionSettings | Backup encryption settings |
+
+## Example
+
+
+
+{`const restoreConfiguration = \{
+    databaseName: "Northwind",
+    skipIndexes: false
+\}
+const restoreBackupOperation = new RestoreBackupOperation(restoreConfiguration, "A");
+const restoreResult = await store.maintenance.server.send(restoreBackupOperation);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-csharp.mdx
new file mode 100644
index 0000000000..936ec32a8b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-csharp.mdx
@@ -0,0 +1,139 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `ToggleDatabasesStateOperation` to enable/disable a single database or multiple databases.
+
+* The database will be enabled/disabled on all nodes in the [database-group](../../../studio/database/settings/manage-database-group.mdx).
+
+* In this page:
+
+  * [Enable/Disable database from the Client API](../../../client-api/operations/server-wide/toggle-databases-state.mdx#enable/disable-database-from-the-client-api)
+      * [Enable database](../../../client-api/operations/server-wide/toggle-databases-state.mdx#enable-database)
+      * [Disable database](../../../client-api/operations/server-wide/toggle-databases-state.mdx#disable-database)
+      * [Syntax](../../../client-api/operations/server-wide/toggle-databases-state.mdx#syntax)
+  * [Disable database via the file system](../../../client-api/operations/server-wide/toggle-databases-state.mdx#disable-database-via-the-file-system)
+
+
+## Enable/Disable database from the Client API
+
+#### Enable database:
+
+
+
+
+{`// Define the toggle state operation
+// specify the database name & pass 'false' to enable
+var enableDatabaseOp = new ToggleDatabasesStateOperation("Northwind", disable: false);
+
+// To enable multiple databases use:
+// var enableDatabaseOp =
+//     new ToggleDatabasesStateOperation(new [] { "DB1", "DB2", ...
}, disable: false); + +// Execute the operation by passing it to Maintenance.Server.Send +var toggleResult = documentStore.Maintenance.Server.Send(enableDatabaseOp); +`} + + + + +{`// Define the toggle state operation +// specify the database name(s) & pass 'false' to enable +var enableDatabaseOp = new ToggleDatabasesStateOperation(new [] { "Foo", "Bar" }, disable: false); + +// Execute the operation by passing it to Maintenance.Server.SendAsync +var toggleResult = await documentStore.Maintenance.Server.SendAsync(enableDatabaseOp); +`} + + + +#### Disable database: + + + + +{`// Define the toggle state operation +// specify the database name(s) & pass 'true' to disable +var disableDatabaseOp = new ToggleDatabasesStateOperation("Northwind", disable: true); + +// To disable multiple databases use: +// var disableDatabaseOp = +// new ToggleDatabasesStateOperation(new [] { "DB1", "DB2", ... }, disable: true); + +// Execute the operation by passing it to Maintenance.Server.Send +var toggleResult = documentStore.Maintenance.Server.Send(disableDatabaseOp); +`} + + + + +{`// Define the toggle state operation +// specify the database name(s) & pass 'true' to disable +var disableDatabaseOp = new ToggleDatabasesStateOperation("Northwind", disable: true); + +// Execute the operation by passing it to Maintenance.Server.SendAsync +var toggleResult = await documentStore.Maintenance.Server.SendAsync(disableDatabaseOp); +`} + + + +#### Syntax: + + + +{`// Available overloads: +public ToggleDatabasesStateOperation(string databaseName, bool disable) +public ToggleDatabasesStateOperation(string[] databaseNames, bool disable) +`} + + + +| Parameter | Type | Description | +|-------------------|------------|-------------------------------------------------------------------------------------------| +| **databaseName** | `string` | Name of database for which to toggle state | +| **databaseNames** | `string[]` | List of database names for which to toggle state | +| **disable** | `bool` | `true` - request to disable the database(s)
`false` - request to enable the database(s) |
+
+
+
+{`// Executing the operation returns the following object:
+public class DisableDatabaseToggleResult
+\{
+    public bool Disabled; // Is database disabled
+    public string Name;   // Name of the database
+    public bool Success;  // Has request succeeded
+    public string Reason; // Reason for success or failure
+\}
+`}
+
+
+
+
+
+## Disable database via the file system
+
+It may sometimes be useful to disable a database manually through the file system.
+
+* To **manually disable** a database:
+
+  * Place a file named `disable.marker` in the [database directory](../../../server/storage/directory-structure.mdx).
+  * The `disable.marker` file can be empty,
+    and can be created by any available method, e.g. using the File Explorer, a terminal, or code.
+
+* Attempting to use a manually disabled database will generate the following exception:
+
+      Unable to open database: '{DatabaseName}',
+      it has been manually disabled via the file: '{disableMarkerPath}'.
+      To re-enable, remove the disable.marker and reload the database.
+
+* To **enable** a manually disabled database:
+
+  * First, remove the `disable.marker` file from the database directory.
+  * Then, [reload the database](../../../studio/database/settings/database-settings.mdx#how-to-reload-the-database).
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-nodejs.mdx
new file mode 100644
index 0000000000..31de0141ed
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-nodejs.mdx
@@ -0,0 +1,123 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `ToggleDatabasesStateOperation` to enable/disable a single database or multiple databases.
+
+* The database will be enabled/disabled on all nodes in the [database-group](../../../studio/database/settings/manage-database-group.mdx).
+
+* In this page:
+
+  * [Enable/Disable database from the Client API](../../../client-api/operations/server-wide/toggle-databases-state.mdx#enable/disable-database-from-the-client-api)
+      * [Enable database](../../../client-api/operations/server-wide/toggle-databases-state.mdx#enable-database)
+      * [Disable database](../../../client-api/operations/server-wide/toggle-databases-state.mdx#disable-database)
+      * [Syntax](../../../client-api/operations/server-wide/toggle-databases-state.mdx#syntax)
+  * [Disable database via the file system](../../../client-api/operations/server-wide/toggle-databases-state.mdx#disable-database-via-the-file-system)
+
+
+## Enable/Disable database from the Client API
+
+
+
+<br />
**Enable database**: + + + +{`// Define the toggle state operation +// specify the database name & pass 'false' to enable +const enableDatabaseOp = new ToggleDatabasesStateOperation("Northwind", false); + +// To enable multiple databases use: +// const enableDatabaseOp = +// new ToggleDatabasesStateOperation(["DB1", "DB2", ...], false); + +// Execute the operation by passing it to maintenance.server.send +const toggleResult = await documentStore.maintenance.server.send(enableDatabaseOp); +`} + + + + + + + **Disable database**: + + + +{`// Define the toggle state operation +// specify the database name(s) & pass 'true' to disable +const disableDatabaseOp = new ToggleDatabasesStateOperation("Northwind", true); + +// To disable multiple databases use: +// const disableDatabaseOp = +// new ToggleDatabasesStateOperation(["DB1", "DB2", ...], true); + +// Execute the operation by passing it to maintenance.server.send +const toggleResult = await documentStore.maintenance.server.send(disableDatabaseOp); +`} + + + + + + + **Syntax**: + + + +{`// Available overloads: +const enableDatabaseOp = new ToggleDatabasesStateOperation(databaseName, disable); +const enableDatabaseOp = new ToggleDatabasesStateOperation(databaseNames, disable); +`} + + + +| Parameter | Type | Description | +|-------------------|----------|---------------------------------------------------------------------------------------------| +| **databaseName** | `string` | Name of database for which to toggle state | +| **databaseNames** | `string[]` | List of database names for which to toggle state | +| **disable** | `boolean` | `true` - request to disable the database(s)
`false`- request to enable the database(s) | + + + +{`// Executing the operation returns an object with the following properties: +\{ + disabled, // Is database disabled + name, // Name of the database + success, // Has request succeeded + reason // Reason for success or failure +\} +`} + + + + + + +## Disable database via the file system + +It may sometimes be useful to disable a database manually, through the file system. + +* To **manually disable** a database: + + * Place a file named `disable.marker` in the [database directory](../../../server/storage/directory-structure.mdx). + * The `disable.marker` file can be empty, + and can be created by any available method, e.g. using the File Explorer, a terminal, or code. + +* Attempting to use a manually disabled database will generate the following exception: + + Unable to open database: '{DatabaseName}', + it has been manually disabled via the file: '{disableMarkerPath}'. + To re-enable, remove the disable.marker and reload the database. + +* To **enable** a manually disabled database: + + * First, remove the `disable.marker` file from the database directory. + * Then, [reload the database](../../../studio/database/settings/database-settings.mdx#how-to-reload-the-database). + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-php.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-php.mdx new file mode 100644 index 0000000000..ea80cc6f36 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-php.mdx @@ -0,0 +1,113 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `ToggleDatabasesStateOperation` to enable/disable a single database or multiple databases. + +* The database will be enabled/disabled on all nodes in the [database-group](../../../studio/database/settings/manage-database-group.mdx). + +* In this page: + + * [Enable/Disable database from the Client API](../../../client-api/operations/server-wide/toggle-databases-state.mdx#enable/disable-database-from-the-client-api) + * [Enable database](../../../client-api/operations/server-wide/toggle-databases-state.mdx#enable-database) + * [Disable database](../../../client-api/operations/server-wide/toggle-databases-state.mdx#disable-database) + * [Syntax](../../../client-api/operations/server-wide/toggle-databases-state.mdx#syntax) + * [Disable database via the file system](../../../client-api/operations/server-wide/toggle-databases-state.mdx#disable-database-via-the-file-system) + + +## Enable/Disable database from the Client API + +#### Enable database: + + + +{`// Define the toggle state operation +// specify the database name & pass 'false' to enable +$enableDatabaseOp = new ToggleDatabasesStateOperation("Northwind", false); + +// To enable multiple databases use: +// $enableDatabaseOp = new ToggleDatabasesStateOperation([ "DB1", "DB2", ... 
], false); +
+// Execute the operation by passing it to maintenance()->server()->send
+/** @var DisableDatabaseToggleResult $toggleResult */
+$toggleResult = $documentStore->maintenance()->server()->send($enableDatabaseOp);
+`}
+

#### Disable database:

+
+
+{`// Define the toggle state operation
+// specify the database name(s) & pass 'true' to disable
+$disableDatabaseOp = new ToggleDatabasesStateOperation("Northwind", true);
+
+// To disable multiple databases use:
+// $disableDatabaseOp = new ToggleDatabasesStateOperation([ "DB1", "DB2", ... ], true);
+
+// Execute the operation by passing it to maintenance()->server()->send
+/** @var DisableDatabaseToggleResult $toggleResult */
+$toggleResult = $documentStore->maintenance()->server()->send($disableDatabaseOp);
+`}
+

#### Syntax:

+
+
+{`// Available overloads, as used in the examples above:
+new ToggleDatabasesStateOperation($databaseName, $disable);   // single database name (string)
+new ToggleDatabasesStateOperation($databaseNames, $disable);  // list of database names (array)
+`}
+
+
+| Parameter | Type | Description |
+|--------------------|---------|---------------------------------------------------------------------------------------|
+| **$databaseName** | `string` / `StringArray` / `array` | Name or list of names of the database(s) whose state to toggle |
+| **$disable** | `bool` | `true` - request to disable the database(s)
`false` - request to enable the database(s) |
+
+
+
+{`class DisableDatabaseToggleResult:
+    def __init__(
+        self, disabled: bool = None, name: str = None, success: bool = None, reason: str = None
+    ) -> None:
+        self.disabled = disabled  # Is database disabled
+        self.name = name  # Name of the database
+        self.success = success  # Has request succeeded
+        self.reason = reason  # Reason for success or failure
+`}
+
+
+
+
+
+## Disable database via the file system
+
+It may sometimes be useful to disable a database manually, through the file system.
+
+* To **manually disable** a database:
+
+  * Place a file named `disable.marker` in the [database directory](../../../server/storage/directory-structure.mdx).
+  * The `disable.marker` file can be empty,
+    and can be created by any available method, e.g. using the File Explorer, a terminal, or code.
+
+* Attempting to use a manually disabled database will generate the following exception:
+
+      Unable to open database: '{DatabaseName}',
+      it has been manually disabled via the file: '{disableMarkerPath}'.
+      To re-enable, remove the disable.marker and reload the database.
+
+* To **enable** a manually disabled database:
+
+  * First, remove the `disable.marker` file from the database directory.
+  * Then, [reload the database](../../../studio/database/settings/database-settings.mdx#how-to-reload-the-database).
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-python.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-python.mdx new file mode 100644 index 0000000000..9dad0f4dda --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-databases-state-python.mdx @@ -0,0 +1,112 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `ToggleDatabasesStateOperation` to enable/disable a single database or multiple databases.
+
+* The database will be enabled/disabled on all nodes in the [database-group](../../../studio/database/settings/manage-database-group.mdx). 
+
+* In this page:
+
+  * [Enable/Disable database from the Client API](../../../client-api/operations/server-wide/toggle-databases-state.mdx#enable/disable-database-from-the-client-api)
+      * [Enable database](../../../client-api/operations/server-wide/toggle-databases-state.mdx#enable-database)
+      * [Disable database](../../../client-api/operations/server-wide/toggle-databases-state.mdx#disable-database)
+      * [Syntax](../../../client-api/operations/server-wide/toggle-databases-state.mdx#syntax)
+  * [Disable database via the file system](../../../client-api/operations/server-wide/toggle-databases-state.mdx#disable-database-via-the-file-system)
+
+
+## Enable/Disable database from the Client API
+
+#### Enable database:
+
+
+
+{`# Define the toggle state operation
+# specify the database name & pass 'False' to enable
+enable_database_op = ToggleDatabasesStateOperation("Northwind", disable=False)
+
+# To enable multiple databases use:
+# enable_database_op = ToggleDatabasesStateOperation.from_multiple_names(["DB1", "DB2", ...], disable=False)
+
+# Execute the operation by passing it to maintenance.server.send
+toggle_result = store.maintenance.server.send(enable_database_op)
+`}
+
+
+#### Disable database:
+
+
+
+{`# Define the toggle state operation
+# specify the database name(s) & pass 'True' to disable
+disable_database_op = ToggleDatabasesStateOperation("Northwind", disable=True)
+
+# To disable multiple databases use:
+# disable_database_op = ToggleDatabasesStateOperation.from_multiple_names(["DB1", "DB2", ...], disable=True)
+
+# Execute the operation by passing it to maintenance.server.send
+toggle_result = store.maintenance.server.send(disable_database_op)
+`}
+
+
+#### Syntax:
+
+
+
+{`class ToggleDatabasesStateOperation(ServerOperation[DisableDatabaseToggleResult]):
+    def __init__(self, database_name: str, disable: bool): ...
+    @classmethod
+    def from_multiple_names(cls, database_names: List[str], disable: bool): ...
+`}
+
+
+
+| Parameter | Type | Description |
+|--------------------|---------|-------------------------------------------------------------------------------------------|
+| **database_name** | `str` | Name of database for which to toggle state |
+| **database_names** | `List[str]` | List of database names for which to toggle state |
+| **disable** | `bool` | `True` - request to disable the database(s)
`False` - request to enable the database(s) |
+
+
+
+{`class DisableDatabaseToggleResult:
+    def __init__(
+        self, disabled: bool = None, name: str = None, success: bool = None, reason: str = None
+    ) -> None:
+        self.disabled = disabled  # Is database disabled
+        self.name = name  # Name of the database
+        self.success = success  # Has request succeeded
+        self.reason = reason  # Reason for success or failure
+`}
+
+
+
+
+
+## Disable database via the file system
+
+It may sometimes be useful to disable a database manually, through the file system.
+
+* To **manually disable** a database:
+
+  * Place a file named `disable.marker` in the [database directory](../../../server/storage/directory-structure.mdx).
+  * The `disable.marker` file can be empty,
+    and can be created by any available method, e.g. using the File Explorer, a terminal, or code.
+
+* Attempting to use a manually disabled database will generate the following exception:
+
+      Unable to open database: '{DatabaseName}',
+      it has been manually disabled via the file: '{disableMarkerPath}'.
+      To re-enable, remove the disable.marker and reload the database.
+
+* To **enable** a manually disabled database:
+
+  * First, remove the `disable.marker` file from the database directory.
+  * Then, [reload the database](../../../studio/database/settings/database-settings.mdx#how-to-reload-the-database).
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-dynamic-database-distribution-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-dynamic-database-distribution-csharp.mdx new file mode 100644 index 0000000000..8817946dc5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/_toggle-dynamic-database-distribution-csharp.mdx @@ -0,0 +1,46 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+# Operations: Server: Toggle Dynamic Database Distribution
+
+
+* In [dynamic database distribution](../../../server/clustering/distribution/distributed-database.mdx#dynamic-database-distribution) mode,
+if a database node is down, another cluster node is added to the database group to compensate.
+
+* Use this operation to toggle dynamic distribution for a particular database group.
+
+* This can also be done [in the studio](../../../studio/database/settings/manage-database-group.mdx#database-group-topology---actions) under
+database group settings.
+
+
+
+
+
+
+
+
+{`public SetDatabaseDynamicDistributionOperation(string databaseName, bool allowDynamicDistribution)
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **databaseName** | string | Name of database group |
+| **allowDynamicDistribution** | bool | Set to `true` to activate dynamic distribution mode, `false` to deactivate it. 
|
+### Example
+
+
+
+{`SetDatabaseDynamicDistributionOperation operation =
+    new SetDatabaseDynamicDistributionOperation("NorthWind", true);
+documentStore.Maintenance.Server.Send(operation);
+`}
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/add-database-node.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/add-database-node.mdx new file mode 100644 index 0000000000..1334b92e80 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/add-database-node.mdx @@ -0,0 +1,39 @@ +---
+title: "Adding a Database Node"
+hide_table_of_contents: true
+sidebar_label: Add Database Node
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import AddDatabaseNodeCsharp from './_add-database-node-csharp.mdx';
+import AddDatabaseNodePython from './_add-database-node-python.mdx';
+import AddDatabaseNodePhp from './_add-database-node-php.mdx';
+import AddDatabaseNodeNodejs from './_add-database-node-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_category_.json b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_category_.json new file mode 100644 index 0000000000..66d2a31fb6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_category_.json @@ -0,0 +1,4 @@ +{
+  "position": 12,
+  "label": "Certificates"
+} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_create-client-certificate-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_create-client-certificate-csharp.mdx new file mode 100644 index 0000000000..b67a23d5a1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_create-client-certificate-csharp.mdx @@ -0,0 +1,98 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can generate a client certificate using **CreateClientCertificateOperation**. 
+
+* Learn the rationale needed to properly define client certificates in [The RavenDB Security Authorization Approach](../../../../server/security/authentication/certificate-management.mdx#the-ravendb-security-authorization-approach)
+
+
+
+## Syntax
+
+
+
+{`public CreateClientCertificateOperation(string name,
+    Dictionary<string, DatabaseAccess> permissions,
+    SecurityClearance clearance,
+    string password = null)
+`}
+
+
+
+
+
+{`// The role assigned to the certificate:
+public enum SecurityClearance
+\{
+    ClusterAdmin,
+    ClusterNode,
+    Operator,
+    ValidUser
+\}
+`}
+
+
+
+
+
+{`// The access level for a 'ValidUser' security clearance:
+public enum DatabaseAccess
+\{
+    Read,
+    ReadWrite,
+    Admin
+\}
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **name** | string | Name of a certificate |
+| **permissions** | Dictionary<string, DatabaseAccess> | Dictionary mapping databases to access level |
+| **clearance** | SecurityClearance | Access level |
+| **password** | string | Optional certificate password, default: no password |
+
+| Return Value | |
+| ------------- | ----- |
+| **RawData** | client certificate raw data |
+
+## Example I
+
+
+
+{`// With the security clearance set to Cluster Administrator or Operator,
+// the user of this certificate will have access to all databases
+CreateClientCertificateOperation operation =
+    new CreateClientCertificateOperation(
+        "admin", null, SecurityClearance.Operator);
+CertificateRawData certificateRawData =
+    store.Maintenance.Server.Send(operation);
+byte[] cert = certificateRawData.RawData;
+`}
+
+
+
+## Example II
+
+
+
+{`// When the security clearance is ValidUser, you must specify an access level for each database
+CreateClientCertificateOperation operation =
+    new CreateClientCertificateOperation(
+        "user1", new Dictionary<string, DatabaseAccess>
+\{
+    \{ "Northwind", DatabaseAccess.Admin \}
+\}, SecurityClearance.ValidUser, "myPassword");
+CertificateRawData certificateRawData =
+    store.Maintenance.Server.Send(operation);
+byte[] cert = certificateRawData.RawData;
+`}
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_create-client-certificate-java.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_create-client-certificate-java.mdx new file mode 100644 index 0000000000..a2e5b247e3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_create-client-certificate-java.mdx @@ -0,0 +1,95 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can generate a client certificate using **CreateClientCertificateOperation**. 
+
+* Learn the rationale needed to properly define client certificates in [The RavenDB Security Authorization Approach](../../../../server/security/authentication/certificate-management.mdx#the-ravendb-security-authorization-approach)
+
+
+
+## Syntax
+
+
+
+{`public CreateClientCertificateOperation(String name,
+    Map<String, DatabaseAccess> permissions,
+    SecurityClearance clearance)
+
+public CreateClientCertificateOperation(String name,
+    Map<String, DatabaseAccess> permissions,
+    SecurityClearance clearance,
+    String password)
+`}
+
+
+
+
+
+{`public enum SecurityClearance \{
+    CLUSTER_ADMIN,
+    CLUSTER_NODE,
+    OPERATOR,
+    VALID_USER
+\}
+`}
+
+
+
+
+
+{`public enum DatabaseAccess \{
+    READ,
+    READ_WRITE,
+    ADMIN
+\}
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **name** | String | Name of a certificate |
+| **permissions** | Map<String, DatabaseAccess> | Map with database to access level mapping |
+| **clearance** | SecurityClearance | Access level |
+| **password** | String | Optional certificate password, default: no password |
+
+| Return Value | |
+| ------------- | ----- |
+| **RawData** | client certificate raw data |
+
+## Example I
+
+
+
+{`// With user role set to Cluster Administrator or Operator the user of this certificate
+// is going to have access to all databases
+
+CreateClientCertificateOperation operation = new CreateClientCertificateOperation("admin",
+    null, SecurityClearance.OPERATOR);
+CertificateRawData certificateRawData = store.maintenance().server().send(operation);
+byte[] certificatesZipped = certificateRawData.getRawData();
+`}
+
+
+
+## Example II
+
+
+
+{`// when security clearance is ValidUser, you need to specify per database permissions
+CreateClientCertificateOperation operation = new CreateClientCertificateOperation("user1",
+    Collections.singletonMap("Northwind", DatabaseAccess.ADMIN),
+    SecurityClearance.VALID_USER,
+    "myPassword");
+
+CertificateRawData certificateRawData = store.maintenance().server().send(operation);
+byte[] certificateZipped = certificateRawData.getRawData();
+`}
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_create-client-certificate-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_create-client-certificate-nodejs.mdx new file mode 100644 index 0000000000..64b26c64ed --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_create-client-certificate-nodejs.mdx @@ -0,0 +1,79 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can generate a client certificate using **CreateClientCertificateOperation**. 
+
+* Learn the rationale needed to properly define client certificates in [The RavenDB Security Authorization Approach](../../../../server/security/authentication/certificate-management.mdx#the-ravendb-security-authorization-approach)
+
+
+
+## Usage
+
+
+
+{`const cert1 = await store.maintenance.server.send(
+    new CreateClientCertificateOperation([name], [permissions], [clearance], [password]));
+`}
+
+
+
+`SecurityClearance` options:
+
+* `UnauthenticatedClients`
+* `ClusterAdmin`
+* `ClusterNode`
+* `Operator`
+* `ValidUser`
+
+`DatabaseAccess` options:
+
+* `ReadWrite`
+* `Admin`
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **name** | string | Name of a certificate |
+| **permissions** | Record<string, DatabaseAccess> | Record mapping databases to access level |
+| **clearance** | SecurityClearance | Access level |
+| **password** | string | Optional certificate password, default: no password |
+
+| Return Value | |
+| ------------- | ----- |
+| **RawData** | client certificate raw data |
+
+## Example I
+
+
+
+{`// With user role set to Cluster Administrator or Operator the user of this certificate
+// is going to have access to all databases
+const clientCertificateOperation = await store.maintenance.server.send(
+    new CreateClientCertificateOperation("admin", \{\}, "Operator"));
+const certificateRawData = clientCertificateOperation.rawData;
+`}
+
+
+
+## Example II
+
+
+
+{`// when security clearance is ValidUser, you need to specify per database permissions
+
+const clearance = \{
+    [store.database]: "ReadWrite"
+\};
+
+const clientCertificateOperation = await store.maintenance.server.send(
+    new CreateClientCertificateOperation("user1", clearance, "ValidUser", "myPassword"));
+const certificateRawData = clientCertificateOperation.rawData;
+`}
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_delete-certificate-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_delete-certificate-csharp.mdx new file mode 100644 index 0000000000..7c37121dec --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_delete-certificate-csharp.mdx @@ -0,0 +1,31 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+You can delete a client certificate using the **DeleteCertificateOperation**.
+
+## Syntax
+
+
+
+{`public DeleteCertificateOperation(string thumbprint)
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **thumbprint** | string | The certificate thumbprint |
+
+## Example I
+
+
+
+{`string thumbprint = "a909502dd82ae41433e6f83886b00d4277a32a7b";
+store.Maintenance.Server.Send(new DeleteCertificateOperation(thumbprint));
+`}
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_delete-certificate-java.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_delete-certificate-java.mdx new file mode 100644 index 0000000000..3bfcfd4593 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_delete-certificate-java.mdx @@ -0,0 +1,31 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+You can delete a client certificate using **DeleteCertificateOperation**. 
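+
+For example, to remove every certificate registered under a given name, this operation can be combined with **GetCertificatesOperation**. The following is a minimal sketch, assuming `CertificateDefinition` exposes `getName()` and `getThumbprint()` accessors:
+
+
+
+{`// Fetch a page of registered certificates (first 128 only; page further as needed)
+CertificateDefinition[] definitions = store.maintenance()
+    .server()
+    .send(new GetCertificatesOperation(0, 128));
+
+// Delete every certificate that was registered under the name "user1"
+for (CertificateDefinition definition : definitions) \{
+    if ("user1".equals(definition.getName())) \{
+        store.maintenance()
+            .server()
+            .send(new DeleteCertificateOperation(definition.getThumbprint()));
+    \}
+\}
+`}
+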
+ +## Syntax + + + +{`public DeleteCertificateOperation(String thumbprint); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **thumbprint** | String | The certificate thumbprint | + +## Example I + + + +{`String thumbprint = "a909502dd82ae41433e6f83886b00d4277a32a7b"; +store.maintenance().server().send(new DeleteCertificateOperation(thumbprint)); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_delete-certificate-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_delete-certificate-nodejs.mdx new file mode 100644 index 0000000000..ace71e94f0 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_delete-certificate-nodejs.mdx @@ -0,0 +1,31 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +You can delete a client certificate using the **DeleteCertificateOperation**. + +## Usage + + + +{`await store.maintenance.server.send(new DeleteCertificateOperation([thumbprint])); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **thumbprint** | string | The certificate thumbprint | + +## Example I + + + +{`const thumbprint = "a909502dd82ae41433e6f83886b00d4277a32a7b"; +await store.maintenance.server.send(new DeleteCertificateOperation(thumbprint)); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificate-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificate-csharp.mdx new file mode 100644 index 0000000000..887b498f1e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificate-csharp.mdx @@ -0,0 +1,35 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To get a client certificate by thumbprint use **GetCertificateOperation**. + +## Syntax + + + +{`public GetCertificateOperation(string thumbprint) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **thumbprint** | string | Certificate thumbprint | + +| Return Value | | +| ------------- | ----- | +| `CertificateDefinition` | Certificate definition | + +## Example + + + +{`string thumbprint = "a909502dd82ae41433e6f83886b00d4277a32a7b"; +CertificateDefinition definition = + store.Maintenance.Server.Send(new GetCertificateOperation(thumbprint)); +`} + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificate-java.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificate-java.mdx new file mode 100644 index 0000000000..d5b7bc8440 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificate-java.mdx @@ -0,0 +1,36 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To get a client certificate by thumbprint use **GetCertificateOperation**. 
+ +## Syntax + + + +{`public GetCertificateOperation(String thumbprint) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **thumbprint** | String | Certificate thumbprint | + +| Return Value | | +| ------------- | ----- | +| `CertificateDefinition` | Certificate definition | + +## Example + + + +{`String thumbprint = "a909502dd82ae41433e6f83886b00d4277a32a7b"; +CertificateDefinition definition = store.maintenance() + .server() + .send(new GetCertificateOperation(thumbprint)); +`} + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificates-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificates-csharp.mdx new file mode 100644 index 0000000000..de8b78ec93 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificates-csharp.mdx @@ -0,0 +1,35 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To get client certificates use **GetCertificatesOperation**. + +## Syntax + + + +{`public GetCertificatesOperation(int start, int pageSize) +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **pageSize** | int | Maximum number of records that will be downloaded | +| **start** | int | Number of records that should be skipped | + +| Return Value | | +| ------------- | ----- | +| `CertificateDefinition[]` | Array of certificate definitions | + +## Example + + + +{`CertificateDefinition[] definitions = + store.Maintenance.Server.Send(new GetCertificatesOperation(0, 20)); +`} + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificates-java.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificates-java.mdx new file mode 100644 index 0000000000..f7796a7c27 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_get-certificates-java.mdx @@ -0,0 +1,36 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To get client certificates use **GetCertificatesOperation**. 
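+
+The results are paged: `start` and `pageSize` (see the Syntax below) select which records are returned. To enumerate all registered certificates, a minimal sketch that keeps requesting pages until a short page comes back:
+
+
+
+{`// Page through all registered certificates, 20 at a time
+// (assumes java.util.ArrayList / Collections are imported)
+int pageSize = 20;
+int start = 0;
+List<CertificateDefinition> allDefinitions = new ArrayList<>();
+
+while (true) \{
+    CertificateDefinition[] page = store.maintenance()
+        .server()
+        .send(new GetCertificatesOperation(start, pageSize));
+    Collections.addAll(allDefinitions, page);
+
+    // A page shorter than pageSize means there are no more records
+    if (page.length < pageSize) \{
+        break;
+    \}
+    start += pageSize;
+\}
+`}
+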
+
+## Syntax
+
+
+
+{`public GetCertificatesOperation(int start, int pageSize)
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **pageSize** | int | Maximum number of records that will be downloaded |
+| **start** | int | Number of records that should be skipped |
+
+| Return Value | |
+| ------------- | ----- |
+| `CertificateDefinition[]` | Array of certificate definitions |
+
+## Example
+
+
+
+{`CertificateDefinition[] definitions = store.maintenance()
+    .server()
+    .send(new GetCertificatesOperation(0, 20));
+`}
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_put-client-certificate-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_put-client-certificate-csharp.mdx new file mode 100644 index 0000000000..3797ee28a4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_put-client-certificate-csharp.mdx @@ -0,0 +1,105 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `PutClientCertificateOperation` to register an existing client certificate.
+
+* To register an existing client certificate from the Studio,
+  see [Upload an existing client certificate](../../../../studio/server/certificates/server-management-certificates-view.mdx#upload-an-existing-client-certificate).
+
+* In this article:
+  * [Put client certificate example](../../../../client-api/operations/server-wide/certificates/put-client-certificate.mdx#put-client-certificate-example)
+  * [Syntax](../../../../client-api/operations/server-wide/certificates/put-client-certificate.mdx#syntax)
+
+
+## Put client certificate example
+
+
+
+
+{`X509Certificate2 certificate = new X509Certificate2("c:\\\\path_to_pfx_file");
+
+// Define the put client certificate operation
+var putClientCertificateOp = new PutClientCertificateOperation(
+    "certificateName",
+    certificate,
+    new Dictionary<string, DatabaseAccess>(),
+    SecurityClearance.ClusterAdmin);
+
+// Execute the operation by passing it to Maintenance.Server.Send
+store.Maintenance.Server.Send(putClientCertificateOp);
+`}
+
+
+
+
+{`X509Certificate2 certificate = new X509Certificate2("c:\\\\path_to_pfx_file");
+
+// Define the put client certificate operation
+var putClientCertificateOp = new PutClientCertificateOperation(
+    "certificateName",
+    certificate,
+    new Dictionary<string, DatabaseAccess>(),
+    SecurityClearance.ClusterAdmin);
+
+// Execute the operation by passing it to Maintenance.Server.SendAsync
+await store.Maintenance.Server.SendAsync(putClientCertificateOp);
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public PutClientCertificateOperation(
+    string name,
+    X509Certificate2 certificate,
+    Dictionary<string, DatabaseAccess> permissions,
+    SecurityClearance clearance)
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------|--------------------------------------|-----------------------------------------------------------------------------------------------------|
+| **name** | `string` | A name for the certificate. |
+| **certificate** | `X509Certificate2` | The certificate to register. |
+| **permissions** | `Dictionary<string, DatabaseAccess>` | A dictionary mapping database name to access level.
Relevant only when clearance is `ValidUser`. |
+| **clearance** | `SecurityClearance` | Access level (role) assigned to the certificate. |
+
+
+
+{`// The role assigned to the certificate:
+public enum SecurityClearance
+\{
+    ClusterAdmin,
+    ClusterNode,
+    Operator,
+    ValidUser
+\}
+`}
+
+
+
+
+{`// The access level for a 'ValidUser' security clearance:
+public enum DatabaseAccess
+\{
+    Read,
+    ReadWrite,
+    Admin
+\}
+`}
+
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_put-client-certificate-java.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_put-client-certificate-java.mdx new file mode 100644 index 0000000000..c4816e7e37 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_put-client-certificate-java.mdx @@ -0,0 +1,80 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `PutClientCertificateOperation` to register an existing client certificate.
+
+* To register an existing client certificate from the Studio,
+  see [Upload an existing client certificate](../../../../studio/server/certificates/server-management-certificates-view.mdx#upload-an-existing-client-certificate).
+
+* In this article:
+  * [Put client certificate example](../../../../client-api/operations/server-wide/certificates/put-client-certificate.mdx#put-client-certificate-example)
+  * [Syntax](../../../../client-api/operations/server-wide/certificates/put-client-certificate.mdx#syntax)
+
+
+## Put client certificate example
+
+
+
+{`byte[] rawCert = Files.readAllBytes(Paths.get(""));
+String certificateAsBase64 = Base64.getEncoder().encodeToString(rawCert);
+
+store.maintenance().server().send(
+    new PutClientCertificateOperation(
+        "certificateName",
+        certificateAsBase64,
+        new HashMap<>(),
+        SecurityClearance.CLUSTER_ADMIN));
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public PutClientCertificateOperation(String name,
+    String certificate,
+    Map<String, DatabaseAccess> permissions,
+    SecurityClearance clearance)
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------|-------------------------------|------------------------------------------------------------------------------------------------------|
+| **name** | `String` | A name for the certificate. |
+| **certificate** | `String` | The certificate to register. |
+| **permissions** | `Map<String, DatabaseAccess>` | A dictionary mapping database name to access level.
Relevant only when clearance is `VALID_USER`. |
+| **clearance** | `SecurityClearance` | Access level (role) assigned to the certificate. |
+
+
+
+{`public enum SecurityClearance \{
+    CLUSTER_ADMIN,
+    CLUSTER_NODE,
+    OPERATOR,
+    VALID_USER
+\}
+`}
+
+
+
+
+{`public enum DatabaseAccess \{
+    READ,
+    READ_WRITE,
+    ADMIN
+\}
+`}
+
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_put-client-certificate-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_put-client-certificate-nodejs.mdx new file mode 100644 index 0000000000..072036c0ff --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/_put-client-certificate-nodejs.mdx @@ -0,0 +1,68 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `PutClientCertificateOperation` to register an existing client certificate.
+
+* To register an existing client certificate from the Studio,
+  see [Upload an existing client certificate](../../../../studio/server/certificates/server-management-certificates-view.mdx#upload-an-existing-client-certificate).
+
+* In this article:
+  * [Put client certificate example](../../../../client-api/operations/server-wide/certificates/put-client-certificate.mdx#put-client-certificate-example)
+  * [Syntax](../../../../client-api/operations/server-wide/certificates/put-client-certificate.mdx#syntax)
+
+
+## Put client certificate example
+
+
+
+{`const rawCert = fs.readFileSync("");
+const certificateAsBase64 = rawCert.toString("base64");
+
+const putClientCertificateOp = new PutClientCertificateOperation(
+    "certificateName",
+    certificateAsBase64,
+    \{\},
+    "ClusterAdmin");
+
+await store.maintenance.server.send(putClientCertificateOp);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`const putOperation =
+    new PutClientCertificateOperation(name, certificate, permissions, clearance);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------|----------------------------------|-----------------------------------------------------------------------------------------------------|
+| **name** | `string` | A name for the certificate. |
+| **certificate** | `string` | The certificate to register. |
+| **permissions** | `Record<string, DatabaseAccess>` | A dictionary mapping database name to access level.
Relevant only when clearance is `ValidUser`. | +| **clearance** | `SecurityClearance` | Access level (role) assigned to the certificate. | + +* `SecurityClearance` options: + * `ClusterAdmin` + * `ClusterNode` + * `Operator` + * `ValidUser` + +* `DatabaseAccess ` options: + * `Read` + * `ReadWrite` + * `Admin` + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/create-client-certificate.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/create-client-certificate.mdx new file mode 100644 index 0000000000..d19dab74ce --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/create-client-certificate.mdx @@ -0,0 +1,42 @@ +--- +title: "Operations: Server: How to Generate a Client Certificate" +hide_table_of_contents: true +sidebar_label: Create Client Certificate +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CreateClientCertificateCsharp from './_create-client-certificate-csharp.mdx'; +import CreateClientCertificateJava from './_create-client-certificate-java.mdx'; +import CreateClientCertificateNodejs from './_create-client-certificate-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/delete-certificate.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/delete-certificate.mdx new file mode 100644 index 0000000000..c1cccb2259 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/delete-certificate.mdx @@ -0,0 +1,35 @@ +--- +title: "Operations: Server: How to Delete a Client Certificate" +hide_table_of_contents: true +sidebar_label: Delete Certificate +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeleteCertificateCsharp from './_delete-certificate-csharp.mdx'; +import DeleteCertificateJava from './_delete-certificate-java.mdx'; +import DeleteCertificateNodejs from './_delete-certificate-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/get-certificate.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/get-certificate.mdx new file mode 100644 index 0000000000..570bb84a57 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/get-certificate.mdx @@ -0,0 +1,29 @@ +--- +title: "Operations: Server: How to Get a Certificate" +hide_table_of_contents: true +sidebar_label: Get Certificate +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetCertificateCsharp from './_get-certificate-csharp.mdx'; +import GetCertificateJava from './_get-certificate-java.mdx'; + +export const supportedLanguages = ["csharp", "java"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/get-certificates.mdx 
b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/get-certificates.mdx new file mode 100644 index 0000000000..0754296533 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/get-certificates.mdx @@ -0,0 +1,29 @@ +---
+title: "Operations: Server: How to Get Certificates"
+hide_table_of_contents: true
+sidebar_label: Get Certificates
+sidebar_position: 2
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GetCertificatesCsharp from './_get-certificates-csharp.mdx';
+import GetCertificatesJava from './_get-certificates-java.mdx';
+
+export const supportedLanguages = ["csharp", "java"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/put-client-certificate.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/put-client-certificate.mdx new file mode 100644 index 0000000000..22d405b2ff --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/certificates/put-client-certificate.mdx @@ -0,0 +1,36 @@ +---
+title: "Put Client Certificate Operation"
+hide_table_of_contents: true
+sidebar_label: Put Client Certificate
+sidebar_position: 4
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import PutClientCertificateCsharp from './_put-client-certificate-csharp.mdx';
+import PutClientCertificateJava from './_put-client-certificate-java.mdx';
+import PutClientCertificateNodejs from './_put-client-certificate-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/compact-database.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/compact-database.mdx new file mode 100644 index 0000000000..62646c3d4c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/compact-database.mdx @@ -0,0 +1,51 @@ +---
+title: "Compact Database Operation"
+hide_table_of_contents: true
+sidebar_label: Compact Database
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import CompactDatabaseCsharp from './_compact-database-csharp.mdx';
+import CompactDatabasePython from './_compact-database-python.mdx';
+import CompactDatabasePhp from './_compact-database-php.mdx';
+import CompactDatabaseNodejs from './_compact-database-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_category_.json b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_category_.json new file mode 100644 index 0000000000..a20298e082 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_category_.json @@ -0,0 +1,4 @@ +{
+  "position": 13,
+  "label": "Configuration"
+} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_get-serverwide-client-configuration-csharp.mdx 
b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_get-serverwide-client-configuration-csharp.mdx new file mode 100644 index 0000000000..28628dfc5e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_get-serverwide-client-configuration-csharp.mdx @@ -0,0 +1,60 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* It is recommended to first refer to the [put server-wide client-configuration](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx) article for general knowledge. + +* Use `GetServerWideClientConfigurationOperation` to get the current server-wide Client-Configuration set on the server. + +* In this page: + * [Get client-configuration](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#get-client-configuration) + * [Syntax](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#syntax) + + +## Get client-configuration + + + + +{`// Define the get client-configuration operation +var getServerWideClientConfigOp = new GetServerWideClientConfigurationOperation(); + +// Execute the operation by passing it to Maintenance.Server.Send +ClientConfiguration config = store.Maintenance.Server.Send(getServerWideClientConfigOp); +`} + + + + +{`// Define the get client-configuration operation +var getServerWideClientConfigOp = new GetServerWideClientConfigurationOperation(); + +// Execute the operation by passing it to Maintenance.Server.SendAsync +ClientConfiguration config = + await store.Maintenance.Server.SendAsync(getServerWideClientConfigOp); +`} + + + + + + +## Syntax + + + +{`public GetServerWideClientConfigurationOperation() +`} + + + +| Return Value | | +|-----------------------|------------------------------------------------| +| `ClientConfiguration` | Configuration which will be used by the Client | + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_get-serverwide-client-configuration-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_get-serverwide-client-configuration-nodejs.mdx new file mode 100644 index 0000000000..5c535a88c2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_get-serverwide-client-configuration-nodejs.mdx @@ -0,0 +1,59 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* It is recommended to first refer to the [put server-wide client-configuration](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx) article for general knowledge. + +* Use `GetServerWideClientConfigurationOperation` to get the current server-wide Client-Configuration set on the server. 
+ +* In this page: + * [Get client-configuration](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#get-client-configuration) + * [Syntax](../../../../client-api/operations/maintenance/configuration/get-client-configuration.mdx#syntax) + + +## Get client-configuration + + + +{`// Define the get client-configuration operation +const getServerWideClientConfigOp = new GetServerWideClientConfigurationOperation(); + +// Execute the operation by passing it to maintenance.server.send +const config = await documentStore.maintenance.server.send(getServerWideClientConfigOp); +`} + + + + + +## Syntax + + + +{`const getServerWideClientConfigOp = new GetServerWideClientConfigurationOperation(); +`} + + + + + +{`// Executing the operation returns the client-configuration object: +\{ + identityPartsSeparator, + etag, + disabled, + maxNumberOfRequestsPerSession, + readBalanceBehavior, + loadBalanceBehavior, + loadBalancerContextSeed +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_put-serverwide-client-configuration-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_put-serverwide-client-configuration-csharp.mdx new file mode 100644 index 0000000000..663e7a7231 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_put-serverwide-client-configuration-csharp.mdx @@ -0,0 +1,104 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The server-wide Client-Configuration is a set of configuration options that are set __on the server__ and apply to any client when communicating with __any__ database in the cluster. + See the available configuration options in the article about [put client-configuration for database](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured). + +* To set the server-wide Client-Configuration on the server: + + * Use `PutServerWideClientConfigurationOperation` from the client code. + See the example below. + + * Or, set the server-wide Client-Configuration from the Studio [Client-Configuration view](../../../../studio/server/client-configuration.mdx). + +* A Client-Configuration that is set on the server __overrides__ the initial Client-Configuration that is set on the client when creating the Document Store. + A Client-Configuration that is set on the server for the [database level](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx) + will __override__ the server-wide Client-Configuration for that database. + +* Once the Client-Configuration is modified on the server, the running client will [receive the updated settings](../../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date) + the next time it makes a request to the database. 
+
+* In this page:
+  * [Put client-configuration (server-wide)](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx#put-client-configuration-(server-wide))
+  * [Syntax](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx#syntax)
+
+
+## Put client-configuration (server-wide)
+
+
+
+
+{`// Define the client-configuration object
+ClientConfiguration clientConfiguration = new ClientConfiguration
+{
+    MaxNumberOfRequestsPerSession = 100,
+    ReadBalanceBehavior = ReadBalanceBehavior.FastestNode
+    // ...
+};
+
+// Define the put server-wide client-configuration operation, pass the configuration
+var putServerWideClientConfigOp = new PutServerWideClientConfigurationOperation(clientConfiguration);
+
+// Execute the operation by passing it to Maintenance.Server.Send
+store.Maintenance.Server.Send(putServerWideClientConfigOp);
+`}
+
+
+
+
+{`// Define the client-configuration object
+ClientConfiguration clientConfiguration = new ClientConfiguration
+{
+    MaxNumberOfRequestsPerSession = 100,
+    ReadBalanceBehavior = ReadBalanceBehavior.FastestNode
+    // ...
+};
+
+// Define the put server-wide client-configuration operation, pass the configuration
+var putServerWideClientConfigOp = new PutServerWideClientConfigurationOperation(clientConfiguration);
+
+// Execute the operation by passing it to Maintenance.Server.SendAsync
+await store.Maintenance.Server.SendAsync(putServerWideClientConfigOp);
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public PutServerWideClientConfigurationOperation(ClientConfiguration configuration)
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------|-----------------------|-----------------------------------------------------------------------------------------|
+| __configuration__ | `ClientConfiguration` | Client configuration that will be set on the server
(server-wide, for all databases) | + + + +{`public class ClientConfiguration +\{ + private char? _identityPartsSeparator; + public long Etag \{ get; set; \} + public bool Disabled \{ get; set; \} + public int? MaxNumberOfRequestsPerSession \{ get; set; \} + public ReadBalanceBehavior? ReadBalanceBehavior \{ get; set; \} + public LoadBalanceBehavior? LoadBalanceBehavior \{ get; set; \} + public int? LoadBalancerContextSeed \{ get; set; \} + public char? IdentityPartsSeparator; // can be any character except '|' +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_put-serverwide-client-configuration-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_put-serverwide-client-configuration-nodejs.mdx new file mode 100644 index 0000000000..196d5546c7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/_put-serverwide-client-configuration-nodejs.mdx @@ -0,0 +1,83 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The server-wide Client-Configuration is a set of configuration options that are set __on the server__ and apply to any client when communicating with __any__ database in the cluster. + See the available configuration options in the article about [put client-configuration for database](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx#what-can-be-configured). + +* To set the server-wide Client-Configuration on the server: + + * Use `PutServerWideClientConfigurationOperation` from the client code. + See the example below. + + * Or, set the server-wide Client-Configuration from the Studio [Client-Configuration view](../../../../studio/server/client-configuration.mdx). + +* A Client-Configuration that is set on the server __overrides__ the initial Client-Configuration that is set on the client when creating the Document Store. + A Client-Configuration that is set on the server for the [database level](../../../../client-api/operations/maintenance/configuration/put-client-configuration.mdx) + will __override__ the server-wide Client-Configuration for that database. + +* Once the Client-Configuration is modified on the server, the running client will [receive the updated settings](../../../../client-api/configuration/load-balance/overview.mdx#keeping-the-client-topology-up-to-date) + the next time it makes a request to the database. +* In this page: + * [Put client-configuration (server-wide)](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx#put-client-configuration-(server-wide)) + * [Syntax](../../../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx#syntax) + + +## Put client-configuration (server-wide) + + + +{`// Define the client-configuration object +const clientConfiguration = \{ + maxNumberOfRequestsPerSession: 200, + readBalanceBehavior: "FastestNode", + // ... 
+\}; + +// Define the put server-wide client-configuration operation, pass the configuration +const putServerWideClientConfigOp = + new PutServerWideClientConfigurationOperation(clientConfiguration); + +// Execute the operation by passing it to maintenance.server.send +await documentStore.maintenance.server.send(putServerWideClientConfigOp); +`} + + + + + +## Syntax + + + +{`const putServerWideClientConfigOp = new PutServerWideClientConfigurationOperation(configuration); +`} + + + +| Parameter | Type | Description | +|-------------------|----------|-----------------------------------------------------------------------------------------| +| __configuration__ | `object` | Client configuration that will be set on the server
(server-wide, for all databases) | + + + +{`// The client-configuration object +\{ + identityPartsSeparator, + etag, + disabled, + maxNumberOfRequestsPerSession, + readBalanceBehavior, + loadBalanceBehavior, + loadBalancerContextSeed +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/get-serverwide-client-configuration.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/get-serverwide-client-configuration.mdx new file mode 100644 index 0000000000..b3cd75e182 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/get-serverwide-client-configuration.mdx @@ -0,0 +1,32 @@ +--- +title: "Get Client Configuration Operation (Server-Wide)" +hide_table_of_contents: true +sidebar_label: Get Server Wide Client Configuration +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetServerwideClientConfigurationCsharp from './_get-serverwide-client-configuration-csharp.mdx'; +import GetServerwideClientConfigurationNodejs from './_get-serverwide-client-configuration-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx new file mode 100644 index 0000000000..747e96ebd3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx @@ -0,0 +1,37 @@ +--- +title: "Put Client Configuration Operation (Server-Wide)" +hide_table_of_contents: true +sidebar_label: Put Server Wide Client Configuration +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PutServerwideClientConfigurationCsharp from './_put-serverwide-client-configuration-csharp.mdx'; +import PutServerwideClientConfigurationNodejs from './_put-serverwide-client-configuration-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/create-database.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/create-database.mdx new file mode 100644 index 0000000000..cd7c943779 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/create-database.mdx @@ -0,0 +1,31 @@ +--- +title: "Create Database Operation" +hide_table_of_contents: true +sidebar_label: Create Database +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CreateDatabaseCsharp from './_create-database-csharp.mdx'; +import CreateDatabaseJava from './_create-database-java.mdx'; + +export const supportedLanguages = ["csharp", "java"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/delete-database.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/delete-database.mdx new file mode 100644 index 0000000000..3ca84bce26 --- /dev/null +++ 
b/versioned_docs/version-7.1/client-api/operations/server-wide/delete-database.mdx @@ -0,0 +1,29 @@
+---
+title: "Operations: Server: How to Delete a Database?"
+hide_table_of_contents: true
+sidebar_label: Delete Databases
+sidebar_position: 3
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import DeleteDatabaseCsharp from './_delete-database-csharp.mdx';
+import DeleteDatabaseJava from './_delete-database-java.mdx';
+
+export const supportedLanguages = ["csharp", "java"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/get-build-number.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/get-build-number.mdx new file mode 100644 index 0000000000..e5267bff6b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/get-build-number.mdx @@ -0,0 +1,24 @@
+---
+title: "Operations: Server: How to Get Server Build Number"
+hide_table_of_contents: true
+sidebar_label: Get Build Number
+sidebar_position: 4
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GetBuildNumberCsharp from './_get-build-number-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/get-database-names.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/get-database-names.mdx new file mode 100644 index 0000000000..cd5bf5e7aa --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/get-database-names.mdx @@ -0,0 +1,29 @@
+---
+title: "Operations: Server: How to Get the Names of Databases on a Server"
+hide_table_of_contents: true
+sidebar_label: Get Database Names
+sidebar_position: 5
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GetDatabaseNamesCsharp from './_get-database-names-csharp.mdx';
+import GetDatabaseNamesJava from './_get-database-names-java.mdx';
+
+export const supportedLanguages = ["csharp", "java"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/logs/_category_.json b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/_category_.json new file mode 100644 index 0000000000..df73a11b53 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/_category_.json @@ -0,0 +1,4 @@
+{
+    "position": 14,
+    "label": "Logs"
+} \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/logs/_get-logs-configuration-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/_get-logs-configuration-csharp.mdx new file mode 100644 index 0000000000..02e84b13b8 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/_get-logs-configuration-csharp.mdx @@ -0,0 +1,66 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To get the server logs configuration, use **GetLogsConfigurationOperation** from `Maintenance.Server`.
+
+## Syntax
+
+
+
+{`public GetLogsConfigurationOperation()
+`}
+
+
+
+### Return Value
+
+The result of executing `GetLogsConfigurationOperation` is a **GetLogsConfigurationResult** object:
+
+
+
+{`public class GetLogsConfigurationResult
+\{
+    public LogMode CurrentMode \{ get; set; \}
+
+    public LogMode Mode \{ get; set; \}
+
+    public string Path \{ get; set; \}
+
+    public bool UseUtcTime \{ get; set; \}
+\}
+`}
+
+
+
+| Property | Description |
+|-----------------|-----------------------------------------------------------------------------------------------|
+| **CurrentMode** | The logging mode that is currently active |
+| **Mode** | The mode written in the configuration file, which will be used after a server restart |
+| **Path** | Path to which logs will be written |
+| **UseUtcTime** | Indicates whether logs are written in UTC or in server local time |
+
+## Example
+
+
+
+
+{`GetLogsConfigurationResult logsConfiguration = store
+    .Maintenance
+    .Server
+    .Send(new GetLogsConfigurationOperation());
+`}
+
+
+
+
+{`GetLogsConfigurationResult logsConfiguration = await store
+    .Maintenance
+    .Server
+    .SendAsync(new GetLogsConfigurationOperation());
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/logs/_set-logs-configuration-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/_set-logs-configuration-csharp.mdx new file mode 100644 index 0000000000..16a240eab4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/_set-logs-configuration-csharp.mdx @@ -0,0 +1,61 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To set the server logs configuration, use **SetLogsConfigurationOperation** from `Maintenance.Server`.
+The configuration set by this operation is not persisted - after a server restart, logging reverts to the mode defined in the server's configuration file.
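+
+To make a logging mode survive restarts, change it in the server configuration instead of (or in addition to) using this operation.
+A minimal sketch, assuming the standard `settings.json` configuration file and the `Logs.Mode` configuration option (verify against your server setup):
+
+
+{`\{
+    "Logs.Mode": "Information"
+\}
+`}
+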
+
+## Syntax
+
+
+
+{`public SetLogsConfigurationOperation(Parameters parameters)
+`}
+
+
+
+
+
+
+{`public class Parameters
+\{
+    public LogMode Mode \{ get; set; \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| -------- | --------- | ------------------------------ |
+| **Mode** | `LogMode` | Logging mode (level) to be set |
+
+## Example
+
+
+
+
+{`store
+    .Maintenance
+    .Server
+    .Send(new SetLogsConfigurationOperation(
+        new SetLogsConfigurationOperation.Parameters
+        {
+            Mode = LogMode.Information
+        }));
+`}
+
+
+
+
+{`await store
+    .Maintenance
+    .Server
+    .SendAsync(new SetLogsConfigurationOperation(
+        new SetLogsConfigurationOperation.Parameters
+        {
+            Mode = LogMode.Information
+        }));
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/logs/get-logs-configuration.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/get-logs-configuration.mdx new file mode 100644 index 0000000000..f4971d4e44 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/get-logs-configuration.mdx @@ -0,0 +1,24 @@
+---
+title: "Operations: Server: How to Get Logs Configuration"
+hide_table_of_contents: true
+sidebar_label: Get Logs Configuration
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import GetLogsConfigurationCsharp from './_get-logs-configuration-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/logs/set-logs-configuration.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/set-logs-configuration.mdx new file mode 100644 index 0000000000..5b98c76fdb --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/logs/set-logs-configuration.mdx @@ -0,0 +1,24 @@
+---
+title: "Operations: Server: How to Set Logs Configuration"
+hide_table_of_contents: true
+sidebar_label: Set Logs Configuration
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import SetLogsConfigurationCsharp from './_set-logs-configuration-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/modify-conflict-solver.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/modify-conflict-solver.mdx new file mode 100644 index 0000000000..b768cf3042 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/modify-conflict-solver.mdx @@ -0,0 +1,24 @@
+---
+title: "Operations: Server: How to Modify a Conflict Solver"
+hide_table_of_contents: true
+sidebar_label: Modify Conflict Solver
+sidebar_position: 6
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import ModifyConflictSolverCsharp from './_modify-conflict-solver-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/promote-database-node.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/promote-database-node.mdx new file mode 100644 index 0000000000..4be72dd5ea --- /dev/null +++
b/versioned_docs/version-7.1/client-api/operations/server-wide/promote-database-node.mdx @@ -0,0 +1,25 @@
+---
+title: "Operations: Server: How to Promote a Database Node?"
+hide_table_of_contents: true
+sidebar_label: Promote Database Node
+sidebar_position: 7
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import PromoteDatabaseNodeCsharp from './_promote-database-node-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/reorder-database-members.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/reorder-database-members.mdx new file mode 100644 index 0000000000..041ba04ef1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/reorder-database-members.mdx @@ -0,0 +1,24 @@
+---
+title: "Operations: Server: How to Reorder Database Members?"
+hide_table_of_contents: true
+sidebar_label: Reorder Database Members
+sidebar_position: 10
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import ReorderDatabaseMembersCsharp from './_reorder-database-members-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/restore-backup.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/restore-backup.mdx new file mode 100644 index 0000000000..a57aa82308 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/restore-backup.mdx @@ -0,0 +1,51 @@
+---
+title: "Operations: Server: How to Restore a Database from a Backup"
+hide_table_of_contents: true
+sidebar_label: Restore Backup
+sidebar_position: 8
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import RestoreBackupCsharp from './_restore-backup-csharp.mdx';
+import RestoreBackupJava from './_restore-backup-java.mdx';
+import RestoreBackupNodejs from './_restore-backup-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/_category_.json b/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/_category_.json new file mode 100644 index 0000000000..cae272e9eb --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/_category_.json @@ -0,0 +1,4 @@
+{
+    "position": 15,
+    "label": "Sorters"
+} \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/_put-sorter-server-wide-csharp.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/_put-sorter-server-wide-csharp.mdx new file mode 100644 index 0000000000..4e50c5c433 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/_put-sorter-server-wide-csharp.mdx @@ -0,0 +1,115 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Lucene indexing engine allows you to create your own __Custom Sorters__
+  where you can define how query results will be
ordered based on your specific requirements. + +* Use `PutServerWideSortersOperation` to deploy a custom sorter to the RavenDB server. + Once deployed, you can use it to sort query results for all queries made on __all databases__ in your cluster. + +* To deploy a custom sorter that will apply only to the database scoped to your [Document Store](../../../../client-api/setting-up-default-database.mdx), + see [put custom sorter](../../../../client-api/operations/maintenance/sorters/put-sorter.mdx). + +* A custom sorter can also be uploaded server-wide from the [Studio](../../../../studio/database/settings/custom-sorters.mdx). + +* In this page: + * [Put custom sorter server-wide](../../../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx#put-custom-sorter-server-wide) + * [Syntax](../../../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx#syntax) + + +## Put custom sorter server-wide + +* First, create your own sorter class that inherits from the Lucene class [Lucene.Net.Search.FieldComparator](https://lucenenet.apache.org/docs/3.0.3/df/d91/class_lucene_1_1_net_1_1_search_1_1_field_comparator.html). + +* Then, send the custom sorter to the server using the `PutServerWideSortersOperation`. + + + + +{`// Assign the code of your custom sorter as a \`string\` +string mySorterCode = ""; + +// Create the \`SorterDefinition\` object +var customSorterDefinition = new SorterDefinition +{ + // The sorter Name must be the same as the sorter's class name in your code + Name = "MySorter", + // The Code must be compilable and include all necessary using statements + Code = mySorterCode +}; + +// Define the put sorters operation, pass the sorter definition +// Note: multiple sorters can be passed, see syntax below +var putSortersServerWideOp = new PutServerWideSortersOperation(customSorterDefinition); + +// Execute the operation by passing it to Maintenance.Server.Send +store.Maintenance.Server.Send(putSortersServerWideOp); +`} + + + + +{`// Assign the code of your custom sorter as a \`string\` +string mySorterCode = ""; + +// Create the \`SorterDefinition\` object +var customSorterDefinition = new SorterDefinition +{ + // The sorter Name must be the same as the sorter's class name in your code + Name = "MySorter", + // The Code must be compilable and include all necessary using statements + Code = mySorterCode +}; + +// Define the put sorters operation, pass the sorter definition +// Note: multiple sorters can be passed, see syntax below +var putSortersServerWideOp = new PutServerWideSortersOperation(customSorterDefinition); + +// Execute the operation by passing it to Maintenance.Server.SendAsync +await store.Maintenance.Server.SendAsync(putSortersServerWideOp); +`} + + + + + + +You can now order your query results using the custom sorter. +A query example is available [here](../../../../client-api/session/querying/sort-query-results.mdx#custom-sorters). 
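+
+The sorter itself is ordinary C# source code, passed to the server as the `Code` string.
+A minimal sketch of what the `mySorterCode` string might contain - a hypothetical `MySorter` skeleton shown only to illustrate the expected shape, not a complete implementation:
+
+
+{`using System;
+using Lucene.Net.Index;
+using Lucene.Net.Search;
+
+// The class name must match the Name in the SorterDefinition ("MySorter"),
+// and the code must compile on the server, so include all required using statements.
+public class MySorter : FieldComparator
+\{
+    // Implement the FieldComparator members (e.g. Compare, SetBottom, CompareBottom,
+    // Copy, SetNextReader, and the slot value accessor) with your custom ordering logic.
+\}
+`}
+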
+ + + + + +## Syntax + + + +{`public PutServerWideSortersOperation(params SorterDefinition[] sortersToAdd) +`} + + + +| Parameter | Type | Description | +|-------------------|----------------------|------------------------------------------------------| +| __sortersToAdd__ | `SorterDefinition[]` | One or more Sorter Definitions to send to the server | + + + + +{`public class SorterDefinition +\{ + public string Name \{ get; set; \} + public string Code \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/_put-sorter-server-wide-nodejs.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/_put-sorter-server-wide-nodejs.mdx new file mode 100644 index 0000000000..fc32e216ce --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/_put-sorter-server-wide-nodejs.mdx @@ -0,0 +1,85 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Lucene indexing engine allows you to create your own __Custom Sorters__ + where you can define how query results will be ordered based on your specific requirements. + +* Use `PutServerWideSortersOperation` to deploy a custom sorter to the RavenDB server. + Once deployed, you can use it to sort query results for all queries made on __all databases__ in your cluster. + +* To deploy a custom sorter that will apply only to the database scoped to your [Document Store](../../../../client-api/setting-up-default-database.mdx), + see [put custom sorter](../../../../client-api/operations/maintenance/sorters/put-sorter.mdx). + +* A custom sorter can also be uploaded server-wide from the [Studio](../../../../studio/database/settings/custom-sorters.mdx). + +* In this page: + * [Put custom sorter server-wide](../../../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx#put-custom-sorter-server-wide) + * [Syntax](../../../../client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx#syntax) + + +## Put custom sorter server-wide + +* First, create your own sorter class that inherits from the Lucene class [Lucene.Net.Search.FieldComparator](https://lucenenet.apache.org/docs/3.0.3/df/d91/class_lucene_1_1_net_1_1_search_1_1_field_comparator.html). + +* Then, send the custom sorter to the server using the `PutServerWideSortersOperation`. + + + +{`// Create the sorter definition object +const sorterDefinition = \{ + // The sorter name must be the same as the sorter's class name in your code + name: "MySorter", + // The code must be compilable and include all necessary using statements (C# code) + code: "" +\}; + +// Define the put sorters operation, pass the sorter definition +const putSortersServerWideOp = new PutServerWideSortersOperation(sorterDefinition); + +// Execute the operation by passing it to maintenance.server.send +await documentStore.maintenance.server.send(putSortersServerWideOp ); +`} + + + + + +You can now order your query results using the custom sorter. +A query example is available [here](../../../../client-api/session/querying/sort-query-results.mdx#custom-sorters). 
+ + + + + +## Syntax + + + +{`const putSortersServerWideOp = new PutServerWideSortersOperation(sortersToAdd); +`} + + + +| Parameter | Type | Description | +|-------------------|---------------|------------------------------------------------------| +| __sortersToAdd__ | `...object[]` | One or more Sorter Definitions to send to the server | + + + + +{`// The sorter definition object +\{ + name: string; + code: string; +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx new file mode 100644 index 0000000000..edaf3d3879 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/sorters/put-sorter-server-wide.mdx @@ -0,0 +1,39 @@ +--- +title: "Put Custom Sorter (Server-Wide) Operation" +hide_table_of_contents: true +sidebar_label: Put Custom Sorter +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PutSorterServerWideCsharp from './_put-sorter-server-wide-csharp.mdx'; +import PutSorterServerWideNodejs from './_put-sorter-server-wide-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/toggle-databases-state.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/toggle-databases-state.mdx new file mode 100644 index 0000000000..2866753094 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/toggle-databases-state.mdx @@ -0,0 +1,45 @@ +--- +title: "Toggle Databases State Operation (Enable / Disable)" +hide_table_of_contents: true +sidebar_label: Toggle Databases State +sidebar_position: 9 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ToggleDatabasesStateCsharp from './_toggle-databases-state-csharp.mdx'; +import ToggleDatabasesStatePython from './_toggle-databases-state-python.mdx'; +import ToggleDatabasesStatePhp from './_toggle-databases-state-php.mdx'; +import ToggleDatabasesStateNodejs from './_toggle-databases-state-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/server-wide/toggle-dynamic-database-distribution.mdx b/versioned_docs/version-7.1/client-api/operations/server-wide/toggle-dynamic-database-distribution.mdx new file mode 100644 index 0000000000..2f994e707a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/server-wide/toggle-dynamic-database-distribution.mdx @@ -0,0 +1,30 @@ +--- +title: "Operations: Server: Toggle Dynamic Database Distribution" +hide_table_of_contents: true +sidebar_label: Toggle Dynamic Database Distribution +sidebar_position: 11 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ToggleDynamicDatabaseDistributionCsharp from './_toggle-dynamic-database-distribution-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/operations/what-are-operations.mdx 
b/versioned_docs/version-7.1/client-api/operations/what-are-operations.mdx new file mode 100644 index 0000000000..5f9223a814 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/operations/what-are-operations.mdx @@ -0,0 +1,51 @@
+---
+title: "What are Operations"
+hide_table_of_contents: true
+sidebar_label: What are Operations
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import WhatAreOperationsCsharp from './_what-are-operations-csharp.mdx';
+import WhatAreOperationsJava from './_what-are-operations-java.mdx';
+import WhatAreOperationsPython from './_what-are-operations-python.mdx';
+import WhatAreOperationsPhp from './_what-are-operations-php.mdx';
+import WhatAreOperationsNodejs from './_what-are-operations-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/rest-api/_category_.json b/versioned_docs/version-7.1/client-api/rest-api/_category_.json new file mode 100644 index 0000000000..506cea47c7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/rest-api/_category_.json @@ -0,0 +1,4 @@
+{
+    "position": 19,
+    "label": "REST API"
+} \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/rest-api/document-commands/_category_.json b/versioned_docs/version-7.1/client-api/rest-api/document-commands/_category_.json new file mode 100644 index 0000000000..65668e24f1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/rest-api/document-commands/_category_.json @@ -0,0 +1,4 @@
+{
+    "position": 1,
+    "label": "Document Commands"
+} \ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/rest-api/document-commands/batch-commands.mdx b/versioned_docs/version-7.1/client-api/rest-api/document-commands/batch-commands.mdx new file mode 100644 index 0000000000..8079cd8eee --- /dev/null +++ b/versioned_docs/version-7.1/client-api/rest-api/document-commands/batch-commands.mdx @@ -0,0 +1,923 @@
+---
+title: "Batch Commands"
+hide_table_of_contents: true
+sidebar_label: Batch Commands
+sidebar_position: 5
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Batch Commands
+
+
+* Use this endpoint with the **`POST`** method to send multiple commands in one request:
+`/databases//bulk_docs`
+
+* The commands are sent as a JSON array in the [request body](../../../client-api/rest-api/document-commands/batch-commands.mdx#body).
+
+* All the commands in the batch will either succeed or fail as a **single transaction**. Changes will not be visible until
+the entire batch completes.
+
+* [Options](../../../client-api/rest-api/document-commands/batch-commands.mdx#batch-options) can be set to make the server wait
+for indexing and replication to complete before returning.
+ +* In this page: + * [Basic Example](../../../client-api/rest-api/document-commands/batch-commands.mdx#basic-example) + * [Request Format](../../../client-api/rest-api/document-commands/batch-commands.mdx#request-format) + * [Commands](../../../client-api/rest-api/document-commands/batch-commands.mdx#commands) + * [Response Format](../../../client-api/rest-api/document-commands/batch-commands.mdx#response-format) + * [More Examples](../../../client-api/rest-api/document-commands/batch-commands.mdx#more-examples) + +## Basic Example + +This is a cURL request to a database named "Example" on our [playground server](http://live-test.ravendb.net). +It batches two commands: + +1. Upload a new document called "person/1". +2. Execute a [patch](../../../client-api/operations/patching/single-document.mdx) on that same document. + + + +{`curl -X POST "http://live-test.ravendb.net/databases/Example/bulk_docs" +-H "Content-Type: application/json" +-d "\{ + \\"Commands\\": [ + \{ + \\"Id\\": \\"person/1\\", + \\"ChangeVector\\": null, + \\"Document\\": \{ + \\"Name\\": \\"John Smith\\" + \}, + \\"Type\\": \\"PUT\\" + \}, + \{ + \\"Id\\": \\"person/1\\", + \\"ChangeVector\\": null, + \\"Patch\\": \{ + \\"Script\\": \\"this.Name = 'Jane Doe';\\", + \\"Values\\": \{\} + \}, + \\"Type\\": \\"PATCH\\" + \} + ] +\}" +`} + + +Linebreaks are added for clarity. + + +#### Response: + + + +{`HTTP/1.1 201 Created +Server: nginx +Date: Sun, 15 Sep 2019 14:12:30 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "Type": "PUT", + "@id": "person/1", + "@collection": "@empty", + "@change-vector": "A:1-urx5nDNUT06FCpCon1wCyA", + "@last-modified": "2019-09-15T14:12:30.0425811" + \}, + \{ + "Id": "person/1", + "ChangeVector": "A:2-urx5nDNUT06FCpCon1wCyA", + "LastModified": "2019-09-15T14:12:30.0495095", + "Type": "PATCH", + "PatchStatus": "Patched", + "Debug": null + \} + ] +\} +`} + + + + + +## Request Format + +This is the general format of a cURL request with a batch of commands that _does not_ include a Put Attachment Command +(see the format for batching a Put Attachment Command [below](../../../client-api/rest-api/document-commands/batch-commands.mdx#put-attachment-command)): + + + +{`curl -X POST "/databases//bulk_docs?" +-H "Content-Type: application/json" +-d "\{ + \\"Commands\\": [ + \{ \}, + ... + ] +\}" +`} + + +Linebreaks are added for clarity. + + +#### Query String + +The query string takes [batch options](../../../client-api/rest-api/document-commands/batch-commands.mdx#batch-options), which +can make the server wait for indexing and replication to finish before responding. + + +#### Header + +The header `Content-Type` is required and takes one of two values: + +* `application/json` - if the batch _does not_ include a Put Attachment Command. +* `multipart/mixed; boundary=` - if the batch [_does_](../../../client-api/rest-api/document-commands/batch-commands.mdx#put-attachment-command) +include a Put Attachment Command. The "separator" is an arbitrary string used to demarcate the attachment streams and +commands array. + + +#### Body + +The body contains a JSON array of commands. + + + +{`-d "\{ + \\"Commands\\": [ + \{ + \\"Id\\": \\"\\", + ... + \\"Type\\": \\"\\" + \}, + \{ \}, + ... 
+ ] +\}" +`} + + +Depending on the shell you're using to run cURL, you will probably need to escape all double quotes within the request body +using a backslash: `"` -> `\"`. + +The following commands can be sent using the batch command: + +* [Put Document Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#put-document-command) +* [Patch Document Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#patch-document-command) +* [Delete Document Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#delete-document-command) +* [Delete by Prefix Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#delete-by-prefix-command) +* [Put Attachment Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#put-attachment-command) +* [Delete Attachment Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#delete-attachment-command) +### Batch Options + +These options, configured in the query string, make the server wait until indexing or replication have completed before responding. If these have not +completed before a specified amount of time has passed, the server can either respond as normal or throw an exception. + +This is the general format of a cURL request that includes batch options in the query string: + + + +{`curl -X POST "/databases//bulk_docs?= + &= + &= + ..." +-H "Content-Type: " +-d "\{ \}" +`} + + +Linebreaks are added for clarity. + +#### Indexing Options + +| Query Parameter | Type | Description | +|---------------------------|------------|----------------------------------------------------------------------------------------------------------------------------------------------| +| **waitForIndexesTimeout** | `TimeSpan` | The amount of time to wait for indexing to complete. [Format of `TimeSpan`](https://docs.microsoft.com/en-us/dotnet/api/system.timespan). | +| **waitForIndexThrow** | `boolean` | Set to `true` to throw an exception if the indexing doesn't complete before `waitForIndexesTimeout`.
Set to `false` to receive the normal response body. | +| **waitForSpecificIndex** | `string[]` | Wait only for the listed indexes to finish updating, rather than all indexes. | + +#### Replication Options + +| Query Parameter | Type | Description | +|-------------------------------------|------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **waitForReplicasTimeout** | `TimeSpan` | The amount of time to wait for replication to complete. [Format of `TimeSpan`](https://docs.microsoft.com/en-us/dotnet/api/system.timespan). | +| **throwOnTimeoutInWaitForReplicas** | `boolean` | Set to `true` to throw an exception if the replication doesn't complete before `waitForReplicasTimeout`.
Set to `false` to receive the normal response body. |
+| **numberOfReplicasToWaitFor** | `int` / `string` | The number of replicas that must be written before `waitForReplicasTimeout` elapses. Set this parameter to `majority` to wait until the data has been replicated to a majority of the nodes in the database group. Default = `1`. |
+
+## Commands
+
+### Put Document Command
+
+Upload a new document or update an existing document.
+
+Format within the `Commands` array in the [request body](../../../client-api/rest-api/document-commands/batch-commands.mdx#request-format):
+
+
+
+{`\{
+    \\"Id\\": \\"\\",
+    \\"ChangeVector\\": \\"\\",
+    \\"Document\\": \{
+
+    \},
+    \\"Type\\": \\"PUT\\",
+    \\"ForceRevisionCreationStrategy\\": \\"Before\\"
+\}
+`}
+
+
+
+| Parameter | Description | Required |
+|-------------------------------|--------------------------------------------------------------------------------------------------------------------|----------|
+| **Id** | ID of document to create or update | Yes to update, [no to create](../../../client-api/document-identifiers/working-with-document-identifiers.mdx#autogenerated-ids) |
+| **ChangeVector** | When updating an existing document, this parameter takes that document's expected [change vector](../../../server/clustering/replication/change-vector.mdx). If it does not match the server-side change vector, a concurrency exception is thrown.
An exception is also thrown if the document does not exist. | No | +| **Document** | JSON document to create, or to replace the existing document | Yes | +| **Type** | Set to `PUT` | Yes | +| **ForceRevisionCreationStrategy** | When updating an existing document, set to `Before` to make a [revision](../../../document-extensions/revisions/overview.mdx) of the document before it is updated. | No | + +### Patch Document Command + +Update a document. A [patch](../../../client-api/operations/patching/single-document.mdx) is executed on the server side and +does not involve loading the document, avoiding the cost of sending the entire document in a round trip over the network. + +Format within the `Commands` array in the [request body](../../../client-api/rest-api/document-commands/batch-commands.mdx#request-format): + + + +{`\{ + \\"Id\\": \\"\\", + \\"ChangeVector\\": \\"\\", + \\"Patch\\": \{ + \\"Script\\": \\" >\\", + \\"Values\\": \{ + \\"\\": \\"\\", + ... + \} + \}, + \\"PatchIfMissing\\": \{ + \\"Script\\": \\"\\", + \\"Values\\": \{ + + \} + \}, + \\"Type\\": \\"PATCH\\" +\} +`} + + + +| Parameter | Description | Required | +| - | - | - | +| **Id** | ID of a document to execute the patch on | Yes | +| **ChangeVector** | The document's expected [change vector](../../../server/clustering/replication/change-vector.mdx). If it does not match the server-side change vector a concurrency exception is thrown. | No | +| **Patch** | A script that modifies the specified document. [Details below](../../../client-api/rest-api/document-commands/batch-commands.mdx#patch-request). | Yes | +| **PatchIfMissing** | An alternative script to be executed if no document with the given ID is found. This will create a new document with the given ID. [Details below](../../../client-api/rest-api/document-commands/batch-commands.mdx#patch-request). | No | +| **Type** | Set to `PATCH` | Yes | + +#### Patch Request + +Using scripts with arguments allows RavenDB to cache scripts and boost performance. For cURL, use single quotes `'` to +wrap strings. + +| Sub-Parameter | Description | Required | +| - | - | - | +| **Script** | Javascript commands to perform on the document. Use arguments from `Values` with a `$` prefix, i.e. `$`. | Yes | +| **Values** | Arguments that can be used in the script. | No | +### Delete Document Command + +Delete a document by its ID. + +Format within the `Commands` array in the [request body](../../../client-api/rest-api/document-commands/batch-commands.mdx#request-format): + + + +{`\{ + \\"Id\\": \\"\\", + \\"ChangeVector\\": \\"\\", + \\"Type\\": \\"DELETE\\" +\} +`} + + + +| Parameter | Description | Required | +| - | - | - | +| **Id** | ID of document to delete (only one can be deleted per command) | Yes | +| **ChangeVector** | The document's expected [change vector](../../../server/clustering/replication/change-vector.mdx). If it does not match the server-side change vector a concurrency exception is thrown. | No | +| **Type** | Set to `DELETE` | Yes | +### Delete by Prefix Command + +Delete all documents whose IDs begin with a certain prefix. 
+
+Format within the `Commands` array in the [request body](../../../client-api/rest-api/document-commands/batch-commands.mdx#request-format):
+
+
+
+{`\{
+    \\"Id\\": \\"\\",
+    \\"IdPrefixed\\": true,
+    \\"Type\\": \\"DELETE\\"
+\}
+`}
+
+
+
+| Parameter | Description | Required |
+| - | - | - |
+| **Id** | All documents whose IDs begin with this string will be deleted | Yes |
+| **IdPrefixed** | Set to `true` (distinguishes this as a Delete by Prefix Command rather than the Delete Document Command described above) | Yes |
+| **Type** | Set to `DELETE` | Yes |
+
+### Put Attachment Command
+
+Add an [attachment](../../../document-extensions/attachments/what-are-attachments.mdx) to a document, or update an existing attachment.
+
+If a batch contains a Put Attachment Command, the cURL format of the request is slightly different from a batch that doesn't.
+The `Content-Type` header takes `multipart/mixed; boundary=""` instead of the default `application/json`.
+The body contains the `Commands` array followed by each of the attachments, passed in the form of binary streams. The attachment streams come in the
+same order as their respective Put Attachment Commands within the `Commands` array. The `separator` demarcates these sections.
+
+The general form of a cURL request:
+
+
+
+{`curl -X POST "/databases//bulk_docs"
+-H "Content-Type: multipart/mixed; boundary="
+-d "
+--
+\{
+    \\"Commands\\":[
+        \{
+            \\"Id\\": \\"\\",
+            \\"Name\\": \\"\\",
+            \\"ContentType\\": \\"\\",
+            \\"ChangeVector\\": \\"\\",
+            \\"Type\\": \\"AttachmentPUT\\"
+        \},
+        ...
+    ]
+\}
+--
+Command-Type: AttachmentStream
+
+
+--
+...
+----"
+`}
+
+
+
+| Parameter | Description | Required |
+|------------------|--------------------------------------------------------------------------------------------------------------------------------|----------|
+| **boundary** | The "separator" - an arbitrary string that demarcates the attachment streams.
The attachment streams come in the same order as their respective Put Attachment Commands in the commands array.
The string used as a separator must not appear elsewhere in the request body - i.e. "ChangeVector" or "{[" are not valid separators. | Yes | +| **Id** | Document ID | Yes | +| **Name** | Name of attachment to create or update | Yes | +| **ContentType** | Mime type of the attachment | No | +| **ChangeVector** | The document's expected [change vector](../../../server/clustering/replication/change-vector.mdx). If it does not match the server-side change vector a concurrency exception is thrown. | No | +| **Type** | Set to `AttachmentPUT` | Yes | + +### Delete Attachment Command + +Delete an attachment in a certain document. + +Format within the `Commands` array in the [request body](../../../client-api/rest-api/document-commands/batch-commands.mdx#request-format): + + + +{`\{ + \\"Id\\": \\"\\", + \\"Name\\": \\"\\", + \\"ChangeVector\\": \\"\\", + \\"Type\\": \\"AttachmentDELETE\\" +\} +`} + + + +| Parameter | Description | Required | +| - | - | - | +| **Id** | ID of document for which to delete the attachment | Yes | +| Name | Name of the attachment to delete | Yes | +| **ChangeVector** | The document's expected [change vector](../../../server/clustering/replication/change-vector.mdx). If it does not match the server-side change vector a concurrency exception is thrown. | No | +| **Type** | Set to `AttachmentDELETE` | Yes | + + + +## Response Format + +### Http Status Codes + +| Code | Description | +| - | - | +| `201` | The transaction was successfully completed. | +| `408` | The time specified by the options `waitForIndexThrow` or `waitForReplicasTimeout` passed before indexing or replication completed respectively, and an exception is thrown. This only happens if `throwOnTimeoutInWaitForReplicas` or `waitForIndexThrow` are set to `true`. | +| `409` | A specified change vector did not match the server-side change vector, or a change vector was specified for a document that does not exist. A concurrency exception is thrown. | +| `500` | Invalid request, such as a put attachment command for a document that does not exist. | + +### Response Body + +Results appear in the same order as the commands in the request body. + + + +{`\{ + "Results":[ + \{ \}, + ... + ] +\} +`} + + + +* Format within the `Results` array in the response body: + * [Put Document Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#put-document-command-1) + * [Patch Document Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#patch-document-command-1) + * [Delete Document Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#delete-document-command-1) + * [Delete by Prefix Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#delete-by-prefix-command-1) + * [Put Attachment Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#put-attachment-command-1) + * [Delete Attachment Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#delete-attachment-command-1) + +### Put Document Command + + + +{`\{ + "Type": "PUT", + "@id": "", + "@collection": "", + "@change-vector": "", + "@last-modified": "" +\} +`} + + + +| Parameter | Description | +| - | - | +| **Type** | Same as the `Type` of the command sent - in this case `PUT`. | +| **@id** | The ID of the document that has been created or modified. | +| **@collection** | Name of the [collection](../../../client-api/faq/what-is-a-collection.mdx) that contains the document. If none was specified, the collection will be `@empty`. 
| +| **@change-vector** | The document's change vector after the command was executed. | + +### Patch Document Command + + + +{`\{ + "@id": "", + "@change-vector": "", + "@last-modified": "", + "Type": "PATCH", + "PatchStatus": "", + "Debug": null +\} +`} + + + +| Parameter | Description | +| - | - | +| **@id** | The ID of the document that has been patched or created. | +| **@change-vector** | The document's change vector after the command was executed. Returns `null` if the command did not result in any changes. | +| **@last-modified** | Date and time (UTC) of the most recent modification made to the document. | +| **Type** | Same as the `Type` of the command sent - in this case `PATCH`. | +| **PatchStatus** | See [below](../../../client-api/rest-api/document-commands/batch-commands.mdx#patchstatus) | +| **Debug** | Should always return `null` in the context of batch commands. | + +#### PatchStatus + +| Status | Description | +| - | - | +| **DocumentDoesNotExist** | No document with the specified ID exists. This will only be returned if no `PatchIfMissing` script was given. | +| **Created** | No document with the specified ID existed, so a new document was created with that ID and `PatchIfMissing` was applied. | +| **Patched** | The specified document was successfully patched. | +| **Skipped** | Should not appear in the context of batch commands. | +| **NotModified** | Patch was successful but did not result in a modification to the document. | + +### Delete Document Command + + + +{`\{ + "Id": "", + "Type": "DELETE", + "Deleted": +\} +`} + + + +| Parameter | Description | +| - | - | +| **Id** | The ID of the document that has been deleted. | +| **Type** | Same as the `Type` of the command sent - in this case `DELETE`. | +| **Deleted** | `true` if the document was successfully deleted, `false` if not (for instance, because the specified document did not exist). | + +### Delete by Prefix Command + + + +{`\{ + "Id": "", + "Type": "DELETE", + "Deleted": +\} +`} + + + +| Parameter | Description | +| - | - | +| **Id** | The document ID prefix of the documents that were deleted. | +| **Type** | Same as the `Type` of the command sent - in this case `DELETE`. | +| **Deleted** | `true` if the documents were successfully deleted, `false` if not (for instance, because no documents with the specified prefix exist). | + +### Put Attachment Command + + + +{`\{ + "Id": "", + "Type": "AttachmentPUT", + "Name": "", + "ChangeVector": "", + "Hash": "", + "ContentType": "", + "Size": , + "DocumentChangeVector": "" +\} +`} + + + +| Parameter | Description | +| - | - | +| **Id** | The ID of the document for which the attachment was put. | +| **Type** | Same as the `Type` of the command sent - in this case `AttachmentPUT`. | +| **Name** | Name of the attachment that was created or updated. | +| **ChangeVector** | A change vector specific to the _attachment_, distinct from the usual document change vector. Use this change vector in requests to update this attachment. | +| **Hash** | Hash representing the attachment. | +| **ContentType** | MIME type of the attachment. | +| **Size** | Size of the attachment in bytes. | +| **DocumentChangeVector** | The document's change vector after the command was executed. | + +### Delete Attachment Command + + + +{`\{ + "Type": "AttachmentDELETE", + "@id": "", + "Name": "" +\} +`} + + + +| Parameter | Description | +| - | - | +| **Type** | Same as the `Type` of the command sent - in this case `AttachmentDELETE`. 
| +| **@id** | The ID of the document for which the attachment was deleted. | +| **Name** | Name of the attachment that was deleted. | +| **DocumentChangeVector** | The document's change vector after the command was executed. | + + + +## More Examples + +[About Northwind](../../../start/about-examples.mdx), the database used in our examples. + +* In this section: + * [Put Document Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#put-document-command-2) + * [Patch Document Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#patch-document-command-2) + * [Delete Document Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#delete-document-command-2) + * [Delete by Prefix Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#delete-by-prefix-command-2) + * [Put Attachment Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#put-attachment-command-2) + * [Delete Attachment Command](../../../client-api/rest-api/document-commands/batch-commands.mdx#delete-attachment-command-2) +### Put Document Command + +Request: + + + +{`curl -X POST "http://live-test.ravendb.net/databases/Example/bulk_docs" +-H "Content-Type: application/json" +-d "\{ + \\"Commands\\": [ + \{ + \\"Id\\": \\"person/1\\", + \\"ChangeVector\\": null, + \\"Document\\": \{ + \\"Name\\": \\"John Smith\\" + \}, + \\"Type\\": \\"PUT\\" + \} + ] +\}" +`} + + + +Response: + + + +{`HTTP/1.1 201 Created +Server:"nginx" +Date:"Wed, 18 Sep 2019 16:14:20 GMT" +Content-Type:"application/json; charset=utf-8" +Transfer-Encoding:"chunked" +Connection:"keep-alive" +Content-Encoding:"gzip" +Vary:"Accept-Encoding" +Raven-Server-Version:"4.2.4.42" + +\{ + "Results": [ + \{ + "Type": "PUT", + "@id": "person/1", + "@collection": "@empty", + "@change-vector": "A:5951-pITDlhlRaEeJh16dDBREzg, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@last-modified": "2019-09-18T16:14:20.5759532" + \} + ] +\} +`} + + +### Patch Document Command + +Request: + + + +{`curl -X POST "http://live-test.ravendb.net/databases/Example/bulk_docs" +-H "Content-Type: application/json" +-d "\{ + \\"Commands\\": [ + \{ + \\"Id\\": \\"person/1\\", + \\"ChangeVector\\": null, + \\"Patch\\": \{ + \\"Script\\": \\"this.Name = 'Jane Doe';\\", + \\"Values\\": \{\} + \}, + \\"Type\\": \\"PATCH\\" + \} + ] +\}" +`} + + + +Response: + + + +{`HTTP/1.1 201 Created +Server:"nginx" +Date:"Wed, 18 Sep 2019 16:18:13 GMT" +Content-Type:"application/json; charset=utf-8" +Transfer-Encoding:"chunked" +Connection:"keep-alive" +Content-Encoding:"gzip" +Vary:"Accept-Encoding" +Raven-Server-Version:"4.2.4.42" + +\{ + "Results": [ + \{ + "Id": "person/1", + "ChangeVector": "A:5952-pITDlhlRaEeJh16dDBREzg, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "LastModified": "2019-09-18T16:18:13.5745560", + "Type": "PATCH", + "PatchStatus": "Patched", + "Debug": null + \} + ] +\} +`} + + +### Delete Document Command + +Request: + + + +{`curl -X POST "http://live-test.ravendb.net/databases/Example/bulk_docs" +-H "Content-Type: application/json" +-d "\{ + \\"Commands\\": [ + \{ + \\"Id\\": \\"employees/1-A\\", + \\"ChangeVector\\": null, + \\"Type\\": \\"DELETE\\" + \} + ] +\}" +`} + + + +Response: + + + +{`HTTP/1.1 201 Created +Server:"nginx" +Date:"Wed, 18 Sep 2019 16:30:15 GMT" +Content-Type:"application/json; charset=utf-8" +Transfer-Encoding:"chunked" +Connection:"keep-alive" +Content-Encoding:"gzip" +Vary:"Accept-Encoding" +Raven-Server-Version:"4.2.4.42" + 
+\{
+    "Results": [
+        \{
+            "Id": "employees/1-A",
+            "Type": "DELETE",
+            "Deleted": true,
+            "ChangeVector": null
+        \}
+    ]
+\}
+`}
+
+
+### Delete by Prefix Command
+
+Request:
+
+
+
+{`curl -X POST "http://live-test.ravendb.net/databases/Example/bulk_docs"
+-H "Content-Type: application/json"
+-d "\{
+    \\"Commands\\": [
+        \{
+            \\"Id\\": \\"employ\\",
+            \\"ChangeVector\\": null,
+            \\"IdPrefixed\\": true,
+            \\"Type\\": \\"DELETE\\"
+        \}
+    ]
+\}"
+`}
+
+
+
+Response:
+
+
+
+{`HTTP/1.1 201 Created
+Server:"nginx"
+Date:"Wed, 18 Sep 2019 16:32:16 GMT"
+Content-Type:"application/json; charset=utf-8"
+Transfer-Encoding:"chunked"
+Connection:"keep-alive"
+Content-Encoding:"gzip"
+Vary:"Accept-Encoding"
+Raven-Server-Version:"4.2.4.42"
+
+\{
+    "Results": [
+        \{
+            "Id": "employ",
+            "Type": "DELETE",
+            "Deleted": true
+        \}
+    ]
+\}
+`}
+
+
+### Put Attachment Command
+
+Request:
+
+
+
+{`curl -X POST "http://live-test.ravendb.net/databases/Example/bulk_docs"
+-H "Content-Type: multipart/mixed; boundary=some_boundary"
+-d "
+--some_boundary
+\{
+    \\"Commands\\": [
+        \{
+            \\"Id\\":\\"shippers/1-A\\",
+            \\"Name\\":\\"some_file\\",
+            \\"ContentType\\":\\"text\\",
+            \\"Type\\":\\"AttachmentPUT\\"
+        \}
+    ]
+\}
+--some_boundary
+Command-Type: AttachmentStream
+
+12345
+--some_boundary--"
+`}
+
+
+
+Response:
+
+
+
+{`HTTP/1.1 201 Created
+Server:"nginx"
+Date:"Wed, 18 Sep 2019 16:40:43 GMT"
+Content-Type:"application/json; charset=utf-8"
+Transfer-Encoding:"chunked"
+Connection:"keep-alive"
+Content-Encoding:"gzip"
+Vary:"Accept-Encoding"
+Raven-Server-Version:"4.2.4.42"
+
+\{
+    "Results": [
+        \{
+            "Id": "shippers/1-A",
+            "Type": "AttachmentPUT",
+            "Name": "some_file",
+            "ChangeVector": "A:5973-pITDlhlRaEeJh16dDBREzg, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A",
+            "Hash": "DHnN2gtPymAUoaFxtgjxfU83O8fxGHw8+H/P+kkPxjg=",
+            "ContentType": "text",
+            "Size": 5,
+            "DocumentChangeVector": "A:5974-pITDlhlRaEeJh16dDBREzg, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A"
+        \}
+    ]
+\}
+`}
+
+
+### Delete Attachment Command
+
+Request:
+
+
+
+{`curl -X POST "http://live-test.ravendb.net/databases/Example/bulk_docs"
+-H "Content-Type: application/json"
+-d "\{
+    \\"Commands\\": [
+        \{
+            \\"Id\\": \\"categories/2-A\\",
+            \\"Name\\": \\"image.jpg\\",
+            \\"ChangeVector\\": null,
+            \\"Type\\": \\"AttachmentDELETE\\"
+        \}
+    ]
+\}"
+`}
+
+
+
+Response:
+
+
+
+{`HTTP/1.1 201 Created
+Server:"nginx"
+Date:"Wed, 18 Sep 2019 16:44:40 GMT"
+Content-Type:"application/json; charset=utf-8"
+Transfer-Encoding:"chunked"
+Connection:"keep-alive"
+Content-Encoding:"gzip"
+Vary:"Accept-Encoding"
+Raven-Server-Version:"4.2.4.42"
+
+\{
+    "Results": [
+        \{
+            "Type": "AttachmentDELETE",
+            "@id": "categories/2-A",
+            "Name": "image.jpg",
+            "DocumentChangeVector": "A:5979-pITDlhlRaEeJh16dDBREzg, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A"
+        \}
+    ]
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/rest-api/document-commands/delete-document.mdx b/versioned_docs/version-7.1/client-api/rest-api/document-commands/delete-document.mdx new file mode 100644 index 0000000000..f12f2a4f08 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/rest-api/document-commands/delete-document.mdx @@ -0,0 +1,90 @@
+---
+title: "Delete a Document"
+hide_table_of_contents: true
+sidebar_label: Delete a Document
+sidebar_position: 4
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Delete a Document
+
+
+* Use this endpoint with the **`DELETE`** method to delete one document from the database:
+`/databases//docs?id=`
+
+* In this page:
+    * [Example](../../../client-api/rest-api/document-commands/delete-document.mdx#example)
+    * [Request Format](../../../client-api/rest-api/document-commands/delete-document.mdx#request-format)
+    * [Response Format](../../../client-api/rest-api/document-commands/delete-document.mdx#response-format)
+
+
+## Example
+
+This is a cURL request to delete the document "employees/1-A" from a database named "Example" on our
+[playground server](http://live-test.ravendb.net):
+
+
+
+{`curl -X DELETE "http://live-test.ravendb.net/databases/Example/docs?id=employees/1-A"
+`}
+
+
+
+Response:
+
+
+
+{`HTTP/1.1 204
+status: 204
+Server: nginx
+Date: Tue, 27 Aug 2019 11:40:12 GMT
+Connection: keep-alive
+Raven-Server-Version: 4.2.3.42
+`}
+
+
+
+
+
+## Request Format
+
+This is the general format of the cURL request:
+
+
+
+{`curl -X DELETE "/databases//docs?id="
+--header "If-Match: "
+`}
+
+
+
+| Query Parameters | Description | Required |
+| - | - | - |
+| **id** | ID of a document to be deleted. | Yes |
+
+| Headers | Description | Required |
+| - | - | - |
+| **If-Match** | Expected [change vector](../../../server/clustering/replication/change-vector.mdx). If it matches the server-side change vector, the document is deleted; if it doesn't match, a concurrency exception is thrown. | No |
+
+
+
+## Response Format
+
+| Header | Description |
+| - | - |
+| **Content-Type** | MIME media type and character encoding. This should always be: `application/json; charset=utf-8`. |
+| **Raven-Server-Version** | Version of RavenDB that the responding server is running |
+
+| HTTP Status Code | Description |
+| - | - |
+| `204` | The document was successfully deleted, _or_ no document with the specified ID exists. |
+| `409` | The change vector submitted did not match the server-side change vector. A concurrency exception is thrown. |
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/rest-api/document-commands/get-all-documents.mdx b/versioned_docs/version-7.1/client-api/rest-api/document-commands/get-all-documents.mdx new file mode 100644 index 0000000000..7462edb72c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/rest-api/document-commands/get-all-documents.mdx @@ -0,0 +1,431 @@
+---
+title: "Get All Documents"
+hide_table_of_contents: true
+sidebar_label: Get All Documents
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Get All Documents
+
+
+* Use this endpoint with the **`GET`** method to retrieve all documents from the database:
+`/databases//docs`
+
+* Query parameters can be used to page the results.
+ +* In this page: + * [Basic Example](../../../client-api/rest-api/document-commands/get-all-documents.mdx#basic-example) + * [Request Format](../../../client-api/rest-api/document-commands/get-all-documents.mdx#request-format) + * [Response Format](../../../client-api/rest-api/document-commands/get-all-documents.mdx#response-format) + * [Query Parameter Examples](../../../client-api/rest-api/document-commands/get-all-documents.mdx#query-parameter-examples) + * [start](../../../client-api/rest-api/document-commands/get-all-documents.mdx#start) + * [pageSize](../../../client-api/rest-api/document-commands/get-all-documents.mdx#pagesize) + * [metadataOnly](../../../client-api/rest-api/document-commands/get-all-documents.mdx#metadataonly) + +## Basic Example + +This is a cURL request to a database named "Example" on our [playground server](http://live-test.ravendb.net). Paging +through all of the documents in the database, the request skips the first 9 documents and retrieves the next 2. + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?start=9&pageSize=2" +`} + + + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 10 Oct 2019 12:00:40 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "A:2134-W33iO0zJC0qZKWh6fjnp6A, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "Name": "Seafood", + "Description": "Seaweed and fish", + "@metadata": \{ + "@attachments": [ + \{ + "Name": "image.jpg", + "Hash": "GWdpGVCWyLsrtNdA5AOee0QOZFG6rKIqCosZZN5WnCA=", + "ContentType": "image/jpeg", + "Size": 33396 + \} + ], + "@collection": "Categories", + "@change-vector": "A:2107-W33iO0zJC0qZKWh6fjnp6A", + "@flags": "HasAttachments", + "@id": "categories/8-A", + "@last-modified": "2018-07-27T12:21:39.1315788Z" + \} + \}, + \{ + "Name": "Produce", + "Description": "Dried fruit and bean curd", + "@metadata": \{ + "@attachments": [ + \{ + "Name": "image.jpg", + "Hash": "asY7yUHhdgaVoKhivgua0OUSJKXqNDa3Z1uLP9XAocM=", + "ContentType": "image/jpeg", + "Size": 61749 + \} + ], + "@collection": "Categories", + "@change-vector": "A:2104-W33iO0zJC0qZKWh6fjnp6A", + "@flags": "HasAttachments", + "@id": "categories/7-A", + "@last-modified": "2018-07-27T12:21:11.2283909Z" + \} + \} + ] +\} +`} + + + + + +## Request Format + +This is the general format of a cURL request that uses all query string parameters: + + + +{`curl -X GET "/databases//docs? + &start= + &pageSize= + &metadataOnly=" +--header "If-None-Match: " +`} + + +Linebreaks are added for clarity. + + +#### Query String Parameters + +| Parameter | Description | Required | +| - | - | - | +| **start** | Number of results to skip. | No | +| **pageSize** | Maximum number of results to retrieve. | No | +| **metadataOnly** | Set this parameter to `true` to retrieve only the document metadata from each result. | No | + +#### Headers + +| Header | Description | Required | +| - | - | - | +| **If-None-Match** | This header takes a hash representing the previous results of an **identical** request. The hash is found in the response header `ETag`. If the results were not modified since the previous request, the server responds with http status code `304`, and the requested documents are retrieved from a local cache rather than over the network. 
| No | + + + +## Response Format + +#### Http Status Codes + +| Code | Description | +| ----------- | - | +| `200` | Results were successfully retrieved | +| `304` | In response to an `If-None-Match` check: none of the requested documents were modified since they were last loaded, so they were not retrieved from the server. | + +#### Headers + +| Header | Description | +| - | - | +| **Content-Type** | MIME media type and character encoding. This should always be: `application/json; charset=utf-8` | +| **ETag** | Hash representing the state of these results. If another, **identical** request is made, this hash can be sent in the `If-None-Match` header to check whether the retrieved documents have been modified since the last response. | +| **Raven-Server-Version** | Version of RavenDB that the responding server is running | + +#### Body + +Retrieved documents are sorted in descending order of their [change vectors](../../../server/clustering/replication/change-vector.mdx). +A retrieved document is identical in contents and format to the document stored in the server - unless the `metadataOnly` +parameter is set to `true`. + +This is the general format of the JSON response body: + + + +{`\{ + "Results": [ + \{ + "":"", + ... + "@metadata":\{ + ... + \} + \}, + \{ \}, + ... + ] +\} +`} + + +Linebreaks are added for clarity. + + + +## Query Parameter Examples + +[About Northwind](../../../start/about-examples.mdx), the database used in our examples. + +In this section: + +* [start](../../../client-api/rest-api/document-commands/get-all-documents.mdx#start) +* [pageSize](../../../client-api/rest-api/document-commands/get-all-documents.mdx#pagesize) +* [metadataOnly](../../../client-api/rest-api/document-commands/get-all-documents.mdx#metadataonly) +### start + +Skip first 1,057 documents, and retrieve the rest (our version of Northwind contains 1,059 documents). +cURL request: + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?start=1057" +`} + + + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 10 Oct 2019 16:30:37 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "A:2134-W33iO0zJC0qZKWh6fjnp6A, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "ExternalId": "ALFKI", + "Name": "Alfreds Futterkiste", + "Contact": \{ + "Name": "Maria Anders", + "Title": "Sales Representative" + \}, + "Address": \{ + "Line1": "Obere Str. 57", + "Line2": null, + "City": "Berlin", + "Region": null, + "PostalCode": "12209", + "Country": "Germany", + "Location": \{ + "Latitude": 53.24939, + "Longitude": 14.43286 + \} + \}, + "Phone": "030-0074321", + "Fax": "030-0076545", + "@metadata": \{ + "@collection": "Companies", + "@change-vector": "A:3-W33iO0zJC0qZKWh6fjnp6A", + "@id": "companies/1-A", + "@last-modified": "2018-07-27T12:11:53.0182893Z" + \} + \}, + \{ + "Max": 8, + "@metadata": \{ + "@collection": "@hilo", + "@change-vector": "A:1-W33iO0zJC0qZKWh6fjnp6A", + "@id": "Raven/Hilo/categories", + "@last-modified": "2018-07-27T12:11:53.0145929Z" + \} + \} + ] +\} +`} + + +### pageSize + +Retrieve the first document. 
+cURL request: + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?pageSize=1" +`} + + + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 10 Oct 2019 16:33:31 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "A:2134-W33iO0zJC0qZKWh6fjnp6A, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "LastName": "Callahan", + "FirstName": "Laura", + "Title": "Inside Sales Coordinator", + "Address": \{ + "Line1": "4726 - 11th Ave. N.E.", + "Line2": null, + "City": "Seattle", + "Region": "WA", + "PostalCode": "98105", + "Country": "USA", + "Location": \{ + "Latitude": 47.664164199999988, + "Longitude": -122.3160148 + \} + \}, + "HiredAt": "1994-03-05T00:00:00.0000000", + "Birthday": "1958-01-09T00:00:00.0000000", + "HomePhone": "(206) 555-1189", + "Extension": "2344", + "ReportsTo": "employees/2-A", + "Notes": [ + "Laura received a BA in psychology from the University of Washington. She has also completed a course in business French. She reads and writes French." + ], + "Territories": [ + "19428", + "44122", + "45839", + "53404" + ], + "@metadata": \{ + "@attachments": [ + \{ + "Name": "photo.jpg", + "Hash": "8dte+O8Ds9RJx8dKruWurqapAojM/ZxjHBMst9wm5sI=", + "ContentType": "image/jpeg", + "Size": 14446 + \} + ], + "@collection": "Employees", + "@change-vector": "A:2134-W33iO0zJC0qZKWh6fjnp6A", + "@flags": "HasAttachments", + "@id": "employees/8-A", + "@last-modified": "2018-07-27T12:26:25.0179915Z" + \} + \} + ] +\} +`} + + +### metadataOnly + +Skip first 123 documents, take the next 5, and retrieve only the metadata of each document. +cURL request: + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs? + start=123 + &pageSize=5 + &metadataOnly=true" +`} + + +Linebreaks are added for clarity. 
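+
+As an aside - not part of the example itself - a `metadataOnly` response is compact enough to post-process easily. Assuming the `jq` utility is installed, this sketch issues the same request and lists just the IDs of the five returned documents:
+
+
+
+{`curl -s "http://live-test.ravendb.net/databases/Example/docs?start=123&pageSize=5&metadataOnly=true" | jq -r '.Results[]."@metadata"."@id"'
+`}
+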
+ +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 10 Oct 2019 16:50:00 GMT +Content-Type: application/json; charset=utf-8 +Connection: keep-alive +ETag: "A:2134-W33iO0zJC0qZKWh6fjnp6A, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 +Content-Length: 918 + +\{ + "Results": [ + \{ + "@metadata": \{ + "@collection": "Orders", + "@change-vector": "A:1871-W33iO0zJC0qZKWh6fjnp6A", + "@flags": "HasRevisions", + "@id": "orders/728-A", + "@last-modified": "2018-07-27T12:11:53.1753957Z" + \} + \}, + \{ + "@metadata": \{ + "@collection": "Orders", + "@change-vector": "A:1869-W33iO0zJC0qZKWh6fjnp6A", + "@flags": "HasRevisions", + "@id": "orders/727-A", + "@last-modified": "2018-07-27T12:11:53.1751418Z" + \} + \}, + \{ + "@metadata": \{ + "@collection": "Orders", + "@change-vector": "A:1867-W33iO0zJC0qZKWh6fjnp6A", + "@flags": "HasRevisions", + "@id": "orders/726-A", + "@last-modified": "2018-07-27T12:11:53.1749721Z" + \} + \}, + \{ + "@metadata": \{ + "@collection": "Orders", + "@change-vector": "A:1865-W33iO0zJC0qZKWh6fjnp6A", + "@flags": "HasRevisions", + "@id": "orders/725-A", + "@last-modified": "2018-07-27T12:11:53.1747646Z" + \} + \}, + \{ + "@metadata": \{ + "@collection": "Orders", + "@change-vector": "A:1863-W33iO0zJC0qZKWh6fjnp6A", + "@flags": "HasRevisions", + "@id": "orders/724-A", + "@last-modified": "2018-07-27T12:11:53.1745710Z" + \} + \} + ] +\} +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/rest-api/document-commands/get-documents-by-id.mdx b/versioned_docs/version-7.1/client-api/rest-api/document-commands/get-documents-by-id.mdx new file mode 100644 index 0000000000..d0a1f45b3f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/rest-api/document-commands/get-documents-by-id.mdx @@ -0,0 +1,471 @@ +--- +title: "Get Documents by ID" +hide_table_of_contents: true +sidebar_label: Get Documents by ID +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Get Documents by ID + + +* Use this endpoint with the **`GET`** method to retrieve documents from the database according to their document IDs: +`/databases//docs?id=` + +* Query parameters can be used to include [related documents](../../../client-api/how-to/handle-document-relationships.mdx#includes) and +[counters](../../../document-extensions/counters/overview.mdx). 
+ +* In this page: + * [Basic Example](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#basic-example) + * [Request Format](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#request-format) + * [Response Format](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#response-format) + * [More Examples](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#more-examples) + * [Get Multiple Documents](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#get-multiple-documents) + * [Get Related Documents](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#get-related-documents) + * [Get Document Metadata Only](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#get-document-metadata-only) + * [Get Document Counters](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#get-document-counters) + +## Basic Example + +This is a cURL request to retrieve one document named "products/48-A" from a database named "Example" on our +[playground server](http://live-test.ravendb.net): + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?id=products/48-A" +`} + + + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Tue, 10 Sep 2019 10:33:04 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "A:285-k50KTOC5G0mfVXKjomTNFQ" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "Name": "Chocolade", + "Supplier": "suppliers/22-A", + "Category": "categories/3-A", + "QuantityPerUnit": "10 pkgs.", + "PricePerUnit": 12.7500, + "UnitsInStock": 22, + "UnitsOnOrder": 15, + "Discontinued": false, + "ReorderLevel": 25, + "@metadata": \{ + "@collection": "Products", + "@change-vector": "A:285-k50KTOC5G0mfVXKjomTNFQ", + "@id": "products/48-A", + "@last-modified": "2018-07-27T12:11:53.0300420Z" + \} + \} + ], + "Includes": \{\} +\} +`} + + + + + +## Request Format + +This is the general format of a cURL request that uses all parameters: + + + +{`curl -X GET "/databases//docs? + id= + &include= + &counter= + &metadataOnly=" +--header "If-None-Match:" +`} + + +Linebreaks are added for clarity. + + +#### Query String Parameters + +| Parameter | Description | Required / # | +|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------| +| **id** | ID of a document to retrieve.
If no IDs are specified, all the documents in the database are retrieved in descending order of their [change vectors](../../../server/clustering/replication/change-vector.mdx). | Yes;
Can be used more than once | +| **include** | Path to a field containing the ID of another, 'related' document. [See: How to Handle Document Relationships](../../../client-api/how-to/handle-document-relationships.mdx#includes). | No;
Can be used more than once | +| **counter** | Name of a [counter](../../../document-extensions/counters/overview.mdx) to retrieve. Set this parameter to `@all_counters` to retrieve all counters of the specified documents. Counters of _included_ documents, however, will not be retrieved. | No;
Can be used more than once | +| **metadataOnly** | Set this parameter to `true` to retrieve only the metadata of each document. This does not apply to included documents which are retrieved with their complete contents. | No;
Used once |
+
+#### Headers
+
+| Header | Description | Required |
+| - | - | - |
+| **If-None-Match** | This header takes a hash representing the previous results of an **identical** request. The hash is found in the response header `ETag`. If the results were not modified since the previous request, the server responds with http status code `304` and the requested documents are retrieved from a local cache rather than over the network. | No |
+
+## Response Format
+
+#### Http Status Codes
+
+| Code | Description |
+| - | - |
+| `200` | Results are successfully retrieved. If a requested document could not be found, the result returned is `null`. |
+| `304` | In response to an `If-None-Match` check: none of the requested documents were modified since they were last loaded, so they were not retrieved from the server. |
+| `404` | No document with the specified ID was found. This code is only sent when _one_ document was requested. Otherwise, see status code `200`. |
+
+#### Headers
+
+| Header | Description |
+| - | - |
+| **Content-Type** | MIME media type and character encoding. This should always be: `application/json; charset=utf-8`. |
+| **ETag** | Hash representing the state of these results. If another, **identical** request is made, this hash can be sent in the `If-None-Match` header to check whether the retrieved documents have been modified since the last response. If none were modified, they are not retrieved. |
+| **Raven-Server-Version** | Version of RavenDB that the responding server is running. |
+
+#### Body
+
+A retrieved document is identical in contents and format to the document stored on the server (unless the `metadataOnly`
+parameter is set to `true`).
+
+This is the general JSON format of the response body:
+
+
+
+{`\{
+    "Results": [
+        \{
+            
+        \},
+        \{ \},
+        ...
+    ],
+    "Includes": \{
+        "": \{
+            
+        \},
+        "": \{ \},
+        ...
+    \},
+    "CounterIncludes": \{
+        "": [
+            \{
+                "DocumentId": "",
+                "CounterName": "",
+                "TotalValue": 
+            \},
+            \{ \},
+            ...
+        ],
+        "": [ ],
+        ...
+    \}
+\}
+`}
+
+
+Linebreaks are added for clarity.
+
+
+
+## More Examples
+
+[About Northwind](../../../start/about-examples.mdx), the database used in our examples.
+
+In this section:
+
+* [Get Multiple Documents](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#get-multiple-documents)
+* [Get Related Documents](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#get-related-documents)
+* [Get Document Metadata Only](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#get-document-metadata-only)
+* [Get Document Counters](../../../client-api/rest-api/document-commands/get-documents-by-id.mdx#get-document-counters)
+### Get Multiple Documents
+
+Example cURL request:
+
+
+
+{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?
+    id=shippers/1-A
+    &id=shippers/2-A"
+`}
+
+
+Linebreaks are added for clarity.
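+
+(Document IDs containing `/` are accepted verbatim above; in scripts it can be safer to let cURL build and URL-encode the query string itself. A sketch of an equivalent request using `-G` with `--data-urlencode`:)
+
+
+
+{`curl -G "http://live-test.ravendb.net/databases/Example/docs"
+--data-urlencode "id=shippers/1-A"
+--data-urlencode "id=shippers/2-A"
+`}
+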
+ +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 12 Sep 2019 09:23:49 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "Hash-auWLG9xq3imTfRdJvlKIL32LhEM0IwJ20eiibWse0X8=" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "Name": "Speedy Express", + "Phone": "(503) 555-9831", + "@metadata": \{ + "@collection": "Shippers", + "@change-vector": "A:349-k50KTOC5G0mfVXKjomTNFQ", + "@id": "shippers/1-A", + "@last-modified": "2018-07-27T12:11:53.0317375Z" + \} + \}, + \{ + "Name": "United Package", + "Phone": "(503) 555-3199", + "@metadata": \{ + "@collection": "Shippers", + "@change-vector": "A:351-k50KTOC5G0mfVXKjomTNFQ", + "@id": "shippers/2-A", + "@last-modified": "2018-07-27T12:11:53.0317596Z" + \} + \} + ], + "Includes": \{\} +\} +`} + + +### Get Related Documents + +Example cURL request: + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Demo/docs? + id=products/48-A + &include=Supplier + &include=Category" +`} + + +Linebreaks are added for clarity. + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Tue, 10 Sep 2019 10:40:27 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "Hash-9oK1ZcWmNa9SD9hP8m0vT355ztQuFnF/vKD5ILyI/KY=" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "Name": "Chocolade", + "Supplier": "suppliers/22-A", + "Category": "categories/3-A", + "QuantityPerUnit": "10 pkgs.", + "PricePerUnit": 12.7500, + "UnitsInStock": 22, + "UnitsOnOrder": 15, + "Discontinued": false, + "ReorderLevel": 25, + "@metadata": \{ + "@collection": "Products", + "@change-vector": "A:285-k50KTOC5G0mfVXKjomTNFQ", + "@id": "products/48-A", + "@last-modified": "2018-07-27T12:11:53.0300420Z" + \} + \} + ], + "Includes": \{ + "suppliers/22-A": \{ + "Contact": \{ + "Name": "Dirk Luchte", + "Title": "Accounting Manager" + \}, + "Name": "Zaanse Snoepfabriek", + "Address": \{ + "Line1": "Verkoop Rijnweg 22", + "Line2": null, + "City": "Zaandam", + "Region": null, + "PostalCode": "9999 ZZ", + "Country": "Netherlands", + "Location": null + \}, + "Phone": "(12345) 1212", + "Fax": "(12345) 1210", + "HomePage": null, + "@metadata": \{ + "@collection": "Suppliers", + "@change-vector": "A:399-k50KTOC5G0mfVXKjomTNFQ", + "@id": "suppliers/22-A", + "@last-modified": "2018-07-27T12:11:53.0335729Z" + \} + \}, + "categories/3-A": \{ + "Name": "Confections", + "Description": "Desserts, candies, and sweet breads", + "@metadata": \{ + "@attachments": [ + \{ + "Name": "image.jpg", + "Hash": "1QxSMa3tBr+y8wQYNre7E9UJFFVTNWGjVoC+IC+gSSs=", + "ContentType": "image/jpeg", + "Size": 47955 + \} + ], + "@collection": "Categories", + "@change-vector": "A:2092-k50KTOC5G0mfVXKjomTNFQ", + "@flags": "HasAttachments", + "@id": "categories/3-A", + "@last-modified": "2018-07-27T12:16:44.1738714Z" + \} + \} + \} +\} +`} + + +### Get Document Metadata Only + +Example cURL request: + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs? + id=orders/19-A + &metadataOnly=true" +`} + + +Linebreaks are added for clarity. 
+ +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Tue, 10 Sep 2019 10:52:28 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "A:453-k50KTOC5G0mfVXKjomTNFQ" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "@metadata": \{ + "@collection": "Orders", + "@change-vector": "A:453-k50KTOC5G0mfVXKjomTNFQ", + "@flags": "HasRevisions", + "@id": "orders/19-A", + "@last-modified": "2018-07-27T12:11:53.0476121Z" + \} + \} + ], + "Includes": \{\} +\} +`} + + +### Get Document Counters + +Example cURL request: + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs? + id=products/48-A + &counter=MoLtUaE" +`} + + +Linebreaks are added for clarity. + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Tue, 10 Sep 2019 12:26:04 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "A:5957-k50KTOC5G0mfVXKjomTNFQ" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "Name": "Chocolade", + "Supplier": "suppliers/22-A", + "Category": "categories/3-A", + "QuantityPerUnit": "10 pkgs.", + "PricePerUnit": 12.7500, + "UnitsInStock": 22, + "UnitsOnOrder": 15, + "Discontinued": false, + "ReorderLevel": 25, + "@metadata": \{ + "@collection": "Products", + "@counters": [ + "#OfCounters", + "MoLtUaE" + ], + "@change-vector": "A:285-k50KTOC5G0mfVXKjomTNFQ", + "@id": "products/48-A", + "@flags": "HasRevisions, HasCounters", + "@last-modified": "2019-09-10T12:25:44.1759382Z" + \} + \} + ], + "Includes": \{\}, + "CounterIncludes": \{ + "orders/19-A": [ + \{ + "DocumentId": "orders/19-A", + "CounterName": "MoLtUaE", + "TotalValue": 42 + \} + ] + \} +\} +`} + + + +(Note that the standard [Northwind data](../../../start/about-examples.mdx) does not contain any [counters](../../../document-extensions/counters/overview.mdx) +when it is [generated in the studio](../../../studio/database/document-extensions/counters.mdx) - counters were added to "products/48-A" for this example) + + + diff --git a/versioned_docs/version-7.1/client-api/rest-api/document-commands/get-documents-by-prefix.mdx b/versioned_docs/version-7.1/client-api/rest-api/document-commands/get-documents-by-prefix.mdx new file mode 100644 index 0000000000..d64809ba4a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/rest-api/document-commands/get-documents-by-prefix.mdx @@ -0,0 +1,502 @@ +--- +title: "Get Documents by Prefix" +hide_table_of_contents: true +sidebar_label: Get Documents by Prefix +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Get Documents by Prefix + + +* Use this endpoint with the **`GET`** method to retrieve documents from the database by a common prefix in their document IDs: +`/databases//docs?startsWith=` + +* Query parameters can be used to filter and page the results. 
+ +* In this page: + * [Basic Example](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#basic-example) + * [Request Format](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#request-format) + * [Response Format](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#response-format) + * [More Examples](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#more-examples) + * [Get Using `matches`](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#get-using) + * [Get Using `matches` and `exclude`](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#get-usingand) + * [Get Using `startAfter`](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#get-using-1) + * [Page Results](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#page-results) + * [Get Document Metadata Only](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#get-document-metadata-only) + +## Basic Example + +This is a cURL request to retrieve all documents whose IDs begin with the prefix "ship" from a database named "Example" on +our [playground server](http://live-test.ravendb.net): + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?startsWith=ship" +`} + + + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Tue, 10 Sep 2019 15:25:34 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "A:2137-pIhs+72n6USJoZ5XIvTHvQ" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "Name": "Speedy Express", + "Phone": "(503) 555-9831", + "@metadata": \{ + "@collection": "Shippers", + "@change-vector": "A:349-k50KTOC5G0mfVXKjomTNFQ", + "@id": "shippers/1-A", + "@last-modified": "2018-07-27T12:11:53.0317375Z" + \} + \}, + \{ + "Name": "United Package", + "Phone": "(503) 555-3199", + "@metadata": \{ + "@collection": "Shippers", + "@change-vector": "A:351-k50KTOC5G0mfVXKjomTNFQ", + "@id": "shippers/2-A", + "@last-modified": "2018-07-27T12:11:53.0317596Z" + \} + \}, + \{ + "Name": "Federal Shipping", + "Phone": "(503) 555-9931", + "@metadata": \{ + "@collection": "Shippers", + "@change-vector": "A:353-k50KTOC5G0mfVXKjomTNFQ", + "@id": "shippers/3-A", + "@last-modified": "2018-07-27T12:11:53.0317858Z" + \} + \} + ] +\} +`} + + + + + +## Request Format + +This is the general format of a cURL request that uses all parameters: + + + +{`curl -X GET "/databases//docs? + startsWith= + &matches=||... + &exclude=||... + &startAfter= + &start= + &pageSize= + &metadataOnly=" +--header "If-None-Match: " +`} + + +Linebreaks are added for clarity. + + +#### Query String Parameters + +| Parameter | Description | Required | +| - | - | - | +| **startsWith** | Retrieve all documents whose IDs begin with this string. If the value of this parameter is left empty, all documents in the database are retrieved. | Yes | +| **matches** | Retrieve documents whose IDs are exactly ``+``. Accepts multiple values separated by a pipe character: ' \| ' . Use `?` to represent any single character, and `*` to represent any string. | No | +| **exclude** | _Exclude_ documents whose IDs are exactly ``+``. Accepts multiple values separated by a pipe character: ' \| ' . Use `?` to represent any single character, and `*` to represent any string. 
| No |
+| **startAfter** | Retrieve only the results after the first document ID that begins with this prefix. | No |
+| **start** | Number of results to skip. | No |
+| **pageSize** | Maximum number of results to retrieve. | No |
+| **metadataOnly** | Set this parameter to `true` to retrieve only the document metadata from each result. | No |
+
+#### Headers
+
+| Header | Description | Required |
+| - | - | - |
+| **If-None-Match** | This header takes a hash representing the previous results of an **identical** request. The hash is found in the response header `ETag`. If the results were not modified since the previous request, the server responds with http status code `304` and the requested documents are retrieved from a local cache rather than over the network. | No |
+
+
+
+## Response Format
+
+#### Http Status Codes
+
+| Code | Description |
+| ----------- | - |
+| `200` | Results were successfully retrieved. If no documents with the specified prefix could be found, the results array is empty. |
+| `304` | In response to an `If-None-Match` check: none of the requested documents were modified since they were last loaded, so they were not retrieved from the server. |
+
+#### Headers
+
+| Header | Description |
+| - | - |
+| **Content-Type** | MIME media type and character encoding. This should always be: `application/json; charset=utf-8` |
+| **ETag** | Hash representing the state of these results. If another, **identical** request is made, this hash can be sent in the `If-None-Match` header to check whether the retrieved documents have been modified since the last response. If none were modified, they are not retrieved. |
+| **Raven-Server-Version** | Version of RavenDB that the responding server is running |
+
+#### Body
+
+Retrieved documents are sorted in ascending [lexical order](https://en.wikipedia.org/wiki/Lexicographical_order) of their
+document IDs. A retrieved document is identical in contents and format to the document stored in the server - unless the
+`metadataOnly` parameter is set to `true`.
+
+This is the general JSON format of the response body:
+
+
+
+{`\{
+    "Results": [
+        \{
+            "":"",
+            ...
+            "@metadata":\{
+                ...
+            \}
+        \},
+        \{ \},
+        ...
+    ]
+\}
+`}
+
+
+Linebreaks are added for clarity.
+
+
+
+## More Examples
+
+[About Northwind](../../../start/about-examples.mdx), the database used in our examples.
+
+In this section:
+
+* [Get Using `matches`](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#get-using)
+* [Get Using `matches` and `exclude`](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#get-usingand)
+* [Get Using `startAfter`](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#get-using-1)
+* [Page Results](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#page-results)
+* [Get Document Metadata Only](../../../client-api/rest-api/document-commands/get-documents-by-prefix.mdx#get-document-metadata-only)
+### Get Using `matches`
+
+cURL request:
+
+
+
+{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?
+    startsWith=shipp
+    &matches=ers/3-A|ers/1-A"
+`}
+
+
+Linebreaks are added for clarity.
+
+Response:
+
+
+
+{`HTTP/1.1 200 OK
+Server: nginx
+Date: Thu, 12 Sep 2019 10:57:58 GMT
+Content-Type: application/json; charset=utf-8
+Transfer-Encoding: chunked
+Connection: keep-alive
+Content-Encoding: gzip
+ETag: "A:5972-k50KTOC5G0mfVXKjomTNFQ"
+Vary: Accept-Encoding
+Raven-Server-Version: 4.2.4.42
+
+\{
+    "Results": [
+        \{
+            "Name": "Speedy Express",
+            "Phone": "(503) 555-9831",
+            "@metadata": \{
+                "@collection": "Shippers",
+                "@change-vector": "A:349-k50KTOC5G0mfVXKjomTNFQ",
+                "@id": "shippers/1-A",
+                "@last-modified": "2018-07-27T12:11:53.0317375Z"
+            \}
+        \},
+        \{
+            "Name": "Federal Shipping",
+            "Phone": "(503) 555-9931",
+            "@metadata": \{
+                "@collection": "Shippers",
+                "@change-vector": "A:353-k50KTOC5G0mfVXKjomTNFQ",
+                "@id": "shippers/3-A",
+                "@last-modified": "2018-07-27T12:11:53.0317858Z"
+            \}
+        \}
+    ]
+\}
+`}
+
+
+### Get Using `matches` and `exclude`
+
+cURL request:
+
+
+
+{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?
+    startsWith=shipp
+    &matches=ers/3-A|ers/1-A
+    &exclude=ers/3-A"
+`}
+
+
+Linebreaks are added for clarity.
+
+Response:
+
+
+
+{`HTTP/1.1 200 OK
+Server: nginx
+Date: Thu, 12 Sep 2019 12:24:50 GMT
+Content-Type: application/json; charset=utf-8
+Transfer-Encoding: chunked
+Connection: keep-alive
+Content-Encoding: gzip
+ETag: "A:5972-k50KTOC5G0mfVXKjomTNFQ"
+Vary: Accept-Encoding
+Raven-Server-Version: 4.2.4.42
+
+\{
+    "Results": [
+        \{
+            "Name": "Speedy Express",
+            "Phone": "(503) 555-9831",
+            "@metadata": \{
+                "@collection": "Shippers",
+                "@change-vector": "A:349-k50KTOC5G0mfVXKjomTNFQ",
+                "@id": "shippers/1-A",
+                "@last-modified": "2018-07-27T12:11:53.0317375Z"
+            \}
+        \}
+    ]
+\}
+`}
+
+
+### Get Using `startAfter`
+
+cURL request:
+
+
+
+{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?
+    startsWith=shipp
+    &startAfter=shippers/1-A"
+`}
+
+
+Linebreaks are added for clarity.
+
+Response:
+
+
+
+{`HTTP/1.1 200 OK
+Server: nginx
+Date: Thu, 12 Sep 2019 12:37:39 GMT
+Content-Type: application/json; charset=utf-8
+Transfer-Encoding: chunked
+Connection: keep-alive
+Content-Encoding: gzip
+ETag: "A:5972-k50KTOC5G0mfVXKjomTNFQ"
+Vary: Accept-Encoding
+Raven-Server-Version: 4.2.4.42
+
+\{
+    "Results": [
+        \{
+            "Name": "United Package",
+            "Phone": "(503) 555-3199",
+            "@metadata": \{
+                "@collection": "Shippers",
+                "@change-vector": "A:351-k50KTOC5G0mfVXKjomTNFQ",
+                "@id": "shippers/2-A",
+                "@last-modified": "2018-07-27T12:11:53.0317596Z"
+            \}
+        \},
+        \{
+            "Name": "Federal Shipping",
+            "Phone": "(503) 555-9931",
+            "@metadata": \{
+                "@collection": "Shippers",
+                "@change-vector": "A:353-k50KTOC5G0mfVXKjomTNFQ",
+                "@id": "shippers/3-A",
+                "@last-modified": "2018-07-27T12:11:53.0317858Z"
+            \}
+        \}
+    ]
+\}
+`}
+
+
+### Page Results
+
+cURL request:
+
+
+
+{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs?
+    startsWith=product
+    &start=50
+    &pageSize=2"
+`}
+
+
+Linebreaks are added for clarity.
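+
+To walk an entire prefix in fixed-size pages, a script can keep advancing `start` by `pageSize` until a short page comes back. This is only a sketch - it assumes a Unix-like shell and the `jq` utility - and is not part of the documented example:
+
+
+
+{`#!/bin/bash
+# Sketch: list the IDs of every 'product' document, two per request
+START=0
+PAGE=2
+while true; do
+    RES=$(curl -s "http://live-test.ravendb.net/databases/Example/docs?startsWith=product&start=$START&pageSize=$PAGE")
+    echo "$RES" | jq -r '.Results[]."@metadata"."@id"'
+    COUNT=$(echo "$RES" | jq '.Results | length')
+    # a page smaller than PAGE means we have reached the end
+    if [ "$COUNT" -lt "$PAGE" ]; then break; fi
+    START=$((START + PAGE))
+done
+`}
+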
+ +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 12 Sep 2019 13:17:44 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "A:5972-k50KTOC5G0mfVXKjomTNFQ" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "Name": "Pâté chinois", + "Supplier": "suppliers/25-A", + "Category": "categories/6-A", + "QuantityPerUnit": "24 boxes x 2 pies", + "PricePerUnit": 24.0000, + "UnitsInStock": 25, + "UnitsOnOrder": 115, + "Discontinued": false, + "ReorderLevel": 20, + "@metadata": \{ + "@collection": "Products", + "@change-vector": "A:8170-k50KTOC5G0mfVXKjomTNFQ, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@id": "products/55-A", + "@last-modified": "2018-07-27T12:11:53.0303784Z" + \} + \}, + \{ + "Name": "Gnocchi di nonna Alice", + "Supplier": "suppliers/26-A", + "Category": "categories/5-A", + "QuantityPerUnit": "24 - 250 g pkgs.", + "PricePerUnit": 38.0000, + "UnitsInStock": 26, + "UnitsOnOrder": 21, + "Discontinued": false, + "ReorderLevel": 30, + "@metadata": \{ + "@collection": "Products", + "@change-vector": "A:8172-k50KTOC5G0mfVXKjomTNFQ, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@id": "products/56-A", + "@last-modified": "2018-07-27T12:11:53.0304385Z" + \} + \} + ] +\} +`} + + + +Note that the document ID numbers are 55 and 56 rather than the expected 51 and 52 because results are sorted in lexical order. +### Get Document Metadata Only + +cURL request: + + + +{`curl -X GET "http://live-test.ravendb.net/databases/Example/docs? + startsWith=regio + &metadataOnly=true" +`} + + +Linebreaks are added for clarity. + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 12 Sep 2019 13:44:16 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: "A:5972-k50KTOC5G0mfVXKjomTNFQ" +Vary: Accept-Encoding +Raven-Server-Version: 4.2.4.42 + +\{ + "Results": [ + \{ + "@metadata": \{ + "@collection": "Regions", + "@change-vector": "A:9948-k50KTOC5G0mfVXKjomTNFQ, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@id": "regions/1-A", + "@last-modified": "2018-07-27T12:11:53.2016685Z" + \} + \}, + \{ + "@metadata": \{ + "@collection": "Regions", + "@change-vector": "A:9954-k50KTOC5G0mfVXKjomTNFQ, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@id": "regions/2-A", + "@last-modified": "2018-07-27T12:11:53.2021826Z" + \} + \}, + \{ + "@metadata": \{ + "@collection": "Regions", + "@change-vector": "A:9950-k50KTOC5G0mfVXKjomTNFQ, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@id": "regions/3-A", + "@last-modified": "2018-07-27T12:11:53.2018086Z" + \} + \}, + \{ + "@metadata": \{ + "@collection": "Regions", + "@change-vector": "A:9952-k50KTOC5G0mfVXKjomTNFQ, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@id": "regions/4-A", + "@last-modified": "2018-07-27T12:11:53.2019223Z" + \} + \} + ] +\} +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/rest-api/document-commands/put-documents.mdx b/versioned_docs/version-7.1/client-api/rest-api/document-commands/put-documents.mdx new file mode 100644 index 0000000000..6c8bd2163f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/rest-api/document-commands/put-documents.mdx @@ -0,0 +1,206 @@ +--- +title: "Put a Document" +hide_table_of_contents: true +sidebar_label: Put a Document +sidebar_position: 3 +--- + +import Admonition from 
'@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Put a Document + + +* Use this endpoint with the **`PUT`** method to upload a new document to the database, or update an existing one: +`/databases//docs` + +* In this page: + * [Examples](../../../client-api/rest-api/document-commands/put-documents.mdx#examples) + * [Request Format](../../../client-api/rest-api/document-commands/put-documents.mdx#request-format) + * [Request Body](../../../client-api/rest-api/document-commands/put-documents.mdx#request-body) + * [Response Format](../../../client-api/rest-api/document-commands/put-documents.mdx#response-format) + + +## Examples + +These are cURL requests to a database named "Example" on our [playground server](http://live-test.ravendb.net) to store and +then modify a document. + +#### 1) Store a new document "person/1-A" in the collection "People" + + + +{`curl -X PUT "http://live-test.ravendb.net/databases/Example/docs?id=person/1-A" +-d "\{ + \\"FirstName\\":\\"Jane\\", + \\"LastName\\":\\"Doe\\", + \\"Age\\":42, + \\"@metadata\\":\{ + \\"@collection\\":\\"People\\" + \} +\}" +`} + + +Linebreaks are added for clarity. + +Response: + + + +{`HTTP/1.1 201 +status: 201 +Server: nginx +Date: Tue, 27 Aug 2019 10:58:28 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +Vary: Accept-Encoding +Raven-Server-Version: 4.2.3.42 + +\{ + "Id":"person/1-A", + "ChangeVector":"A:1" +\} +`} + + + +#### 2) Update that same document + + + +{`curl -X PUT "http://live-test.ravendb.net/databases/Example/docs?id=person/1-A" +--header "If-Match: A:1-L8hp6eYcA02dkVIEifGfKg" +-d "\{ + \\"FirstName\\":\\"John\\", + \\"LastName\\":\\"Smith\\", + \\"Age\\":24, + \\"@metadata\\":\{ + \\"@collection\\": \\"People\\" + \} +\}" +`} + + + +The response is the same as the previous response except for the updated change vector: + + + +{`HTTP/1.1 201 +status: 201 +Server: nginx +Date: Tue, 27 Aug 2019 10:59:54 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +Vary: Accept-Encoding +Raven-Server-Version: 4.2.3.42 + +\{ + "Id":"person/1-A", + "ChangeVector":"A:3" +\} +`} + + + + + +## Request Format + +This is the general format of the cURL request: + + + +{`curl -X PUT "/databases//docs?id=" +--header "If-Match: " +-d "" +`} + + + +#### Query String Parameters + +| Parameter | Description | Required | +| - | - | - | +| **id** | Unique ID under which the new document will be stored, or the ID of an existing document to be updated | Yes | + +#### Headers + +| Header | Description | Required | +| - | - | - | +| **If-Match** | When updating an existing document, this header passes the document's expected [change vector](../../../server/clustering/replication/change-vector.mdx). If this change vector doesn't match the document's server-side change vector, a concurrency exception is thrown. | No | + +#### Request Body + +The body contains a JSON document. This will replace the existing document with the specified ID if one exists. Otherwise, +it will become a new document with the specified ID. + + + +{`\{ + \\"\\": \\"\\", + ... + \\"@metadata\\": \{ + \\"@collection\\": \\"\\", + ... 
+    \}
+\}
+`}
+
+
+Depending on the shell you're using to run cURL, you will probably need to escape all double quotes within the request body
+using a backslash: `"` -> `\"`.
+
+When updating an existing document, you'll need to include its [collection](../../../client-api/faq/what-is-a-collection.mdx)
+name in the metadata or an exception will be thrown. The exception to this rule is documents in the `@empty` collection -
+i.e. documents that are not in any collection. A document's collection cannot be modified.
+
+Another way to make this request is to save your document as a file (such as a `.txt` file), and pass the path to that file in
+the request body:
+
+
+
+{`curl -X PUT "/databases//docs?id="
+-d "<@path/to/yourDocument.txt>"
+`}
+
+
+
+
+
+## Response Format
+
+The response body is JSON and contains the document ID and current [change vector](../../../server/clustering/replication/change-vector.mdx):
+
+
+
+{`\{
+    "Id": "",
+    "ChangeVector": ""
+\}
+`}
+
+
+
+| Header | Description |
+| - | - |
+| **Content-Type** | MIME media type and character encoding. This should always be: `application/json; charset=utf-8`. |
+| **Raven-Server-Version** | Version of RavenDB that the responding server is running |
+
+| HTTP Status Code | Description |
+| - | - |
+| `201` | The document was successfully stored / updated |
+| `409` | The change vector submitted did not match the server-side change vector. A concurrency exception is thrown. |
+| `500` | Server error, e.g. when the submitted document's collection tag did not match the specified document's collection tag. |
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/rest-api/queries/_category_.json b/versioned_docs/version-7.1/client-api/rest-api/queries/_category_.json
new file mode 100644
index 0000000000..b79f52fd77
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/rest-api/queries/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 2,
+    "label": "Queries"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/rest-api/queries/delete-by-query.mdx b/versioned_docs/version-7.1/client-api/rest-api/queries/delete-by-query.mdx
new file mode 100644
index 0000000000..823ee5f7ed
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/rest-api/queries/delete-by-query.mdx
@@ -0,0 +1,140 @@
+---
+title: "Delete By Query"
+hide_table_of_contents: true
+sidebar_label: Delete by Query
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Delete By Query
+
+
+* Use this endpoint with the **`DELETE`** method to delete all documents that satisfy a query:
+`/databases//queries`
+
+* In this page:
+  * [Example](../../../client-api/rest-api/queries/delete-by-query.mdx#example)
+  * [Request Format](../../../client-api/rest-api/queries/delete-by-query.mdx#request-format)
+  * [Response Format](../../../client-api/rest-api/queries/delete-by-query.mdx#response-format)
+
+
+## Example
+
+This cURL request sends a query to a database named "Example" on our [playground server](http://live-test.ravendb.net). The
+results of this query - in this case, one document named "employees/1-A" - are all deleted.
+
+
+{`curl -X DELETE "http://live-test.ravendb.net/databases/Example/queries"
+-d "\{ \\"Query\\": \\"from Employees where FirstName = 'Nancy'\\" \}"
+`}
+
+
+Linebreaks are added for clarity.
+
+Response:
+
+
+
+{`HTTP/1.1 200 OK
+Server: nginx
+Date: Sun, 24 Nov 2019 12:21:11 GMT
+Content-Type: application/json; charset=utf-8
+Transfer-Encoding: chunked
+Connection: keep-alive
+Content-Encoding: gzip
+Vary: Accept-Encoding
+Raven-Server-Version: 4.2.5.42
+Request-Time: 5
+
+\{
+    "OperationId": 42,
+    "OperationNodeTag": "A"
+\}
+`}
+
+
+
+
+
+## Request Format
+
+This is the general format of a cURL request that uses all query string parameters:
+
+
+
+{`curl -X DELETE "/databases//queries?
+    allowStale=
+    &staleTimeout=
+    &maxOpsPerSec="
+-d "\{ \}"
+`}
+
+
+
+#### Query String Parameters
+
+| Option | Description |
+| - | - |
+| **allowStale** | If the query is on an index (rather than a collection), this determines whether to delete results from a [stale index](../../../indexes/stale-indexes.mdx). If set to `false` and the specified index is stale, an exception is thrown. Default: `false`. |
+| **staleTimeout** | If `allowStale` is set to `false`, this parameter sets the amount of time to wait for the index not to be stale. If the time runs out, an exception is thrown. The value is of type [TimeSpan](https://docs.microsoft.com/en-us/dotnet/api/system.timespan). Default: `null` - if the index is stale the exception is thrown immediately. |
+| **maxOpsPerSec** | The maximum number of deletions per second the server can perform in the background. Default: no limit. |
+
+#### Body
+
+This is the general format of the request body:
+
+
+
+{`-d "\{
+    \\"Query\\": \\">\\",
+    \\"QueryParameters\\": \{
+        \\"\\":\\"\\",
+        ...
+    \}
+\}"
+`}
+
+
+Depending on the shell you're using to run cURL, you will probably need to escape all
+double quotes within the request body using a backslash: `"` -> `\"`.
+
+| Parameter | Description |
+| - | - |
+| **Query** | A query in [RQL](../../../client-api/session/querying/what-is-rql.mdx). You can insert parameters from the `QueryParameters` object with `$` |
+| **QueryParameters** | A list of values that can be used in the query, such as strings, ints, or document IDs. Inputs from your users should always be passed as query parameters to avoid SQL injection attacks, and in general it's best practice to pass all your right-hand operands as parameters. |
+
+
+## Response Format
+
+#### Http Status Codes
+
+| Code | Description |
+| - | - |
+| `200` | The request was valid. This includes the case where the query found 0 results, or the specified index does not exist, etc. |
+| `500` | Bad request or server-side exception |
+
+#### Body
+
+
+
+{`\{
+    "OperationId": ,
+    "OperationNodeTag": ""
+\}
+`}
+
+
+
+| Field | Description |
+| - | - |
+| **OperationId** | Increments each time the server receives a new Operation to execute, such as `DeleteByQuery` and `PatchByQuery` |
+| **OperationNodeTag** | The tag of the Cluster Node that first received the Delete by Query Operation. Values are `A` to `Z`. See [Cluster Topology](../../../server/clustering/rachis/cluster-topology.mdx). |
+
diff --git a/versioned_docs/version-7.1/client-api/rest-api/queries/patch-by-query.mdx b/versioned_docs/version-7.1/client-api/rest-api/queries/patch-by-query.mdx
new file mode 100644
index 0000000000..247a39218f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/rest-api/queries/patch-by-query.mdx
@@ -0,0 +1,145 @@
+---
+title: "Patch By Query"
+hide_table_of_contents: true
+sidebar_label: Patch by Query
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Patch By Query
+
+
+* Use this endpoint with the **`PATCH`** method to update all documents that satisfy a query:
+`/databases//queries`
+
+* [Patching](../../../client-api/operations/patching/set-based.mdx) occurs on the server side.
+
+* In this page:
+  * [Example](../../../client-api/rest-api/queries/patch-by-query.mdx#example)
+  * [Request Format](../../../client-api/rest-api/queries/patch-by-query.mdx#request-format)
+  * [Response Format](../../../client-api/rest-api/queries/patch-by-query.mdx#response-format)
+
+
+## Example
+
+This cURL request sends a query with an `update` clause to a database named "Example" on our
+[playground server](http://live-test.ravendb.net). The results of this query will each be modified on the server side.
+
+
+
+{`curl -X PATCH "http://live-test.ravendb.net/databases/Example/queries"
+-d "\{ \\"Query\\": \{ \\"Query\\": \\"from Employees as E update \{ E.FirstName = 'Bob' \}\\" \} \}"
+`}
+
+
+Linebreaks are added for clarity.
+
+Response:
+
+
+
+{`HTTP/1.1 200 OK
+Server: nginx
+Date: Sun, 24 Nov 2019 12:24:51 GMT
+Content-Type: application/json; charset=utf-8
+Transfer-Encoding: chunked
+Connection: keep-alive
+Content-Encoding: gzip
+Vary: Accept-Encoding
+Raven-Server-Version: 4.2.5.42
+Request-Time: 5
+
+\{
+    "OperationId": 42,
+    "OperationNodeTag": "A"
+\}
+`}
+
+
+
+
+
+## Request Format
+
+This is the general format of a cURL request that uses all query string parameters:
+
+
+
+{`curl -X PATCH "/databases//queries?
+    allowStale=
+    &staleTimeout=
+    &maxOpsPerSec="
+-d "\{ \}"
+`}
+
+
+
+#### Query String Parameters
+
+| Option | Description |
+| - | - |
+| **allowStale** | If the query is on an index (rather than a collection), this determines whether to patch results from a [stale index](../../../indexes/stale-indexes.mdx). If set to `false` and the specified index is stale, an exception is thrown. Default: `false`. |
+| **staleTimeout** | If `allowStale` is set to `false`, this parameter sets the amount of time to wait for the index not to be stale. If the time runs out, an exception is thrown. The value is of type [TimeSpan](https://docs.microsoft.com/en-us/dotnet/api/system.timespan). Default: `null` - if the index is stale the exception is thrown immediately. |
+| **maxOpsPerSec** | The maximum number of patches per second the server can perform in the background. Default: no limit. |
+
+#### Body
+
+This is the general format of the request body:
+
+
+
+{`-d "\{
+    \\"Query\\": \{
+        \\"Query\\": \\">\\",
+        \\"QueryParameters\\": \{
+            \\"\\":\\"\\",
+            ...
+        \}
+    \}
+\}"
+`}
+
+
+Depending on the shell you're using to run cURL, you will probably need to escape all
+double quotes within the request body using a backslash: `"` -> `\"`.
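+
+If the escaping becomes unwieldy, one alternative - a sketch using a hypothetical local file name, not part of the original example - is to keep the unescaped JSON body in a file and let cURL read it with the `@` prefix:
+
+
+
+{`curl -X PATCH "http://live-test.ravendb.net/databases/Example/queries"
+-d "@patch-query.json"
+`}
+
+
+Here `patch-query.json` would hold the same JSON body shown above, written plainly with no shell escaping.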
+
+| Parameter | Description |
+| - | - |
+| **Query** | A query in [RQL](../../../client-api/session/querying/what-is-rql.mdx). You can insert parameters from the `QueryParameters` object with `$` |
+| **QueryParameters** | A list of values that can be used in the query, such as strings, ints, or document IDs. Inputs from your users should always be passed as query parameters to avoid SQL injection attacks, and in general it's best practice to pass all your right-hand operands as parameters. |
+
+
+## Response Format
+
+#### Http Status Codes
+
+| Code | Description |
+| - | - |
+| `200` | The request was valid. This includes the case where the query found 0 results, or the specified index does not exist, etc. |
+| `400` | Bad request |
+| `500` | Server-side exception |
+
+#### Body
+
+
+
+{`\{
+    "OperationId": ,
+    "OperationNodeTag": ""
+\}
+`}
+
+
+
+| Field | Description |
+| - | - |
+| **OperationId** | Increments each time the server receives a new Operation to execute, such as `DeleteByQuery` or `PatchByQuery` |
+| **OperationNodeTag** | The tag of the Cluster Node that first received the Patch by Query Operation. Values are `A` to `Z`. See [Cluster Topology](../../../server/clustering/rachis/cluster-topology.mdx). |
+
+
diff --git a/versioned_docs/version-7.1/client-api/rest-api/queries/query-the-database.mdx b/versioned_docs/version-7.1/client-api/rest-api/queries/query-the-database.mdx
new file mode 100644
index 0000000000..431ce41a26
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/rest-api/queries/query-the-database.mdx
@@ -0,0 +1,616 @@
+---
+title: "Query the Database"
+hide_table_of_contents: true
+sidebar_label: Query the Database
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Query the Database
+
+
+* Use this endpoint with the **`POST`** method to query the database:
+`/databases//queries`
+
+* Queries are written in [RQL](../../../client-api/session/querying/what-is-rql.mdx), our user-friendly SQL-like query language.
+
+* In this page:
+  * [Basic Example](../../../client-api/rest-api/queries/query-the-database.mdx#basic-example)
+  * [Request Format](../../../client-api/rest-api/queries/query-the-database.mdx#request-format)
+  * [Response Format](../../../client-api/rest-api/queries/query-the-database.mdx#response-format)
+  * [More Examples](../../../client-api/rest-api/queries/query-the-database.mdx#more-examples)
+
+
+## Basic Example
+
+This cURL request queries the [collection](../../../client-api/faq/what-is-a-collection.mdx) `Shippers` in a database named
+"Example" on our [playground server](http://live-test.ravendb.net).
+The response contains all documents from this collection.
+
+
+
+{`curl -X POST "http://live-test.ravendb.net/databases/Example/queries"
+-d "\{ \\"Query\\": \\"from Shippers\\" \}"
+`}
+
+
+Linebreaks are added for clarity.
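+
+A variation on this request - an illustrative sketch, separate from the example whose response is shown below - passes the shipper name as a query parameter (parameters are described under Request Format). The `$` is escaped so the shell does not expand it before RQL sees `$name`:
+
+
+
+{`curl -X POST "http://live-test.ravendb.net/databases/Example/queries"
+-d "\{ \\"Query\\": \\"from Shippers where Name = \\$name\\", \\"QueryParameters\\": \{ \\"name\\": \\"Speedy Express\\" \} \}"
+`}
+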
+ +Response: + + + +{`HTTP/1.1 200 OK +Date: Wed, 06 Nov 2019 15:54:15 GMT +Content-Type: application/json; charset=utf-8 +Server: Kestrel +ETag: -786759538542975908 +Vary: Accept-Encoding +Raven-Server-Version: 4.1.9.41023 +Request-Time: 0 +Content-Length: 1103 + +\{ + "TotalResults": 3, + "SkippedResults": 0, + "DurationInMs": 0, + "IncludedPaths": null, + "IndexName": "collection/Shippers", + "Results": [ + \{ + "Name": "Speedy Express", + "Phone": "(503) 555-9831", + "@metadata": \{ + "@collection": "Shippers", + "@change-vector": "A:8529-+pXj/MXEzkeiuFCvLdipcw, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@id": "shippers/1-A", + "@last-modified": "2018-07-27T12:11:53.0317375Z" + \} + \}, + \{ + "Name": "United Package", + "Phone": "(503) 555-3199", + "@metadata": \{ + "@collection": "Shippers", + "@change-vector": "A:8531-+pXj/MXEzkeiuFCvLdipcw, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@id": "shippers/2-A", + "@last-modified": "2018-07-27T12:11:53.0317596Z" + \} + \}, + \{ + "Name": "Federal Shipping", + "Phone": "(503) 555-9931", + "@metadata": \{ + "@collection": "Shippers", + "@change-vector": "A:8533-+pXj/MXEzkeiuFCvLdipcw, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@id": "shippers/3-A", + "@last-modified": "2018-07-27T12:11:53.0317858Z" + \} + \} + ], + "Includes": \{\}, + "IndexTimestamp": "0001-01-01T00:00:00.0000000", + "LastQueryTime": "0001-01-01T00:00:00.0000000", + "IsStale": false, + "ResultEtag": -786759538542975908, + "NodeTag": "A" +\} +`} + + + + + +## Request Format + +This is the general format of a cURL request that uses all query string parameters: + + + +{`curl -X POST "/databases//queries? + metadataOnly= + &includeServerSideQuery= + &debug=" +--header "If-None-Match: " +-d "\{ \}" +`} + + +Linebreaks are added for clarity. + + +#### Query String Parameters + +| Parameter | Description | Required | +| - | - | - | +| **metadataOnly** | Set this parameter to `true` to retrieve only the document metadata from each result | No | +| **includeServerSideQuery** | Adds the RQL query that is run on the server side, which may look slightly different than the query sent | No | +| **debug** | Takes one of several values - listed in the table below - that modify the results or add information | No | + +#### Values of `debug` parameter + +| Value | Description | +|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **entries** | Returns the index entries instead of the complete documents, meaning only those fields that are indexed by the queried index | +| **explain** | Used for queries on [Auto Indexes](../../../indexes/creating-and-deploying.mdx#auto-indexes).
Returns _just_ the name of an existing index that can be used to satisfy this query. If no appropriate index could be found, returns the next best index with an explanation of why it is not appropriate for this query - e.g. it does not index the necessary fields.
If no index was found, this query will _not_ trigger the creation of an auto index as it normally would. | +| **serverSideQuery** | Returns _just_ the RQL query that is run on the server side, which may look slightly different than the query sent | +| **graph** | Returns [Graph Query](../../../indexes/querying/graph/graph-queries-overview.mdx) results analyzed as nodes and edges | +| **detailedGraphResult** | Returns [Graph Query](../../../indexes/querying/graph/graph-queries-overview.mdx) results arranged by their corresponding parts of the query | + +#### Headers + +| Header | Description | +|-------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **If-None-Match** | This optional header tells the server to check whether the requested data has been changed since the last request.

To use it, insert the value of the `ResultEtag` field from the response to your previous query. This value is a hash of type `long` that represents the state of the index or collection that satisfied the query. If that index or collection has not been updated, the server will respond with HTTP status code `304` and no results will be retrieved.

Note that this happens regardless of the content of the query itself. |
+
+#### Body
+
+This is the general format of the request body:
+
+
+
+{`-d "\{
+    \\"Query\\": \\">\\",
+    \\"QueryParameters\\": \{
+        \\"\\":\\"\\",
+        ...
+    \}
+\}"
+`}
+
+
+
+Depending on the shell you're using to run cURL,
+you will probably need to escape all double quotes within the request body using a backslash: `"` -> `\"`.
+
+| Parameter | Description |
+|---------------------|-------------|
+| **Query** | A query in [RQL](../../../client-api/session/querying/what-is-rql.mdx). You can insert parameters from the `QueryParameters` object with `$` |
+| **QueryParameters** | A list of values that can be used in the query, such as strings, ints, or document IDs.
Inputs from your users should always be passed as query parameters to avoid injection attacks, and in general it's best practice to pass all your right-hand operands as parameters. |
+
+## Response Format
+
+#### HTTP Status Codes
+
+| Code | Description |
+| - | - |
+| `200` | Results are successfully retrieved, including the case where there are 0 results |
+| `304` | In response to a query with the `If-None-Match` header: the same index was used to satisfy the query, and none of the requested documents were modified since they were last loaded, so they were not retrieved from the server. (They are retrieved from the local cache instead.) |
+| `404` | The specified index could not be found. In the case where a specified collection could not be found, see status code `200`. |
+| `500` | Invalid query or server-side exception |
+
+
+#### Body
+
+
+
+{`\{
+    "TotalResults": ,
+    "SkippedResults": ,
+    "CappedMaxResults": ,
+    "DurationInMs": ,
+    "IncludedPaths": [
+        "",
+        ...
+    ],
+    "IndexName": "",
+    "Results": [
+        \{
+
+        \},
+        ...
+    ],
+    "Includes": \{
+        "": \{
+
+        \},
+        "": \{ \},
+        ...
+    \},
+    "IndexTimestamp": "",
+    "LastQueryTime": "",
+    "IsStale": ,
+    "ResultEtag": ,
+    "NodeTag": "",
+    "Timings": \{ \},
+    "ServerSideQuery": ""
+\}
+`}
+
+
+
+| Field | Description |
+| - | - |
+| **TotalResults** | The total number of results of the query |
+| **CappedMaxResults** | The number of results retrieved after the [maximum page size](../../../indexes/querying/paging.mdx) is applied. If paging was not used, this field does not appear. |
+| **SkippedResults** | The number of results that were skipped, e.g. because there were [duplicates](../../../indexes/querying/distinct.mdx) |
+| **DurationInMs** | Number of milliseconds it took to satisfy the query on the server side |
+| **IncludedPaths** | Array of the paths within the queried documents to the [related document](../../../client-api/how-to/handle-document-relationships.mdx#includes) IDs. Default: `null` |
+| **IndexName** | Name of the index used to satisfy the query |
+| **Results** | List of documents returned by the query, sorted in ascending order of their [change vectors](../../../server/clustering/replication/change-vector.mdx) |
+| **Includes** | List of included documents returned by the query, sorted in ascending alphabetical order |
+| **IndexTimestamp** | The last time the index was updated. [DateTime format](https://docs.microsoft.com/en-us/dotnet/api/system.datetime) |
+| **LastQueryTime** | The last time the index was queried. This may even reflect a query more recent than this one. |
+| **IsStale** | Whether the results are [stale](../../../indexes/stale-indexes.mdx) |
+| **ResultEtag** | A hash of type `long` representing the results. When making another request identical to this one, this value can be sent in the `If-None-Match` header to check whether the results have been modified since this response. If not, the results will be retrieved from a local cache instead of from the server. |
+| **NodeTag** | The tag of the Cluster Node that responded to the query. Values are `A` to `Z`. See [Cluster Topology](../../../server/clustering/rachis/cluster-topology.mdx). |
+| **Timings** | If [requested](../../../client-api/session/querying/debugging/query-timings.mdx), the duration of the query operation and each of its sub-stages.
See the structure of the [`Timings` object](../../../client-api/rest-api/queries/query-the-database.mdx#the-timings-object) and the [timings example](../../../client-api/rest-api/queries/query-the-database.mdx#get-timing-details) below. |
+
+#### The `Timings` Object
+
+`Timings` tells you the duration of the whole query operation, including a breakdown of the different stages and sub-stages of the
+operation. Examples of these stages might be the query itself or the amount of time the server waited for an index not to be stale.
+These are the durations on the server side, not including the transfer over the network.
+
+The `Timings` object itself has a hierarchical structure, with each stage containing a list of sub-stages, which contain their
+own lists, and so on. Each stage contains a `DurationInMs` field with the total number of milliseconds the stage took, and a field
+called `Timings` which contains the list of sub-stages. If a stage has no sub-stages, the value of its `Timings` field is `null`.
+
+At every level of this structure, stages are listed in _alphabetical order_ of the stage's names. The durations of sub-stages only
+roughly add up to the duration of the parent stage because `DurationInMs` values are rounded to the nearest whole number.
+
+
+
+{`"Timings": \{
+    "DurationInMs": ,
+    "Timings": \{
+        "": \{
+            "DurationInMs": ,
+            "Timings": \{
+                "": \{ \},
+                ...
+            \}
+        \},
+        "": \{ \},
+        ...
+    \}
+\}
+`}
+
+
+
+
+
+## More Examples
+
+[About Northwind](../../../start/about-examples.mdx), the database used in our examples.
+
+In this section:
+
+* [Include Related Documents](../../../client-api/rest-api/queries/query-the-database.mdx#include-related-documents)
+* [Paging Results](../../../client-api/rest-api/queries/query-the-database.mdx#paging-results)
+* [Get Timing Details](../../../client-api/rest-api/queries/query-the-database.mdx#get-timing-details)
+
+### Include Related Documents
+
+This query tells the server to include a [related document](../../../client-api/how-to/handle-document-relationships.mdx#includes).
+
+Paths within documents can be passed as a `string` (`'Address.City'`), or directly (`Address.City`) as in this query. When writing
+paths as a `string`, keep in mind [these conventions](../../../client-api/how-to/handle-document-relationships.mdx#path-conventions).
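+
+The raw cURL exchange follows. First, as a hedged illustration of consuming the response, this hypothetical C# sketch runs the same include query and resolves the related supplier from the `Includes` map without a second server call (`HttpClient` and `System.Text.Json` are standard .NET classes, not part of the RavenDB client):
+
+
+
+{`using System;
+using System.Net.Http;
+using System.Text;
+using System.Text.Json;
+using System.Threading.Tasks;
+
+// Hedged sketch: query with 'include' and read the related document
+// from the "Includes" map of the same response.
+class IncludeExample
+{
+    static async Task Main()
+    {
+        using var client = new HttpClient();
+        string payload = JsonSerializer.Serialize(new
+        {
+            Query = "from Products where Name = 'Chocolade' include Supplier"
+        });
+
+        var response = await client.PostAsync(
+            "http://live-test.ravendb.net/databases/Example/queries",
+            new StringContent(payload, Encoding.UTF8, "application/json"));
+
+        using var doc = JsonDocument.Parse(await response.Content.ReadAsStringAsync());
+
+        // The product itself stores only the supplier's ID...
+        JsonElement product = doc.RootElement.GetProperty("Results")[0];
+        string supplierId = product.GetProperty("Supplier").GetString();
+
+        // ...while the supplier document travels along in "Includes"
+        JsonElement supplier = doc.RootElement.GetProperty("Includes").GetProperty(supplierId);
+        Console.WriteLine(supplier.GetProperty("Name").GetString());
+    }
+}
+`}
+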
+ +Request: + + + +{`curl -X POST "http://live-test.ravendb.net/databases/Example/queries" +-d "\{ \\"Query\\": \\"from Products where Name = 'Chocolade' include Supplier, Category\\" \}" +`} + + + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 21 Nov 2019 14:55:59 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: -829128196141269816 +Vary: Accept-Encoding +Raven-Server-Version: 4.2.5.42 +Request-Time: 166 + +\{ + "TotalResults": 1, + "SkippedResults": 0, + "DurationInMs": 165, + "IncludedPaths": [ + "Supplier", + "Category" + ], + "IndexName": "Auto/Products/ByName", + "Results": [ + \{ + "Name": "Chocolade", + "Supplier": "suppliers/22-A", + "Category": "categories/3-A", + "QuantityPerUnit": "10 pkgs.", + "PricePerUnit": 12.7500, + "UnitsInStock": 22, + "UnitsOnOrder": 15, + "Discontinued": false, + "ReorderLevel": 25, + "@metadata": \{ + "@collection": "Products", + "@change-vector": "A:285-axxGtO/AJUGOLMLrpcu8hA", + "@id": "products/48-A", + "@index-score": 4.65065813064575, + "@last-modified": "2018-07-27T12:11:53.0300420Z" + \} + \} + ], + "Includes": \{ + "suppliers/22-A": \{ + "Contact": \{ + "Name": "Dirk Luchte", + "Title": "Accounting Manager" + \}, + "Name": "Zaanse Snoepfabriek", + "Address": \{ + "Line1": "Verkoop Rijnweg 22", + "Line2": null, + "City": "Zaandam", + "Region": null, + "PostalCode": "9999 ZZ", + "Country": "Netherlands", + "Location": null + \}, + "Phone": "(12345) 1212", + "Fax": "(12345) 1210", + "HomePage": null, + "@metadata": \{ + "@collection": "Suppliers", + "@change-vector": "A:399-axxGtO/AJUGOLMLrpcu8hA", + "@id": "suppliers/22-A", + "@last-modified": "2018-07-27T12:11:53.0335729Z" + \} + \}, + "categories/3-A": \{ + "Name": "Confections", + "Description": "Desserts, candies, and sweet breads", + "@metadata": \{ + "@attachments": [ + \{ + "Name": "image.jpg", + "Hash": "1QxSMa3tBr+y8wQYNre7E9UJFFVTNWGjVoC+IC+gSSs=", + "ContentType": "image/jpeg", + "Size": 47955 + \} + ], + "@collection": "Categories", + "@change-vector": "A:2092-axxGtO/AJUGOLMLrpcu8hA", + "@flags": "HasAttachments", + "@id": "categories/3-A", + "@last-modified": "2018-07-27T12:16:44.1738714Z" + \} + \} + \}, + "IndexTimestamp": "2019-11-21T14:55:59.4797461", + "LastQueryTime": "2019-11-21T14:55:59.4847597", + "IsStale": false, + "ResultEtag": -829128196141269816, + "NodeTag": "A" +\} +`} + + + +### Paging Results + +This query uses the `limit` keyword to skip the first 5 results and retrieve the next 2: + + + +{`curl -X POST "http://live-test.ravendb.net/databases/Example/queries" +-d "\{ \\"Query\\": \\"from index 'Product/Search' limit 5, 2 \\" \}" +`} + + + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 21 Nov 2019 15:25:45 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: 7666904607700231125 +Vary: Accept-Encoding +Raven-Server-Version: 4.2.5.42 +Request-Time: 0 + +\{ + "TotalResults": 77, + "CappedMaxResults": 2, + "SkippedResults": 0, + "DurationInMs": 0, + "IncludedPaths": null, + "IndexName": "Product/Search", + "Results": [ + \{ + "Name": "Grandma's Boysenberry Spread", + "Supplier": "suppliers/3-A", + "Category": "categories/2-A", + "QuantityPerUnit": "12 - 8 oz jars", + "PricePerUnit": 25.0000, + "UnitsInStock": 3, + "UnitsOnOrder": 120, + "Discontinued": false, + "ReorderLevel": 25, + "@metadata": \{ + "@collection": "Products", + "@change-vector": 
"A:201-axxGtO/AJUGOLMLrpcu8hA", + "@id": "products/6-A", + "@index-score": 1, + "@last-modified": "2018-07-27T12:11:53.0274169Z" + \} + \}, + \{ + "Name": "Uncle Bob's Organic Dried Pears", + "Supplier": "suppliers/3-A", + "Category": "categories/7-A", + "QuantityPerUnit": "12 - 1 lb pkgs.", + "PricePerUnit": 30.0000, + "UnitsInStock": 3, + "UnitsOnOrder": 15, + "Discontinued": false, + "ReorderLevel": 10, + "@metadata": \{ + "@collection": "Products", + "@change-vector": "A:203-axxGtO/AJUGOLMLrpcu8hA", + "@id": "products/7-A", + "@index-score": 1, + "@last-modified": "2018-07-27T12:11:53.0275119Z" + \} + \} + ], + "Includes": \{\}, + "IndexTimestamp": "2019-11-21T14:55:01.6473995", + "LastQueryTime": "2019-11-21T15:25:45.7308416", + "IsStale": false, + "ResultEtag": 7666904607700231125, + "NodeTag": "A" +\} +`} + + + +### Get Timing Details + +In this request we see a query on the `Orders` collection, filtered by the values of the fields `Employee` and `Company` +(incidentally, both point to related documents), and a projection that selects only the `Freight` and `ShipVia` fields +to be retrieved from the server. Finally, using the same syntax as for related documents shown above, it asks for +`timings()`. + + + +{`curl -X POST "http://live-test.ravendb.net/databases/Example/queries?" +-d "\{\\"Query\\": \\"from Orders + where Employee = 'employees/1-A' + and Company = 'companies/91-A' + select Freight, ShipVia + include timings()\\"\}" +`} + + + +Response: + + + +{`HTTP/1.1 200 OK +Server: nginx +Date: Thu, 21 Nov 2019 16:58:32 GMT +Content-Type: application/json; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Content-Encoding: gzip +ETag: -1802145387109965474 +Vary: Accept-Encoding +Raven-Server-Version: 4.2.5.42 +Request-Time: 214 + +\{ + "TotalResults": 2, + "SkippedResults": 0, + "DurationInMs": 213, + "IncludedPaths": null, + "IndexName": "Auto/Orders/ByCompanyAndEmployee", + "Results": [ + \{ + "Freight": 3.94, + "ShipVia": "shippers/3-A", + "@metadata": \{ + "@projection": true, + "@change-vector": "A:45767-axxGtO/AJUGOLMLrpcu8hA, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@flags": "HasRevisions", + "@id": "orders/127-A", + "@index-score": 6.3441801071167, + "@last-modified": "2018-07-27T12:11:53.0677162Z" + \} + \}, + \{ + "Freight": 23.79, + "ShipVia": "shippers/3-A", + "@metadata": \{ + "@projection": true, + "@change-vector": "A:46603-axxGtO/AJUGOLMLrpcu8hA, A:1887-0N64iiIdYUKcO+yq1V0cPA, A:6214-xwmnvG1KBkSNXfl7/0yJ1A", + "@flags": "HasRevisions", + "@id": "orders/545-A", + "@index-score": 6.3441801071167, + "@last-modified": "2018-07-27T12:11:53.1390160Z" + \} + \} + ], + "Includes": \{\}, + "IndexTimestamp": "2019-11-21T16:58:32.8180797", + "LastQueryTime": "2019-11-21T16:58:32.8179978", + "IsStale": false, + "ResultEtag": -1802145387109965474, + "NodeTag": "A", + "Timings": \{ + "DurationInMs": 213, + "Timings": \{ + "Optimizer": \{ + "DurationInMs": 46, + "Timings": null + \}, + "Query": \{ + "DurationInMs": 0, + "Timings": \{ + "Lucene": \{ + "DurationInMs": 0, + "Timings": null + \}, + "Retriever": \{ + "DurationInMs": 0, + "Timings": \{ + "Projection": \{ + "DurationInMs": 0, + "Timings": \{ + "Storage": \{ + "DurationInMs": 0, + "Timings": null + \} + \} + \} + \} + \} + \} + \}, + "Staleness": \{ + "DurationInMs": 165, + "Timings": null + \} + \} + \} +\} +`} + + + +At the end of the response body above we see the `Timings` object which shows all the stages of the operation listed in +alphabetical order. 
In this case there was an `Optimizer` stage, during which a new dynamic index was created to satisfy the +query. The name of this new index is shown at the top of the body: `Auto/Orders/ByCompanyAndEmployee`. Next came a `Staleness` +stage during which the indexing itself took place. Lastly came the `Query` stage itself. This included a [Lucene search engine](https://lucene.apache.org/) +substage and a `Retriever` substage. As you can see, since the index has already done all the work, the query itself takes less +than a millisecond. From now on, similar queries on this index will also take the server a millisecond or less to complete. + + + diff --git a/versioned_docs/version-7.1/client-api/rest-api/rest-api-intro.mdx b/versioned_docs/version-7.1/client-api/rest-api/rest-api-intro.mdx new file mode 100644 index 0000000000..23349ccd7c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/rest-api/rest-api-intro.mdx @@ -0,0 +1,167 @@ +--- +title: "Introduction to the REST API" +hide_table_of_contents: true +sidebar_label: Introduction to the REST API +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Introduction to the REST API + + +* This page covers some basic information that will help you learn to use the REST API: + * How to use the CLI tool *cURL*. + * A description of the JSON format for the purposes of writing and parsing it. + * Some of the HTTP status codes used in the API. + +* To learn more about HTTP and REST in general, try these tutorials: + * [HTTP guide for developers by Mozilla](https://developer.mozilla.org/en-US/docs/Web/HTTP) + * [REST API Tutorial website](https://www.restapitutorial.com/) + +* In this page: + * [cURL Basics](../../client-api/rest-api/rest-api-intro.mdx#curl-basics) + * [Document Format and Structure](../../client-api/rest-api/rest-api-intro.mdx#document-format-and-structure) + * [Using cURL With HTTPS](../../client-api/rest-api/rest-api-intro.mdx#using-curl-with-https) + * [Common HTTP Status Codes](../../client-api/rest-api/rest-api-intro.mdx#common-http-status-codes) + + +## cURL Basics + +A good way to familiarize yourself with the RavenDB REST API is with the command line tool cURL, which allows you to construct and +send individual HTTP requests. You can download cURL from [curl.haxx.se](https://curl.haxx.se/download.html) (If you're using Linux +your CLI may already have cURL installed). You can learn how to use it with the [cURL documentation](https://curl.haxx.se/docs/). +This page just covers the basics you'll need to interact with RavenDB. + +All cURL commands begin with the keyword `curl` and contain the URL of your RavenDB server or one of its endpoints. This command retrieves the first document from +a database named "Demo" located on our public [playground server](http://live-test.ravendb.net), and prints it in your CLI: + + + +{`curl http://live-test.ravendb.net/databases/demo/docs?pagesize=1 +`} + + + +The other parameters of the HTTP request are specified using 'options'. These are the main cURL options that interest us: + +| Option | Purpose | +| - | - | +| -X | Set the [HTTP method](https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html) that is sent with the request | +| -H | Add one or more headers, e.g. 
to provide extra information about the contents of the request body |
+| -d | This option denotes the beginning of the body of the request. The body itself is wrapped with double quotes `"`. One of the ways to upload a document to the server is to send it in the body. |
+| -T | Set the path to a file you want to upload, such as a document or attachment |
+| --cert | (For HTTPS) the path to your certificate file |
+| --key | (For HTTPS) the path to your private key file |
+
+This request uploads a document to a database on the playground server from a local file:
+
+
+
+{`curl -X PUT http://live-test.ravendb.net/databases/demo/docs?id=example -T document.txt
+`}
+
+
+[More about how to upload documents](../../client-api/rest-api/document-commands/put-documents.mdx)
+
+
+
+## Document Format and Structure
+
+In RavenDB all documents have a standard [JSON](https://www.json.org/) format. In essence, every JSON object is composed of a series
+of key-value pairs. A document with a complex structure might look something like this:
+
+
+
+{`\{
+    "": ,
+    "": "",
+    "an array": [
+        ,
+        "",
+        ...
+    ],
+    "an object": \{
+        "": ,
+        "": "",
+        ...
+    \},
+    ...
+\}
+`}
+
+
+
+The whole object is wrapped in curly brackets `{}`. The `` is always a string, and the `` can be a string (denoted by
+double quotes), a number, a boolean, or null. The value can also be an array of values wrapped in square brackets `[]`, or it can itself be another JSON object
+wrapped in another pair of curly brackets. Whitespace is completely optional. In the above example and throughout the documentation,
+JSON is broken into multiple lines for the sake of clarity. When using cURL, the entire command including the request body
+needs to be on one line.
+
+
+#### Sending raw JSON using cURL
+Sending raw JSON in the body presents a problem: the body itself is wrapped with double quotes `"`,
+so the double quotes within the JSON will be interpreted by the parser as the end of the body. The solution is to escape every double quote
+by putting a backslash `\` before it, like this:
+
+
+
+{`-d "\{
+    \\"a string\\": \\"some text\\",
+    \\"a number\\": 42
+\}"
+`}
+
+
+
+
+#### Binary data
+In addition to JSON, pure binary data can be stored as an [attachment](../../document-extensions/attachments/what-are-attachments.mdx)
+associated with an existing document. Files can be added to the request with the `-T` option. Some types of requests, though, allow you to include raw binary in the body - such as the
+[Put Attachment Command](../../client-api/rest-api/document-commands/batch-commands.mdx#put-attachment-command).
+
+
+
+## Using cURL With HTTPS
+
+HTTPS adds public-key encryption on top of standard HTTP to protect information during transit between client and server. It has
+become increasingly common throughout the internet in recent years. Our [setup wizard](../../start/installation/setup-wizard.mdx) makes
+it very simple to set up a secure server using a free [Let's Encrypt](https://letsencrypt.org/) certificate.
+
+To communicate with a secure server over HTTPS, you need to specify the paths to your client certificate and private key
+files with the `--cert` and `--key` options respectively:
+
+
+
+{`curl --cert --key ""
+`}
+
+
+
+These files can be found in the configuration Zip package you received at the end of the setup wizard. You can download this Zip package
+again by going to this endpoint: `/admin/debug/cluster-info-package`.
The certificate and key are found at
+the root of the package with the names `admin.client.certificate..crt` and
+`admin.client.certificate..key`, respectively.
+
+
+
+## Common HTTP Status Codes
+
+These are a few of the HTTP status codes we use in our REST API, and what we mean by them:
+
+| Code | [Official IANA description](https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) | Purpose |
+| - | - | - |
+| 200 | OK | Indicates that a valid request was received by the server, such as `GET` requests and queries. This includes cases where the response body itself is empty because the query returned 0 results. |
+| 201 | Created | Confirms the success of document `PUT` requests |
+| 304 | Not Modified | When prompted, the server can check if the requested data has been modified since the previous request. If it hasn't, the server responds with this status code to tell the client that it can continue to use the locally cached copy of the data. This is a mechanism we often use to minimize traffic over the network. |
+| 404 | Not Found | Sometimes used to indicate that the request was valid but the requested data could not be found |
+| 409 | Conflict | Indicates that the database has received [conflicting commands](../../server/clustering/replication/replication-conflicts.mdx). This happens in clusters when different nodes receive commands to modify the same data at the same time - before the modification could be passed on to the rest of the cluster. |
+| 500 | Internal Server Error | Used for exceptions that occur on the server side |
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/security/_category_.json b/versioned_docs/version-7.1/client-api/security/_category_.json
new file mode 100644
index 0000000000..e9e2bab6c4
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/security/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 14,
+    "label": "Security"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/security/_deserialization-security-csharp.mdx b/versioned_docs/version-7.1/client-api/security/_deserialization-security-csharp.mdx
new file mode 100644
index 0000000000..f3c8912f93
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/security/_deserialization-security-csharp.mdx
@@ -0,0 +1,203 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Data deserialization can trigger the execution of gadgets that
+  may initiate RCE attacks on the client machine.
+* To handle this threat, RavenDB's default deserializer blocks the
+  deserialization of known [`.NET` RCE gadgets](https://cheatsheetseries.owasp.org/cheatsheets/Deserialization_Cheat_Sheet.html#known-net-rce-gadgets).
+* Users can easily modify the list of namespaces and object types
+  for which deserialization is forbidden or allowed.
+
+* In this page:
+  * [Securing Deserialization](../../client-api/security/deserialization-security.mdx#securing-deserialization)
+  * [Invoking a Gadget](../../client-api/security/deserialization-security.mdx#invoking-a-gadget)
+  * [DefaultRavenSerializationBinder](../../client-api/security/deserialization-security.mdx#defaultravenserializationbinder)
+    * [RegisterForbiddenNamespace](../../client-api/security/deserialization-security.mdx#section)
+    * [RegisterForbiddenType](../../client-api/security/deserialization-security.mdx#section-1)
+    * [RegisterSafeType](../../client-api/security/deserialization-security.mdx#section-2)
+    * [Example](../../client-api/security/deserialization-security.mdx#example)
+
+
+## Securing Deserialization
+
+* When a RavenDB client uses the [Newtonsoft library](https://www.newtonsoft.com/json/help/html/SerializingJSON.htm)
+  to deserialize a JSON string to a `.NET` object, the object may include
+  a reference to a **gadget** (a code segment) and the deserialization
+  process may execute this gadget.
+* Some gadgets attempt to exploit the deserialization process and initiate
+  an RCE (Remote Code Execution) attack that may, for example, inject the
+  system with malicious code. RCE attacks may sabotage the system, gain
+  control over it, steal information, and so on.
+* To prevent such exploitation, RavenDB's default deserializer
+  blocks deserialization for suspicious namespaces and
+  [known `.NET` RCE gadgets](https://cheatsheetseries.owasp.org/cheatsheets/Deserialization_Cheat_Sheet.html#known-net-rce-gadgets):
+  `System.Configuration.Install.AssemblyInstaller`
+  `System.Activities.Presentation.WorkflowDesigner`
+  `System.Windows.ResourceDictionary`
+  `System.Windows.Data.ObjectDataProvider`
+  `System.Windows.Forms.BindingSource`
+  `Microsoft.Exchange.Management.SystemManager.WinForms.ExchangeSettingsProvider`
+  `System.Data.DataViewManager, System.Xml.XmlDocument/XmlDataDocument`
+  `System.Management.Automation.PSObject`
+
+* Users can easily [modify](../../client-api/security/deserialization-security.mdx#defaultravenserializationbinder)
+  the list of namespaces and object types for which deserialization is forbidden
+  or allowed.
+
+
+
+## Invoking a Gadget
+
+* **Directly-loaded gadgets cannot be blocked using the default binder**.
+  When a gadget is loaded directly, its loading and execution during
+  deserialization are **permitted** regardless of the content of the
+  default deserializer list.
+
+  E.g., the following segment will be executed:
+
+
+{`// The object will be allowed to be deserialized
+// regardless of the default binder list.
+session.Load("Gadget");
+`}
+
+
+
+* **Indirectly-loaded gadgets can be blocked using the default binder**.
+  When a gadget is loaded indirectly, its loading and execution during
+  deserialization **can be blocked** using the default deserializer list.
+
+  E.g., in the following sample, taken [from here](https://book.hacktricks.xyz/pentesting-web/deserialization/basic-.net-deserialization-objectdataprovider-gadgets-expandedwrapper-and-json.net#abusing-json.net),
+  a gadget is loaded indirectly: its type name is embedded in the payload as a
+  plain value, and the gadget is only resolved and executed when the payload
+  is deserialized.
+  Including this type in the default deserialization list will
+  prevent the gadget's deserialization and execution.
+
+
+{`string userdata = @"\{
+    '$type':'System.Windows.Data.ObjectDataProvider, PresentationFramework, Version=4.0.0.0,
+        Culture=neutral, PublicKeyToken=31bf3856ad364e35',
+    'MethodName':'Start',
+    'MethodParameters':\{
+        '$type':'System.Collections.ArrayList, mscorlib, Version=4.0.0.0,
+            Culture=neutral, PublicKeyToken=b77a5c561934e089',
+        '$values':['cmd', '/c calc.exe']
+    \},
+    'ObjectInstance':\{'$type':'System.Diagnostics.Process, System, Version=4.0.0.0,
+        Culture=neutral, PublicKeyToken=b77a5c561934e089'\}
+\}";
+`}
+
+
+
+
+
+## `DefaultRavenSerializationBinder`
+
+Use the `DefaultRavenSerializationBinder` convention and its methods to
+block the deserialization of suspicious namespaces and object types or
+allow the deserialization of trusted object types.
+
+Define a `DefaultRavenSerializationBinder` instance, use the dedicated
+methods to forbid or allow the deserialization of entities, and register
+the defined instance as a serialization convention as shown
+[below](../../client-api/security/deserialization-security.mdx#example).
+
+
+Be sure to update the default deserializer list **before** initializing
+the document store that you want the list to apply to.
+
+### `RegisterForbiddenNamespace`
+Use `RegisterForbiddenNamespace` to prevent the deserialization of objects loaded from a given namespace.
+
+
+
+{`public void RegisterForbiddenNamespace(string @namespace)
+`}
+
+
+
+   | Parameter | Type | Description |
+   |:-------------:|:-------------:|-------------|
+   | **@namespace** | `string` | The name of a namespace from which deserialization won't be allowed. |
+
+
+   Attempting to deserialize a forbidden namespace will throw an
+   `InvalidOperationException` exception with the following details:
+   _"Cannot resolve type" + `type.FullName` + "because the namespace is on a blacklist due to
+   security reasons. Please customize json deserializer in the conventions and override SerializationBinder
+   with your own logic if you want to allow this type."_
+
+### `RegisterForbiddenType`
+Use `RegisterForbiddenType` to prevent the deserialization of a given object type.
+
+
+
+{`public void RegisterForbiddenType(Type type)
+`}
+
+
+
+   | Parameter | Type | Description |
+   |:-------------:|:-------------:|-------------|
+   | **type** | `Type` | An object type whose deserialization won't be allowed. |
+
+
+   Attempting to deserialize a forbidden object type will throw an
+   `InvalidOperationException` exception with the following details:
+   _"Cannot resolve type" + `type.FullName` + "because the type is on a blacklist due to
+   security reasons.
+   Please customize json deserializer in the conventions and override SerializationBinder
+   with your own logic if you want to allow this type."_
+
+### `RegisterSafeType`
+Use `RegisterSafeType` to **allow** the deserialization of a given object type.
+
+
+
+{`public void RegisterSafeType(Type type)
+`}
+
+
+
+   | Parameter | Type | Description |
+   |:-------------:|:-------------:|-------------|
+   | **type** | `Type` | An object type whose deserialization **will** be allowed.
|
+
+## Example
+
+
+
+{`// Create a default serialization binder
+var binder = new DefaultRavenSerializationBinder();
+// Register a forbidden namespace
+binder.RegisterForbiddenNamespace("SuspiciousNamespace");
+// Register a forbidden object type
+binder.RegisterForbiddenType(suspiciousObject.GetType());
+// Register a trusted object type
+binder.RegisterSafeType(trustedObject.GetType());
+
+var store = new DocumentStore()
+\{
+    Conventions =
+    \{
+        Serialization = new NewtonsoftJsonSerializationConventions
+        \{
+            // Customize store deserialization using the defined binder
+            CustomizeJsonDeserializer = deserializer => deserializer.SerializationBinder = binder
+        \}
+    \}
+\};
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/security/deserialization-security.mdx b/versioned_docs/version-7.1/client-api/security/deserialization-security.mdx
new file mode 100644
index 0000000000..59f18ec740
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/security/deserialization-security.mdx
@@ -0,0 +1,29 @@
+---
+title: "Security: Deserialization"
+hide_table_of_contents: true
+sidebar_label: Deserialization Security
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import DeserializationSecurityCsharp from './_deserialization-security-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/_category_.json b/versioned_docs/version-7.1/client-api/session/_category_.json
new file mode 100644
index 0000000000..25b2102722
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 6,
+    "label": "Session"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/_deleting-entities-csharp.mdx b/versioned_docs/version-7.1/client-api/session/_deleting-entities-csharp.mdx
new file mode 100644
index 0000000000..dd2a653dd0
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_deleting-entities-csharp.mdx
@@ -0,0 +1,105 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Entities can be marked for deletion by using the `Delete` method, but will not be removed from the server until `SaveChanges` is called.
+
+## Syntax
+
+
+
+{`void Delete<T>(T entity);
+
+void Delete(string id);
+
+void Delete(string id, string expectedChangeVector);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | `T` | instance of the entity to delete |
+| **id** | `string` | ID of the entity to delete |
+| **expectedChangeVector** | `string` | a change vector to use for concurrency checks |
+
+## Example I
+
+
+
+
+{`Employee employee = session.Load<Employee>("employees/1");
+
+session.Delete(employee);
+session.SaveChanges();
+`}
+
+
+
+
+{`Employee employee = await session.LoadAsync<Employee>("employees/1");
+
+session.Delete(employee);
+await session.SaveChangesAsync();
+`}
+
+
+
+
+
+If `UseOptimisticConcurrency` is set to `true` (default: `false`), the `Delete()` method will use the loaded 'employees/1' change vector for the concurrency check and might throw a `ConcurrencyException`.
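+
+The third overload listed in the syntax section accepts an explicit change vector. As a hedged sketch (assuming an initialized `DocumentStore store` and the `Employee` document used above):
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Record the change vector the document had when it was loaded
+    Employee employee = session.Load<Employee>("employees/1");
+    string changeVector = session.Advanced.GetChangeVectorFor(employee);
+
+    // Delete only if the server-side document still carries that change vector;
+    // otherwise SaveChanges throws a ConcurrencyException
+    session.Delete("employees/1", changeVector);
+    session.SaveChanges();
+}
+`}
+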
+ + +## Example II + + + + +{`session.Delete("employees/1"); +session.SaveChanges(); +`} + + + + +{`session.Delete("employees/1"); +await session.SaveChangesAsync(); +`} + + + + + +In this overload, the Delete() method will not do any change vector based concurrency checks because the change vector for 'employees/1' is unknown. + + + + +If entity is **not** tracked by session, then executing: + + + +{`session.Delete("employees/1"); +`} + + + +is equal to doing: + + + +{`session.Advanced.Defer(new DeleteCommandData("employees/1", changeVector: null)); +`} + + + + +In this sample the change vector is null - this means that there will be no concurrency checks. A non-null and valid change vector value will trigger a concurrency check. + + +You can read more about defer operations [here](../../client-api/session/how-to/defer-operations.mdx). + + + + diff --git a/versioned_docs/version-7.1/client-api/session/_deleting-entities-java.mdx b/versioned_docs/version-7.1/client-api/session/_deleting-entities-java.mdx new file mode 100644 index 0000000000..4db1c60908 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/_deleting-entities-java.mdx @@ -0,0 +1,85 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Entities can be marked for deletion by using the `delete` method, but will not be removed from the server until `saveChanges` is called. + +## Syntax + + + +{` void delete(T entity); + +void delete(String id); + +void delete(String id, String expectedChangeVector); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **entity** | `T` | instance of the entity to delete | +| **id** | `String` | ID of the entity to delete | +| **expectedChangeVector** | `String` | a change vector to use for concurrency checks | + +## Example I + + + +{`Employee employee = session.load(Employee.class, "employees/1"); + +session.delete(employee); +session.saveChanges(); +`} + + + + +If useOptimisticConcurrency is set to 'true' (default 'false'), the delete() method will use loaded 'employees/1' change vector for concurrency check and might throw ConcurrencyException. + + +## Example II + + + +{`session.delete("employees/1"); +session.saveChanges(); +`} + + + + +In this overload, the delete() method will not do any change vector based concurrency checks because the change vector for 'employees/1' is unknown. + + + + +If entity is **not** tracked by session, then executing + + + +{`session.delete("employees/1"); +`} + + + +is equal to doing + + + +{`session.advanced().defer(new DeleteCommandData("employees/1", null)); +`} + + + + +In this sample the change vector is null - this means that there will be no concurrency checks. A non-null and valid change vector value will trigger a concurrency check. + + +You can read more about defer operations [here](./how-to/defer-operations). 
+ + + + diff --git a/versioned_docs/version-7.1/client-api/session/_deleting-entities-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/_deleting-entities-nodejs.mdx new file mode 100644 index 0000000000..29a9b31411 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/_deleting-entities-nodejs.mdx @@ -0,0 +1,85 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Entities can be marked for deletion by using the `delete()` method, but will *not* be removed from the server until `saveChanges()` is called. + +## Syntax + + + +{`await session.delete(entity); + +await session.delete(id); + +await session.delete(id, [changeVector]); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **entity** | `object` | Instance of the entity to delete | +| **id** | `string` | The entity ID | +| **changeVector** | `string` | a change vector to use for concurrency checks | + +## Example I + + + +{`const employee = await session.load("employees/1"); + +await session.delete(employee); +await session.saveChanges(); +`} + + + + +If `useOptimisticConcurrency` is set to *true* (default *false*), the `delete()` method will use loaded *employees/1* change vector for concurrency check and might throw `ConcurrencyException`. + + +## Example II + + + +{`await session.delete("employees/1"); +await session.saveChanges(); +`} + + + + +In this example, the `delete()` method will not do any change vector based concurrency checks because the change vector for *employees/1* is unknown. + + + + +If entity is **not** tracked by session, then executing + + + +{`await session.delete("employees/1"); +`} + + + +is equal to doing + + + +{`await session.advanced.defer(new DeleteCommandData("employees/1", null)); +`} + + + + +In this sample the change vector is null - this means that there will be no concurrency checks. A non-null and valid change vector value will trigger a concurrency check. + + +You can read more about defer operations [here](./how-to/defer-operations). + + + + diff --git a/versioned_docs/version-7.1/client-api/session/_deleting-entities-php.mdx b/versioned_docs/version-7.1/client-api/session/_deleting-entities-php.mdx new file mode 100644 index 0000000000..6dad4e286a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/_deleting-entities-php.mdx @@ -0,0 +1,85 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Entities can be marked for deletion by using the `delete` method, but will not be removed from the server until `saveChanges` is called. 
+ +## Syntax + + + +{`public function delete(?object $entity): void; + +public function delete(?string $id): void; + +public function delete(?string $id, ?string $expectedChangeVector): void; +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **entity** | `T` | instance of the entity to delete | +| **id** | `string` | ID of the entity to delete | +| **expectedChangeVector** | `string` | a change vector to use for concurrency checks | + +## Example I + + + +{`$employee = $session->load(Employee::class, "employees/1"); + +$session->delete($employee); +$session->saveChanges(); +`} + + + + +If UseOptimisticConcurrency is set to 'true' (default 'false'), the Delete() method will use loaded 'employees/1' change vector for concurrency check and might throw ConcurrencyException. + + +## Example II + + + +{`$session->delete("employees/1"); +$session->saveChanges(); +`} + + + + +In this overload, the Delete() method will not do any change vector based concurrency checks because the change vector for 'employees/1' is unknown. + + + + +If entity is **not** tracked by session, then executing: + + + +{`$session->delete("employees/1"); +`} + + + +is equal to doing: + + + +{`$session->advanced()->defer(new DeleteCommandData("employees/1", null)); +`} + + + + +In this sample the change vector is null - this means that there will be no concurrency checks. A non-null and valid change vector value will trigger a concurrency check. + + +You can read more about defer operations [here](../../client-api/session/how-to/defer-operations.mdx). + + + + diff --git a/versioned_docs/version-7.1/client-api/session/_deleting-entities-python.mdx b/versioned_docs/version-7.1/client-api/session/_deleting-entities-python.mdx new file mode 100644 index 0000000000..c326c0c97f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/_deleting-entities-python.mdx @@ -0,0 +1,81 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Entities can be marked for deletion by using the `delete()` method, but will not be removed from the server until `save_changes()` is called. + +## Syntax + + + +{`def delete(self, key_or_entity: Union[str, object], expected_change_vector: Optional[str] = None) -> None: + ... +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **key_or_entity** | `str` or `object` | ID of the document or instance of the entity to delete | +| **expected_change_vector** | `str` | a change vector to use for concurrency checks | + +## Example I + + + +{`employee = session.load("employees/1") + +session.delete(employee) +session.save_changes() +`} + + + + +If use_optimistic_concurrency is set to 'True' (default 'False'), the delete() method will use loaded 'employees/1' change vector for concurrency check and might throw ConcurrencyException. + + +## Example II + + + +{`session.delete("employees/1") +session.save_changes() +`} + + + + +The delete() method will not do any change vector based concurrency checks because the change vector for 'employees/1' is unknown. + + + + +If entity is **not** tracked by session, then executing: + + + +{`session.delete("employees/1") +`} + + + +is equal to doing: + + + +{`session.advanced.defer(DeleteCommandData("employees/1", change_vector=None)) +`} + + + + +In this sample the change vector is None - this means that there will be no concurrency checks. 
A valid, non-None change vector value will trigger a concurrency check.
+
+
+You can read more about defer operations [here](../../client-api/session/how-to/defer-operations.mdx).
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_loading-entities-csharp.mdx b/versioned_docs/version-7.1/client-api/session/_loading-entities-csharp.mdx
new file mode 100644
index 0000000000..46ada52224
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_loading-entities-csharp.mdx
@@ -0,0 +1,639 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* There are several methods that allow users to load documents from the database and convert them to entities.
+
+* This article covers the following methods:
+
+  - [Load](../../client-api/session/loading-entities.mdx#load)
+  - [Load with Includes](../../client-api/session/loading-entities.mdx#load-with-includes)
+  - [Load - multiple entities](../../client-api/session/loading-entities.mdx#load---multiple-entities)
+  - [LoadStartingWith](../../client-api/session/loading-entities.mdx#loadstartingwith)
+  - [ConditionalLoad](../../client-api/session/loading-entities.mdx#conditionalload)
+  - [Stream](../../client-api/session/loading-entities.mdx#stream)
+  - [IsLoaded](../../client-api/session/loading-entities.mdx#isloaded)
+
+* For loading entities lazily see [perform requests lazily](../../client-api/session/how-to/perform-operations-lazily.mdx).
+
+
+## Load
+
+The most basic way to load a single entity is to use one of the `Load` methods.
+
+
+
+
+{`TResult Load<TResult>(string id);
+`}
+
+
+
+
+{`Task<TResult> LoadAsync<TResult>(string id);
+`}
+
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **id** | `string` | Identifier of a document that will be loaded. |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `TResult` | Instance of `TResult` or `null` if a document with a given ID does not exist. |
+
+### Example
+
+
+
+
+{`Employee employee = session.Load<Employee>("employees/1");
+`}
+
+
+
+
+{`Employee employee = await asyncSession.LoadAsync<Employee>("employees/1");
+`}
+
+
+
+
+
+From RavenDB version 4.x onwards, only string identifiers are supported. If you are upgrading from 3.x, this is a major change, because in 3.x non-string identifiers are supported.
+
+
+
+
+## Load with Includes
+
+When there is a 'relationship' between documents, those documents can be loaded in a
+single request call using the `Include + Load` methods. Learn more in
+[How To Handle Document Relationships](../../client-api/how-to/handle-document-relationships.mdx).
+
+
+Also see:
+
+* [Including Counters](../../document-extensions/counters/counters-and-other-features.mdx#including-counters)
+* [Including Time Series](../../document-extensions/timeseries/client-api/session/include/overview.mdx)
+* [Including Compare Exchange Values](../../client-api/operations/compare-exchange/include-compare-exchange.mdx)
+* [Including Document Revisions](../../document-extensions/revisions/client-api/session/including.mdx)
+
+
+
+{`ILoaderWithInclude<object> Include(string path);
+
+ILoaderWithInclude<T> Include<T>(Expression<Func<T, string>> path);
+
+ILoaderWithInclude<T> Include<T>(Expression<Func<T, IEnumerable<string>>> path);
+
+ILoaderWithInclude<T> Include<T, TInclude>(Expression<Func<T, string>> path);
+
+ILoaderWithInclude<T> Include<T, TInclude>(Expression<Func<T, IEnumerable<string>>> path);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **path** | `string` or Expression | Path in documents in which the server should look for 'referenced' documents. |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `ILoaderWithInclude` | The `Include` method by itself does not materialize any requests but returns a loader containing methods such as `Load`. |
+
+### Example I
+
+We can use this code to also load an employee which made the order.
+
+
+
+
+{`// loading 'products/1'
+// including document found in 'Supplier' property
+Product product = session
+    .Include("Supplier")
+    .Load<Product>("products/1");
+
+Supplier supplier = session.Load<Supplier>(product.Supplier); // this will not make server call
+`}
+
+
+
+
+{`// loading 'products/1'
+// including document found in 'Supplier' property
+Product product = await asyncSession
+    .Include("Supplier")
+    .LoadAsync<Product>("products/1");
+
+Supplier supplier = await asyncSession.LoadAsync<Supplier>(product.Supplier); // this will not make server call
+`}
+
+
+
+
+### Example II
+
+
+
+
+{`// loading 'products/1'
+// including document found in 'Supplier' property
+Product product = session
+    .Include<Product>(x => x.Supplier)
+    .Load("products/1");
+
+Supplier supplier = session.Load<Supplier>(product.Supplier); // this will not make server call
+`}
+
+
+
+
+{`// loading 'products/1'
+// including document found in 'Supplier' property
+Product product = await asyncSession
+    .Include<Product>(x => x.Supplier)
+    .LoadAsync("products/1");
+
+Supplier supplier = await asyncSession.LoadAsync<Supplier>(product.Supplier); // this will not make server call
+`}
+
+
+
+
+
+
+## Load - multiple entities
+
+To load multiple entities at once, use one of the following `Load` overloads.
+
+
+
+
+{`Dictionary<string, TResult> Load<TResult>(IEnumerable<string> ids);
+`}
+
+
+
+
+{`Task<Dictionary<string, TResult>> LoadAsync<TResult>(IEnumerable<string> ids);
+`}
+
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **ids** | `IEnumerable<string>` | Multiple document identifiers to load |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `Dictionary<string, TResult>` | Instance of Dictionary which maps document identifiers to `TResult` or `null` if a document with given ID doesn't exist. |
+
+
+
+
+{`Dictionary<string, Employee> employees = session.Load<Employee>(new[]
+{
+    "employees/1",
+    "employees/2",
+    "employees/3"
+});
+`}
+
+
+
+
+{`Dictionary<string, Employee> employees = await asyncSession.LoadAsync<Employee>(new[]
+{
+    "employees/1",
+    "employees/2",
+});
+`}
+
+
+
+
+
+
+## LoadStartingWith
+
+To load multiple entities that contain a common prefix, use the `LoadStartingWith` method from the `Advanced` session operations.
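+
+Before the full signatures and examples below, a hedged paging sketch: the `start` and `pageSize` parameters can be combined to walk a prefix page by page (assuming an initialized `DocumentStore store`; for very large result sets the [Stream](../../client-api/session/loading-entities.mdx#stream) API below is usually preferable, since a session is limited in the number of requests it may send):
+
+
+
+{`using (var session = store.OpenSession())
+{
+    int pageSize = 32;
+
+    // Walk every document whose ID starts with "employees/", one page at a time
+    for (int start = 0; ; start += pageSize)
+    {
+        Employee[] page = session.Advanced
+            .LoadStartingWith<Employee>("employees/", start: start, pageSize: pageSize);
+
+        if (page.Length == 0)
+            break;
+
+        // process the current page here
+    }
+}
+`}
+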
+
+
+
+{`T[] LoadStartingWith<T>(
+    string idPrefix,
+    string matches = null,
+    int start = 0,
+    int pageSize = 25,
+    string exclude = null,
+    string startAfter = null);
+
+void LoadStartingWithIntoStream(
+    string idPrefix,
+    Stream output,
+    string matches = null,
+    int start = 0,
+    int pageSize = 25,
+    string exclude = null,
+    string startAfter = null);
+`}
+
+
+
+
+{`Task<T[]> LoadStartingWithAsync<T>(
+    string idPrefix,
+    string matches = null,
+    int start = 0,
+    int pageSize = 25,
+    string exclude = null,
+    string startAfter = null);
+
+Task LoadStartingWithIntoStreamAsync(
+    string idPrefix,
+    Stream output,
+    string matches = null,
+    int start = 0,
+    int pageSize = 25,
+    string exclude = null,
+    string startAfter = null);
+`}
+
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **idPrefix** | `string` | prefix for which the documents should be returned |
+| **matches** | `string` | pipe ('|') separated values for which document IDs (after 'idPrefix') should be matched ('?' any single character, '*' any characters) |
+| **start** | `int` | number of documents that should be skipped |
+| **pageSize** | `int` | maximum number of documents that will be retrieved |
+| **exclude** | `string` | pipe ('|') separated values for which document IDs (after 'idPrefix') should **not** be matched ('?' any single character, '*' any characters) |
+| **startAfter** | `string` | skip document fetching until the given ID is found and return documents after that ID (default: `null`) |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `T[]` | Array of entities matching given parameters. |
+| `Stream` | Output entities matching given parameters as a stream. |
+
+### Example I
+
+
+
+
+{`// return up to 128 entities with Id that starts with 'employees'
+Employee[] result = session
+    .Advanced
+    .LoadStartingWith<Employee>("employees", null, 0, 128);
+`}
+
+
+
+
+{`// return up to 128 entities with Id that starts with 'employees'
+Employee[] result = (await asyncSession
+    .Advanced
+    .LoadStartingWithAsync<Employee>("employees", null, 0, 128))
+    .ToArray();
+`}
+
+
+
+
+### Example II
+
+
+
+
+{`// return up to 128 entities with Id that starts with 'employees/'
+// and rest of the key begins with "1" or "2" e.g. employees/10, employees/25
+Employee[] result = session
+    .Advanced
+    .LoadStartingWith<Employee>("employees/", "1*|2*", 0, 128);
+`}
+
+
+
+
+{`// return up to 128 entities with Id that starts with 'employees/'
+// and rest of the key begins with "1" or "2" e.g. employees/10, employees/25
+Employee[] result = (await asyncSession
+    .Advanced
+    .LoadStartingWithAsync<Employee>("employees/", "1*|2*", 0, 128))
+    .ToArray();
+`}
+
+
+
+
+
+
+## ConditionalLoad
+
+This method can be used to check whether a document has been modified
+since the last time its change vector was recorded, so that the cost of loading it
+can be saved if it has not been modified.
+
+The `ConditionalLoad` method takes a document's [change vector](../../server/clustering/replication/change-vector.mdx).
+If the entity is tracked by the session, this method returns the entity. If the entity
+is not tracked, it checks if the provided change vector matches the document's
+current change vector on the server side. If they match, the entity is not loaded.
+If the change vectors _do not_ match, the document is loaded.
+
+The method is accessible from the `session.Advanced` operations.
+
+
+
+
+{`(T Entity, string ChangeVector) ConditionalLoad<T>(string id, string changeVector);
+`}
+
+
+
+
+{`Task<(T Entity, string ChangeVector)> ConditionalLoadAsync<T>(string id, string changeVector);
+`}
+
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **id** | `string` | The identifier of a document to be loaded. |
+| **changeVector** | `string` | The change vector you want to compare with the server-side change vector. If the change vectors match, the document is not loaded. |
+
+| Return Type | Description |
+|--------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| ValueTuple `(T Entity, string ChangeVector)` | If the given change vector and the server-side change vector do not match, the method returns the requested entity and its current change vector.
If the change vectors match, the method returns `default` as the entity, and the current change vector.
If the specified document does not exist, the method returns only `default` without a change vector. |
+
+### Example
+
+
+
+
+{`string changeVector;
+User user = new User { Name = "Bob" };
+
+using (var session = store.OpenSession())
+{
+    session.Store(user, "users/1");
+    session.SaveChanges();
+
+    changeVector = session.Advanced.GetChangeVectorFor(user);
+}
+
+// New session which does not track our User entity
+using (var session = store.OpenSession())
+{
+    // The given change vector matches
+    // the server-side change vector
+    // Does not load the document
+    var result1 = session.Advanced
+        .ConditionalLoad<User>("users/1", changeVector);
+
+    // Modify the document
+    user.Name = "Bob Smith";
+    session.Store(user);
+    session.SaveChanges();
+
+    // Change vectors do not match
+    // Loads the document
+    var result2 = session.Advanced
+        .ConditionalLoad<User>("users/1", changeVector);
+}
+`}
+
+
+
+
+{`string changeVector;
+User user = new User { Name = "Bob" };
+
+using (var session = store.OpenAsyncSession())
+{
+    await session.StoreAsync(user, "users/1");
+    await session.SaveChangesAsync();
+
+    changeVector = session.Advanced.GetChangeVectorFor(user);
+}
+
+// New session which does not track our User entity
+using (var session = store.OpenAsyncSession())
+{
+    // The given change vector matches
+    // the server-side change vector
+    // Does not load the document
+    var result1 = await session.Advanced
+        .ConditionalLoadAsync<User>("users/1", changeVector);
+
+    // Modify the document
+    user.Name = "Bob Smith";
+    await session.StoreAsync(user);
+    await session.SaveChangesAsync();
+
+    // Change vectors do not match
+    // Loads the document
+    var result2 = await session.Advanced
+        .ConditionalLoadAsync<User>("users/1", changeVector);
+}
+`}
+
+
+
+
+
+
+## Stream
+
+Entities can be streamed from the server using one of the following `Stream` methods from the `Advanced` session operations.
+
+Streaming query results does not support the [`include` feature](../../client-api/how-to/handle-document-relationships.mdx#includes).
+Learn more in [How to Stream Query Results](../../client-api/session/querying/how-to-stream-query-results.mdx).
+
+
+Entities loaded using `Stream` will be transient (not attached to session).
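+
+Before the formal signatures below, a hedged sketch of streaming a LINQ query rather than an ID prefix (assuming an initialized `DocumentStore store`; `StreamResult` exposes the entity through its `Document` property):
+
+
+
+{`using (var session = store.OpenSession())
+{
+    IQueryable<Employee> query = session.Query<Employee>();
+
+    // Stream the results; the session does not track any of them
+    IEnumerator<StreamResult<Employee>> enumerator = session.Advanced.Stream(query);
+
+    while (enumerator.MoveNext())
+    {
+        Employee employee = enumerator.Current.Document;
+        // process the transient entity here
+    }
+}
+`}
+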
+
+
+
+
+
+{`IEnumerator<StreamResult<T>> Stream<T>(IQueryable<T> query);
+
+IEnumerator<StreamResult<T>> Stream<T>(IQueryable<T> query, out StreamQueryStatistics streamQueryStats);
+
+IEnumerator<StreamResult<T>> Stream<T>(IDocumentQuery<T> query);
+
+IEnumerator<StreamResult<T>> Stream<T>(IRawDocumentQuery<T> query);
+
+IEnumerator<StreamResult<T>> Stream<T>(IRawDocumentQuery<T> query, out StreamQueryStatistics streamQueryStats);
+
+IEnumerator<StreamResult<T>> Stream<T>(IDocumentQuery<T> query, out StreamQueryStatistics streamQueryStats);
+
+IEnumerator<StreamResult<T>> Stream<T>(string startsWith, string matches = null, int start = 0, int pageSize = int.MaxValue, string startAfter = null);
+`}
+
+
+
+
+{`Task<IAsyncEnumerator<StreamResult<T>>> StreamAsync<T>(IQueryable<T> query);
+
+Task<IAsyncEnumerator<StreamResult<T>>> StreamAsync<T>(IQueryable<T> query, out StreamQueryStatistics streamQueryStats);
+
+Task<IAsyncEnumerator<StreamResult<T>>> StreamAsync<T>(IDocumentQuery<T> query);
+
+Task<IAsyncEnumerator<StreamResult<T>>> StreamAsync<T>(IRawDocumentQuery<T> query);
+
+Task<IAsyncEnumerator<StreamResult<T>>> StreamAsync<T>(IRawDocumentQuery<T> query, out StreamQueryStatistics streamQueryStats);
+
+Task<IAsyncEnumerator<StreamResult<T>>> StreamAsync<T>(IDocumentQuery<T> query, out StreamQueryStatistics streamQueryStats);
+
+Task<IAsyncEnumerator<StreamResult<T>>> StreamAsync<T>(string startsWith, string matches = null, int start = 0, int pageSize = int.MaxValue, string startAfter = null);
+`}
+
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **startsWith** | `string` | prefix for which documents should be streamed |
+| **matches** | `string` | pipe ('|') separated values for which document IDs should be matched ('?' any single character, '*' any characters) |
+| **start** | `int` | number of documents that should be skipped |
+| **pageSize** | `int` | maximum number of documents that will be retrieved |
+| **startAfter** | `string` | skip document fetching until a given ID is found and return documents after that ID (default: `null`) |
+| **streamQueryStats** | `StreamQueryStatistics` (out parameter) | Information about the streaming query (amount of results, which index was queried, etc.) |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `IEnumerator<`[StreamResult](../../glossary/stream-result.mdx)`>` | Enumerator with entities. |
+| `streamQueryStats` (out parameter) | Information about the streaming query (amount of results, which index was queried, etc.) |
+
+
+### Example I
+
+Stream documents for an ID prefix:
+
+
+
+
+{`IEnumerator<StreamResult<Employee>> enumerator = session
+    .Advanced
+    .Stream<Employee>("employees/");
+
+while (enumerator.MoveNext())
+{
+    StreamResult<Employee> employee = enumerator.Current;
+}
+`}
+
+
+
+
+{`IAsyncEnumerator<StreamResult<Employee>> enumerator = await asyncSession
+    .Advanced
+    .StreamAsync<Employee>("employees/");
+
+while (await enumerator.MoveNextAsync())
+{
+    StreamResult<Employee> employee = enumerator.Current;
+}
+`}
+
+
+
+
+### Example II
+
+Fetch documents for an ID prefix directly into a stream:
+
+
+
+
+{`using (var outputStream = new MemoryStream())
+{
+    session
+        .Advanced
+        .LoadStartingWithIntoStream("employees/", outputStream);
+}
+`}
+
+
+
+
+{`using (var outputStream = new MemoryStream())
+{
+    await asyncSession
+        .Advanced
+        .LoadStartingWithIntoStreamAsync("employees/", outputStream);
+}
+`}
+
+
+
+
+
+
+## IsLoaded
+
+Use the `IsLoaded` method from the `Advanced` session operations
+to check if an entity is attached to a session (e.g. because it's been
+previously loaded).
+
+
+`IsLoaded` checks if an attempt to load a document has already been made
+during the current session, and returns `true` even if such an attempt was
+made and failed. 
+If, for example, the `Load` method was used to load `employees/3` during
+this session and failed because the document has been previously deleted,
+`IsLoaded` will still return `true` for `employees/3` for the remainder
+of the session just because of the attempt to load it.
+
+
+
+
+{`bool IsLoaded(string id);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **id** | `string` | Entity ID for which the check should be performed. |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `bool` | Indicates if an entity with a given ID is loaded. |
+
+### Example
+
+
+
+
+{`bool isLoaded = session.Advanced.IsLoaded("employees/1"); // false
+Employee employee = session.Load<Employee>("employees/1");
+isLoaded = session.Advanced.IsLoaded("employees/1"); // true
+`}
+
+
+
+
+{`bool isLoaded = asyncSession.Advanced.IsLoaded("employees/1"); // false
+Employee employee = await asyncSession.LoadAsync<Employee>("employees/1");
+isLoaded = asyncSession.Advanced.IsLoaded("employees/1"); // true
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_loading-entities-java.mdx b/versioned_docs/version-7.1/client-api/session/_loading-entities-java.mdx
new file mode 100644
index 0000000000..9a0e9c176e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_loading-entities-java.mdx
@@ -0,0 +1,396 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+There are several methods with many overloads that allow users to load documents
+from the database and convert them to entities. This article will cover the following
+methods:
+
+- [Load](../../client-api/session/loading-entities.mdx#load)
+- [Load with Includes](../../client-api/session/loading-entities.mdx#load-with-includes)
+- [Load - multiple entities](../../client-api/session/loading-entities.mdx#load---multiple-entities)
+- [LoadStartingWith](../../client-api/session/loading-entities.mdx#loadstartingwith)
+- [ConditionalLoad](../../client-api/session/loading-entities.mdx#conditionalload)
+- [Stream](../../client-api/session/loading-entities.mdx#stream)
+- [IsLoaded](../../client-api/session/loading-entities.mdx#isloaded)
+
+
+## Load
+
+The most basic way to load a single entity is to use one of the `load` methods.
+
+
+
+{`<T> T load(Class<T> clazz, String id);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **id** | `String` | Identifier of a document that will be loaded. |
+
+| Return Value | |
+| ------------- | ----- |
+| T | Instance of `T` or `null` if a document with a given ID does not exist. |
+
+### Example
+
+
+
+{`Employee employee = session.load(Employee.class, "employees/1");
+`}
+
+
+
+
+From RavenDB version 4.x onwards, only string identifiers are supported. If you are upgrading from 3.x, this is a major change, because in 3.x non-string identifiers were supported.
+
+
+
+
+## Load with Includes
+
+When there is a 'relationship' between documents, those documents can be loaded in a
+single request call using the `include + load` methods. Learn more in
+[How To Handle Document Relationships](../../client-api/how-to/handle-document-relationships.mdx).
+See also [including counters](../../document-extensions/counters/counters-and-other-features.mdx#including-counters)
+and [including time series](../../document-extensions/timeseries/client-api/session/include/overview.mdx). 
+
+
+
+{`ILoaderWithInclude include(String path);
+
+<TResult> Map<String, TResult> load(Class<TResult> clazz, String... ids);
+
+<TResult> Map<String, TResult> load(Class<TResult> clazz, Collection<String> ids);
+
+<TResult> TResult load(Class<TResult> clazz, String id);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **path** | `String` | Path in documents in which the server should look for 'referenced' documents. |
+| **ids** | `String` | Ids to load. |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `ILoaderWithInclude` | The `include` method by itself does not materialize any requests but returns loader containing methods such as `load`. |
+
+### Example I
+
+We can use this code to load a product and include the supplier document it references in the same call.
+
+
+
+{`// loading 'products/1'
+// including document found in 'supplier' property
+Product product = session
+    .include("Supplier")
+    .load(Product.class, "products/1");
+
+Supplier supplier = session.load(Supplier.class, product.getSupplier()); // this will not make server call
+`}
+
+
+
+
+
+## Load - multiple entities
+
+To load multiple entities at once, use one of the following `load` overloads.
+
+
+
+{`<TResult> Map<String, TResult> load(Class<TResult> clazz, String... ids);
+
+<TResult> Map<String, TResult> load(Class<TResult> clazz, Collection<String> ids);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **ids** | Collection<String> or String... | Multiple document identifiers to load |
+
+| Return Value | |
+| ------------- | ----- |
+| Map<String, T> | Instance of Map which maps document identifiers to `T` or `null` if a document with given ID doesn't exist. |
+
+
+
+{`Map<String, Employee> employees
+    = session.load(Employee.class,
+        "employees/1", "employees/2", "employees/3");
+`}
+
+
+
+
+
+## LoadStartingWith
+
+To load multiple entities that contain a common prefix, use the `loadStartingWith` method from the `advanced` session operations.
+
+
+
+{`<T> T[] loadStartingWith(Class<T> clazz, String idPrefix);
+
+<T> T[] loadStartingWith(Class<T> clazz, String idPrefix, String matches);
+
+<T> T[] loadStartingWith(Class<T> clazz, String idPrefix, String matches, int start);
+
+<T> T[] loadStartingWith(Class<T> clazz, String idPrefix, String matches, int start, int pageSize);
+
+<T> T[] loadStartingWith(Class<T> clazz, String idPrefix, String matches, int start, int pageSize, String exclude);
+
+<T> T[] loadStartingWith(Class<T> clazz, String idPrefix, String matches, int start, int pageSize, String exclude, String startAfter);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **idPrefix** | String | prefix for which the documents should be returned |
+| **matches** | String | pipe ('|') separated values for which document IDs (after 'idPrefix') should be matched ('?' any single character, '*' any characters) |
+| **start** | int | number of documents that should be skipped |
+| **pageSize** | int | maximum number of documents that will be retrieved |
+| **exclude** | String | pipe ('|') separated values for which document IDs (after 'idPrefix') should **not** be matched ('?' any single character, '*' any characters) |
+| **startAfter** | String | skip document fetching until the given ID is found and return documents after that ID (default: `null`) |
+
+| Return Value | |
+| ------------- | ----- |
+| T[] | Array of entities matching given parameters. 
|
+
+### Example I
+
+
+
+{`// return up to 128 entities with Id that starts with 'employees/'
+Employee[] result = session
+    .advanced()
+    .loadStartingWith(Employee.class, "employees/", null, 0, 128);
+`}
+
+
+
+### Example II
+
+
+
+{`// return up to 128 entities with Id that starts with 'employees/'
+// and rest of the key begins with "1" or "2" e.g. employees/10, employees/25
+Employee[] result = session
+    .advanced()
+    .loadStartingWith(Employee.class, "employees/", "1*|2*", 0, 128);
+`}
+
+
+
+
+
+## ConditionalLoad
+
+The `conditionalLoad` method takes a document's [change vector](../../server/clustering/replication/change-vector.mdx).
+If the entity is tracked by the session, this method returns the entity. If the entity
+is not tracked, it checks if the provided change vector matches the document's
+current change vector on the server side. If they match, the entity is not loaded.
+If the change vectors _do not_ match, the document is loaded.
+
+In other words, this method can be used to check whether a document has been modified
+since the last time its change vector was recorded, so that the cost of loading it
+can be saved if it has not been modified.
+
+The method is accessible from the `session.advanced()` operations.
+
+
+
+{`<T> ConditionalLoadResult<T> conditionalLoad(Class<T> clazz, String id, String changeVector);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **clazz** | `Class` | The class of a document to be loaded. |
+| **id** | `String` | The identifier of a document to be loaded. |
+| **changeVector** | `String` | The change vector you want to compare with the server-side change vector. If the change vectors match, the document is not loaded. |
+
+| Return Type | Description |
+|---------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| ConditionalLoadResult<T> | If the given change vector and the server-side change vector do not match, the method returns the requested entity and its current change vector.
If the change vectors match, the method returns `default` as the entity, and the current change vector.
If the specified document does not exist, the method returns only `default` without a change vector. |
+
+### Example
+
+
+
+{`String changeVector;
+User user = new User("Bob");
+
+try (IDocumentSession session = store.openSession()) \{
+    session.store(user, "users/1");
+    session.saveChanges();
+
+    changeVector = session.advanced().getChangeVectorFor(user);
+\}
+
+try (IDocumentSession session = store.openSession()) \{
+    // New session which does not track our User entity
+
+    // The given change vector matches
+    // the server-side change vector
+    // Does not load the document
+    ConditionalLoadResult<User> result1 = session.advanced()
+        .conditionalLoad(User.class, "users/1", changeVector);
+
+    // Modify the document
+    user.setName("Bob Smith");
+    session.store(user);
+    session.saveChanges();
+
+    // Change vectors do not match
+    // Loads the document
+    ConditionalLoadResult<User> result2 = session.advanced()
+        .conditionalLoad(User.class, "users/1", changeVector);
+\}
+`}
+
+
+
+
+
+## Stream
+
+Entities can be streamed from the server using one of the following `stream` methods from the `advanced` session operations.
+
+
+
+{`<T> CloseableIterator<StreamResult<T>> stream(IDocumentQuery<T> query);
+
+<T> CloseableIterator<StreamResult<T>> stream(IDocumentQuery<T> query, Reference<StreamQueryStatistics> streamQueryStats);
+
+<T> CloseableIterator<StreamResult<T>> stream(IRawDocumentQuery<T> query);
+
+<T> CloseableIterator<StreamResult<T>> stream(IRawDocumentQuery<T> query, Reference<StreamQueryStatistics> streamQueryStats);
+
+<T> CloseableIterator<StreamResult<T>> stream(Class<T> clazz, String startsWith);
+
+<T> CloseableIterator<StreamResult<T>> stream(Class<T> clazz, String startsWith, String matches);
+
+<T> CloseableIterator<StreamResult<T>> stream(Class<T> clazz, String startsWith, String matches, int start);
+
+<T> CloseableIterator<StreamResult<T>> stream(Class<T> clazz, String startsWith, String matches, int start, int pageSize);
+
+<T> CloseableIterator<StreamResult<T>> stream(Class<T> clazz, String startsWith, String matches, int start, int pageSize, String startAfter);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **startsWith** | `String` | prefix for which documents should be streamed |
+| **matches** | `String` | pipe ('|') separated values for which document IDs should be matched ('?' any single character, '*' any characters) |
+| **start** | `int` | number of documents that should be skipped |
+| **pageSize** | `int` | maximum number of documents that will be retrieved |
+| **startAfter** | `String` | skip document fetching until a given ID is found and return documents after that ID (default: `null`) |
+| **streamQueryStats** | `Reference<StreamQueryStatistics>` (out parameter) | Information about the streaming query (amount of results, which index was queried, etc.) |
+
+| Return Value | |
+| ------------- | ----- |
+| CloseableIterator<StreamResult<T>> | Iterator with entities. |
+| streamQueryStats (out parameter) | Information about the streaming query (amount of results, which index was queried, etc.) |
+
+
+### Example I
+
+Stream documents for an ID prefix:
+
+
+
+{`try (CloseableIterator<StreamResult<Employee>> iterator =
+    session.advanced().stream(Employee.class, "employees/")) \{
+    while (iterator.hasNext()) \{
+        StreamResult<Employee> employee = iterator.next();
+    \}
+\}
+`}
+
+
+
+### Example II
+
+Fetch documents for an ID prefix directly into a stream:
+
+
+
+{`ByteArrayOutputStream baos = new ByteArrayOutputStream();
+session
+    .advanced()
+    .loadStartingWithIntoStream("employees/", baos);
+`}
+
+
+
+### Remarks
+
+
+Entities loaded using `stream` will be transient (not attached to session). 
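+
+For the overloads that accept a `Reference<StreamQueryStatistics>`, the statistics object is
+populated when the iterator is created. The following is a minimal sketch of streaming query
+results while collecting statistics (the `Employee` class is an assumption for illustration):
+
+
+
+{`Reference<StreamQueryStatistics> statsRef = new Reference<>();
+
+// Build a query whose results will be streamed
+IDocumentQuery<Employee> query = session
+    .advanced()
+    .documentQuery(Employee.class);
+
+// The statistics reference is filled when the iterator is created
+try (CloseableIterator<StreamResult<Employee>> iterator =
+    session.advanced().stream(query, statsRef)) \{
+    while (iterator.hasNext()) \{
+        StreamResult<Employee> employee = iterator.next();
+    \}
+\}
+
+// e.g. number of results and the queried index:
+// statsRef.value.getTotalResults();
+// statsRef.value.getIndexName();
+`}
+
+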
+
+
+
+
+## IsLoaded
+
+Use the `isLoaded` method from the `advanced` session operations
+to check if an entity is attached to a session (e.g. because it's been
+previously loaded).
+
+
+`isLoaded` checks if an attempt to load a document has already been made
+during the current session, and returns `true` even if such an attempt was
+made and failed.
+If, for example, the `load` method was used to load `employees/3` during
+this session and failed because the document has been previously deleted,
+`isLoaded` will still return `true` for `employees/3` for the remainder
+of the session just because of the attempt to load it.
+
+
+
+
+{`boolean isLoaded(String id);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **id** | `String` | Entity ID for which the check should be performed. |
+
+| Return Value | |
+| ------------- | ----- |
+| boolean | Indicates if an entity with a given ID is loaded. |
+
+### Example
+
+
+
+{`boolean isLoaded = session.advanced().isLoaded("employees/1"); // false
+Employee employee = session.load(Employee.class, "employees/1");
+isLoaded = session.advanced().isLoaded("employees/1"); // true
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_loading-entities-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/_loading-entities-nodejs.mdx
new file mode 100644
index 0000000000..c5c594a5c1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_loading-entities-nodejs.mdx
@@ -0,0 +1,416 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* There are several methods that allow users to load documents from the database and convert them to entities.
+
+* This article covers the following methods:
+
+  - [Load](../../client-api/session/loading-entities.mdx#load)
+  - [Load with Includes](../../client-api/session/loading-entities.mdx#load-with-includes)
+  - [Load - multiple entities](../../client-api/session/loading-entities.mdx#load---multiple-entities)
+  - [LoadStartingWith](../../client-api/session/loading-entities.mdx#loadstartingwith)
+  - [ConditionalLoad](../../client-api/session/loading-entities.mdx#conditionalload)
+  - [IsLoaded](../../client-api/session/loading-entities.mdx#isloaded)
+  - [Stream](../../client-api/session/loading-entities.mdx#stream)
+
+* For loading entities lazily see [perform requests lazily](../../client-api/session/how-to/perform-operations-lazily.mdx).
+
+
+## Load
+
+The most basic way to load a single entity is to use the session's `load()` method.
+
+
+
+{`await session.load(id, [documentType]);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **id** | string | Identifier of a document that will be loaded. |
+| **documentType** | function | A class constructor used for reviving the results' entities |
+
+| Return Value | |
+| ------------- | ----- |
+| `Promise` | A `Promise` returning `object` or `null` if a document with a given ID does not exist. |
+
+### Example
+
+
+
+{`const employee = await session.load("employees/1");
+`}
+
+
+
+
+In 4.x RavenDB, only string identifiers are supported. If you are upgrading from 3.x, this is a major change, because in 3.x non-string identifiers were supported.
+
+
+
+
+## Load with Includes
+
+When there is a *relationship* between documents, those documents can be loaded in a single request call using the `include()` and `load()` methods. 
+
+
+
+{`session.include(path);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **path** | string | Field path in documents in which the server should look for 'referenced' documents. |
+
+| Return Value | |
+| ------------- | ----- |
+| `object{load()}` | The `include()` method by itself does not materialize any requests but returns loader containing methods such as `load()`. |
+
+### Example I
+
+We can use this code to load a product and include the supplier document it references in the same call.
+
+
+
+{`// loading 'products/1'
+// including document found in 'supplier' property
+const product = await session
+    .include("supplier")
+    .load("products/1");
+
+const supplier = await session.load(product.supplier); // this will *not* make a server call
+`}
+
+
+
+
+
+## Load - multiple entities
+
+To load multiple entities at once, use one of the following ways to call `load()`.
+
+
+
+{`await session.load(idsArray, [documentType]);
+await session.load(idsArray, [options]);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **idsArray** | string[] | Multiple document identifiers to load |
+| **documentType** | function | A class constructor used for reviving the results' entities |
+| **options** | object | Options with the following properties |
+| **documentType** | function | A class constructor used for reviving the results' entities |
+| **includes** | string[] | Field paths in documents in which the server should look for 'referenced' documents. |
+
+| Return Value | |
+| ------------- | ----- |
+| `Promise<{ [id]: object }>` | A `Promise` resolving to an object mapping document identifiers to `object` or `null` if a document with given ID doesn't exist |
+
+
+
+{`const employees = await session.load(
+    ["employees/1", "employees/2", "employees/3"]);
+// \{
+//     "employees/1": \{ ... \},
+//     "employees/2": \{ ... \}
+//     "employees/3": \{ ... \}
+// \}
+`}
+
+
+
+
+
+## LoadStartingWith
+
+To load multiple entities that contain a common prefix, use the `loadStartingWith()` method from the `advanced` session operations.
+
+
+
+{`await session.advanced.loadStartingWith(idPrefix, [options]);
+
+await session.advanced.loadStartingWithIntoStream(idPrefix, output, [options]);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **idPrefix** | string | prefix for which the documents should be returned |
+| **options** | object | Options with the following properties |
+| **matches** | string | pipe ('|') separated values for which document IDs (after 'idPrefix') should be matched ('?' any single character, '*' any characters) |
+| **start** | number | number of documents that should be skipped |
+| **pageSize** | number | maximum number of documents that will be retrieved |
+| **exclude** | string | pipe ('|') separated values for which document IDs (after 'idPrefix') should **not** be matched ('?' 
any single character, '*' any characters) | +| **skipAfter** | string | skip document fetching until given ID is found and return documents after that ID (default: `null`) | +| **documentType** | function | A class constructor used for reviving the results' entities | + +| Return Value | | +| ------------- | ----- | +| `Promise` | A `Promise` resolving to an array of entities matching given parameters | + +### Example I + + + +{`// return up to 128 entities with Id that starts with 'employees' +const result = await session + .advanced + .loadStartingWith("employees/", \{ + start: 0, + pageSize: 128 + \}); +`} + + + +### Example II + + + +{`// return up to 128 entities with Id that starts with 'employees/' +// and rest of the key begins with "1" or "2" e.g. employees/10, employees/25 +const result = await session + .advanced + .loadStartingWith("employees/", \{ + matches: "1*|2*", + start: 0, + pageSize: 128 + \}); +`} + + + + + +## ConditionalLoad + +This method can be used to check whether a document has been modified +since the last time its change vector was recorded, so that the cost of loading it +can be saved if it has not been modified. + +The `conditionalLoad` method takes a document's [change vector](../../server/clustering/replication/change-vector.mdx). +If the entity is tracked by the session, this method returns the entity. If the entity +is not tracked, it checks if the provided change vector matches the document's +current change vector on the server side. If they match, the entity is not loaded. +If the change vectors _do not_ match, the document is loaded. + +The method is accessible from the `session.Advanced` operations. + + +{`await session.advanced.conditionalLoad(id, changeVector, clazz); +`} + + + +| Parameter | Type | Description | +| ------------- | ------------- | ----- | +| **id** | `string` | The identifier of a document to be loaded. | +| **changeVector** | `string` | The change vector you want to compare with the server-side change vector. If the change vectors match, the document is not loaded. | + +| Return Type | Description | +|--------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ValueTuple `(object, changeVector)` | If the given change vector and the server side change vector do not match, the method returns the requested entity and its current change vector.
If the change vectors match, the method returns `default` as the entity, and the current change vector.
If the specified document does not exist, the method returns only `default` without a change vector. |
+
+### Example
+
+
+
+{`const session = store.openSession();
+const user = new User("Bob");
+await session.store(user, "users/1");
+await session.saveChanges();
+
+const changeVector = session.advanced.getChangeVectorFor(user);
+
+\{
+    // New session which does not track our User entity
+    // The given change vector matches
+    // the server-side change vector
+    // Does not load the document
+    const session = store.openSession();
+    const result1 = await session.advanced
+        .conditionalLoad("users/1", changeVector, User);
+
+    // Modify the document
+    user.name = "Bob Smith";
+    await session.store(user);
+    await session.saveChanges();
+
+    // Change vectors do not match
+    // Loads the document
+    const result2 = await session.advanced
+        .conditionalLoad("users/1", changeVector, User);
+\}
+`}
+
+
+
+
+
+## Stream
+
+Entities can be streamed from the server using the `stream()` method from the `advanced` session operations.
+
+
+
+{`// stream query results
+await session.advanced.stream(query, [statsCallback]);
+
+// stream documents with ID starting with
+await session.advanced.stream(idPrefix, [options]);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **idPrefix** | string | prefix for which the documents should be returned |
+| **query** | query object | a query obtained from a call to `session.query()` or `session.advanced.rawQuery()` |
+| **options** | object | Options with the following properties |
+| **startsWith** | string | prefix for which documents should be streamed |
+| **matches** | string | pipe ('|') separated values for which document IDs should be matched ('?' any single character, '*' any characters) |
+| **start** | number | number of documents that should be skipped |
+| **pageSize** | number | maximum number of documents that will be retrieved |
+| **skipAfter** | string | skip document fetching until a given ID is found and return documents after that ID (default: `null`) |
+| **documentType** | function | A class constructor used for reviving the results' entities |
+| **statsCallback** | function | callback returning information about the streaming query (amount of results, which index was queried, etc.) |
+
+
+| Return Value | |
+| ------------- | ----- |
+| `Promise` | A `Promise` resolving to readable stream with query results |
+
+
+### Example I
+
+Stream documents for an ID prefix:
+
+
+
+{`// stream() returns a Node.js Readable
+const stream = await session.advanced.stream("employees/");
+
+stream.on("data", data => \{
+    // Employee \{ name: 'Anna', id: 'employees/1-A' \}
+\});
+
+stream.on("error", err => \{
+    // handle errors
+\});
+
+stream.on("end", () => \{
+    // stream ended
+\});
+`}
+
+
+
+### Example II
+
+Fetch documents for an ID prefix directly into a writable stream:
+
+
+
+{`const employeesFile = fs.createWriteStream("employees.json");
+await session.advanced.loadStartingWithIntoStream("employees/", employeesFile);
+`}
+
+
+
+
+Entities loaded using `stream()` will be transient (not attached to session).
+
+
+
+
+## IsLoaded
+
+Use the `isLoaded` method from the `advanced` session operations
+to check if an entity is attached to a session (e.g. because it's been
+previously loaded).
+
+
+`isLoaded` checks if an attempt to load a document has already been made
+during the current session, and returns `true` even if such an attempt was
+made and failed. 
+If, for example, the `load` method was used to load `employees/3` during +this session and failed because the document has been previously deleted, +`isLoaded` will still return `true` for `employees/3` for the remainder +of the session just because of the attempt to load it. + + + + +{`session.advanced.isLoaded(id); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **id** | string | Entity ID for which the check should be performed. | + +| Return Value | | +| ------------- | ----- | +| boolean | Indicates if an entity with a given ID is loaded. | + +### Example + + + +{`session.advanced.isLoaded("employees/1"); // false +const employee = await session.load("employees/1"); +session.advanced.isLoaded("employees/1"); // true +`} + + + + + +### On entities loading, JS classes and the **documentType** parameter + +Type information about the entity and its contents is by default stored in the document metadata. Based on that its types are revived when loaded from the server. + + +In order to avoid passing **documentType** argument every time, you can register the type in the document conventions using the `registerEntityType()` method before calling DocumentStore's `initialize()` like so: + + + +{`class Pet \{ + constructor(name) \{ + this.name = name; + \} +\} + +class Person \{ + constructor(name, pet) \{ + this.name = name; + this.pet = pet; + \} +\} + +documentStore.conventions.registerEntityType(Person); +documentStore.conventions.registerEntityType(Pet); +// ... + +documentStore.initialize(); +`} + + + + + +If you fail to do so, entities (and all subobjects) loaded from the server are going to be plain object literals and not instances of the original type they were stored with. + + diff --git a/versioned_docs/version-7.1/client-api/session/_loading-entities-php.mdx b/versioned_docs/version-7.1/client-api/session/_loading-entities-php.mdx new file mode 100644 index 0000000000..b09f832bef --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/_loading-entities-php.mdx @@ -0,0 +1,350 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* There are several methods that allow users to load documents from the database and convert them to entities. + +* This article covers the following methods: + + - [`load`](../../client-api/session/loading-entities.mdx#load) + - [`load` with `include`](../../client-api/session/loading-entities.mdx#load-with-include) + - [`load` - multiple entities](../../client-api/session/loading-entities.mdx#load---multiple-entities) + - [`loadStartingWith`](../../client-api/session/loading-entities.mdx#loadstartingwith) + - [`conditionalLoad`](../../client-api/session/loading-entities.mdx#conditionalload) + - [`isLoaded`](../../client-api/session/loading-entities.mdx#isloaded) + +* For loading entities lazily see [perform requests lazily](../../client-api/session/how-to/perform-operations-lazily.mdx). + + +## `load` + +The most basic way to load a single entity is to use the `load` method. 
+
+
+
+{`public function load(?string $className, ?string $id): ?object;
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **id** | `string` | An ID to load a single entity by |
+| **className** | `string` | What entity type to load |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `?object` | The loaded entity, or `null` if an entity with the given ID doesn't exist |
+
+### Example
+
+
+
+{`/** @var Employee $employee */
+$employee = $session->load(Employee::class, "employees/1");
+`}
+
+
+
+
+Starting with RavenDB version 4.x, only string identifiers are supported.
+If you are upgrading from 3.x, this is a major change since in `3.x` non-string
+identifiers were supported.
+
+
+
+
+## `load` with `include`
+
+When there is a 'relationship' between documents, those documents can be loaded in a
+single request call using the `include + load` methods. Learn more in
+[How To Handle Document Relationships](../../client-api/how-to/handle-document-relationships.mdx).
+
+
+Also see:
+
+* [Including Counters](../../document-extensions/counters/counters-and-other-features.mdx#including-counters)
+* [Including Time Series](../../document-extensions/timeseries/client-api/session/include/overview.mdx)
+* [Including Compare Exchange Values](../../client-api/operations/compare-exchange/include-compare-exchange.mdx)
+* [Including Document Revisions](../../document-extensions/revisions/client-api/session/including.mdx)
+
+
+
+{`function include(?string $path): LoaderWithIncludeInterface;
+
+public function load(string $className, array $ids): ObjectArray;
+public function load(string $className, StringArray $ids): ObjectArray;
+
+public function load(string $className, string $id): ?object;
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **path** | `string` | A path that the server should search the referenced documents by |
+| **className** | `string` | What entity type to load |
+| **id** | `string` | An ID to load a single entity by |
+| **ids** | `array`/`StringArray` | An array of IDs to load entities by |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `?object` | The loaded entity, or `null` if an entity with the given ID doesn't exist |
+| `ObjectArray` | An array of loaded entities |
+| `LoaderWithIncludeInterface` | The `include` method doesn't satisfy requests directly but returns an interface that can be used |
+
+### Example
+
+We can use this code to load a product and include the supplier document it references in the same call.
+
+
+
+{`// loading 'products/1'
+// including document found in 'supplier' property
+
+/** @var Product $product */
+$product = $session
+    ->include("Supplier")
+    ->load(Product::class, "products/1");
+
+$supplier = $session->load(Supplier::class, $product->getSupplier()); // this will not make server call
+`}
+
+
+
+
+
+## `load` - multiple entities
+
+To load multiple entities at once, use one of the following `load` overloads. 
+
+
+
+{`public function load(string $className, array $ids): ObjectArray;
+public function load(string $className, StringArray $ids): ObjectArray;
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **className** | `string` | What entity type to load |
+| **ids** | `array`/`StringArray` | An array of IDs to load entities by |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `ObjectArray` | An array of loaded entities |
+
+
+
+{`$employees = $session->load(Employee::class, ["employees/1", "employees/2", "employees/3"]);
+`}
+
+
+
+
+
+## `loadStartingWith`
+
+To load multiple entities that contain a common prefix, use the `loadStartingWith` method from the `advanced` session operations.
+
+
+
+{`public function loadStartingWith(
+    string $className,
+    ?string $idPrefix,
+    ?string $matches = null,
+    int $start = 0,
+    int $pageSize = 25,
+    ?string $exclude = null,
+    ?string $startAfter = null
+): ObjectArray;
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **className** | `string` | What entity type to load |
+| **idPrefix** | `string` | ID prefix: documents will be retrieved if their ID starts with the given prefix |
+| **matches** | `string` | pipe (\|) separated values, that document IDs (after 'idPrefix') should match.
`?` - any single character
`*` - any string of characters | +| **start** | `int` | number of documents to skip | +| **pageSize** | `int` | maximum number of documents to retrieve | +| **exclude** | `string` | pipe (\|) separated values, that document IDs (after 'idPrefix') should **not** match.
`?` - any single character
`*` - any string of characters | +| **startAfter** | `string` | skip fetching document until the given ID is found, and return documents after this ID (default: `null`) | + +| Return Type | Description | +| ------------- | ----- | +| `ObjectArray` | an array of entities matching the given parameters | + +### Example I + + + +{`// return up to 128 entities with Id that starts with 'employees' +$result = $session + ->advanced() + ->loadStartingWith(Employee::class, "employees/", null, 0, 128); +`} + + + +### Example II + + + +{`// return up to 128 entities with Id that starts with 'employees/' +// and rest of the key begins with "1" or "2" e.g. employees/10, employees/25 +$result = $session + ->advanced() + ->loadStartingWith(Employee::class, "employees/", "1*|2*", 0, 128); +`} + + + + + +## `conditionalLoad` + +This method can be used to check whether a document has been modified +since the last time its change vector was recorded, so that the cost of loading it +can be saved if it has not been modified. + +The `conditionalLoad` method takes a document's [change vector](../../server/clustering/replication/change-vector.mdx). +If the entity is tracked by the session, this method returns the entity. If the entity +is not tracked, it checks if the provided change vector matches the document's +current change vector on the server side. If they match, the entity is not loaded. +If the change vectors _do not_ match, the document is loaded. + +The method is accessible from the session `advanced` operations. + + + +{`function conditionalLoad(?string $className, ?string $id, ?string $changeVector): ConditionalLoadResult; +`} + + + +| Parameter | Type | Description | +| ------------- | ------------- | ----- | +| **className** | `string` | What entity type to load | +| **id** | `string` | The identifier of a document to load | +| **changeVector** | `string` | The change vector you want to compare with the server-side change vector. If the change vectors match, the document is not loaded. | + +| Return Type | Description | +| ------------- | ----- | +| `ConditionalLoadResult` | If the given change vector and the server side change vector do not match, the method returns the requested entity and its current change vector.
If the change vectors match, the method returns `default` as the entity, and the current change vector.
If the specified document does not exist, the method returns only `default` without a change vector. |
+
+### Example
+
+
+
+{`$changeVector = "";
+$user = new User("Bob");
+
+$session = $store->openSession();
+try \{
+    $session->store($user, "users/1");
+    $session->saveChanges();
+
+    $changeVector = $session->advanced()->getChangeVectorFor($user);
+\} finally \{
+    $session->close();
+\}
+
+$session = $store->openSession();
+try \{
+    // New session which does not track our User entity
+
+    // The given change vector matches
+    // the server-side change vector
+    // Does not load the document
+    $result1 = $session->advanced()
+        ->conditionalLoad(User::class, "users/1", $changeVector);
+
+    // Modify the document
+    $user->setName("Bob Smith");
+    $session->store($user);
+    $session->saveChanges();
+
+    // Change vectors do not match
+    // Loads the document
+    $result2 = $session->advanced()
+        ->conditionalLoad(User::class, "users/1", $changeVector);
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## isLoaded
+
+Use the `isLoaded` method from the `advanced` session operations
+to check if an entity is attached to a session (e.g. because it's been
+previously loaded).
+
+
+`isLoaded` checks if an attempt to load a document has already been made
+during the current session, and returns `true` even if such an attempt was
+made and failed.
+If, for example, the `load` method was used to load `employees/3` during
+this session and failed because the document has been previously deleted,
+`isLoaded` will still return `true` for `employees/3` for the remainder
+of the session just because of the attempt to load it.
+
+
+
+
+{`function isLoaded(string $id): bool;
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **id** | `string` | The ID of the entity to perform the check for |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `bool` | Indicates whether an entity with a given ID is loaded |
+
+### Example
+
+
+
+{`$isLoaded = $session->advanced()->isLoaded("employees/1"); // false
+$employee = $session->load(Employee::class, "employees/1");
+$isLoaded = $session->advanced()->isLoaded("employees/1"); // true
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_loading-entities-python.mdx b/versioned_docs/version-7.1/client-api/session/_loading-entities-python.mdx
new file mode 100644
index 0000000000..ed64054b56
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_loading-entities-python.mdx
@@ -0,0 +1,407 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* There are several methods that allow users to load documents from the database and convert them to entities. 
+ +* This article covers the following methods: + + - [load](../../client-api/session/loading-entities.mdx#load) + - [load with includes](../../client-api/session/loading-entities.mdx#load-with-includes) + - [load_starting_with](../../client-api/session/loading-entities.mdx#load_starting_with) + - [load_starting_with_into_stream](../../client-api/session/loading-entities.mdx#load_starting_with_into_stream) + - [conditional_load](../../client-api/session/loading-entities.mdx#conditional_load) + - [stream](../../client-api/session/loading-entities.mdx#stream) + - [is_loaded](../../client-api/session/loading-entities.mdx#is_loaded) + +* For loading entities lazily see [perform requests lazily](../../client-api/session/how-to/perform-operations-lazily.mdx). + + +From RavenDB version 4.x onward, only string identifiers are supported. +If you are upgrading from 3.x, this is a major change, because in 3.x non-string identifiers are supported as well. + + + +## load + +Use the `load` method to load **an entity** or **multiple entities**. + + + +{`def load( + self, + key_or_keys: Union[List[str], str], + object_type: Optional[Type[_T]] = None, + includes: Callable[[IncludeBuilder], None] = None, +) -> Union[Dict[str, _T], _T]: ... +`} + + + +| Parameter | Type | Description | +| ------------- | ------------- | ----- | +| **key_or_keys** | `str` or `List[str]` | Identifier or a list of identifiers of entities to load | +| **object_type**
(optional) | `Type[_T]` | Entity type to load (optional) |
+| **includes**
(optional) | `Callable[[IncludeBuilder], None]` | A **consumer function** that takes an [include builder](../../client-api/how-to/handle-document-relationships.mdx#includes) argument.
The user should use the builder inside this function to _include_ all the data needed within a load. |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `_T` | If a single document was requested, return an instance of the document or `None` if no document was found |
+| `Dict[str, _T]` | If multiple documents were requested, return a dictionary of document instances or `None` if no documents were found |
+
+
+### Examples
+
+* Load an entity
+
+
+{`employee = session.load("employees/1", Employee)
+`}
+
+
+
+* Load multiple entities:
+
+
+{`employees = session.load(["employees/1", "employees/2", "employees/3"], Employee)
+`}
+
+
+
+
+
+## load with includes
+
+When there is a 'relationship' between documents, those documents can be loaded in a
+single request call using the `include + load` methods. Learn more in
+[How To Handle Document Relationships](../../client-api/how-to/handle-document-relationships.mdx).
+
+
+Also see:
+
+* [Including Counters](../../document-extensions/counters/counters-and-other-features.mdx#including-counters)
+* [Including Time Series](../../document-extensions/timeseries/client-api/session/include/overview.mdx)
+* [Including Compare Exchange Values](../../client-api/operations/compare-exchange/include-compare-exchange.mdx)
+* [Including Document Revisions](../../document-extensions/revisions/client-api/session/including.mdx)
+
+
+
+{`def include(self, path: str) -> LoaderWithInclude: ...
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **path** | `str` | Search path that the server will use to look for the 'referenced' documents |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `LoaderWithInclude` | The `include` method by itself does not materialize any requests but returns loader containing methods such as `load`. |
+
+### Example I
+
+We can use this code to load a product and include the supplier document it references in the same call.
+
+
+
+{`# loading 'products/1'
+# including document found in 'supplier' property
+products_by_key = session.include("supplier").load("products/1", Product)
+product = products_by_key["products/1"]
+
+supplier = session.load(product.supplier) # this will not make server call
+`}
+
+
+
+### Example II
+
+
+
+{`# loading 'products/1'
+# including document found in 'Supplier' property
+products_by_key = session.include("Supplier").load("products/1", Product)
+product = products_by_key["products/1"]
+
+supplier = session.load(product.supplier, Supplier)
+`}
+
+
+
+
+
+## load_starting_with
+
+To load multiple entities with a common prefix, use the `advanced` session operation `load_starting_with`.
+
+
+{`def load_starting_with(
+    self,
+    id_prefix: str,
+    object_type: Optional[Type[_T]] = None,
+    matches: Optional[str] = None,
+    start: Optional[int] = None,
+    page_size: Optional[int] = None,
+    exclude: Optional[str] = None,
+    start_after: Optional[str] = None,
+) -> List[_T]: ...
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **id_prefix** | `str` | ID prefix that the returned documents should match |
+| **object_type**
(optional) | `Type[_T]` | The object type | +| **matches**
| `str` | Pipe ('|') separated values for which document IDs (after 'id_prefix') should be matched ('?' any single character, '*' any characters) | +| **start**
(optional) | `int` | Number of documents that should be skipped | +| **page_size**
(optional) | `int` | Maximum number of documents that will be retrieved | +| **exclude**
(optional) | `str` | Pipe ('|') separated values for which document IDs (after 'id_prefix') should **not** be matched ('?' any single character, '*' any characters) | +| **start_after**
(optional) | `str` | Skip document fetching until given ID is found and return documents after that ID (default: `None`) | + +| Return Type | Description | +| ----------- | ----------- | +| `List[_T]` | An array of entities matching the given parameters | + +### Example I + + + +{`# return up to 128 entities with Id that starts with 'employees' +result = session.advanced.load_starting_with("employees/", Employee, None, 0, 128) +`} + + + +### Example II + + + +{`# return up to 128 entities with Id that starts with 'employees' +# and rest of the key begins with "1" or "2" e.g. employees/10, employees/25 +result = session.advanced.load_starting_with("employees/", Employee, "1*|2*", 0, 128) +`} + + + + + +## load_starting_with_into_stream + +To output multiple entities with a common prefix into a stream, use the `advanced` session operation `load_starting_with_into_stream`. + + +{`def load_starting_with_into_stream( + self, + id_prefix: str, + matches: str = None, + start: int = 0, + page_size: int = 25, + exclude: str = None, + start_after: str = None, +) -> bytes: ... +`} + + + +| Parameter | Type | Description | +| ------------- | ------------- | ----- | +| **id_prefix** | `str` | Prefix to return the documents to | +| **matches**
| `str` | Pipe ('|') separated values for which document IDs (after 'id_prefix') should be matched ('?' any single character, '*' any characters) | +| **start** | `int` | Number of documents that should be skipped | +| **page_size** | `int` | Maximum number of documents that will be retrieved | +| **exclude** | `str` | Pipe ('|') separated values for which document IDs (after 'id_prefix') should **not** be matched ('?' any single character, '*' any characters) | +| **start_after** | `str` | Skip document fetching until given ID is found and return documents after that ID (default: `None`) | + +| Return Type | Description | +| ----------- | ----------- | +| `bytes` | The retrieved entities, returned as a stream of bytes | + +### Example + +To stream entities from the `employees` collection, use: + + +{`results_bytes = session.advanced.load_starting_with_into_stream("employees/") +`} + + + + + +## conditional_load + +This method can be used to check whether a document has been modified +since the last time its change vector was recorded, so that the cost of loading it +can be saved if it has not been modified. + +The `conditional_load` method takes a document's [change vector](../../server/clustering/replication/change-vector.mdx). +If the entity is tracked by the session, this method returns the entity. If the entity +is not tracked, it checks if the provided change vector matches the document's +current change vector on the server side. If they match, the entity is not loaded. +If the change vectors _do not_ match, the document is loaded. + +The method is accessible from the `session.advanced` operations. + + + +{`def conditional_load( + self, key: str, change_vector: str, object_type: Optional[Type[_T]] = None +) -> ConditionalLoadResult[_T]: ... +`} + + + +| Parameter | Type | Description | +| ------------- | ------------- | ----- | +| **key** | `str` | The identifier of a document to be loaded | +| **change_vector** | `str` | The change vector you want to compare with the server-side change vector. If the change vectors match, the document is not loaded. | +| **object_type**
(optional) | `Type[_T]` | Object type | + +| Return Type | Description | +|-----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `ConditionalLoadResult[_T]` | If the given change vector and the server side change vector do not match, the method returns the requested entity and its current change vector.
If the change vectors match, the method returns `default` as the entity, and the current change vector.
If the specified document does not exist, the method returns only `default` without a change vector. |
+
+### Example
+
+
+
+{`change_vector: Optional[str] = None
+user = User(name="Bob")
+
+with store.open_session() as session:
+    session.store(user, "users/1")
+    session.save_changes()
+
+    change_vector = session.advanced.get_change_vector_for(user)
+
+# New session which does not track our User entity
+with store.open_session() as session:
+    # The given change vector matches
+    # the server-side change vector
+    # Does not load the document
+    result1 = session.advanced.conditional_load("users/1", change_vector)
+
+    # Modify the document
+    user.name = "Bob Smith"
+    session.store(user)
+    session.save_changes()
+
+    # Change vectors do not match
+    # Loads the document
+    result2 = session.advanced.conditional_load("users/1", change_vector)
+`}
+
+
+
+
+
+<!--
+## stream
+
+Entities can be streamed from the server using one of the following `stream` methods from the `advanced` session operations.
+
+Streaming query results does not support the [`include` feature](../../client-api/how-to/handle-document-relationships.mdx#includes).
+Learn more in [How to Stream Query Results](../../client-api/session/querying/how-to-stream-query-results.mdx).
+
+
+Entities loaded using `stream` will be transient (not attached to session).
+
+
+
+
+{`# waiting for merge, will be supported from 5.4 client release (https://pypi.org/project/ravendb/)
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **starts_with** | `str` | prefix for which documents should be streamed |
+| **matches** | `str` | pipe ('|') separated values for which document IDs should be matched ('?' any single character, '*' any characters) |
+| **start** | `int` | number of documents that should be skipped |
+| **page_size** | `int` | maximum number of documents that will be retrieved |
+| **start_after** | `str` | skip document fetching until a given ID is found and returns documents after that ID (default: `None`) |
+| **stream_query_stats** | `streamQueryStats` (out parameter) | Information about the streaming query (amount of results, which index was queried, etc.) |
+
+| Return Type | Description |
+| ------------- | ----- |
+| `IEnumerator<`[StreamResult](../../glossary/stream-result.mdx)`>` | Enumerator with entities. |
+| `streamQueryStats` (out parameter) | Information about the streaming query (amount of results, which index was queried, etc.) |
+
+
+### Example I
+
+Stream documents for a ID prefix:
+
+
+
+{`# unsupported, will be supported from 5.4 client release (https://pypi.org/project/ravendb/)
+`}
+
+
+
+## Example 2
+
+Fetch documents for a ID prefix directly into a stream:
+
+
+
+{`results_bytes = session.advanced.load_starting_with_into_stream("employees/")
+`}
+
+
+
+
+-->
+
+## is_loaded
+
+Use the `is_loaded` method from the `advanced` session operations
+to check if an entity is attached to a session (e.g. because it's been
+previously loaded).
+
+
+`is_loaded` checks if an attempt to load a document has already been made
+during the current session, and returns `True` even if such an attempt was
+made and failed.
+If, for example, the `load` method was used to load `employees/3` during
+this session and failed because the document has been previously deleted,
+`is_loaded` will still return `True` for `employees/3` for the remainder
+of the session just because of the attempt to load it.
+
+
+
+
+{`def is_loaded(self, key: str) -> bool: ...
+`} + + + +| Parameter | Type | Description | +| ------------- | ------------- | ----- | +| **key** | `str` | ID of the entity whose status is checked | + +| Return Type | Description | +| ------------- | ----- | +| `bool` | Indicates if the given entity is loaded | + +### Example + + + +{`is_loaded = session.advanced.is_loaded("employees/1") # False +employee = session.load("employees/1") +is_loaded = session.advanced.is_loaded("employees/1") # True +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/_opening-a-session-csharp.mdx b/versioned_docs/version-7.1/client-api/session/_opening-a-session-csharp.mdx new file mode 100644 index 0000000000..51f1cdfb57 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/_opening-a-session-csharp.mdx @@ -0,0 +1,160 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A Session object is obtained from the [Document Store](../../client-api/what-is-a-document-store.mdx). + +* A Session can operate Synchronously or Asynchronously. + * `OpenSession()` - Open a Session for a **Synchronous** mode of operation. + * `OpenAsyncSession()` - Open a Session for **Asynchronous** mode of operation. + +* Various Session options can be configured using the `SessionOptions` object. + If no database is specified in the options then the [Default Database](../../client-api/setting-up-default-database.mdx) (stored in the Document Store) is assumed. + +* Be sure to wrap the Session variable with a 'using' statement to ensure proper disposal. + +* In this page: + * [Syntax](../../client-api/session/opening-a-session.mdx#syntax) + * [Session options](../../client-api/session/opening-a-session.mdx#session-options) + * [Open session example](../../client-api/session/opening-a-session.mdx#open-session-example) + +## Syntax + +* Use `OpenSession()` / `OpenAsyncSession()` to open a session from the Document Store. +* The following overloads are available: + + + + +{`// Open a Session for the default database configured in \`DocumentStore.Database\` +IDocumentSession OpenSession(); + +// Open a Session for a specified database +IDocumentSession OpenSession(string database); + +// Open a Session and pass it a preconfigured SessionOptions object +IDocumentSession OpenSession(SessionOptions options); +`} + + + + +{`// Open a Session for the default database configured in \`DocumentStore.Database\` +IAsyncDocumentSession OpenAsyncSession(); + +// Open a Session for a specified database +IAsyncDocumentSession OpenAsyncSession(string database); + +// Open a Session and pass it a preconfigured SessionOptions object +IAsyncDocumentSession OpenAsyncSession(SessionOptions options); +`} + + + + +| Parameter | Type | Description | +|--------------|------------------|--------------------------------------------------------------------------------------------------------------------------------| +| **database** | `string` | The Session will operate on this database,
overriding the default database set in the document store. | +| **options** | `SessionOptions` | An object with Session configuration options. See details [below](../../client-api/session/opening-a-session.mdx#session-options). | + +| Return Value | Description | +|----------------------------------------------|-------------------------------| +| `IDocumentSession` / `IAsyncDocumentSession` | Instance of a Session object | + + + +## Session options + +* The `SessionOptions` object contains various options to configure the Session's behavior. + +| Option | Type | Description | Default Value | +|---------------------------------------------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------| +| **Database** | `string` | The Session will operate on this database,
overriding the Default Database. | `null` - the Session operates on the Default Database |
+| **NoTracking** | `bool` | `true` - Tracking will be turned off.
`false` - The Session tracks changes made to all entities it loaded, stored, or queried for.
Learn more in [Disable tracking](../../client-api/session/configuration/how-to-disable-tracking.mdx) | `false` |
+| **NoCaching** | `bool` | `true` - Server responses will not be cached.
`false` - The Session caches the server responses.
Learn more in [Disable caching](../../client-api/session/configuration/how-to-disable-caching.mdx) | `false` | +| **RequestExecutor** | `RequestExecutor` | ( _Advanced option_ )
The request executor the Session should use. | `null` - the default request executor is used | +| **TransactionMode** | `TransactionMode` | Specify the Session's transaction mode
`SingleNode` / `ClusterWide`
Learn more in [Cluster-wide vs. Single-node](../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) | `SingleNode` | + +* Experts Only: + +| Option | Type | Description | Default Value | +|--------------------------------------------------------------|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| **DisableAtomicDocumentWrites-
InClusterWideTransaction** | `bool?` | ( _Experts only_ )
`true` - Disable Atomic-Guards in cluster-wide sessions.
`false` - Automatic atomic writes in cluster-wide sessions are enabled.
Learn more in [Atomic-Guards](../../client-api/session/cluster-transaction/atomic-guards.mdx) | `false` |
+
+
+
+## Open session example
+
+* The following example opens a **cluster-wide Session**:
+
+
+
+
+{`using (var store = new DocumentStore())
+{
+    // Define the Session's options object
+    SessionOptions options = new SessionOptions
+    {
+        Database = "your_database_name",
+        TransactionMode = TransactionMode.ClusterWide
+    };
+
+    // Open the Session in a Synchronous mode
+    // Pass the options object to the session
+    using (IDocumentSession session = store.OpenSession(options))
+    {
+        // Run your business logic:
+        //
+        // Store entities
+        // Load and Modify entities
+        // Query indexes & collections
+        // Delete entities
+        // ... etc.
+
+        session.SaveChanges();
+        // When 'SaveChanges' returns successfully,
+        // all changes made to the entities in the session are persisted to the documents in the database
+    }
+}
+`}
+
+
+
+{`using (var store = new DocumentStore())
+{
+    // Define the Session's options object
+    SessionOptions options = new SessionOptions
+    {
+        Database = "your_database_name",
+        TransactionMode = TransactionMode.ClusterWide
+    };
+
+    // Open the Session in an Asynchronous mode
+    // Pass the options object to the session
+    using (IAsyncDocumentSession asyncSession = store.OpenAsyncSession(options))
+    {
+        // Run your business logic:
+        //
+        // Store entities
+        // Load and Modify entities
+        // Query indexes & collections
+        // Delete entities
+        // ... etc.
+
+        await asyncSession.SaveChangesAsync();
+        // When 'SaveChanges' returns successfully,
+        // all changes made to the entities in the session are persisted to the documents in the database
+    }
+}
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_opening-a-session-java.mdx b/versioned_docs/version-7.1/client-api/session/_opening-a-session-java.mdx
new file mode 100644
index 0000000000..dd745a6968
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_opening-a-session-java.mdx
@@ -0,0 +1,112 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To open a session, use the `openSession` method from `DocumentStore`.
+
+## Syntax
+
+There are three overloads of the `openSession` method:
+
+
+
+{`// Open session for a 'default' database configured in 'DocumentStore'
+IDocumentSession openSession();
+
+// Open session for a specified database
+IDocumentSession openSession(String database);
+
+IDocumentSession openSession(SessionOptions sessionOptions);
+`}
+
+
+
+The first method is equivalent to:
+
+
+
+{`store.openSession(new SessionOptions());
+`}
+
+
+
+The second method is equivalent to:
+
+
+
+{`SessionOptions sessionOptions = new SessionOptions();
+sessionOptions.setDatabase(databaseName);
+store.openSession(sessionOptions);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **options** | `SessionOptions` | Options containing information such as the **database name** and the **RequestExecutor**. |
+
+| Return Value | |
+| ------------- | ----- |
+| IDocumentSession | Instance of a session object. 
|
+
+## Options
+
+
+
+{`private String database;
+private boolean noTracking;
+private boolean noCaching;
+private RequestExecutor requestExecutor;
+private TransactionMode transactionMode;
+
+// getters and setters
+`}
+
+
+
+| Options | | |
+| ------------- | ------------- | ----- |
+| **database** | String | Name of database that the session should operate on. If `null` then the [default database set in DocumentStore](../../client-api/setting-up-default-database.mdx) is used. |
+| **noTracking** | boolean | Indicates if the session should **not** keep track of the changes. Default: `false`. More [here](../../client-api/session/configuration/how-to-disable-tracking.mdx). |
+| **noCaching** | boolean | Indicates if the session should **not** cache responses. Default: `false`. More [here](../../client-api/session/configuration/how-to-disable-caching.mdx). |
+| **requestExecutor** | `RequestExecutor` | _(Advanced)_ Request executor to use. If `null`, the default one will be used. |
+| **transactionMode** | `TransactionMode` | Sets the mode for the session. By default it is set to `SINGLE_NODE`, but the session can also operate in `CLUSTER_WIDE` mode. You can read more about Cluster-Wide Transactions [here](../../server/clustering/cluster-transactions.mdx). |
+
+
+## Example I
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    // code here
+\}
+`}
+
+
+
+## Example II - Disabling Entities Tracking
+
+
+
+{`SessionOptions sessionOptions = new SessionOptions();
+sessionOptions.setNoTracking(true);
+try (IDocumentSession session = store.openSession(sessionOptions)) \{
+    Employee employee1 = session.load(Employee.class, "employees/1-A");
+    Employee employee2 = session.load(Employee.class, "employees/1-A");
+
+    // because NoTracking is set to 'true'
+    // each load will create a new Employee instance
+    Assert.assertNotSame(employee1, employee2);
+\}
+`}
+
+
+
+## Remarks
+
+
+**Always remember to release the resources allocated by a session after usage by invoking the `close` method or wrapping the session object in a try-with-resources statement.**
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_opening-a-session-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/_opening-a-session-nodejs.mdx
new file mode 100644
index 0000000000..b49e4bfe8c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_opening-a-session-nodejs.mdx
@@ -0,0 +1,137 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A session object is obtained from the [document store](../../client-api/what-is-a-document-store.mdx).
+
+* Various session options can be configured using the `SessionOptions` object.
+  If no database is specified in the options then the [default database](../../client-api/setting-up-default-database.mdx) is assumed.
+
+* Most methods on the session object are asynchronous and return a `Promise`.
+  Either use `async/await` or `.then()` with callback functions.
+  Refer to the specific documentation for each method usage.
+
+* In this page:
+  * [Syntax](../../client-api/session/opening-a-session.mdx#syntax)
+  * [Session options](../../client-api/session/opening-a-session.mdx#session-options)
+  * [Open session example](../../client-api/session/opening-a-session.mdx#open-session-example)
+
+## Syntax
+
+* Use `openSession()` to open a session from the document store. 
+* The following overloads are available: + + + +{`openSession(); + +openSession(database); + +openSession(sessionOptions); +`} + + + +| Parameter | Type | Description | +|--------------|------------------|--------------------------------------------------------------------------------------------------------------------------------| +| **database** | string | The session will operate on this database,
overriding the default database set in the document store. | +| **options** | `SessionOptions` | An object with session configuration options. See details [below](../../client-api/session/opening-a-session.mdx#session-options). | + +| Return Value | Description | +|---------------------|--------------------------------| +| `IDocumentSession` | Instance of a session object | + + + +## Session options + +* The `SessionOptions` object contains various options to configure the Session's behavior. + +| Option | Type | Description | Default Value | +|---------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------| +| **database** | string | The session will operate on this database,
overriding the Default Database. | `null` - the session operates on the Default Database |
+| **noTracking** | boolean | `true` - Tracking will be turned off.
`false` - The session tracks changes made to all entities it loaded, stored, or queried for.
Learn more in [Disable tracking](../../client-api/session/configuration/how-to-disable-tracking.mdx) | `false` |
+| **noCaching** | boolean | `true` - Server responses will not be cached.
`false` - The session caches the server responses.
Learn more in [Disable caching](../../client-api/session/configuration/how-to-disable-caching.mdx) | `false` | +| **requestExecutor** | `RequestExecutor` | ( _Advanced option_ )
The request executor the session should use. | `null` - the default request executor is used | +| **transactionMode** | `TransactionMode` | Specify the session's transaction mode
`SingleNode` / `ClusterWide`
Learn more in [Cluster-wide vs. Single-node](../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) | `SingleNode` | + +* Experts Only: + +| Option | Type | Description | Default Value | +|----------------------------------------------------------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| **disableAtomicDocumentWrites-
InClusterWideTransaction** | boolean | ( _Experts only_ )
`true` - Disable Atomic-Guards in cluster-wide sessions.
`false` - Automatic atomic writes in cluster-wide sessions are enabled.
Learn more in [Atomic-Guards](../../client-api/session/cluster-transaction/atomic-guards.mdx) | `false` |
+
+
+
+## Open session example
+
+* The following example opens a **cluster-wide Session**:
+
+
+
+
+{`// Open the session, pass the options object to the session
+const session = documentStore.openSession({
+    database: "your_database_name",
+    transactionMode: "ClusterWide"
+});
+
+// Run your business logic:
+//
+// Store entities
+// Load and Modify entities
+// Query indexes & collections
+// Delete documents
+// ... etc.
+
+// For example: load a document and modify it
+// Note: 'load' returns a Promise and must be awaited
+const entity = await session.load("companies/1-A");
+entity.name = "NewCompanyName";
+
+// Save your changes
+// Note: 'saveChanges' also returns a Promise and must be awaited
+await session.saveChanges();
+// When 'SaveChanges' returns successfully,
+// all changes made to the entities in the session are persisted to the documents in the database
+`}
+
+
+
+{`// Open the session, pass the options object to the session
+const session = documentStore.openSession({
+    database: "your_database_name",
+    transactionMode: "ClusterWide"
+});
+
+// Run your business logic:
+//
+// Store entities
+// Load and Modify entities
+// Query indexes & collections
+// Delete documents
+// ... etc.
+
+// For example: load a document, modify it, and save
+// Note: 'load' & 'saveChanges' each return a Promise that is then handled by callback functions
+session.load("companies/1-A")
+    .then((company) => {
+        company.name = "NewCompanyName";
+    })
+    .then(() => session.saveChanges())
+    .then(() => {
+        // When 'SaveChanges' returns successfully,
+        // all changes made to the entities in the session are persisted to the documents in the database
+    });
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_opening-a-session-php.mdx b/versioned_docs/version-7.1/client-api/session/_opening-a-session-php.mdx
new file mode 100644
index 0000000000..4564fe5ffe
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_opening-a-session-php.mdx
@@ -0,0 +1,146 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A Session object is obtained from the [Document Store](../../client-api/what-is-a-document-store.mdx).
+
+* Open a session using `openSession()`.
+
+* Various Session options can be configured using the `sessionOptions` object.
+  If no database is specified in the options then the [Default Database](../../client-api/setting-up-default-database.mdx) (stored in the Document Store) is assumed.
+
+* Make sure you release allocated session resources using the `close` method or by wrapping
+  the session object in a `try` statement ([see below](../../client-api/session/opening-a-session.mdx#example-3---releasing-session-resources)).
+
+* In this page:
+  * [Syntax](../../client-api/session/opening-a-session.mdx#syntax)
+  * [Session options](../../client-api/session/opening-a-session.mdx#session-options)
+  * [Open session examples](../../client-api/session/opening-a-session.mdx#open-session-examples)
+
+## Syntax
+
+* Use `openSession()` to open a session from the Document Store. 
+* The following overloads are available: + + + +{`// Open session for a 'default' database configured in 'DocumentStore' +public function openSession(): DocumentSessionInterface; + +// Open session for a specified database +public function openSession(string $database): DocumentSessionInterface; + +public function openSession(SessionOptions $sessionOptions): DocumentSessionInterface; +`} + + + +| Parameter | Type | Description | +|--------------|------------------|--------------------------------------------------------------------------------------------------------------------------------| +| **database** | `string` | The Session will operate on this database,
overriding the default database set in the document store. | +| **options** | `sessionOptions` | An object with Session configuration options. See details [below](../../client-api/session/opening-a-session.mdx#session-options). | + +| Return Value | Description | +|----------------------------|----------------------------| +| `DocumentSessionInterface` | Document session interface | + + + +## Session options + +* The `sessionOptions` object contains various options to configure the Session's behavior. + +| Option | Type | Description | Default Value | +|-----------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------| +| **Database** | `string` | The Session will operate on this database,
overriding the Default Database. | `null` - the Session operates on the Default Database |
+| **NoTracking** | `bool` | `true` - Tracking will be turned off.
`false` - The Session tracks changes made to all entities it loaded, stored, or queried for.
Learn more in [Disable tracking](../../client-api/session/configuration/how-to-disable-tracking.mdx) | `false` |
+| **NoCaching** | `bool` | `true` - Server responses will not be cached.
`false` - The Session caches the server responses.
Learn more in [Disable caching](../../client-api/session/configuration/how-to-disable-caching.mdx) | `false` | +| **RequestExecutor** | `RequestExecutor` | ( _Advanced option_ )
The request executor the Session should use. | `null` - the default request executor is used | +| **TransactionMode** | `TransactionMode` | Specify the Session's transaction mode
`SingleNode` / `ClusterWide`
Learn more in [Cluster-wide vs. Single-node](../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) | `SingleNode` | + +* Experts Only: + +| Option | Type | Description | Default Value | +|--------------------------------------------------------------|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| **DisableAtomicDocumentWrites-
InClusterWideTransaction** | `bool?` | ( _Experts only_ )
`true` - Disable Atomic-Guards in cluster-wide sessions.
`false` - Automatic atomic writes in cluster-wide sessions are enabled.
Learn more in [Atomic-Guards](../../client-api/session/cluster-transaction/atomic-guards.mdx) | `false` |
+
+
+
+## Open session examples
+
+#### Example 1
+
+
+{`$store->openSession(new SessionOptions());
+`}
+
+
+
+#### Example 2
+
+
+{`$sessionOptions = new SessionOptions();
+$sessionOptions->setDatabase($databaseName);
+$store->openSession($sessionOptions);
+`}
+
+
+
+#### Example 3 - Releasing session resources
+
+Always remember to release allocated session resources after usage by
+invoking the `close` method or by wrapping the session object in a `try` statement.
+
+
+
+{`$session = $store->openSession();
+try \{
+    // code here
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+#### Example 4 - Disable entities tracking
+
+
+{`$sessionOptions = new SessionOptions();
+$sessionOptions->setNoTracking(true);
+$session = $store->openSession($sessionOptions);
+try \{
+    $employee1 = $session->load(Employee::class, "employees/1-A");
+    $employee2 = $session->load(Employee::class, "employees/1-A");
+
+    // because NoTracking is set to 'true'
+    // each load will create a new Employee instance
+    $this->assertNotSame($employee1, $employee2);
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+#### Example 5 - Disable session caching
+
+
+{`$sessionOptions = new SessionOptions();
+$sessionOptions->setNoCaching(true);
+$session = $store->openSession($sessionOptions);
+try \{
+    // code here
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_opening-a-session-python.mdx b/versioned_docs/version-7.1/client-api/session/_opening-a-session-python.mdx
new file mode 100644
index 0000000000..5f68df5293
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_opening-a-session-python.mdx
@@ -0,0 +1,90 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A Session object is obtained from the [Document Store](../../client-api/what-is-a-document-store.mdx).
+
+* Open a session using `open_session()`.
+
+* Various Session options can be configured using the `SessionOptions` object.
+  If no database is specified in the options then the [Default Database](../../client-api/setting-up-default-database.mdx) (stored in the Document Store) is assumed.
+
+* Be sure to wrap the session variable using a 'with' statement to ensure proper disposal.
+
+* In this page:
+  * [Syntax](../../client-api/session/opening-a-session.mdx#syntax)
+  * [Session options](../../client-api/session/opening-a-session.mdx#session-options)
+  * [Open session example](../../client-api/session/opening-a-session.mdx#open-session-example)
+
+## Syntax
+
+Use `open_session()` to open a session from the Document Store.
+
+
+
+{`# Open a Session - you may pass either a specific database or a preconfigured SessionOptions object
+# Passing no optional arguments opens a session for the default database configured in DocumentStore.database
+def open_session(
+    self, database: Optional[str] = None, session_options: Optional[SessionOptions] = None
+) -> DocumentSession:
+    ...
+`}
+
+
+
+| Parameter | Type | Description |
+|--------------|------------------|--------------------------------------------------------------------------------------------------------------------------------|
+| **database** | `str` | The session will operate on this database,
overriding the default database set in the document store. | +| **options** | `SessionOptions` | An object with Session configuration options. See details [below](../../client-api/session/opening-a-session.mdx#session-options). | + +| Return Value | Description | +|----------------------------------------------|-------------------------------| +| `IDocumentSession` | Instance of a Session object | + + + +## Session options + +* The `SessionOptions` object contains various options to configure the Session's behavior. + +| Option | Type | Description | Default Value | +|---------------------------------------------------------|------------------- |-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------| +| **database** | `str` | The Session will operate on this database,
overriding the Default Database. | `None` - the Session operates on the Default Database |
+| **no_tracking** | `bool` | `True` - Tracking will be turned off.
`False` - The Session tracks changes made to all entities it loaded, stored, or queried for.
Learn more in [Disable tracking](../../client-api/session/configuration/how-to-disable-tracking.mdx) | `False` | +| **no_caching** | `bool` | `True` - Server responses will not be cached.
`False` - The Session caches the server responses.
Learn more in [Disable caching](../../client-api/session/configuration/how-to-disable-caching.mdx) | `False` | +| **request_executor** | `RequestExecutor` | ( _Advanced option_ )
The request executor the Session should use. | `None` - the default request executor is used | +| **transaction_mode** | `TransactionMode` | Specify the Session's transaction mode
`SINGLE_NODE` / `CLUSTER_WIDE`
Learn more in [Cluster-wide vs. Single-node](../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) | `SINGLE_NODE` | + +* Experts Only: + +| Option | Type | Description | Default Value | +|--------------------------------------------------------------------|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| **disable_atomic_document_writes_in_cluster_wide_transaction** | `bool` | **Experts only**
`True` - Disable Atomic-Guards in cluster-wide sessions.
`False` - Automatic atomic writes in cluster-wide sessions are enabled.
Learn more in [Atomic-Guards](../../client-api/session/cluster-transaction/atomic-guards.mdx) | `False` |
+
+
+
+## Open session example
+
+* The following examples show the `open_session()` overloads and their equivalents:
+
+
+
+{`with DocumentStore() as store:
+    store.open_session()
+    # - is equivalent to:
+    store.open_session(session_options=SessionOptions())
+
+    # The second overload -
+    store.open_session("your_database_name")
+    # - is equivalent to:
+    store.open_session(session_options=SessionOptions(database="your_database_name"))
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_saving-changes-csharp.mdx b/versioned_docs/version-7.1/client-api/session/_saving-changes-csharp.mdx
new file mode 100644
index 0000000000..a492aaa0a7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_saving-changes-csharp.mdx
@@ -0,0 +1,256 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Pending session operations like `Store`, `Delete`, and many others, will not be sent to the server until `SaveChanges` is called.
+
+
+
+Whenever you execute `SaveChanges()` to send a batch of operations like put, update, or delete in a request,
+the server will wrap these operations in a [transaction](../../client-api/faq/transaction-support.mdx) upon execution in the database.
+
+Either all operations will be saved as a single, atomic transaction or none of them will be.
+Once `SaveChanges()` returns successfully, it is guaranteed that all changes are persisted in the database.
+
+
+
+## Syntax
+
+
+
+
+{`void SaveChanges();
+`}
+
+
+
+
+{`Task SaveChangesAsync();
+`}
+
+
+
+
+### Example
+
+
+
+
+{`// storing new entity
+session.Store(new Employee
+{
+    FirstName = "John",
+    LastName = "Doe"
+});
+
+session.SaveChanges();
+`}
+
+
+
+
+{`// storing new entity
+await asyncSession.StoreAsync(new Employee
+{
+    FirstName = "John",
+    LastName = "Doe"
+});
+
+await asyncSession.SaveChangesAsync();
+`}
+
+
+
+
+
+## Waiting for Indexes
+
+You can ask the server to wait until the indexes are caught up with changes made within the current session before the `SaveChanges` returns.
+
+* You can set a timeout (default: 15 seconds).
+* You can specify whether you want to throw on timeout (default: `false`).
+* You can specify indexes that you want to wait for. If you don't specify anything here, RavenDB will automatically select just the indexes that are impacted
+by this write.
+
+
+
+
+{`session.Advanced.WaitForIndexesAfterSaveChanges(
+    timeout: TimeSpan.FromSeconds(30),
+    throwOnTimeout: true,
+    indexes: new[] { "index/1", "index/2" });
+
+session.Store(new Employee
+{
+    FirstName = "John",
+    LastName = "Doe"
+});
+
+session.SaveChanges();
+`}
+
+
+
+
+{`asyncSession.Advanced.WaitForIndexesAfterSaveChanges(
+    timeout: TimeSpan.FromSeconds(30),
+    throwOnTimeout: true,
+    indexes: new[] { "index/1", "index/2" });
+
+await asyncSession.StoreAsync(new Employee
+{
+    FirstName = "John",
+    LastName = "Doe"
+});
+
+await asyncSession.SaveChangesAsync();
+`}
+
+
+
+
+
+
+## Waiting for Replication - Write Assurance
+
+Sometimes you might need to ensure that changes made in the session will be replicated to more than one node of the cluster before the `SaveChanges` returns.
+It can be useful if you have some writes that are really important, so you want to be sure the stored values will reside on multiple machines. 
It might also be necessary
+when you customize [the read balance behavior](../../client-api/configuration/load-balance/read-balance-behavior.mdx) and need to ensure the next request from the user
+will be able to read what he or she just wrote (the next open session might access a different node).
+
+You can ask the server to wait until the replication is caught up with those particular changes.
+
+* You can set a timeout (default: 15 seconds).
+* You can specify whether you want to throw on timeout, which may happen in case of network issues (default: `true`).
+* You can specify how many replicas (nodes) the current write must be replicated to before the `SaveChanges` returns (default: 1).
+* You can specify whether the `SaveChanges` will return only when the current write has been replicated to a majority of the nodes (default: `false`).
+
+
+
+
+{`session.Advanced.WaitForReplicationAfterSaveChanges(
+    timeout: TimeSpan.FromSeconds(30),
+    throwOnTimeout: false, //default true
+    replicas: 2, //minimum replicas to replicate
+    majority: false);
+
+session.Store(new Employee
+{
+    FirstName = "John",
+    LastName = "Doe"
+});
+
+session.SaveChanges();
+`}
+
+
+
+
+{`asyncSession.Advanced.WaitForReplicationAfterSaveChanges(
+    timeout: TimeSpan.FromSeconds(30),
+    throwOnTimeout: false, //default true
+    replicas: 2, //minimum replicas to replicate
+    majority: false);
+
+await asyncSession.StoreAsync(new Employee
+{
+    FirstName = "John",
+    LastName = "Doe"
+});
+
+await asyncSession.SaveChangesAsync();
+`}
+
+
+
+
+
+The `WaitForReplicationAfterSaveChanges` method waits only for replicas which are part of the cluster. It means that external replication destinations are not counted towards the number specified in the `replicas` parameter, since they are not part of the cluster.
+
+
+
+
+Even if RavenDB was not able to write your changes to the number of replicas you specified, the data has already been written to some nodes. You will get an error, but the data is already there.
+
+This is a powerful feature, but you need to be aware of the possible pitfalls of using it.
+
+
+
+
+
+## Transaction Mode - Cluster Wide
+
+Setting `TransactionMode` to `TransactionMode.ClusterWide` will enable the [Cluster Transactions](../../server/clustering/cluster-transactions.mdx) feature.
+
+With this feature enabled, the [Session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) will support the following _write_ commands:
+
+- `Store`/`StoreAsync`
+- `Delete`
+- `CreateCompareExchangeValue`
+- `UpdateCompareExchangeValue`
+- `DeleteCompareExchangeValue`
+
+
+Here is an example of creating a unique user using a cluster-wide transaction. 
+
+
+
+{`using (var store = new DocumentStore())
+{
+    using (var session = store.OpenSession(new SessionOptions
+    {
+        //default is: TransactionMode.SingleNode
+        TransactionMode = TransactionMode.ClusterWide
+    }))
+    {
+        var user = new Employee
+        {
+            FirstName = "John",
+            LastName = "Doe"
+        };
+        session.Store(user);
+
+        // this transaction is now conditional on this being
+        // successfully created (so, no other users with this name)
+        // it also creates an association to the new user's id
+        session.Advanced.ClusterTransaction
+            .CreateCompareExchangeValue("usernames/John", user.Id);
+
+        session.SaveChanges();
+    }
+}
+`}
+
+
+
+{`using (var session = store.OpenAsyncSession(new SessionOptions
+{
+    //default is: TransactionMode.SingleNode
+    TransactionMode = TransactionMode.ClusterWide
+}))
+{
+    var user = new Employee
+    {
+        FirstName = "John",
+        LastName = "Doe"
+    };
+    await session.StoreAsync(user);
+
+    // this transaction is now conditional on this being
+    // successfully created (so, no other users with this name)
+    // it also creates an association to the new user's id
+    session.Advanced.ClusterTransaction
+        .CreateCompareExchangeValue("usernames/John", user.Id);
+
+    await session.SaveChangesAsync();
+}
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_saving-changes-java.mdx b/versioned_docs/version-7.1/client-api/session/_saving-changes-java.mdx
new file mode 100644
index 0000000000..c7d5192809
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_saving-changes-java.mdx
@@ -0,0 +1,163 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Pending session operations like `store`, `delete`, and many others, will not be sent to the server until `saveChanges` is called.
+
+
+
+Whenever you execute `saveChanges()` to send a batch of operations like put, update, or delete in a request,
+the server will wrap these operations in a [transaction](../../client-api/faq/transaction-support.mdx) upon execution in the database.
+
+Either all operations will be saved as a single, atomic transaction or none of them will be.
+Once `saveChanges()` returns successfully, it is guaranteed that all changes are persisted in the database.
+
+
+
+## Syntax
+
+
+
+{`void saveChanges();
+`}
+
+
+
+### Example
+
+
+
+{`Employee employee = new Employee();
+employee.setFirstName("John");
+employee.setLastName("Doe");
+
+session.store(employee);
+session.saveChanges();
+`}
+
+
+
+## Waiting for Indexes
+
+You can ask the server to wait until the indexes are caught up with changes made within the current session before the `saveChanges` returns.
+
+* You can set a timeout (default: 15 seconds).
+* You can specify whether you want to throw on timeout (default: `false`).
+* You can specify indexes that you want to wait for. If you don't specify anything here, RavenDB will automatically select just the indexes that are impacted
+by this write. 
+
+
+
+{`session.advanced().waitForIndexesAfterSaveChanges(builder -> \{
+    builder.withTimeout(Duration.ofSeconds(30))
+        .throwOnTimeout(true)
+        .waitForIndexes("index/1", "index/2");
+
+    Employee employee = new Employee();
+    employee.setFirstName("John");
+    employee.setLastName("Doe");
+    session.store(employee);
+
+    session.saveChanges();
+\});
+`}
+
+
+
+
+
+## Waiting for Replication - Write Assurance
+
+Sometimes you might need to ensure that changes made in the session will be replicated to more than one node of the cluster before the `saveChanges` returns.
+It can be useful if you have some writes that are really important, so you want to be sure the stored values will reside on multiple machines. It might also be necessary
+when you customize [the read balance behavior](../../client-api/configuration/load-balance/overview.mdx) and need to ensure the next request from the user
+will be able to read what he or she just wrote (the next open session might access a different node).
+
+You can ask the server to wait until the replication is caught up with those particular changes.
+
+* You can set a timeout (default: 15 seconds).
+* You can specify whether you want to throw on timeout, which may happen in case of network issues (default: `true`).
+* You can specify how many replicas (nodes) the current write must be replicated to before the `saveChanges` returns (default: 1).
+* You can specify whether the `saveChanges` will return only when the current write has been replicated to a majority of the nodes (default: `false`).
+
+
+
+{`session
+    .advanced()
+    .waitForReplicationAfterSaveChanges(builder -> \{
+        builder.withTimeout(Duration.ofSeconds(30))
+            .throwOnTimeout(false) //default true
+            .numberOfReplicas(2) //minimum replicas to replicate
+            .majority(false);
+    \});
+
+Employee employee = new Employee();
+employee.setFirstName("John");
+employee.setLastName("Doe");
+
+session.store(employee);
+session.saveChanges();
+`}
+
+
+
+The `waitForReplicationAfterSaveChanges` method waits only for replicas which are part of the cluster. It means that external replication destinations are not counted towards the number specified in the `replicas` parameter, since they are not part of the cluster.
+
+
+
+The usage of `waitForReplicationAfterSaveChanges` doesn't involve a distributed transaction (those are not supported since RavenDB 4.0). Even if RavenDB was not able
+to write your changes to the number of replicas you specified, the data has already been written to some nodes. You will get an error, but the data is already there.
+
+This is a powerful feature, but you need to be aware of the possible pitfalls of using it.
+
+
+
+
+
+## Transaction Mode - Cluster Wide
+
+Setting `transactionMode` to `TransactionMode.CLUSTER_WIDE` will enable the [Cluster Transactions](../../server/clustering/cluster-transactions.mdx) feature.
+
+With this feature enabled, the [Session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) will support the following _write_ commands:
+
+- `store`
+- `delete`
+- `createCompareExchangeValue`
+- `updateCompareExchangeValue`
+- `deleteCompareExchangeValue`
+
+
+Here is an example of creating a unique user using a cluster-wide transaction. 
+ + + +{`try (IDocumentStore store = new DocumentStore()) \{ + SessionOptions sessionOptions = new SessionOptions(); + // default is: TransactionMode.SINGLE_NODE + sessionOptions.setTransactionMode(TransactionMode.CLUSTER_WIDE); + try (IDocumentSession session = store.openSession(sessionOptions)) \{ + Employee user = new Employee(); + user.setFirstName("John"); + user.setLastName("Doe"); + + session.store(user); + + // this transaction is now conditional on this being + // successfully created (so, no other users with this name) + // it also creates an association to the new user's id + session.advanced().clusterTransaction() + .createCompareExchangeValue("usernames/John", user.getId()); + + session.saveChanges(); + \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/_saving-changes-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/_saving-changes-nodejs.mdx new file mode 100644 index 0000000000..a045b744da --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/_saving-changes-nodejs.mdx @@ -0,0 +1,114 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Pending session operations like `store()`, `delete()`, and many others, will not be sent to the server until `saveChanges()` is called. + + + +Whenever you execute `saveChanges()` to send a batch of operations like put, update, or delete in a request, +the server will wrap these operations in a [transaction](../../client-api/faq/transaction-support.mdx) upon execution in the database. + +Either all operations will be saved as a single, atomic transaction or none of them will be. +Once `saveChanges()` returns successfully, it is guaranteed that all changes are persisted in the database. + + + +## Syntax + + + +{`session.saveChanges(); +`} + + + +| Return Value | | +| ------------- | ----- | +| `Promise` | A `Promise` resolved once session changes are persisted to the server | + +### Example + + + +{`const employee = new Employee("John", "Doe"); +await session.store(employee); +await session.saveChanges(); +`} + + + +## Waiting for Indexes + +You can ask the server to wait until the indexes are caught up with changes made within the current session before the `saveChanges()` returns. + +* You can set a timeout (default: 15 seconds). +* You can specify whether you want to throw on timeout (default: `false`). +* You can specify indexes that you want to wait for. If you don't specify anything here, RavenDB will automatically select just the indexes that are impacted +by this write. + + + +{`session.advanced.waitForIndexesAfterSaveChanges(\{ + indexes: ["index/1", "index/2"], + throwOnTimeout: true, + timeout: 30 * 1000 // 30 seconds in ms +\}); + +const employee = new Employee("John", "Doe"); +await session.store(employee); +await session.saveChanges(); +`} + + + + + +## Waiting for Replication - Write Assurance + +Sometimes you might need to ensure that changes made in the session will be replicated to more than one node of the cluster before the `saveChanges()` returns. +It can be useful if you have some writes that are really important so you want to be sure the stored values will reside on multiple machines. 
It might also be necessary
+when you customize [the read balance behavior](../../client-api/configuration/load-balance/read-balance-behavior.mdx) and need to ensure the next request from the user
+will be able to read what he or she just wrote (the next open session might access a different node).
+
+You can ask the server to wait until the replication is caught up with those particular changes.
+
+* You can set a timeout (default: 15 seconds).
+* You can specify whether you want to throw on timeout, which may happen in case of network issues (default: `true`).
+* You can specify how many replicas (nodes) the current write must be replicated to before the `saveChanges` returns (default: 1).
+* You can specify whether the `saveChanges()` will return only when the current write has been replicated to a majority of the nodes (default: `false`).
+
+
+
+{`session.advanced
+    .waitForReplicationAfterSaveChanges(\{
+        throwOnTimeout: false, // default true
+        timeout: 30000,
+        replicas: 2, // minimum replicas to replicate
+        majority: false
+    \});
+
+const employee = new Employee("John", "Doe");
+await session.store(employee);
+await session.saveChanges();
+`}
+
+
+
+The `waitForReplicationAfterSaveChanges` method waits only for replicas which are part of the cluster. It means that external replication destinations are not counted towards the number specified in the `replicas` parameter, since they are not part of the cluster.
+
+
+
+The usage of `waitForReplicationAfterSaveChanges` doesn't involve a distributed transaction (those are not supported since RavenDB 4.0). Even if RavenDB was not able
+to write your changes to the number of replicas you specified, the data has already been written to some nodes. You will get an error, but the data is already there.
+
+This is a powerful feature, but you need to be aware of the possible pitfalls of using it.
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_saving-changes-php.mdx b/versioned_docs/version-7.1/client-api/session/_saving-changes-php.mdx
new file mode 100644
index 0000000000..c359d78013
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_saving-changes-php.mdx
@@ -0,0 +1,121 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Pending session operations like `store`, `delete`, and many others, will not be sent to the server until `saveChanges` is called.
+
+
+
+When `saveChanges()` is applied to send a batch of operations (e.g. `put`, `update`, and `delete`)
+in a request, the server will wrap these operations in a [transaction](../../client-api/faq/transaction-support.mdx)
+upon execution in the database.
+
+Either all operations are performed as a single, atomic transaction, or none of them are.
+Once `saveChanges()` returns successfully, it is guaranteed that all changes are persisted in the database.
+
+
+
+## Syntax
+
+
+
+{`public function saveChanges(): void;
+`}
+
+
+
+### Example
+
+
+
+{`$employee = new Employee();
+$employee->setFirstName("John");
+$employee->setLastName("Doe");
+
+$session->store($employee);
+$session->saveChanges();
+`}
+
+
+
+## Waiting for Indexes
+
+You can request the server to wait until the indexes are caught up with changes made within
+the current session before `saveChanges` returns.
+
+* You can set a timeout (default: 15 seconds).
+* You can specify whether you want to throw on timeout (default: `false`). 
+* You can specify indexes that you want to wait for. If you don't specify anything here, RavenDB will automatically select just the indexes that are impacted
+by this write.
+
+
+
+{`$session->advanced()->waitForIndexesAfterSaveChanges(function ($builder) use ($session) \{
+    $builder->withTimeout(Duration::ofSeconds(30))
+        ->throwOnTimeout(true)
+        ->waitForIndexes("index/1", "index/2");
+
+    $employee = new Employee();
+    $employee->setFirstName("John");
+    $employee->setLastName("Doe");
+
+    $session->store($employee);
+
+    $session->saveChanges();
+\});
+`}
+
+
+
+
+
+## Transaction Mode - Cluster Wide
+
+Setting `TransactionMode` to `TransactionMode::clusterWide()` will enable the [Cluster Transactions](../../server/clustering/cluster-transactions.mdx) feature.
+
+With this feature enabled, the [session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) will support the following _write_ commands:
+
+- `store`
+- `delete`
+- `createCompareExchangeValue`
+- `updateCompareExchangeValue`
+- `deleteCompareExchangeValue`
+
+Here is an example of creating a unique user using a cluster-wide transaction.
+
+
+
+{`try \{
+    $sessionOptions = new SessionOptions();
+    // default is: TransactionMode::singleNode();
+    $sessionOptions->setTransactionMode(TransactionMode::clusterWide());
+    $session = $store->openSession($sessionOptions);
+    try \{
+        $user = new Employee();
+        $user->setFirstName("John");
+        $user->setLastName("Doe");
+
+        $session->store($user);
+
+        // this transaction is now conditional on this being
+        // successfully created (so, no other users with this name)
+        // it also creates an association to the new user's id
+        $session->advanced()->clusterTransaction()
+            ->createCompareExchangeValue("usernames/John", $user->getId());
+
+        $session->saveChanges();
+    \} finally \{
+        $session->close();
+    \}
+\} finally \{
+    $store->close();
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_saving-changes-python.mdx b/versioned_docs/version-7.1/client-api/session/_saving-changes-python.mdx
new file mode 100644
index 0000000000..391fe79897
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_saving-changes-python.mdx
@@ -0,0 +1,155 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Pending session operations, e.g. `store`, `delete`, and many others, will not be sent to the server until `save_changes` is called.
+
+
+
+Whenever you execute `save_changes()` to send a batch of operations like put, update, or delete in a request,
+the server will wrap these operations in a [transaction](../../client-api/faq/transaction-support.mdx) upon execution in the database.
+
+Either all operations will be saved as a single, atomic transaction or none of them will be.
+Once `save_changes()` returns successfully, it is guaranteed that all changes are persisted in the database.
+
+
+
+## Syntax
+
+
+
+{`def save_changes(self) -> None:
+    ...
+`}
+
+
+
+### Example
+
+
+
+{`session.store(Employee(first_name="John", last_name="Doe"))
+
+session.save_changes()
+`}
+
+
+
+
+## Waiting for Indexes
+
+You can ask the server to wait until the indexes are caught up with changes made within the current session before the `save_changes` returns.
+
+* You can set a timeout (default: 15 seconds).
+* You can specify whether you want to throw on timeout (default: `False`).
+* You can specify indexes that you want to wait for. 
If you don't specify anything here, RavenDB will automatically select just the indexes that are impacted
+by this write.
+
+
+
+{`def _build_wait(idx_wait_opt_builder: InMemoryDocumentSessionOperations.IndexesWaitOptsBuilder) -> None:
+    idx_wait_opt_builder.with_timeout(timedelta(seconds=30))
+    idx_wait_opt_builder.throw_on_timeout(True)
+    idx_wait_opt_builder.wait_for_indexes("index/1", "index/2")
+
+# this function can also be passed as a lambda
+session.advanced.wait_for_indexes_after_save_changes(_build_wait)
+
+session.store(Employee(first_name="John", last_name="Doe"))
+
+session.save_changes()
+`}
+
+
+
+
+
+## Waiting for Replication - Write Assurance
+
+Sometimes you might need to ensure that changes made in the session will be replicated to more than one node of the cluster before the `save_changes` returns.
+It can be useful if you have some writes that are really important, so you want to be sure the stored values will reside on multiple machines. It might also be necessary
+when you customize [the read balance behavior](../../client-api/configuration/load-balance/read-balance-behavior.mdx) and need to ensure the next request from the user
+will be able to read what he or she just wrote (the next open session might access a different node).
+
+You can ask the server to wait until the replication is caught up with those particular changes.
+
+* You can set a timeout (default: 15 seconds).
+* You can specify whether you want to throw on timeout, which may happen in case of network issues (default: `True`).
+* You can specify how many replicas (nodes) the current write must be replicated to before the `save_changes` returns (default: 1).
+* You can specify whether the `save_changes` will return only when the current write has been replicated to a majority of the nodes (default: `False`).
+
+
+
+{`def _build_wait(
+    repl_wait_builder: InMemoryDocumentSessionOperations.ReplicationWaitOptsBuilder,
+) -> None:
+    repl_wait_builder.with_timeout(timedelta(seconds=30))
+    repl_wait_builder.throw_on_timeout(False)  # default True
+    repl_wait_builder.number_of_replicas(2)  # minimum replicas to replicate
+    repl_wait_builder.majority(False)
+
+# this function can also be passed as a lambda
+session.advanced.wait_for_replication_after_save_changes(_build_wait)
+
+session.store(Employee(first_name="John", last_name="Doe"))
+session.save_changes()
+`}
+
+
+
+The `wait_for_replication_after_save_changes` method waits only for replicas which are part of the cluster. It means that external replication destinations are not counted towards the number specified in the `replicas` parameter, since they are not part of the cluster.
+
+
+
+Even if RavenDB was not able to write your changes to the number of replicas you specified, the data has already been written to some nodes. You will get an error, but the data is already there.
+
+This is a powerful feature, but you need to be aware of the possible pitfalls of using it.
+
+
+
+
+
+## Transaction Mode - Cluster Wide
+
+Setting `TransactionMode` to `TransactionMode.CLUSTER_WIDE` will enable the [Cluster Transactions](../../server/clustering/cluster-transactions.mdx) feature.
+
+With this feature enabled, the [session](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) will support the following _write_ commands:
+
+- `store`
+- `delete`
+- `create_compare_exchange_value`
+- `update_compare_exchange_value`
+- `delete_compare_exchange_value`
+
+
+Here is an example of creating a unique user using a cluster-wide transaction. 
+
+
+
+{`with DocumentStore() as store:
+    with store.open_session(
+        session_options=SessionOptions(
+            # default is: TransactionMode.SINGLE_NODE
+            transaction_mode=TransactionMode.CLUSTER_WIDE
+        )
+    ) as session:
+        user = Employee(first_name="John", last_name="Doe")
+        session.store(user)
+
+        # this transaction is now conditional on this being
+        # successfully created (so, no other users with this name)
+        # it also creates an association to the new user's id
+        session.advanced.cluster_transaction.create_compare_exchange_value("usernames/John", user.Id)
+
+        session.save_changes()
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_storing-entities-csharp.mdx b/versioned_docs/version-7.1/client-api/session/_storing-entities-csharp.mdx
new file mode 100644
index 0000000000..5848680f18
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_storing-entities-csharp.mdx
@@ -0,0 +1,61 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To store entities inside the **session** object, use one of the three `Store` methods.
+
+## Syntax
+
+First overload: Stores the entity in a session, then extracts the ID from the entity or generates a new one if it's not available.
+
+
+
+{`void Store(object entity);
+`}
+
+
+
+Second overload: Stores the entity in a session with a given ID.
+
+
+
+{`void Store(object entity, string id);
+`}
+
+
+
+Third overload: Stores the entity in a session with a given ID and forces a concurrency check with the given change vector.
+
+
+
+{`void Store(object entity, string changeVector, string id);
+`}
+
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | `object` | Entity that will be stored |
+| **changeVector** | `string` | Entity changeVector, used for concurrency checks (`null` to skip the check) |
+| **id** | `string` | Entity will be stored under this ID (`null` to generate automatically) |
+
+## Example
+
+
+
+{`// generate Id automatically
+session.Store(new Employee
+\{
+    FirstName = "John",
+    LastName = "Doe"
+\});
+
+// send all pending operations to server, in this case only \`Put\` operation
+session.SaveChanges();
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_storing-entities-java.mdx b/versioned_docs/version-7.1/client-api/session/_storing-entities-java.mdx
new file mode 100644
index 0000000000..8169ac6bed
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_storing-entities-java.mdx
@@ -0,0 +1,61 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To store entities inside the **session** object, use one of the three `store` methods.
+
+## Syntax
+
+First overload: Stores the entity in a session, then extracts the ID from the entity or generates a new one if it's not available.
+
+
+
+{`void store(Object entity);
+`}
+
+
+
+Second overload: Stores the entity in a session with a given ID.
+
+
+
+{`void store(Object entity, String id);
+`}
+
+
+
+Third overload: Stores the entity in a session with a given ID and forces a concurrency check with the given change vector. 
+
+
+
+{`void store(Object entity, String changeVector, String id);
+`}
+
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | Object | Entity that will be stored |
+| **changeVector** | String | Entity changeVector, used for concurrency checks (`null` to skip the check) |
+| **id** | String | Entity will be stored under this ID (`null` to generate automatically) |
+
+## Example
+
+
+
+{`Employee employee = new Employee();
+employee.setFirstName("John");
+employee.setLastName("Doe");
+
+// generate Id automatically
+session.store(employee);
+
+// send all pending operations to server, in this case only \`Put\` operation
+session.saveChanges();
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_storing-entities-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/_storing-entities-nodejs.mdx
new file mode 100644
index 0000000000..23ba306a80
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_storing-entities-nodejs.mdx
@@ -0,0 +1,90 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To store entities inside the **session** object, use the `store()` method.
+
+## Syntax
+
+You can pass the following arguments to the `store()` method:
+
+- entity only: Stores the entity in the session, then extracts the ID from the entity or generates a new one if it's not available.
+
+
+
+{`session.store(entity, [documentType]);
+`}
+
+
+
+- entity and an ID: Stores the entity in the session with a given ID.
+
+
+
+{`session.store(entity, id);
+`}
+
+
+
+- entity, an ID, and store options: Stores the entity in the session with a given ID and forces a concurrency check with the given change vector.
+
+
+
+{`session.store(entity, id, [options]);
+session.store(entity, id, [documentType]);
+`}
+
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | object | Entity that will be stored |
+| **id** | string | Entity will be stored under this ID (`null` to generate automatically) |
+| **documentType** | class | Class used to determine the collection of the entity (extracted from the entity by default) |
+| **options** | object | Options object with the below properties: |
+| **changeVector** | string | Entity *change vector*, used for concurrency checks (`null` to skip the check) |
+| **documentType** | class | Class used to determine the collection of the entity (extracted from the entity by default) |
+
+| Return value | |
+| ------------- | ----- |
+| Promise | A promise resolved once the entity has obtained an ID and is stored in the Unit of Work |
+
+
+The `store()` method is asynchronous (since it may reach out to the server to get a new ID) and returns a `Promise`, so don't forget to use either `await` or `.then()` *before* saving changes.
+
+
+
+In order to comfortably use object literals as entities, set the function that derives the collection name from the content of the object: `store.conventions.findCollectionNameForObjectLiteral()`
+
+
+
+{`const store = new DocumentStore(urls, database);
+store.conventions.findCollectionNameForObjectLiteral = entity => entity["collection"];
+// ...
+store.initialize();
+`}
+
+
+
+This needs to be done before the `initialize()` call on the `DocumentStore` instance. If you fail to do so, your entities will end up in the *@empty* collection with a *UUID* for an ID. 
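+
+A minimal usage sketch under the convention configured above (the `collection` property name and the `Users` value are illustrative, not fixed by the API):
+
+
+
+{`// the convention above reads the collection name from the 'collection' property
+const user = \{ collection: "Users", name: "John" \};
+await session.store(user); // stored in the 'Users' collection
+await session.saveChanges();
+`}
+
+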
+
+
+
+## Example
+
+
+
+{`const employee = new Employee("John", "Doe");
+
+// generate Id automatically
+await session.store(employee);
+
+// send all pending operations to the server, in this case only a \`Put\` operation
+await session.saveChanges();
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_storing-entities-php.mdx b/versioned_docs/version-7.1/client-api/session/_storing-entities-php.mdx
new file mode 100644
index 0000000000..9471b6d11c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_storing-entities-php.mdx
@@ -0,0 +1,61 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To store entities inside the **session** object, use one of the three `store` methods.
+
+## Syntax
+
+First overload: Stores the entity in a session, then extracts the ID from the entity or generates a new one if it's not available.
+
+
+
+{`public function store(?object $entity): void;
+`}
+
+
+
+Second overload: Stores the entity in a session with a given ID.
+
+
+
+{`public function store(?object $entity, ?string $id): void;
+`}
+
+
+
+Third overload: Stores the entity in a session with a given ID and forces a concurrency check with the given change vector.
+
+
+
+{`public function store(?object $entity, ?string $id, ?string $changeVector): void;
+`}
+
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | `object` | Entity that will be stored |
+| **changeVector** | `string` | Entity change vector, used for concurrency checks (`null` to skip the check) |
+| **id** | `string` | Entity will be stored under this ID (`null` to generate an ID automatically) |
+
+## Example
+
+
+
+{`$employee = new Employee();
+$employee->setFirstName("John");
+$employee->setLastName("Doe");
+
+// generate Id automatically
+$session->store($employee);
+
+// send all pending operations to the server, in this case only a \`Put\` operation
+$session->saveChanges();
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_storing-entities-python.mdx b/versioned_docs/version-7.1/client-api/session/_storing-entities-python.mdx
new file mode 100644
index 0000000000..abbe70c920
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_storing-entities-python.mdx
@@ -0,0 +1,38 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To store entities inside the **session** object, use the `store` method.
+
+## Syntax
+
+Stores the entity in a session, then extracts the ID from the entity or generates a new one if it's not available.
+
+
+
+{`def store(self, entity: object, key: Optional[str] = None, change_vector: Optional[str] = None) -> None:
+    ...
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | `object` | Entity that will be stored |
+| **change_vector** | `str` | Entity change vector, used for concurrency checks (`None` to skip the check) |
+| **key** | `str` | Entity will be stored under this ID (`None` to generate an ID automatically) |
+
+## Example
+
+
+
+{`session.store(Employee(first_name="John", last_name="Doe"))
+
+# send all pending operations to the server, in this case only a 'Put' operation
+session.save_changes()
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_updating-entities-csharp.mdx b/versioned_docs/version-7.1/client-api/session/_updating-entities-csharp.mdx
new file mode 100644
index 0000000000..5f92e5432d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_updating-entities-csharp.mdx
@@ -0,0 +1,125 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To modify existing documents:
+
+  * **Retrieve** documents from the database using [Load](../../client-api/session/loading-entities.mdx#load) or by a [Query](../../client-api/session/querying/how-to-query.mdx#sessionquery).
+    The entities loaded from the documents are added to the internal entities map that the Session manages.
+
+  * **Edit** the properties you wish to change.
+    The session will track all changes made to the loaded entities.
+
+  * **Save** to apply the changes.
+    Once `SaveChanges()` returns it is guaranteed that the data is persisted in the database.
+
+
+
+* In this page:
+  * [Load a document & update](../../client-api/session/updating-entities.mdx#load-a-document--update)
+  * [Query for documents & update](../../client-api/session/updating-entities.mdx#query-for-documents--update)
+
+
+## Load a document & update
+
+* In this example we `Load` a company document and update its **PostalCode** property.
+
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Load a company document
+    // The entity loaded from the document will be added to the Session's entities map
+    Company company = session.Load<Company>("companies/1-A");
+
+    // Update the company's PostalCode
+    company.Address.PostalCode = "TheNewPostalCode";
+
+    // Apply changes
+    session.SaveChanges();
+}
+`}
+
+
+
+
+{`using (var asyncSession = store.OpenAsyncSession())
+{
+    // Load a document
+    // The entity loaded from the document will be added to the Session's entities map
+    Company company = await asyncSession.LoadAsync<Company>("companies/KitchenAppliances");
+
+    // Update the company's PostalCode
+    company.Address.PostalCode = "TheNewPostalCode";
+
+    // Apply changes
+    await asyncSession.SaveChangesAsync();
+}
+`}
+
+
+
+
+
+
+## Query for documents & update
+
+* In this example we `Query` for company documents whose **PostalCode** property is _12345_,
+  and modify this property for the matching documents.
+
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Query: find companies with the specified PostalCode
+    // The entities loaded from the matching documents will be added to the Session's entities map
+    IRavenQueryable<Company> query = session.Query<Company>()
+        .Where(c => c.Address.PostalCode == "12345");
+
+    var matchingCompanies = query.ToList();
+
+    // Update the PostalCode for the resulting company documents
+    for (var i = 0; i < matchingCompanies.Count; i++)
+    {
+        matchingCompanies[i].Address.PostalCode = "TheNewPostalCode";
+    }
+
+    // Apply changes
+    session.SaveChanges();
+}
+`}
+
+
+
+
+{`using (var asyncSession = store.OpenAsyncSession())
+{
+    // Query: find companies with the specified PostalCode
+    // The entities loaded from the matching documents will be added to the Session's entities map
+    IRavenQueryable<Company> query = asyncSession.Query<Company>()
+        .Where(c => c.Address.PostalCode == "12345");
+
+    var matchingCompanies = await query.ToListAsync();
+
+    // Update the PostalCode for the resulting company documents
+    for (var i = 0; i < matchingCompanies.Count; i++)
+    {
+        matchingCompanies[i].Address.PostalCode = "TheNewPostalCode";
+    }
+
+    // Apply changes
+    await asyncSession.SaveChangesAsync();
+}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_updating-entities-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/_updating-entities-nodejs.mdx
new file mode 100644
index 0000000000..236c536cee
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_updating-entities-nodejs.mdx
@@ -0,0 +1,94 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To modify existing documents:
+
+  * **Retrieve** documents from the database using [load](../../client-api/session/loading-entities.mdx#load) or by a [query](../../client-api/session/querying/how-to-query.mdx#sessionquery).
+    The entities loaded from the documents are added to the internal entities map that the Session manages.
+
+  * **Edit** the properties you wish to change.
+    The session will track all changes made to the loaded entities.
+
+  * **Save** to apply the changes.
+    Once `saveChanges()` returns it is guaranteed that the data is persisted in the database.
+
+
+
+* In this page:
+  * [Load a document & update](../../client-api/session/updating-entities.mdx#load-a-document--update)
+  * [Query for documents & update](../../client-api/session/updating-entities.mdx#query-for-documents--update)
+
+
+## Load a document & update
+
+* In this example we `load` a company document and update its **postalCode** property.
+
+
+
+{`// Sample company document structure
+class Address \{
+    constructor(code) \{
+        this.postalCode = code;
+    \}
+\}
+
+class Company \{
+    constructor(name, code) \{
+        this.name = name;
+        this.address = new Address(code);
+    \}
+\}
+`}
+
+
+
+
+
+{`const session = documentStore.openSession();
+
+// Load a company document
+// The entity loaded from the document will be added to the Session's entities map
+const company = await session.load("companies/1-A");
+
+// Update the company's postalCode
+company.address.postalCode = "TheNewPostalCode";
+
+// Apply changes
+await session.saveChanges();
+`}
+
+
+
+
+
+## Query for documents & update
+
+* In this example we `query` for company documents whose **postalCode** property is _12345_,
+  and modify this property for the matching documents.
+ + + +{`const session = documentStore.openSession(); + +// Query: find companies with the specified postalCode +// The entities loaded from the matching documents will be added to the Session's entities map +const matchingCompanies = await session.query(\{collection: "Companies"\}) + .whereEquals("address.postalCode", "12345") + .all(); + +// Update the postalCode for the resulting company documents +matchingCompanies.forEach(c => c.address.postalCode = "TheNewPostalCode"); + +// Apply changes +await session.saveChanges(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/_updating-entities-php.mdx b/versioned_docs/version-7.1/client-api/session/_updating-entities-php.mdx new file mode 100644 index 0000000000..9bb8ba1ace --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/_updating-entities-php.mdx @@ -0,0 +1,90 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To modify existing documents: + + * **Retrieve** documents from the database using [load](../../client-api/session/loading-entities.mdx#load) + or by a [query](../../client-api/session/querying/how-to-query.mdx#sessionquery). + The entities loaded from the documents are added to the internal entities map that the session manages. + + * **Edit** the properties you wish to change. + The session will track all changes made to the loaded entities. + + * **Save** to apply the changes. + Once `saveChanges()` returns it is guaranteed that the data is persisted in the database. + + + +* In this page: + * [Load a document & update](../../client-api/session/updating-entities.mdx#load-a-document--update) + * [Query for documents & update](../../client-api/session/updating-entities.mdx#query-for-documents--update) + + +## Load a document & update + +* In this example we `load` a company document and update its **PostalCode** property. + + + +{`$session = $store->openSession(); +try \{ + // Load a company document + // The entity loaded from the document will be added to the Session's entities map + /** @var Company $company */ + $company = $session->load(Company::class, "companies/1-A"); + + // Update the company's PostalCode + $address = $company->getAddress(); + $address->setPostalCode("TheNewPostalCode"); + $company->setAddress($address); + + // Apply changes + $session->saveChanges(); +\} finally \{ + $session->close(); +\} +`} + + + + + +## Query for documents & update + +* In this example we `query` for company documents whose **PostalCode** property is _12345_, + and modify this property for the matching documents. 
+
+
+
+{`$session = $store->openSession();
+try \{
+    // Query: find companies with the specified PostalCode
+    // The entities loaded from the matching documents will be added to the Session's entities map
+    $query = $session->query(Company::class)
+        ->whereEquals("address.postal_code", "12345");
+
+    $matchingCompanies = $query->toList();
+
+    // Update the PostalCode for the resulting company documents
+    for ($i = 0; $i < count($matchingCompanies); $i++) \{
+        $address = $matchingCompanies[$i]->getAddress();
+        $address->setPostalCode("TheNewPostalCode");
+        $matchingCompanies[$i]->setAddress($address);
+    \}
+
+    // Apply changes
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_updating-entities-python.mdx b/versioned_docs/version-7.1/client-api/session/_updating-entities-python.mdx
new file mode 100644
index 0000000000..5a0fb18215
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_updating-entities-python.mdx
@@ -0,0 +1,79 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To modify existing documents:
+
+  * **Retrieve** documents from the database using [load](../../client-api/session/loading-entities.mdx#load) or by a [query](../../client-api/session/querying/how-to-query.mdx#sessionquery).
+    The entities loaded from the documents are added to the internal entities map that the Session manages.
+
+  * **Edit** the properties you wish to change.
+    The session will track all changes made to the loaded entities.
+
+  * **Save** to apply the changes.
+    Once `save_changes()` returns it is guaranteed that the data is persisted in the database.
+
+
+
+* In this page:
+  * [Load a document & update](../../client-api/session/updating-entities.mdx#load-a-document--update)
+  * [Query for documents & update](../../client-api/session/updating-entities.mdx#query-for-documents--update)
+
+
+## Load a document & update
+
+* In this example we `load` a company document and update its **PostalCode** property.
+
+
+
+{`with store.open_session() as session:
+    # Load a company document
+    # The entity loaded from the document will be added to the Session's entities map
+    company = session.load("companies/1-A", Company)
+
+    # Update the company's postal_code
+    company.address["postal_code"] = "TheNewPostalCode"
+
+    # In the Python client, nested objects are loaded as dicts for convenience
+    # You can customize and control your class (from/to) JSON conversion to fit any case
+    # Implement the classmethod 'from_json(json_dict) -> YourType' to control how the object is read
+    # Implement the method 'to_json() -> Dict' to manage how it's serialized
+
+    # Apply changes
+    session.save_changes()
+`}
+
+
+
+
+
+## Query for documents & update
+
+* In this example we `query` for company documents whose **PostalCode** property is _12345_,
+  and modify this property for the matching documents.
+
+
+
+{`with store.open_session() as session:
+    # Query: find companies with the specified postal_code
+    # The entities loaded from the matching documents will be added to the Session's entities map
+    query = session.query(object_type=Company).where_equals("address.postal_code", "12345")
+
+    matching_companies = list(query)
+
+    # Update the postal_code for the resulting company documents
+    for company in matching_companies:
+        company.address["postal_code"] = "TheNewPostalCode"
+
+    # Apply changes
+    session.save_changes()
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-csharp.mdx b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-csharp.mdx
new file mode 100644
index 0000000000..ea2e9da71e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-csharp.mdx
@@ -0,0 +1,354 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The **Session**, which is obtained from the [Document Store](../../client-api/what-is-a-document-store.mdx),
+  is the primary interface your application will interact with.
+
+* In this page:
+  * [Session overview](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#session-overview)
+  * [Unit of work pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern)
+  * [Tracking changes](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes)
+  * [Create document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#create-document-example)
+  * [Modify document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#modify-document-example)
+  * [Identity map pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#identity-map-pattern)
+  * [Batching & Transactions](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#batching--transactions)
+  * [Concurrency control](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#concurrency-control)
+  * [Reducing server calls (best practices) for:](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#reducing-server-calls-(best-practices)-for:)
+  * [The N+1 problem](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#the-select-n1-problem)
+  * [Large query results](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#large-query-results)
+  * [Retrieving results on demand (Lazy)](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#retrieving-results-on-demand-lazy)
+
+
+## Session overview
+
+* **What is the session**:
+
+  * The session (`IDocumentSession`/`IAsyncDocumentSession`) serves as a [Unit of Work](https://en.wikipedia.org/wiki/Unit_of_work) representing a single
+    **[Business Transaction](https://martinfowler.com/eaaCatalog/unitOfWork.html)** on a specific database (not to be confused with an [ACID transaction](../../client-api/faq/transaction-support.mdx)).
+
+  * It is a container that allows you to query for documents and load, create, or update entities
+    while keeping track of changes.
+
+  * Basic document CRUD actions and document Queries are available through the `Session`.
+    More advanced options are available using the `Advanced` Session operations.
+
+* **Batching modifications**:
+  A business transaction usually involves multiple requests such as loading of documents or execution of queries.
+  Calling [SaveChanges()](../../client-api/session/saving-changes.mdx) indicates the completion of the client-side business logic.
+  At this point, all modifications made within the session are batched and sent together in a **single HTTP request** to the server to be persisted as a single ACID transaction.
+
+* **Tracking changes**:
+  Based on the [Unit of Work](https://martinfowler.com/eaaCatalog/unitOfWork.html) and the [Identity Map](https://martinfowler.com/eaaCatalog/identityMap.html) patterns,
+  the session tracks all changes made to all entities that it has either loaded, stored, deleted, or queried for.
+  Only the modifications are sent to the server when _SaveChanges()_ is called.
+
+* **Client side object**:
+  The session is a pure client side object. Opening the session does Not establish any connection to a database,
+  and the session's state isn't reflected on the server side during its duration.
+
+* **Configurability**:
+  Various aspects of the session are configurable.
+  For example, the number of server requests allowed per session is [configurable](../../client-api/session/configuration/how-to-change-maximum-number-of-requests-per-session.mdx) (default is 30).
+
+* **The session and ORM Comparison**:
+  The RavenDB Client API is a native way to interact with a RavenDB database.
+  It is _not_ an Object–relational mapping (ORM) tool. However, if you're familiar with the NHibernate or Entity Framework ORMs, you'll recognize that
+  the session is the equivalent of NHibernate's session and Entity Framework's DbContext, which implement the UoW pattern as well.
+
+
+
+## Unit of work pattern
+
+#### Tracking changes
+
+* Using the Session, perform needed operations on your documents.
+  e.g. create a new document, modify an existing document, query for documents, etc.
+* Any such operation '*loads*' the document as an entity to the Session,
+  and the entity is added to the **Session's entities map**.
+* The Session **tracks all changes** made to all entities stored in its internal map.
+  You don't need to manually track the changes and decide what needs to be saved and what doesn't,
+  the Session will do it for you.
+* Prior to saving, you can review the changes made if necessary. See:
+  * [Get tracked entities](../../client-api/session/how-to/get-tracked-entities.mdx)
+  * [Check for session changes](../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx)
+* All the tracked changes are combined & persisted in the database only when calling `SaveChanges()`.
+* Entity tracking can be disabled if needed. See:
+  * [Disable entity tracking](../../client-api/session/configuration/how-to-disable-tracking.mdx)
+  * [Clear session](../../client-api/session/how-to/clear-a-session.mdx)
+  * [Evict a single entity](../../client-api/session/how-to/evict-entity-from-a-session.mdx)
+
+#### Create document example
+
+* The Client API, and the Session in particular, is designed to be as straightforward as possible.
+  Open the session, do some operations, and apply the changes to the RavenDB server.
+* The following example shows how to create a new document in the database using the Session.
+
+
+
+
+{`// Obtain a Session from your Document Store
+using (IDocumentSession session = store.OpenSession())
+{
+    // Create a new entity
+    Company entity = new Company { Name = "CompanyName" };
+
+    // Store the entity in the Session's internal map
+    session.Store(entity);
+    // From now on, any changes that will be made to the entity will be tracked by the Session.
+    // However, the changes will be persisted to the server only when 'SaveChanges()' is called.
+
+    session.SaveChanges();
+    // At this point the entity is persisted to the database as a new document.
+    // Since no database was specified when opening the Session, the Default Database is used.
+}
+`}
+
+
+
+
+{`// Obtain a Session from your Document Store
+using (IAsyncDocumentSession asyncSession = store.OpenAsyncSession())
+{
+    // Create a new entity
+    Company entity = new Company { Name = "CompanyName" };
+
+    // Store the entity in the Session's internal map
+    await asyncSession.StoreAsync(entity);
+    // From now on, any changes that will be made to the entity will be tracked by the Session.
+    // However, the changes will be persisted to the server only when 'SaveChanges()' is called.
+
+    await asyncSession.SaveChangesAsync();
+    // At this point the entity is persisted to the database as a new document.
+    // Since no database was specified when opening the Session, the Default Database is used.
+}
+`}
+
+
+
+
+#### Modify document example
+* The following example modifies the content of an existing document.
+
+
+
+
+{`// Open a session
+using (IDocumentSession session = store.OpenSession())
+{
+    // Load an existing document to the Session using its ID
+    // The loaded entity will be added to the session's internal map
+    Company entity = session.Load<Company>(companyId);
+
+    // Edit the entity, the Session will track this change
+    entity.Name = "NewCompanyName";
+
+    session.SaveChanges();
+    // At this point, the change made is persisted to the existing document in the database
+}
+`}
+
+
+
+
+{`// Open a Session
+using (IAsyncDocumentSession asyncSession = store.OpenAsyncSession())
+{
+    // Load an existing document to the Session using its ID
+    // The loaded entity will be added to the session's internal map
+    Company entity = await asyncSession.LoadAsync<Company>(companyId);
+
+    // Edit the entity, the Session will track this change
+    entity.Name = "NewCompanyName";
+
+    await asyncSession.SaveChangesAsync();
+    // At this point, the change made is persisted to the existing document in the database
+}
+`}
+
+
+
+
+
+
+## Identity map pattern
+
+* The session implements the [Identity Map Pattern](https://martinfowler.com/eaaCatalog/identityMap.html).
+* The first `Load()` call goes to the server and fetches the document from the database.
+  The document is then stored as an entity in the Session's entities map.
+* All subsequent `Load()` calls to the same document will simply retrieve the entity from the Session -
+  no additional calls to the server are made.
+
+
+
+
+{`// A document is fetched from the server
+Company entity1 = session.Load<Company>(companyId);
+
+// Loading the same document will now retrieve its entity from the Session's map
+Company entity2 = session.Load<Company>(companyId);
+
+// This command will Not throw an exception
+Assert.Same(entity1, entity2);
+`}
+
+
+
+
+{`// A document is fetched from the server
+Company entity1 = await asyncSession.LoadAsync<Company>(companyId);
+
+// Loading the same document will now retrieve its entity from the Session's map
+Company entity2 = await asyncSession.LoadAsync<Company>(companyId);
+
+// This command will Not throw an exception
+Assert.Same(entity1, entity2);
+`}
+
+
+
+
+* Note:
+  To override this behavior and force `Load()` to fetch the latest changes from the server see:
+  [Refresh an entity](../../client-api/session/how-to/refresh-entity.mdx).
+
+
+
+## Batching & Transactions
+
+
+
+#### Batching
+
+* Remote calls to a server over the network are among the most expensive operations an application makes.
+  The session optimizes this by batching all **write operations** it has tracked into the `SaveChanges()` call.
+* When calling _SaveChanges_, the session evaluates its state to identify all pending changes requiring persistence in the database.
+  These changes are then combined into a single batch that is sent to the server in a **single remote call** and executed as a single ACID transaction.
+
+
+
+
+#### Transactions
+
+* The client API does not provide transactional semantics over the entire session.
+  The session **does not** represent a [transaction](../../client-api/faq/transaction-support.mdx) (nor a transaction scope) in terms of ACID transactions.
+* RavenDB provides transactions over individual requests, so each call made within the session's usage will be processed in a separate transaction on the server side.
+  This applies to both reads and writes.
+
+##### Read transactions
+
+* Each call retrieving data from the database will generate a separate request. Multiple requests mean separate transactions.
+* The following options allow you to read _multiple_ documents in a single request:
+  * Using overloads of the [Load()](../../client-api/session/loading-entities.mdx#load---multiple-entities) method that specify a collection of IDs or a prefix of ID.
+  * Using [Include](../../client-api/session/loading-entities.mdx#load-with-includes) to retrieve additional documents in a single request.
+  * A query that can return multiple documents is executed in a single request,
+    hence it is processed in a single read transaction.
+
+##### Write transactions
+
+* The batched operations that are sent in the `SaveChanges()` complete transactionally, as this call generates a single request to the database.
+  In other words, either all changes are saved as a **Single Atomic Transaction** or none of them are.
+  So once _SaveChanges_ returns successfully, it is guaranteed that all changes are persisted to the database.
+* _SaveChanges_ is the only time when the RavenDB Client API sends updates to the server from the Session,
+  resulting in a reduced number of network calls.
+* To execute an operation that both loads and updates a document within the same write transaction, use the patching feature.
+  This can be done using either the [JavaScript patch](../../client-api/operations/patching/single-document.mdx) syntax or the [JSON Patch](../../client-api/operations/patching/json-patch-syntax.mdx) syntax, as shown in the sketch below.
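+
+For illustration, a minimal sketch of such a server-side patch (assuming a `Company` class with a `Name` property; the document ID is hypothetical):
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Define a patch for the Name property - no Load() is required,
+    // the document is modified server-side within the SaveChanges() transaction
+    session.Advanced.Patch<Company, string>("companies/1-A", c => c.Name, "PatchedName");
+
+    session.SaveChanges();
+}
+`}
+
+
+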
+
+
+
+#### Transaction mode
+
+* The session's transaction mode can be set to either:
+  * **Single-Node** - the transaction is executed on a specific node and then replicated
+  * **Cluster-Wide** - the transaction is registered for execution on all nodes in an atomic fashion
+  * The phrase "session's transaction mode" refers to the type of transaction that will be executed on the server-side when `SaveChanges()` is called.
+    As mentioned earlier, the session itself does not represent an ACID transaction.
+  * Learn more about these modes in [Cluster-wide vs. Single-node](../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) transactions.
+
+
+
+
+For a detailed description of transactions in RavenDB please refer to the [Transaction support in RavenDB](../../client-api/faq/transaction-support.mdx) article.
+
+
+
+
+## Concurrency control
+
+The typical usage model of the session is:
+
+  * Load documents
+  * Modify the documents
+  * Save changes
+
+For example, a real case scenario would be:
+
+  * Load an entity from a database.
+  * Display an Edit form on the screen.
+  * Update the entity after the user completes editing.
+
+When using the session, the interaction with the database is divided into two parts - the load part and the save-changes part.
+Each of these parts is executed separately, via its own HTTP request.
+Consequently, data that was loaded and edited could potentially be changed by another user in the meantime.
+To address this, the session API offers the concurrency control feature.
+
+#### Default strategy on single node
+
+* By default, concurrency checks are turned off.
+  This means that with the default configuration of the session, concurrent changes to the same document will use the Last Write Wins strategy.
+
+* The second write of an updated document will override the previous version, causing potential data loss.
+  This behavior should be considered when using the session with single node transaction mode.
+
+#### Optimistic concurrency on single node
+
+* The modification or editing stage can extend over a considerable time period or may occur offline.
+  To prevent conflicting writes, where a document is modified while it is being edited by another user or client,
+  the session can be configured to employ [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx).
+
+* Once optimistic concurrency is enabled, the session performs version tracking to ensure that any document modified within the session has not been altered in the database since it was loaded into the session.
+  The version is tracked using a [change vector](../../server/clustering/replication/change-vector.mdx).
+
+* When `SaveChanges()` is called, the session additionally transmits the version of the modified documents to the database, allowing it to verify if any changes have occurred in the meantime.
+  If modifications are detected, the transaction will be aborted with a `ConcurrencyException`,
+  providing the caller with an opportunity to retry or handle the error as needed.
+
+#### Concurrency control in cluster-wide transactions
+
+* In a cluster-wide transaction scenario, the RavenDB server tracks a cluster-wide version for each modified document, updating it through the Raft protocol.
+  This means that when using a session with the cluster-wide transaction mode, a `ConcurrencyException` will be triggered upon calling `SaveChanges()`
+  if another user has modified a document and saved it in a separate cluster-wide transaction in the meantime.
+
+* More information about cluster transactions can be found in [Cluster Transaction - Overview](../../client-api/session/cluster-transaction/overview.mdx).
+
+
+
+## Reducing server calls (best practices) for:
+
+#### The select N+1 problem
+* The Select N+1 problem is common with all ORMs and ORM-like APIs.
+  It results in an excessive number of remote calls to the server, which makes a query very expensive.
+* Make use of RavenDB's `Include()` method to include related documents and avoid this issue.
+  See: [Document relationships](../../client-api/how-to/handle-document-relationships.mdx)
+
+#### Large query results
+* When query results are large and you don't want the overhead of keeping all results in memory,
+  then you can [Stream query results](../../client-api/session/querying/how-to-stream-query-results.mdx).
+  A single server call is executed and the client can handle the results one by one.
+* [Paging](../../indexes/querying/paging.mdx) also avoids getting all query results at one time,
+  however, multiple server calls are generated - one per page retrieved.
+
+#### Retrieving results on demand (Lazy)
+* Query calls to the server can be delayed and executed on-demand as needed using `Lazily()`.
+* See [Perform queries lazily](../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-java.mdx b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-java.mdx
new file mode 100644
index 0000000000..52d89165fc
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-java.mdx
@@ -0,0 +1,294 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The **Session**, which is obtained from the [Document Store](../../client-api/what-is-a-document-store.mdx),
+  is the primary interface your application will interact with.
+ +* In this page: + * [Session overview](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#session-overview) + * [Unit of work pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern) + * [Tracking changes](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) + * [Create document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#create-document-example) + * [Modify document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#modify-document-example) + * [Identity map pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#identity-map-pattern) + * [Batching & Transactions](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#batching--transactions) + * [Concurrency control](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#concurrency-control) + * [Reducing server calls (best practices) for:](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#reducing-server-calls-(best-practices)-for:) + * [The N+1 problem](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#the-select-n1-problem) + * [Large query results](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#large-query-results) + * [Retrieving results on demand (Lazy)](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#retrieving-results-on-demand-lazy) + + +## Session overview + +* **What is the session**: + + * The session (`IDocumentSession`/`IAsyncDocumentSession`) serves as a [Unit of Work](https://en.wikipedia.org/wiki/Unit_of_work) representing a single + **[Business Transaction](https://martinfowler.com/eaaCatalog/unitOfWork.html)** on a specific database (not to be confused with an [ACID transaction](../../client-api/faq/transaction-support.mdx)). + + * It is a container that allows you to query for documents and load, create, or update entities + while keeping track of changes. + + * Basic document CRUD actions and document Queries are available through the `session`. + More advanced options are available using the `advanced` Session operations. + +* **Batching modifications**: + A business transaction usually involves multiple requests such as loading of documents or execution of queries. + Calling [saveChanges()](../../client-api/session/saving-changes.mdx) indicates the completion of the client-side business logic. + At this point, all modifications made within the session are batched and sent together in a **single HTTP request** to the server to be persisted as a single ACID transaction. + +* **Tracking changes**: + Based on the [Unit of Work](https://martinfowler.com/eaaCatalog/unitOfWork.html) and the [Identity Map](https://martinfowler.com/eaaCatalog/identityMap.html) patterns, + the session tracks all changes made to all entities that it has either loaded, stored, deleted, or queried for. + Only the modifications are sent to the server when _saveChanges()_ is called. + +* **Client side object**: + The session is a pure client side object. Opening the session does Not establish any connection to a database, + and the session's state isn't reflected on the server side during its duration. + +* **Configurability**: + Various aspects of the session are configurable. + For example, the number of server requests allowed per session is [configurable](../../client-api/session/configuration/how-to-change-maximum-number-of-requests-per-session.mdx) (default is 30). 
+
+* **The session and ORM Comparison**:
+  The RavenDB Client API is a native way to interact with a RavenDB database.
+  It is _not_ an Object–relational mapping (ORM) tool. However, if you're familiar with the NHibernate or Entity Framework ORMs, you'll recognize that
+  the session is the equivalent of NHibernate's session and Entity Framework's DbContext, which implement the UoW pattern as well.
+
+
+
+## Unit of work pattern
+
+#### Tracking changes
+
+* Using the Session, perform needed operations on your documents.
+  e.g. create a new document, modify an existing document, query for documents, etc.
+* Any such operation '*loads*' the document as an entity to the Session,
+  and the entity is added to the **Session's entities map**.
+* The Session **tracks all changes** made to all entities stored in its internal map.
+  You don't need to manually track the changes and decide what needs to be saved and what doesn't, the Session will do it for you.
+  Prior to saving, you can review the changes made if necessary. See: [Check for session changes](../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx).
+* All the tracked changes are combined & persisted in the database only when calling `saveChanges()`.
+* Entity tracking can be disabled if needed. See:
+  * [Disable entity tracking](../../client-api/session/configuration/how-to-disable-tracking.mdx)
+  * [Clear session](../../client-api/session/how-to/clear-a-session.mdx)
+  * [Evict a single entity](../../client-api/session/how-to/evict-entity-from-a-session.mdx)
+
+#### Create document example
+
+* The Client API, and the Session in particular, is designed to be as straightforward as possible.
+  Open the session, do some operations, and apply the changes to the RavenDB server.
+* The following example shows how to create a new document in the database using the Session.
+
+
+
+{`// Obtain a Session from your Document Store
+try (IDocumentSession session = store.openSession()) \{
+
+    // Create a new entity
+    Company entity = new Company();
+    entity.setName("Company");
+
+    // Store the entity in the Session's internal map
+    session.store(entity);
+    // From now on, any changes that will be made to the entity will be tracked by the Session.
+    // However, the changes will be persisted to the server only when 'saveChanges()' is called.
+
+    session.saveChanges();
+    // At this point the entity is persisted to the database as a new document.
+    // Since no database was specified when opening the Session, the Default Database is used.
+\}
+`}
+
+
+
+#### Modify document example
+* The following example modifies the content of an existing document.
+
+
+
+{`// Open a session
+try (IDocumentSession session = store.openSession()) \{
+    // Load an existing document to the Session using its ID
+    // The loaded entity will be added to the session's internal map
+    Company entity = session.load(Company.class, companyId);
+
+    // Edit the entity, the Session will track this change
+    entity.setName("NewCompanyName");
+
+    session.saveChanges();
+    // At this point, the change made is persisted to the existing document in the database
+\}
+`}
+
+
+
+
+
+## Identity map pattern
+
+* The session implements the [Identity Map Pattern](https://martinfowler.com/eaaCatalog/identityMap.html).
+* The first `load()` call goes to the server and fetches the document from the database.
+  The document is then stored as an entity in the Session's entities map.
+* All subsequent `load()` calls to the same document will simply retrieve the entity from the Session -
+  no additional calls to the server are made.
+
+
+
+{`// A document is fetched from the server
+Company entity1 = session.load(Company.class, companyId);
+
+// Loading the same document will now retrieve its entity from the Session's map
+Company entity2 = session.load(Company.class, companyId);
+
+// This command will Not throw an exception
+Assert.assertSame(entity1, entity2);
+`}
+
+
+
+* Note:
+  To override this behavior and force `load()` to fetch the latest changes from the server see:
+  [Refresh an entity](../../client-api/session/how-to/refresh-entity.mdx).
+
+
+
+## Batching & Transactions
+
+
+
+#### Batching
+
+* Remote calls to a server over the network are among the most expensive operations an application makes.
+  The session optimizes this by batching all **write operations** it has tracked into the `saveChanges()` call.
+* When calling _saveChanges_, the session evaluates its state to identify all pending changes requiring persistence in the database.
+  These changes are then combined into a single batch that is sent to the server in a **single remote call** and executed as a single ACID transaction.
+
+
+
+
+#### Transactions
+
+* The client API does not provide transactional semantics over the entire session.
+  The session **does not** represent a [transaction](../../client-api/faq/transaction-support.mdx) (nor a transaction scope) in terms of ACID transactions.
+* RavenDB provides transactions over individual requests, so each call made within the session's usage will be processed in a separate transaction on the server side.
+  This applies to both reads and writes.
+
+##### Read transactions
+
+* Each call retrieving data from the database will generate a separate request. Multiple requests mean separate transactions.
+* The following options allow you to read _multiple_ documents in a single request:
+  * Using overloads of the [load()](../../client-api/session/loading-entities.mdx#load---multiple-entities) method that specify a collection of IDs or a prefix of ID.
+  * Using [include](../../client-api/session/loading-entities.mdx#load-with-includes) to retrieve additional documents in a single request.
+  * A query that can return multiple documents is executed in a single request,
+    hence it is processed in a single read transaction.
+
+##### Write transactions
+
+* The batched operations that are sent in the `saveChanges()` complete transactionally, as this call generates a single request to the database.
+  In other words, either all changes are saved as a **Single Atomic Transaction** or none of them are.
+  So once _saveChanges_ returns successfully, it is guaranteed that all changes are persisted to the database.
+* _saveChanges_ is the only time when the RavenDB Client API sends updates to the server from the Session,
+  resulting in a reduced number of network calls.
+* To execute an operation that both loads and updates a document within the same write transaction, use the patching feature.
+  This can be done using either the [JavaScript patch](../../client-api/operations/patching/single-document.mdx) syntax or the [JSON Patch](../../client-api/operations/patching/json-patch-syntax.mdx) syntax, as shown in the sketch below.
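+
+For illustration, a minimal sketch of such a server-side patch (the document ID and property value are hypothetical):
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    // Define a patch for the "name" property - no load() is required,
+    // the document is modified server-side within the saveChanges() transaction
+    session.advanced().patch("companies/1-A", "name", "PatchedName");
+
+    session.saveChanges();
+\}
+`}
+
+
+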
+
+
+
+#### Transaction mode
+
+* The session's transaction mode can be set to either:
+  * **Single-Node** - the transaction is executed on a specific node and then replicated
+  * **Cluster-Wide** - the transaction is registered for execution on all nodes in an atomic fashion
+  * The phrase "session's transaction mode" refers to the type of transaction that will be executed on the server-side when `saveChanges()` is called.
+    As mentioned earlier, the session itself does not represent an ACID transaction.
+  * Learn more about these modes in [Cluster-wide vs. Single-node](../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) transactions.
+
+
+
+
+For a detailed description of transactions in RavenDB please refer to the [Transaction support in RavenDB](../../client-api/faq/transaction-support.mdx) article.
+
+
+
+
+## Concurrency control
+
+The typical usage model of the session is:
+
+  * Load documents
+  * Modify the documents
+  * Save changes
+
+For example, a real case scenario would be:
+
+  * Load an entity from a database.
+  * Display an Edit form on the screen.
+  * Update the entity after the user completes editing.
+
+When using the session, the interaction with the database is divided into two parts - the load part and the save-changes part.
+Each of these parts is executed separately, via its own HTTP request.
+Consequently, data that was loaded and edited could potentially be changed by another user in the meantime.
+To address this, the session API offers the concurrency control feature.
+
+#### Default strategy on single node
+
+* By default, concurrency checks are turned off.
+  This means that with the default configuration of the session, concurrent changes to the same document will use the Last Write Wins strategy.
+
+* The second write of an updated document will override the previous version, causing potential data loss.
+  This behavior should be considered when using the session with single node transaction mode.
+
+#### Optimistic concurrency on single node
+
+* The modification or editing stage can extend over a considerable time period or may occur offline.
+  To prevent conflicting writes, where a document is modified while it is being edited by another user or client,
+  the session can be configured to employ [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx).
+
+* Once optimistic concurrency is enabled, the session performs version tracking to ensure that any document modified within the session has not been altered in the database since it was loaded into the session.
+  The version is tracked using a [change vector](../../server/clustering/replication/change-vector.mdx).
+
+* When `saveChanges()` is called, the session additionally transmits the version of the modified documents to the database, allowing it to verify if any changes have occurred in the meantime.
+  If modifications are detected, the transaction will be aborted with a `ConcurrencyException`,
+  providing the caller with an opportunity to retry or handle the error as needed.
+
+#### Concurrency control in cluster-wide transactions
+
+* In a cluster-wide transaction scenario, the RavenDB server tracks a cluster-wide version for each modified document, updating it through the Raft protocol.
+  This means that when using a session with the cluster-wide transaction mode, a `ConcurrencyException` will be triggered upon calling `saveChanges()`
+  if another user has modified a document and saved it in a separate cluster-wide transaction in the meantime.
+
+* More information about cluster transactions can be found in [Cluster Transaction - Overview](../../client-api/session/cluster-transaction/overview.mdx).
+
+
+
+## Reducing server calls (best practices) for:
+
+#### The select N+1 problem
+* The Select N+1 problem is common with all ORMs and ORM-like APIs.
+  It results in an excessive number of remote calls to the server, which makes a query very expensive.
+* Make use of RavenDB's `include()` method to include related documents and avoid this issue.
+  See: [Document relationships](../../client-api/how-to/handle-document-relationships.mdx)
+
+#### Large query results
+* When query results are large and you don't want the overhead of keeping all results in memory,
+  then you can [Stream query results](../../client-api/session/querying/how-to-stream-query-results.mdx).
+  A single server call is executed and the client can handle the results one by one.
+* [Paging](../../indexes/querying/paging.mdx) also avoids getting all query results at one time,
+  however, multiple server calls are generated - one per page retrieved.
+
+#### Retrieving results on demand (Lazy)
+* Query calls to the server can be delayed and executed on-demand as needed using `lazily()`.
+* See [Perform queries lazily](../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-nodejs.mdx
new file mode 100644
index 0000000000..e481f64a20
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-nodejs.mdx
@@ -0,0 +1,353 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The **session**, which is obtained from the [Document Store](../../client-api/what-is-a-document-store.mdx),
+  is the primary interface your application will interact with.
+ +* In this page: + * [Session overview](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#session-overview) + * [Unit of work pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern) + * [Tracking changes](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) + * [Create document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#create-document-example) + * [Modify document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#modify-document-example) + * [Identity map pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#identity-map-pattern) + * [Batching & Transactions](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#batching--transactions) + * [Concurrency control](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#concurrency-control) + * [Reducing server calls (best practices) for:](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#reducing-server-calls-(best-practices)-for:) + * [The N+1 problem](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#the-select-n1-problem) + * [Large query results](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#large-query-results) + * [Retrieving results on demand (Lazy)](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#retrieving-results-on-demand-lazy) + + +## Session overview + +* **What is the session**: + + * The session (`IDocumentSession`/`IAsyncDocumentSession`) serves as a [Unit of Work](https://en.wikipedia.org/wiki/Unit_of_work) representing a single + **[Business Transaction](https://martinfowler.com/eaaCatalog/unitOfWork.html)** on a specific database (not to be confused with an [ACID transaction](../../client-api/faq/transaction-support.mdx)). + + * It is a container that allows you to query for documents and load, create, or update entities + while keeping track of changes. + + * Basic document CRUD actions and document Queries are available through the `session`. + More advanced options are available using the `advanced` Session operations. + +* **Batching modifications**: + A business transaction usually involves multiple requests such as loading of documents or execution of queries. + Calling [saveChanges()](../../client-api/session/saving-changes.mdx) indicates the completion of the client-side business logic. + At this point, all modifications made within the session are batched and sent together in a **single HTTP request** to the server to be persisted as a single ACID transaction. + +* **Tracking changes**: + Based on the [Unit of Work](https://martinfowler.com/eaaCatalog/unitOfWork.html) and the [Identity Map](https://martinfowler.com/eaaCatalog/identityMap.html) patterns, + the session tracks all changes made to all entities that it has either loaded, stored, deleted, or queried for. + Only the modifications are sent to the server when _saveChanges()_ is called. + +* **Client side object**: + The session is a pure client side object. Opening the session does Not establish any connection to a database, + and the session's state isn't reflected on the server side during its duration. + +* **Configurability**: + Various aspects of the session are configurable. + For example, the number of server requests allowed per session is [configurable](../../client-api/session/configuration/how-to-change-maximum-number-of-requests-per-session.mdx) (default is 30). 
+
+* **The session and ORM Comparison**:
+  The RavenDB Client API is a native way to interact with a RavenDB database.
+  It is _not_ an Object–relational mapping (ORM) tool. However, if you're familiar with the NHibernate or Entity Framework ORMs, you'll recognize that
+  the session is the equivalent of NHibernate's session and Entity Framework's DbContext, which implement the UoW pattern as well.
+
+
+
+## Unit of work pattern
+
+#### Tracking changes
+
+* Using the session, perform needed operations on your documents.
+  e.g. create a new document, modify an existing document, query for documents, etc.
+* Any such operation '*loads*' the document as an entity to the Session,
+  and the entity is added to the **session's entities map**.
+* The session **tracks all changes** made to all entities stored in its internal map.
+  You don't need to manually track the changes and decide what needs to be saved and what doesn't,
+  the session will do it for you.
+* Prior to saving, you can review the changes made if necessary. See:
+  * [Get tracked entities](../../client-api/session/how-to/get-tracked-entities.mdx)
+  * [Check for session changes](../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx)
+* All the tracked changes are combined & persisted in the database only when calling `saveChanges()`.
+* Entity tracking can be disabled if needed. See:
+  * [Disable entity tracking](../../client-api/session/configuration/how-to-disable-tracking.mdx)
+  * [Clear session](../../client-api/session/how-to/clear-a-session.mdx)
+  * [Evict a single entity](../../client-api/session/how-to/evict-entity-from-a-session.mdx)
+
+#### Create document example
+
+* The Client API, and the session in particular, is designed to be as straightforward as possible.
+  Open the session, do some operations, and apply the changes to the RavenDB server.
+* The following example shows how to create a new document in the database using the session.
+
+
+
+
+{`// Obtain a session from your Document Store
+const session = documentStore.openSession();
+
+// Create a new company entity
+class Company {
+    constructor(name) {
+        this.name = name;
+    }
+}
+
+const entity = new Company("CompanyName");
+
+// Store the entity in the Session's internal map
+await session.store(entity);
+// From now on, any changes that will be made to the entity will be tracked by the session.
+// However, the changes will be persisted to the server only when 'saveChanges()' is called.
+
+await session.saveChanges();
+// At this point the entity is persisted to the database as a new document.
+// Since no database was specified when opening the session, the Default Database is used.
+`}
+
+
+
+
+{`// Obtain a session from your Document Store
+const session = documentStore.openSession();
+
+// Create a new company entity
+class Company {
+    constructor(name) {
+        this.name = name;
+    }
+}
+
+const entity = new Company("CompanyName");
+
+// Store the entity in the Session's internal map
+session.store(entity)
+    .then(() => {
+        // From now on, any changes that will be made to the entity will be tracked by the session.
+        // However, the changes will be persisted to the server only when 'saveChanges()' is called.
+
+        // Return the promise so the next .then() runs only after the save completes
+        return session.saveChanges();
+    })
+    .then(() => {
+        // At this point the entity is persisted to the database as a new document.
+        // Since no database was specified when opening the session, the Default Database is used.
+    });
+`}
+
+
+
+
+#### Modify document example
+* The following example modifies the content of an existing document.
+ + + + +{`// Open a session +const session = documentStore.openSession(); + +// Load an existing document to the session using its ID +// The loaded entity will be added to the session's internal map +const entity = await session.load("companies/1-A"); + +// Edit the entity, the session will track this change +entity.name = "NewCompanyName"; + +await session.saveChanges(); +// At this point, the change made is persisted to the existing document in the database +`} + + + + +{`// Open a session +const session = documentStore.openSession(); + +// Load an existing document to the session using its ID +// The loaded entity will be added to the session's internal map +session.load("companies/1-A") + .then((company) => { + // Edit the entity, the session will track this change + company.name = "NewCompanyName"; + }) + .then(() => session.saveChanges()) + .then(() => { + // At this point, the change made is persisted to the existing document in the database + }); +`} + + + + + + +## Identity map pattern + +* The session implements the [Identity Map Pattern](https://martinfowler.com/eaaCatalog/identityMap.html). +* The first `load()` call goes to the server and fetches the document from the database. + The document is then stored as an entity in the session's entities map. +* All subsequent `load()` calls to the same document will simply retrieve the entity from the session - + no additional calls to the server are made. + + + +{`// A document is fetched from the server +const entity1 = await session.load("companies/1-A"); + +// Loading the same document will now retrieve its entity from the session's map +const entity2 = await session.load("companies/1-A"); + +// This command will Not throw an exception +assert.equal(entity1, entity2); +`} + + + +* Note: + To override this behavior and force `load()` to fetch the latest changes from the server see: + [Refresh an entity](../../client-api/session/how-to/refresh-entity.mdx). + + + +## Batching & Transactions + + + +#### Batching + +* Remote calls to a server over the network are among the most expensive operations an application makes. + The session optimizes this by batching all **write operations** it has tracked into the `saveChanges()` call. +* When calling _saveChanges_, the session evaluates its state to identify all pending changes requiring persistence in the database. + These changes are then combined into a single batch that is sent to the server in a **single remote call** and executed as a single ACID transaction. + + + + +#### Transactions + +* The client API does not provide transactional semantics over the entire session. + The session **does not** represent a [transaction](../../client-api/faq/transaction-support.mdx) (nor a transaction scope) in terms of ACID transactions. +* RavenDB provides transactions over individual requests, so each call made within the session's usage will be processed in a separate transaction on the server side. + This applies to both reads and writes. + +##### Read transactions + +* Each call retrieving data from the database will generate a separate request. Multiple requests mean separate transactions. +* The following options allow you to read _multiple_ documents in a single request: + * Using overloads of the [load()](../../client-api/session/loading-entities.mdx#load---multiple-entities) method that specify a collection of IDs or a prefix of ID. + * Using [include](../../client-api/session/loading-entities.mdx#load-with-includes) to retrieve additional documents in a single request. 
+
+##### Write transactions
+
+* The batched operations that are sent in the `saveChanges()` call complete transactionally, as this call generates a single request to the database.
+  In other words, either all changes are saved as a **Single Atomic Transaction** or none of them are.
+  So once _saveChanges_ returns successfully, it is guaranteed that all changes are persisted to the database.
+* _saveChanges_ is the only time when the RavenDB Client API sends updates to the server from the Session,
+  resulting in a reduced number of network calls.
+* To execute an operation that both loads and updates a document within the same write transaction, use the patching feature.
+  This can be done using either the [JavaScript patch](../../client-api/operations/patching/single-document.mdx) syntax or the [JSON Patch](../../client-api/operations/patching/json-patch-syntax.mdx) syntax.
+
+
+
+
+#### Transaction mode
+
+* The session's transaction mode can be set to either:
+  * **Single-Node** - the transaction is executed on a specific node and then replicated
+  * **Cluster-Wide** - the transaction is registered for execution on all nodes in an atomic fashion
+* The phrase "session's transaction mode" refers to the type of transaction that will be executed on the server-side when `saveChanges()` is called.
+  As mentioned earlier, the session itself does not represent an ACID transaction.
+* Learn more about these modes in [Cluster-wide vs. Single-node](../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) transactions.
+
+
+
+
+For a detailed description of transactions in RavenDB, please refer to the [Transaction support in RavenDB](../../client-api/faq/transaction-support.mdx) article.
+
+
+
+
+## Concurrency control
+
+The typical usage model of the session is:
+
+  * Load documents
+  * Modify the documents
+  * Save changes
+
+For example, a real case scenario would be:
+
+  * Load an entity from a database.
+  * Display an Edit form on the screen.
+  * Update the entity after the user completes editing.
+
+When using the session, the interaction with the database is divided into two parts - the load part and the save-changes part.
+Each of these parts is executed separately, via its own HTTP request.
+Consequently, data that was loaded and edited could potentially be changed by another user in the meantime.
+To address this, the session API offers the concurrency control feature.
+
+#### Default strategy on single node
+
+* By default, concurrency checks are turned off.
+  This means that with the default configuration of the session, concurrent changes to the same document will use the Last Write Wins strategy.
+
+* The second write of an updated document will override the previous version, causing potential data loss.
+  This behavior should be considered when using the session with the single-node transaction mode.
+
+#### Optimistic concurrency on single node
+
+* The modification or editing stage can extend over a considerable time period or may occur offline.
+  To prevent conflicting writes, where a document is modified while it is being edited by another user or client,
+  the session can be configured to employ [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx).
+
+* Once optimistic concurrency is enabled, the session performs version tracking to ensure that any document modified within the session has not been altered in the database since it was loaded into the session.
+  The version is tracked using a [change vector](../../server/clustering/replication/change-vector.mdx).
+
+* When `saveChanges()` is called, the session additionally transmits the version of the modified documents to the database, allowing it to verify if any changes have occurred in the meantime.
+  If modifications are detected, the transaction will be aborted with a `ConcurrencyException`,
+  providing the caller with an opportunity to retry or handle the error as needed.
+  A minimal sketch of this flow is shown below.
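+
+For illustration, here is a minimal sketch of this flow (assuming the `documentStore` used in the examples above; the exact error-handling details may vary between client versions):
+
+
+
+{`const session = documentStore.openSession();
+
+// Enable optimistic concurrency for this session (it is off by default):
+session.advanced.useOptimisticConcurrency = true;
+
+const company = await session.load("companies/1-A");
+company.name = "NewCompanyName";
+
+try {
+    // The session sends the document's version along with the change;
+    // if the document was modified on the server in the meantime,
+    // the save is aborted with a ConcurrencyException.
+    await session.saveChanges();
+} catch (error) {
+    if (error.name === "ConcurrencyException") {
+        // Reload the latest version and retry, or report the conflict to the user
+    }
+}
+`}
+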
+
+#### Concurrency control in cluster-wide transactions
+
+* In a cluster-wide transaction scenario, the RavenDB server tracks a cluster-wide version for each modified document, updating it through the Raft protocol.
+  This means that when using a session with the cluster-wide transaction mode, a `ConcurrencyException` will be triggered upon calling `saveChanges()`
+  if another user has modified a document and saved it in a separate cluster-wide transaction in the meantime.
+
+* More information about cluster transactions can be found in [Cluster Transaction - Overview](../../client-api/session/cluster-transaction/overview.mdx).
+
+
+
+## Reducing server calls (best practices) for:
+
+#### The select N+1 problem
+* The Select N+1 problem is common with all ORMs and ORM-like APIs.
+  It results in an excessive number of remote calls to the server, which makes a query very expensive.
+* Make use of RavenDB's `include()` method to include related documents and avoid this issue.
+  See: [Document relationships](../../client-api/how-to/handle-document-relationships.mdx)
+
+#### Large query results
+* When query results are large and you don't want the overhead of keeping all results in memory,
+  you can [Stream query results](../../client-api/session/querying/how-to-stream-query-results.mdx).
+  A single server call is executed and the client can handle the results one by one.
+* [Paging](../../indexes/querying/paging.mdx) also avoids getting all query results at one time;
+  however, multiple server calls are generated - one per page retrieved.
+
+#### Retrieving results on demand (Lazy)
+* Query calls to the server can be delayed and executed on-demand as needed using `lazily()`,
+  as shown in the sketch below.
+* See [Perform queries lazily](../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
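+
+For illustration, here is a minimal sketch (assuming the `documentStore` used in the examples above; in the Node.js client, requesting the first lazy value is expected to execute all pending lazy operations together):
+
+
+
+{`const session = documentStore.openSession();
+
+// Define the queries lazily - no requests are sent to the server yet:
+const lazyCompanies = session.query({ collection: "Companies" }).lazily();
+const lazyOrders = session.query({ collection: "Orders" }).lazily();
+
+// Requesting a value executes all pending lazy operations
+// in a single round-trip to the server:
+const companies = await lazyCompanies.getValue();
+const orders = await lazyOrders.getValue();
+`}
+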
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-php.mdx b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-php.mdx
new file mode 100644
index 0000000000..f69e751017
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-php.mdx
@@ -0,0 +1,299 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The **Session**, which is obtained from the [Document Store](../../client-api/what-is-a-document-store.mdx),
+  is the primary interface your application will interact with.
+
+* In this page:
+  * [Session overview](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#session-overview)
+  * [Unit of work pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern)
+      * [Tracking changes](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes)
+      * [Create document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#create-document-example)
+      * [Modify document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#modify-document-example)
+  * [Identity map pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#identity-map-pattern)
+  * [Batching & Transactions](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#batching--transactions)
+  * [Concurrency control](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#concurrency-control)
+  * [Reducing server calls (best practices) for:](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#reducing-server-calls-(best-practices)-for:)
+      * [The N+1 problem](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#the-select-n1-problem)
+      * [Large query results](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#large-query-results)
+      * [Retrieving results on demand (Lazy)](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#retrieving-results-on-demand-lazy)
+
+
+## Session overview
+
+* **What is the session**:
+
+  * The session serves as a [Unit of Work](https://en.wikipedia.org/wiki/Unit_of_work) representing a single
+    **[Business Transaction](https://martinfowler.com/eaaCatalog/unitOfWork.html)** on a specific database (not to be confused with an [ACID transaction](../../client-api/faq/transaction-support.mdx)).
+
+  * It is a container that allows you to query for documents and load, create, or update entities
+    while keeping track of changes.
+
+  * Basic document CRUD actions and document Queries are available through the `Session`.
+    More advanced options are available using the `Advanced` Session operations.
+
+* **Batching modifications**:
+  A business transaction usually involves multiple requests such as loading of documents or execution of queries.
+  Calling [saveChanges()](../../client-api/session/saving-changes.mdx) indicates the completion of the client-side business logic.
+  At this point, all modifications made within the session are batched and sent together in a **single HTTP request** to the server to be persisted as a single ACID transaction.
+
+* **Tracking changes**:
+  Based on the [Unit of Work](https://martinfowler.com/eaaCatalog/unitOfWork.html) and the [Identity Map](https://martinfowler.com/eaaCatalog/identityMap.html) patterns,
+  the session tracks all changes made to all entities that it has either loaded, stored, deleted, or queried for.
+  Only the modifications are sent to the server when `saveChanges` is called.
+
+* **Client side object**:
+  The session is a pure client side object. Opening the session does Not establish any connection to a database,
+  and the session's state isn't reflected on the server side during its duration.
+
+* **Configurability**:
+  Various aspects of the session are configurable.
+  For example, the number of server requests allowed per session is [configurable](../../client-api/session/configuration/how-to-change-maximum-number-of-requests-per-session.mdx) (default is 30).
+
+* **The session and ORM Comparison**:
+  The RavenDB Client API is a native way to interact with a RavenDB database.
+  It is _not_ an Object–relational mapping (ORM) tool. However, if you're familiar with ORMs such as NHibernate or Entity Framework,
+  you'll recognize that the session is the equivalent of NHibernate's session and Entity Framework's DbContext, which implement the UoW pattern as well.
+
+
+
+## Unit of work pattern
+
+#### Tracking changes
+
+* Using the Session, perform the needed operations on your documents,
+  e.g., create a new document, modify an existing document, query for documents, etc.
+* Any such operation '*loads*' the document as an entity into the Session,
+  and the entity is added to the **Session's entities map**.
+* The Session **tracks all changes** made to all entities stored in its internal map.
+  You don't need to manually track the changes and decide what needs to be saved and what doesn't - the Session will do it for you.
+  Prior to saving, you can review the changes made if necessary. See: [Check for session changes](../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx).
+* All the tracked changes are combined & persisted in the database only when calling `saveChanges()`.
+* Entity tracking can be disabled if needed. See:
+  * [Disable entity tracking](../../client-api/session/configuration/how-to-disable-tracking.mdx)
+  * [Clear session](../../client-api/session/how-to/clear-a-session.mdx)
+  * [Evict a single entity](../../client-api/session/how-to/evict-entity-from-a-session.mdx)
+
+#### Create document example
+
+* The Client API, and the Session in particular, is designed to be as straightforward as possible.
+  Open the session, do some operations, and apply the changes to the RavenDB server.
+* The following example shows how to create a new document in the database using the Session.
+
+
+
+{`// Obtain a Session from your Document Store
+$session = $store->openSession();
+try \{
+    // Create a new entity
+    $entity = new Company();
+    $entity->setName("Company");
+
+    // Store the entity in the Session's internal map
+    $session->store($entity);
+    // From now on, any changes that will be made to the entity will be tracked by the Session.
+    // However, the changes will be persisted to the server only when saveChanges() is called.
+
+    $session->saveChanges();
+    // At this point the entity is persisted to the database as a new document.
+    // Since no database was specified when opening the Session, the Default Database is used.
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+#### Modify document example
+* The following example modifies the content of an existing document.
+
+
+
+{`// Open a session
+$session = $store->openSession();
+try \{
+    // Load an existing document into the Session using its ID
+    // The loaded entity will be added to the session's internal map
+    $entity = $session->load(Company::class, $companyId);
+
+    // Edit the entity, the Session will track this change
+    $entity->setName("NewCompanyName");
+
+    $session->saveChanges();
+    // At this point, the change made is persisted to the existing document in the database
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## Identity map pattern
+
+* The session implements the [Identity Map Pattern](https://martinfowler.com/eaaCatalog/identityMap.html).
+* The first `load()` call goes to the server and fetches the document from the database.
+  The document is then stored as an entity in the Session's entities map.
+* All subsequent `load()` calls to the same document will simply retrieve the entity from the Session -
+  no additional calls to the server are made.
+
+
+
+{`// A document is fetched from the server
+$entity1 = $session->load(Company::class, $companyId);
+
+// Loading the same document will now retrieve its entity from the Session's map
+$entity2 = $session->load(Company::class, $companyId);
+
+// This command will Not throw an exception
+$this->assertSame($entity1, $entity2);
+`}
+
+
+
+* Note:
+  To override this behavior and force `load()` to fetch the latest changes from the server see:
+  [Refresh an entity](../../client-api/session/how-to/refresh-entity.mdx).
+
+
+
+## Batching & Transactions
+
+
+
+#### Batching
+
+* Remote calls to a server over the network are among the most expensive operations an application makes.
+  The session optimizes this by batching all **write operations** it has tracked into the `saveChanges()` call.
+* When calling `saveChanges`, the session evaluates its state to identify all pending changes requiring persistence in the database.
+  These changes are then combined into a single batch that is sent to the server in a **single remote call** and executed as a single ACID transaction.
+
+
+
+
+#### Transactions
+
+* The client API does not provide transactional semantics over the entire session.
+  The session **does not** represent a [transaction](../../client-api/faq/transaction-support.mdx) (nor a transaction scope) in terms of ACID transactions.
+* RavenDB provides transactions over individual requests, so each call made within the session's usage will be processed in a separate transaction on the server side.
+  This applies to both reads and writes.
+
+##### Read transactions
+
+* Each call retrieving data from the database will generate a separate request. Multiple requests mean separate transactions.
+* The following options allow you to read _multiple_ documents in a single request:
+  * Using overloads of the [load()](../../client-api/session/loading-entities.mdx#load---multiple-entities) method that accept a collection of IDs or an ID prefix.
+  * Using [include](../../client-api/session/loading-entities.mdx#load-with-includes) to retrieve additional documents in a single request.
+  * A query that can return multiple documents is executed in a single request,
+    hence it is processed in a single read transaction.
+
+##### Write transactions
+
+* The batched operations that are sent in the `saveChanges()` call complete transactionally, as this call generates a single request to the database.
+  In other words, either all changes are saved as a **Single Atomic Transaction** or none of them are.
+  So once `saveChanges` returns successfully, it is guaranteed that all changes are persisted to the database.
+* `saveChanges` is the only time when the RavenDB Client API sends updates to the server from the Session,
+  resulting in a reduced number of network calls.
+* To execute an operation that both loads and updates a document within the same write transaction, use the patching feature.
+  This can be done using either the [JavaScript patch](../../client-api/operations/patching/single-document.mdx) syntax or the [JSON Patch](../../client-api/operations/patching/json-patch-syntax.mdx) syntax.
+ + + + +#### Transaction mode + +* The session's transaction mode can be set to either: + * **Single-Node** - transaction is executed on a specific node and then replicated + * **Cluster-Wide** - transaction is registered for execution on all nodes in an atomic fashion + * + The phrase "session's transaction mode" refers to the type of transaction that will be executed on the server-side when `saveChanges()` is called. + As mentioned earlier, the session itself does not represent an ACID transaction. + + * Learn more about these modes in [Cluster-wide vs. Single-node](../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) transactions. + + + + +For a detailed description of transactions in RavenDB please refer to the [Transaction support in RavenDB](../../client-api/faq/transaction-support.mdx) article. + + + + +## Concurrency control + +The typical usage model of the session is: + + * Load documents + * Modify the documents + * Save changes + +For example, a real case scenario would be: + + * Load an entity from a database. + * Display an Edit form on the screen. + * Update the entity after the user completes editing. + +When using the session, the interaction with the database is divided into two parts - the load part and save changes part. +Each of these parts is executed separately, via its own HTTP request. +Consequently, data that was loaded and edited could potentially be changed by another user in the meantime. +To address this, the session API offers the concurrency control feature. + +#### Default strategy on single node + +* By default, concurrency checks are turned off. + This means that with the default configuration of the session, concurrent changes to the same document will use the Last Write Wins strategy. + +* The second write of an updated document will override the previous version, causing potential data loss. + This behavior should be considered when using the session with single node transaction mode. + +#### Optimistic concurrency on single node + +* The modification or editing stage can extend over a considerable time period or may occur offline. + To prevent conflicting writes, where a document is modified while it is being edited by another user or client, + the session can be configured to employ [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx). + +* Once optimistic concurrency is enabled, the session performs version tracking to ensure that any document modified within the session has not been altered in the database since it was loaded into the session. + The version is tracked using a [change vector](../../server/clustering/replication/change-vector.mdx). + +* When `saveChanges()` is called, the session additionally transmits the version of the modified documents to the database, allowing it to verify if any changes have occurred in the meantime. + If modifications are detected, the transaction will be aborted with a `ConcurrencyException`, + providing the caller with an opportunity to retry or handle the error as needed. + +#### Concurrency control in cluster-wide transactions + +* In a cluster-wide transaction scenario, RavenDB server tracks a cluster-wide version for each modified document, updating it through the Raft protocol. 
+ This means that when using a session with the cluster-wide transaction mode, a `ConcurrencyException` will be triggered upon calling `saveChanges()` + if another user has modified a document and saved it in a separate cluster-wide transaction in the meantime. + +* More information about cluster transactions can be found in [Cluster Transaction - Overview](../../client-api/session/cluster-transaction/overview.mdx). + + + +## Reducing server calls (best practices) for: + +#### The select N+1 problem +* The Select N+1 problem is common + with all ORMs and ORM-like APIs. + It results in an excessive number of remote calls to the server, which makes a query very expensive. +* Make use of RavenDB's `include()` method to include related documents and avoid this issue. + See: [Document relationships](../../client-api/how-to/handle-document-relationships.mdx) + +#### Large query results +* When query results are large and you don't want the overhead of keeping all results in memory, + then you can [Stream query results](../../client-api/session/querying/how-to-stream-query-results.mdx). + A single server call is executed and the client can handle the results one by one. +* [Paging](../../indexes/querying/paging.mdx) also avoids getting all query results at one time, + however, multiple server calls are generated - one per page retrieved. + +#### Retrieving results on demand (Lazy) +* Query calls to the server can be delayed and executed on-demand as needed using `Lazily()` +* See [Perform queries lazily](../../client-api/session/querying/how-to-perform-queries-lazily.mdx) + + + + diff --git a/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-python.mdx b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-python.mdx new file mode 100644 index 0000000000..b7d821c220 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/_what-is-a-session-and-how-does-it-work-python.mdx @@ -0,0 +1,289 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The **Session**, which is obtained from the [Document Store](../../client-api/what-is-a-document-store.mdx), + is the primary interface your application will interact with. 
+ +* In this page: + * [Session overview](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#session-overview) + * [Unit of work pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern) + * [Tracking changes](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) + * [Create document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#create-document-example) + * [Modify document example](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#modify-document-example) + * [Identity map pattern](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#identity-map-pattern) + * [Batching & Transactions](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#batching--transactions) + * [Concurrency control](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#concurrency-control) + * [Reducing server calls (best practices) for:](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#reducing-server-calls-(best-practices)-for:) + * [The N+1 problem](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#the-select-n1-problem) + * [Large query results](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#large-query-results) + * [Retrieving results on demand (Lazy)](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#retrieving-results-on-demand-lazy) + + +## Session overview + +* **What is the session**: + + * The session serves as a [Unit of Work](https://en.wikipedia.org/wiki/Unit_of_work) representing a single + **[Business Transaction](https://martinfowler.com/eaaCatalog/unitOfWork.html)** on a specific database (not to be confused with an [ACID transaction](../../client-api/faq/transaction-support.mdx)). + + * It is a container that allows you to query for documents and load, create, or update entities + while keeping track of changes. + + * Basic document CRUD actions and document Queries are available through the `session`. + More advanced options are available using the `advanced` Session operations. + +* **Batching modifications**: + A business transaction usually involves multiple requests such as loading of documents or execution of queries. + Calling [save_changes()](../../client-api/session/saving-changes.mdx) indicates the completion of the client-side business logic. + At this point, all modifications made within the session are batched and sent together in a **single HTTP request** to the server to be persisted as a single ACID transaction. + +* **Tracking changes**: + Based on the [Unit of Work](https://martinfowler.com/eaaCatalog/unitOfWork.html) and the [Identity Map](https://martinfowler.com/eaaCatalog/identityMap.html) patterns, + the session tracks all changes made to all entities that it has either loaded, stored, deleted, or queried for. + Only the modifications are sent to the server when _save_changes()_ is called. + +* **Client side object**: + The session is a pure client side object. Opening the session does Not establish any connection to a database, + and the session's state isn't reflected on the server side during its duration. + +* **Configurability**: + Various aspects of the session are configurable. + For example, the number of server requests allowed per session is [configurable](../../client-api/session/configuration/how-to-change-maximum-number-of-requests-per-session.mdx) (default is 30). 
+
+* **The session and ORM Comparison**:
+  The RavenDB Client API is a native way to interact with a RavenDB database.
+  It is _not_ an Object–relational mapping (ORM) tool. However, if you're familiar with ORMs such as NHibernate or Entity Framework,
+  you'll recognize that the session is the equivalent of NHibernate's session and Entity Framework's DbContext, which implement the UoW pattern as well.
+
+
+
+## Unit of work pattern
+
+#### Tracking changes
+
+* Using the Session, perform the needed operations on your documents,
+  e.g., create a new document, modify an existing document, query for documents, etc.
+* Any such operation '*loads*' the document as an entity into the Session,
+  and the entity is added to the **Session's entities map**.
+* The Session **tracks all changes** made to all entities stored in its internal map.
+  You don't need to manually track the changes and decide what needs to be saved and what doesn't - the Session will do it for you.
+  Prior to saving, you can review the changes made if necessary. See: [Check for session changes](../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx).
+* All the tracked changes are combined & persisted in the database only when calling `save_changes()`.
+* Entity tracking can be disabled if needed. See:
+  * [Disable entity tracking](../../client-api/session/configuration/how-to-disable-tracking.mdx)
+  * [Clear session](../../client-api/session/how-to/clear-a-session.mdx)
+  * [Evict a single entity](../../client-api/session/how-to/evict-entity-from-a-session.mdx)
+
+#### Create document example
+
+* The Client API, and the Session in particular, is designed to be as straightforward as possible.
+  Open the session, do some operations, and apply the changes to the RavenDB server.
+* The following example shows how to create a new document in the database using the Session.
+
+
+
+{`with store.open_session() as session:
+    # Create a new entity
+    entity = Company(name="CompanyName")
+
+    # Store the entity in the Session's internal map
+    session.store(entity)
+    # From now on, any changes that will be made to the entity will be tracked by the Session.
+    # However, the changes will be persisted to the server only when 'save_changes()' is called.
+
+    session.save_changes()
+    # At this point the entity is persisted to the database as a new document.
+    # Since no database was specified when opening the Session, the Default Database is used.
+`}
+
+
+
+#### Modify document example
+* The following example modifies the content of an existing document.
+
+
+
+{`# Open a session
+with store.open_session() as session:
+    # Load an existing document into the Session using its ID
+    # The loaded entity will be added to the session's internal map
+    entity = session.load(company_id, Company)
+
+    # Edit the entity, the Session will track this change
+    entity.name = "NewCompanyName"
+
+    session.save_changes()
+    # At this point, the change made is persisted to the existing document in the database
+`}
+
+
+
+
+
+## Identity map pattern
+
+* The session implements the [Identity Map Pattern](https://martinfowler.com/eaaCatalog/identityMap.html).
+* The first `load()` call goes to the server and fetches the document from the database.
+  The document is then stored as an entity in the Session's entities map.
+* All subsequent `load()` calls to the same document will simply retrieve the entity from the Session -
+  no additional calls to the server are made.
+
+
+
+{`# A document is fetched from the server
+entity1 = session.load(company_id, Company)
+
+# Loading the same document will now retrieve its entity from the Session's map
+entity2 = session.load(company_id, Company)
+
+# This command will not throw an exception
+self.assertEqual(entity1, entity2)
+`}
+
+
+
+* Note:
+  To override this behavior and force `load()` to fetch the latest changes from the server see:
+  [Refresh an entity](../../client-api/session/how-to/refresh-entity.mdx).
+
+
+
+## Batching & Transactions
+
+
+
+#### Batching
+
+* Remote calls to a server over the network are among the most expensive operations an application makes.
+  The session optimizes this by batching all **write operations** it has tracked into the `save_changes()` call.
+* When calling _save_changes_, the session evaluates its state to identify all pending changes requiring persistence in the database.
+  These changes are then combined into a single batch that is sent to the server in a **single remote call** and executed as a single ACID transaction.
+
+
+
+
+#### Transactions
+
+* The client API does not provide transactional semantics over the entire session.
+  The session **does not** represent a [transaction](../../client-api/faq/transaction-support.mdx) (nor a transaction scope) in terms of ACID transactions.
+* RavenDB provides transactions over individual requests, so each call made within the session's usage will be processed in a separate transaction on the server side.
+  This applies to both reads and writes.
+
+##### Read transactions
+
+* Each call retrieving data from the database will generate a separate request. Multiple requests mean separate transactions.
+* The following options allow you to read _multiple_ documents in a single request:
+  * Using overloads of the [load()](../../client-api/session/loading-entities.mdx#load---multiple-entities) method that accept a collection of IDs or an ID prefix.
+  * Using [include](../../client-api/session/loading-entities.mdx#load-with-includes) to retrieve additional documents in a single request.
+  * A query that can return multiple documents is executed in a single request,
+    hence it is processed in a single read transaction.
+
+##### Write transactions
+
+* The batched operations that are sent in the `save_changes()` call complete transactionally, as this call generates a single request to the database.
+  In other words, either all changes are saved as a **Single Atomic Transaction** or none of them are.
+  So once _save_changes_ returns successfully, it is guaranteed that all changes are persisted to the database.
+* _save_changes_ is the only time when the RavenDB Client API sends updates to the server from the Session,
+  resulting in a reduced number of network calls.
+* To execute an operation that both loads and updates a document within the same write transaction, use the patching feature.
+  This can be done using either the [JavaScript patch](../../client-api/operations/patching/single-document.mdx) syntax or the [JSON Patch](../../client-api/operations/patching/json-patch-syntax.mdx) syntax.
+
+
+
+
+#### Transaction mode
+
+* The session's transaction mode can be set to either:
+  * **Single-Node** - the transaction is executed on a specific node and then replicated
+  * **Cluster-Wide** - the transaction is registered for execution on all nodes in an atomic fashion
+* The phrase "session's transaction mode" refers to the type of transaction that will be executed on the server-side when `save_changes()` is called.
+ As mentioned earlier, the session itself does not represent an ACID transaction. + + * Learn more about these modes in [Cluster-wide vs. Single-node](../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) transactions. + + + + +For a detailed description of transactions in RavenDB please refer to the [Transaction support in RavenDB](../../client-api/faq/transaction-support.mdx) article. + + + + +## Concurrency control + +The typical usage model of the session is: + + * Load documents + * Modify the documents + * Save changes + +For example, a real case scenario would be: + + * Load an entity from a database. + * Display an Edit form on the screen. + * Update the entity after the user completes editing. + +When using the session, the interaction with the database is divided into two parts - the load part and save changes part. +Each of these parts is executed separately, via its own HTTP request. +Consequently, data that was loaded and edited could potentially be changed by another user in the meantime. +To address this, the session API offers the concurrency control feature. + +#### Default strategy on single node + +* By default, concurrency checks are turned off. + This means that with the default configuration of the session, concurrent changes to the same document will use the Last Write Wins strategy. + +* The second write of an updated document will override the previous version, causing potential data loss. + This behavior should be considered when using the session with single node transaction mode. + +#### Optimistic concurrency on single node + +* The modification or editing stage can extend over a considerable time period or may occur offline. + To prevent conflicting writes, where a document is modified while it is being edited by another user or client, + the session can be configured to employ [optimistic concurrency](../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx). + +* Once optimistic concurrency is enabled, the session performs version tracking to ensure that any document modified within the session has not been altered in the database since it was loaded into the session. + The version is tracked using a [change vector](../../server/clustering/replication/change-vector.mdx). + +* When `save_changes()` is called, the session additionally transmits the version of the modified documents to the database, allowing it to verify if any changes have occurred in the meantime. + If modifications are detected, the transaction will be aborted with a `ConcurrencyException`, + providing the caller with an opportunity to retry or handle the error as needed. + +#### Concurrency control in cluster-wide transactions + +* In a cluster-wide transaction scenario, RavenDB server tracks a cluster-wide version for each modified document, updating it through the Raft protocol. + This means that when using a session with the cluster-wide transaction mode, a `ConcurrencyException` will be triggered upon calling `save_changes()` + if another user has modified a document and saved it in a separate cluster-wide transaction in the meantime. + +* More information about cluster transactions can be found in [Cluster Transaction - Overview](../../client-api/session/cluster-transaction/overview.mdx). + + + +## Reducing server calls (best practices) for: + +#### The select N+1 problem +* The Select N+1 problem is common + with all ORMs and ORM-like APIs. 
+ It results in an excessive number of remote calls to the server, which makes a query very expensive. +* Make use of RavenDB's `include()` method to include related documents and avoid this issue. + See: [Document relationships](../../client-api/how-to/handle-document-relationships.mdx) + +#### Large query results +* When query results are large and you don't want the overhead of keeping all results in memory, + then you can [Stream query results](../../client-api/session/querying/how-to-stream-query-results.mdx). + A single server call is executed and the client can handle the results one by one. +* [Paging](../../indexes/querying/paging.mdx) also avoids getting all query results at one time, + however, multiple server calls are generated - one per page retrieved. + +#### Retrieving results on demand (Lazy) +* Query calls to the server can be delayed and executed on-demand as needed using `lazily()` +* See [Perform queries lazily](../../client-api/session/querying/how-to-perform-queries-lazily.mdx) + + + + diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-csharp.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-csharp.mdx new file mode 100644 index 0000000000..811678c09d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-csharp.mdx @@ -0,0 +1,363 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Atomic Guards** are [compare-exchange key/value items](../../../client-api/operations/compare-exchange/overview.mdx) + that RavenDB creates and manages **automatically** to guarantee + [ACID](../../../server/clustering/cluster-transactions.mdx#cluster-transaction-properties) transactions in cluster-wide sessions. + +* Each document is associated with its own unique atomic guard item. + Atomic guards coordinate between sessions that attempt to write to the same document concurrently. + Saving a document will be prevented if another session has modified the document. + +* In this article: + * [Atomic guard creation and update](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-creation-and-update) + * [Atomic guard usage example](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-usage-example) + * [Atomic guard database scope](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-database-scope) + * [Disabling atomic guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx#disabling-atomic-guards) + * [When are atomic guards removed](../../../client-api/session/cluster-transaction/atomic-guards.mdx#when-are-atomic-guards-removed) + * [Best practice when storing a document in a cluster-wide transaction](../../../client-api/session/cluster-transaction/atomic-guards.mdx#best-practice-when-storing-a-document-in-a-cluster-wide-transaction) + + +## Atomic guard creation and update + + +Atomic guards are created and managed **only when the session's transaction mode is set to [ClusterWide](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)**. + +* **When creating a new document**: + A new atomic guard is created when a new document is successfully saved. 
+ +* **When modifying an existing document that already has an atomic guard**: + * The atomic guard’s Raft index is incremented when the document is successfully saved after being modified. + This allows RavenDB to detect that the document has changed. + * If another session had loaded the document before the document's version changed, it will not be able to save its changes + unless it first reloads the updated version. Otherwise, a `ConcurrencyException` is thrown. + +* **When modifying an existing document that doesn't have an atomic guard**: + * A new atomic guard is created when modifying an existing document that does not yet have one. + * The absence of the atomic guard may be because the document was created in a single-node session, + or because its atomic guard was manually removed (which is not recommended). + +* **When saving a document fails**: + * If a session's `SaveChanges()` fails, the entire session is rolled back and the atomic guard is Not created. + * Ensure your business logic is designed to re-execute the session in case saving changes fails for any reason. + + + +## Atomic guard usage example + +In the code sample below, an atomic guard is automatically created when a new document is saved. +It is then used to detect and prevent conflicting writes: when two sessions load and modify the same document, +only the first save succeeds, and the second fails with a _ConcurrencyException_. + + + + +{`using (var session = store.OpenSession(new SessionOptions + { + // Open a cluster-wide session: + TransactionMode = TransactionMode.ClusterWide + })) +{ + session.Store(new User(), "users/johndoe"); + session.SaveChanges(); + // An atomic guard is now automatically created for the new document "users/johndoe". +} + +// Open two concurrent cluster-wide sessions: +using (var session1 = store.OpenSession( + new SessionOptions + {TransactionMode = TransactionMode.ClusterWide})) +using (var session2 = store.OpenSession( + new SessionOptions + {TransactionMode = TransactionMode.ClusterWide})) +{ + // Both sessions load the same document: + var loadedUser1 = session1.Load("users/johndoe"); + loadedUser1.Name = "jindoe"; + + var loadedUser2 = session2.Load("users/johndoe"); + loadedUser2.Name = "jandoe"; + + // session1 saves its changes first — + // this increments the Raft index of the associated atomic guard. + session1.SaveChanges(); + + // session2 tries to save using an outdated atomic guard version + // and fails with a ConcurrencyException. + session2.SaveChanges(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession(new SessionOptions + { + // Open a cluster-wide session: + TransactionMode = TransactionMode.ClusterWide + })) +{ + await asyncSession.StoreAsync(new User(), "users/johndoe"); + await asyncSession.SaveChangesAsync(); + // An atomic guard is now automatically created for the new document "users/johndoe". 
+} + +// Open two concurrent cluster-wide sessions: +using (var asyncSession1 = store.OpenAsyncSession( + new SessionOptions + {TransactionMode = TransactionMode.ClusterWide})) +using (var asyncSession2 = store.OpenAsyncSession( + new SessionOptions + {TransactionMode = TransactionMode.ClusterWide})) +{ + // Both sessions load the same document: + var loadedUser1 = await asyncSession1.LoadAsync("users/johndoe"); + loadedUser1.Name = "jindoe"; + + var loadedUser2 = await asyncSession2.LoadAsync("users/johndoe"); + loadedUser2.Name = "jandoe"; + + // asyncSession1 saves its changes first — + // this increments the Raft index of the associated atomic guard. + await asyncSession1.SaveChangesAsync(); + + // asyncSession2 tries to save using an outdated atomic guard version + // and fails with a ConcurrencyException. + await asyncSession2.SaveChangesAsync(); +} +`} + + + +After running the above example, you can view the automatically created atomic guard in the Studio’s +[Compare-Exchange view](../../../studio/database/documents/compare-exchange-view.mdx#the-compare-exchange-view): + +![Atomic Guard](./assets/atomic-guard.png) + +1. These are **custom compare-exchange items** that were manually created by you, + e.g., via the [Put compare exchange operation](../../../client-api/operations/compare-exchange/put-compare-exchange-value.mdx) - for any purpose you needed. + They are NOT the automatically created atomic guards. + +2. This is the **atomic guard** that was generated by running the example above. + + The generated atomic guard **key** is: `rvn-atomic/users/johndoe`. + It is composed of: + * The prefix `rvn-atomic/`. + * The ID of the associated document. + + + * Although this Studio view allows editing compare-exchange items, **do not delete or modify atomic guard entries**. + * Doing so will interfere with RavenDB's ability to track document versioning through atomic guards. + + + + +## Atomic guard database scope + +* Atomic guards are local to the database on which they were defined. + +* Since atomic guards are implemented as compare-exchange items, + they are Not externally replicated to other databases by any ongoing replication task. + Learn more in [why compare-exchange items are not replicated](../../../client-api/operations/compare-exchange/overview.mdx#why-compare-exchange-items-are-not-replicated-to-external-databases). + + + +## Disabling atomic guards + +* Before atomic guards were introduced (in RavenDB 5.2), client code had to explicitly manage compare-exchange entries + to ensure concurrency control and maintain ACID guarantees in cluster-wide transactions. + +* You can still take this manual approach by disabling the automatic use of atomic guards in a cluster-wide session, + and managing the required [compare-exchange key/value pairs](../../../client-api/operations/compare-exchange/overview.mdx) yourself, + as shown in this [example](../../../client-api/operations/compare-exchange/overview.mdx#example-i---email-address-reservation). + +* To disable the automatic creation and use of atomic guards in a cluster-wide session, + set the session's `DisableAtomicDocumentWritesInClusterWideTransaction` configuration option to `true`. 
+
+
+
+
+{`using (var session = store.OpenSession(new SessionOptions
+    {
+        TransactionMode = TransactionMode.ClusterWide,
+        // Disable atomic-guards
+        DisableAtomicDocumentWritesInClusterWideTransaction = true
+    }))
+{
+    session.Store(new User(), "users/johndoe");
+
+    // No atomic-guard will be created upon SaveChanges
+    session.SaveChanges();
+}
+`}
+
+
+
+
+{`using (var asyncSession = store.OpenAsyncSession(new SessionOptions
+    {
+        TransactionMode = TransactionMode.ClusterWide,
+        // Disable atomic-guards
+        DisableAtomicDocumentWritesInClusterWideTransaction = true
+    }))
+{
+    await asyncSession.StoreAsync(new User(), "users/johndoe");
+
+    // No atomic-guard will be created upon SaveChanges
+    await asyncSession.SaveChangesAsync();
+}
+`}
+
+
+
+
+
+
+## When are atomic guards removed
+
+Atomic guards are removed **automatically** in the following scenarios
+(you don't need to clean them up manually):
+
+* **Document deleted via a cluster-wide session**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Delete the document using a cluster-wide session - its atomic guard will be removed automatically.
+
+* **Document expires via the expiration feature**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Add the `@expires` metadata property to the document, as described in [Document expiration](../../../studio/database/settings/document-expiration.mdx).
+  * When the expiration time is reached, the document and its atomic guard will both be removed automatically.
+  * Since different cleanup tasks handle the removal of **expired** documents and the removal of their associated atomic guards,
+    the atomic guards of removed documents may linger in the compare-exchange entries list a short while longer before they are removed.
+    You do Not need to remove such atomic guards yourself; they will be removed by the cleanup task.
+
+
+* **Do not delete or modify atomic guards manually while they are in use by an active session**.
+  If a session attempts to save a document whose atomic guard has been removed or changed,
+  it will fail with an error.
+
+* If you accidentally remove an atomic guard that is associated with an existing document,
+  you can restore it by re-saving the document in a cluster-wide session;
+  this will re-create the atomic guard automatically.
+
+
+
+
+## Best practice when storing a document in a cluster-wide transaction
+
+* When working with a cluster-wide session,
+  we recommend that you always **`Load` the document into the session before storing it** -
+  even if the document is expected to be new.
+
+* This is especially important if a document (originally created in a cluster-wide transaction) was deleted **outside** of a cluster-wide session -
+  e.g., when using a [single-node session](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction)
+  or the [DeleteByQueryOperation](../../../client-api/operations/common/delete-by-query.mdx).
+  In these cases, the document is deleted, but the atomic guard remains (it is not automatically removed).
+  If you attempt to re-create such a document without loading it first,
+  RavenDB will fail to save it because the session is unaware of the existing atomic guard’s latest Raft index.
+In this example, the document is loaded into the session BEFORE creating or modifying it: + + + + +{`using (var session = store.OpenSession(new SessionOptions + { + // Open a cluster-wide session + TransactionMode = TransactionMode.ClusterWide + })) +{ + // Load the user document BEFORE creating a new one or modifying if already exists + var user = session.Load("users/johndoe"); + + if (user == null) + { + // Document doesn't exist => create a new document: + var newUser = new User + { + Name = "John Doe", + // ... initialize other properties + }; + + // Store the new user document in the session + session.Store(newUser, "users/johndoe"); + } + else + { + // Document exists => apply your modifications: + user.Name = "New name"; + // ... make any other updates + + // No need to call Store() again + // RavenDB tracks changes on loaded entities + } + + // Commit your changes + session.SaveChanges(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession(new SessionOptions + { + // Open a cluster-wide session + TransactionMode = TransactionMode.ClusterWide + })) +{ + // Load the user document BEFORE creating or updating + var user = await asyncSession.LoadAsync("users/johndoe"); + + if (user == null) + { + // Document doesn't exist => create a new document: + var newUser = new User + { + Name = "John Doe", + // ... initialize other properties + }; + + // Store the new user document in the session + await asyncSession.StoreAsync(newUser, "users/johndoe"); + } + else + { + // Document exists => apply your modifications: + user.Name = "New name"; + // ... make any other updates + + // No need to call Store() again + // RavenDB tracks changes on loaded entities + } + + // Commit your changes + await asyncSession.SaveChangesAsync(); +} +`} + + + + + + +When _loading_ a document in a cluster-wide session, RavenDB attempts to retrieve the document from the document store: + +* **If the document is found**, it is loaded into the session, + and modifications will be saved successfully as long as no other session has modified the document in the meantime. + Specifically, if the document’s [change vector](../../../server/clustering/replication/change-vector.mdx) matches the one currently stored on the server, + the save will proceed - after which the Raft index of the associated atomic guard will be incremented as expected. + Otherwise, RavenDB will fail the operation with a _ConcurrencyException_. + +* **If no document is found**, RavenDB will check whether a matching atomic guard exists (as in the case when the document was deleted outside of a cluster-wide session): + * **If an atomic guard exists**, + the client constructs a change vector for the document using the atomic guard’s Raft index, and the document will be saved with this change vector. + * **If no atomic guard exists**, + the document is treated as a brand new document and will be saved as usual. 
+ + + + + diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-nodejs.mdx new file mode 100644 index 0000000000..d9ae1ff263 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-nodejs.mdx @@ -0,0 +1,261 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Atomic Guards** are [compare-exchange key/value items](../../../client-api/operations/compare-exchange/overview.mdx) + that RavenDB creates and manages **automatically** to guarantee + [ACID](../../../server/clustering/cluster-transactions.mdx#cluster-transaction-properties) transactions in cluster-wide sessions. + +* Each document is associated with its own unique atomic guard item. + Atomic guards coordinate between sessions that attempt to write to the same document concurrently. + Saving a document will be prevented if another session has modified the document. + +* In this article: + * [Atomic guard creation and update](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-creation-and-update) + * [Atomic guard usage example](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-usage-example) + * [Atomic guard database scope](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-database-scope) + * [Disabling atomic guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx#disabling-atomic-guards) + * [When are atomic guards removed](../../../client-api/session/cluster-transaction/atomic-guards.mdx#when-are-atomic-guards-removed) + * [Best practice when storing a document in a cluster-wide transaction](../../../client-api/session/cluster-transaction/atomic-guards.mdx#best-practice-when-storing-a-document-in-a-cluster-wide-transaction) + + +## Atomic guard creation and update + + +Atomic guards are created and managed **only when the session's transaction mode is set to [ClusterWide](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)**. + +* **When creating a new document**: + A new atomic guard is created when a new document is successfully saved. + +* **When modifying an existing document that already has an atomic guard**: + * The atomic guard’s Raft index is incremented when the document is successfully saved after being modified. + This allows RavenDB to detect that the document has changed. + * If another session had loaded the document before the document's version changed, it will not be able to save its changes + unless it first reloads the updated version. Otherwise, a `ConcurrencyException` is thrown. + +* **When modifying an existing document that doesn't have an atomic guard**: + * A new atomic guard is created when modifying an existing document that does not yet have one. + * The absence of the atomic guard may be because the document was created in a single-node session, + or because its atomic guard was manually removed (which is not recommended). + +* **When saving a document fails**: + * If a session's `saveChanges()` fails, the entire session is rolled back and the atomic guard is Not created. + * Ensure your business logic is designed to re-execute the session in case saving changes fails for any reason. 
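+
+For illustration, here is a minimal retry sketch of such re-execution (the `applyChanges` callback is a hypothetical placeholder that re-applies your business logic to a fresh session):
+
+
+
+{`async function saveWithRetry(documentStore, applyChanges, maxAttempts = 3) \{
+    for (let attempt = 1; attempt <= maxAttempts; attempt++) \{
+        // Each attempt uses a fresh cluster-wide session,
+        // so documents are reloaded with their latest atomic-guard versions.
+        const session = documentStore.openSession(\{ transactionMode: "ClusterWide" \});
+        try \{
+            await applyChanges(session); // load / modify / store entities
+            await session.saveChanges(); // may fail, e.g. with a ConcurrencyException
+            return;                      // success - the whole batch was committed
+        \} catch (error) \{
+            if (attempt === maxAttempts) \{
+                throw error; // give up after the last attempt
+            \}
+        \}
+    \}
+\}
+`}
+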
+ + + +## Atomic guard usage example + +In the code sample below, an atomic guard is automatically created when a new document is saved. +It is then used to detect and prevent conflicting writes: when two sessions load and modify the same document, +only the first save succeeds, and the second fails with a _ConcurrencyException_. + + + +{`const user = \{ + firstName: "John", + lastName: "Doe" +\}; + +// Open a cluster-wide session: +const session = documentStore.openSession(\{ + transactionMode: "ClusterWide" +\}); + +await session.store(user, "users/johndoe"); +await session.saveChanges(); +// An atomic-guard is now automatically created for the new document "users/johndoe". + +// Open two concurrent cluster-wide sessions: +const session1 = documentStore.openSession(\{ + transactionMode: "ClusterWide" +\}); +const session2 = documentStore.openSession(\{ + transactionMode: "ClusterWide" +\}); + +// Both sessions load the same document: +const loadedUser1 = await session1.load("users/johndoe"); +loadedUser1.name = "jindoe"; + +const loadedUser2 = await session2.load("users/johndoe"); +loadedUser2.name = "jandoe"; + +// session1 saves its changes first — +// this increments the Raft index of the associated atomic guard. +await session1.saveChanges(); + +// session2 tries to save using an outdated atomic guard version +// and fails with a ConcurrencyException. +await session2.saveChanges(); +`} + + +After running the above example, you can view the automatically created atomic guard in the Studio’s +[Compare-Exchange view](../../../studio/database/documents/compare-exchange-view.mdx#the-compare-exchange-view): + +![Atomic Guard](./assets/atomic-guard.png) + +1. These are **custom compare-exchange items** that were manually created by you, + e.g., via the [Put compare exchange operation](../../../client-api/operations/compare-exchange/put-compare-exchange-value.mdx) - for any purpose you needed. + They are NOT the automatically created atomic guards. + +2. This is the **atomic guard** that was generated by running the example above. + + The generated atomic guard **key** is: `rvn-atomic/users/johndoe`. + It is composed of: + * The prefix `rvn-atomic/`. + * The ID of the associated document. + + + * Although this Studio view allows editing compare-exchange items, **do not delete or modify atomic guard entries**. + * Doing so will interfere with RavenDB's ability to track document versioning through atomic guards. + + + + +## Atomic guard database scope + +* Atomic guards are local to the database on which they were defined. + +* Since atomic guards are implemented as compare-exchange items, + they are Not externally replicated to other databases by any ongoing replication task. + Learn more in [why compare-exchange items are not replicated](../../../client-api/operations/compare-exchange/overview.mdx#why-compare-exchange-items-are-not-replicated-to-external-databases). + + + +## Disabling atomic guards + +* Before atomic guards were introduced (in RavenDB 5.2), client code had to explicitly manage compare-exchange entries + to ensure concurrency control and maintain ACID guarantees in cluster-wide transactions. + +* You can still take this manual approach by disabling the automatic use of atomic guards in a cluster-wide session, + and managing the required [compare-exchange key/value pairs](../../../client-api/operations/compare-exchange/overview.mdx) yourself, + as shown in this [example](../../../client-api/operations/compare-exchange/overview.mdx#example-i---email-address-reservation). 
+
+* To disable the automatic creation and use of atomic guards in a cluster-wide session,
+  set the session's `disableAtomicDocumentWritesInClusterWideTransaction` configuration option to `true`.
+
+
+
+{`const user = \{ firstName: "John", lastName: "Doe" \};
+
+// Open a cluster-wide session
+const session = documentStore.openSession(\{
+    transactionMode: "ClusterWide",
+    // Disable atomic-guards
+    disableAtomicDocumentWritesInClusterWideTransaction: true
+\});
+
+await session.store(user, "users/johndoe");
+
+// No atomic-guard will be created upon saveChanges
+await session.saveChanges();
+`}
+
+
+
+
+## When are atomic guards removed
+
+Atomic guards are removed **automatically** in the following scenarios
+(you don't need to clean them up manually):
+
+* **Document deleted via a cluster-wide session**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Delete the document using a cluster-wide session - its atomic guard will be removed automatically.
+
+* **Document expires via the expiration feature** (see the sketch after the best-practice section below):
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Add the `@expires` metadata property to the document, as described in [Document expiration](../../../studio/database/settings/document-expiration.mdx).
+  * When the expiration time is reached, the document and its atomic guard will both be removed automatically.
+  * Since different cleanup tasks handle the removal of **expired** documents and the removal of their associated atomic guards,
+    atomic guards of removed documents may linger in the compare-exchange entries list a short while longer before they are removed.
+    You do Not need to remove such atomic guards yourself; they will be removed by the cleanup task.
+
+
+* **Do not delete or modify atomic guards manually while they are in use by an active session**.
+  If a session attempts to save a document whose atomic guard has been removed or changed,
+  it will fail with an error.
+
+* If you accidentally remove an atomic guard that is associated with an existing document,
+  you can restore it by re-saving the document in a cluster-wide session;
+  this will re-create the atomic guard automatically.
+
+
+
+
+## Best practice when storing a document in a cluster-wide transaction
+
+* When working with a cluster-wide session,
+  we recommend that you always **`load` the document into the session before storing it** -
+  even if the document is expected to be a new document.
+
+* This is especially important if a document (originally created in a cluster-wide transaction) was deleted **outside** of a cluster-wide session -
+  e.g., when using a [single-node session](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction)
+  or the [DeleteByQueryOperation](../../../client-api/operations/common/delete-by-query.mdx).
+  In these cases, the document is deleted, but the atomic guard remains (it is not automatically removed).
+  If you attempt to re-create such a document without loading it first,
+  RavenDB will fail to save it because the session is unaware of the existing atomic guard’s latest Raft index.
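+
+Before the loading example below, here is a minimal sketch (Node.js) of the expiration scenario referenced above.
+It assumes the [Document expiration](../../../studio/database/settings/document-expiration.mdx) feature is enabled
+on the database; the one-hour expiry is illustrative only.
+
+{`// Open a cluster-wide session:
+const session = documentStore.openSession(\{
+    transactionMode: "ClusterWide"
+\});
+
+const shortLivedUser = \{ firstName: "John", lastName: "Doe" \};
+await session.store(shortLivedUser, "users/johndoe");
+
+// Schedule the document to expire in one hour (UTC ISO 8601 string):
+const metadata = session.advanced.getMetadataFor(shortLivedUser);
+metadata["@expires"] = new Date(Date.now() + 60 * 60 * 1000).toISOString();
+
+await session.saveChanges();
+// Once the expiration time passes, the server removes the document,
+// and a cleanup task removes its atomic guard shortly after.
+`}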
+ +---- + +In this example, the document is loaded into the session BEFORE creating or modifying it: + + + +{`const session = documentStore.openSession(\{ + // Open a cluster-wide session + transactionMode: "ClusterWide" +\}); + +// Load the user document BEFORE creating or updating +const user = await session.load("users/johndoe"); + +if (!user) \{ + // Document doesn't exist => create a new document + const newUser = \{ + name: "John Doe", + // ... initialize other properties + \}; + + // Store the new user document in the session + await session.store(newUser, "users/johndoe"); + +\} else \{ + // Document exists => apply your modifications + user.name = "New name"; + // ... make any other updates + + // No need to call store() again + // RavenDB tracks changes on loaded entities +\} + +// Commit your changes +await session.saveChanges(); +`} + + + + + +When _loading_ a document in a cluster-wide session, RavenDB attempts to retrieve the document from the document store: + +* **If the document is found**, it is loaded into the session, + and modifications will be saved successfully as long as no other session has modified the document in the meantime. + Specifically, if the document’s [change vector](../../../server/clustering/replication/change-vector.mdx) matches the one currently stored on the server, + the save will proceed - after which the Raft index of the associated atomic guard will be incremented as expected. + Otherwise, RavenDB will fail the operation with a _ConcurrencyException_. + +* **If no document is found**, RavenDB will check whether a matching atomic guard exists (as in the case when the document was deleted outside of a cluster-wide session): + * **If an atomic guard exists**, + the client constructs a change vector for the document using the atomic guard’s Raft index, and the document will be saved with this change vector. + * **If no atomic guard exists**, + the document is treated as a brand new document and will be saved as usual. + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-php.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-php.mdx new file mode 100644 index 0000000000..aee89e6ab0 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-php.mdx @@ -0,0 +1,276 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Atomic Guards** are [compare-exchange key/value items](../../../client-api/operations/compare-exchange/overview.mdx) + that RavenDB creates and manages **automatically** to guarantee + [ACID](../../../server/clustering/cluster-transactions.mdx#cluster-transaction-properties) transactions in cluster-wide sessions. + +* Each document is associated with its own unique atomic guard item. + Atomic guards coordinate between sessions that attempt to write to the same document concurrently. + Saving a document will be prevented if another session has modified the document. 
+ +* In this article: + * [Atomic guard creation and update](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-creation-and-update) + * [Atomic guard usage example](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-usage-example) + * [Atomic guard database scope](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-database-scope) + * [Disabling atomic guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx#disabling-atomic-guards) + * [When are atomic guards removed](../../../client-api/session/cluster-transaction/atomic-guards.mdx#when-are-atomic-guards-removed) + * [Best practice when storing a document in a cluster-wide transaction](../../../client-api/session/cluster-transaction/atomic-guards.mdx#best-practice-when-storing-a-document-in-a-cluster-wide-transaction) + + +## Atomic guard creation and update + + +Atomic guards are created and managed **only when the session's transaction mode is set to [ClusterWide](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)**. + +* **When creating a new document**: + A new atomic guard is created when a new document is successfully saved. + +* **When modifying an existing document that already has an atomic guard**: + * The atomic guard’s Raft index is incremented when the document is successfully saved after being modified. + This allows RavenDB to detect that the document has changed. + * If another session had loaded the document before the document's version changed, it will not be able to save its changes + unless it first reloads the updated version. Otherwise, a `ConcurrencyException` is thrown. + +* **When modifying an existing document that doesn't have an atomic guard**: + * A new atomic guard is created when modifying an existing document that does not yet have one. + * The absence of the atomic guard may be because the document was created in a single-node session, + or because its atomic guard was manually removed (which is not recommended). + +* **When saving a document fails**: + * If a session's `saveChanges()` fails, the entire session is rolled back and the atomic guard is Not created. + * Ensure your business logic is designed to re-execute the session in case saving changes fails for any reason. + + + +## Atomic guard usage example + +In the code sample below, an atomic guard is automatically created when a new document is saved. +It is then used to detect and prevent conflicting writes: when two sessions load and modify the same document, +only the first save succeeds, and the second fails with a _ConcurrencyException_. + + + +{`// Open a cluster-wide session: +$sessionOptions = new SessionOptions(); +$sessionOptions->setTransactionMode(TransactionMode::clusterWide()); + +$session = $store->openSession($sessionOptions); + +try \{ + $session->store(new User(), "users/johndoe"); + // An atomic-guard is now automatically created for the new document "users/johndoe". 
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+
+// Open two concurrent cluster-wide sessions:
+
+$sessionOptions1 = new SessionOptions();
+$sessionOptions1->setTransactionMode(TransactionMode::clusterWide());
+
+$session1 = $store->openSession($sessionOptions1);
+try \{
+    $sessionOptions2 = new SessionOptions();
+    $sessionOptions2->setTransactionMode(TransactionMode::clusterWide());
+    $session2 = $store->openSession($sessionOptions2);
+
+    try \{
+        // Both sessions load the same document:
+        $loadedUser1 = $session1->load(User::class, "users/johndoe");
+        $loadedUser1->setName("jindoe");
+
+        $loadedUser2 = $session2->load(User::class, "users/johndoe");
+        $loadedUser2->setName("jandoe");
+
+        // session1 saves its changes first -
+        // this increments the Raft index of the associated atomic guard.
+        $session1->saveChanges();
+
+        // session2 tries to save using an outdated atomic guard version
+        // and fails with a ConcurrencyException.
+        $session2->saveChanges();
+    \} finally \{
+        $session2->close();
+    \}
+
+\} finally \{
+    $session1->close();
+\}
+`}
+
+
+After running the above example, you can view the automatically created atomic guard in the Studio’s
+[Compare-Exchange view](../../../studio/database/documents/compare-exchange-view.mdx#the-compare-exchange-view):
+
+![Atomic Guard](./assets/atomic-guard.png)
+
+1. These are **custom compare-exchange items** that were manually created by you,
+   e.g., via the [Put compare exchange operation](../../../client-api/operations/compare-exchange/put-compare-exchange-value.mdx) - for any purpose you need.
+   They are NOT the automatically created atomic guards.
+
+2. This is the **atomic guard** that was generated by running the example above.
+
+   The generated atomic guard **key** is: `rvn-atomic/users/johndoe`.
+   It is composed of:
+   * The prefix `rvn-atomic/`.
+   * The ID of the associated document.
+
+
+  * Although this Studio view allows editing compare-exchange items, **do not delete or modify atomic guard entries**.
+  * Doing so will interfere with RavenDB's ability to track document versioning through atomic guards.
+
+
+
+
+## Atomic guard database scope
+
+* Atomic guards are local to the database on which they were defined.
+
+* Since atomic guards are implemented as compare-exchange items,
+  they are Not externally replicated to other databases by any ongoing replication task.
+  Learn more in [why compare-exchange items are not replicated](../../../client-api/operations/compare-exchange/overview.mdx#why-compare-exchange-items-are-not-replicated-to-external-databases).
+
+
+
+## Disabling atomic guards
+
+* Before atomic guards were introduced (in RavenDB 5.2), client code had to explicitly manage compare-exchange entries
+  to ensure concurrency control and maintain ACID guarantees in cluster-wide transactions.
+
+* You can still take this manual approach by disabling the automatic use of atomic guards in a cluster-wide session,
+  and managing the required [compare-exchange key/value pairs](../../../client-api/operations/compare-exchange/overview.mdx) yourself,
+  as shown in this [example](../../../client-api/operations/compare-exchange/overview.mdx#example-i---email-address-reservation).
+
+* To disable the automatic creation and use of atomic guards in a cluster-wide session,
+  set the session's `disableAtomicDocumentWritesInClusterWideTransaction` option to `true`,
+  via `SessionOptions::setDisableAtomicDocumentWritesInClusterWideTransaction()`.
+
+
+
+{`$sessionOptions = new SessionOptions();
+$sessionOptions->setTransactionMode(TransactionMode::clusterWide());
+$sessionOptions->setDisableAtomicDocumentWritesInClusterWideTransaction(true);
+
+$session = $store->openSession($sessionOptions);
+
+try \{
+    $session->store(new User(), "users/johndoe");
+    // No atomic-guard will be created upon saveChanges
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+## When are atomic guards removed
+
+Atomic guards are removed **automatically** in the following scenarios
+(you don't need to clean them up manually):
+
+* **Document deleted via a cluster-wide session**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Delete the document using a cluster-wide session - its atomic guard will be removed automatically.
+
+* **Document expires via the expiration feature** (see the sketch after the best-practice section below):
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Add the `@expires` metadata property to the document, as described in [Document expiration](../../../studio/database/settings/document-expiration.mdx).
+  * When the expiration time is reached, the document and its atomic guard will both be removed automatically.
+  * Since different cleanup tasks handle the removal of **expired** documents and the removal of their associated atomic guards,
+    atomic guards of removed documents may linger in the compare-exchange entries list a short while longer before they are removed.
+    You do Not need to remove such atomic guards yourself; they will be removed by the cleanup task.
+
+
+* **Do not delete or modify atomic guards manually while they are in use by an active session**.
+  If a session attempts to save a document whose atomic guard has been removed or changed,
+  it will fail with an error.
+
+* If you accidentally remove an atomic guard that is associated with an existing document,
+  you can restore it by re-saving the document in a cluster-wide session;
+  this will re-create the atomic guard automatically.
+
+
+
+
+## Best practice when storing a document in a cluster-wide transaction
+
+* When working with a cluster-wide session,
+  we recommend that you always **`load` the document into the session before storing it** -
+  even if the document is expected to be a new document.
+
+* This is especially important if a document (originally created in a cluster-wide transaction) was deleted **outside** of a cluster-wide session -
+  e.g., when using a [single-node session](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction)
+  or the [DeleteByQueryOperation](../../../client-api/operations/common/delete-by-query.mdx).
+  In these cases, the document is deleted, but the atomic guard remains (it is not automatically removed).
+  If you attempt to re-create such a document without loading it first,
+  RavenDB will fail to save it because the session is unaware of the existing atomic guard’s latest Raft index.
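+
+Before the loading example below, here is a minimal sketch (PHP) of the expiration scenario referenced above.
+It assumes the [Document expiration](../../../studio/database/settings/document-expiration.mdx) feature is enabled on the database,
+and that the PHP client exposes `getMetadataFor()` on the advanced session like the other RavenDB clients;
+the one-hour expiry is illustrative only.
+
+{`$sessionOptions = new SessionOptions();
+$sessionOptions->setTransactionMode(TransactionMode::clusterWide());
+
+$session = $store->openSession($sessionOptions);
+try \{
+    $user = new User();
+    $session->store($user, "users/johndoe");
+
+    // Schedule the document to expire in one hour (UTC, ISO 8601):
+    $expiry = (new DateTime("+1 hour", new DateTimeZone("UTC")))->format(DateTimeInterface::ATOM);
+    $session->advanced()->getMetadataFor($user)->put("@expires", $expiry);
+
+    $session->saveChanges();
+    // Once the expiration time passes, the server removes the document,
+    // and a cleanup task removes its atomic guard shortly after.
+\} finally \{
+    $session->close();
+\}
+`}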
+ +---- + +In this example, the document is loaded into the session BEFORE creating or modifying it: + + + +{`// Open a cluster-wide session +$sessionOptions = new SessionOptions(); +$sessionOptions->setTransactionMode(TransactionMode::clusterWide()); + +$session = $store->openSession($sessionOptions); +try \{ + // Load the user document BEFORE creating or updating + $user = $session->load(User::class, "users/johndoe"); + + if ($user === null) \{ + // Document doesn't exist => create a new document: + $newUser = new User(); + $newUser->setName("John Doe"); + // ... initialize other properties + + // Store the new user document in the session + $session->store($newUser, "users/johndoe"); + \} else \{ + // Document exists => apply your modifications: + $user->setName("New name"); + // ... make any other updates + + // No need to call Store() again + // RavenDB tracks changes on loaded entities + \} + + // Commit your changes + $session->saveChanges(); +\} finally \{ + $session->close(); +\} +`} + + + + + +When _loading_ a document in a cluster-wide session, RavenDB attempts to retrieve the document from the document store: + +* **If the document is found**, it is loaded into the session, + and modifications will be saved successfully as long as no other session has modified the document in the meantime. + Specifically, if the document’s [change vector](../../../server/clustering/replication/change-vector.mdx) matches the one currently stored on the server, + the save will proceed - after which the Raft index of the associated atomic guard will be incremented as expected. + Otherwise, RavenDB will fail the operation with a _ConcurrencyException_. + +* **If no document is found**, RavenDB will check whether a matching atomic guard exists (as in the case when the document was deleted outside of a cluster-wide session): + * **If an atomic guard exists**, + the client constructs a change vector for the document using the atomic guard’s Raft index, and the document will be saved with this change vector. + * **If no atomic guard exists**, + the document is treated as a brand new document and will be saved as usual. + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-python.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-python.mdx new file mode 100644 index 0000000000..e650aac9bf --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_atomic-guards-python.mdx @@ -0,0 +1,251 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Atomic Guards** are [compare-exchange key/value items](../../../client-api/operations/compare-exchange/overview.mdx) + that RavenDB creates and manages **automatically** to guarantee + [ACID](../../../server/clustering/cluster-transactions.mdx#cluster-transaction-properties) transactions in cluster-wide sessions. + +* Each document is associated with its own unique atomic guard item. + Atomic guards coordinate between sessions that attempt to write to the same document concurrently. + Saving a document will be prevented if another session has modified the document. 
+ +* In this article: + * [Atomic guard creation and update](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-creation-and-update) + * [Atomic guard usage example](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-usage-example) + * [Atomic guard database scope](../../../client-api/session/cluster-transaction/atomic-guards.mdx#atomic-guard-database-scope) + * [Disabling atomic guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx#disabling-atomic-guards) + * [When are atomic guards removed](../../../client-api/session/cluster-transaction/atomic-guards.mdx#when-are-atomic-guards-removed) + * [Best practice when storing a document in a cluster-wide transaction](../../../client-api/session/cluster-transaction/atomic-guards.mdx#best-practice-when-storing-a-document-in-a-cluster-wide-transaction) + + +## Atomic guard creation and update + + +Atomic guards are created and managed **only when the session's transaction mode is set to [CLUSTER_WIDE](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)**. + +* **When creating a new document**: + A new atomic guard is created when a new document is successfully saved. + +* **When modifying an existing document that already has an atomic guard**: + * The atomic guard’s Raft index is incremented when the document is successfully saved after being modified. + This allows RavenDB to detect that the document has changed. + * If another session had loaded the document before the document's version changed, it will not be able to save its changes + unless it first reloads the updated version. Otherwise, a `ConcurrencyException` is thrown. + +* **When modifying an existing document that doesn't have an atomic guard**: + * A new atomic guard is created when modifying an existing document that does not yet have one. + * The absence of the atomic guard may be because the document was created in a single-node session, + or because its atomic guard was manually removed (which is not recommended). + +* **When saving a document fails**: + * If a session's `save_changes()` fails, the entire session is rolled back and the atomic guard is Not created. + * Ensure your business logic is designed to re-execute the session in case saving changes fails for any reason. + + + +## Atomic guard usage example + +In the code sample below, an atomic guard is automatically created when a new document is saved. +It is then used to detect and prevent conflicting writes: when two sessions load and modify the same document, +only the first save succeeds, and the second fails with a _ConcurrencyException_. 
+
+
+
+{`with store.open_session(
+    # Open a cluster-wide session:
+    session_options=SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE)
+) as session:
+    session.store(User(), "users/johndoe")
+    session.save_changes()
+    # An atomic-guard is now automatically created for the new document "users/johndoe"
+
+# Open two concurrent cluster-wide sessions:
+with store.open_session(
+    session_options=SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE)
+) as session1:
+    with store.open_session(
+        session_options=SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE)
+    ) as session2:
+        # Both sessions load the same document:
+        loaded_user_1 = session1.load("users/johndoe", User)
+        loaded_user_1.name = "jindoe"
+        loaded_user_2 = session2.load("users/johndoe", User)
+        loaded_user_2.name = "jandoe"
+
+        # session1 saves its changes first -
+        # this increments the Raft index of the associated atomic guard.
+        session1.save_changes()
+
+        # session2 tries to save using an outdated atomic guard version
+        # and fails with a ConcurrencyException.
+        session2.save_changes()
+`}
+
+
+After running the above example, you can view the automatically created atomic guard in the Studio’s
+[Compare-Exchange view](../../../studio/database/documents/compare-exchange-view.mdx#the-compare-exchange-view):
+
+![Atomic Guard](./assets/atomic-guard.png)
+
+1. These are **custom compare-exchange items** that were manually created by you,
+   e.g., via the [Put compare exchange operation](../../../client-api/operations/compare-exchange/put-compare-exchange-value.mdx) - for any purpose you need.
+   They are NOT the automatically created atomic guards.
+
+2. This is the **atomic guard** that was generated by running the example above.
+
+   The generated atomic guard **key** is: `rvn-atomic/users/johndoe`.
+   It is composed of:
+   * The prefix `rvn-atomic/`.
+   * The ID of the associated document.
+
+
+  * Although this Studio view allows editing compare-exchange items, **do not delete or modify atomic guard entries**.
+  * Doing so will interfere with RavenDB's ability to track document versioning through atomic guards.
+
+
+
+
+## Atomic guard database scope
+
+* Atomic guards are local to the database on which they were defined.
+
+* Since atomic guards are implemented as compare-exchange items,
+  they are Not externally replicated to other databases by any ongoing replication task.
+  Learn more in [why compare-exchange items are not replicated](../../../client-api/operations/compare-exchange/overview.mdx#why-compare-exchange-items-are-not-replicated-to-external-databases).
+
+
+
+## Disabling atomic guards
+
+* Before atomic guards were introduced (in RavenDB 5.2), client code had to explicitly manage compare-exchange entries
+  to ensure concurrency control and maintain ACID guarantees in cluster-wide transactions.
+
+* You can still take this manual approach by disabling the automatic use of atomic guards in a cluster-wide session,
+  and managing the required [compare-exchange key/value pairs](../../../client-api/operations/compare-exchange/overview.mdx) yourself,
+  as shown in this [example](../../../client-api/operations/compare-exchange/overview.mdx#example-i---email-address-reservation).
+
+* To disable the automatic creation and use of atomic guards in a cluster-wide session,
+  set the session's `disable_atomic_document_writes_in_cluster_wide_transaction` configuration option to `True`.
+
+
+
+{`with store.open_session(
+    # Open a cluster-wide session
+    session_options=SessionOptions(
+        transaction_mode=TransactionMode.CLUSTER_WIDE,
+        disable_atomic_document_writes_in_cluster_wide_transaction=True,
+    )
+) as session:
+    session.store(User(), "users/johndoe")
+
+    # No atomic-guard will be created upon save_changes
+    session.save_changes()
+`}
+
+
+
+
+## When are atomic guards removed
+
+Atomic guards are removed **automatically** in the following scenarios
+(you don't need to clean them up manually):
+
+* **Document deleted via a cluster-wide session**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Delete the document using a cluster-wide session - its atomic guard will be removed automatically.
+
+* **Document expires via the expiration feature**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Add the `@expires` metadata property to the document, as described in [Document expiration](../../../studio/database/settings/document-expiration.mdx).
+  * When the expiration time is reached, the document and its atomic guard will both be removed automatically.
+  * Since different cleanup tasks handle the removal of **expired** documents and the removal of their associated atomic guards,
+    atomic guards of removed documents may linger in the compare-exchange entries list a short while longer before they are removed.
+    You do Not need to remove such atomic guards yourself; they will be removed by the cleanup task.
+
+
+* **Do not delete or modify atomic guards manually while they are in use by an active session**.
+  If a session attempts to save a document whose atomic guard has been removed or changed,
+  it will fail with an error.
+
+* If you accidentally remove an atomic guard that is associated with an existing document,
+  you can restore it by re-saving the document in a cluster-wide session;
+  this will re-create the atomic guard automatically.
+
+
+
+
+## Best practice when storing a document in a cluster-wide transaction
+
+* When working with a cluster-wide session,
+  we recommend that you always **`load` the document into the session before storing it** -
+  even if the document is expected to be a new document.
+
+* This is especially important if a document (originally created in a cluster-wide transaction) was deleted **outside** of a cluster-wide session -
+  e.g., when using a [single-node session](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction)
+  or the [DeleteByQueryOperation](../../../client-api/operations/common/delete-by-query.mdx).
+  In these cases, the document is deleted, but the atomic guard remains (it is not automatically removed).
+  If you attempt to re-create such a document without loading it first,
+  RavenDB will fail to save it because the session is unaware of the existing atomic guard’s latest Raft index.
+
+----
+
+In this example, the document is loaded into the session BEFORE creating or modifying it:
+
+
+
+{`with store.open_session(
+    session_options=SessionOptions(
+        # Open a cluster-wide session
+        transaction_mode=TransactionMode.CLUSTER_WIDE
+    )
+) as session:
+    # Load the user document BEFORE creating or updating
+    user = session.load("users/johndoe", User)
+
+    if user is None:
+        # Document doesn't exist => create a new document
+        new_user = User()
+        new_user.name = "John Doe"
+        # ...
 initialize other properties
+
+        # Store the new user document in the session
+        session.store(new_user, "users/johndoe")
+    else:
+        # Document exists => apply your modifications
+        user.name = "New name"
+        # ... make any other updates
+
+        # No need to call store() again
+        # RavenDB tracks changes on loaded entities
+
+    # Commit your changes
+    session.save_changes()
+`}
+
+
+
+
+
+When _loading_ a document in a cluster-wide session, RavenDB attempts to retrieve the document from the document store:
+
+* **If the document is found**, it is loaded into the session,
+  and modifications will be saved successfully as long as no other session has modified the document in the meantime.
+  Specifically, if the document’s [change vector](../../../server/clustering/replication/change-vector.mdx) matches the one currently stored on the server,
+  the save will proceed - after which the Raft index of the associated atomic guard will be incremented as expected.
+  Otherwise, RavenDB will fail the operation with a _ConcurrencyException_.
+
+* **If no document is found**, RavenDB will check whether a matching atomic guard exists (as in the case when the document was deleted outside of a cluster-wide session):
+  * **If an atomic guard exists**,
+    the client constructs a change vector for the document using the atomic guard’s Raft index, and the document will be saved with this change vector.
+  * **If no atomic guard exists**,
+    the document is treated as a brand new document and will be saved as usual.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_category_.json b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_category_.json
new file mode 100644
index 0000000000..df626d8460
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 7,
+  "label": "Cluster Transaction"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-csharp.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-csharp.mdx
new file mode 100644
index 0000000000..ed76e0d51e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-csharp.mdx
@@ -0,0 +1,227 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Compare-Exchange items can be created and managed on the advanced session (`Session.Advanced`).
+  Other options are listed in this [compare-exchange overview](../../../client-api/operations/compare-exchange/overview.mdx#how-to-create-and-manage-compare-exchange-items).
+
+* When working with compare-exchange items from the session,
+  the session **must be opened as a [cluster-wide session](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)**.
+
+* In this page:
+  * [Create compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#create-compare-exchange)
+  * [Get compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#get-compare-exchange)
+  * [Delete compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#delete-compare-exchange)
+
+## Create compare-exchange
+
+
+#### Example
+
+
+
+
+{`// The session must be first opened with cluster-wide mode
+session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+    key: "Best NoSQL Transactional Database",
+    value: "RavenDB"
+);
+
+session.SaveChanges();
+`}
+
+
+
+
+{`// The session must be first opened with cluster-wide mode
+session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+    key: "Best NoSQL Transactional Database",
+    value: "RavenDB"
+);
+
+await session.SaveChangesAsync();
+`}
+
+
+
+
+* `SaveChanges()` throws a `ConcurrencyException` if the key already exists.
+* An `InvalidOperationException` exception is thrown if the session was Not opened as **cluster-wide**.
+
+
+
+
+#### Syntax
+
+
+
+
+{`session.Advanced.ClusterTransaction.CreateCompareExchangeValue(key, value);
+`}
+
+
+
+
+{`session.Advanced.ClusterTransaction.CreateCompareExchangeValue(key, value);
+`}
+
+
+
+
+| Parameters | Type | Description |
+|--------------|----------|--------------------------------------------------------------------|
+| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. |
+| **value** | `T` | The associated value to store for the key |
+
+| Return Value | Description |
+|------------------------------|-------------------------------------------|
+| `CompareExchangeValue<T>` | The new compare-exchange item is returned |
+
+
+
+#### The CompareExchangeValue
+
+| Parameters | Type | Description |
+|--------------|----------|--------------------------------------------------------------------|
+| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. |
+| **value** | `T` | The value associated with the key |
+| **index** | `long` | Index for concurrency control |
+
+
+
+
+## Get compare-exchange
+
+
+#### Get single value
+
+
+
+
+{`session.Advanced.ClusterTransaction.GetCompareExchangeValue(key);
+`}
+
+
+
+
+{`await session.Advanced.ClusterTransaction.GetCompareExchangeValueAsync(key);
+`}
+
+
+
+
+| Parameters | Type | Description |
+|--------------|----------|---------------------|
+| **key** | `string` | The key to retrieve |
+
+| Return Value | Description |
+| ------------- | ----- |
+| `CompareExchangeValue<T>` | If the key doesn't exist, it will return `null` |
+
+
+
+
+#### Get multiple values
+
+
+
+
+{`session.Advanced.ClusterTransaction.GetCompareExchangeValues(keys);
+`}
+
+
+
+
+{`await session.Advanced.ClusterTransaction.GetCompareExchangeValuesAsync(keys);
+`}
+
+
+
+
+| Parameters | Type | Description |
+|--------------|------------|---------------------------|
+| **keys** | `string[]` | Array of keys to retrieve |
+
+| Return Value | Description |
+| ------------- | ----- |
+| `Dictionary<string, CompareExchangeValue<T>>` | If a key doesn't exist, the associated value will be `null` |
+
+
+
+#### Get compare-exchange lazily
+
+
+
+
+{`// Single value
+session.Advanced.ClusterTransaction.Lazily.GetCompareExchangeValue(key);
+
+// Multiple values
+session.Advanced.ClusterTransaction.Lazily.GetCompareExchangeValues(keys);
+`}
+
+
+
+
+{`// Single value
+session.Advanced.ClusterTransaction.Lazily.GetCompareExchangeValueAsync(key);
+
+// Multiple values
+session.Advanced.ClusterTransaction.Lazily.GetCompareExchangeValuesAsync(keys);
+`}
+
+
+
+
+| Parameters | Type | Description |
+|-------------|------------|---------------------------|
+| **key** | `string` | The key to retrieve |
+| **keys** | `string[]` | Array of keys to retrieve |
+
+| Return Value | Description |
+| ------------- | ----- |
+| `Lazy<CompareExchangeValue<T>>` | If the key doesn't exist, it will return `null` |
+| `Lazy<Dictionary<string, CompareExchangeValue<T>>>` | If a key doesn't exist, the associated value will be `null` |
+
+
+
+## Delete compare-exchange
+
+
+
+
+
+
+{`// Delete by key & index
+session.Advanced.ClusterTransaction.DeleteCompareExchangeValue(key, index);
+
+// Delete by compare-exchange item
+session.Advanced.ClusterTransaction.DeleteCompareExchangeValue(item);
+`}
+
+
+
+
+{`// Delete by key & index
+session.Advanced.ClusterTransaction.DeleteCompareExchangeValue(key, index);
+
+// Delete by compare-exchange item
+session.Advanced.ClusterTransaction.DeleteCompareExchangeValue(item);
+`}
+
+
+
+
+| Parameters | Type | Description |
+|------------|------------------------------|------------------------------------------------|
+| **key** | `string` | The key of the compare-exchange item to delete |
+| **index** | `long` | The index of this compare-exchange item |
+| **item** | `CompareExchangeValue<T>` | The compare-exchange item to delete |
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-nodejs.mdx
new file mode 100644
index 0000000000..aae949224f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-nodejs.mdx
@@ -0,0 +1,173 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Compare-Exchange items can be created and managed on the advanced session (`session.advanced`).
+  Other options are listed in this [compare-exchange
overview](../../../client-api/operations/compare-exchange/overview.mdx#how-to-create-and-manage-compare-exchange-items). + +* When working with compare-exchange items from the session, + the session **must be opened as a [cluster-wide session](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)**. + +* In this page: + * [Create compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#create-compare-exchange) + * [Get compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#get-compare-exchange) + * [Delete compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#delete-compare-exchange) + +## Create compare-exchange + + +#### Example + + + +{`// The session must be first opened with cluster-wide mode +const session = documentStore.openSession(\{ + transactionMode: "ClusterWide" +\}); + +session.advanced.clusterTransaction.createCompareExchangeValue( + "Best NoSQL Transactional Database", "RavenDB" // key, value +); + +await session.saveChanges(); +`} + + + +* An `InvalidOperationException` exception is thrown when: + * The session was Not opened as **cluster-wide**. + * The key already exists in the database. + + + + +#### Syntax + + + +{`session.advanced.clusterTransaction.createCompareExchangeValue(key, item); +`} + + + +| Parameters | Type | Description | +|--------------|-----------|--------------------------------------------------------------------| +| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. | +| **value** | `object` | The associated value to store for the key | + +| Return value | Description | +|---------------|-------------------------------------------| +| `object` | The new compare-exchange item is returned | + + + +#### The compare exchange object returned + +| Parameters | Type | Description | +|--------------|----------|--------------------------------------------------------------------| +| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. | +| **value** | `object` | The value associated with the key | +| **index** | `number` | Index for concurrency control | + + + + +## Get compare-exchange + + +#### Get single value + + + +{`await session.advanced.clusterTransaction.getCompareExchangeValue(key); +`} + + + +| Parameters | Type | Description | +|--------------|----------|--------------------------------| +| **key** | `string` | The key to retrieve | + +| Return value | Description | +|---------------|---------------------------------------------------------------------------------| +| `object` | The compare-exchange item is returned.
Returns `null` if key doesn't exist. |
+
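+
+For instance, the item returned for an existing key exposes the `key`, `value`, and `index` fields described
+earlier on this page; a minimal sketch (the key name is illustrative, and the session must be cluster-wide):
+
+{`const item = await session.advanced.clusterTransaction.getCompareExchangeValue(
+    "Best NoSQL Transactional Database"
+);
+
+if (item !== null) \{
+    // key, value, and the index used for concurrency control:
+    console.log(item.key, item.value, item.index);
+\}
+`}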
+
+
+#### Get multiple values
+
+
+
+{`await session.advanced.clusterTransaction.getCompareExchangeValues(keys);
+`}
+
+
+
+| Parameters | Type | Description |
+|-------------|-------------|---------------------------|
+| **keys** | `string[]` | Array of keys to retrieve |
+
+| Return value | Description |
+|---------------------------------------------|--------------------------------------------------------------|
+| `Record<string, CompareExchangeValue<T>>` | If a key doesn't exist, the associated value will be `null` |
+
+
+
+#### Get compare-exchange lazily
+
+
+
+{`// Single item
+const lazyItem = session.advanced.clusterTransaction.lazily.getCompareExchangeValue(key);
+const item = await lazyItem.getValue();
+
+// Multiple items
+const lazyItems = session.advanced.clusterTransaction.lazily.getCompareExchangeValues(keys);
+const items = await lazyItems.getValue();
+`}
+
+
+
+| Parameters | Type | Description |
+|--------------|------------|---------------------------|
+| **key** | `string` | The key to retrieve |
+| **keys** | `string[]` | Array of keys to retrieve |
+
+| Return value - after calling `getValue` | Description |
+|-----------------------------------------|------------------------------------------------------------------------------------|
+| `object` | For single item: if the key doesn't exist, it will return `null` |
+| `Record<string, CompareExchangeValue<T>>` | For multiple items: if a key doesn't exist, the associated value will be `null` |
+
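+
+Lazy requests are not sent to the server when they are defined; pending lazy operations can be executed together
+in a single round trip. A minimal sketch, assuming the session's `eagerly.executeAllPendingLazyOperations()` helper
+(the keys are illustrative):
+
+{`// Define lazy requests - no server call is made yet:
+const lazyItem = session.advanced.clusterTransaction.lazily.getCompareExchangeValue("key/1");
+const lazyItems = session.advanced.clusterTransaction.lazily.getCompareExchangeValues(["key/2", "key/3"]);
+
+// Send all pending lazy operations to the server in one round trip:
+await session.advanced.eagerly.executeAllPendingLazyOperations();
+
+// The results are now available:
+const item = await lazyItem.getValue();
+const items = await lazyItems.getValue();
+`}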
+ + +## Delete compare-exchange + + + + + +{`// Delete by key & index +session.advanced.clusterTransaction.deleteCompareExchangeValue(key, index); + +// Delete by compare-exchange item +session.advanced.clusterTransaction.deleteCompareExchangeValue(item); +`} + + + +| Parameters | Type | Description | +|------------|----------|------------------------------------------------| +| **key** | `string` | The key of the compare-exchange item to delete | +| **index** | `number` | The index of this compare-exchange item | +| **item** | `object` | The compare-exchange item to delete | + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-php.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-php.mdx new file mode 100644 index 0000000000..d10c22c0fc --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-php.mdx @@ -0,0 +1,165 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Compare-Exchange items can be created and managed on the advanced session (`session.advanced`). + Other options are listed in this [compare-exchange overview](../../../client-api/operations/compare-exchange/overview.mdx#how-to-create-and-manage-compare-exchange-items). + +* When working with compare-exchange items from the session, + the session **must be opened as a [cluster-wide session](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)**. + +* In this page: + * [Create compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#create-compare-exchange) + * [Get compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#get-compare-exchange) + * [Delete compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#delete-compare-exchange) + +## Create compare-exchange + + +#### Example + + + +{`// The session must be first opened with cluster-wide mode +$session->advanced()->clusterTransaction()->createCompareExchangeValue( + key: "Best NoSQL Transactional Database", + value: "RavenDB" +); + +$session->saveChanges(); +`} + + + +* `saveChanges()` throws a `ConcurrencyException` if the key already exists. +* An `InvalidOperationException` exception is thrown if the session was Not opened as **cluster-wide**. + + + + +#### Syntax + + + +{`$session->advanced()->clusterTransaction()->createCompareExchangeValue($key, $value); +`} + + + +| Parameters | Type | Description | +|--------------|----------|--------------------------------------------------------------------| +| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. | +| **value** | `T` | The associated value to store for the key | + +| Return Value | Description | +|---------------------------|-------------------------------------------| +| `CompareExchangeValue` | The new compare-exchange item is returned | + + + +#### The CompareExchangeValue + +| Parameters | Type | Description | +|--------------|----------|--------------------------------------------------------------------| +| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. 
|
+| **value** | `T` | The value associated with the key |
+| **index** | `long` | Index for concurrency control |
+
+
+
+
+## Get compare-exchange
+
+
+#### Get single value
+
+
+
+{`$session->advanced()->clusterTransaction()->getCompareExchangeValue(null, $key);
+`}
+
+
+
+| Parameters | Type | Description |
+|--------------|----------|---------------------|
+| **key** | `string` | The key to retrieve |
+
+| Return Value | Description |
+| ------------- | ----- |
+| `CompareExchangeValue<T>` | The compare exchange value, or `null` if it doesn't exist |
+
+
+
+
+#### Get multiple values
+
+
+
+{`$session->advanced()->clusterTransaction()->getCompareExchangeValues(null, $keys);
+`}
+
+
+
+| Parameters | Type | Description |
+|--------------|------------|---------------------------|
+| **keys** | `string[]` | Array of keys to retrieve |
+
+| Return Value | Description |
+| ------------- | ----- |
+| `Dictionary<string, CompareExchangeValue<T>>` | If a key doesn't exist, the associated value will be `null` |
+
+
+
+#### Get compare-exchange lazily
+
+
+
+{`// Single value
+$session->advanced()->clusterTransaction()->lazily()->getCompareExchangeValue(null, $key);
+
+// Multiple values
+$session->advanced()->clusterTransaction()->lazily()->getCompareExchangeValues(null, $keys);
+`}
+
+
+
+| Parameters | Type | Description |
+|-------------|------------|---------------------------|
+| **key** | `string` | The key to retrieve |
+| **keys** | `string[]` | Array of keys to retrieve |
+
+| Return Value | Description |
+| ------------- | ----- |
+| `Lazy<CompareExchangeValue<T>>` | If the key doesn't exist, it will return `null` |
+| `Lazy<Dictionary<string, CompareExchangeValue<T>>>` | If a key doesn't exist, the associated value will be `null` |
+
+
+
+## Delete compare-exchange
+
+
+
+
+{`// Delete by key & index
+$session->advanced()->clusterTransaction()->deleteCompareExchangeValue($key, $index);
+
+// Delete by compare-exchange item
+$session->advanced()->clusterTransaction()->deleteCompareExchangeValue($item);
+`}
+
+
+
+| Parameters | Type | Description |
+|------------|------------------------------|------------------------------------------------|
+| **key** | `string` | The key of the compare-exchange item to delete |
+| **index** | `long` | The index of this compare-exchange item |
+| **item** | `CompareExchangeValue<T>` | The compare-exchange item to delete |
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-python.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-python.mdx
new file mode 100644
index 0000000000..1d6e8d2dd1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_compare-exchange-python.mdx
@@ -0,0 +1,167 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Compare-Exchange items can be created and managed on the advanced session (`session.advanced`).
+  Other options are listed in this [compare-exchange overview](../../../client-api/operations/compare-exchange/overview.mdx#how-to-create-and-manage-compare-exchange-items).
+
+* When working with compare-exchange items from the session,
+  the session **must be opened as a [cluster-wide session](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)**.
+ +* In this page: + * [Create compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#create-compare-exchange) + * [Get compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#get-compare-exchange) + * [Delete compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#delete-compare-exchange) + +## Create compare-exchange + + +#### Example + + + +{`# The session must be first opened with cluster-wide mode +session.advanced.cluster_transaction.create_compare_exchange_value( + key="Best NoSQL Transactional Database", + item="RavenDB", +) + +session.save_changes() +`} + + + +* `save_changes()` throws a `ConcurrencyException` if the key already exists. +* A `RuntimeError` exception is thrown if the session was Not opened as **cluster-wide**. + + + + +#### Syntax + + + +{`session.advanced.cluster_transaction.create_compare_exchange_value(key, value) +`} + + + +| Parameters | Type | Description | +|--------------|----------|------------------------------------------------------------| +| **key** | `str` | The key for the compare-exchange item to be created
This string can be up to 512 bytes | +| **value** | `T` | The value to associate with this key | + +| Return Value | Description | +|---------------------------|-------------------------------------------| +| `CompareExchangeValue[T]` | The new compare-exchange item is returned | +
+ + +#### The CompareExchangeValue + +| Parameters | Type | Description | +|--------------|----------|---------------------------------------------------------------------| +| **key** | `str` | The compare-exchange item key
This string can be up to 512 bytes | +| **value** | `T` | The value associated with the key | +| **index** | `int` | Index for concurrency control | + +
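+
+For example, the item returned by `create_compare_exchange_value` carries the fields listed above;
+a minimal sketch (the key is illustrative, and the session must be opened as cluster-wide):
+
+{`item = session.advanced.cluster_transaction.create_compare_exchange_value(
+    key="Best NoSQL Transactional Database",
+    item="RavenDB",
+)
+
+session.save_changes()
+
+# key, value, and the index used for concurrency control:
+print(item.key, item.value, item.index)
+`}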
+ + +## Get compare-exchange + + +#### Get single value + + + +{`session.advanced.cluster_transaction.get_compare_exchange_value(key) +`} + + + +| Parameters | Type | Description | +|--------------|----------|---------------------| +| **key** | `str` | The key for a compare-exchange item whose value is requested | + +| Return Value | Description | +| ------------- | ----- | +| `CompareExchangeValue[T]`| The requested value
If the key doesn't exist the value associated with it will be `None` | + +
+ + +#### Get multiple values + + + +{`session.advanced.cluster_transaction.get_compare_exchange_values(keys) +`} + + + +| Parameters | Type | Description | +|--------------|-------------|---------------------------| +| **keys** | `List[str]` | An array of compare-exchange keys whose values are requested | + +| Return Value | Description | +| ------------- | ----- | +| `Dict[str, CompareExchangeValue[T]]` | A dictionary of requested values
If a key doesn't exist the value associated with it will be `None` | +
+ + +#### Get compare-exchange lazily + + + +{`# Single value +session.advanced.cluster_transaction.lazily.get_compare_exchange_value(key) + +# Multiple values +session.advanced.cluster_transaction.lazily.get_compare_exchange_values(keys) +`} + + + +| Parameters | Type | Description | +|-------------|-------------|---------------------------| +| **key** | `str` | The key for a compare-exchange item whose value is requested | +| **keys** | `List[str]` | An array of compare-exchange keys whose values are requested | + +| Return Value | Description | +| ------------- | ----- | +| `Lazy[CompareExchangeValue[T]]`| The requested value
If the key doesn't exist the value associated with it will be `None` | +| `Lazy[Dict[str, CompareExchangeValue[T]]]` | A dictionary of requested values
If a key doesn't exist the value associated with it will be `None` | +
+ + +## Delete compare-exchange + +To delete a compare exchange item, use either of the following methods. + + + + + +{`# Delete by key & index +session.advanced.cluster_transaction.delete_compare_exchange_value(key, index) + +# Delete by compare-exchange item +session.advanced.cluster_transaction.delete_compare_exchange_value(item) +`} + + + +| Parameters | Type | Description | +|------------|---------------------------|-------------------------------------------------| +| **key** | `str` | The key for the compare-exchange item to delete | +| **index** | `int` | The index for the compare-exchange item to delete | +| **item** | `CompareExchangeValue[T]` | The compare-exchange item to delete | + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-csharp.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-csharp.mdx new file mode 100644 index 0000000000..d735f73f24 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-csharp.mdx @@ -0,0 +1,117 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A session represents a single [business transaction](https://martinfowler.com/eaaCatalog/unitOfWork.html) + (not to be confused with an [ACID transaction](../../../client-api/faq/transaction-support.mdx)). + +* When opening a session, the session's mode can be set to either: + * **Single-Node** - transaction is executed on a specific node and then replicated + * **Cluster-Wide** - transaction is registered for execution on all nodes in an atomic fashion + +* In this page: + * [Open a cluster transaction](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction) + * [Cluster-wide transaction vs. Single-node transaction](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction) + + +## Open a cluster transaction + +* To work with a cluster transaction open a **cluster-wide session**, + by explicitly setting the `TransactionMode` to `TransactionMode.ClusterWide`. + + + + +{`using (var session = store.OpenSession(new SessionOptions +{ + // Set mode to be cluster-wide + TransactionMode = TransactionMode.ClusterWide + + // Session will be single-node when either: + // * Mode is not specified + // * Explicitly set TransactionMode.SingleNode +})) +`} + + + + +{`using (var session = store.OpenAsyncSession(new SessionOptions +{ + // Set mode to be cluster-wide + TransactionMode = TransactionMode.ClusterWide + + // Session will be single-node when either: + // * Mode is not specified + // * Explicitly set TransactionMode.SingleNode +})) +`} + + + + +* Similar to the single-node session, + any CRUD operations can be made on the cluster-wide session and the session will track them as usual. + + + +## Cluster-wide transaction vs. Single-node transaction + + +#### Cluster-Wide +* Cluster-wide transactions are **fully ACID** transactions across all the database-group nodes. + Implemented by the Raft algorithm, the cluster must first reach a consensus. + Once the majority of the nodes have approved the transaction, + the transaction is registered for execution in the transaction queue of all nodes in an atomic fashion. +* The transaction will either **succeed on all nodes or be rolled-back**. 
+ * The transaction is considered successful only when successfully registered on all the database-group nodes. + Once executed on all nodes, the data is consistent and available on all nodes. + * A failure to register the transaction on any node will cause the transaction to roll-back on all nodes and changes will Not be applied. +* The only **actions available** are: + * PUT / DELETE a document + * PUT / DELETE a compare-exchange item +* To prevent from concurrent documents modifications, + the server creates [Atomic-Guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx) that will be associated with the documents. + An Atomic-Guard will be created when: + * A new document is created + * Modifying an existing document that doesn't have yet an Atomic-Guard +* Cluster-wide transactions are **conflict-free**. +* The cluster-wide transaction is considered **more expensive and less performant** + since a cluster consensus is required prior to execution. +* **Prefer a cluster-wide transaction when**: + * Prioritizing consistency over performance & availability + * When you would rather fail if a successful operation on all nodes cannot be ensured + + + + +#### Single-Node +* A single-node transaction is considered successful once executed successfully on the node the client is communicating with. + The data is **immediately available** on that node, and it will be **eventually-consistent** across all the other database nodes when the replication process takes place soon after. +* **Any action is available** except for PUT / DELETE a compare-exchange item. + No Atomic-Guards are created by the server. +* **Conflicts** may occur when two concurrent transactions modify the same document on different nodes at the same time. + They are resolved according to the defined conflict settings, either by using the latest version (default) or by following a conflict resolution script. + Revisions are created for the conflicting documents so that any document can be recovered. +* The single-node transaction is considered **faster and less expensive**, + as no cluster consensus is required for its execution. +* **Prefer a single-node transaction when**: + * Prioritizing performance & availability over consistency + * When immediate data persistence is crucial + * When you must ensure data is written even when other nodes are not reachable at the moment + * And - when resolving occasional conflicts is acceptable + + + + + +For a detailed description of transactions in RavenDB please refer to the [Transaction support in RavenDB](../../../client-api/faq/transaction-support.mdx) article. + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-nodejs.mdx new file mode 100644 index 0000000000..3496f993ab --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-nodejs.mdx @@ -0,0 +1,102 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A session represents a single [business transaction](https://martinfowler.com/eaaCatalog/unitOfWork.html) + (not to be confused with an [ACID transaction](../../../client-api/faq/transaction-support.mdx)). 
+
+* When opening a session, the session's mode can be set to either:
+  * **Single-Node** - transaction is executed on a specific node and then replicated
+  * **Cluster-Wide** - transaction is registered for execution on all nodes in an atomic fashion
+
+* In this page:
+  * [Open a cluster transaction](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)
+  * [Cluster-wide transaction vs. Single-node transaction](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction)
+
+
+## Open a cluster transaction
+
+* To work with a cluster transaction, open a **cluster-wide session**
+  by explicitly setting the `transactionMode` to `ClusterWide`.
+
+
+
+
+{`const session = store.openSession({
+    // Set mode to be cluster-wide
+    transactionMode: "ClusterWide"
+
+    // Session will be single-node when either:
+    // * Mode is not specified
+    // * Explicitly set to SingleNode
+});
+`}
+
+
+
+* Similar to the single-node session,
+  any CRUD operations can be made on the cluster-wide session and the session will track them as usual.
+
+
+
+## Cluster-wide transaction vs. Single-node transaction
+
+
+#### Cluster-Wide
+* Cluster-wide transactions are **fully ACID** transactions across all the database-group nodes.
+  They are implemented with the Raft algorithm, so the cluster must first reach a consensus.
+  Once the majority of the nodes have approved the transaction,
+  the transaction is registered for execution in the transaction queue of all nodes in an atomic fashion.
+* The transaction will either **succeed on all nodes or be rolled back**.
+  * The transaction is considered successful only when successfully registered on all the database-group nodes.
+    Once executed on all nodes, the data is consistent and available on all nodes.
+  * A failure to register the transaction on any node will cause the transaction to roll back on all nodes and changes will Not be applied.
+* The only **actions available** are:
+  * PUT / DELETE a document
+  * PUT / DELETE a compare-exchange item
+* To prevent concurrent modification of documents,
+  the server creates [Atomic-Guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx) that will be associated with the documents.
+  An Atomic-Guard will be created when:
+  * A new document is created
+  * An existing document that doesn't yet have an Atomic-Guard is modified
+* Cluster-wide transactions are **conflict-free**.
+* The cluster-wide transaction is considered **more expensive and less performant**
+  since a cluster consensus is required prior to execution.
+* **Prefer a cluster-wide transaction when**:
+  * Prioritizing consistency over performance & availability
+  * When you would rather fail if a successful operation on all nodes cannot be ensured
+
+
+
+
+#### Single-Node
+* A single-node transaction is considered successful once executed successfully on the node the client is communicating with.
+  The data is **immediately available** on that node, and it will be **eventually consistent** across all the other database nodes when the replication process takes place soon after.
+* **Any action is available** except for PUT / DELETE a compare-exchange item.
+  No Atomic-Guards are created by the server.
+* **Conflicts** may occur when two concurrent transactions modify the same document on different nodes at the same time.
+  They are resolved according to the defined conflict settings, either by using the latest version (default) or by following a conflict resolution script.
+  Revisions are created for the conflicting documents so that any document can be recovered.
+* The single-node transaction is considered **faster and less expensive**,
+  as no cluster consensus is required for its execution.
+* **Prefer a single-node transaction when**:
+  * Prioritizing performance & availability over consistency
+  * When immediate data persistence is crucial
+  * When you must ensure data is written even when other nodes are not reachable at the moment
+  * And when resolving occasional conflicts is acceptable
+
+
+
+
+
+For a detailed description of transactions in RavenDB, please refer to the [Transaction support in RavenDB](../../../client-api/faq/transaction-support.mdx) article.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-php.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-php.mdx
new file mode 100644
index 0000000000..c54678b606
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-php.mdx
@@ -0,0 +1,107 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A session represents a single [business transaction](https://martinfowler.com/eaaCatalog/unitOfWork.html)
+  (not to be confused with an [ACID transaction](../../../client-api/faq/transaction-support.mdx)).
+
+* When opening a session, the session's mode can be set to either:
+  * **Single-Node** - transaction is executed on a specific node and then replicated
+  * **Cluster-Wide** - transaction is registered for execution on all nodes in an atomic fashion
+
+* In this page:
+  * [Open a cluster transaction](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)
+  * [Cluster-wide transaction vs. Single-node transaction](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction)
+
+
+## Open a cluster transaction
+
+* To work with a cluster transaction, open a **cluster-wide session**
+  by explicitly setting the `TransactionMode` to `TransactionMode::clusterWide()`.
+
+
+
+{`$sessionOptions = new SessionOptions();
+
+// Set mode to be cluster-wide
+$sessionOptions->setTransactionMode(TransactionMode::clusterWide());
+
+// Session will be single-node when either:
+// * Mode is not specified
+// * Explicitly set TransactionMode.SingleNode
+
+$session = $store->openSession($sessionOptions);
+try \{
+    //
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+* Similar to the single-node session,
+  any CRUD operations can be made on the cluster-wide session and the session will track them as usual.
+
+
+
+## Cluster-wide transaction vs. Single-node transaction
+
+
+#### Cluster-Wide
+* Cluster-wide transactions are **fully ACID** transactions across all the database-group nodes.
+  They are implemented with the Raft algorithm, so the cluster must first reach a consensus.
+  Once the majority of the nodes have approved the transaction,
+  the transaction is registered for execution in the transaction queue of all nodes in an atomic fashion.
+* The transaction will either **succeed on all nodes or be rolled back**.
+  * The transaction is considered successful only when successfully registered on all the database-group nodes.
+    Once executed on all nodes, the data is consistent and available on all nodes.
+  * A failure to register the transaction on any node will cause the transaction to roll back on all nodes and changes will Not be applied.
+* The only **actions available** are:
+  * PUT / DELETE a document
+  * PUT / DELETE a compare-exchange item
+* To prevent concurrent modification of documents,
+  the server creates [Atomic-Guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx) that will be associated with the documents.
+  An Atomic-Guard will be created when:
+  * A new document is created
+  * An existing document that doesn't yet have an Atomic-Guard is modified
+* Cluster-wide transactions are **conflict-free**.
+* The cluster-wide transaction is considered **more expensive and less performant**
+  since a cluster consensus is required prior to execution.
+* **Prefer a cluster-wide transaction when**:
+  * Prioritizing consistency over performance & availability
+  * When you would rather fail if a successful operation on all nodes cannot be ensured
+
+
+
+
+#### Single-Node
+* A single-node transaction is considered successful once executed successfully on the node the client is communicating with.
+  The data is **immediately available** on that node, and it will be **eventually consistent** across all the other database nodes when the replication process takes place soon after.
+* **Any action is available** except for PUT / DELETE a compare-exchange item.
+  No Atomic-Guards are created by the server.
+* **Conflicts** may occur when two concurrent transactions modify the same document on different nodes at the same time.
+  They are resolved according to the defined conflict settings, either by using the latest version (default) or by following a conflict resolution script.
+  Revisions are created for the conflicting documents so that any document can be recovered.
+* The single-node transaction is considered **faster and less expensive**,
+  as no cluster consensus is required for its execution.
+* **Prefer a single-node transaction when**:
+  * Prioritizing performance & availability over consistency
+  * When immediate data persistence is crucial
+  * When you must ensure data is written even when other nodes are not reachable at the moment
+  * And when resolving occasional conflicts is acceptable
+
+
+
+
+
+For a detailed description of transactions in RavenDB, please refer to the [Transaction support in RavenDB](../../../client-api/faq/transaction-support.mdx) article.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-python.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-python.mdx
new file mode 100644
index 0000000000..abd38c8529
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/_overview-python.mdx
@@ -0,0 +1,99 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A session represents a single [business transaction](https://martinfowler.com/eaaCatalog/unitOfWork.html)
+  (not to be confused with an [ACID transaction](../../../client-api/faq/transaction-support.mdx)).
+
+* When opening a session, the session's mode can be set to either:
+  * **SINGLE-NODE** - transaction is executed on a specific node and then replicated
+  * **CLUSTER-WIDE** - transaction is registered for execution on all nodes in an atomic fashion
+
+* In this page:
+  * [Open a cluster transaction](../../../client-api/session/cluster-transaction/overview.mdx#open-a-cluster-transaction)
+  * [Cluster-wide transaction vs. Single-node transaction](../../../client-api/session/cluster-transaction/overview.mdx#cluster-wide-transaction-vs-single-node-transaction)
+
+
+## Open a cluster transaction
+
+* To work with a cluster transaction, open a **cluster-wide session**
+  by explicitly setting the `transaction_mode` to `TransactionMode.CLUSTER_WIDE`.
+
+
+{`with store.open_session(
+    session_options=SessionOptions(
+        transaction_mode=TransactionMode.CLUSTER_WIDE  # Set mode to be cluster-wide
+        # Session will be single-node when either:
+        # * Mode is not specified
+        # * Explicitly set TransactionMode.SINGLE_NODE
+    )
+) as session:
+`}
+
+
+
+* Similar to the single-node session,
+  any CRUD operations can be made on the cluster-wide session and the session will track them as usual.
+
+
+
+## Cluster-wide transaction vs. Single-node transaction
+
+
+#### Cluster-Wide
+* Cluster-wide transactions are **fully ACID** transactions across all the database-group nodes.
+  They are implemented with the Raft algorithm, so the cluster must first reach a consensus.
+  Once the majority of the nodes have approved the transaction,
+  the transaction is registered for execution in the transaction queue of all nodes in an atomic fashion.
+* The transaction will either **succeed on all nodes or be rolled back**.
+  * The transaction is considered successful only when successfully registered on all the database-group nodes.
+    Once executed on all nodes, the data is consistent and available on all nodes.
+  * A failure to register the transaction on any node will cause the transaction to roll back on all nodes and changes will Not be applied.
+* The only available actions are:
+  * **put** or **delete** a document
+  * **put** or **delete** a compare-exchange item
+* To prevent concurrent modification of documents,
+  the server creates [Atomic-Guards](../../../client-api/session/cluster-transaction/atomic-guards.mdx) that will be associated with the documents.
+  An Atomic-Guard will be created when:
+  * A new document is created
+  * An existing document that doesn't yet have an Atomic-Guard is modified
+* Cluster-wide transactions are **conflict-free**.
+* The cluster-wide transaction is considered **more expensive and less performant**
+  since a cluster consensus is required prior to execution.
+* **Prefer a cluster-wide transaction when**:
+  * Prioritizing consistency over performance & availability
+  * When you would rather fail if a successful operation on all nodes cannot be ensured
+
+
+
+
+#### Single-Node
+* A single-node transaction is considered successful once executed successfully on the node the client is communicating with.
+  The data is **immediately available** on that node, and it will be **eventually consistent** across all the other database nodes when the replication process takes place soon after.
+* **Any action is available** except for a compare-exchange item **put** or **delete**.
+  No Atomic-Guards are created by the server.
+* **Conflicts** may occur when two concurrent transactions modify the same document on different nodes at the same time.
+ They are resolved according to the defined conflict settings, either by using the latest version (default) or by following a conflict resolution script. + Revisions are created for the conflicting documents so that any document can be recovered. +* The single-node transaction is considered **faster and less expensive**, + as no cluster consensus is required for its execution. +* **Prefer a single-node transaction when**: + * Prioritizing performance & availability over consistency + * When immediate data persistence is crucial + * When you must ensure data is written even when other nodes are not reachable at the moment + * And - when resolving occasional conflicts is acceptable + + + + + +For a detailed description of transactions in RavenDB please refer to the [Transaction support in RavenDB](../../../client-api/faq/transaction-support.mdx) article. + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/assets/atomic-guard.png b/versioned_docs/version-7.1/client-api/session/cluster-transaction/assets/atomic-guard.png new file mode 100644 index 0000000000..4e2a12abe7 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/session/cluster-transaction/assets/atomic-guard.png differ diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/atomic-guards.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/atomic-guards.mdx new file mode 100644 index 0000000000..988138f656 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/atomic-guards.mdx @@ -0,0 +1,53 @@ +--- +title: "Atomic Guards" +hide_table_of_contents: true +sidebar_label: Atomic Guards +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import AtomicGuardsCsharp from './_atomic-guards-csharp.mdx'; +import AtomicGuardsPython from './_atomic-guards-python.mdx'; +import AtomicGuardsPhp from './_atomic-guards-php.mdx'; +import AtomicGuardsNodejs from './_atomic-guards-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/compare-exchange.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/compare-exchange.mdx new file mode 100644 index 0000000000..7bf8ed0d86 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/compare-exchange.mdx @@ -0,0 +1,45 @@ +--- +title: "Compare Exchange in Cluster-Wide Session" +hide_table_of_contents: true +sidebar_label: Compare Exchange +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CompareExchangeCsharp from './_compare-exchange-csharp.mdx'; +import CompareExchangePython from './_compare-exchange-python.mdx'; +import CompareExchangePhp from './_compare-exchange-php.mdx'; +import CompareExchangeNodejs from './_compare-exchange-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/cluster-transaction/overview.mdx b/versioned_docs/version-7.1/client-api/session/cluster-transaction/overview.mdx new file mode 100644 index 0000000000..728592a820 --- 
/dev/null
+++ b/versioned_docs/version-7.1/client-api/session/cluster-transaction/overview.mdx
@@ -0,0 +1,42 @@
+---
+title: "Cluster Transaction - Overview"
+hide_table_of_contents: true
+sidebar_label: Overview
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import OverviewCsharp from './_overview-csharp.mdx';
+import OverviewPython from './_overview-python.mdx';
+import OverviewPhp from './_overview-php.mdx';
+import OverviewNodejs from './_overview-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_category_.json b/versioned_docs/version-7.1/client-api/session/configuration/_category_.json
new file mode 100644
index 0000000000..954ff62feb
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 9,
+  "label": "Configuration"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-csharp.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-csharp.mdx
new file mode 100644
index 0000000000..eb58113e4f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-csharp.mdx
@@ -0,0 +1,44 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+By default, the maximum number of requests that a session can send to the server is **30**.
+This number, if everything is done correctly, should never be reached since remote
+calls are expensive and the number of remote calls per "session" should be as close
+to **1** as possible.
+If the limit is reached, it may indicate a `Select N+1` problem or some other misuse
+of the session object.
+
+Nevertheless, if needed, this number can be changed for a single session or for all sessions.
+
+## Single session
+
+To change the maximum number of requests in a single session, modify the `MaxNumberOfRequestsPerSession`
+property value using the `Advanced` session operations.
+
+
+
+{`session.Advanced.MaxNumberOfRequestsPerSession = 50;
+`}
+
+
+
+## All sessions
+
+To change the maximum number of requests for **all** sessions (on a particular store),
+the `MaxNumberOfRequestsPerSession` property from DocumentStore `Conventions` must be changed.
+
+
+
+{`store.Conventions.MaxNumberOfRequestsPerSession = 100;
+`}
+
+
+
+
+The maximum number of requests for all sessions can also be configured via injected client
+configuration from the Server. Read more about this [here](../../../studio/server/client-configuration.mdx).
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-java.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-java.mdx
new file mode 100644
index 0000000000..bce86640f1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-java.mdx
@@ -0,0 +1,44 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+By default, the maximum number of requests that a session can send to the server is **30**.
+This number, if everything is done correctly, should never be reached since remote
+calls are expensive and the number of remote calls per "session" should be as close
+to **1** as possible.
+If the limit is reached, it may indicate a `Select N+1` problem or some other misuse
+of the session object.
+
+Nevertheless, if needed, this number can be changed for a single session or for all sessions.
+
+## Single session
+
+To change the maximum number of requests in a single session, modify the `maxNumberOfRequestsPerSession`
+field value using the `advanced` session operations.
+
+
+
+{`session.advanced().setMaxNumberOfRequestsPerSession(50);
+`}
+
+
+
+## All sessions
+
+To change the maximum number of requests for **all** sessions (on a particular store),
+the `maxNumberOfRequestsPerSession` field from DocumentStore `conventions` must be changed.
+
+
+
+{`store.getConventions().setMaxNumberOfRequestsPerSession(100);
+`}
+
+
+
+
+The maximum number of requests for all sessions can also be configured via injected client
+configuration from the Server. Read more about this [here](../../../studio/server/client-configuration.mdx).
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-nodejs.mdx
new file mode 100644
index 0000000000..b6dc5d6bb1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-nodejs.mdx
@@ -0,0 +1,44 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+By default, the maximum number of requests that a session can send to the server is **30**.
+This number, if everything is done correctly, should never be reached since remote
+calls are expensive and the number of remote calls per "session" should be as close
+to **1** as possible.
+If the limit is reached, it may indicate a `Select N+1` problem or some other misuse
+of the session object.
+
+Nevertheless, if needed, this number can be changed for a single session or for all sessions.
+
+## Single session
+
+To change the maximum number of requests in a single session, modify the `maxNumberOfRequestsPerSession`
+field value using the `Advanced` session operations.
+
+
+
+{`session.advanced.maxNumberOfRequestsPerSession = 50;
+`}
+
+
+
+## All sessions
+
+To change the maximum number of requests for **all** sessions (on a particular store),
+the `maxNumberOfRequestsPerSession` field from DocumentStore `conventions` must be changed.
+
+
+
+{`store.conventions.maxNumberOfRequestsPerSession = 100;
+`}
+
+
+
+
+The maximum number of requests for all sessions can also be configured via injected client
+configuration from the Server. Read more about this [here](../../../studio/server/client-configuration.mdx).
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-php.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-php.mdx
new file mode 100644
index 0000000000..f9e95b8b33
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-php.mdx
@@ -0,0 +1,48 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+By default, the maximum number of requests that a session can send to the server is **30**.
+This number, if everything is done correctly, should never be reached since remote
+calls are expensive and the number of remote calls per "session" should be as close
+to **1** as possible.
+If the limit is reached, it may indicate a `Select N+1` problem or some other misuse
+of the session object.
+
+Nevertheless, if needed, this number can be changed for a single session or for all sessions.
+
+## Single session
+
+To modify the maximum number of requests for a **single** session,
+set the value of the session `maxNumberOfRequestsPerSession` property.
+
+You can do this using the **advanced session** `setMaxNumberOfRequestsPerSession` method.
+
+
+
+{`$session->advanced()->setMaxNumberOfRequestsPerSession(50);
+`}
+
+
+
+## All sessions
+
+To modify the maximum number of requests for **all** sessions (on a particular store),
+set the value of the DocumentStore conventions `maxNumberOfRequestsPerSession` property.
+
+You can do this using the **store** `setMaxNumberOfRequestsPerSession` method.
+
+
+
+{`$store->getConventions()->setMaxNumberOfRequestsPerSession(100);
+`}
+
+
+
+
+The maximum number of requests for all sessions can also be configured via injected client
+configuration from the Server. Read more about this [here](../../../studio/server/client-configuration.mdx).
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-python.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-python.mdx
new file mode 100644
index 0000000000..71356b470f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-change-maximum-number-of-requests-per-session-python.mdx
@@ -0,0 +1,44 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+By default, the maximum number of requests that a session can send to the server is **30**.
+This number, if everything is done correctly, should never be reached since remote
+calls are expensive and the number of remote calls per "session" should be as close
+to **1** as possible.
+If the limit is reached, it may indicate a `Select N+1` problem or some other misuse
+of the session object.
+
+Nevertheless, if needed, this number can be changed for a single session or for all sessions.
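+
+Before raising the limit, it is usually worth eliminating the extra requests instead.
+The following is a minimal sketch of the `Select N+1` pattern and a batched alternative;
+the Northwind-style document shape (`Lines` entries holding a `Product` id) and passing a
+list of ids to `load` in a single call are assumptions for illustration:
+
+
+
+{`with store.open_session() as session:
+    order = session.load("orders/1-A")  # request #1
+
+    # Select N+1 anti-pattern: one extra request per referenced product
+    for line in order.Lines:
+        product = session.load(line.Product)  # +1 request each
+
+    # Better: batch all ids into a single load - one request in total
+    product_ids = [line.Product for line in order.Lines]
+    products = session.load(product_ids)  # assumed to accept a list of ids
+`}
+
+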
+
+## Single session
+
+To change the maximum number of requests for a **single** session, modify the value
+of the `session` `_max_number_of_requests_per_session` property.
+
+
+
+{`session._max_number_of_requests_per_session = 50
+`}
+
+
+
+## All sessions
+
+To change the maximum number of requests for **all** sessions (on a particular store),
+change the value of the DocumentStore `conventions` `max_number_of_requests_per_session` property.
+
+
+
+{`store.conventions.max_number_of_requests_per_session = 100
+`}
+
+
+
+
+The maximum number of requests for all sessions can also be configured via injected client
+configuration from the Server. Read more about this [here](../../../studio/server/client-configuration.mdx).
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-csharp.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-csharp.mdx
new file mode 100644
index 0000000000..2d4beb0a79
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-csharp.mdx
@@ -0,0 +1,29 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Entities are grouped into [collections](../../faq/what-is-a-collection.mdx) on the server side. To determine the collection name that an entity belongs to,
+there are special conventions that return the collection name based on the entity type: [`FindCollectionName` and `FindCollectionNameForDynamic`](../../configuration/identifier-generation/global.mdx#findcollectionname-and-findcollectionnamefordynamic).
+
+## Example
+
+By default, a collection name is the pluralized form of the entity type name. For example, objects of type `Category` will belong to the `Categories` collection. However, if your intention
+is to classify them as `ProductGroups`, use the following code:
+
+
+
+{`store.Conventions.FindCollectionName = type =>
+\{
+    if (typeof(Category).IsAssignableFrom(type))
+        return "ProductGroups";
+
+    return DocumentConventions.DefaultGetCollectionName(type);
+\};
+`}
+
+
+
+This can become very useful when there is a need to deal with [polymorphic data](../../../indexes/indexing-polymorphic-data.mdx).
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-java.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-java.mdx
new file mode 100644
index 0000000000..332cc17c13
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-java.mdx
@@ -0,0 +1,29 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Entities are grouped into [collections](../../faq/what-is-a-collection.mdx) on the server side. To determine the collection name that an entity belongs to,
+there is a special convention that returns the collection name based on the entity type: [`findCollectionName`](../../configuration/identifier-generation/global.mdx#findcollectionname).
+
+## Example
+
+By default, a collection name is the pluralized form of the entity type name.
For example, objects of type `Category` will belong to the `Categories` collection. However, if your intention
+is to classify them as `ProductGroups`, use the following code:
+
+
+
+{`store.getConventions().setFindCollectionName(clazz -> \{
+    if (Category.class.isAssignableFrom(clazz)) \{
+        return "ProductGroups";
+    \}
+
+    return DocumentConventions.defaultGetCollectionName(clazz);
+\});
+`}
+
+
+
+This can become very useful when there is a need to deal with [polymorphic data](../../../indexes/indexing-polymorphic-data.mdx).
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-nodejs.mdx
new file mode 100644
index 0000000000..534b7fa13c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-nodejs.mdx
@@ -0,0 +1,29 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Entities are grouped into [collections](../../faq/what-is-a-collection.mdx) on the server side. To determine the collection name that an entity belongs to,
+there is a special convention that returns the collection name based on the entity type: [`findCollectionName`](../../configuration/identifier-generation/global.mdx#findcollectionname).
+
+## Example
+
+By default, a collection name is the pluralized form of the entity type name. For example, objects of type `Category` will belong to the `Categories` collection. However, if your intention
+is to classify them as `ProductGroups`, you can use the following code:
+
+
+
+{`store.conventions.findCollectionName = clazz => \{
+    if (clazz === Category) \{
+        return "ProductGroups";
+    \}
+
+    return DocumentConventions.defaultGetCollectionName(clazz);
+\};
+`}
+
+
+
+This can become very useful when there is a need to deal with [polymorphic data](../../../indexes/indexing-polymorphic-data.mdx).
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-php.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-php.mdx
new file mode 100644
index 0000000000..bf9dbc4746
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-php.mdx
@@ -0,0 +1,31 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Entities are grouped on the server side into [collections](../../faq/what-is-a-collection.mdx).
+A collection's name is determined by the type of the entities in it.
+To find the name of an entity's collection, use [`findCollectionName`](../../configuration/identifier-generation/global.mdx#findcollectionname).
+
+## Example
+
+By default, a collection name is the pluralized form of an entity's type.
+Entities of type `Category`, for example, will belong to the `Categories` collection.
+To modify this behavior, use `setFindCollectionName`.
+
+
+
+{`$store->getConventions()->setFindCollectionName(function($className) \{
+    if (is_subclass_of($className, Category::class)) \{
+        return "ProductGroups";
+    \}
+
+    return DocumentConventions::defaultGetCollectionName($className);
+\});
+`}
+
+
+
+This can become very useful when there is a need to deal with [polymorphic data](../../../indexes/indexing-polymorphic-data.mdx).
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-python.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-python.mdx
new file mode 100644
index 0000000000..5786d2643f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-collection-assignment-for-entities-python.mdx
@@ -0,0 +1,30 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Entities are grouped into [collections](../../faq/what-is-a-collection.mdx) on the server side.
+The name of the collection that an entity belongs to can be retrieved using the `find_collection_name` convention.
+
+## Example
+
+By default, a collection name is the pluralized form of the name of an entity type.
+E.g., objects of type `Category` will belong to the `Categories` collection.
+If you mean to classify them as `ProductGroups`, however, use the following code:
+
+
+
+{`def __find_collection_name(object_type: type) -> str:
+    if issubclass(object_type, Category):
+        return "ProductGroups"
+
+    return DocumentConventions.default_get_collection_name(object_type)
+
+store.conventions.find_collection_name = __find_collection_name
+`}
+
+
+
+This can become very useful when there is a need to deal with [polymorphic data](../../../indexes/indexing-polymorphic-data.mdx).
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-csharp.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-csharp.mdx
new file mode 100644
index 0000000000..9a09fb062b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-csharp.mdx
@@ -0,0 +1,30 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The client must know which property of your entity is considered its identity. By default, it always looks for the `Id` property (case-sensitive). This behavior can be changed by overriding the `FindIdentityProperty` convention.
+
+## Syntax
+
+
+
+{`Func<MemberInfo, bool> FindIdentityProperty \{ get; set; \}
+`}
+
+
+
+`MemberInfo` represents a member of a stored entity, and the return value (`bool`) indicates whether the given member is the identity property.
+
+## Example
+
+The simplest example would be to check if the property name is equal to 'Identifier'.
+
+
+
+{`store.Conventions.FindIdentityProperty = memberInfo => memberInfo.Name == "Identifier";
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-java.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-java.mdx
new file mode 100644
index 0000000000..410313f775
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-java.mdx
@@ -0,0 +1,30 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The client must know which property of your entity is considered its identity. By default, it always looks for the `id` field (case-sensitive). This behavior can be changed by overriding the `findIdentityProperty` convention.
+
+## Syntax
+
+
+
+{`public void setFindIdentityProperty(Function<PropertyDescriptor, Boolean> findIdentityProperty);
+`}
+
+
+
+`PropertyDescriptor` represents a property of a stored entity, and the return value (`Boolean`) indicates whether the given property is the identity property.
+
+## Example
+
+The simplest example would be to check if the property name is equal to 'Identifier'.
+
+
+
+{`store.getConventions().setFindIdentityProperty(property -> "Identifier".equals(property.getName()));
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-nodejs.mdx
new file mode 100644
index 0000000000..7cd2ac56d7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-nodejs.mdx
@@ -0,0 +1,28 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+The client must know which property of your entity is considered its identity. By default, it always looks for the `id` field (case-sensitive). This behavior can be changed by overriding the `identityProperty` convention.
+
+## Syntax
+
+
+
+{`store.conventions.identityProperty;
+`}
+
+
+
+## Example
+
+Here's how to change it to the `Identifier` field.
+
+
+
+{`store.conventions.identityProperty = "Identifier";
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-php.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-php.mdx
new file mode 100644
index 0000000000..5d2b641392
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-customize-identity-property-lookup-for-entities-php.mdx
@@ -0,0 +1,34 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* When a client handles an entity, it must know which property is the entity's identifier.
+  By default, the client always looks for the `Id` property (case-sensitive).
+
+* This behavior can be changed by overriding the `FindIdentityProperty` convention.
+  To do so, use the `setFindIdentityProperty` method.
+
+## Syntax
+
+
+
+{`public function setFindIdentityProperty(?Closure $findIdentityProperty): void;
+`}
+
+
+
+## Example
+
+The simplest example would be to check if the property name is equal to 'Identifier'.
+
+
+
+{`$store->getConventions()->setFindIdentityProperty(function($property) \{
+    return "Identifier" == $property->getName();
+\});
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-csharp.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-csharp.mdx
new file mode 100644
index 0000000000..4ca6d33bff
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-csharp.mdx
@@ -0,0 +1,44 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To reduce the overhead of sending documents over the network,
+the client library caches the HTTP responses and sends only ETags to the server.
+
+If a request was previously cached, the server has the opportunity to send back `304 Not Modified`
+without any content data; otherwise it sends the up-to-date results, which then update the cache.
+
+This behavior can be changed globally by changing the HTTP cache size (more [here](../../../client-api/configuration/conventions.mdx#maxhttpcachesize)),
+but can also be changed per session using the `SessionOptions.NoCaching` property.
+
+## Example
+
+
+
+
+{`using (IDocumentSession Session = store.OpenSession(new SessionOptions
+{
+    NoCaching = true
+}))
+{
+    // code here
+}
+`}
+
+
+
+{`using (IAsyncDocumentSession Session = store.OpenAsyncSession(new SessionOptions
+{
+    NoCaching = true
+}))
+{
+    // async code here
+}
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-java.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-java.mdx
new file mode 100644
index 0000000000..2e45158585
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-java.mdx
@@ -0,0 +1,28 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To reduce the overhead of sending documents over the network,
+the client library caches the HTTP responses and sends only ETags to the server.
+
+If a request was previously cached, the server has the opportunity to send back `304 Not Modified`
+without any content data; otherwise it sends the up-to-date results, which then update the cache.
+
+This behavior can be changed globally by changing the HTTP cache size (more [here](../../../client-api/configuration/conventions.mdx#maxhttpcachesize)),
+but can also be changed per session using the `sessionOptions.noCaching` property.
+
+## Example
+
+
+
+{`SessionOptions sessionOptions = new SessionOptions();
+sessionOptions.setNoCaching(true);
+try (IDocumentSession session = store.openSession(sessionOptions)) \{
+    // code here
+\}
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-nodejs.mdx
new file mode 100644
index 0000000000..0818e7865c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-nodejs.mdx
@@ -0,0 +1,32 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To reduce the overhead of sending documents over the network,
+the client library caches the HTTP responses and sends only ETags to the server.
+
+If a request was previously cached, the server has the opportunity to send back `304 Not Modified`
+without any content data; otherwise it sends the up-to-date results, which then update the cache.
+
+This behavior can be changed globally by changing the HTTP cache size (more [here](../../../client-api/configuration/conventions.mdx#maxhttpcachesize)),
+but can also be changed per session using the `SessionOptions.noCaching` property.
+
+## Example
+
+
+
+{`// Define the session's options object
+const sessionOptions: SessionOptions = \{
+    noCaching: true // Disable caching
+\};
+
+// Open the session, pass the options object
+const session = store.openSession(sessionOptions);
+
+// The session will not cache any HTTP response data from the server
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-php.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-php.mdx
new file mode 100644
index 0000000000..c42e58b3d3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-caching-php.mdx
@@ -0,0 +1,32 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To reduce the overhead of sending documents over the network,
+the client library caches the HTTP responses and sends only ETags to the server.
+
+If a request was previously cached, the server has the opportunity to send back `304 Not Modified`
+without any content data; otherwise it sends the up-to-date results, which then update the cache.
+
+This behavior can be changed globally by changing the HTTP cache size (more [here](../../../client-api/configuration/conventions.mdx#maxhttpcachesize)).
+It can also be changed per session, using the `setNoCaching` method.
+ +## Example + + + +{`$sessionOptions = new SessionOptions(); +$sessionOptions->setNoCaching(true); + +$session = $store->openSession($sessionOptions); +try \{ + // code here +\} finally \{ + $session->close(); +\} +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-csharp.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-csharp.mdx new file mode 100644 index 0000000000..945c056c43 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-csharp.mdx @@ -0,0 +1,415 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, each session tracks changes to all entities it has either stored, loaded, or queried for. + All changes are then persisted when `SaveChanges` is called. + +* Tracking can be disabled at various scopes: + for a specific entity, for entities returned by a query, for all entities in a session, or globally using conventions. + +* In this article: + * [Disable tracking changes for a specific entity](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-changes-for-a-specific-entity) + * [Disable tracking all entities in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-all-entities-in-session) + * [Disable tracking query results](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results) + * [Customize tracking in conventions](../../../client-api/session/configuration/how-to-disable-tracking.mdx#customize-tracking-in-conventions) + * [Using 'Include' in a NoTracking session will throw](../../../client-api/session/configuration/how-to-disable-tracking.mdx#using-) + + +## Disable tracking changes for a specific entity + +* You can prevent the session from persisting changes made to a specific entity by using `IgnoreChangesFor`. +* Once changes are ignored for the entity: + * Any modifications made to the entity will be ignored by `SaveChanges`. + * The session will still keep a reference to the entity to avoid repeated server requests. + Performing another `Load` for the same entity will Not generate another call to the server. 
+
+**Example**
+
+
+
+
+{`// Load a product entity - the session will track the entity by default
+Product product = session.Load<Product>("products/1-A");
+
+// Call 'IgnoreChangesFor' to instruct the session to ignore changes made to this entity
+session.Advanced.IgnoreChangesFor(product);
+
+// The following change will be ignored by SaveChanges - it will not be persisted
+product.UnitsInStock += 1;
+
+session.SaveChanges();
+`}
+
+
+
+{`// Load a product entity - the session will track the entity by default
+Product product = await asyncSession.LoadAsync<Product>("products/1-A");
+
+// Call 'IgnoreChangesFor' to instruct the session to ignore changes made to this entity
+asyncSession.Advanced.IgnoreChangesFor(product);
+
+// The following change will be ignored by SaveChanges - it will not be persisted
+product.UnitsInStock += 1;
+
+await asyncSession.SaveChangesAsync();
+`}
+
+
+
+**Syntax**
+
+
+
+{`void IgnoreChangesFor(object entity);
+`}
+
+
+
+| Parameters | Type | Description |
+|------------|----------|------------------------------------------------------|
+| **entity** | `object` | Instance of entity for which changes will be ignored |
+
+
+
+## Disable tracking all entities in session
+
+* Tracking can be disabled for all entities in the session's options.
+* When tracking is disabled for the session:
+  * Method `Store` will Not be available (an exception will be thrown if used).
+  * Calling `Load` or `Query` will generate a call to the server and create new entity instances.
+
+
+
+
+{`using (IDocumentSession session = store.OpenSession(new SessionOptions
+{
+    // Disable tracking for all entities in the session's options
+    NoTracking = true
+}))
+{
+    // Load any entity, it will Not be tracked by the session
+    Employee employee1 = session.Load<Employee>("employees/1-A");
+
+    // Loading again from same document will result in a new entity instance
+    Employee employee2 = session.Load<Employee>("employees/1-A");
+
+    // Entity instances are not the same
+    Assert.NotEqual(employee1, employee2);
+}
+`}
+
+
+
+{`using (IAsyncDocumentSession asyncSession = store.OpenAsyncSession(new SessionOptions
+{
+    // Disable tracking for all entities in the session's options
+    NoTracking = true
+}))
+{
+    // Load any entity, it will Not be tracked by the session
+    Employee employee1 = await asyncSession.LoadAsync<Employee>("employees/1-A");
+
+    // Loading again from same document will result in a new entity instance
+    Employee employee2 = await asyncSession.LoadAsync<Employee>("employees/1-A");
+
+    // Entity instances are not the same
+    Assert.NotEqual(employee1, employee2);
+}
+`}
+
+
+
+
+## Disable tracking query results
+
+* Tracking can be disabled for all entities resulting from a query.
+
+
+
+
+{`using (IDocumentSession session = store.OpenSession())
+{
+    // Define a query
+    List<Employee> employeesResults = session.Query<Employee>()
+         // Set NoTracking, all resulting entities will not be tracked
+         .Customize(x => x.NoTracking())
+         .Where(x => x.FirstName == "Robert")
+         .ToList();
+
+    // The following modification will not be tracked for SaveChanges
+    Employee firstEmployee = employeesResults[0];
+    firstEmployee.LastName = "NewName";
+
+    // Change to 'firstEmployee' will not be persisted
+    session.SaveChanges();
+}
+`}
+
+
+
+{`using (IAsyncDocumentSession asyncSession = store.OpenAsyncSession())
+{
+    // Define a query
+    List<Employee> employeesResults = await asyncSession.Query<Employee>()
+         // Set NoTracking, all resulting entities will not be tracked
+         .Customize(x => x.NoTracking())
+         .Where(x => x.FirstName == "Robert")
+         .ToListAsync();
+
+    // The following modification will not be tracked for SaveChanges
+    Employee firstEmployee = employeesResults[0];
+    firstEmployee.LastName = "NewName";
+
+    // Change to 'firstEmployee' will not be persisted
+    await asyncSession.SaveChangesAsync();
+}
+`}
+
+
+
+{`using (IDocumentSession session = store.OpenSession())
+{
+    // Define a query
+    List<Employee> employeesResults = session.Advanced.DocumentQuery<Employee>()
+         // Set NoTracking, all resulting entities will not be tracked
+         .NoTracking()
+         .Where(x => x.FirstName == "Robert")
+         .ToList();
+
+    // The following modification will not be tracked for SaveChanges
+    Employee firstEmployee = employeesResults[0];
+    firstEmployee.LastName = "NewName";
+
+    // Change to 'firstEmployee' will not be persisted
+    session.SaveChanges();
+}
+`}
+
+
+
+{`using (IAsyncDocumentSession asyncSession = store.OpenAsyncSession())
+{
+    // Define a query
+    List<Employee> employeesResults = await asyncSession.Advanced.AsyncDocumentQuery<Employee>()
+         // Set NoTracking, all resulting entities will not be tracked
+         .NoTracking()
+         .Where(x => x.FirstName == "Robert")
+         .ToListAsync();
+
+    // The following modification will not be tracked for SaveChanges
+    Employee firstEmployee = employeesResults[0];
+    firstEmployee.LastName = "NewName";
+
+    // Change to 'firstEmployee' will not be persisted
+    await asyncSession.SaveChangesAsync();
+}
+`}
+
+
+
+
+
+## Customize tracking in conventions
+
+* You can further customize and fine-tune which entities will not be tracked
+  by configuring the `ShouldIgnoreEntityChanges` convention method on the document store.
+* This customization rule will apply to all sessions opened for this document store.
+
+**Example**
+
+
+
+{`using (var store = new DocumentStore()
+\{
+    // Define the 'ignore' convention on your document store
+    Conventions =
+    \{
+        ShouldIgnoreEntityChanges =
+            // Define for which entities tracking should be disabled
+            // Tracking will be disabled ONLY for entities of type Employee whose FirstName is Bob
+            (session, entity, id) => (entity is Employee e) &&
+                                     (e.FirstName == "Bob")
+    \}
+\}.Initialize())
+\{
+    using (IDocumentSession session = store.OpenSession())
+    \{
+        var employee1 = new Employee \{ Id = "employees/1", FirstName = "Alice" \};
+        var employee2 = new Employee \{ Id = "employees/2", FirstName = "Bob" \};
+
+        session.Store(employee1); // This entity will be tracked
+        session.Store(employee2); // Changes to this entity will be ignored
+
+        session.SaveChanges(); // Only employee1 will be persisted
+
+        employee1.FirstName = "Bob";   // Changes to this entity will now be ignored
+        employee2.FirstName = "Alice"; // This entity will now be tracked
+
+        session.SaveChanges(); // Only employee2 is persisted
+    \}
+\}
+`}
+
+
+
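+The convention applies to async sessions as well. The following is a minimal sketch of the
+equivalent async flow, reusing the store and convention defined above (only entities of type
+Employee whose FirstName is Bob are ignored):
+
+
+
+{`using (IAsyncDocumentSession asyncSession = store.OpenAsyncSession())
+\{
+    var employee1 = new Employee \{ Id = "employees/1", FirstName = "Alice" \};
+    var employee2 = new Employee \{ Id = "employees/2", FirstName = "Bob" \};
+
+    await asyncSession.StoreAsync(employee1); // This entity will be tracked
+    await asyncSession.StoreAsync(employee2); // Changes to this entity will be ignored
+
+    await asyncSession.SaveChangesAsync();    // Only employee1 will be persisted
+\}
+`}
+
+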
+
+**Syntax**
+
+
+
+{`public Func<InMemoryDocumentSessionOperations, object, string, bool> ShouldIgnoreEntityChanges;
+`}
+
+
+
+| Parameter | Description |
+|-------------------------------------|--------------------------------------------------|
+| `InMemoryDocumentSessionOperations` | The session for which tracking is to be disabled |
+| `object` | The entity for which tracking is to be disabled |
+| `string` | The entity's document ID |
+
+| Return Type | Description |
+|--------------|-------------------------------------------------------------------------|
+| `bool` | `true` - Entity will Not be tracked <br/>
`false` - Entity will be tracked | + + + +## Using 'Include' in a NoTracking session will throw + +* Attempting to use `Include` in a `NoTracking` session will throw an exception. + +* Like other entities in a _NoTracking_ session, + the included items are not tracked and will not prevent additional server requests during subsequent _Load_ operations for the same data. + To avoid confusion, _Include_ operations are disallowed during non-tracking session actions such as `Load` or `Query`. + +* This applies to all items that can be included - + e.g., documents, compare-exchange items, counters, revisions, and time series. +**Include when loading**: + + + + +{`using (IDocumentSession session = store.OpenSession(new SessionOptions +{ + // Working with a non-tracking session + NoTracking = true +})) +{ + try + { + // Trying to include a related document when loading a document will throw: + Product product1 = session + .Include(x => x.Supplier) + .Load("products/1-A"); + + // The same applies when using the builder syntax: + Product product2 = session.Load("products/1-A", + builder => builder.IncludeDocuments(product => product.Supplier)); + } + catch (InvalidOperationException e) + { + // An InvalidOperationException is expected here + } +} +`} + + + + +{`using (IAsyncDocumentSession asyncSession = store.OpenAsyncSession(new SessionOptions + { + // Working with a non-tracking session + NoTracking = true + })) +{ + try + { + // Trying to include a related document when loading a document will throw: + Product product = await asyncSession + .Include(x => x.Supplier) + .LoadAsync("products/1-A"); + + // The same applies when using the builder syntax: + Product product2 = await asyncSession.LoadAsync("products/1-A", + builder => builder.IncludeDocuments(product => product.Supplier)); + } + catch (InvalidOperationException e) + { + // An InvalidOperationException is expected here + } +} +`} + + + + +**Include when querying**: + + + + +{`using (IDocumentSession session = store.OpenSession(new SessionOptions + { + // Working with a non-tracking session + NoTracking = true + })) +{ + try + { + // Trying to include related documents in a query will throw + var products = session + .Query() + .Include(x => x.Supplier) + .ToList(); + } + catch (InvalidOperationException e) + { + // An InvalidOperationException is expected here + } +} +`} + + + + +{`using (IAsyncDocumentSession asyncSession = store.OpenAsyncSession(new SessionOptions + { + // Working with a non-tracking session + NoTracking = true + })) +{ + try + { + // Trying to include related documents when making a query will throw + var products = await asyncSession + .Query() + .Include(x => x.Supplier) + .ToListAsync(); + } + catch (InvalidOperationException e) + { + // An InvalidOperationException is expected here + } +} +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-nodejs.mdx new file mode 100644 index 0000000000..ff67fe965c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-nodejs.mdx @@ -0,0 +1,248 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, each session tracks changes to all entities it has either stored, loaded, or queried for. 
+ All changes are then persisted when `saveChanges` is called.
+
+* Tracking can be disabled at various scopes:
+  for a specific entity, for entities returned by a query, for all entities in a session, or globally using conventions.
+
+* In this article:
+  * [Disable tracking changes for a specific entity](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-changes-for-a-specific-entity)
+  * [Disable tracking all entities in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-all-entities-in-session)
+  * [Disable tracking query results](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results)
+  * [Customize tracking in conventions](../../../client-api/session/configuration/how-to-disable-tracking.mdx#customize-tracking-in-conventions)
+  * [Using 'Include' in a NoTracking session will throw](../../../client-api/session/configuration/how-to-disable-tracking.mdx#using-)
+
+
+## Disable tracking changes for a specific entity
+
+* You can prevent the session from persisting changes made to a specific entity by using `ignoreChangesFor`.
+* Once changes are ignored for the entity:
+  * Any modifications made to the entity will be ignored by `saveChanges`.
+  * The session will still keep a reference to the entity to avoid repeated server requests.
+    Performing another `load` for the same entity will Not generate another call to the server.
+
+**Example**
+
+
+
+{`const session = documentStore.openSession();
+
+// Load a product entity - the session will track the entity by default
+const product = await session.load("products/1-A");
+
+// Call 'ignoreChangesFor' to instruct the session to ignore changes made to this entity
+session.advanced.ignoreChangesFor(product);
+
+// The following change will be ignored by saveChanges - it will not be persisted
+product.unitsInStock += 1;
+
+await session.saveChanges();
+`}
+
+
+
+**Syntax**
+
+
+
+{`session.advanced.ignoreChangesFor(entity);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|----------|------------------------------------------------------|
+| **entity** | `object` | Instance of entity for which changes will be ignored |
+
+
+
+## Disable tracking all entities in session
+
+* Tracking can be disabled for all entities in the session's options.
+* When tracking is disabled for the session:
+  * Method `store` will Not be available (an exception will be thrown if used).
+  * Calling `load` or `query` will generate a call to the server and create new entity instances.
+
+
+
+{`const session = documentStore.openSession(\{
+    // Disable tracking for all entities in the session's options
+    noTracking: true
+\});
+
+// Load any entity, it will Not be tracked by the session
+const employee1 = await session.load("employees/1-A");
+
+// Loading again from the same document will result in a new entity instance
+const employee2 = await session.load("employees/1-A");
+
+// Entity instances are not the same
+assert.notStrictEqual(employee1, employee2);
+
+// Calling saveChanges will throw an exception
+await session.saveChanges();
+`}
+
+
+
+
+
+## Disable tracking query results
+
+* Tracking can be disabled for all entities resulting from a query.
+
+
+
+{`const session = documentStore.openSession();
+
+// Define a query
+const employeesResults = await session.query(\{ collection: "employees" \})
+    .whereEquals("FirstName", "Robert")
+    // Set noTracking, all resulting entities will not be tracked
+    .noTracking()
+    .all();
+
+// The following modification will not be tracked for saveChanges
+const firstEmployee = employeesResults[0];
+firstEmployee.lastName = "NewName";
+
+// Change to 'firstEmployee' will not be persisted
+await session.saveChanges();
+`}
+
+
+
+
+
+## Customize tracking in conventions
+
+* You can further customize and fine-tune which entities will not be tracked
+  by configuring the `shouldIgnoreEntityChanges` convention method on the document store.
+* This customization rule will apply to all sessions opened for this document store.
+
+**Example**
+
+
+
+{`const customStore = new DocumentStore();
+
+// Define the 'ignore' convention on your document store
+customStore.conventions.shouldIgnoreEntityChanges =
+    (sessionOperations, entity, documentId) => \{
+        // Define for which entities tracking should be disabled
+        // Tracking will be disabled ONLY for entities of type Employee whose firstName is Bob
+        return entity instanceof Employee && entity.firstName === "Bob";
+    \};
+customStore.initialize();
+
+const session = customStore.openSession();
+
+const employee1 = new Employee();
+employee1.firstName = "Alice";
+
+const employee2 = new Employee();
+employee2.firstName = "Bob";
+
+await session.store(employee1, "employees/1-A"); // This entity will be tracked
+await session.store(employee2, "employees/2-A"); // Changes to this entity will be ignored
+
+await session.saveChanges(); // Only employee1 will be persisted
+
+employee1.firstName = "Bob"; // Changes to this entity will now be ignored
+employee2.firstName = "Alice"; // This entity will now be tracked
+
+await session.saveChanges(); // Only employee2 is persisted
+`}
+
+
+
+**Syntax**
+
+
+
+{`store.conventions.shouldIgnoreEntityChanges = (sessionOperations, entity, documentId) => \{
+    // Write your logic
+    // return value:
+    //     true - entity will not be tracked
+    //     false - entity will be tracked
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------|-------------------------------------|--------------------------------------------------|
+| sessionOperations | `InMemoryDocumentSessionOperations` | The session for which tracking is to be disabled |
+| entity | `object` | The entity for which tracking is to be disabled |
+| documentId | `string` | The entity's document ID |
+
+| Return Type | Description |
+|---------------|-------------------------------------------------------------------------|
+| `boolean` | `true` - Entity will Not be tracked
`false` - Entity will be tracked |
+
+
+
+## Using 'include' in a noTracking session will throw
+
+* Attempting to use `include` in a `noTracking` session will throw an exception.
+
+* Like other entities in a _noTracking_ session,
+  the included items are not tracked and will not prevent additional server requests during subsequent _load_ operations for the same data.
+  To avoid confusion, _include_ operations are disallowed during non-tracking session actions such as `load` or `query`.
+
+* This applies to all items that can be included -
+  e.g., documents, compare-exchange items, counters, revisions, and time series.
+
+**Include when loading**:
+
+
+
+{`const session = documentStore.openSession(\{
+    // Working with a non-tracking session
+    noTracking: true
+\});
+
+try \{
+    // Trying to include a related document when loading a document will throw
+    const product = await session
+        .include("supplier")
+        .load("products/1-A");
+\}
+catch (error) \{
+    // An exception is expected here
+\}
+`}
+
+
+
+**Include when querying**:
+
+
+
+{`const session = documentStore.openSession(\{
+    // Working with a non-tracking session
+    noTracking: true
+\});
+
+try \{
+    // Trying to include related documents in a query will throw
+    const products = await session
+        .query(\{ collection: 'products' \})
+        .include("supplier")
+        .all();
+\}
+catch (error) \{
+    // An exception is expected here
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-php.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-php.mdx
new file mode 100644
index 0000000000..81dcb1ce19
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-php.mdx
@@ -0,0 +1,196 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, each session tracks changes to all entities it has either stored, loaded, or queried for.
+  All changes are then persisted when `saveChanges` is called.
+
+* Tracking can be disabled at various scopes:
+  for a specific entity, for entities returned by a query, for all entities in a session, or globally using conventions.
+
+* In this article:
+  * [Disable tracking changes for a specific entity](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-changes-for-a-specific-entity)
+  * [Disable tracking all entities in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-all-entities-in-session)
+  * [Disable tracking query results](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results)
+  * [Customize tracking in conventions](../../../client-api/session/configuration/how-to-disable-tracking.mdx#customize-tracking-in-conventions)
+
+
+## Disable tracking changes for a specific entity
+
+* You can prevent the session from persisting changes made to a specific entity by using `ignoreChangesFor`.
+* Once changes are ignored for the entity:
+  * Any modifications made to the entity will be ignored by `saveChanges`.
+  * The session will still keep a reference to the entity to avoid repeated server requests.
+    Performing another `load` for the same entity will Not generate another call to the server.
+
+**Example**
+
+
+
+{`// Load a product entity - the session will track the entity by default
+/** @var Product $product */
+$product = $session->load(Product::class, "products/1-A");
+
+// Call 'ignoreChangesFor' to instruct the session to ignore changes made to this entity
+$session->advanced()->ignoreChangesFor($product);
+
+// The following change will be ignored by saveChanges - it will not be persisted
+$product->setUnitsInStock($product->getUnitsInStock() + 1);
+
+$session->saveChanges();
+`}
+
+
+
+
+
+## Disable tracking all entities in session
+
+* Tracking can be disabled for all entities in the session's options.
+* When tracking is disabled for the session:
+  * Method `store` will Not be available (an exception will be thrown if used).
+  * Calling `load` or `query` will generate a call to the server and create new entity instances.
+
+
+
+{`$sessionOptions = new SessionOptions();
+// Disable tracking for all entities in the session's options
+$sessionOptions->setNoTracking(true);
+
+$session = $store->openSession($sessionOptions);
+try \{
+    // Load any entity, it will Not be tracked by the session
+    /** @var Employee $employee1 */
+    $employee1 = $session->load(Employee::class, "employees/1-A");
+
+    // Loading again from the same document will result in a new entity instance
+    $employee2 = $session->load(Employee::class, "employees/1-A");
+
+    // Entity instances are not the same
+    $this->assertNotEquals($employee1, $employee2);
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## Disable tracking query results
+
+* Tracking can be disabled for all entities resulting from a query.
+
+
+
+
+{`$session = $store->openSession();
+try {
+    // Define a query
+    /** @var array $employeesResults */
+    $employeesResults = $session->query(Employee::class)
+        // Set noTracking, all resulting entities will not be tracked
+        ->noTracking()
+        ->whereEquals("FirstName", "Robert")
+        ->toList();
+
+    // The following modification will not be tracked for saveChanges
+    $firstEmployee = $employeesResults[0];
+    $firstEmployee->setLastName("NewName");
+
+    // Change to 'firstEmployee' will not be persisted
+    $session->saveChanges();
+} finally {
+    $session->close();
+}
+`}
+
+
+
+
+{`$session = $store->openSession();
+try {
+    // Define a query
+    /** @var array $employeesResults */
+    $employeesResults = $session->advanced()->documentQuery(Employee::class)
+        // Set noTracking, all resulting entities will not be tracked
+        ->noTracking()
+        ->whereEquals("FirstName", "Robert")
+        ->toList();
+
+    // The following modification will not be tracked for saveChanges
+    $firstEmployee = $employeesResults[0];
+    $firstEmployee->setLastName("NewName");
+
+    // Change to 'firstEmployee' will not be persisted
+    $session->saveChanges();
+} finally {
+    $session->close();
+}
+`}
+
+
+
+
+
+
+## Customize tracking in conventions
+
+* You can further customize and fine-tune which entities will not be tracked
+  by configuring the `ShouldIgnoreEntityChanges` convention method on the document store.
+* This customization will apply to all sessions opened for this document store.
+* Use the `setShouldIgnoreEntityChanges` method to do so.
+ +#### Example: + + + +{`// Define the 'ignore' convention on your document store +$conventions = new DocumentConventions(); +$conventions->setShouldIgnoreEntityChanges( +// Define for which entities tracking should be disabled +// Tracking will be disabled ONLY for entities of type Employee whose FirstName is Bob + function ($session, $entity, $id) \{ + return $entity instanceof Employee && $entity->getFirstName() == "Bob"; + \} +); + +$store = new DocumentStore(); +$store->setConventions($conventions); +$store->initialize(); +try \{ + $session = $store->openSession(); + try \{ + $employee1 = new Employee(); + $employee1->setId("employees/1"); + $employee1->setFirstName("Alice"); + + $employee2 = new Employee(); + $employee2->setId("employees/2"); + $employee2->setFirstName("Bob"); + + $session->store($employee1); // This entity will be tracked + $session->store($employee2); // Changes to this entity will be ignored + + $session->saveChanges(); // Only employee1 will be persisted + + $employee1->setFirstName("Bob"); // Changes to this entity will now be ignored + $employee2->setFirstName("Alice");// This entity will now be tracked + + $session->saveChanges(); // Only employee2 is persisted + \} finally \{ + $session->close(); + \} +\} finally \{ + $store->close(); +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-python.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-python.mdx new file mode 100644 index 0000000000..4ef842b2f6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-disable-tracking-python.mdx @@ -0,0 +1,194 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, each session tracks changes to all entities it has either stored, loaded, or queried for. + All changes are then persisted when `save_changes` is called. + +* Tracking can be disabled at various scopes: + for a specific entity, for entities returned by a query, for all entities in a session, or globally using conventions. + +* In this article: + * [Disable tracking changes for a specific entity](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-changes-for-a-specific-entity) + * [Disable tracking all entities in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-all-entities-in-session) + * [Disable tracking query results](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results) + * [Customize tracking in conventions](../../../client-api/session/configuration/how-to-disable-tracking.mdx#customize-tracking-in-conventions) + + +## Disable tracking changes for a specific entity + +* You can prevent the session from persisting changes made to a specific entity by using `ignore_changes_for`. +* Once changes are ignored for the entity: + * Any modifications made to the entity will be ignored by `save_changes`. + * The session will still keep a reference to the entity to avoid repeated server requests. + Performing another `load` for the same entity will Not generate another call to the server. 
+
+**Example**
+
+
+
+{`# Load a product entity - the session will track the entity by default
+product = session.load("products/1-A", Product)
+
+# Call 'ignore_changes_for' to instruct the session to ignore changes made to this entity
+session.ignore_changes_for(product)
+
+# The following change will be ignored by save_changes - it will not be persisted
+product.units_in_stock += 1
+session.save_changes()
+`}
+
+
+
+**Syntax**
+
+
+
+{`def ignore_changes_for(self, entity: object) -> None: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|----------|------------------------------------------------------|
+| **entity** | `object` | Instance of entity for which changes will be ignored |
+
+
+
+## Disable tracking all entities in session
+
+* Tracking can be disabled for all entities in the session's options.
+* When tracking is disabled for the session:
+  * Method `store` will Not be available (an exception will be thrown if used).
+  * Calling `load` or `query` will generate a call to the server and create new entity instances.
+
+
+
+{`with store.open_session(
+    SessionOptions(
+        # Disable tracking for all entities in the session's options
+        no_tracking=True
+    )
+) as session:
+    # Load any entity, it will Not be tracked by the session
+    employee1 = session.load("employees/1-A", Employee)
+
+    # Loading again from the same document will result in a new entity instance
+    employee2 = session.load("employees/1-A", Employee)
+
+    # Entity instances are not the same
+    self.assertNotEqual(employee1, employee2)
+`}
+
+
+
+
+
+## Disable tracking query results
+
+* Tracking can be disabled for all entities resulting from a query.
+
+
+
+
+{`with store.open_session() as session:
+    # Define a query
+    employees_results = list(
+        session.advanced.document_query(object_type=Employee)
+        # Set no_tracking, all resulting entities will not be tracked
+        .no_tracking().where_equals("FirstName", "Robert")
+    )
+
+    # The following modification will not be tracked for save_changes
+    first_employee = employees_results[0]
+    first_employee.last_name = "NewName"
+
+    # Change to 'first_employee' will not be persisted
+    session.save_changes()
+`}
+
+
+
+
+
+
+## Customize tracking in conventions
+
+* You can further customize and fine-tune which entities will not be tracked
+  by configuring the `should_ignore_entity_changes` convention method on the document store.
+* This customization rule will apply to all sessions opened for this document store.
+* Implement rules under your [ShouldIgnoreEntityChanges](../../../client-api/session/configuration/how-to-disable-tracking.mdx#syntax) subclass.
+  Apply the class's `check` method to control the ignore flow.
+ +**Example**: + + + +{`with DocumentStore() as store: + # Define the 'ignore' convention on your document store: + + # Create a class that implements 'ravendb.documents.conventions.ShouldIgnoreEntityChanges' + # and implement 'check' method - it's going to be called to check if entity should be ignored + + class MyCustomShouldIgnoreEntityChanges(ShouldIgnoreEntityChanges): + def check(self, session_operations: DocumentSession, entity: object, document_id: str) -> bool: + # Define for which entities tracking should be disabled + # Tracking will be disabled ONLY for entities of type Employee whose FirstName is Bob + return isinstance(entity, Employee) and entity.first_name == "Bob" + + store.conventions.should_ignore_entity_changes = MyCustomShouldIgnoreEntityChanges + + store.initialize() + + with store.open_session() as session: + employee1 = Employee(first_name="Alice", Id="employees/1") + employee2 = Employee(first_name="Bob", Id="employees/2") + + session.store(employee1) # This entity will be tracked + session.store(employee2) # Changes to this entity will be ignored + + session.save_changes() + + employee1.first_name = "Bob" # Changes to this entity will now be ignored + employee2.first_name = "Alice" # This entity will now be tracked + + session.save_changes() +`} + + + +**Syntax**: + + + +{`@should_ignore_entity_changes.setter +def should_ignore_entity_changes(self, value: ShouldIgnoreEntityChanges) -> None: ... + +class ShouldIgnoreEntityChanges(ABC): + @abstractmethod + def check( + self, + session_operations: "InMemoryDocumentSessionOperations", + entity: object, + document_id: str, + ) -> bool: + pass +`} + + + +| Parameter | Type | Description | +|-------------|----------|-------------------------------------------------| +| entity | `object` | The entity for which tracking is to be disabled | +| document_id | `str` | The entity's document ID | + +| Return Type | Description | +|---------------|-------------------------------------------------------------------------| +| `bool` | `True` - Entity will Not be tracked
`False` - Entity will be tracked |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-csharp.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-csharp.mdx
new file mode 100644
index 0000000000..385c920b84
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-csharp.mdx
@@ -0,0 +1,188 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, optimistic concurrency checks are **disabled**. Changes made outside of the session object will be overwritten.
+  Concurrent changes to the same document will use the _Last Write Wins_ strategy so a lost update anomaly is possible
+  with the default configuration of the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx).
+
+* Optimistic concurrency can be **enabled** for:
+  * A specific document
+  * A specific session (enable on a per-session basis)
+  * All sessions (enable globally, at the document store level)
+
+* With optimistic concurrency enabled, RavenDB will generate a concurrency exception (and abort all modifications in
+  the current transaction) when trying to save a document that has been modified on the server side after the client
+  loaded and modified it.
+
+* The `ConcurrencyException` that might be thrown upon the `SaveChanges` call needs to be handled by the caller.
+  The operation can be retried (the document needs to be reloaded first, since it has changed in the meantime),
+  or the error can be handled in a way that is suitable for the given scenario.
+
+* In this page:
+  * [Enable for specific session](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-for-specific-session)
+  * [Enable globally](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-globally)
+  * [Disable for specific document (when enabled on session)](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#disable-for-specific-document-(when-enabled-on-session))
+  * [Enable for specific document (when disabled on session)](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-for-specific-document-(when-disabled-on-session))
+
+
+
+* Note that the `UseOptimisticConcurrency` setting only applies to documents that have been modified by the current session.
+  E.g., if you load documents `users/1-A` and `users/2-A` in a session, make modifications only to `users/1-A`, and then call `SaveChanges`,
+  the operation will succeed regardless of the optimistic concurrency setting, even if `users/2-A` has been changed by another process in the meantime.
+
+* However, if you modify both documents and attempt to save changes with optimistic concurrency enabled, an exception will be raised
+  if `users/2-A` has been modified externally.
+  In this case, the updates to both `users/1-A` and `users/2-A` will be cancelled.
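+
+As noted above, the `ConcurrencyException` thrown by `SaveChanges` needs to be handled by the caller.
+The following is a minimal retry sketch, not part of the documented API - the `UpdateProductName` helper and the `maxRetries` constant are illustrative assumptions:
+
+
+
+{`// A minimal retry sketch - 'UpdateProductName' and 'maxRetries' are illustrative only
+private static void UpdateProductName(IDocumentStore store, string productId, string newName)
+\{
+    const int maxRetries = 3;
+
+    for (var attempt = 0; attempt < maxRetries; attempt++)
+    \{
+        using (IDocumentSession session = store.OpenSession())
+        \{
+            // Enable optimistic concurrency for this session
+            session.Advanced.UseOptimisticConcurrency = true;
+
+            // Reload the document on every attempt to work with its latest version
+            Product product = session.Load<Product>(productId);
+            product.Name = newName;
+
+            try
+            \{
+                session.SaveChanges();
+                return; // Success - stop retrying
+            \}
+            catch (ConcurrencyException)
+            \{
+                // The document was modified on the server after it was loaded;
+                // loop around and retry with a freshly loaded copy
+            \}
+        \}
+    \}
+
+    throw new InvalidOperationException("Failed to update " + productId + " after " + maxRetries + " attempts");
+\}
+`}
+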
+
+
+
+
+A detailed description of transactions and concurrency control in RavenDB is available here:
+[Transaction support in RavenDB](../../../client-api/faq/transaction-support.mdx)
+
+
+
+
+## Enable for specific session
+
+
+
+{`using (IDocumentSession session = store.OpenSession())
+\{
+    // Enable optimistic concurrency for this session
+    session.Advanced.UseOptimisticConcurrency = true;
+
+    // Save a document in this session
+    Product product = new Product \{ Name = "Some Name" \};
+    session.Store(product, "products/999");
+    session.SaveChanges();
+
+    // Modify the document 'externally' by another session
+    using (IDocumentSession otherSession = store.OpenSession())
+    \{
+        Product otherProduct = otherSession.Load<Product>("products/999");
+        otherProduct.Name = "Other Name";
+        otherSession.SaveChanges();
+    \}
+
+    // Trying to modify the document without reloading it first will throw
+    product.Name = "Better Name";
+    session.SaveChanges(); // This will throw a ConcurrencyException
+\}
+`}
+
+
+
+
+
+* Enabling optimistic concurrency in a session will ensure that changes made to a document will only be persisted
+  if the version of the document sent in the `SaveChanges()` call matches its version from the time it was initially read (loaded from the server).
+
+* Note that it's necessary to enable optimistic concurrency for ALL sessions that modify the documents for which you want to guarantee that no writes will be silently discarded.
+  If optimistic concurrency is enabled in some sessions but not in others, and they modify the same documents, the risk of the lost update anomaly still exists.
+
+
+
+
+
+## Enable globally
+
+* Optimistic concurrency can also be _enabled_ for all sessions that are opened under a document store.
+
+* Use the [store.Conventions.UseOptimisticConcurrency](../../../client-api/configuration/conventions.mdx#useoptimisticconcurrency) convention to enable globally.
+
+
+
+{`// Enable for all sessions that will be opened within this document store
+store.Conventions.UseOptimisticConcurrency = true;
+
+using (IDocumentSession session = store.OpenSession())
+\{
+    bool isSessionUsingOptimisticConcurrency = session.Advanced.UseOptimisticConcurrency; // will return true
+\}
+`}
+
+
+
+
+
+## Disable for specific document (when enabled on session)
+
+* Optimistic concurrency can be _disabled_ when **storing** a specific document,
+  even when it is _enabled_ for an entire session (or globally).
+
+* This is done by passing `null` as a change vector value to the [Store](../../../client-api/session/storing-entities.mdx) method.
+
+
+
+{`using (IDocumentSession session = store.OpenSession())
+\{
+    // Store document 'products/999'
+    session.Store(new Product \{ Name = "Some Name" \}, id: "products/999");
+    session.SaveChanges();
+\}
+
+using (IDocumentSession session = store.OpenSession())
+\{
+    // Enable optimistic concurrency for the session
+    session.Advanced.UseOptimisticConcurrency = true;
+
+    // Store the same document
+    // Pass 'null' as the changeVector to turn OFF optimistic concurrency for this document
+    session.Store(new Product \{ Name = "Some Other Name" \}, changeVector: null, id: "products/999");
+
+    // This will NOT throw a ConcurrencyException, and the document will be saved
+    session.SaveChanges();
+\}
+`}
+
+
+
+
+
+## Enable for specific document (when disabled on session)
+
+* Optimistic concurrency can be _enabled_ when **storing** a specific document,
+  even when it is _disabled_ for an entire session (or globally).
+
+* This is done by passing `string.Empty` as the change vector value to the [Store](../../../client-api/session/storing-entities.mdx) method.
+  Setting the change vector to an empty string will cause RavenDB to ensure that this document is a new one and doesn't already exist.
+  A `ConcurrencyException` will be thrown if the document already exists.
+
+* If you do not provide a change vector or if the change vector is `null`, optimistic concurrency will be disabled.
+
+* Setting optimistic concurrency for a specific document overrides the `UseOptimisticConcurrency` property from the `Advanced` session operations.
+
+
+
+{`using (IDocumentSession session = store.OpenSession())
+\{
+    // Store document 'products/999'
+    session.Store(new Product \{ Name = "Some Name" \}, id: "products/999");
+    session.SaveChanges();
+\}
+
+using (IDocumentSession session = store.OpenSession())
+\{
+    // Disable optimistic concurrency for the session
+    session.Advanced.UseOptimisticConcurrency = false; // This is also the default value
+
+    // Store the same document
+    // Pass 'string.Empty' as the changeVector to turn ON optimistic concurrency for this document
+    session.Store(new Product \{ Name = "Some Other Name" \}, changeVector: string.Empty, id: "products/999");
+
+    // This will throw a ConcurrencyException, and the document will NOT be saved
+    session.SaveChanges();
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-java.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-java.mdx
new file mode 100644
index 0000000000..4e3b9cd0af
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-java.mdx
@@ -0,0 +1,143 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+By default, optimistic concurrency checks are **disabled**. Changes made outside the session object will be overwritten. Concurrent changes to the same document will use
+the Last Write Wins strategy so a lost update anomaly is possible with the default configuration of the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx).
+
+You can enable the optimistic concurrency strategy either globally, at the document store level, or on a per-session basis.
+In either case, with optimistic concurrency enabled, RavenDB will generate a concurrency exception (and abort all
+modifications in the current transaction) when trying to save a document that has been modified on the server side after the client loaded and modified it.
+
+The `ConcurrencyException` that might be thrown upon the `saveChanges` call needs to be handled by the caller.
+The operation can be retried (the document needs to be reloaded first, since it has changed in the meantime), or the error can be handled in a way that is suitable for the given scenario.
+
+
+Note that `useOptimisticConcurrency` only applies to documents that have been _modified_ by the session. Loading documents `users/1-A` and `users/2-A` in a session, modifying
+`users/1-A` and then calling `saveChanges` will succeed, regardless of the optimistic concurrency setting, even if `users/2-A` has changed in the meantime.
+If the session also tried to save `users/2-A` with optimistic concurrency enabled, an exception would be raised and the updates to both `users/1-A` and `users/2-A`
+would be cancelled.
+
+
+You can also control optimistic concurrency per specific document. To enable it, [provide a Change Vector to Store](../../../client-api/session/storing-entities.mdx).
+If you do not provide a change vector or if the change vector is `null`, optimistic concurrency will be disabled.
+
+Setting the 'Change Vector' to an empty string will cause RavenDB to ensure that this document is a new one and doesn't already exist.
+
+Setting optimistic concurrency per specific document overrides the use of the `useOptimisticConcurrency` field from the `advanced` session operations.
+
+
+For a detailed description of transactions and concurrency control in RavenDB, please refer to the
+[Transaction support in RavenDB](../../../client-api/faq/transaction-support.mdx) article.
+
+
+## Enabling for a specific Session
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    session.advanced().setUseOptimisticConcurrency(true);
+
+    Product product = new Product();
+    product.setName("Some Name");
+
+    session.store(product, "products/999");
+    session.saveChanges();
+
+    try (IDocumentSession otherSession = store.openSession()) \{
+        Product otherProduct = otherSession.load(Product.class, "products/999");
+        otherProduct.setName("Other Name");
+
+        otherSession.saveChanges();
+    \}
+
+    product.setName("Better Name");
+    session.saveChanges(); // will throw ConcurrencyException
+\}
+`}
+
+
+
+
+
+* Enabling optimistic concurrency in a session will ensure that changes made to a document will only be persisted
+  if the version of the document sent in the `saveChanges()` call matches its version from the time it was initially read (loaded from the server).
+
+* Note that it's necessary to enable optimistic concurrency for ALL sessions that modify the documents for which you want to guarantee that no writes will be silently discarded.
+  If optimistic concurrency is enabled in some sessions but not in others, and they modify the same documents, the risk of the lost update anomaly still exists.
+
+
+
+## Enabling Globally
+
+The first example shows how to enable optimistic concurrency for a particular session.
+It can also be enabled globally, for all opened sessions, by using the convention `store.getConventions().setUseOptimisticConcurrency`.
+
+
+
+{`store.getConventions().setUseOptimisticConcurrency(true);
+
+try (IDocumentSession session = store.openSession()) \{
+    boolean isSessionUsingOptimisticConcurrency
+        = session.advanced().isUseOptimisticConcurrency(); // will return true
+\}
+`}
+
+
+
+## Disabling Optimistic Concurrency for a Single Document when it is Enabled on Session
+
+Optimistic concurrency can be disabled for a single document by passing `null` as a change vector value to the `store` method, even when it is enabled for an entire session (or globally).
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    Product product = new Product();
+    product.setName("Some Name");
+
+    session.store(product, "products/999");
+    session.saveChanges();
+\}
+
+try (IDocumentSession session = store.openSession()) \{
+    session.advanced().setUseOptimisticConcurrency(true);
+
+    Product product = new Product();
+    product.setName("Some Other Name");
+
+    session.store(product, null, "products/999");
+    session.saveChanges(); // will NOT throw Concurrency exception
+\}
+`}
+
+
+
+## Enabling Optimistic Concurrency for a New Document when it is Disabled on Session
+
+Optimistic concurrency can be enabled for a new document by passing `""` as a change vector value to the `store` method, even when it is disabled for an entire session (or globally).
+This will cause a `ConcurrencyException` to be thrown if the document already exists.
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    Product product = new Product();
+    product.setName("Some Name");
+    session.store(product, "products/999");
+    session.saveChanges();
+\}
+
+try (IDocumentSession session = store.openSession()) \{
+    session.advanced().setUseOptimisticConcurrency(false); // default value
+
+    Product product = new Product();
+    product.setName("Some Other Name");
+
+    session.store(product, "", "products/999");
+    session.saveChanges(); // will throw Concurrency exception
+\}
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-nodejs.mdx
new file mode 100644
index 0000000000..f2431ca57f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-nodejs.mdx
@@ -0,0 +1,204 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, optimistic concurrency checks are **disabled**. Changes made outside of the session object will be overwritten.
+  Concurrent changes to the same document will use the _Last Write Wins_ strategy so a lost update anomaly is possible
+  with the default configuration of the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx).
+
+* Optimistic concurrency can be **enabled** for:
+  * A specific document
+  * A specific session (enable on a per-session basis)
+  * All sessions (enable globally, at the document store level)
+
+* With optimistic concurrency enabled, RavenDB will generate a concurrency exception (and abort all modifications in
+  the current transaction) when trying to save a document that has been modified on the server side after the client
+  loaded and modified it.
+
+* The `ConcurrencyException` that might be thrown upon the `saveChanges` call needs to be handled by the caller.
+  The operation can be retried (the document needs to be reloaded first, since it has changed in the meantime),
+  or the error can be handled in a way that is suitable for the given scenario.
+ +* In this page: + * [Enable for specific session](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-for-specific-session) + * [Enable globally](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-globally) + * [Disable for specific document (when enabled on session)](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#disable-for-specific-document-(when-enabled-on-session)) + * [Enable for specific document (when disabled on session)](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-for-specific-document-(when-disabled-on-session)) + + + +* Note that the `useOptimisticConcurrency` setting only applies to documents that have been modified by the current session. + E.g., if you load documents `users/1-A` and `users/2-A` in a session, make modifications only to `users/1-A`, and then call `saveChanges`, + the operation will succeed regardless of the optimistic concurrency setting, even if `users/2-A` has been changed by another process in the meantime. + +* However, if you modify both documents and attempt to save changes with optimistic concurrency enabled, an exception will be raised + if `users/2-A` has been modified externally. + In this case, the updates to both `users/1-A` and `users/2-A` will be cancelled. + + + + + +A detailed description of transactions and concurrency control in RavenDB is available here: +[Transaction support in RavenDB](../../../client-api/faq/transaction-support.mdx) + + + + +## Enable for specific session + + + +{`// Enable optimistic concurrency for this session +const session = store.openSession(); +session.advanced.useOptimisticConcurrency = true; + +const product = new Product(); +product.name = "Some Name"; + +// Save a document in this session +await session.store(product, "products/999"); +await session.saveChanges(); + +\{ + // Modify the document 'externally' by another session + const anotherSession = store.openSession(); + + const otherProduct = await anotherSession.load("products/999"); + otherProduct.name = "Other Name"; + await anotherSession.saveChanges(); +\} + +// Trying to modify the document without reloading it first will throw +product.name = "Better Name"; +await session.saveChanges(); // This will throw a ConcurrencyException +`} + + + + + +* Enabling optimistic concurrency in a session will ensure that changes made to a document will only be persisted + if the version of the document sent in the `saveChanges()` call matches its version from the time it was initially read (loaded from the server). + +* Note that it's necessary to enable optimistic concurrency for ALL sessions that modify the documents for which you want to guarantee that no writes will be silently discarded. + If optimistic concurrency is enabled in some sessions but not in others, and they modify the same documents, the risk of the lost update anomaly still exists. + + + + + +## Enable globally + +* Optimistic concurrency can also be _enabled_ for all sessions that are opened under a document store. + +* Use the [store.Conventions.UseOptimisticConcurrency](../../../client-api/configuration/conventions.mdx#useoptimisticconcurrency) convention to enable globally. 
+
+
+
+{`// Enable for all sessions that will be opened within this document store
+store.conventions.useOptimisticConcurrency = true;
+
+\{
+    const session = store.openSession();
+    const isSessionUsingOptimisticConcurrency
+        = session.advanced.useOptimisticConcurrency; // true
+\}
+`}
+
+
+
+
+
+## Disable for specific document (when enabled on session)
+
+* Optimistic concurrency can be _disabled_ when **storing** a specific document,
+  even when it is _enabled_ for an entire session (or globally).
+
+* This is done by passing `null` as a change vector value to the [store](../../../client-api/session/storing-entities.mdx) method.
+
+
+
+{`\{
+    const session = store.openSession();
+
+    const product = new Product();
+    product.name = "Some Name";
+
+    // Store document 'products/999'
+    await session.store(product, "products/999");
+    await session.saveChanges();
+\}
+\{
+    const session = store.openSession();
+
+    // Enable optimistic concurrency for the session
+    session.advanced.useOptimisticConcurrency = true;
+
+    const product = new Product();
+    product.name = "Some Other Name";
+
+    // Store the same document
+    // Pass 'null' as the changeVector to turn OFF optimistic concurrency for this document
+    await session.store(product, "products/999", \{ "changeVector": null \});
+
+    // This will NOT throw a ConcurrencyException, and the document will be saved
+    await session.saveChanges();
+\}
+`}
+
+
+
+
+
+## Enable for specific document (when disabled on session)
+
+* Optimistic concurrency can be _enabled_ when **storing** a specific document,
+  even when it is _disabled_ for an entire session (or globally).
+
+* This is done by passing an empty string (`""`) as the change vector value to the [store](../../../client-api/session/storing-entities.mdx) method.
+  Setting the change vector to an empty string will cause RavenDB to ensure that this document is a new one and doesn't already exist.
+  A `ConcurrencyException` will be thrown if the document already exists.
+
+* If you do not provide a change vector or if the change vector is `null`, optimistic concurrency will be disabled.
+
+* Setting optimistic concurrency for a specific document overrides the `useOptimisticConcurrency` property from the `advanced` session operations.
+ + + +{`\{ + const session = store.openSession(); + + const product = new Product(); + product.name = "Some Name"; + + // Store document 'products/999' + await session.store(product, "products/999"); + await session.saveChanges(); +\} +\{ + const session = store.openSession(); + + // Disable optimistic concurrency for the session + session.advanced.useOptimisticConcurrency = false; // This is also the default value + + const product = new Product(); + product.name = "Some Other Name"; + + // Store the same document + // Pass an empty string as the changeVector to turn ON optimistic concurrency for this document + await session.store(product, "products/999", \{ "changeVector": "" \}); + + // This will throw a ConcurrencyException, and the document will NOT be saved + await session.saveChanges(); +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-php.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-php.mdx new file mode 100644 index 0000000000..182bf0e6ba --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-php.mdx @@ -0,0 +1,204 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, optimistic concurrency checks are **disabled**. Changes made outside of the session object will be overwritten. + Concurrent changes to the same document will use the _Last Write Wins_ strategy so a lost update anomaly is possible + with the default configuration of the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx). + +* Optimistic concurrency can be **enabled** for: + * A specific document + * A specific session (enable on a per-session basis) + * All sessions (enable globally, at the document store level) + +* With optimistic concurrency enabled, RavenDB will generate a concurrency exception (and abort all modifications in + the current transaction) when trying to save a document that has been modified on the server side after the client + loaded and modified it. + +* The `ConcurrencyException` that might be thrown upon the `saveChanges` call needs to be handled by the caller. + The operation can be retried (the document needs to be reloaded since it got changed meanwhile) or handle the error + in a way that is suitable in a given scenario. + +* In this page: + * [Enable for specific session](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-for-specific-session) + * [Enable globally](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-globally) + * [Disable for specific document (when enabled on session)](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#disable-for-specific-document-(when-enabled-on-session)) + * [Enable for specific document (when disabled on session)](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-for-specific-document-(when-disabled-on-session)) + + + +* Note that the `UseOptimisticConcurrency` setting only applies to documents that have been modified by the current session. 
E.g., if you load documents `users/1-A` and `users/2-A` in a session, make modifications only to `users/1-A`, and then call `saveChanges`,
+  the operation will succeed regardless of the optimistic concurrency setting, even if `users/2-A` has been changed by another process in the meantime.
+
+* However, if you modify both documents and attempt to save changes with optimistic concurrency enabled, an exception will be raised
+  if `users/2-A` has been modified externally.
+  In this case, the updates to both `users/1-A` and `users/2-A` will be cancelled.
+
+
+
+
+
+A detailed description of transactions and concurrency control in RavenDB is available here:
+[Transaction support in RavenDB](../../../client-api/faq/transaction-support.mdx)
+
+
+
+
+## Enable for specific session
+
+Enable optimistic concurrency for a session using the advanced session `setUseOptimisticConcurrency` method.
+
+
+
+{`$session = $store->openSession();
+try \{
+    $session->advanced()->setUseOptimisticConcurrency(true);
+
+    $product = new Product();
+    $product->setName("Some Name");
+
+    $session->store($product, "products/999");
+    $session->saveChanges();
+
+    $otherSession = $store->openSession();
+    try \{
+        $otherProduct = $otherSession->load(Product::class, "products/999");
+        $otherProduct->setName("Other Name");
+
+        $otherSession->saveChanges();
+    \} finally \{
+        $otherSession->close();
+    \}
+
+    $product->setName("Better Name");
+    $session->saveChanges(); // will throw ConcurrencyException
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+* Enabling optimistic concurrency in a session will ensure that changes made to a document will only be persisted
+  if the version of the document sent in the `saveChanges` call matches its version from the time it was initially
+  read (loaded from the server).
+
+* Note that it's necessary to enable optimistic concurrency for ALL sessions that modify the documents for
+  which you want to guarantee that no writes will be silently discarded.
+  If optimistic concurrency is enabled in some sessions but not in others, and they modify the same documents,
+  the risk of the lost update anomaly still exists.
+
+
+
+
+
+## Enable globally
+
+* Optimistic concurrency can also be enabled for all sessions that are opened under a document store.
+
+* Use the conventions' `setUseOptimisticConcurrency` method to enable globally.
+
+
+
+{`$store->getConventions()->setUseOptimisticConcurrency(true);
+
+$session = $store->openSession();
+try \{
+    $isSessionUsingOptimisticConcurrency = $session->advanced()->isUseOptimisticConcurrency(); // will return true
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## Disable for specific document (when enabled on session)
+
+* Optimistic concurrency can be _disabled_ when **storing** a specific document,
+  even when it is _enabled_ for an entire session (or globally).
+
+* This is done by passing `null` as a change vector value to the [store](../../../client-api/session/storing-entities.mdx) method.
+
+
+{`$session = $store->openSession();
+try \{
+    $product = new Product();
+    $product->setName("Some Name");
+
+    $session->store($product, "products/999");
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+
+$session = $store->openSession();
+try \{
+    $session->advanced()->setUseOptimisticConcurrency(true);
+
+    $product = new Product();
+    $product->setName("Some Other Name");
+
+    $session->store($product, null, "products/999");
+    $session->saveChanges(); // will NOT throw Concurrency exception
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## Enable for specific document (when disabled on session)
+
+* Optimistic concurrency can be _enabled_ when **storing** a specific document,
+  even when it is _disabled_ for an entire session (or globally).
+
+* This is done by passing an empty string as the change vector value to the [store](../../../client-api/session/storing-entities.mdx) method.
+  Setting the change vector to an empty string will cause RavenDB to ensure that this document is a new one and doesn't already exist.
+  A `ConcurrencyException` will be thrown if the document already exists.
+
+* If you do not provide a change vector or if the change vector is `null`, optimistic concurrency will be disabled.
+
+* Setting optimistic concurrency for a specific document overrides the advanced session `setUseOptimisticConcurrency` operation.
+
+
+
+{`$session = $store->openSession();
+try \{
+    $product = new Product();
+    $product->setName("Some Name");
+    $session->store($product, "products/999");
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+
+$session = $store->openSession();
+try \{
+    $session->advanced()->setUseOptimisticConcurrency(false); // default value
+
+    $product = new Product();
+    $product->setName("Some Other Name");
+
+    $session->store($product, "", "products/999");
+    $session->saveChanges(); // will throw Concurrency exception
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-python.mdx b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-python.mdx
new file mode 100644
index 0000000000..47c7e930d0
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/_how-to-enable-optimistic-concurrency-python.mdx
@@ -0,0 +1,179 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, optimistic concurrency checks are **disabled**. Changes made outside of the session object will be overwritten.
+  Concurrent changes to the same document will use the _Last Write Wins_ strategy so a lost update anomaly is possible
+  with the default configuration of the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx).
+
+* Optimistic concurrency can be **enabled** for:
+  * A specific document
+  * A specific session (enable on a per-session basis)
+  * All sessions (enable globally, at the document store level)
+
+* With optimistic concurrency enabled, RavenDB will generate a concurrency exception (and abort all modifications in
+  the current transaction) when trying to save a document that has been modified on the server side after the client
+  loaded and modified it.
+
+* The `ConcurrencyException` that might be thrown upon the `save_changes` call needs to be handled by the caller.
+ The operation can be retried (the document needs to be reloaded first, since it has changed in the meantime),
+  or the error can be handled in a way that is suitable for the given scenario.
+
+* In this page:
+  * [Enable for specific session](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-for-specific-session)
+  * [Enable globally](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-globally)
+  * [Disable for specific document (when enabled on session)](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#disable-for-specific-document-(when-enabled-on-session))
+  * [Enable for specific document (when disabled on session)](../../../client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx#enable-for-specific-document-(when-disabled-on-session))
+
+
+
+* Note that the `use_optimistic_concurrency` setting only applies to documents that have been modified by the current session.
+  E.g., if you load documents `users/1-A` and `users/2-A` in a session, make modifications only to `users/1-A`, and then call `save_changes`,
+  the operation will succeed regardless of the optimistic concurrency setting, even if `users/2-A` has been changed by another process in the meantime.
+
+* However, if you modify both documents and attempt to save changes with optimistic concurrency enabled, an exception will be raised
+  if `users/2-A` has been modified externally.
+  In this case, the updates to both `users/1-A` and `users/2-A` will be cancelled.
+
+
+
+
+
+A detailed description of transactions and concurrency control in RavenDB is available here:
+[Transaction support in RavenDB](../../../client-api/faq/transaction-support.mdx)
+
+
+
+
+## Enable for specific session
+
+
+
+{`with store.open_session() as session:
+    # Enable optimistic concurrency for this session
+    session.advanced.use_optimistic_concurrency = True
+
+    # Save a document in this session
+    product = Product(name="Some name")
+    session.store(product, "products/999")
+    session.save_changes()
+
+    # Modify the document 'externally' by another session
+    with store.open_session() as other_session:
+        other_product = other_session.load("products/999")
+        other_product.name = "Other name"
+        other_session.save_changes()
+
+    # Trying to modify the document without reloading it first will throw
+    product.name = "Better Name"
+    session.save_changes() # This will throw a ConcurrencyException
+`}
+
+
+
+
+
+* Enabling optimistic concurrency in a session will ensure that changes made to a document will only be persisted
+  if the version of the document sent in the `save_changes` call matches its version from the time it was initially read (loaded from the server).
+
+* Note that it's necessary to enable optimistic concurrency for ALL sessions that modify the documents for which you want to guarantee that no writes will be silently discarded.
+  If optimistic concurrency is enabled in some sessions but not in others, and they modify the same documents, the risk of the lost update anomaly still exists.
+
+
+
+
+
+## Enable globally
+
+* Optimistic concurrency can also be enabled for all sessions that are opened under a document store.
+
+* Use the [store.conventions.use_optimistic_concurrency](../../../client-api/configuration/conventions.mdx#useoptimisticconcurrency) convention to enable globally.
+
+
+
+{`# Enable for all sessions that will be opened within this document store
+store.conventions.use_optimistic_concurrency = True
+with store.open_session() as session:
+    is_session_using_optimistic_concurrency = session.advanced.use_optimistic_concurrency # will return True
+`}
+
+
+
+
+
+## Disable for specific document (when enabled on session)
+
+* Optimistic concurrency can be _disabled_ when **storing** a specific document,
+  even when it is _enabled_ for an entire session (or globally).
+
+* This is done by passing `None` as a change vector value to the [store](../../../client-api/session/storing-entities.mdx) method.
+
+
+
+{`with store.open_session() as session:
+    # Store document 'products/999'
+    session.store(Product(name="Some name", Id="products/999"))
+    session.save_changes()
+
+with store.open_session() as session:
+    # Enable optimistic concurrency for the session
+    session.advanced.use_optimistic_concurrency = True
+
+    # Store the same document
+    # Pass 'None' as the change_vector to turn OFF optimistic concurrency for this document
+    session.store(Product(name="Some Other Name"), change_vector=None, key="products/999")
+
+    # This will NOT throw a ConcurrencyException, and the document will be saved
+    session.save_changes()
+`}
+
+
+
+
+
+## Enable for specific document (when disabled on session)
+
+* Optimistic concurrency can be _enabled_ when **storing** a specific document,
+  even when it is _disabled_ for an entire session (or globally).
+
+* This is done by passing an empty `str` as the change vector value to the [store](../../../client-api/session/storing-entities.mdx) method.
+  Setting the change vector to an empty string will cause RavenDB to ensure that this document is a new one and doesn't already exist.
+  A `ConcurrencyException` will be thrown if the document already exists.
+
+* If you do not provide a change vector or if the change vector is `None`, optimistic concurrency will be disabled.
+
+* Setting optimistic concurrency for a specific document overrides the `use_optimistic_concurrency` property from the `advanced` session operations.
+ + + +{`with store.open_session() as session: + # Store document 'products/999' + session.store(Product(name="Some name", Id="products/999")) + session.save_changes() + +with store.open_session() as session: + # Disable optimistic concurrency for the session + session.advanced.use_optimistic_concurrency = False + + # Store the same document + # Pass empty str as the change_vector to turn ON optimistic concurrency for this document + session.store(Product(name="Some Other Name"), key="products/999", change_vector="") + + # This will throw a ConcurrencyException, and the document will NOT be saved + session.save_changes() +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/configuration/how-to-change-maximum-number-of-requests-per-session.mdx b/versioned_docs/version-7.1/client-api/session/configuration/how-to-change-maximum-number-of-requests-per-session.mdx new file mode 100644 index 0000000000..be76c55733 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/how-to-change-maximum-number-of-requests-per-session.mdx @@ -0,0 +1,47 @@ +--- +title: "Session: How to Change Maximum Number of Requests per Session" +hide_table_of_contents: true +sidebar_label: How to Change Maximum Number of Requests per Session +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToChangeMaximumNumberOfRequestsPerSessionCsharp from './_how-to-change-maximum-number-of-requests-per-session-csharp.mdx'; +import HowToChangeMaximumNumberOfRequestsPerSessionJava from './_how-to-change-maximum-number-of-requests-per-session-java.mdx'; +import HowToChangeMaximumNumberOfRequestsPerSessionPython from './_how-to-change-maximum-number-of-requests-per-session-python.mdx'; +import HowToChangeMaximumNumberOfRequestsPerSessionPhp from './_how-to-change-maximum-number-of-requests-per-session-php.mdx'; +import HowToChangeMaximumNumberOfRequestsPerSessionNodejs from './_how-to-change-maximum-number-of-requests-per-session-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/configuration/how-to-customize-collection-assignment-for-entities.mdx b/versioned_docs/version-7.1/client-api/session/configuration/how-to-customize-collection-assignment-for-entities.mdx new file mode 100644 index 0000000000..c8e9c34a44 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/how-to-customize-collection-assignment-for-entities.mdx @@ -0,0 +1,52 @@ +--- +title: "Session: How to Customize Collection Assignment for Entities" +hide_table_of_contents: true +sidebar_label: How to Customize Collection Assignment for Entities +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToCustomizeCollectionAssignmentForEntitiesCsharp from './_how-to-customize-collection-assignment-for-entities-csharp.mdx'; +import HowToCustomizeCollectionAssignmentForEntitiesJava from './_how-to-customize-collection-assignment-for-entities-java.mdx'; +import HowToCustomizeCollectionAssignmentForEntitiesPython from './_how-to-customize-collection-assignment-for-entities-python.mdx'; +import HowToCustomizeCollectionAssignmentForEntitiesPhp from 
'./_how-to-customize-collection-assignment-for-entities-php.mdx'; +import HowToCustomizeCollectionAssignmentForEntitiesNodejs from './_how-to-customize-collection-assignment-for-entities-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/configuration/how-to-customize-id-generation-for-entities.mdx b/versioned_docs/version-7.1/client-api/session/configuration/how-to-customize-id-generation-for-entities.mdx new file mode 100644 index 0000000000..23322c61e4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/how-to-customize-id-generation-for-entities.mdx @@ -0,0 +1,31 @@ +--- +title: "Session: How to Customize ID Generation for Entities" +hide_table_of_contents: true +sidebar_label: How to Customize ID Generation for Entities +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Session: How to Customize ID Generation for Entities + + +RavenDB provides several ways to create document IDs. + +- To get familiar with basic built-in ID generation strategies + before starting to customize entity identifiers, please see: + [Working with Document Identifiers](../../../client-api/document-identifiers/working-with-document-identifiers.mdx) + +- To learn about conventions related to ID customization, please see + the following articles, presenting conventions that can be applied + to all entity types or to a particular type. 
+ - [Global ID Generation Conventions](../../../client-api/configuration/identifier-generation/global.mdx) + - [Type-specific ID Generation Conventions](../../../client-api/configuration/identifier-generation/type-specific.mdx) + + + diff --git a/versioned_docs/version-7.1/client-api/session/configuration/how-to-customize-identity-property-lookup-for-entities.mdx b/versioned_docs/version-7.1/client-api/session/configuration/how-to-customize-identity-property-lookup-for-entities.mdx new file mode 100644 index 0000000000..7909c283a1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/how-to-customize-identity-property-lookup-for-entities.mdx @@ -0,0 +1,47 @@ +--- +title: "Session: How to Customize the Identity Property Lookup for Entities" +hide_table_of_contents: true +sidebar_label: How to Customize Identity Property Lookup For Entities +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToCustomizeIdentityPropertyLookupForEntitiesCsharp from './_how-to-customize-identity-property-lookup-for-entities-csharp.mdx'; +import HowToCustomizeIdentityPropertyLookupForEntitiesJava from './_how-to-customize-identity-property-lookup-for-entities-java.mdx'; +import HowToCustomizeIdentityPropertyLookupForEntitiesPhp from './_how-to-customize-identity-property-lookup-for-entities-php.mdx'; +import HowToCustomizeIdentityPropertyLookupForEntitiesNodejs from './_how-to-customize-identity-property-lookup-for-entities-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/configuration/how-to-disable-caching.mdx b/versioned_docs/version-7.1/client-api/session/configuration/how-to-disable-caching.mdx new file mode 100644 index 0000000000..72f5426165 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/how-to-disable-caching.mdx @@ -0,0 +1,46 @@ +--- +title: "Disable Caching per Session" +hide_table_of_contents: true +sidebar_label: Disable Caching +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToDisableCachingCsharp from './_how-to-disable-caching-csharp.mdx'; +import HowToDisableCachingJava from './_how-to-disable-caching-java.mdx'; +import HowToDisableCachingPhp from './_how-to-disable-caching-php.mdx'; +import HowToDisableCachingNodejs from './_how-to-disable-caching-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/configuration/how-to-disable-tracking.mdx b/versioned_docs/version-7.1/client-api/session/configuration/how-to-disable-tracking.mdx new file mode 100644 index 0000000000..5a9ddc2c4a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/configuration/how-to-disable-tracking.mdx @@ -0,0 +1,50 @@ +--- +title: "Disable Entity Tracking" +hide_table_of_contents: true +sidebar_label: Disable Tracking +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToDisableTrackingCsharp from './_how-to-disable-tracking-csharp.mdx'; +import 
HowToDisableTrackingPython from './_how-to-disable-tracking-python.mdx';
+import HowToDisableTrackingPhp from './_how-to-disable-tracking-php.mdx';
+import HowToDisableTrackingNodejs from './_how-to-disable-tracking-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx b/versioned_docs/version-7.1/client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx
new file mode 100644
index 0000000000..7a6756b7b5
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/configuration/how-to-enable-optimistic-concurrency.mdx
@@ -0,0 +1,50 @@
+---
+title: "How to Enable Optimistic Concurrency"
+hide_table_of_contents: true
+sidebar_label: How to Enable Optimistic Concurrency
+sidebar_position: 4
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import HowToEnableOptimisticConcurrencyCsharp from './_how-to-enable-optimistic-concurrency-csharp.mdx';
+import HowToEnableOptimisticConcurrencyJava from './_how-to-enable-optimistic-concurrency-java.mdx';
+import HowToEnableOptimisticConcurrencyPython from './_how-to-enable-optimistic-concurrency-python.mdx';
+import HowToEnableOptimisticConcurrencyPhp from './_how-to-enable-optimistic-concurrency-php.mdx';
+import HowToEnableOptimisticConcurrencyNodejs from './_how-to-enable-optimistic-concurrency-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/deleting-entities.mdx b/versioned_docs/version-7.1/client-api/session/deleting-entities.mdx
new file mode 100644
index 0000000000..0e29b64dfe
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/deleting-entities.mdx
@@ -0,0 +1,58 @@
+---
+title: "Session: Deleting Entities"
+hide_table_of_contents: true
+sidebar_label: Deleting Entities
+sidebar_position: 3
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import DeletingEntitiesCsharp from './_deleting-entities-csharp.mdx';
+import DeletingEntitiesJava from './_deleting-entities-java.mdx';
+import DeletingEntitiesPython from './_deleting-entities-python.mdx';
+import DeletingEntitiesPhp from './_deleting-entities-php.mdx';
+import DeletingEntitiesNodejs from './_deleting-entities-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_category_.json b/versioned_docs/version-7.1/client-api/session/how-to/_category_.json
new file mode 100644
index 0000000000..54796dc08f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 10,
+  "label": "How to..."
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-csharp.mdx
new file mode 100644
index 0000000000..86abc59eec
--- /dev/null
+++
b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-csharp.mdx
@@ -0,0 +1,81 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To check whether a document contains a certain attachment,
+  use the method `Exists()` from the `Advanced.Attachments` session operations.
+
+* Calling _'Exists'_ does not [Load](../../../client-api/session/loading-entities.mdx) the document or the attachment to the session,
+  and the session will not track them.
+
+* In this page:
+  * [Check if attachment exists](../../../client-api/session/how-to/check-if-attachment-exists.mdx#check-if-attachment-exists)
+  * [Syntax](../../../client-api/session/how-to/check-if-attachment-exists.mdx#syntax)
+
+## Check if attachment exists
+
+
+
+
+{`bool exists = session
+    .Advanced
+    .Attachments
+    .Exists("categories/1-A", "image.jpg");
+
+if (exists)
+{
+    // attachment 'image.jpg' exists on document 'categories/1-A'
+}
+`}
+
+
+
+{`bool exists = await asyncSession
+    .Advanced
+    .Attachments
+    .ExistsAsync("categories/1-A", "image.jpg");
+
+if (exists)
+{
+    // attachment 'image.jpg' exists on document 'categories/1-A'
+}
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+
+{`bool Exists(string documentId, string attachmentName);
+`}
+
+
+
+{`Task<bool> ExistsAsync(string documentId, string attachmentName, CancellationToken token = default);
+`}
+
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **documentId** | `string` | The ID of the document you want to check |
+| **attachmentName** | `string` | The name of the attachment you are looking for |
+
+| Return Value | Description |
+| - | - |
+| `bool` | `true` - The specified attachment exists on the document
`false` - The attachment does not exist on the document |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-java.mdx
new file mode 100644
index 0000000000..7143e96011
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-java.mdx
@@ -0,0 +1,56 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To check whether a document contains a certain attachment, use the method `exists()` from the `session.advanced().attachments()`
+operations.
+
+* This does not [load the document](../../../client-api/session/loading-entities.mdx) or [the attachment](../../../document-extensions/attachments/loading.mdx)
+from the server, and it does not cause the session to track the document.
+
+* In this page:
+  * [Syntax](../../../client-api/session/how-to/check-if-attachment-exists.mdx#syntax)
+  * [Example](../../../client-api/session/how-to/check-if-attachment-exists.mdx#example)
+
+## Syntax
+
+
+
+{`boolean exists(String documentId, String name);
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **documentId** | `String` | The ID of the document you want to check for the attachment |
+| **name** | `String` | The name of the attachment you want to check the document for |
+
+| Return Value | Description |
+| - | - |
+| `boolean` | Indicates whether the specified attachment exists on the document |
+
+
+
+## Example
+
+
+
+{`boolean exists = session
+    .advanced()
+    .attachments()
+    .exists("categories/1-A","image.jpg");
+
+if (exists) \{
+    // do something
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-nodejs.mdx
new file mode 100644
index 0000000000..88545e4951
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-nodejs.mdx
@@ -0,0 +1,56 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To check whether a document contains a certain attachment,
+  use the method `exists()` from the `advanced.attachments` session operations.
+
+* Calling _'exists'_ does not [load](../../../client-api/session/loading-entities.mdx) the document or the attachment to the session,
+  and the session will not track them.
+
+* In this page:
+  * [Check if attachment exists](../../../client-api/session/how-to/check-if-attachment-exists.mdx#check-if-attachment-exists)
+  * [Syntax](../../../client-api/session/how-to/check-if-attachment-exists.mdx#syntax)
+
+## Check if attachment exists
+
+
+
+{`const exists = await session
+    .advanced
+    .attachments
+    .exists("categories/1-A", "image.jpg");
+
+if (exists) \{
+    // attachment 'image.jpg' exists on document 'categories/1-A'
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`session.advanced.attachments.exists(docId, attachmentName);
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **docId** | `string` | The ID of the document you want to check |
+| **attachmentName** | `string` | The name of the attachment you are looking for |
+
+| Return Value | Description |
+| - | - |
+| `Promise<boolean>` | `true` - The specified attachment exists on the document
`false` - The attachment does not exist on the document | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-php.mdx new file mode 100644 index 0000000000..c7739dd27a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-php.mdx @@ -0,0 +1,61 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To check whether a document contains a certain attachment, + use the `session.advanced` operations `attachments.exists` method. + +* Calling `exists` does not [load](../../../client-api/session/loading-entities.mdx) + the document or the attachment to the session, and the session will not track them. + +* In this page: + * [Check if attachment exists](../../../client-api/session/how-to/check-if-attachment-exists.mdx#check-if-attachment-exists) + * [Syntax](../../../client-api/session/how-to/check-if-attachment-exists.mdx#syntax) + +## Check if attachment exists + + + + +{`$exists = $session + ->advanced() + ->attachments() + ->exists("categories/1-A", "image.jpg"); + + if ($exists) + { + // attachment 'image.jpg' exists on document 'categories/1-A' + } +`} + + + + + + +## Syntax + + + + +{`function exists(?string $documentId, ?string $name): bool; +`} + + + + +| Parameter | Type | Description | +| - | - | - | +| **$documentId** | `?string` | The ID of the document you want to check | +| **$name** | `?string` | The name of the attachment you are looking for | + +| Return Value | Description | +| - | - | +| `bool` | `true` - The specified attachment exists for this document
`false` - The attachment doesn't exist for the document | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-python.mdx new file mode 100644 index 0000000000..41839c75db --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-attachment-exists-python.mdx @@ -0,0 +1,55 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To check whether a document contains a certain attachment, + use the `session.advanced` operations `attachments.exists` method. + +* Calling `exists` does not [load](../../../client-api/session/loading-entities.mdx) + the document or the attachment to the session, and the session will not track them. + +* In this page: + * [Check if attachment exists](../../../client-api/session/how-to/check-if-attachment-exists.mdx#check-if-attachment-exists) + * [Syntax](../../../client-api/session/how-to/check-if-attachment-exists.mdx#syntax) + +## Check if attachment exists + + + + +{`exists = session.advanced.attachments.exists("categories/1-A", "image.jpg") +if exists: + ... # attachment 'image.jpg' exists on document 'categories/1-A' +`} + + + + + + +## Syntax + + + + +{`def exists(self, document_id: str, name: str) -> bool: ... +`} + + + + +| Parameter | Type | Description | +| - | - | - | +| **document_id** | `str` | The ID of the document you want to check | +| **name** | `str` | The name of the attachment you are looking for | + +| Return Value | Description | +| - | - | +| `bool` | `True` - The specified attachment exists for this document
`False` - The attachment doesn't exist for the document |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-csharp.mdx
new file mode 100644
index 0000000000..6aa1dce093
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-csharp.mdx
@@ -0,0 +1,74 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To check whether the database contains a certain document,
+  use the method `Exists()` from the `Advanced` session operations.
+
+* Calling _'Exists'_ does not [Load](../../../client-api/session/loading-entities.mdx) the document entity to the session,
+  and the session will not track it.
+
+* In this page:
+  * [Check if document exists](../../../client-api/session/how-to/check-if-document-exists.mdx#check-if-document-exists)
+  * [Syntax](../../../client-api/session/how-to/check-if-document-exists.mdx#syntax)
+
+## Check if document exists
+
+
+
+
+{`bool exists = session.Advanced.Exists("employees/1-A");
+
+if (exists)
+{
+    // document 'employees/1-A' exists
+}
+`}
+
+
+
+{`bool exists = await asyncSession.Advanced.ExistsAsync("employees/1-A");
+
+if (exists)
+{
+    // document 'employees/1-A' exists
+}
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+
+{`bool Exists(string id);
+`}
+
+
+
+{`Task<bool> ExistsAsync(string documentId, CancellationToken token = default);
+`}
+
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **id** | `string` | The ID of the document to check |
+
+| Return Value | Description |
+| - | - |
+| `bool` | `true` - the document exists in the database.
`false` - The document does Not exist in the database |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-java.mdx
new file mode 100644
index 0000000000..4dae7a648b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-java.mdx
@@ -0,0 +1,51 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To check whether the database contains a certain document, use the method `exists()` from the `advanced` session operations.
+
+* This does not [load](../../../client-api/session/loading-entities.mdx) the document from the server or cause the session to track it.
+
+* In this page:
+  * [Syntax](../../../client-api/session/how-to/check-if-document-exists.mdx#syntax)
+  * [Example](../../../client-api/session/how-to/check-if-document-exists.mdx#example)
+
+
+## Syntax
+
+
+
+{`boolean exists(String id);
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **id** | `String` | The ID of the document you want to check the database for |
+
+| Return Value | Description |
+| - | - |
+| `boolean` | Indicates whether a document with the specified ID exists in the database |
+
+
+
+## Example
+
+
+
+{`boolean exists = session.advanced().exists("employees/1-A");
+
+if (exists) \{
+    // do something
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-nodejs.mdx
new file mode 100644
index 0000000000..8fb5eb9987
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-nodejs.mdx
@@ -0,0 +1,51 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To check whether the database contains a certain document,
+  use the method `exists()` from the `advanced` session operations.
+
+* Calling _'exists'_ does not [load](../../../client-api/session/loading-entities.mdx) the document entity to the session,
+  and the session will not track it.
+
+* In this page:
+  * [Check if document exists](../../../client-api/session/how-to/check-if-document-exists.mdx#check-if-document-exists)
+  * [Syntax](../../../client-api/session/how-to/check-if-document-exists.mdx#syntax)
+
+## Check if document exists
+
+
+
+{`const exists = await session.advanced.exists("employees/1-A");
+if (exists) \{
+    // document 'employees/1-A' exists
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`session.advanced.exists(id);
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **id** | `string` | The ID of the document to check |
+
+| Return Value | Description |
+| - | - |
+| `Promise<boolean>` | `true` - the document exists in the database.
`false` - The document does Not exist in the database |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-php.mdx
new file mode 100644
index 0000000000..cbc1af1e00
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-php.mdx
@@ -0,0 +1,53 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To check whether the database contains a certain document,
+  use the `session.advanced` operations `exists` method.
+
+* Calling `exists` does not [load](../../../client-api/session/loading-entities.mdx)
+  the document entity to the session, and the session will not track it.
+
+* In this page:
+  * [Check if document exists](../../../client-api/session/how-to/check-if-document-exists.mdx#check-if-document-exists)
+  * [Syntax](../../../client-api/session/how-to/check-if-document-exists.mdx#syntax)
+
+## Check if document exists
+
+
+
+{`$exists = $session->advanced()->exists("employees/1-A");
+
+if ($exists)
+\{
+    // document 'employees/1-A' exists
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public function exists(?string $id): bool;
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **$id** | `?string` | The ID of the document to look for |
+
+| Return Value | Description |
+| - | - |
+| `bool` | `true` - this document exists in the database.
`false` - The document does Not exist in the database | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-python.mdx new file mode 100644 index 0000000000..6bdd6b53c5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-document-exists-python.mdx @@ -0,0 +1,55 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To check whether the database contains a certain document, + use the `session.advanced` operations `exists` method. + +* Calling `exists` does not [load](../../../client-api/session/loading-entities.mdx) + the document entity to the session, and the session will not track it. + +* In this page: + * [Check if document exists](../../../client-api/session/how-to/check-if-document-exists.mdx#check-if-document-exists) + * [Syntax](../../../client-api/session/how-to/check-if-document-exists.mdx#syntax) + +## Check if document exists + + + + +{`exists = session.advanced.exists("employees/1-A") + +if exists: + ... # Document 'employees/1-A' exists +`} + + + + + + +## Syntax + + + + +{`def exists(self, key: str) -> bool: ... +`} + + + + +| Parameter | Type | Description | +| - | - | - | +| **key** | `str` | The ID of the document to look for | + +| Return Value | Description | +| - | - | +| `bool` | `True` - this document exists in the database.
`False` - The document does Not exist in the database |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-csharp.mdx
new file mode 100644
index 0000000000..7680d3e6a2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-csharp.mdx
@@ -0,0 +1,182 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) made to all entities that it has either loaded, stored, deleted, or queried for,
+  and persists to the server only what is needed when `SaveChanges()` is called.
+
+* This article describes how to check for changes made to a specific **entity** within a session.
+  To check for changes to **all** tracked entities, see [Check for session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx).
+
+* To get the list of all entities tracked by the session, see [Get tracked entities](../../../client-api/session/how-to/get-tracked-entities.mdx).
+
+* In this page:
+  * [Check for entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#check-for-entity-changes)
+  * [Get entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#get-entity-changes)
+  * [Syntax](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#syntax)
+
+
+## Check for entity changes
+
+* The session's advanced property `HasChanged` indicates whether the specified entity was added, modified, or deleted within the session.
+
+* Note: The _HasChanged_ property is cleared after calling `SaveChanges()`.
+
+
+{`using (var session = store.OpenSession())
+\{
+    // Store a new entity within the session
+    // =====================================
+
+    Employee employee = new Employee \{FirstName = "John", LastName = "Doe"\};
+    session.Store(employee, "employees/1-A");
+
+    // 'HasChanged' will be TRUE
+    Assert.True(session.Advanced.HasChanged(employee));
+
+    // 'HasChanged' will reset to FALSE after saving changes
+    session.SaveChanges();
+    Assert.False(session.Advanced.HasChanged(employee));
+
+    // Load & modify entity within the session
+    // =======================================
+
+    employee = session.Load<Employee>("employees/1-A");
+    Assert.False(session.Advanced.HasChanged(employee)); // FALSE
+
+    employee.LastName = "Brown";
+    Assert.True(session.Advanced.HasChanged(employee)); // TRUE
+
+    session.SaveChanges();
+    Assert.False(session.Advanced.HasChanged(employee)); // FALSE
+\}
+`}
+
+
+
+
+
+## Get entity changes
+
+* Use the session's advanced method `WhatChangedFor()` to get all changes made to the specified entity
+  within the session.
+
+* Details will include:
+  * The name and path of the changed field
+  * Its old and new values
+  * The type of change
+
+* Note: `WhatChangedFor()` reports changes made prior to calling `SaveChanges()`.
+  Calling it immediately after _SaveChanges_ will return no results, since all changes are cleared at that point.
+##### Example I
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    // Store (add) a new entity, it will be tracked by the session
+    Employee employee = new Employee \{FirstName = "John", LastName = "Doe"\};
+    session.Store(employee, "employees/1-A");
+
+    // Get the changes for the entity in the session
+    // Call 'WhatChangedFor', pass the entity object in the param
+    DocumentsChanges[] changesForEmployee = session.Advanced.WhatChangedFor(employee);
+    Assert.Equal(changesForEmployee.Length, 1); // a single change for this entity (adding)
+
+    // Get the change type
+    DocumentsChanges.ChangeType changeType = changesForEmployee[0].Change;
+    Assert.Equal(changeType, DocumentsChanges.ChangeType.DocumentAdded);
+
+    session.SaveChanges();
+\}
+`}
+
+
+
+##### Example II
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    // Load the entity, it will be tracked by the session
+    Employee employee = session.Load<Employee>("employees/1-A");
+
+    // Modify the entity
+    employee.FirstName = "Jim";
+    employee.LastName = "Brown";
+
+    // Get the changes for the entity in the session
+    // Call 'WhatChangedFor', pass the entity object in the param
+    DocumentsChanges[] changesForEmployee = session.Advanced.WhatChangedFor(employee);
+
+    Assert.Equal(changesForEmployee[0].FieldName, "FirstName"); // Field name
+    Assert.Equal(changesForEmployee[0].FieldNewValue, "Jim"); // New value
+    Assert.Equal(changesForEmployee[0].Change, DocumentsChanges.ChangeType.FieldChanged); // Change type
+
+    Assert.Equal(changesForEmployee[1].FieldName, "LastName");
+    Assert.Equal(changesForEmployee[1].FieldNewValue, "Brown");
+    Assert.Equal(changesForEmployee[1].Change, DocumentsChanges.ChangeType.FieldChanged);
+
+    session.SaveChanges();
+\}
+`}
+
+
+
+
+## Syntax
+
+
+
+{`// HasChanged
+bool HasChanged(object entity);
+`}
+
+
+
+{`// WhatChangedFor
+DocumentsChanges[] WhatChangedFor(object entity);
+`}
+
+
+
+| Return value | |
+|----------------------|------------------------------------|
+| `DocumentsChanges[]` | List of changes made to the entity |
+
+
+
+{`public class DocumentsChanges
+\{
+    public object FieldOldValue \{ get; set; \} // Previous field value
+    public object FieldNewValue \{ get; set; \} // Current field value
+    public ChangeType Change \{ get; set; \} // Type of change that occurred
+    public string FieldName \{ get; set; \} // Name of field on which the change occurred
+    public string FieldPath \{ get; set; \} // Path of field on which the change occurred
+    public string FieldFullName \{ get; \} // Path + Name of field on which the change occurred
+\}
+
+public enum ChangeType
+\{
+    DocumentDeleted,
+    DocumentAdded,
+    FieldChanged,
+    NewField,
+    RemovedField,
+    ArrayValueChanged,
+    ArrayValueAdded,
+    ArrayValueRemoved
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-java.mdx
new file mode 100644
index 0000000000..5349fd1054
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-java.mdx
@@ -0,0 +1,37 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To check whether a specific entity differs from the one downloaded from the server, use the `hasChanged` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`boolean hasChanged(Object entity)
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | `Object` | Instance of the entity for which changes will be checked. |
+
+| Return Value | |
+| ------------- | ----- |
+| `boolean` | Indicates whether the given entity has changed. |
+
+## Example
+
+
+
+{`Employee employee = session.load(Employee.class, "employees/1-A");
+boolean hasChanged = session.advanced().hasChanged(employee);// false
+employee.setLastName("Shmoe");
+hasChanged = session.advanced().hasChanged(employee);// true
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-nodejs.mdx
new file mode 100644
index 0000000000..5ddcdf716d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-nodejs.mdx
@@ -0,0 +1,192 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) made to all entities that it has either loaded, stored, deleted, or queried for,
+  and persists to the server only what is needed when `saveChanges()` is called.
+
+* This article describes how to check for changes made to a specific **entity** within a session.
+  To check for changes to **all** tracked entities, see [Check for session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx).
+
+* To get the list of all entities tracked by the session, see [Get tracked entities](../../../client-api/session/how-to/get-tracked-entities.mdx).
+
+* In this page:
+  * [Check for entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#check-for-entity-changes)
+  * [Get entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#get-entity-changes)
+  * [Syntax](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#syntax)
+
+
+## Check for entity changes
+
+* The session's advanced property `hasChanged` indicates whether the specified entity was added, modified, or deleted within the session.
+
+* Note: The _hasChanged_ property is cleared after calling `saveChanges()`.
+
+
+{`const session = documentStore.openSession();
+
+// Store a new entity within the session
+// =====================================
+
+let employee = new Employee();
+employee.firstName = "John";
+employee.lastName = "Doe";
+await session.store(employee, "employees/1-A");
+
+// 'hasChanged' will be TRUE
+assert.ok(session.advanced.hasChanged(employee));
+
+// 'hasChanged' will reset to FALSE after saving changes
+await session.saveChanges();
+assert.ok(!session.advanced.hasChanged(employee));
+
+// Load & modify entity within the session
+// =======================================
+
+employee = await session.load("employees/1-A");
+assert.ok(!session.advanced.hasChanged(employee)); // FALSE
+
+employee.lastName = "Brown";
+assert.ok(session.advanced.hasChanged(employee)); // TRUE
+
+await session.saveChanges();
+assert.ok(!session.advanced.hasChanged(employee)); // FALSE
+`}
+
+
+
+
+
+## Get entity changes
+
+* Use the session's advanced method `whatChangedFor()` to get all changes made to the specified entity
+  within the session.
+ +* Details will include: + * The name and path of the changed field + * Its old and new values + * The type of change + +* Note: `whatChangedFor()` reports changes made prior to calling `saveChanges()`. + Calling it immediately after _saveChanges_ will return no results, since all changes are cleared at that point. +##### Example I + + + +{`const session = documentStore.openSession(); + +// Store (add) a new entity, it will be tracked by the session +let employee = new Employee(); +employee.firstName = "John"; +employee.lastName = "Doe"; +await session.store(employee, "employees/1-A"); + +// Get the changes for the entity in the session +// Call 'whatChangedFor', pass the entity object in the param +const changesForEmployee = session.advanced.whatChangedFor(employee); + +// Assert there was a single change for this entity +assert.equal(changesForEmployee.length, 1); + +// Get the change type +const changeType = changesForEmployee[0].change; +assert.equal(changeType, "DocumentAdded"); + +await session.saveChanges(); +`} + + + +##### Example II + + + +{`const session = documentStore.openSession(); + +// Load the entity, it will be tracked by the session +const employee = await session.load("employees/1-A"); + +// Modify the entity +employee.firstName = "Jim"; +employee.lastName = "Brown"; + +// Get the changes for the entity in the session +// Call 'whatChangedFor', pass the entity object in the param +const changesForEmployee = session.advanced.whatChangedFor(employee); + +assert.equal(changesForEmployee[0].fieldName, "firstName"); +assert.equal(changesForEmployee[0].fieldNewValue, "Jim"); +assert.equal(changesForEmployee[0].change, "FieldChanged"); + +assert.equal(changesForEmployee[1].fieldName, "lastName"); +assert.equal(changesForEmployee[1].fieldNewValue, "Brown"); +assert.equal(changesForEmployee[1].change, "FieldChanged"); +`} + + + + + +## Syntax + + + +{`session.advanced.hasChanged(entity); +`} + + + +| Return value | | +|--------------|----------------------------------------------------------------------------------------------------------| +| `boolean` | `true` - modifications were made to the entity in this session.
`false` - no modifications were made. | + + + +{`session.advanced.whatChangedFor(entity); +`} + + + +| Return value | | +|----------------------|------------------------------------| +| `DocumentsChanges[]` | List of changes made to the entity | + + + +{`class DocumentsChanges \{ + // Previous field value + fieldOldValue; // object + + // Current field value + fieldNewValue; // object + + // Name of field on which the change occurred + fieldName; // string + + // Path of field on which the change occurred + fieldPath; // string + + // Path + Name of field on which the change occurred + fieldFullName; // string + + // Type of change that occurred, can be: + // "DocumentDeleted" + // "DocumentAdded" + // "FieldChanged" + // "NewField" + // "RemovedField" + // "ArrayValueChanged" + // "ArrayValueAdded" + // "ArrayValueRemoved" + change; // string +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-php.mdx new file mode 100644 index 0000000000..858f415523 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-php.mdx @@ -0,0 +1,192 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) made to all entities that it has either loaded, stored, or queried for, + and persists to the server only what is needed when `saveChanges` is called. + +* This article describes how to check for changes made in a specific **entity** within a session. + To check for changes on all tracked entities, see [Check for session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx). + +* In this page: + * [Check for entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#check-for-entity-changes) + * [Get entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#get-entity-changes) + * [Syntax](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#syntax) + + +## Check for entity changes + +* The session's advanced property `hasChanged` indicates whether the specified entity was added, modified, or deleted within the session. + +* Note: The `hasChanged` property is cleared (reset to `false`) after calling `saveChanges`. 
+
+
+{`$session = $store->openSession();
+try \{
+    // Store a new entity within the session
+    // =====================================
+
+    $employee = new Employee();
+    $employee->setFirstName("John");
+    $employee->setLastName("Doe");
+    $session->store($employee, "employees/1-A");
+
+    // '$hasChanged' will be TRUE
+    $hasChanged = $session->advanced()->hasChanged($employee);
+
+    // '$hasChanged' will reset to FALSE after saving changes
+    $session->saveChanges();
+    $hasChanged = $session->advanced()->hasChanged($employee);
+
+    // Load & modify entity within the session
+    // =======================================
+
+    $employee = $session->load(Employee::class, "employees/1-A");
+    $hasChanged = $session->advanced()->hasChanged($employee); // FALSE
+
+    $employee->setLastName("Brown");
+    $hasChanged = $session->advanced()->hasChanged($employee); // TRUE
+
+    $session->saveChanges();
+    $hasChanged = $session->advanced()->hasChanged($employee); // FALSE
+
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## Get entity changes
+
+* Use the advanced session `whatChangedFor` method to get all changes made to the specified entity
+  within the session.
+
+* Details will include:
+  * The name and path of the changed field
+  * Its old and new values
+  * The type of change
+##### Example I
+
+
+
+{`$session = $store->openSession();
+try \{
+    // Store (add) a new entity, it will be tracked by the session
+    $employee = new Employee();
+    $employee->setFirstName("John");
+    $employee->setLastName("Doe");
+    $session->store($employee, "employees/1-A");
+
+    // Get the changes for the entity in the session
+    // Call 'whatChangedFor', pass the entity object in the param
+    $changesForEmployee = $session->advanced()->whatChangedFor($employee);
+    $this->assertCount(1, $changesForEmployee); // a single change for this entity (adding)
+
+    // Get the change type
+    $changeType = $changesForEmployee[0]->getChange();
+    $this->assertTrue($changeType->isDocumentAdded());
+
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+##### Example II
+
+
+
+{`$session = $store->openSession();
+try \{
+
+    // Load the entity, it will be tracked by the session
+    $employee = $session->load(Employee::class, "employees/1-A");
+
+    // Modify the entity
+    $employee->setFirstName("Jim");
+    $employee->setLastName("Brown");
+
+    // Get the changes for the entity in the session
+    // Call 'whatChangedFor', pass the entity object in the param
+    $changesForEmployee = $session->advanced()->whatChangedFor($employee);
+
+    $this->assertEquals("FirstName", $changesForEmployee[0]->getFieldName());// Field name
+    $this->assertEquals("Jim", $changesForEmployee[0]->getFieldNewValue()); // New value
+    $this->assertTrue($changesForEmployee[0]->getChange()->isFieldChanged()); // Change type
+
+    $this->assertEquals("LastName", $changesForEmployee[1]->getFieldName());
+    $this->assertEquals("Brown", $changesForEmployee[1]->getFieldNewValue());
+    $this->assertTrue($changesForEmployee[1]->getChange()->isFieldChanged());
+
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public function hasChanged(?object $entity): bool;
+`}
+
+
+
+{`// whatChangedFor
+public function whatChangedFor(object $entity): DocumentsChangesArray;
+`}
+
+
+
+| ReturnValue | |
+|--------------------|------------------------------------|
+| `DocumentsChanges` | List of changes made to the entity (see `ChangeType` class below for available change types) |
+
+
+
+{`class DocumentsChanges
+\{
+    private mixed
$fieldOldValue = null; // Previous field value + private mixed $fieldNewValue = null; // Current field value + private ChangeType $change; // Type of change that occurred + private ?string $fieldName = null; // Name of field on which the change occurred + private ?string $fieldPath = null; // Path of field on which the change occurred + + public function getFieldFullName(): ?string; // Path + Name of field on which the change occurred + + // ... getters and setters +\} + +class ChangeType +\{ + public function isDocumentDeleted(): bool; + public function isDocumentAdded(): bool; + public function isFieldChanged(): bool; + public function isNewField(): bool; + public function isRemovedField(): bool; + public function isArrayValueChanged(): bool; + public function isArrayValueAdded(): bool; + public function isArrayValueRemoved(): bool; + +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-python.mdx new file mode 100644 index 0000000000..2a49e51231 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-entity-has-changed-python.mdx @@ -0,0 +1,180 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) made to all entities that it has either loaded, stored, deleted, or queried for, + and persists to the server only what is needed when `save_changes` is called. + +* This article describes how to check for changes made to a specific **entity** within a session. + To check for changes to **all** tracked entities, see [Check for session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx). + +* In this page: + * [Check for entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#check-for-entity-changes) + * [Get entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#get-entity-changes) + * [Syntax](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#syntax) + + +## Check for entity changes + +* The session's advanced property `has_changed` indicates whether the specified entity was added, modified, or deleted within the session. + +* Note: The `has_changed` property is cleared (reset to `False`) after calling `save_changes`. 
+ + +{`with store.open_session() as session: + # Store a new entity within the session + # ===================================== + + employee = Employee(first_name="John", last_name="Doe") + session.store(employee, "employees/1-A") + + # 'has_changed' will be True + self.assertTrue(session.advanced.has_changed(employee)) + + # 'has_changed' will reset to False after saving changes + session.save_changes() + self.assertFalse(session.advanced.has_changed(employee)) + + # Load & modify entity within the session + # ======================================= + + employee = session.load("employees/1-A", Employee) + self.assertFalse(session.advanced.has_changed(employee)) # False + + employee.last_name = "Brown" + self.assertTrue(session.advanced.has_changed(employee)) # True + + session.save_changes() + self.assertFalse(session.advanced.has_changed(employee)) # False +`} + + + + + +## Get entity changes + +* Use the advanced session `what_changed` method to get all changes made to the specified entity + within the session. + +* Details will include: + * The name and path of the changed field + * Its old and new values + * The type of change + +* Note: `what_changed` reports changes made prior to calling `save_changes`. + Calling it immediately after _save_changes_ will return no results, since all changes are cleared at that point. +##### Example I + + + +{`with store.open_session() as session: + # Store (add) a new entity, it will be tracked by the session + employee = Employee(first_name="John", last_name="Doe") + session.store(employee, "employees/1-A") + + # Get the changes for the entity in the session + # Call 'what_changed', pass the document id as a key to a resulting dict + changes = session.advanced.what_changed() + changes_for_employee = changes["employees/1-A"] + + self.assertEquals(1, len(changes_for_employee)) # a single change for this entity (adding) + + # Get the change type + change_type = changes_for_employee[0].change + self.assertEquals(DocumentsChanges.ChangeType.DOCUMENT_ADDED, change_type) + + session.save_changes() +`} + + + +##### Example II + + + +{`with store.open_session() as session: + # Load the entity, it will be tracked by the session + employee = session.load("employees/1-A", Employee) + + # Modify the entity + employee.first_name = "Jim" + employee.last_name = "Brown" + + # Get the changes for the entity in the session + # Call 'what_changed', pass the document id as a key to a resulting dict + changes = session.advanced.what_changed() + changes_for_employee = changes["employees/1-A"] + + self.assertEquals("FirstName", changes_for_employee[0].field_name) # Field name + self.assertEquals("Jim", changes_for_employee[0].field_new_value) # New value + self.assertEquals( + changes_for_employee[0].change, DocumentsChanges.ChangeType.FIELD_CHANGED + ) # Change type + + self.assertEquals("LastName", changes_for_employee[1].field_name) # Field name + self.assertEquals("Brown", changes_for_employee[1].field_new_value) # New value + self.assertEquals( + changes_for_employee[1].change, DocumentsChanges.ChangeType.FIELD_CHANGED + ) # Change type + + session.save_changes() +`} + + + + + +## Syntax + + + +{`# has_changed +def has_changed(self, entity: object) -> bool: ... +`} + + + + +{`# what_changed +def what_changed(self) -> Dict[str, List[DocumentsChanges]]: ... 
+`} + + + +| Return value | | +|--------------------|----------------------------------------------------------------------------------------------| +| `DocumentsChanges` | List of changes made to the entity (see `ChangeType` class below for available change types) | + + + +{`class DocumentsChanges: + + def __init__( + self, + field_old_value: object, + field_new_value: object, + change: DocumentsChanges.ChangeType, + field_name: str = None, + field_path: str = None, + ): ... + + class ChangeType(Enum): + DOCUMENT_DELETED = "DocumentDeleted" + DOCUMENT_ADDED = "DocumentAdded" + FIELD_CHANGED = "FieldChanged" + NEW_FIELD = "NewField" + REMOVED_FIELD = "RemovedField" + ARRAY_VALUE_CHANGED = "ArrayValueChanged" + ARRAY_VALUE_ADDED = "ArrayValueAdded" + ARRAY_VALUE_REMOVED = "ArrayValueRemoved" +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-csharp.mdx new file mode 100644 index 0000000000..bedf1bb8b4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-csharp.mdx @@ -0,0 +1,185 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) + made to all the entities it has either loaded, stored, deleted, or queried for, + and persists to the server only what is needed when `SaveChanges()` is called. + +* This article describes how to check for changes made to **all** tracked entities within the session. + To check for changes to a specific **entity**, see [Check for entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx). + +* To get the list of all entities tracked by the session, see [Get tracked entities](../../../client-api/session/how-to/get-tracked-entities.mdx). + +* In this page: + * [Check for session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#check-for-session-changes) + * [Get session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#get-session-changes) + * [Syntax](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#syntax) + + +## Check for session changes + +* The session's advanced property `HasChanges` indicates whether any entities were added, modified, or deleted within the session. + +* Note: The _HasChanges_ property is cleared after calling `SaveChanges()`. + + +{`using (var session = store.OpenSession()) +\{ + // No changes made yet - 'HasChanges' will be FALSE + Assert.False(session.Advanced.HasChanges); + + // Store a new entity within the session + session.Store(new Employee \{ FirstName = "John", LastName = "Doe" \}); + + // 'HasChanges' will now be TRUE + Assert.True(session.Advanced.HasChanges); + + // 'HasChanges' will reset to FALSE after saving changes + session.SaveChanges(); + Assert.False(session.Advanced.HasChanges); +\} +`} + + + + + +## Get session changes + +* Use the session's advanced method `WhatChanged()` to get all changes made to all the entities tracked by the session. 
+
+* For each entity that was modified, the details will include:
+  * The name and path of the changed field
+  * Its old and new values
+  * The type of change
+
+* Note: `WhatChanged()` reports changes made prior to calling `SaveChanges()`.
+  Calling it immediately after _SaveChanges_ will return no results, as the changes are cleared.
+##### Example I
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    // Store (add) new entities, they will be tracked by the session
+    session.Store(new Employee \{ FirstName = "John", LastName = "Doe" \}, "employees/1-A");
+    session.Store(new Employee \{ FirstName = "Jane", LastName = "Doe" \}, "employees/2-A");
+
+    // Call 'WhatChanged' to get all changes in the session
+    IDictionary<string, DocumentsChanges[]> changes = session.Advanced.WhatChanged();
+    Assert.Equal(changes.Count, 2); // 2 entities were added
+
+    // Get the change details for an entity, specify the entity ID
+    DocumentsChanges[] changesForEmployee = changes["employees/1-A"];
+    Assert.Equal(changesForEmployee.Length, 1); // a single change for this entity (adding)
+
+    // Get the change type
+    DocumentsChanges.ChangeType changeType = changesForEmployee[0].Change;
+    Assert.Equal(changeType, DocumentsChanges.ChangeType.DocumentAdded);
+
+    session.SaveChanges();
+\}
+`}
+
+
+
+##### Example II
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    // Load the entities, they will be tracked by the session
+    Employee employee1 = session.Load<Employee>("employees/1-A");
+    Employee employee2 = session.Load<Employee>("employees/2-A");
+
+    // Modify entities
+    employee1.FirstName = "Jim";
+    employee1.LastName = "Brown";
+    employee2.LastName = "Smith";
+
+    // Delete an entity
+    session.Delete(employee2);
+
+    // Call 'WhatChanged' to get all changes in the session
+    IDictionary<string, DocumentsChanges[]> changes = session.Advanced.WhatChanged();
+
+    // Get the change details for an entity, specify the entity ID
+    DocumentsChanges[] changesForEmployee = changes["employees/1-A"];
+
+    Assert.Equal(changesForEmployee[0].FieldName, "FirstName");  // Field name
+    Assert.Equal(changesForEmployee[0].FieldNewValue, "Jim");    // New value
+    Assert.Equal(changesForEmployee[0].Change, DocumentsChanges.ChangeType.FieldChanged); // Change type
+
+    Assert.Equal(changesForEmployee[1].FieldName, "LastName");
+    Assert.Equal(changesForEmployee[1].FieldNewValue, "Brown");
+    Assert.Equal(changesForEmployee[1].Change, DocumentsChanges.ChangeType.FieldChanged);
+
+    // Note: for employee2 - even though the LastName was changed to 'Smith',
+    // the only reported change is the latest modification, which is the delete action.
+    changesForEmployee = changes["employees/2-A"];
+    Assert.Equal(changesForEmployee[0].Change, DocumentsChanges.ChangeType.DocumentDeleted);
+
+    session.SaveChanges();
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`// HasChanges
+bool HasChanges \{ get; \}
+`}
+
+
+
+
+{`// WhatChanged
+IDictionary<string, DocumentsChanges[]> WhatChanged();
+`}
+
+
+
+| Return value | |
+|--------------------------------------------|--------------------------------------------------------|
+| `IDictionary<string, DocumentsChanges[]>` | Dictionary containing list of changes per document ID |
+
+
+
+{`public class DocumentsChanges
+\{
+    public object FieldOldValue \{ get; set; \}  // Previous field value
+    public object FieldNewValue \{ get; set; \}  // Current field value
+    public ChangeType Change \{ get; set; \}     // Type of change that occurred
+    public string FieldName \{ get; set; \}      // Name of field on which the change occurred
+    public string FieldPath \{ get; set; \}      // Path of field on which the change occurred
+    public string FieldFullName \{ get; \}       // Path + Name of field on which the change occurred
+\}
+
+public enum ChangeType
+\{
+    DocumentDeleted,
+    DocumentAdded,
+    FieldChanged,
+    NewField,
+    RemovedField,
+    ArrayValueChanged,
+    ArrayValueAdded,
+    ArrayValueRemoved
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-java.mdx
new file mode 100644
index 0000000000..f347f0a6d7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-java.mdx
@@ -0,0 +1,98 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+A single entity can be checked for changes using the [hasChanged](../../../client-api/session/how-to/check-if-entity-has-changed.mdx) method, but it is also possible to check whether the session contains any changes at all, and even to inspect what exactly has changed.
+Both the `hasChanges` method and the `whatChanged` method are available in the `advanced` session operations.
+
+## HasChanges
+
+Indicates whether the session contains any changes, i.e. whether any entities were added, modified, or deleted.
+
+
+
+{`boolean hasChanges()
+`}
+
+
+
+### Example
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    Assert.assertFalse(session.advanced().hasChanges());
+
+    Employee employee = new Employee();
+    employee.setFirstName("John");
+    employee.setLastName("Doe");
+
+    session.store(employee);
+
+    Assert.assertTrue(session.advanced().hasChanges());
+\}
+`}
+
+
+
+
+
+## WhatChanged
+
+Returns all changes for each entity tracked by the session, including the name of the field/property that changed, its old and new values, and the change type.
+
+
+
+{`Map<String, List<DocumentsChanges>> whatChanged()
+`}
+
+
+
+| ReturnValue | |
+| ------------- | ----- |
+| Map<String, List<DocumentsChanges>> | Map containing list of changes per document ID. |
+
+### Example I
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    Employee employee = new Employee();
+    employee.setFirstName("Joe");
+    employee.setLastName("Doe");
+    session.store(employee);
+
+    Map<String, List<DocumentsChanges>> changes = session.advanced().whatChanged();
+    List<DocumentsChanges> employeeChanges = changes.get("employees/1-A");
+    DocumentsChanges.ChangeType change
+        = employeeChanges.get(0).getChange(); // DocumentsChanges.ChangeType.DOCUMENT_ADDED
+\}
+`}
+
+
+
+### Example II
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    Employee employee = session.load(Employee.class, "employees/1-A"); // 'Joe Doe'
+    employee.setFirstName("John");
+    employee.setLastName("Shmoe");
+
+    Map<String, List<DocumentsChanges>> changes = session.advanced().whatChanged();
+    List<DocumentsChanges> employeeChanges = changes.get("employees/1-A");
+    DocumentsChanges change1
+        = employeeChanges.get(0); // DocumentsChanges.ChangeType.FIELD_CHANGED
+    DocumentsChanges change2
+        = employeeChanges.get(1); // DocumentsChanges.ChangeType.FIELD_CHANGED
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-nodejs.mdx
new file mode 100644
index 0000000000..37fcbeeebf
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-nodejs.mdx
@@ -0,0 +1,176 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes)
+  made to all the entities it has either loaded, stored, deleted, or queried for,
+  and persists to the server only what is needed when `saveChanges()` is called.
+
+* This article describes how to check for changes made to **all** tracked entities within the session.
+  To check for changes to a specific **entity**, see [Check for entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx).
+
+* To get the list of all entities tracked by the session, see [Get tracked entities](../../../client-api/session/how-to/get-tracked-entities.mdx).
+
+* In this page:
+  * [Check for session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#check-for-session-changes)
+  * [Get session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#get-session-changes)
+  * [Syntax](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#syntax)
+
+
+## Check for session changes
+
+* The session's advanced property `hasChanges` indicates whether any entities were added, modified, or deleted within the session.
+
+* Note: The _hasChanges_ property is cleared after calling `saveChanges()`.
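+
+For example, modifying an entity that the session already tracks will also set `hasChanges`.
+The following is a minimal sketch of this behavior (it assumes a document `employees/1-A` already exists):
+
+
+
+{`const session = store.openSession();
+
+// Load an entity - the session starts tracking it
+const employee = await session.load("employees/1-A");
+assert.ok(!session.advanced.hasChanges()); // nothing was modified yet
+
+// Modify the tracked entity
+employee.lastName = "Smith";
+assert.ok(session.advanced.hasChanges()); // the session now holds a pending change
+`}
+
+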
+
+
+{`const session = store.openSession();
+
+// No changes made yet - 'hasChanges' will be FALSE
+assert.ok(!session.advanced.hasChanges());
+
+// Store a new entity within the session
+const employee = new Employee();
+employee.firstName = "John";
+employee.lastName = "Doe";
+
+await session.store(employee);
+
+// 'hasChanges' will now be TRUE
+assert.ok(session.advanced.hasChanges());
+
+// 'hasChanges' will reset to FALSE after saving changes
+await session.saveChanges();
+assert.ok(!session.advanced.hasChanges());
+`}
+
+
+
+
+
+## Get session changes
+
+* Use the session's advanced method `whatChanged()` to get all changes made to all the entities tracked by the session.
+
+* For each entity that was modified, the details will include:
+  * The name and path of the changed field
+  * Its old and new values
+  * The type of change
+
+* Note: `whatChanged()` reports changes made prior to calling `saveChanges()`.
+  Calling it immediately after _saveChanges_ will return no results, as the changes are cleared.
+##### Example I
+
+
+
+{`const session = store.openSession();
+
+// Store (add) new entities, they will be tracked by the session
+const employee1 = new Employee();
+employee1.firstName = "John";
+employee1.lastName = "Doe";
+
+const employee2 = new Employee();
+employee2.firstName = "Jane";
+employee2.lastName = "Doe";
+
+await session.store(employee1, "employees/1-A");
+await session.store(employee2, "employees/2-A");
+
+// Call 'whatChanged' to get all changes in the session
+const changes = session.advanced.whatChanged();
+assert.equal(Object.keys(changes).length, 2); // 2 entities were added
+
+// Get the change details for an entity, specify the entity ID
+const changesForEmployee = changes["employees/1-A"];
+assert.equal(changesForEmployee.length, 1); // a single change for this entity (adding)
+
+// Get the change type
+const changeType = changesForEmployee[0].change;
+assert.equal(changeType, "DocumentAdded");
+
+await session.saveChanges();
+`}
+
+
+
+##### Example II
+
+
+
+{`const session = store.openSession();
+
+// Load the entities, they will be tracked by the session
+const employee1 = await session.load("employees/1-A");
+const employee2 = await session.load("employees/2-A");
+
+// Modify entities
+employee1.firstName = "Jim";
+employee1.lastName = "Brown";
+employee2.lastName = "Smith";
+
+// Delete an entity
+session.delete(employee2);
+
+// Call 'whatChanged' to get all changes in the session
+const changes = session.advanced.whatChanged();
+
+// Get the change details for an entity, specify the entity ID
+let changesForEmployee = changes["employees/1-A"];
+
+assert.equal(changesForEmployee[0].fieldName, "firstName"); // Field name
+assert.equal(changesForEmployee[0].fieldNewValue, "Jim");   // New value
+assert.equal(changesForEmployee[0].change, "FieldChanged"); // Change type
+
+assert.equal(changesForEmployee[1].fieldName, "lastName");
+assert.equal(changesForEmployee[1].fieldNewValue, "Brown");
+assert.equal(changesForEmployee[1].change, "FieldChanged");
+
+// Note: for employee2 - even though the lastName was changed to 'Smith',
+// the only reported change is the latest modification, which is the delete action.
+changesForEmployee = changes["employees/2-A"];
+assert.equal(changesForEmployee[0].change, "DocumentDeleted");
+
+await session.saveChanges();
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`// HasChanges
+session.advanced.hasChanges();
+`}
+
+
+
+
+{`// WhatChanged
+session.advanced.whatChanged();
+`}
+
+
+
+| Return value | |
+|--------------------------------------|-------------------------------------------------------|
+| `Record<string, DocumentsChanges[]>` | Dictionary containing list of changes per document ID |
+
+| `DocumentsChanges` | Type | Description |
+|---------------------|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **change** | string | Type of change that occurred. Can be:<br/>`"DocumentDeleted"`, `"DocumentAdded"`, `"FieldChanged"`, `"NewField"`, `"RemovedField"`, `"ArrayValueChanged"`, `"ArrayValueAdded"`, `"ArrayValueRemoved"` |
+| **fieldOldValue** | object | Previous field value |
+| **fieldNewValue** | object | Current field value |
+| **fieldName** | string | Name of field on which the change occurred |
+| **fieldPath** | string | Path of field on which the change occurred |
+| **fieldFullName** | string | Path + Name of field on which the change occurred |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-php.mdx
new file mode 100644
index 0000000000..a8ebf8d815
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-php.mdx
@@ -0,0 +1,205 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes)
+  made to all the entities it has either loaded, stored, or queried for,
+  and persists to the server only what is needed when `saveChanges` is called.
+
+* This article describes how to check for changes made to all tracked entities within the **session**.
+  To check for changes to a specific **entity**, see [Check for entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx).
+
+* In this page:
+  * [Check for session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#check-for-session-changes)
+  * [Get session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#get-session-changes)
+  * [Syntax](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#syntax)
+
+
+## Check for session changes
+
+* The advanced session `hasChanges` property indicates whether any entities were added, modified, or deleted within the session.
+
+* Note: The `hasChanges` property is cleared (reset to `false`) after calling `saveChanges`.
+
+
+{`$session = $store->openSession();
+try \{
+    // No changes made yet - 'hasChanges' will be FALSE
+    $this->assertFalse($session->advanced()->hasChanges());
+
+    // Store a new entity within the session
+    $employee = new Employee();
+    $employee->setFirstName("John");
+    $employee->setLastName("Doe");
+
+    $session->store($employee, "employees/1-A");
+
+    // 'hasChanges' will now be TRUE
+    $this->assertTrue($session->advanced()->hasChanges());
+
+    // 'hasChanges' will reset to FALSE after saving changes
+    $session->saveChanges();
+    $this->assertFalse($session->advanced()->hasChanges());
+
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## Get session changes
+
+* Use the session's advanced method `whatChanged` to get all changes made to all the entities tracked by the session.
+
+* For each entity that was modified, the details will include:
+  * The name and path of the changed field
+  * Its old and new values
+  * The type of change
+##### Example I
+
+
+
+{`$session = $store->openSession();
+try \{
+    // Store (add) new entities, they will be tracked by the session
+    $employee = new Employee();
+    $employee->setFirstName("John");
+    $employee->setLastName("Doe");
+    $session->store($employee, "employees/1-A");
+
+    $employee = new Employee();
+    $employee->setFirstName("Jane");
+    $employee->setLastName("Doe");
+    $session->store($employee, "employees/2-A");
+
+    // Call 'whatChanged' to get all changes in the session
+    $changes = $session->advanced()->whatChanged();
+
+    $this->assertCount(2, $changes); // 2 entities were added
+
+    // Get the change details for an entity, specify the entity ID
+    $changesForEmployee = $changes["employees/1-A"];
+    $this->assertCount(1, $changesForEmployee); // a single change for this entity (adding)
+
+    // Get the change type
+    $change = $changesForEmployee[0]->getChange(); // ChangeType::DOCUMENT_ADDED
+    $this->assertTrue($change->isDocumentAdded());
+
+    $session->saveChanges();
+
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+##### Example II
+
+
+
+{`$session = $store->openSession();
+try \{
+    // Load the entities, they will be tracked by the session
+    $employee1 = $session->load(Employee::class, "employees/1-A"); // 'John Doe'
+    $employee2 = $session->load(Employee::class, "employees/2-A"); // 'Jane Doe'
+
+    // Modify entities
+    $employee1->setFirstName("Jim");
+    $employee1->setLastName("Brown");
+    $employee2->setLastName("Smith");
+
+    // Delete an entity
+    $session->delete($employee2);
+
+    // Call 'whatChanged' to get all changes in the session
+    $changes = $session->advanced()->whatChanged();
+
+    // Get the change details for an entity, specify the entity ID
+    $changesForEmployee = $changes["employees/1-A"];
+
+    $this->assertEquals("FirstName", $changesForEmployee[0]->getFieldName());  // Field name
+    $this->assertEquals("Jim", $changesForEmployee[0]->getFieldNewValue());    // New value
+    $this->assertTrue($changesForEmployee[0]->getChange()->isFieldChanged());  // Change type
+
+    $this->assertEquals("LastName", $changesForEmployee[1]->getFieldName());   // Field name
+    $this->assertEquals("Brown", $changesForEmployee[1]->getFieldNewValue());  // New value
+    $this->assertTrue($changesForEmployee[1]->getChange()->isFieldChanged());  // Change type
+
+    // Note: for employee2 - even though the LastName was changed to 'Smith',
+    // the only reported change is the latest modification, which is the delete action.
+ $changesForEmployee = $changes["employees/2-A"]; + $this->assertTrue($changesForEmployee[0]->getChange()->isDocumentDeleted()); + + $session->saveChanges(); + +\} finally \{ + $session->close(); +\} +`} + + + + + +## Syntax + + + +{`public function hasChanges(): bool; +`} + + + + +{`public function whatChanged(): array; // array +`} + + + +| ReturnValue | Description | +|-------------|-------------| +| `hasChanges(): bool;` | Indicates whether there were changes during the session | +| `whatChanged(): array;` | Returns an array of changes per document ID | +| `DocumentsChanges` | A list of changes made in an entity (see `ChangeType` class below for available change types) | + + + +{`class DocumentsChanges +\{ + private mixed $fieldOldValue = null; // Previous field value + private mixed $fieldNewValue = null; // Current field value + private ChangeType $change; // Type of change that occurred + private ?string $fieldName = null; // Name of field on which the change occurred + private ?string $fieldPath = null; // Path of field on which the change occurred + + public function getFieldFullName(): ?string; // Path + Name of field on which the change occurred + + // ... getters and setters +\} + +class ChangeType +\{ + public function isDocumentDeleted(): bool; + public function isDocumentAdded(): bool; + public function isFieldChanged(): bool; + public function isNewField(): bool; + public function isRemovedField(): bool; + public function isArrayValueChanged(): bool; + public function isArrayValueAdded(): bool; + public function isArrayValueRemoved(): bool; + +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-python.mdx new file mode 100644 index 0000000000..6187539e33 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_check-if-there-are-any-changes-on-a-session-python.mdx @@ -0,0 +1,180 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) + made to all the entities it has either loaded, stored, deleted, or queried for, + and persists to the server only what is needed when `save_changes` is called. + +* This article describes how to check for changes made to **all** tracked entities within the session. + To check for changes to a specific **entity**, see [Check for entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx). + +* In this page: + * [Check for session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#check-for-session-changes) + * [Get session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#get-session-changes) + * [Syntax](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#syntax) + + +## Check for session changes + +* The advanced session `has_changes` property indicates whether any entities were added, modified, or deleted within the session. + +* Note: The `has_changes` property is cleared (reset to `False`) after calling `save_changes`. 
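+
+For example, modifying an entity that the session already tracks will also set `has_changes`.
+The following is a minimal sketch of this behavior (it assumes a document `employees/1-A` already exists):
+
+
+
+{`with store.open_session() as session:
+    # Load an entity - the session starts tracking it
+    employee = session.load("employees/1-A", Employee)
+    self.assertFalse(session.has_changes())  # nothing was modified yet
+
+    # Modify the tracked entity
+    employee.last_name = "Smith"
+    self.assertTrue(session.has_changes())  # the session now holds a pending change
+`}
+
+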
+
+
+{`with store.open_session() as session:
+    # No changes made yet - 'has_changes' will be False
+    self.assertFalse(session.has_changes())
+
+    # Store a new entity within the session
+    session.store(Employee(first_name="John", last_name="Doe"))
+
+    # 'has_changes' will now be True
+    self.assertTrue(session.has_changes())
+
+    # 'has_changes' will reset to False after saving changes
+    session.save_changes()
+    self.assertFalse(session.has_changes())
+`}
+
+
+
+
+
+## Get session changes
+
+* Use the session's advanced method `what_changed` to get all changes made to all the entities tracked by the session.
+
+* For each entity that was modified, the details will include:
+  * The name and path of the changed field
+  * Its old and new values
+  * The type of change
+
+* Note: `what_changed` reports changes made prior to calling `save_changes`.
+  Calling it immediately after _save_changes_ will return no results, as the changes are cleared.
+##### Example I
+
+
+
+{`with store.open_session() as session:
+    # Store (add) new entities, they will be tracked by the session
+    session.store(Employee(first_name="John", last_name="Doe"), "employees/1-A")
+    session.store(Employee(first_name="Jane", last_name="Doe"), "employees/2-A")
+
+    # Call 'what_changed' to get all changes in the session
+    changes = session.advanced.what_changed()
+    self.assertEquals(2, len(changes))  # 2 entities were added
+
+    # Get the change details for an entity, specify the entity ID
+    changes_for_employee = changes["employees/1-A"]
+    self.assertEquals(1, len(changes_for_employee))  # a single change for this entity (adding)
+
+    # Get the change type
+    change_type = changes_for_employee[0].change
+    self.assertEquals(DocumentsChanges.ChangeType.DOCUMENT_ADDED, change_type)
+
+    session.save_changes()
+`}
+
+
+
+##### Example II
+
+
+
+{`with store.open_session() as session:
+    # Load the entities, they will be tracked by the session
+    employee_1 = session.load("employees/1-A", Employee)
+    employee_2 = session.load("employees/2-A", Employee)
+
+    # Modify entities
+    employee_1.first_name = "Jim"
+    employee_1.last_name = "Brown"
+    employee_2.last_name = "Smith"
+
+    # Delete an entity
+    session.delete(employee_2)
+
+    # Call 'what_changed' to get all changes in the session
+    changes = session.advanced.what_changed()
+
+    # Get the change details for an entity, specify the entity ID
+    changes_for_employee = changes["employees/1-A"]
+
+    self.assertEquals("FirstName", changes_for_employee[0].field_name)  # Field name
+    self.assertEquals("Jim", changes_for_employee[0].field_new_value)  # New value
+    self.assertEquals(
+        DocumentsChanges.ChangeType.FIELD_CHANGED, changes_for_employee[0].change
+    )  # Change type
+
+    self.assertEquals("LastName", changes_for_employee[1].field_name)  # Field name
+    self.assertEquals("Brown", changes_for_employee[1].field_new_value)  # New value
+    self.assertEquals(
+        DocumentsChanges.ChangeType.FIELD_CHANGED, changes_for_employee[1].change
+    )  # Change type
+
+    # Note: for employee2 - even though the LastName was changed to 'Smith',
+    # the only reported change is the latest modification, which is the delete action.
+    changes_for_employee = changes["employees/2-A"]
+    self.assertEquals(DocumentsChanges.ChangeType.DOCUMENT_DELETED, changes_for_employee[0].change)
+
+    session.save_changes()
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`# has_changes
+def has_changes(self) -> bool: ...
+`}
+
+
+
+
+{`# what_changed
+def what_changed(self) -> Dict[str, List[DocumentsChanges]]: ...
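+# Returns all changes made in the session, keyed by document ID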
+`}
+
+
+
+| Return value | |
+|-------------------------------------|-------------------------------------------------------|
+| `Dict[str, List[DocumentsChanges]]` | Dictionary containing list of changes per document ID |
+
+
+
+{`class DocumentsChanges:
+
+    def __init__(
+        self,
+        field_old_value: object,
+        field_new_value: object,
+        change: DocumentsChanges.ChangeType,
+        field_name: str = None,
+        field_path: str = None,
+    ): ...
+
+    class ChangeType(Enum):
+        DOCUMENT_DELETED = "DocumentDeleted"
+        DOCUMENT_ADDED = "DocumentAdded"
+        FIELD_CHANGED = "FieldChanged"
+        NEW_FIELD = "NewField"
+        REMOVED_FIELD = "RemovedField"
+        ARRAY_VALUE_CHANGED = "ArrayValueChanged"
+        ARRAY_VALUE_ADDED = "ArrayValueAdded"
+        ARRAY_VALUE_REMOVED = "ArrayValueRemoved"
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-csharp.mdx
new file mode 100644
index 0000000000..ad35c796f3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-csharp.mdx
@@ -0,0 +1,101 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `Clear()` method to clear the session’s state:
+  it **removes ALL tracked entities** and **cancels ALL pending operations** (e.g., _Store_, _Delete_).
+  This is useful when you need to discard all tracked changes and reset the session state.
+
+* To remove only a **single entity** from tracking, see [Evict a single entity](../../../client-api/session/how-to/evict-entity-from-a-session.mdx).
+
+* In this article:
+  * [Clear the session's state](../../../client-api/session/how-to/clear-a-session.mdx#clear-the-sessions-state)
+  * [Syntax](../../../client-api/session/how-to/clear-a-session.mdx#syntax)
+
+
+
+---
+
+## Clear the session's state
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Store a new employee
+    var newEmployee = new Employee
+    {
+        FirstName = "John",
+        LastName = "Doe"
+    };
+    session.Store(newEmployee, "employees/1");
+
+    // Load an existing employee and modify it
+    var existingEmployee = session.Load<Employee>("employees/2");
+    existingEmployee.LastName = "UpdatedLastName";
+
+    // Load another employee and mark for deletion
+    var employeeToDelete = session.Load<Employee>("employees/3");
+    session.Delete(employeeToDelete);
+
+    // At this point:
+    // * employees/1 is pending insert
+    // * employees/2 is pending update
+    // * employees/3 is pending delete
+
+    // Clear the session state
+    // this will remove all tracking and pending operations
+    session.Advanced.Clear();
+
+    // SaveChanges does nothing - all operations were discarded
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Store a new employee
+    var newEmployee = new Employee
+    {
+        FirstName = "John",
+        LastName = "Doe"
+    };
+    await asyncSession.StoreAsync(newEmployee, "employees/1");
+
+    // Load an existing employee and modify it
+    var existingEmployee = await asyncSession.LoadAsync<Employee>("employees/2");
+    existingEmployee.LastName = "UpdatedLastName";
+
+    // Load another employee and mark for deletion
+    var employeeToDelete = await asyncSession.LoadAsync<Employee>("employees/3");
+    asyncSession.Delete(employeeToDelete);
+
+    // At this point:
+    // * employees/1 is pending insert
+    // * employees/2 is pending update
+    // * employees/3 is pending delete
+
+    // Clear the session state
+    // this will remove all tracking and pending operations
all tracking and pending operations + asyncSession.Advanced.Clear(); + + // SaveChangesAsync does nothing - all operations were discarded + await asyncSession.SaveChangesAsync(); +} +``` + + + +## Syntax + + +```csharp +void Clear(); +``` + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-java.mdx new file mode 100644 index 0000000000..a4c30a7daf --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-java.mdx @@ -0,0 +1,34 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To clear session state, stop tracking entities, and remove all pending commands, +use the `clear` method from the `advanced` session operations. + +## Syntax + + + +{`void clear() +`} + + + +## Example + + + +{`Employee employee = new Employee(); +employee.setFirstName("John"); +employee.setLastName("Doe"); +session.store(employee); + +session.advanced().clear(); + +session.saveChanges(); // nothing will hapen +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-nodejs.mdx new file mode 100644 index 0000000000..9e499f2053 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-nodejs.mdx @@ -0,0 +1,67 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `clear()` method to clear the session’s state: + it **removes ALL tracked entities** and **cancels ALL pending operations** (e.g., _store_, _delete_). + This is useful when you need to discard all tracked changes and reset the session state. + +* To remove only a **single entity** from tracking, see [Evict a single entity](../../../client-api/session/how-to/evict-entity-from-a-session.mdx). 
+
+* In this article:
+  * [Clear the session's state](../../../client-api/session/how-to/clear-a-session.mdx#clear-the-sessions-state)
+  * [Syntax](../../../client-api/session/how-to/clear-a-session.mdx#syntax)
+
+
+
+---
+
+## Clear the session's state
+
+
+```js
+const session = store.openSession();
+
+// Store a new employee
+class Employee {
+    constructor(firstName, lastName) {
+        this.firstName = firstName;
+        this.lastName = lastName;
+    }
+}
+const newEmployee = new Employee("John", "Doe");
+await session.store(newEmployee, "employees/1");
+
+// Load an existing employee and modify it
+const existingEmployee = await session.load("employees/2");
+existingEmployee.lastName = "UpdatedLastName";
+
+// Load another employee and mark for deletion
+const employeeToDelete = await session.load("employees/3");
+session.delete(employeeToDelete);
+
+// At this point:
+// * employees/1 is pending insert
+// * employees/2 is pending update
+// * employees/3 is pending delete
+
+// Clear the session state
+// this will remove all tracking and pending operations
+session.advanced.clear();
+
+// saveChanges does nothing - all operations were discarded
+await session.saveChanges();
+```
+
+
+## Syntax
+
+
+```js
+session.advanced.clear();
+```
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-php.mdx
new file mode 100644
index 0000000000..6517b8449c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-php.mdx
@@ -0,0 +1,34 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To clear session state, stop tracking entities, and remove all pending commands,
+use the `clear` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`public function clear(): void;
+`}
+
+
+
+## Example
+
+
+
+{`$employee = new Employee();
+$employee->setFirstName("John");
+$employee->setLastName("Doe");
+$session->store($employee);
+
+$session->advanced()->clear();
+
+$session->saveChanges(); // nothing will happen
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-python.mdx
new file mode 100644
index 0000000000..10f13a6df9
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_clear-a-session-python.mdx
@@ -0,0 +1,31 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To clear session state, stop tracking entities, and remove all pending commands,
+use the `clear` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`def clear(self) -> None: ...
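+# Removes all tracked entities and discards all pending commands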
+`}
+
+
+
+## Example
+
+
+
+{`session.store(Employee(first_name="John", last_name="Doe"))
+
+session.advanced.clear()
+
+session.save_changes() # nothing will happen
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-csharp.mdx
new file mode 100644
index 0000000000..5e5910c15c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-csharp.mdx
@@ -0,0 +1,98 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* `Defer` allows you to register server commands via the session.
+
+* All the deferred requests will be stored in the session and sent to the server in a single batch when SaveChanges is called,
+  along with any other changes/operations made on the session.
+  Thus, all deferred commands are __executed as part of the session's SaveChanges transaction__.
+
+* In this page:
+  * [Defer commands example](../../../client-api/session/how-to/defer-operations.mdx#defer-commands-example)
+  * [Commands that can be deferred](../../../client-api/session/how-to/defer-operations.mdx#commands-that-can-be-deferred)
+  * [Syntax](../../../client-api/session/how-to/defer-operations.mdx#syntax)
+
+## Defer commands example
+
+
+
+{`// Defer is available in the session's Advanced methods
+session.Advanced.Defer(
+
+    // Define commands to be executed:
+    // i.e. Put a new document
+    new PutCommandData("products/999-A", null, new DynamicJsonValue
+    \{
+        ["Name"] = "My Product",
+        ["Supplier"] = "suppliers/1-A",
+        ["@metadata"] = new DynamicJsonValue
+        \{
+            ["@collection"] = "Products"
+        \}
+    \}),
+
+    // Patch document
+    new PatchCommandData("products/999-A", null, new PatchRequest
+    \{
+        Script = "this.Supplier = 'suppliers/2-A';"
+    \},
+    null),
+
+    // Force a revision to be created
+    new ForceRevisionCommandData("products/999-A"),
+
+    // Delete a document
+    new DeleteCommandData("products/1-A", null)
+);
+
+// All deferred commands will be sent to the server upon calling SaveChanges
+session.SaveChanges();
+`}
+
+
+
+
+
+## Commands that can be deferred
+
+The following commands implement the `ICommandData` interface and can be deferred:
+
+ - [PutCommandData](../../../glossary/put-command-data.mdx)
+ - [DeleteCommandData](../../../glossary/delete-command-data.mdx)
+ - DeletePrefixedCommandData
+ - [PatchCommandData](../../../glossary/patch-command-data.mdx)
+ - BatchPatchCommandData
+ - PutAttachmentCommandData
+ - DeleteAttachmentCommandData
+ - [CopyAttachmentCommandData](../../../glossary/copy-attachment-command-data.mdx)
+ - [MoveAttachmentCommandData](../../../glossary/move-attachment-command-data.mdx)
+ - [CountersBatchCommandData](../../../glossary/counters-batch-command-data.mdx)
+ - PutCompareExchangeCommandData
+ - DeleteCompareExchangeCommandData
+ - CopyTimeSeriesCommandData
+ - TimeSeriesBatchCommandData
+ - ForceRevisionCommandData
+
+
+## Syntax
+
+
+
+{`void Defer(ICommandData command, params ICommandData[] commands);
+void Defer(ICommandData[] commands);
+`}
+
+
+
+| Parameter | Type | Description |
+| - |-|-|
+| **command** | `ICommandData` | A single command (implementing the `ICommandData` interface) to be executed |
+| **commands** | `ICommandData[]` | Array of commands to be executed |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-java.mdx
new file mode 100644
index 0000000000..f8fc002962
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-java.mdx
@@ -0,0 +1,68 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Commands can be deferred until `saveChanges` is called by using the `defer` method in the `advanced` session operations.
+All of the operations will update the session state appropriately after `saveChanges` is called.
+
+Types of commands that can be deferred:
+
+- [PutCommandData](../../../glossary/put-command-data.mdx)
+- [DeleteCommandData](../../../glossary/delete-command-data.mdx)
+- DeletePrefixedCommandData
+- [PatchCommandData](../../../glossary/patch-command-data.mdx)
+- PutAttachmentCommandData
+- DeleteAttachmentCommandData
+- [CopyAttachmentCommandData](../../../glossary/copy-attachment-command-data.mdx)
+- [MoveAttachmentCommandData](../../../glossary/move-attachment-command-data.mdx)
+- [CountersBatchCommandData](../../../glossary/counters-batch-command-data.mdx)
+
+## Syntax
+
+
+
+{`void defer(ICommandData command, ICommandData... commands);
+
+void defer(ICommandData[] commands);
+`}
+
+
+
+| Parameter | Description |
+|------------------|--------------------------------------------------------------|
+| `ICommandData` | Command to be executed. |
+| `ICommandData[]` | Array of commands implementing the `ICommandData` interface. |
+
+## Example
+
+
+
+{`Map<String, Object> value1 = new HashMap<>();
+value1.put("Name", "My Product");
+value1.put("Supplier", "suppliers/999-A");
+value1.put("@metadata", Collections.singletonMap("@collection", "Users"));
+
+PutCommandDataWithJson putCommand1 =
+    new PutCommandDataWithJson("products/999-A",
+        null,
+        store.getConventions().getEntityMapper().valueToTree(value1));
+
+Map<String, Object> value2 = new HashMap<>();
+value2.put("Name", "My Product");
+value2.put("Supplier", "suppliers/999-A");
+value2.put("@metadata", Collections.singletonMap("@collection", "Suppliers"));
+
+PutCommandDataWithJson putCommand2 =
+    new PutCommandDataWithJson("suppliers/999-A",
+        null,
+        store.getConventions().getEntityMapper().valueToTree(value2));
+
+DeleteCommandData command3 = new DeleteCommandData("products/1-A", null);
+
+session.advanced().defer(putCommand1, putCommand2, command3);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-nodejs.mdx
new file mode 100644
index 0000000000..97c7e74638
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-nodejs.mdx
@@ -0,0 +1,96 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* `defer` allows you to register server commands via the session.
+
+* All the deferred requests will be stored in the session and sent to the server in a single batch when saveChanges is called,
+  along with any other changes/operations made on the session.
+  Thus, all deferred commands are __executed as part of the session's saveChanges transaction__.
+
+* In this page:
+  * [Defer commands example](../../../client-api/session/how-to/defer-operations.mdx#defer-commands-example)
+  * [Commands that can be deferred](../../../client-api/session/how-to/defer-operations.mdx#commands-that-can-be-deferred)
+  * [Syntax](../../../client-api/session/how-to/defer-operations.mdx#syntax)
+
+## Defer commands example
+
+
+
+{`const session = documentStore.openSession();
+
+// Define a patchRequest object for the PatchCommandData used in the 'defer' below
+const patchRequest = new PatchRequest();
+patchRequest.script = "this.Supplier = 'suppliers/2-A';";
+
+// 'defer' is available in the session's advanced methods
+session.advanced.defer(
+
+    // Define commands to be executed:
+    // i.e. Put a new document
+    new PutCommandDataBase("products/999-A", null, null, \{
+        "Name": "My Product",
+        "Supplier": "suppliers/1-A",
+        "@metadata": \{ "@collection": "Products" \}
+    \}),
+
+    // Patch document
+    new PatchCommandData("products/999-A", null, patchRequest, null),
+
+    // Force a revision to be created
+    new ForceRevisionCommandData("products/999-A"),
+
+    // Delete a document
+    new DeleteCommandData("products/1-A", null)
+);
+
+// All deferred commands will be sent to the server upon calling saveChanges
+await session.saveChanges();
+`}
+
+
+
+
+
+## Commands that can be deferred
+
+The following commands implement the `ICommandData` interface and can be deferred:
+
+- PutCommandDataBase
+- DeleteCommandData
+- DeletePrefixedCommandData
+- PatchCommandData
+- BatchPatchCommandData
+- PutAttachmentCommandData
+- DeleteAttachmentCommandData
+- CopyAttachmentCommandData
+- MoveAttachmentCommandData
+- CountersBatchCommandData
+- PutCompareExchangeCommandData
+- DeleteCompareExchangeCommandData
+- CopyTimeSeriesCommandData
+- TimeSeriesBatchCommandData
+- ForceRevisionCommandData
+
+
+## Syntax
+
+
+
+{`session.advanced.defer(...commands);
+`}
+
+
+
+| Parameter | Type | Description |
+| - |-|-|
+| **commands** | `ICommandData[]` | Commands to be executed |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-php.mdx
new file mode 100644
index 0000000000..b3440dfaaa
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-php.mdx
@@ -0,0 +1,118 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The `defer` method allows you to register server commands via the session.
+
+* All the deferred requests will be stored in the session and sent to the server
+  in a single batch when `saveChanges` is called, along with any other changes/operations
+  made on the session.
+  Thus, all deferred commands are **executed as part of the session's `saveChanges` transaction**.
+
+* In this page:
+  * [Defer commands example](../../../client-api/session/how-to/defer-operations.mdx#defer-commands-example)
+  * [Commands that can be deferred](../../../client-api/session/how-to/defer-operations.mdx#commands-that-can-be-deferred)
+  * [Syntax](../../../client-api/session/how-to/defer-operations.mdx#syntax)
+
+## Defer commands example
+
+
+
+{`// Defer is available in the session's \`advanced\` methods
+$session->advanced()->defer(
+    // Define commands to be executed:
+
+    // i.e. Put a new document
Put a new document + new PutCommandDataWithJson( + "products/999-A", + null, + null, + [ + "Name" => "My Product", + "Supplier" => "suppliers/1-A", + "@metadata" => [ + "@collection" => "Products" + ] + ] + ), + + // Patch document + new PatchCommandData( + "products/999-A", + null, + PatchRequest::forScript("this.Supplier = 'suppliers/2-A';"), + null + ), + + // Force a revision to be created + new ForceRevisionCommandData("products/999-A"), + + // Delete a document + new DeleteCommandData("products/1-A", null) +); + +// All deferred commands will be sent to the server upon calling SaveChanges +$session->saveChanges(); +`} + + + + + +## Commands that can be deferred + +The following commands implement the `CommandDataInterface` interface and can be deferred: + + - [putCommandData](../../../glossary/put-command-data.mdx) + - [deleteCommandData](../../../glossary/delete-command-data.mdx) + - deletePrefixedCommandData + - [patchCommandData](../../../glossary/patch-command-data.mdx) + - batchPatchCommandData + - putAttachmentCommandData + - deleteAttachmentCommandData + - [copyAttachmentCommandData](../../../glossary/copy-attachment-command-data.mdx) + - [moveAttachmentCommandData](../../../glossary/move-attachment-command-data.mdx) + - [countersBatchCommandData](../../../glossary/counters-batch-command-data.mdx) + - putCompareExchangeCommandData + - deleteCompareExchangeCommandData + - copyTimeSeriesCommandData + - timeSeriesBatchCommandData + - forceRevisionCommandData + + +## Syntax + + + +{`/** + * Usage: + * - defer(CommandDataInterface $command): void + * - defer(CommandDataInterface ...$commands): void + * - defer(array $commands): void + * + * - defer(CommandDataInterface $command, array $commands): void + * + * Example: + * - defer($cmd); + * - defer($cmd1, $cmd2, $cmd3, $cmd4 ...) + * - defer([$cmd1, $cmd2, $cmd4, $cmd4, ...]) + * - defer($cmd1, [$cmd2, $cmd3]) + * + */ +public function defer(...$commands): void; +`} + + + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| **$command** | `CommandDataInterface` | A command to be executed | +| **$commands** | `array` | An array of commands to be executed | +| **$commands** | `array` | An array of commands to be executed | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-python.mdx new file mode 100644 index 0000000000..fce2ab2a30 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_defer-operations-python.mdx @@ -0,0 +1,85 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The `defer` method allows you to register server commands via the session. + +* All the deferred requests will be stored in the session and sent to the server + in a single batch when `save_changes` is called, along with any other changes/operations + made on the session. + Thus, all deferred commands are **executed as part of the session's `save_changes` transaction**. 
+ +* In this page: + * [Defer commands example](../../../client-api/session/how-to/defer-operations.mdx#defer-commands-example) + * [Commands that can be deferred](../../../client-api/session/how-to/defer-operations.mdx#commands-that-can-be-deferred) + * [Syntax](../../../client-api/session/how-to/defer-operations.mdx#syntax) + +## Defer commands example + + + +{`# Defer is available in the session's advanced methods +session.advanced.defer( + # Define commands to be executed: + # i.e. Put a new document + PutCommandDataBase( + "products/999-A", + None, + \{"Name": "My Product", "Supplier": "suppliers/1-A", "@metadata": \{"@collection": "Products"\}\}, + ), + # Patch document + PatchCommandData("products/999-A", None, PatchRequest("this.Supplier = 'suppliers/2-A'"), None), + # Force a revision to be created + ForceRevisionCommandData("products/999-A"), + # Delete a document + DeleteCommandData("products/1-A", None), +) + +# All deferred commands will be sent to the server upon calling save_changes +session.save_changes() +`} + + + + + +## Commands that can be deferred + +The following commands implement the `ICommandData` interface and can be deferred: + + - [PutCommandData](../../../glossary/put-command-data.mdx) + - [DeleteCommandData](../../../glossary/delete-command-data.mdx) + - DeletePrefixedCommandData + - [PatchCommandData](../../../glossary/patch-command-data.mdx) + - BatchPatchCommandData + - PutAttachmentCommandData + - DeleteAttachmentCommandData + - [CopyAttachmentCommandData](../../../glossary/copy-attachment-command-data.mdx) + - [MoveAttachmentCommandData](../../../glossary/move-attachment-command-data.mdx) + - [CountersBatchCommandData](../../../glossary/counters-batch-command-data.mdx) + - PutCompareExchangeCommandData + - DeleteCompareExchangeCommandData + - CopyTimeSeriesCommandData + - TimeSeriesBatchCommandData + - ForceRevisionCommandData + + +## Syntax + + + +{`def defer(self, *commands: CommandData) -> None: ... +`} + + + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| **\*commands** | `CommandData` | An array of commands to be executed | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-csharp.mdx new file mode 100644 index 0000000000..a8148373f7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-csharp.mdx @@ -0,0 +1,327 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) tracks all entities that it has either stored, loaded, or queried. + Any changes made to these entities are automatically persisted when `SaveChanges` is called. + +* Use the `Evict` method to **remove a specific entity** from the session's tracking before calling _SaveChanges_. + Eviction stops the session from tracking the entity, removes it from the identity map and internal cache, + and cancels any pending operations for that entity (e.g., Store, Delete). + +* This article explains how to **evict a single entity** from the session. + To evict _all_ tracked entities and clear _all_ pending operations, use the [Clear](../../../client-api/session/how-to/clear-a-session.mdx) method instead. 
+  To prevent an entity from being tracked, see [DisableTracking](../../../client-api/session/configuration/how-to-disable-tracking.mdx).
+
+* In this article:
+  * [Evict an entity before saving](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#evict-an-entity-before-saving)
+  * [Evict an entity to force reload from server](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#evict-an-entity-to-force-reload-from-server)
+  * [Evict an entity from the session's delete queue](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#evict-an-entity-from-the-sessions-delete-queue)
+  * [Limitations](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#limitations)
+  * [Syntax](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#syntax)
+
+
+
+---
+
+## Evict an entity before saving
+
+* Calling `Evict` on an entity that is _stored_ removes it from the session’s tracking and cancels the pending store operation.
+  As a result, the entity will not be saved to the database when _SaveChanges_ is called.
+* This can be useful if you’ve staged an entity to be stored, but then decide to discard it before committing changes.
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    Employee employee1 = new Employee { FirstName = "John", LastName = "Doe" };
+    Employee employee2 = new Employee { FirstName = "Jane", LastName = "Doe" };
+
+    // Store both employees in the session
+    // The session begins tracking both employee entities
+    session.Store(employee1, "employees/1");
+    session.Store(employee2, "employees/2");
+
+    // Evict employee1
+    // This removes employee1 from the session's tracking
+    // and cancels its pending store operation
+    session.Advanced.Evict(employee1);
+
+    // Only employee2 will be saved
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    Employee employee1 = new Employee { FirstName = "John", LastName = "Doe" };
+    Employee employee2 = new Employee { FirstName = "Jane", LastName = "Doe" };
+
+    // Store both employees in the session
+    // The session begins tracking both employee entities
+    await asyncSession.StoreAsync(employee1, "employees/1");
+    await asyncSession.StoreAsync(employee2, "employees/2");
+
+    // Evict employee1
+    // This removes employee1 from the session's tracking
+    // and cancels its pending store operation
+    asyncSession.Advanced.Evict(employee1);
+
+    // Only employee2 will be saved
+    await asyncSession.SaveChangesAsync();
+}
+```
+
+
+
+## Evict an entity to force reload from server
+
+* Calling `Evict` on an entity that was previously loaded removes it from the session’s tracking.
+  The next time you call _Load_ for that entity, RavenDB will fetch it from the server instead of returning the cached version.
+* This can be useful if you want to ensure you're working with the latest version of a document,
+  for example, if you expect the document was modified outside the session.
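+
+Note that evicting a tracked entity also discards any modifications made to it that were not yet saved.
+The following is a minimal sketch of this behavior (it assumes a document `employees/1-A` already exists):
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Load and modify an entity - the session tracks the pending update
+    var employee = session.Load<Employee>("employees/1-A");
+    employee.LastName = "Smith";
+
+    // Evict the entity - the pending update is discarded along with the tracking
+    session.Advanced.Evict(employee);
+
+    // SaveChanges will NOT persist the LastName modification
+    session.SaveChanges();
+}
+```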
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Load an entity from the server
+    // The session begins tracking the loaded entity
+    Employee employee = session.Load<Employee>("employees/1");
+    Console.WriteLine("number of requests = " + session.Advanced.NumberOfRequests); // 1
+
+    // Loading the same entity again does NOT trigger a server call
+    employee = session.Load<Employee>("employees/1");
+    Console.WriteLine("number of requests = " + session.Advanced.NumberOfRequests); // 1
+
+    // Evict the entity
+    // This removes it from the session's tracking
+    session.Advanced.Evict(employee);
+
+    // Loading the entity now DOES trigger a server call
+    employee = session.Load<Employee>("employees/1");
+    Console.WriteLine("number of requests = " + session.Advanced.NumberOfRequests); // 2
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Load an entity from the server
+    // The session begins tracking the loaded entity
+    Employee employee = await asyncSession.LoadAsync<Employee>("employees/1");
+    Console.WriteLine("number of requests = " + asyncSession.Advanced.NumberOfRequests); // 1
+
+    // Loading the same entity again does NOT trigger a server call
+    employee = await asyncSession.LoadAsync<Employee>("employees/1");
+    Console.WriteLine("number of requests = " + asyncSession.Advanced.NumberOfRequests); // 1
+
+    // Evict the entity
+    // This removes it from the session's tracking
+    asyncSession.Advanced.Evict(employee);
+
+    // Loading the entity now DOES trigger a server call
+    employee = await asyncSession.LoadAsync<Employee>("employees/1");
+    Console.WriteLine("number of requests = " + asyncSession.Advanced.NumberOfRequests); // 2
+}
+```
+
+
+
+## Evict an entity from the session's delete queue
+
+* Calling `Evict` on an entity that was previously marked for deletion removes it from the session's delete queue.
+  As a result, the deletion will not be sent to the server when _SaveChanges_ is called.
+* This can be useful if you need to revert a pending deletion before committing changes.
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    var employee1 = session.Load<Employee>("employees/1");
+    var employee2 = session.Load<Employee>("employees/2");
+
+    // Mark both employees for deletion
+    session.Delete(employee1);
+    session.Delete(employee2);
+
+    // Remove employee1 from tracking and from the delete queue
+    session.Advanced.Evict(employee1);
+
+    // Only employee2 will be deleted
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var employee1 = await asyncSession.LoadAsync<Employee>("employees/1");
+    var employee2 = await asyncSession.LoadAsync<Employee>("employees/2");
+
+    // Mark both employees for deletion
+    asyncSession.Delete(employee1);
+    asyncSession.Delete(employee2);
+
+    // Remove employee1 from tracking and from the delete queue
+    asyncSession.Advanced.Evict(employee1);
+
+    // Only employee2 will be deleted
+    await asyncSession.SaveChangesAsync();
+}
+```
+
+
+
+## Limitations
+
+* `Evict` cannot be called from within an [OnBeforeStore](../../../client-api/session/how-to/subscribe-to-events.mdx#onbeforestore) or
+  [OnBeforeDelete](../../../client-api/session/how-to/subscribe-to-events.mdx#onbeforedelete) event handler.
+  Attempting to do so will throw an exception.
+* This is a design limitation intended to prevent changes to the session's internal state during event handlers that are triggered by _SaveChanges_.
+  For example:
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    session.Store(new Employee() { FirstName = "Foo1" }, "employees/1");
+    session.Store(new Employee() { FirstName = "Foo2" }, "employees/2");
+    session.SaveChanges();
+}
+
+using (var session = store.OpenSession())
+{
+    var employee2 = session.Load<Employee>("employees/2");
+
+    // Register an event handler that will be called before any document is saved during SaveChanges
+    session.Advanced.OnBeforeStore += delegate (object sender, BeforeStoreEventArgs args)
+    {
+        try
+        {
+            args.Session.Evict(employee2);
+        }
+        catch (InvalidOperationException e)
+        {
+            // An exception is thrown:
+            // "Cannot Evict entity during OnBeforeStore..."
+        }
+    };
+
+    session.Store(new Employee { FirstName = "Foo3" }, "employees/3");
+
+    // SaveChanges triggers the 'OnBeforeStore' event
+    session.SaveChanges();
+}
+
+using (var session = store.OpenSession())
+{
+    var employee2 = session.Load<Employee>("employees/2");
+
+    // Register an event handler that will be called before any document is deleted during SaveChanges
+    session.Advanced.OnBeforeDelete += delegate (object sender, BeforeDeleteEventArgs args)
+    {
+        try
+        {
+            args.Session.Evict(employee2);
+        }
+        catch (InvalidOperationException e)
+        {
+            // An exception is thrown:
+            // "Cannot Evict entity during OnBeforeDelete..."
+        }
+    };
+
+    // Load employees/1 and mark it for deletion
+    var employee1 = session.Load<Employee>("employees/1");
+    session.Delete(employee1);
+
+    // SaveChanges triggers the OnBeforeDelete event
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    await asyncSession.StoreAsync(new Employee { FirstName = "Foo1" }, "employees/1");
+    await asyncSession.StoreAsync(new Employee { FirstName = "Foo2" }, "employees/2");
+    await asyncSession.SaveChangesAsync();
+}
+
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var employee2 = await asyncSession.LoadAsync<Employee>("employees/2");
+
+    // Register an event handler that will be called before any document is saved during SaveChangesAsync
+    asyncSession.Advanced.OnBeforeStore += async (sender, args) =>
+    {
+        try
+        {
+            args.Session.Evict(employee2);
+        }
+        catch (InvalidOperationException e)
+        {
+            // An exception is thrown:
+            // "Cannot Evict entity during OnBeforeStore..."
+        }
+    };
+
+    await asyncSession.StoreAsync(new Employee { FirstName = "Foo3" }, "employees/3");
+    // SaveChangesAsync triggers the OnBeforeStore event
+    await asyncSession.SaveChangesAsync();
+}
+
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var employee2 = await asyncSession.LoadAsync<Employee>("employees/2");
+
+    // Register an event handler that will be called before any document is deleted during SaveChangesAsync
+    asyncSession.Advanced.OnBeforeDelete += async (sender, args) =>
+    {
+        try
+        {
+            args.Session.Evict(employee2);
+        }
+        catch (InvalidOperationException e)
+        {
+            // An exception is thrown:
+            // "Cannot Evict entity during OnBeforeDelete..."
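+            // (Evicting here would mutate the session's tracking state
+            // while SaveChanges is iterating over it, hence the restriction.)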
+        }
+    };
+
+    // Load employees/1 and mark it for deletion
+    var employee1 = await asyncSession.LoadAsync<Employee>("employees/1");
+    asyncSession.Delete(employee1);
+
+    // SaveChangesAsync triggers the OnBeforeDelete event
+    await asyncSession.SaveChangesAsync();
+}
+```
+
+
+
+## Syntax
+
+
+```csharp
+void Evict<T>(T entity);
+```
+
+
+| Parameter | Type | Description |
+| -----------| ---- | --------------------------------------------- |
+| **T** | T | Type of the entity to evict |
+| **entity** | T | Instance of the entity to evict from tracking |
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-java.mdx
new file mode 100644
index 0000000000..b31beb3a79
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-java.mdx
@@ -0,0 +1,57 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+We can clear all session operations and stop tracking of all entities using the
+[clear](../../../client-api/session/how-to/clear-a-session.mdx) method, but sometimes
+there's a need to do cleanup only for one entity. This is what `evict` is for.
+
+## Syntax
+
+
+
+{`<T> void evict(T entity);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | T | Instance of an entity that will be evicted |
+
+## Example I
+
+
+
+{`Employee employee1 = new Employee();
+employee1.setFirstName("John");
+employee1.setLastName("Doe");
+
+Employee employee2 = new Employee();
+employee2.setFirstName("Joe");
+employee2.setLastName("Shmoe");
+
+session.store(employee1);
+session.store(employee2);
+
+session.advanced().evict(employee1);
+
+session.saveChanges(); // only 'Joe Shmoe' will be saved
+`}
+
+
+
+## Example II
+
+
+
+{`Employee employee = session.load(Employee.class, "employees/1-A"); // loading from server
+employee = session.load(Employee.class, "employees/1-A"); // no server call
+session.advanced().evict(employee);
+employee = session.load(Employee.class, "employees/1-A"); // loading from server
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-nodejs.mdx
new file mode 100644
index 0000000000..2db430ec4d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-nodejs.mdx
@@ -0,0 +1,192 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) tracks all entities that it has either stored, loaded, or queried.
+  Any changes made to these entities are automatically persisted when `saveChanges` is called.
+
+* Use the `evict` method to **remove a specific entity** from the session's tracking before calling _saveChanges_.
+  Eviction stops the session from tracking the entity, removes it from the identity map and internal cache,
+  and cancels any pending operations for that entity (e.g., store, delete).
+
+* This article explains how to **evict a single entity** from the session.
+  To evict _all_ tracked entities and clear _all_ pending operations, use the [clear](../../../client-api/session/how-to/clear-a-session.mdx) method instead.
+  To prevent an entity from being tracked, see [DisableTracking](../../../client-api/session/configuration/how-to-disable-tracking.mdx).
+
+* In this article:
+  * [Evict an entity before saving](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#evict-an-entity-before-saving)
+  * [Evict an entity to force reload from server](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#evict-an-entity-to-force-reload-from-server)
+  * [Evict an entity from the session's delete queue](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#evict-an-entity-from-the-sessions-delete-queue)
+  * [Limitations](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#limitations)
+  * [Syntax](../../../client-api/session/how-to/evict-entity-from-a-session.mdx#syntax)
+
+
+
+---
+
+## Evict an entity before saving
+
+* Calling `evict` on an entity that is _stored_ removes it from the session’s tracking and cancels the pending store operation.
+  As a result, the entity will not be saved to the database when _saveChanges_ is called.
+* This can be useful if you’ve staged an entity to be stored, but then decide to discard it before committing changes.
+
+
+```js
+class Employee {
+    constructor(firstName, lastName) {
+        this.firstName = firstName;
+        this.lastName = lastName;
+    }
+}
+
+const session = store.openSession();
+
+const employee1 = new Employee("John", "Doe");
+const employee2 = new Employee("Jane", "Doe");
+
+// Store both employees in the session
+// The session begins tracking both employee entities
+await session.store(employee1, "employees/1");
+await session.store(employee2, "employees/2");
+
+// Evict employee1
+// This removes employee1 from the session's tracking
+// and cancels its pending store operation
+session.advanced.evict(employee1);
+
+// Only employee2 will be saved
+await session.saveChanges();
+```
+
+
+## Evict an entity to force reload from server
+
+* Calling `evict` on an entity that was previously loaded removes it from the session’s tracking.
+  The next time you call _load_ for that entity, RavenDB will fetch it from the server instead of returning the cached version.
+* This can be useful if you want to ensure you're working with the latest version of a document,
+  for example, if you expect the document was modified outside the session.
+
+
+```js
+const session = store.openSession();
+
+// Load an entity from the server
+// The session begins tracking the loaded entity
+let employee = await session.load("employees/1");
+console.log("number of requests = ", session.advanced.numberOfRequests); // 1
+
+// Loading the same entity again does NOT trigger a server call
+employee = await session.load("employees/1");
+console.log("number of requests = ", session.advanced.numberOfRequests); // 1
+
+// Evict the entity
+// This removes it from the session's tracking
+session.advanced.evict(employee);
+
+// Loading the entity now DOES trigger a server call
+employee = await session.load("employees/1");
+console.log("number of requests = ", session.advanced.numberOfRequests); // 2
+```
+
+
+## Evict an entity from the session's delete queue
+
+* Calling `evict` on an entity that was previously marked for deletion removes it from the session's delete queue.
+  As a result, the deletion will not be sent to the server when _saveChanges_ is called.
+* This can be useful if you need to revert a pending deletion before committing changes.
+
+
+```js
+const session = store.openSession();
+
+const employee1 = await session.load("employees/1");
+const employee2 = await session.load("employees/2");
+
+// Mark both employees for deletion
+session.delete(employee1);
+session.delete(employee2);
+
+// Remove employee1 from tracking and from delete queue
+session.advanced.evict(employee1);
+
+// Only employee2 will be deleted
+await session.saveChanges();
+```
+
+
+## Limitations
+
+* `evict` cannot be called from within a [beforeStore](../../../client-api/session/how-to/subscribe-to-events.mdx#onbeforestore) or
+  [beforeDelete](../../../client-api/session/how-to/subscribe-to-events.mdx#onbeforedelete) event handler.
+  Attempting to do so will throw an exception.
+* This is a design limitation intended to prevent changes to the session's internal state during event handlers that are triggered by _saveChanges_.
+  For example:
+
+
+```js
+{
+    const session = store.openSession();
+    await session.store(new Employee("John", "Doe"), "employees/1");
+    await session.store(new Employee("Jane", "Doe"), "employees/2");
+    await session.saveChanges();
+}
+
+{
+    // Register event handler for 'beforeStore'
+    store.addSessionListener("beforeStore", async (args) => {
+        try {
+            const session = args.session;
+            const employee2 = await session.load("employees/2");
+            session.advanced.evict(employee2);
+        } catch (e) {
+            console.log("OnBeforeStore exception:", e.message);
+            // Expected: "Cannot Evict entity during OnBeforeStore..."
+        }
+    });
+
+    // Register event handler for 'beforeDelete'
+    store.addSessionListener("beforeDelete", async (args) => {
+        try {
+            const session = args.session;
+            const employee2 = await session.load("employees/2");
+            session.advanced.evict(employee2);
+        } catch (e) {
+            console.log("OnBeforeDelete exception:", e.message);
+            // Expected: "Cannot Evict entity during OnBeforeDelete..."
+        }
+    });
+}
+
+// Try evicting in beforeStore
+{
+    const session = store.openSession();
+    await session.store({ firstName: "Foo3" }, "employees/3");
+
+    await session.saveChanges(); // Triggers beforeStore
+}
+
+// Try evicting in beforeDelete
+{
+    const session = store.openSession();
+    const employee1 = await session.load("employees/1");
+    session.delete(employee1);
+
+    await session.saveChanges(); // Triggers beforeDelete
+}
+```
+
+
+## Syntax
+
+
+```js
+session.advanced.evict(entity);
+```
+
+
+| Parameter | Type | Description |
+| -----------| ------ | ----------------------------------------------|
+| **entity** | object | Instance of the entity to evict from tracking |
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-php.mdx
new file mode 100644
index 0000000000..1b036dc61d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-php.mdx
@@ -0,0 +1,57 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+We can clear all session operations and stop tracking of all entities using the
+[clear](../../../client-api/session/how-to/clear-a-session.mdx) method, but sometimes
+there's a need to do cleanup only for one entity. This is what `evict` is for.
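+
+Besides canceling a pending store, `evict` also removes an entity from the session's
+delete queue. A minimal sketch of this (assuming the documents exist under these IDs;
+it mirrors the delete-queue example in the C# version of this article):
+
+
+
+{`$employee1 = $session->load(Employee::class, "employees/1-A");
+$employee2 = $session->load(Employee::class, "employees/2-A");
+
+// Mark both employees for deletion
+$session->delete($employee1);
+$session->delete($employee2);
+
+// Evicting employee1 also removes it from the delete queue
+$session->advanced()->evict($employee1);
+
+$session->saveChanges(); // only employee2 will be deleted
+`}
+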
+
+## Syntax
+
+
+
+{`public function evict(object $entity): void;
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **$entity** | `object` | Instance of an entity that will be evicted |
+
+## Example I
+
+
+
+{`$employee1 = new Employee();
+$employee1->setFirstName("John");
+$employee1->setLastName("Doe");
+
+$employee2 = new Employee();
+$employee2->setFirstName("Joe");
+$employee2->setLastName("Shmoe");
+
+$session->store($employee1);
+$session->store($employee2);
+
+$session->advanced()->evict($employee1);
+
+$session->saveChanges(); // only 'Joe Shmoe' will be saved
+`}
+
+
+
+## Example II
+
+
+
+{`$employee = $session->load(Employee::class, "employees/1-A"); // loading from server
+$employee = $session->load(Employee::class, "employees/1-A"); // no server call
+$session->advanced()->evict($employee);
+$employee = $session->load(Employee::class, "employees/1-A"); // loading from server
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-python.mdx
new file mode 100644
index 0000000000..4307325fc4
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_evict-entity-from-a-session-python.mdx
@@ -0,0 +1,52 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+We can clear all session operations and stop tracking of all entities using the
+[clear](../../../client-api/session/how-to/clear-a-session.mdx) method, but sometimes
+there's a need to do cleanup only for one entity. This is what `evict` is for.
+
+## Syntax
+
+
+
+{`def evict(self, entity: object) -> None: ...
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **entity** | `object` | Instance of an entity that will be evicted |
+
+## Example I
+
+
+
+{`employee_1 = Employee(first_name="John", last_name="Doe")
+employee_2 = Employee(first_name="Joe", last_name="Shmoe")
+
+session.store(employee_1)
+session.store(employee_2)
+
+session.advanced.evict(employee_1)
+
+session.save_changes() # only 'Joe Shmoe' will be saved
+`}
+
+
+
+## Example II
+
+
+
+{`employee = session.load("employees/1-A") # loading from server
+employee = session.load("employees/1-A") # no server call
+session.advanced.evict(employee)
+employee = session.load("employees/1-A") # loading from server
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-and-modify-entity-metadata-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-and-modify-entity-metadata-csharp.mdx
new file mode 100644
index 0000000000..0de777165a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-and-modify-entity-metadata-csharp.mdx
@@ -0,0 +1,70 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When a document is downloaded from the server, it contains various metadata, such as the document ID or the current change-vector. This information is stored in the session and is available for each entity using the `GetMetadataFor` method from the `Advanced` session operations.
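+
+For orientation, metadata keys that RavenDB itself maintains are prefixed with `@`.
+A minimal sketch reading two of these built-in keys (assuming `employees/1-A` exists):
+
+
+
+{`var employee = session.Load<Employee>("employees/1-A");
+var metadata = session.Advanced.GetMetadataFor(employee);
+
+// Reserved keys, maintained by RavenDB:
+var collection = metadata["@collection"];      // e.g. "Employees"
+var changeVector = metadata["@change-vector"];
+`}
+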
+## Get the Metadata
+
+
+
+{`IMetadataDictionary GetMetadataFor<T>(T instance);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **instance** | T | Instance of an entity for which metadata will be returned. |
+
+| Return Value | |
+| ------------- | ----- |
+| IMetadataDictionary | Returns the metadata for the specified entity. Throws an exception if the `instance` is not tracked by the session. |
+
+### Example
+
+
+
+{`var employee = session.Load<Employee>("employees/1-A");
+var metadata = session.Advanced.GetMetadataFor(employee);
+`}
+
+
+
+
+## Modify the Metadata
+After getting the metadata from `session.Advanced.GetMetadataFor` you can modify it just like any other dictionary.
+
+Keys in the metadata that start with `@` are reserved for RavenDB use.
+
+
+### Example I
+
+
+{`var user = new User
+\{
+    Name = "Idan"
+\};
+
+session.Store(user);
+var metadata = session.Advanced.GetMetadataFor(user);
+metadata["Permissions"] = "ReadOnly";
+session.SaveChanges();
+`}
+
+
+
+### Example II
+
+
+{`var user = session.Load<User>("users/1-A");
+var metadata = session.Advanced.GetMetadataFor(user);
+
+metadata["Permissions"] = "ReadAndWrite";
+
+session.SaveChanges();
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-and-modify-entity-metadata-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-and-modify-entity-metadata-java.mdx
new file mode 100644
index 0000000000..03dcef6883
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-and-modify-entity-metadata-java.mdx
@@ -0,0 +1,68 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When a document is downloaded from the server, it contains various metadata, such as the document ID or the current change-vector. This information is stored in the session and is available for each entity using the `getMetadataFor` method from the `advanced` session operations.
+
+## Get the Metadata
+
+
+
+{`<T> IMetadataDictionary getMetadataFor(T instance);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **instance** | T | Instance of an entity for which metadata will be returned. |
+
+| Return Value | |
+| ------------- | ----- |
+| IMetadataDictionary | Returns the metadata for the specified entity. Throws an exception if the `instance` is not tracked by the session. |
+
+### Example
+
+
+
+{`Employee employee = session.load(Employee.class, "employees/1-A");
+IMetadataDictionary metadata = session.advanced().getMetadataFor(employee);
+`}
+
+
+
+
+## Modify the Metadata
+After getting the metadata from `session.advanced().getMetadataFor` you can modify it just like any other map.
+
+Keys in the metadata that start with `@` are reserved for RavenDB use.
+
+
+### Example I
+
+
+{`User user = new User();
+user.setName("Idan");
+
+session.store(user);
+
+IMetadataDictionary metadata = session.advanced().getMetadataFor(user);
+metadata.put("Permissions", "READ_ONLY");
+session.saveChanges();
+`}
+
+
+
+### Example II
+
+
+{`User user = session.load(User.class, "users/1-A");
+IMetadataDictionary metadata = session.advanced().getMetadataFor(user);
+metadata.put("Permissions", "READ_AND_WRITE");
+session.saveChanges();
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-and-modify-entity-metadata-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-and-modify-entity-metadata-nodejs.mdx
new file mode 100644
index 0000000000..b04ea02192
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-and-modify-entity-metadata-nodejs.mdx
@@ -0,0 +1,68 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When a document is downloaded from the server, it contains various metadata, such as the document ID or the current change-vector. This information is stored in the session and is available for each entity using the `getMetadataFor` method from the `advanced` session operations.
+
+## Get the Metadata
+
+
+
+{`session.advanced.getMetadataFor(entity);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | object | Entity for which metadata will be returned. |
+
+| Return Value | |
+| ------------- | ----- |
+| object | Returns the metadata for the specified entity. Throws an exception if the `entity` is not tracked by the session. |
+
+### Example
+
+
+
+{`const employee = await session.load("employees/1-A");
+const metadata = session.advanced.getMetadataFor(employee);
+`}
+
+
+
+
+## Modify the Metadata
+After getting the metadata from `session.advanced.getMetadataFor()` you can modify it.
+
+Keys in the metadata that start with *@* are reserved for RavenDB use.
+
+
+### Example I
+
+
+{`const user = new User();
+user.name = "Idan";
+
+await session.store(user);
+
+const metadata = session.advanced.getMetadataFor(user);
+metadata["Permissions"] = "READ_ONLY";
+await session.saveChanges();
+`}
+
+
+
+### Example II
+
+
+{`const user = await session.load("users/1-A");
+const metadata = session.advanced.getMetadataFor(user);
+metadata["Permissions"] = "READ_AND_WRITE";
+await session.saveChanges();
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-csharp.mdx
new file mode 100644
index 0000000000..32cb0267ee
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-csharp.mdx
@@ -0,0 +1,59 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When working in a RavenDB cluster, a database can reside on multiple nodes.
+When the client needs to send a request to the server, it can have several nodes to choose from.
+
+The client uses [this logic](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node) to determine which node to send the request to.
+Learn more about load balancing the client requests in this [overview](../../../client-api/configuration/load-balance/overview.mdx).
+
+Use the `GetCurrentSessionNode` method from the `Advanced` session operations
+to find out which node the session currently sends its requests to.
+
+## Syntax
+
+
+
+{`Task<ServerNode> GetCurrentSessionNode();
+`}
+
+
+
+### Return Value
+
+The return value of `GetCurrentSessionNode` is a **ServerNode** object:
+
+
+{`public class ServerNode
+\{
+    public string Url;
+    public string Database;
+    public string ClusterTag;
+    public Role ServerRole;
+
+    [Flags]
+    public enum Role
+    \{
+        None = 0,
+        Promotable = 1,
+        Member = 2,
+        Rehab = 4
+    \}
+\}
+`}
+
+
+
+## Example
+
+
+
+{`ServerNode serverNode = await session.Advanced.GetCurrentSessionNode();
+Console.WriteLine(serverNode.Url);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-java.mdx
new file mode 100644
index 0000000000..47fcdd2e02
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-java.mdx
@@ -0,0 +1,58 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When working in a RavenDB cluster, a database can reside on multiple nodes.
+When the client needs to send a request to the server, it can have several nodes to choose from.
+
+The client uses [this logic](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node) to determine which node to send the request to.
+Learn more about load balancing the client requests in this [overview](../../../client-api/configuration/load-balance/overview.mdx).
+
+Use the `getCurrentSessionNode` method from the `advanced()` session operations
+to find out which node the session currently sends its requests to.
+
+## Syntax
+
+
+
+{`ServerNode getCurrentSessionNode();
+`}
+
+
+
+### Return Value
+
+The return value of `getCurrentSessionNode` is a **ServerNode** object:
+
+
+{`public class ServerNode \{
+    private String url;
+    private String database;
+    private String clusterTag;
+    private Role serverRole;
+
+    // getters and setters
+\}
+
+public enum Role \{
+    NONE,
+    PROMOTABLE,
+    MEMBER,
+    REHAB
+\}
+`}
+
+
+
+## Example
+
+
+
+{`ServerNode serverNode = session.advanced().getCurrentSessionNode();
+System.out.println(serverNode.getUrl());
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-nodejs.mdx
new file mode 100644
index 0000000000..8631a9bdaa
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-nodejs.mdx
@@ -0,0 +1,50 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When working in a RavenDB cluster, a database can reside on multiple nodes.
+When the client needs to send a request to the server, it can have several nodes to choose from.
+
+The client uses [this logic](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node) to determine which node to send the request to.
+Learn more about load balancing the client requests in this [overview](../../../client-api/configuration/load-balance/overview.mdx).
+
+Use the `getCurrentSessionNode()` method from the `advanced` session operations
+to find out which node the session currently sends its requests to.
+
+## Syntax
+
+
+
+{`session.advanced.getCurrentSessionNode();
+`}
+
+
+
+### Return Value
+
+The return value of `getCurrentSessionNode()` is a **ServerNode** object:
+
+
+
+{`ServerNode \{
+    url: "https://localhost:8080",
+    database: "Database",
+    clusterTag: "A",
+    serverRole: "Member" // Role can be one of: "None", "Promotable", "Member", "Rehab"
+\}
+`}
+
+
+
+## Example
+
+
+
+{`const serverNode = session.advanced.getCurrentSessionNode();
+console.log(serverNode.url);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-php.mdx
new file mode 100644
index 0000000000..298e73e614
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-current-session-node-php.mdx
@@ -0,0 +1,66 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When working in a RavenDB cluster, a database can reside on multiple nodes.
+When the client needs to send a request to the server, it can have several nodes to choose from.
+
+The client uses [this logic](../../../client-api/configuration/load-balance/overview.mdx#client-logic-for-choosing-a-node)
+to determine which node to send the request to.
+Learn more about load balancing the client requests in this [overview](../../../client-api/configuration/load-balance/overview.mdx).
+
+Use the `getCurrentSessionNode` method from the `advanced` session operations
+to find out which node the session currently sends its requests to.
+
+## Syntax
+
+
+
+{`public function getCurrentSessionNode(): ServerNode;
+`}
+
+
+
+### Return Value
+
+The return value of `getCurrentSessionNode` is a `ServerNode` object:
+
+
+{`class ServerNode
+\{
+    private ?Url $url = null;
+    private ?string $database = null;
+    private string $clusterTag;
+    private ?ServerNodeRole $serverRole = null;
+
+    public function __construct()
+    \{
+        $this->serverRole = ServerNodeRole::none();
+    \}
+
+    // ... getters and setters
+\}
+
+class ServerNodeRole
+\{
+    public static function none(): ServerNodeRole;
+    public static function promotable(): ServerNodeRole;
+    public static function member(): ServerNodeRole;
+    public static function rehab(): ServerNodeRole;
+\}
+`}
+
+
+
+## Example
+
+
+
+{`$serverNode = $session->advanced()->getCurrentSessionNode();
+echo $serverNode->getUrl();
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-csharp.mdx
new file mode 100644
index 0000000000..058048082e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-csharp.mdx
@@ -0,0 +1,40 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* The change-vector reflects the cluster-wide point in time in which a change occurred, and
+  includes the unique database ID, node identifier, and document Etag in the specific node.
+* When a document is downloaded from the server, it contains various metadata,
+  e.g., the ID or current change-vector.
+* The current change-vector is stored within the session metadata and is available for each
+  entity using the `GetChangeVectorFor` method from the `Advanced` session operations.
+
+## Syntax
+
+
+
+{`string GetChangeVectorFor<T>(T instance);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **instance** | T | Instance of an entity for which an Etag will be returned. |
+
+| Return Value | |
+| ------------- | ----- |
+| string | Returns the current change-vector for an entity. Throws an exception if the `instance` is not tracked by the session. |
+
+## Example
+
+
+
+{`Employee employee = session.Load<Employee>("employees/1-A");
+string changeVector = session.Advanced.GetChangeVectorFor(employee);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-java.mdx
new file mode 100644
index 0000000000..03e409b82e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-java.mdx
@@ -0,0 +1,40 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* The change-vector reflects the cluster-wide point in time in which a change occurred, and
+  includes the unique database ID, node identifier, and document Etag in the specific node.
+* When a document is downloaded from the server, it contains various metadata,
+  e.g., the ID or current change-vector.
+* The current change-vector is stored within the session metadata and is available for each
+  entity using the `getChangeVectorFor` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`<T> String getChangeVectorFor(T instance)
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **instance** | T | Instance of an entity for which an Etag will be returned. |
+
+| Return Value | |
+| ------------- | ----- |
+| String | Returns the current change-vector for an entity. Throws an exception if the `instance` is not tracked by the session. |
+
+## Example
+
+
+
+{`Employee employee = session.load(Employee.class, "employees/1-A");
+String changeVector = session.advanced().getChangeVectorFor(employee);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-nodejs.mdx
new file mode 100644
index 0000000000..ce41f1aa35
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-nodejs.mdx
@@ -0,0 +1,40 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* The change-vector reflects the cluster-wide point in time in which a change occurred, and
+  includes the unique database ID, node identifier, and document Etag in the specific node.
+* When a document is downloaded from the server, it contains various metadata,
+  e.g., the ID or current change-vector.
+* The current change-vector is stored within the session metadata and is available for each
+  entity using the `getChangeVectorFor()` method from the `advanced` session operations.
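+
+For orientation only, a sketch of what a retrieved change vector may look like
+(the literal value in the comment is illustrative, not something your database will return verbatim):
+
+
+
+{`const employee = await session.load("employees/1-A");
+
+// An opaque string combining node tag, Etag, and database ID, e.g.:
+// "A:2568-F9I6Egqwm0Kz1K0oFVIR9Q"
+const changeVector = session.advanced.getChangeVectorFor(employee);
+`}
+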
+
+## Syntax
+
+
+
+{`session.advanced.getChangeVectorFor(entity);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | object | Instance of an entity for which an Etag will be returned. |
+
+| Return Value | |
+| ------------- | ----- |
+| string | Returns the current change-vector for an entity. Throws an exception if the `entity` is not tracked by the session. |
+
+## Example
+
+
+
+{`const employee = await session.load("employees/1-A");
+const changeVector = session.advanced.getChangeVectorFor(employee);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-php.mdx
new file mode 100644
index 0000000000..09bea3e87c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-php.mdx
@@ -0,0 +1,42 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* The change-vector reflects the cluster-wide point in time in which a change occurred, and
+  includes the unique database ID, node identifier, and document Etag in the specific node.
+
+* When a document is downloaded from the server, it contains various metadata,
+  e.g., the ID or current change-vector.
+
+* The current change-vector is stored within the session metadata and is available for each
+  entity using the `getChangeVectorFor` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`public function getChangeVectorFor(?object $instance): ?string;
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **$instance** | `?object` | An instance of an entity for which an Etag will be returned |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `?string` | Returns the current change-vector for an entity. Throws an exception if the entity is not tracked by the session. |
+
+## Example
+
+
+
+{`$employee = $session->load(Employee::class, "employees/1-A");
+$changeVector = $session->advanced()->getChangeVectorFor($employee);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-python.mdx
new file mode 100644
index 0000000000..9f8f4e58ea
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-change-vector-python.mdx
@@ -0,0 +1,40 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* The change-vector reflects the cluster-wide point in time in which a change occurred, and
+  includes the unique database ID, node identifier, and document Etag in the specific node.
+* When a document is downloaded from the server, it contains various metadata,
+  e.g., the ID or current change-vector.
+* The current change-vector is stored within the session metadata and is available for each
+  entity using the `get_change_vector_for` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`def get_change_vector_for(self, entity: object) -> str: ...
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **entity** | `object` | Instance of an entity for which an Etag will be returned |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `str` | Returns the current change-vector for an entity. Throws an exception if the entity is not tracked by the session. |
+
+## Example
+
+
+
+{`employee = session.load("employees/1-A")
+change_vector = session.advanced.get_change_vector_for(employee)
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-csharp.mdx
new file mode 100644
index 0000000000..afdbc8b0bf
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-csharp.mdx
@@ -0,0 +1,53 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When a document is loaded into the session,
+  the loaded entity will contain various metadata information such as ID, current change-vector, and more.
+
+* If the document has __Counters__, the document metadata will also contain its counter names.
+  The counter names are available for each entity using the `GetCountersFor()` method from the `Advanced` session operations.
+
+* In this page:
+  * [Get entity counters](../../../client-api/session/how-to/get-entity-counters.mdx#get-entity-counters)
+  * [Syntax](../../../client-api/session/how-to/get-entity-counters.mdx#syntax)
+
+## Get entity counters
+
+
+
+{`// Load a document
+var employee = session.Load<Employee>("employees/1-A");
+
+// Get counter names from the loaded entity
+List<string> counterNames = session.Advanced.GetCountersFor(employee);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`List<string> GetCountersFor<T>(T instance);
+`}
+
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **instance** | T | Instance of an entity for which counter names will be returned. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `List<string>` | Returns the counter names for the specified entity, or `null` if the entity has no counters.<br/>An exception is thrown if the `instance` is not tracked by the session. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-nodejs.mdx
new file mode 100644
index 0000000000..15fdbd6e2d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-nodejs.mdx
@@ -0,0 +1,52 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When a document is loaded into the session,
+  the loaded entity will contain various metadata information such as ID, current change-vector, and more.
+
+* If the document has __Counters__, the document metadata will also contain its counter names.
+  The counter names are available for each entity using the `getCountersFor()` method from the `advanced` session operations.
+
+* In this page:
+  * [Get entity counters](../../../client-api/session/how-to/get-entity-counters.mdx#get-entity-counters)
+  * [Syntax](../../../client-api/session/how-to/get-entity-counters.mdx#syntax)
+
+## Get entity counters
+
+
+
+{`// Load a document
+const employee = await session.load("employees/1-A");
+
+// Get counter names from the loaded entity
+const counterNames = session.advanced.getCountersFor(employee);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`getCountersFor(entity);
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **entity** | object | The entity for which counter names will be returned. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `string[]` | Returns the counter names for the specified entity, or `null` if the entity has no counters.<br/>An exception is thrown if the entity is not tracked by the session. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-php.mdx
new file mode 100644
index 0000000000..396885ecba
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-php.mdx
@@ -0,0 +1,55 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When a document is loaded into the session, the loaded entity will include
+  various metadata details such as ID, current change-vector, etc.
+
+* If the document has **Counters**, the document metadata will also contain
+  its counter names.
+  The counter names are available for each entity using the `getCountersFor()`
+  method from the `advanced` session operations.
+
+* In this page:
+  * [Get entity counters](../../../client-api/session/how-to/get-entity-counters.mdx#get-entity-counters)
+  * [Syntax](../../../client-api/session/how-to/get-entity-counters.mdx#syntax)
+
+## Get entity counters
+
+
+
+{`// Load a document
+$employee = $session->load(Employee::class, "employees/1-A");
+
+// Get counter names from the loaded entity
+$counterNames = $session->advanced()->getCountersFor($employee);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public function getCountersFor(mixed $instance): ?StringList;
+`}
+
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **$instance** | `mixed` | An instance of an entity for which counter names will be returned. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `?StringList` | Returns the counter names for the specified entity, or `null` if the entity has no counters.<br/>An exception is thrown if the entity is not tracked by the session. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-python.mdx
new file mode 100644
index 0000000000..ed01f68fe2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-counters-python.mdx
@@ -0,0 +1,55 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When a document is loaded into the session, the loaded entity will include
+  various metadata details such as ID, current change-vector, etc.
+
+* If the document has **Counters**, the document metadata will also contain
+  its counter names.
+  The counter names are available for each entity using the `get_counters_for()`
+  method from the `advanced` session operations.
+
+* In this page:
+  * [Get entity counters](../../../client-api/session/how-to/get-entity-counters.mdx#get-entity-counters)
+  * [Syntax](../../../client-api/session/how-to/get-entity-counters.mdx#syntax)
+
+## Get entity counters
+
+
+
+{`# Load a document
+employee = session.load("employees/1-A")
+
+# Get counter names from the loaded entity
+counter_names = session.advanced.get_counters_for(employee)
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`def get_counters_for(self, entity: object) -> List[str]: ...
+`}
+
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **entity** | `object` | Instance of an entity for which counter names will be returned. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `List[str]` | Returns the counter names for the specified entity, or `None` if the entity has no counters.<br/>An exception is thrown if the entity is not tracked by the session. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-id-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-id-csharp.mdx
new file mode 100644
index 0000000000..dd667f7842
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-id-csharp.mdx
@@ -0,0 +1,48 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Entities do not have to contain an ID property or field. If you need to know the ID under which such an entity is stored on the server, use the `GetDocumentId` method.
+
+## Syntax
+
+
+
+{`string GetDocumentId(object entity);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | object | Instance of an entity for which an ID will be returned |
+
+| Return Value | |
+| ------------- | ----- |
+| string | Returns the ID for a specified entity. The method may return `null` if `entity` is **null, isn't tracked, or the ID will be generated on the server**. |
+
+## Example
+
+
+
+{`public class Comment
+\{
+    public string Author \{ get; set; \}
+
+    public string Message \{ get; set; \}
+\}
+`}
+
+
+
+
+
+{`string commentId = session
+    .Advanced
+    .GetDocumentId(comment); // e.g. comments/1-A
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-id-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-id-java.mdx
new file mode 100644
index 0000000000..17c55a2406
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-id-java.mdx
@@ -0,0 +1,62 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Entities do not have to contain an ID property or field. If you need to know the ID under which such an entity is stored on the server, use the `getDocumentId` method.
+
+## Syntax
+
+
+
+{`String getDocumentId(Object entity)
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | Object | Instance of an entity for which an ID will be returned |
+
+| Return Value | |
+| ------------- | ----- |
+| String | Returns the ID for a specified entity. The method may return `null` if `entity` is **null, isn't tracked, or the ID will be generated on the server**. |
+
+## Example
+
+
+
+{`public class Comment \{
+    private String author;
+    private String message;
+
+    public String getAuthor() \{
+        return author;
+    \}
+
+    public void setAuthor(String author) \{
+        this.author = author;
+    \}
+
+    public String getMessage() \{
+        return message;
+    \}
+
+    public void setMessage(String message) \{
+        this.message = message;
+    \}
+\}
+`}
+
+
+
+
+
+{`String commentId = session
+    .advanced()
+    .getDocumentId(comment); // e.g. comments/1-A
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-id-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-id-nodejs.mdx
new file mode 100644
index 0000000000..ce9dad2c8a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-id-nodejs.mdx
@@ -0,0 +1,46 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Entities do not have to contain an ID property or field. If you need to know the ID under which such an entity is stored on the server, use the `getDocumentId()` method.
+
+## Syntax
+
+
+
+{`session.advanced.getDocumentId(entity);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** | object | Instance of an entity for which an ID will be returned |
+
+| Return Value | |
+| ------------- | ----- |
+| string | Returns the ID for a specified entity. The method may return `null` if `entity` is **null, isn't tracked, or the ID will be generated on the server**. |
+
+## Example
+
+
+
+{`class Comment \{
+    constructor (author, message) \{
+        this.author = author;
+        this.message = message;
+    \}
+\}
+`}
+
+
+
+
+
+{`const commentId = session.advanced.getDocumentId(comment); // e.g. comments/1-A
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-csharp.mdx
new file mode 100644
index 0000000000..ec25e68767
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-csharp.mdx
@@ -0,0 +1,38 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When a document is downloaded from the server, it contains various metadata, including the last modified date of the document.
+
+The last modified date is stored within the session metadata and is available for each entity using the `GetLastModifiedFor` method from the `Advanced` session operations.
+
+## Syntax
+
+
+
+{`DateTime? GetLastModifiedFor<T>(T instance);
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **instance** | T | Instance of an entity for which the last modified date will be returned. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| DateTime? | Returns the last modified date for an entity. Throws an exception if the `instance` is not tracked by the session. |
+
+
+## Example
+
+
+
+{`Employee employee = session.Load<Employee>("employees/1-A");
+DateTime? lastModified = session.Advanced.GetLastModifiedFor(employee);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-java.mdx
new file mode 100644
index 0000000000..392d3466b1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-java.mdx
@@ -0,0 +1,38 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When a document is downloaded from the server, it contains various metadata, including the last modified date of the document.
+
+The last modified date is stored within the session metadata and is available for each entity using the `getLastModifiedFor` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`<T> Date getLastModifiedFor(T instance)
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **instance** | T | Instance of an entity for which the last modified date will be returned. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| Date | Returns the last modified date for an entity. Throws an exception if the `instance` is not tracked by the session. |
+
+
+## Example
+
+
+
+{`Employee employee = session.load(Employee.class, "employees/1-A");
+Date lastModified = session.advanced().getLastModifiedFor(employee);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-nodejs.mdx
new file mode 100644
index 0000000000..6c9b63c8f2
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-nodejs.mdx
@@ -0,0 +1,38 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When a document is downloaded from the server, it contains various metadata, including the last modified date of the document.
+
+The last modified date is stored within the session metadata and is available for each entity using the `getLastModifiedFor()` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`session.advanced.getLastModifiedFor(instance);
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **instance** | object | Instance of an entity for which the last modified date will be returned. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| Date | Returns the last modified date for an entity. Throws an exception if the `instance` is not tracked by the session. |
+
+
+## Example
+
+
+
+{`const employee = await session.load("employees/1-A");
+const lastModified = session.advanced.getLastModifiedFor(employee);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-php.mdx
new file mode 100644
index 0000000000..1f6d18c4cb
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-php.mdx
@@ -0,0 +1,39 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When a document is downloaded from the server, it contains various metadata details,
+including the last modified date of the document.
+
+The last modified date is stored within the session metadata and is available for each
+entity using the `getLastModifiedFor` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`public function getLastModifiedFor($instance): ?DateTimeInterface;
+`}
+
+
+
+| Parameter | Description |
+| --------- | ----------- |
+| **$instance** | An instance of an entity for which the last modified date will be returned. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `?DateTimeInterface` | Returns the last modified date for an entity. Throws an exception if the entity is not tracked by the session. |
+
+## Example
+
+
+
+{`$employee = $session->load(Employee::class, "employees/1-A");
+$lastModified = $session->advanced()->getLastModifiedFor($employee);
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-python.mdx
new file mode 100644
index 0000000000..badf6c50cb
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-entity-last-modified-python.mdx
@@ -0,0 +1,39 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When a document is downloaded from the server, it contains various metadata details,
+including the last modified date of the document.
+
+The last modified date is stored within the session metadata and is available for each
+entity using the `get_last_modified_for` method from the `advanced` session operations.
+
+## Syntax
+
+
+
+{`def get_last_modified_for(self, entity: object) -> datetime: ...
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **entity** | `object` | Instance of an entity for which the last modified date will be returned. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `datetime` | Returns the last modified date for an entity. Throws an exception if the entity is not tracked by the session. |
+
+## Example
+
+
+
+{`employee = session.load("employees/1-A")
+last_modified = session.advanced.get_last_modified_for(employee)
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-tracked-entities-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-tracked-entities-csharp.mdx
new file mode 100644
index 0000000000..0b0c7f8d4e
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-tracked-entities-csharp.mdx
@@ -0,0 +1,129 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes)
+  made to all the entities it has either loaded, stored, deleted, or queried for,
+  and persists to the server only what is needed when `SaveChanges()` is called.
+
+* You can use the session's advanced `GetTrackedEntities()` method
+  to retrieve the **list of all entities tracked by the session**.
+
+* To check the actual type of change made to the entities, see:
+  [Get entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#get-entity-changes), or
+  [Get session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#get-session-changes).
+ +* In this page: + * [Get tracked entities](../../../client-api/session/how-to/get-tracked-entities.mdx#get-tracked-entities) + * [Syntax](../../../client-api/session/how-to/get-tracked-entities.mdx#syntax) + + +## Get tracked entities + +**Tracking stored entities**: + + +{`using (var session = store.OpenSession()) +\{ + // Store entities within the session: + Employee employee1 = new Employee \{FirstName = "John", LastName = "Doe"\}; + Employee employee2 = new Employee \{FirstName = "David", LastName = "Brown"\}; + Employee employee3 = new Employee \{FirstName = "Tom", LastName = "Miller"\}; + session.Store(employee1, "employees/1-A"); + session.Store(employee2, "employees/2-A"); + session.Store(employee3, "employees/3-A"); + + // Get tracked entities: + IDictionary trackedEntities = session.Advanced.GetTrackedEntities(); + + // The session tracks the 3 new stored entities: + Assert.Equal(3, trackedEntities.Keys.Count); + var entityInfo = trackedEntities["employees/1-A"]; + Assert.Equal("employees/1-A", entityInfo.Id); + Assert.True(entityInfo.Entity is Employee); + + // Save changes: + session.SaveChanges(); + + // The session keeps tracking the entities even after SaveChanges is called: + trackedEntities = session.Advanced.GetTrackedEntities(); + Assert.Equal(3, trackedEntities.Keys.Count); +\} +`} + + + +**Tracking loaded and deleted entities**: + + +{`using (var session = store.OpenSession()) +\{ + // Load entity: + Employee employee1 = session.Load("employees/1-A"); + + // Delete entity: + session.Delete("employees/3-A"); + + // Get tracked entities: + IDictionary trackedEntities = session.Advanced.GetTrackedEntities(); + + // The session tracks the 2 entities: + Assert.Equal(2, trackedEntities.Keys.Count); + + // Note the 'IsDeleted' property that is set for deleted entities: + var entityInfo = trackedEntities["employees/3-A"]; + Assert.True(entityInfo.IsDeleted); + + // Save changes: + session.SaveChanges(); +\} +`} + + + +**Tracking queried entities**: + + +{`using (var session = store.OpenSession()) +\{ + // Query for all employees: + var employees = session.Query().ToList(); + + // Get tracked entities: + IDictionary trackedEntities = session.Advanced.GetTrackedEntities(); + + // The session tracks the entities loaded via the query: + Assert.Equal(2, trackedEntities.Keys.Count); +\} +`} + + + + + +## Syntax + + + +{`IDictionary GetTrackedEntities(); +`} + + + + +{`public class EntityInfo +\{ + public string Id \{ get; set; \} + public object Entity \{ get; set; \} + public bool IsDeleted \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_get-tracked-entities-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_get-tracked-entities-nodejs.mdx new file mode 100644 index 0000000000..1a5b4408d9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_get-tracked-entities-nodejs.mdx @@ -0,0 +1,145 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Session [tracks all changes](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) + made to all the entities it has either loaded, stored, deleted, or queried for, + and persists to the server only what is needed when `saveChanges()` is called. + +* You can use the session's advanced `getTrackedEntities()` method + to retrieve the **list of all entities tracked by the session**. 
+
+* To check what is the actual type of change made to the entities, see:
+  [Get entity changes](../../../client-api/session/how-to/check-if-entity-has-changed.mdx#get-entity-changes), or
+  [Get session changes](../../../client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx#get-session-changes).
+
+* In this page:
+  * [Get tracked entities](../../../client-api/session/how-to/get-tracked-entities.mdx#get-tracked-entities)
+  * [Syntax](../../../client-api/session/how-to/get-tracked-entities.mdx#syntax)
+
+
+## Get tracked entities
+
+**Tracking stored entities**:
+
+
+{`const session = documentStore.openSession();
+
+// Store entities within the session:
+const employee1 = new Employee();
+employee1.firstName = "John";
+employee1.lastName = "Doe";
+await session.store(employee1, "employees/1-A");
+
+const employee2 = new Employee();
+employee2.firstName = "David";
+employee2.lastName = "Brown";
+await session.store(employee2, "employees/2-A");
+
+const employee3 = new Employee();
+employee3.firstName = "Tom";
+employee3.lastName = "Miller";
+await session.store(employee3, "employees/3-A");
+
+// Get tracked entities:
+let trackedEntities = session.advanced.getTrackedEntities();
+
+// The session tracks the 3 newly stored entities:
+const entityInfo = trackedEntities.get("employees/1-A");
+assert.equal(entityInfo.id, "employees/1-A");
+assert.ok(entityInfo.entity instanceof Employee);
+
+// Save changes:
+await session.saveChanges();
+
+// The session keeps tracking the entities even after saveChanges is called:
+trackedEntities = session.advanced.getTrackedEntities();
+`}
+
+
+
+**Tracking loaded and deleted entities**:
+
+
+{`const session = documentStore.openSession();
+
+// Load entity:
+const employee1 = await session.load("employees/1-A");
+
+// Delete entity:
+session.delete("employees/3-A");
+
+// Get tracked entities:
+const trackedEntities = session.advanced.getTrackedEntities();
+
+// The session tracks the 2 entities:
+
+// The loaded entity:
+const entityInfo1 = trackedEntities.get("employees/1-A");
+assert.ok(!entityInfo1.isDeleted);
+
+// The deleted entity - note the 'isDeleted' property that is set for deleted entities:
+const entityInfo2 = trackedEntities.get("employees/3-A");
+assert.ok(entityInfo2.isDeleted);
+
+// Save changes:
+await session.saveChanges();
+`}
+
+
+
+**Tracking queried entities**:
+
+
+{`const session = documentStore.openSession();
+
+// Query for all employees:
+const employees = await session.query(\{ collection: "employees" \}).all();
+
+// Get tracked entities:
+const trackedEntities = session.advanced.getTrackedEntities();
+
+// The session tracks the entities loaded via the query:
+const entityInfo1 = trackedEntities.get(employees[0].id);
+const entityInfo2 = trackedEntities.get(employees[1].id);
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`getTrackedEntities();
+`}
+
+
+
+| Return value | Description |
+|---------------------------|------------------------------------------------------------------|
+| `Map` | A dictionary that maps each entity id to its `EntityInfo` object. |
+
+
+
+{`class EntityInfo \{
+    // The tracked entity id
+    id; // string
+
+    // The tracked entity object
+    entity; // object
+
+    // isDeleted is true if the entity was deleted
+    isDeleted // boolean
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-csharp.mdx
new file mode 100644
index 0000000000..cb3487bbf5
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-csharp.mdx
@@ -0,0 +1,17 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, each session tracks the changes made in all the entities it has ever stored, loaded, or queried for.
+  All changes are then persisted when `SaveChanges` is called.
+
+* To ignore entity changes when calling `SaveChanges`, **disable entity tracking** by any of the following:
+  * [Disable tracking for a specific entity in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-a-specific-entity-in-session)
+  * [Disable tracking for all entities in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-all-entities-in-session)
+  * [Disable tracking for query results](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results)
+  * [Customize tracking in conventions](../../../client-api/session/configuration/how-to-disable-tracking.mdx#customize-tracking-in-conventions)
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-java.mdx
new file mode 100644
index 0000000000..1679dae9b3
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-java.mdx
@@ -0,0 +1,39 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To indicate that an entity should be ignored when tracking changes,
+use the `advanced` session `ignoreChangesFor` method.
+
+Using `load` again to retrieve this entity will not initiate a server call.
+
+The entity will still take part in the session, but be ignored when `saveChanges` is called.
+
+## Syntax
+
+
+
+{`void ignoreChangesFor(Object entity);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **entity** | Object | Instance of an entity for which changes will be ignored.
| + + +## Example + + + +{`Product product = session.load(Product.class, "products/1-A"); +session.advanced().ignoreChangesFor(product); +product.unitsInStock++; //this will be ignored for SaveChanges +session.saveChanges(); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-nodejs.mdx new file mode 100644 index 0000000000..17607624b5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-nodejs.mdx @@ -0,0 +1,17 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, each session tracks the changes made in all the entities it has ever stored, loaded, or queried for. + All changes are then persisted when `saveChanges` is called. + +* To ignore entity changes when calling `saveChanges`, **disable entity tracking** by any of the following: + * [Disable tracking for a specific entity in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-a-specific-entity-in-session) + * [Disable tracking for all entities in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-all-entities-in-session) + * [Disable tracking for query results](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results) + * [Customize tracking in conventions](../../../client-api/session/configuration/how-to-disable-tracking.mdx#customize-tracking-in-conventions) + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-php.mdx new file mode 100644 index 0000000000..17607624b5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-php.mdx @@ -0,0 +1,17 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, each session tracks the changes made in all the entities it has ever stored, loaded, or queried for. + All changes are then persisted when `saveChanges` is called. 
+ +* To ignore entity changes when calling `saveChanges`, **disable entity tracking** by any of the following: + * [Disable tracking for a specific entity in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-a-specific-entity-in-session) + * [Disable tracking for all entities in session](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-all-entities-in-session) + * [Disable tracking for query results](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results) + * [Customize tracking in conventions](../../../client-api/session/configuration/how-to-disable-tracking.mdx#customize-tracking-in-conventions) + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-python.mdx new file mode 100644 index 0000000000..baa593ce1b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_ignore-entity-changes-python.mdx @@ -0,0 +1,40 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To indicate that an entity should be ignored when tracking changes, +use the `advanced` session `ignore_changes_for` method. + +Using `load` again to retrieve this entity will not initiate a server call. + +The entity will still take part in the session, but be ignored when `save_changes` is called. + +See more here: [Disable Entity Tracking](../../../client-api/session/configuration/how-to-disable-tracking.mdx) + +## Syntax + + + +{`def ignore_changes_for(self, entity: object) -> None: ... +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **entity** | `object` | The instance of an entity for which changes will be ignored. | + +## Example + + + +{`product = session.load("products/1-A", Product) +session.ignore_changes_for(product) +product.units_in_stock += 1 # this will be ignored for save_changes +session.save_changes() +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-csharp.mdx new file mode 100644 index 0000000000..872d67f067 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-csharp.mdx @@ -0,0 +1,302 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Lazy request**: + + * You can define a lazy request within a session (e.g. a lazy-query or a lazy-load request) + and defer its execution until actually needed. + + * The lazy request definition is stored in the session and a `Lazy` instance is returned. + The request will be sent to the server and executed only when you access the value of this instance. + +* **Multiple lazy requests**: + + * Multiple lazy requests can be defined within the same session. + + * When triggering the deferred execution (whether implicitly or explicitly), + ALL pending lazy requests held up by the session will be sent to the server in a single call. + This can help reduce the number of remote calls made to the server over the network. 
+ +* In this page: + * [Requests that can be executed lazily](../../../client-api/session/how-to/perform-operations-lazily.mdx#operations-that-can-be-executed-lazily) + * [Load entities](../../../client-api/session/how-to/perform-operations-lazily.mdx#load-entities) + * [Load entities with include](../../../client-api/session/how-to/perform-operations-lazily.mdx#load-entities-with-include) + * [Load entities starting with](../../../client-api/session/how-to/perform-operations-lazily.mdx#load-entities-starting-with) + * [Conditional load](../../../client-api/session/how-to/perform-operations-lazily.mdx#conditional-load) + * [Run query](../../../client-api/session/how-to/perform-operations-lazily.mdx#run-query) + * [Get revisions](../../../client-api/session/how-to/perform-operations-lazily.mdx#get-revisions) + * [Get compare-exchange value](../../../client-api/session/how-to/perform-operations-lazily.mdx#get-compare-exchange-value) + * [Multiple lazy requests](../../../client-api/session/how-to/perform-operations-lazily.mdx#multiple-lazy-requests) + * [Execute all requests - implicitly](../../../client-api/session/how-to/perform-operations-lazily.mdx#execute-all-requests---implicitly) + * [Execute all requests - explicitly](../../../client-api/session/how-to/perform-operations-lazily.mdx#execute-all-requests---explicitly) + + +## Operations that can be executed lazily + +### Load entities + +[Load](../../../client-api/session/loading-entities.mdx#load) loads a document entity from +the database into the session. +Loading entities can be executed **lazily**. + + + +{`Lazy lazyEmployee = session + // Add a call to Lazily + .Advanced.Lazily + // Document will Not be loaded from the database here, no server call is made + .Load("employees/1-A"); + +Employee employee = lazyEmployee.Value; // 'Load' operation is executed here +// The employee entity is now loaded & tracked by the session +`} + + +### Load entities with include + +[Load with include](../../../client-api/session/loading-entities.mdx#load-with-includes) loads both the document and the specified related document. +Loading entities with include can be executed **lazily**. + + + + +{`Lazy lazyProduct = session + // Add a call to Lazily + .Advanced.Lazily + // Request to include the related Supplier document + // Documents will Not be loaded from the database here, no server call is made + .Include(x => x.SupplierId) + .Load("products/1-A"); + +// 'Load with include' operation will be executed here +// Both documents will be retrieved from the database +Product product = lazyProduct.Value; +// The product entity is now loaded & tracked by the session + +// Access the related document, no additional server call is made +Supplier supplier = session.Load(product.SupplierId); +// The supplier entity is now also loaded & tracked by the session +`} + + + + +{`public class Product +{ + public string Id { get; set; } + public string Name { get; set; } + public string SupplierId { get; set; } // The related document ID +} +`} + + + +### Load entities starting with + +[LoadStartingWith](../../../client-api/session/loading-entities.mdx#loadstartingwith) loads entities whose ID starts with the specified prefix. +Loading entities with a common prefix can be executed **lazily**. 
+ + + +{`Lazy> lazyEmployees = session + // Add a call to Lazily + .Advanced.Lazily + // Request to load entities whose ID starts with 'employees/' + // Documents will Not be loaded from the database here, no server call is made + .LoadStartingWith("employees/"); + +var employees = lazyEmployees.Value; // 'Load' operation is executed here +// The employee entities are now loaded & tracked by the session +`} + + +### Conditional load + +[ConditionalLoad](../../../client-api/session/loading-entities.mdx#conditionalload) logic is: + +* If the entity is already loaded to the session: + no server call is made, the tracked entity is returned. +* If the entity is Not already loaded to the session: + the document will be loaded from the server only if the change-vector provided to the + method is older than the one in the server (i.e. if the document in the server is newer). +* Loading entities conditionally can be executed **lazily**. + + + +{`// Create document and get its change-vector: +string changeVector; +using (var session1 = store.OpenSession()) +\{ + Employee employee = new Employee(); + session1.Store(employee, "employees/1-A"); + session1.SaveChanges(); + + // Get the tracked entity change-vector + changeVector = session1.Advanced.GetChangeVectorFor(employee); +\} + +// Conditionally lazy-load the document: +using (var session2 = store.OpenSession()) +\{ + var lazyEmployee = session2 + // Add a call to Lazily + .Advanced.Lazily + // Document will Not be loaded from the database here, no server call is made + .ConditionalLoad("employees/1-A", changeVector); + + var loadedItem = lazyEmployee.Value; // 'ConditionalLoad' operation is executed here + Employee employee = loadedItem.Entity; + + // If ConditionalLoad has actually fetched the document from the server (logic described above) + // then the employee entity is now loaded & tracked by the session + +\} +`} + + +### Run query + +A Query can be executed **lazily**. +Learn more about running queries lazily in [lazy queries](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx). + + + +{`// Define a lazy query: +Lazy> lazyEmployees = session + .Query() + .Where( x => x.FirstName == "John") + // Add a call to Lazily, the query will Not be executed here + .Lazily(); + +IEnumerable employees = lazyEmployees.Value; // Query is executed here + +// Note: Since query results are not projected, +// then the resulting employee entities will be tracked by the session. +`} + + +### Get revisions + +All methods for [getting revisions](../../../document-extensions/revisions/client-api/session/loading.mdx) and their metadata can be executed **lazily**. + + + +{`Lazy> lazyRevisions = session + // Add a call to Lazily + .Advanced.Revisions.Lazily + // Revisions will Not be fetched here, no server call is made + .GetFor("employees/1-A"); + + // Usage is the same for the other get revisions methods: + // .Get() + // .GetMetadataFor() + +List revisions = lazyRevisions.Value; // Getting revisions is executed here +`} + + +### Get compare-exchange value + +[Getting compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#get-compare-exchange) +values can be executed **lazily**. 
+ + + +{`using (var session = + store.OpenSession(new SessionOptions \{ TransactionMode = TransactionMode.ClusterWide \})) +\{ + // Create compare-exchange value: + session.Advanced.ClusterTransaction + .CreateCompareExchangeValue(key: "someKey", value: "someValue"); + session.SaveChanges(); + + // Get the compare-exchange value lazily: + Lazy> lazyCmpXchg = session + // Add a call to Lazily + .Advanced.ClusterTransaction.Lazily + // Compare-exchange values will Not be fetched here, no server call is made + .GetCompareExchangeValue("someKey"); + + // Usage is the same for the other method: + // .GetCompareExchangeValues() + + CompareExchangeValue cmpXchgValue = + lazyCmpXchg.Value; // Getting compare-exchange value is executed here +\} +`} + + + + + +## Multiple lazy requests + +### Execute all requests - implicitly + +Accessing the value of ANY of the lazy instances will trigger +the execution of ALL pending lazy requests held up by the session, +in a SINGLE server call. + + + +{`// Define multiple lazy requests +Lazy lazyUser1 = session.Advanced.Lazily.Load("users/1-A"); +Lazy lazyUser2 = session.Advanced.Lazily.Load("users/2-A"); + +Lazy> lazyEmployees = session.Query() + .Lazily(); +Lazy> lazyProducts = session.Query() + .Search(x => x.Name, "Ch*") + .Lazily(); + +// Accessing the value of ANY of the lazy instances will trigger +// the execution of ALL pending lazy requests held up by the session +// This is done in a SINGLE server call +User user1 = lazyUser1.Value; + +// ALL the other values are now also available +// No additional server calls are made when accessing these values +User user2 = lazyUser2.Value; +IEnumerable employees = lazyEmployees.Value; +IEnumerable products = lazyProducts.Value; +`} + + +### Execute all requests - explicitly + +Explicitly calling `ExecuteAllPendingLazyOperations` will execute +ALL pending lazy requests held up by the session, in a SINGLE server call. + + + +{`// Define multiple lazy requests +Lazy lazyUser1 = session.Advanced.Lazily.Load("users/1-A"); +Lazy lazyUser2 = session.Advanced.Lazily.Load("users/2-A"); + +Lazy> lazyEmployees = session.Query() + .Lazily(); +Lazy> lazyProducts = session.Query() + .Search(x => x.Name, "Ch*") + .Lazily(); + +// Explicitly call 'ExecuteAllPendingLazyOperations' +// ALL pending lazy requests held up by the session will be executed in a SINGLE server call +session.Advanced.Eagerly.ExecuteAllPendingLazyOperations(); + +// ALL values are now available +// No additional server calls are made when accessing the values +User user1 = lazyUser1.Value; +User user2 = lazyUser2.Value; +IEnumerable employees = lazyEmployees.Value; +IEnumerable products = lazyProducts.Value; +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-nodejs.mdx new file mode 100644 index 0000000000..6e59f12715 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-nodejs.mdx @@ -0,0 +1,326 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* __Lazy request__: + + * You can define a lazy request within a session (e.g. a lazy-query or a lazy-load request) + and defer its execution until actually needed. + + * The lazy request definition is stored in the session and a `Lazy` instance is returned. 
+ The request will be sent to the server and executed only when you access the value of this instance. + +* __Multiple lazy requests__: + + * Multiple lazy requests can be defined within the same session. + + * When triggering the deferred execution (whether implicitly or explicitly), + ALL pending lazy requests held up by the session will be sent to the server in a single call. + This can help reduce the number of remote calls made to the server over the network. + +* In this page: + * [Requests that can be executed lazily:](../../../client-api/session/how-to/perform-operations-lazily.mdx#requests-that-can-be-executed-lazily) + * [Load entities](../../../client-api/session/how-to/perform-operations-lazily.mdx#loadentities) + * [Load entities with include](../../../client-api/session/how-to/perform-operations-lazily.mdx#loadwithinclude) + * [Load entities starting with](../../../client-api/session/how-to/perform-operations-lazily.mdx#loadstartingwith) + * [Conditional load](../../../client-api/session/how-to/perform-operations-lazily.mdx#conditionalload) + * [Run query](../../../client-api/session/how-to/perform-operations-lazily.mdx#runquery) + * [Get revisions](../../../client-api/session/how-to/perform-operations-lazily.mdx#getrevisions) + * [Get compare-exchange value](../../../client-api/session/how-to/perform-operations-lazily.mdx#getcompareexchange) + * [Multiple lazy requests](../../../client-api/session/how-to/perform-operations-lazily.mdx#multiple-lazy-requests) + * [Execute all requests - implicitly](../../../client-api/session/how-to/perform-operations-lazily.mdx#implicit) + * [Execute all requests - explicitly](../../../client-api/session/how-to/perform-operations-lazily.mdx#explicit) + + +## Operations that can be executed lazily + + + __Load entities__ + +* ['load'](../../../client-api/session/loading-entities.mdx#load) loads a document entity from the database into the session. + Loading entities can be executed __lazily__. + + + +{`const lazyEmployee = session + // Add a call to lazily + .advanced.lazily + // Document will Not be loaded from the database here, no server call is made + .load("employees/1-A"); + +const employee = await lazyEmployee.getValue(); // 'load' operation is executed here +// The employee entity is now loaded & tracked by the session +`} + + + + + + __Load entities with include__ + +* ['load' with include](../../../client-api/session/loading-entities.mdx#load-with-includes) loads both the document and the specified related document. + Loading entities with include can be executed __lazily__. 
+
+
+
+
+{`const lazyProduct = session
+    // Add a call to lazily
+    .advanced.lazily
+    // Request to include the related Supplier document
+    // Documents will Not be loaded from the database here, no server call is made
+    .include("supplierId")
+    .load("products/1-A");
+
+// 'load with include' operation will be executed here
+// Both documents will be retrieved from the database
+const product = await lazyProduct.getValue();
+// The product entity is now loaded & tracked by the session
+
+// Access the related document, no additional server call is made
+const supplier = await session.load(product.supplierId);
+// The supplier entity is now also loaded & tracked by the session
+`}
+
+
+
+{`// Sample product document
+class Product {
+    constructor(name, supplierId) {
+        this.id = null;
+        this.name = name;
+        this.supplierId = supplierId; // The related document ID
+    }
+}
+`}
+
+
+
+
+
+ __Load entities starting with__
+
+* ['loadStartingWith'](../../../client-api/session/loading-entities.mdx#loadstartingwith) loads entities whose ID starts with the specified prefix.
+  Loading entities with a common prefix can be executed __lazily__.
+
+
+
+{`const lazyEmployees = session
+    // Add a call to lazily
+    .advanced.lazily
+    // Request to load entities whose ID starts with 'employees/'
+    // Documents will Not be loaded from the database here, no server call is made
+    .loadStartingWith("employees/");
+
+const employees = await lazyEmployees.getValue(); // 'load' operation is executed here
+// The employee entities are now loaded & tracked by the session
+`}
+
+
+
+
+
+ __Conditional load__
+
+* ['conditionalLoad'](../../../client-api/session/loading-entities.mdx#conditionalload) logic is:
+    * If the entity is already loaded to the session:
+      no server call is made, the tracked entity is returned.
+    * If the entity is Not already loaded to the session:
+      the document will be loaded from the server only if the change-vector provided to the method is older than the one in the server
+      (i.e. if the document in the server is newer).
+    * Loading entities conditionally can be executed __lazily__.
+
+
+
+{`// Create document and get its change-vector:
+let changeVector;
+\{
+    const session1 = documentStore.openSession();
+
+    const employee = new Employee();
+    await session1.store(employee, "employees/1-A");
+    await session1.saveChanges();
+
+    // Get the tracked entity change-vector
+    changeVector = session1.advanced.getChangeVectorFor(employee);
+\}
+
+// Conditionally lazy-load the document:
+\{
+    const session2 = documentStore.openSession();
+
+    const lazyEmployee = session2
+        // Add a call to lazily
+        .advanced.lazily
+        // Document will Not be loaded from the database here, no server call is made
+        .conditionalLoad("employees/1-A", changeVector, Employee);
+
+    const loadedItem = await lazyEmployee.getValue(); // 'conditionalLoad' operation is executed here
+    const employee = loadedItem.entity;
+
+    // If conditionalLoad has actually fetched the document from the server (logic described above)
+    // then the employee entity is now loaded & tracked by the session
+\}
+`}
+
+
+
+
+
+ __Run query__
+
+* A query can be executed __lazily__.
+  Learn more about running queries lazily in [lazy queries](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx).
+ + + +{`// Define a lazy query: +const lazyEmployees = session + .query(\{ collection: "employees" \}) + .whereEquals("FirstName", "John") + // Add a call to lazily, the query will Not be executed here + .lazily(); + +const employees = await lazyEmployees.getValue(); // Query is executed here + +// Note: Since query results are not projected, +// then the resulting employee entities will be tracked by the session. +`} + + + + + + __Get revisions__ + +* All methods for [getting revisions](../../../document-extensions/revisions/client-api/session/loading.mdx) and their metadata can be executed __lazily__. + + + +{`var lazyRevisions = session + // Add a call to lazily + .advanced.revisions.lazily + // Revisions will Not be fetched here, no server call is made + .getFor("employees/1-A"); + + // Usage is the same for the other get revisions methods: + // .get() + // .getMetadataFor() + +const revisions = lazyRevisions.getValue(); // Getting revisions is executed here +`} + + + + + + __Get compare-exchange value__ + +* [Getting compare-exchange](../../../client-api/session/cluster-transaction/compare-exchange.mdx#get-compare-exchange) values can be executed __lazily__. + + + +{`const session = documentStore.openSession(\{ transactionMode: "ClusterWide" \}); + +// Create compare-exchange value: +session.advanced.clusterTransaction.createCompareExchangeValue("someKey", "someValue"); +await session.saveChanges(); + +// Get the compare-exchange value lazily: +const lazyCmpXchg = session + // Add a call to lazily + .advanced.clusterTransaction.lazily + // Compare-exchange values will Not be fetched here, no server call is made + .getCompareExchangeValue("someKey"); + +// Usage is the same for the other method: +// .getCompareExchangeValues() + +const cmpXchgValue = + await lazyCmpXchg.getValue(); // Getting compare-exchange value is executed here +`} + + + + + + +## Multiple lazy requests + + + + __Execute all requests - implicitly__ + +Accessing the value of ANY of the lazy instances will trigger +the execution of ALL pending lazy requests held up by the session, +in a SINGLE server call. + + + +{`// Define multiple lazy requests +const lazyUser1 = session.advanced.lazily.load("users/1-A"); +const lazyUser2 = session.advanced.lazily.load("users/2-A"); + +const lazyEmployees = session.query(\{ collection: "employees" \}) + .lazily(); +const lazyProducts = session.query(\{ collection: "products" \}) + .search("Name", "Ch*") + .lazily(); + +// Accessing the value of ANY of the lazy instances will trigger +// the execution of ALL pending lazy requests held up by the session +// This is done in a SINGLE server call +const user1 = await lazyUser1.getValue(); + +// ALL the other values are now also available +// No additional server calls are made when accessing these values +const user2 = await lazyUser2.getValue(); +const employees = await lazyEmployees.getValue(); +const products = await lazyProducts.getValue(); +`} + + + + + + + __Execute all requests - explicitly__ + +Explicitly calling `executeAllPendingLazyOperations` will execute +ALL pending lazy requests held up by the session, in a SINGLE server call. 
+ + + +{`// Define multiple lazy requests +const lazyUser1 = session.advanced.lazily.load("users/1-A"); +const lazyUser2 = session.advanced.lazily.load("users/2-A"); + +const lazyEmployees = session.query(\{ collection: "employees" \}) + .lazily(); +const lazyProducts = session.query(\{ collection: "products" \}) + .search("Name", "Ch*") + .lazily(); + +// Explicitly call 'executeAllPendingLazyOperations' +// ALL pending lazy requests held up by the session will be executed in a SINGLE server call +await session.advanced.eagerly.executeAllPendingLazyOperations(); + +// ALL values are now available +// No additional server calls are made when accessing the values +const user1 = await lazyUser1.getValue(); +const user2 = await lazyUser2.getValue(); +const employees = await lazyEmployees.getValue(); +const products = await lazyProducts.getValue(); +`} + + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-php.mdx new file mode 100644 index 0000000000..9835ed0951 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-php.mdx @@ -0,0 +1,349 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Lazy request**: + + * You can define a lazy request within a session (e.g. a lazy-query or a lazy-load request) + and defer its execution until actually needed. + + * The lazy request definition is stored in the session and a `Lazy` instance is returned. + The request will be sent to the server and executed only when you access the value of this instance. + +* **Multiple lazy requests**: + + * Multiple lazy requests can be defined within the same session. + + * When triggering the deferred execution (whether implicitly or explicitly), + ALL pending lazy requests held up by the session will be sent to the server in a single call. + This can help reduce the number of remote calls made to the server over the network. 
+ +* In this page: + * [Requests that can be executed lazily](../../../client-api/session/how-to/perform-operations-lazily.mdx#operations-that-can-be-executed-lazily) + * [Load entities](../../../client-api/session/how-to/perform-operations-lazily.mdx#load-entities) + * [Load entities with include](../../../client-api/session/how-to/perform-operations-lazily.mdx#load-entities-with-include) + * [Load entities starting with](../../../client-api/session/how-to/perform-operations-lazily.mdx#load-entities-starting-with) + * [Conditional load](../../../client-api/session/how-to/perform-operations-lazily.mdx#conditional-load) + * [Run query](../../../client-api/session/how-to/perform-operations-lazily.mdx#run-query) + * [Get revisions](../../../client-api/session/how-to/perform-operations-lazily.mdx#get-revisions) + * [Get compare-exchange value](../../../client-api/session/how-to/perform-operations-lazily.mdx#get-compare-exchange-value) + * [Multiple lazy requests](../../../client-api/session/how-to/perform-operations-lazily.mdx#multiple-lazy-requests) + * [Execute all requests - implicitly](../../../client-api/session/how-to/perform-operations-lazily.mdx#execute-all-requests---implicitly) + * [Execute all requests - explicitly](../../../client-api/session/how-to/perform-operations-lazily.mdx#execute-all-requests---explicitly) + + +## Operations that can be executed lazily + +### Load entities + +[load](../../../client-api/session/loading-entities.mdx#load) loads a document entity from +the database into the session. +Loading entities can be executed `lazily`. + + + +{`/** @var Lazy $lazyEmployee */ +$lazyEmployee = $session + // Add a call to Lazily + ->advanced()->lazily() + // Document will Not be loaded from the database here, no server call is made + ->load(Employee::class, "employees/1-A"); + +$employee = $lazyEmployee->getValue(); // 'Load' operation is executed here +// The employee entity is now loaded & tracked by the session +`} + + +### Load entities with include + +[load with include](../../../client-api/session/loading-entities.mdx#load-with-includes) loads +both the document and the specified related document. +Loading entities with include can be executed `lazily`. 
+
+
+
+
+{`/** @var Lazy $lazyProduct */
+$lazyProduct = $session
+    // Add a call to Lazily
+    ->advanced()->lazily()
+    // Request to include the related Supplier document
+    // Documents will Not be loaded from the database here, no server call is made
+    ->include("SupplierId")
+    ->load(Product::class, "products/1-A");
+
+// 'Load with include' operation will be executed here
+// Both documents will be retrieved from the database
+$product = $lazyProduct->getValue();
+// The product entity is now loaded & tracked by the session
+
+// Access the related document, no additional server call is made
+$supplier = $session->load(Supplier::class, $product->getSupplierId());
+// The supplier entity is now also loaded & tracked by the session
+`}
+
+
+
+{`class Product
+{
+    public ?string $id = null;
+    public ?string $name = null;
+    public ?string $supplierId = null; // The related document ID
+
+    public function getId(): ?string
+    {
+        return $this->id;
+    }
+
+    public function setId(?string $id): void
+    {
+        $this->id = $id;
+    }
+
+    public function getName(): ?string
+    {
+        return $this->name;
+    }
+
+    public function setName(?string $name): void
+    {
+        $this->name = $name;
+    }
+
+    public function getSupplierId(): ?string
+    {
+        return $this->supplierId;
+    }
+
+    public function setSupplierId(?string $supplierId): void
+    {
+        $this->supplierId = $supplierId;
+    }
+}
+`}
+
+
+
+### Load entities starting with
+
+[loadStartingWith](../../../client-api/session/loading-entities.mdx#loadstartingwith) loads
+entities whose ID starts with the specified prefix.
+Loading entities with a common prefix can be executed `lazily`.
+
+
+
+{`/** @var Lazy> $lazyEmployees */
+$lazyEmployees = $session
+    // Add a call to Lazily
+    ->advanced()->lazily()
+    // Request to load entities whose ID starts with 'employees/'
+    // Documents will Not be loaded from the database here, no server call is made
+    ->loadStartingWith(Employee::class, "employees/");
+
+$employees = $lazyEmployees->getValue(); // 'Load' operation is executed here
+// The employee entities are now loaded & tracked by the session
+`}
+
+
+### Conditional load
+
+[conditionalLoad](../../../client-api/session/loading-entities.mdx#conditionalload) logic is:
+
+* If the entity is already loaded to the session:
+  no server call is made, the tracked entity is returned.
+* If the entity is Not already loaded to the session:
+  the document will be loaded from the server only if the change-vector provided to the
+  method is older than the one in the server (i.e. if the document in the server is newer).
+* Loading entities conditionally can be executed `lazily`.
+
+
+
+{`// Create document and get its change-vector:
+$changeVector = null;
+$session1 = $store->openSession();
+try \{
+    $employee = new Employee();
+    $session1->store($employee, "employees/1-A");
+    $session1->saveChanges();
+
+    // Get the tracked entity change-vector
+    $changeVector = $session1->advanced()->getChangeVectorFor($employee);
+\} finally \{
+    $session1->close();
+\}
+
+// Conditionally lazy-load the document:
+$session2 = $store->openSession();
+try \{
+    $lazyEmployee = $session2
+        // Add a call to Lazily
+        ->advanced()->lazily()
+        // Document will Not be loaded from the database here, no server call is made
+        ->conditionalLoad(Employee::class, "employees/1-A", $changeVector);
+
+    $loadedItem = $lazyEmployee->getValue(); // 'ConditionalLoad' operation is executed here
+    $employee = $loadedItem->getEntity();
+
+    // If ConditionalLoad has actually fetched the document from the server (logic described above)
+    // then the employee entity is now loaded & tracked by the session
+\} finally \{
+    $session2->close();
+\}
+`}
+
+
+### Run query
+
+A query can be executed `lazily`.
+Learn more about running queries lazily in [lazy queries](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx).
+
+
+
+{`// Define a lazy query:
+$lazyEmployees = $session
+    ->query(Employee::class)
+    ->whereEquals("FirstName", "John")
+    // Add a call to Lazily, the query will Not be executed here
+    ->lazily();
+
+$employees = $lazyEmployees->getValue(); // Query is executed here
+
+// Note: Since query results are not projected,
+// the resulting employee entities will be tracked by the session.
+`}
+
+
+### Get revisions
+
+All methods for [getting revisions](../../../document-extensions/revisions/client-api/session/loading.mdx) and their metadata can be executed `lazily`.
+
+
+
+{`/** @var Lazy> $lazyRevisions */
+$lazyRevisions = $session
+    // Add a call to Lazily
+    ->advanced()->revisions()->lazily()
+    // Revisions will Not be fetched here, no server call is made
+    ->getFor(Employee::class, "employees/1-A");
+
+// Usage is the same for the other get revisions methods:
+// ->get()
+// ->getMetadataFor()
+
+/** @var array $revisions */
+$revisions = $lazyRevisions->getValue(); // Getting revisions is executed here
+`}
+
+
+### Get compare-exchange value
+
+[getCompareExchangeValue](../../../client-api/session/cluster-transaction/compare-exchange.mdx#get-compare-exchange)
+can be executed `lazily`.
+ + + +{`$sessionOptions = new SessionOptions(); +$sessionOptions->setTransactionMode(TransactionMode::clusterWide()); +$session = $store->openSession($sessionOptions); +try \{ + // Create compare-exchange value: + $session->advanced()->clusterTransaction() + ->createCompareExchangeValue(key: "someKey", value: "someValue"); + $session->SaveChanges(); + + // Get the compare-exchange value lazily: + /** @var Lazy> $lazyCmpXchg */ + $lazyCmpXchg = $session + // Add a call to Lazily + ->advanced()->clusterTransaction()->lazily() + // Compare-exchange values will Not be fetched here, no server call is made + ->getCompareExchangeValue(null, "someKey"); + + // Usage is the same for the other method: + // .GetCompareExchangeValues() + + /** @var CompareExchangeValue $cmpXchgValue */ + $cmpXchgValue = $lazyCmpXchg->getValue(); // Getting compare-exchange value is executed here +\} finally \{ + $session->close(); +\} +`} + + + + + +## Multiple lazy requests + +### Execute all requests - implicitly + +Accessing the value of ANY of the lazy instances will trigger +the execution of ALL pending lazy requests held up by the session, +in a SINGLE server call. + + + +{`// Define multiple lazy requests +$lazyUser1 = $session->advanced()->lazily()->load(User::class, "users/1-A"); +$lazyUser2 = $session->advanced()->lazily()->load(User::class, "users/2-A"); + +$lazyEmployees = $session->query(Employee::class) + ->lazily(); +$lazyProducts = $session->query(Product::class) + ->search("Name", "Ch*") + ->lazily(); + +// Accessing the value of ANY of the lazy instances will trigger +// the execution of ALL pending lazy requests held up by the session +// This is done in a SINGLE server call +$user1 = $lazyUser1->getValue(); + +// ALL the other values are now also available +// No additional server calls are made when accessing these values +$user2 = $lazyUser2->getValue(); +$employees = $lazyEmployees->getValue(); +$products = $lazyProducts->getValue(); +`} + + +### Execute all requests - explicitly + +Explicitly calling `executeAllPendingLazyOperations` will execute +ALL pending lazy requests held up by the session, in a SINGLE server call. 
+
+
+
+{`// Define multiple lazy requests
+$lazyUser1 = $session->advanced()->lazily()->load(User::class, "users/1-A");
+$lazyUser2 = $session->advanced()->lazily()->load(User::class, "users/2-A");
+
+$lazyEmployees = $session->query(Employee::class)
+    ->lazily();
+$lazyProducts = $session->query(Product::class)
+    ->search("Name", "Ch*")
+    ->lazily();
+
+// Explicitly call 'executeAllPendingLazyOperations'
+// ALL pending lazy requests held up by the session will be executed in a SINGLE server call
+$session->advanced()->eagerly()->executeAllPendingLazyOperations();
+
+// ALL values are now available
+// No additional server calls are made when accessing the values
+$user1 = $lazyUser1->getValue();
+$user2 = $lazyUser2->getValue();
+$employees = $lazyEmployees->getValue();
+$products = $lazyProducts->getValue();
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-python.mdx
new file mode 100644
index 0000000000..2adee4c961
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_perform-operations-lazily-python.mdx
@@ -0,0 +1,299 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Lazy request**:
+
+  * You can define a lazy request within a session (e.g. a lazy-query or a lazy-load request)
+    and defer its execution until actually needed.
+
+  * The lazy request definition is stored in the session and a `Lazy` instance is returned.
+    The request will be sent to the server and executed only when you access the value of this instance.
+
+* **Multiple lazy requests**:
+
+  * Multiple lazy requests can be defined within the same session.
+
+  * When triggering the deferred execution (whether implicitly or explicitly),
+    ALL pending lazy requests held up by the session will be sent to the server in a single call.
+    This can help reduce the number of remote calls made to the server over the network.
+
+* In this page:
+  * [Requests that can be executed lazily](../../../client-api/session/how-to/perform-operations-lazily.mdx#operations-that-can-be-executed-lazily)
+    * [Load entities](../../../client-api/session/how-to/perform-operations-lazily.mdx#load-entities)
+    * [Load entities with include](../../../client-api/session/how-to/perform-operations-lazily.mdx#load-entities-with-include)
+    * [Load entities starting with](../../../client-api/session/how-to/perform-operations-lazily.mdx#load-entities-starting-with)
+    * [Conditional load](../../../client-api/session/how-to/perform-operations-lazily.mdx#conditional-load)
+    * [Run query](../../../client-api/session/how-to/perform-operations-lazily.mdx#run-query)
+    * [Get revisions](../../../client-api/session/how-to/perform-operations-lazily.mdx#get-revisions)
+    * [Get compare-exchange value](../../../client-api/session/how-to/perform-operations-lazily.mdx#get-compare-exchange-value)
+  * [Multiple lazy requests](../../../client-api/session/how-to/perform-operations-lazily.mdx#multiple-lazy-requests)
+    * [Execute all requests - implicitly](../../../client-api/session/how-to/perform-operations-lazily.mdx#execute-all-requests---implicitly)
+    * [Execute all requests - explicitly](../../../client-api/session/how-to/perform-operations-lazily.mdx#execute-all-requests---explicitly)
+
+
+## Operations that can be executed lazily
+
+### Load entities
+
+[load](../../../client-api/session/loading-entities.mdx#load) loads a document entity from
+the database into the session.
+Loading entities can be executed **lazily**.
+
+
+
+{`lazy_employee = (
+    session
+    # Add a call to lazily
+    .advanced.lazily
+    # Document will not be loaded from the database here, no server call is made
+    .load("employees/1-A", Employee)
+)
+
+employee = lazy_employee.value  # 'load' operation is executed here
+# The employee entity is now loaded & tracked by the session
+`}
+
+
+### Load entities with include
+
+[load with include](../../../client-api/session/loading-entities.mdx#load-with-includes) loads
+both the document and the specified related document.
+Loading entities with include can be executed **lazily**.
+
+
+
+
+{`lazy_product = (
+    session
+    # Add a call to lazily
+    .advanced.lazily
+    # Request to include the related Supplier document
+    # Documents will Not be loaded from the database here, no server call is made
+    .include("SupplierId").load("products/1-A")
+)
+
+# 'Load with include' operation will be executed here
+# Both documents will be retrieved from the database
+product = lazy_product.value
+# The product entity is now loaded & tracked by the session
+
+# Access the related document, no additional server call is made
+supplier = session.load(product.SupplierId)
+# The supplier entity is now also loaded & tracked by the session
+`}
+
+
+
+{`class Product:
+    def __init__(self, Id: str = None, Name: str = None, SupplierId: str = None):
+        self.Id = Id
+        self.Name = Name
+        self.SupplierId = SupplierId  # The related document ID
+`}
+
+
+
+### Load entities starting with
+
+[load_starting_with](../../../client-api/session/loading-entities.mdx#loadstartingwith) loads
+entities whose ID starts with the specified prefix.
+Loading entities with a common prefix can be executed **lazily**.
+
+
+
+{`lazy_employees = (
+    session
+    # Add a call to lazily
+    .advanced.lazily
+    # Request to load entities whose ID starts with 'employees/'
+    # Documents will Not be loaded from the database here, no server call is made
+    .load_starting_with("employees/")
+)
+
+employees = lazy_employees.value  # 'load' operation is executed here
+# The employee entities are now loaded & tracked by the session
+`}
+
+
+### Conditional load
+
+[conditional_load](../../../client-api/session/loading-entities.mdx#conditionalload) logic is:
+
+* If the entity is already loaded to the session:
+  no server call is made, the tracked entity is returned.
+* If the entity is Not already loaded to the session:
+  the document will be loaded from the server only if the change-vector provided to the
+  method is older than the one in the server (i.e. if the document in the server is newer).
+* Loading entities conditionally can be executed **lazily**.
+
+
+
+{`# Create document and get its change-vector:
+change_vector: Optional[str] = None
+with store.open_session() as session1:
+    employee = Employee()
+    session1.store(employee, "employees/1-A")
+    session1.save_changes()
+
+    # Get the tracked entity change-vector
+    change_vector = session1.advanced.get_change_vector_for(employee)
+
+# Conditionally lazy-load the document
+with store.open_session() as session2:
+    lazy_employee = (
+        session2
+        # Add a call to lazily
+        .advanced.lazily
+        # Document will Not be loaded from the database here, no server call is made
+        .conditional_load("employees/1-A", change_vector)
+    )
+
+    loaded_item = lazy_employee.value  # 'conditional_load' operation is executed here
+    employee = loaded_item.entity
+
+    # If conditional_load has actually fetched the document from the server (logic described above)
+    # then the employee entity is now loaded & tracked by the session
+`}
+
+
+### Run query
+
+A query can be executed **lazily**.
+Learn more about running queries lazily in [lazy queries](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx).
+
+
+
+{`# Define a lazy query:
+lazy_employees = (
+    session.query(object_type=Employee).where_equals("FirstName", "John")
+    # Add a call to lazily, the query will not be executed here
+    .lazily()
+)
+
+employees = lazy_employees.value  # query is executed here
+# Note: Since query results are not projected,
+# the resulting employee entities will be tracked by the session.
+`}
+
+
+### Get revisions
+
+All methods for [getting revisions](../../../document-extensions/revisions/client-api/session/loading.mdx) and their metadata can be executed **lazily**.
+
+
+
+{`lazy_revisions = (
+    session
+    # Add a call to lazily
+    .advanced.revisions.lazily
+    # Revisions will Not be fetched here, no server call is made
+    .get_for("employees/1-A", Employee)
+)
+
+# Usage is the same for the other get revisions methods:
+# .get()
+# .get_metadata_for()
+
+revisions = lazy_revisions.value  # Getting revisions is executed here
+`}
+
+
+### Get compare-exchange value
+
+[get_compare_exchange_value](../../../client-api/session/cluster-transaction/compare-exchange.mdx#get-compare-exchange)
+can be executed **lazily**.
+ + + +{`with store.open_session( + session_options=SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE) +) as session: + # Create compare-exchange value: + session.advanced.cluster_transaction.create_compare_exchange_value("someKey", "someValue") + session.save_changes() + + # Get the compare-exchange value lazily: + lazy_cmp_xchg = ( + session + # Adda a call to lazily + .advanced.cluster_transaction.lazily + # Compare-exchange values will Not be fetched here, no server call is made + .get_compare_exchange_value("someKey") + ) + + # Usage is the same for the other method: + # .get_compare_exchange_values() + + cmp_xchg_value = lazy_cmp_xchg.value # Getting compare-exchange value is executed here +`} + + + + + +## Multiple lazy requests + +### Execute all requests - implicitly + +Accessing the value of ANY of the lazy instances will trigger +the execution of ALL pending lazy requests held up by the session, +in a SINGLE server call. + + + +{`# Define multiple lazy requests +lazy_user_1 = session.advanced.lazily.load("users/1-A") +lazy_user_2 = session.advanced.lazily.load("users/2-A") + +lazy_employees = session.query(object_type=Employee).lazily() +lazy_products = session.query(object_type=Product).search("Name", "Ch*").lazily() + +# Accessing the value of ANY of the lazy instances will trigger +# the execution of ALL pending lazy requests held up by the session +# This is done in a SINGLE server call +user1 = lazy_user_1.value + +# ALL the other values are now also available +# No additional server calls are made when accessing these values +user2 = lazy_user_2.value +employees = lazy_employees.value +products = lazy_products.value +`} + + +### Execute all requests - explicitly + +Explicitly calling `execute_all_pending_lazy_operations` will execute +ALL pending lazy requests held up by the session, in a SINGLE server call. + + + +{`# Define multiple lazy requests +lazy_user_1 = session.advanced.lazily.load("users/1-A") +lazy_user_2 = session.advanced.lazily.load("users/2-A") + +lazy_employees = session.query(object_type=Employee).lazily() +lazy_products = session.query(object_type=Product).search("Name", "Ch*").lazily() + +# Explicitly call 'execute_all_pending_lazy_operations' +# ALL pending lazy requests held up by the session will be executed in a SINGLE server call +session.advanced.eagerly.execute_all_pending_lazy_operations() + +# ALL values are now available +# No additional server calls are made when accessing the values +user1 = lazy_user_1.value +user2 = lazy_user_2.value +employees = lazy_employees.value +products = lazy_products.value +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-csharp.mdx new file mode 100644 index 0000000000..6c93721b00 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-csharp.mdx @@ -0,0 +1,41 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To update an entity with the latest changes from the server, use the `Refresh` method +from `Advanced` session operations. 
+
+## Syntax
+
+
+
+{`void Refresh<T>(T entity);
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **entity** | `T` | Instance of an entity that will be refreshed |
+
+## Example
+
+
+
+{`Employee employee = session.Load<Employee>("employees/1");
+Assert.Equal("Doe", employee.LastName);
+
+// LastName changed to 'Shmoe'
+
+session.Advanced.Refresh(employee);
+Assert.Equal("Shmoe", employee.LastName);
+`}
+
+
+
+## Remarks
+
+Refreshing a transient entity (not attached) or an entity that was deleted server-side will result in an `InvalidOperationException`.
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-java.mdx
new file mode 100644
index 0000000000..9fa34ecb22
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-java.mdx
@@ -0,0 +1,42 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To update an entity with the latest changes from the server, use the `refresh` method
+from `advanced` session operations.
+
+## Syntax
+
+
+
+{`<T> void refresh(T entity);
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **entity** | `T` | Instance of an entity that will be refreshed |
+
+## Example
+
+
+
+{`Employee employee = session.load(Employee.class, "employees/1");
+Assert.assertEquals("Doe", employee.getLastName());
+
+// LastName changed to 'Shmoe'
+
+session.advanced().refresh(employee);
+
+Assert.assertEquals("Shmoe", employee.getLastName());
+`}
+
+
+
+## Remarks
+
+Refreshing a transient entity (not attached) or an entity that was deleted server-side will result in an `IllegalStateException`.
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-nodejs.mdx
new file mode 100644
index 0000000000..dcb8ea4310
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-nodejs.mdx
@@ -0,0 +1,42 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To update an entity with the latest changes from the server, use the `refresh()` method
+from `advanced` session operations.
+
+## Syntax
+
+
+
+{`session.advanced.refresh(entity);
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **entity** | `object` | Instance of an entity that will be refreshed |
+
+## Example
+
+
+
+{`const employee = await session.load("employees/1");
+assert.strictEqual("Doe", employee.lastName);
+
+// lastName changed to 'Shmoe'
+
+await session.advanced.refresh(employee);
+
+assert.strictEqual("Shmoe", employee.lastName);
+`}
+
+
+
+## Remarks
+
+Refreshing a transient entity (not attached) or an entity that was deleted server-side will result in an `InvalidOperationException` error.
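+
+A minimal sketch of guarding a refresh call against this error is shown below.
+The `try`/`catch` handling is illustrative only and is not part of the client API:
+
+
+
+{`try \{
+    // refresh() makes a server call, so it is awaited
+    await session.advanced.refresh(employee);
+\} catch (err) \{
+    // Reached when the entity is not tracked by the session,
+    // or when its document was deleted server-side
+    console.error("Could not refresh entity:", err.message);
+\}
+`}
+
+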
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-php.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-php.mdx
new file mode 100644
index 0000000000..135bcbdf64
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-php.mdx
@@ -0,0 +1,41 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To update an entity with the latest changes from the server, use the `refresh` method from `advanced` session operations.
+
+## Syntax
+
+
+
+{`public function refresh(object $entity): void;
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **$entity** | `object` | An entity that will be refreshed |
+
+## Example
+
+
+
+{`$employee = $session->load(Employee::class, "employees/1");
+$this->assertEquals("Doe", $employee->getLastName());
+
+// LastName changed to 'Shmoe'
+
+$session->advanced()->refresh($employee);
+
+$this->assertEquals("Shmoe", $employee->getLastName());
+`}
+
+
+
+## Remarks
+
+Refreshing a transient entity (not attached) or an entity that was deleted server-side will result in an `InvalidOperationException`.
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-python.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-python.mdx
new file mode 100644
index 0000000000..17aacdcfe1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_refresh-entity-python.mdx
@@ -0,0 +1,40 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To update an entity with the latest changes from the server, use the `refresh` method from `advanced` session operations.
+
+## Syntax
+
+
+
+{`def refresh(self, entity: object) -> object: ...
+`}
+
+
+
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| **entity** | `object` | An entity that will be refreshed |
+
+## Example
+
+
+
+{`employee = session.load("employees/1", Employee)
+self.assertEqual("Doe", employee.last_name)
+
+# last_name changed to "Shmoe"
+
+session.advanced.refresh(employee)
+self.assertEqual("Shmoe", employee.last_name)
+`}
+
+
+
+## Remarks
+
+Refreshing a transient entity (not attached) or an entity that was deleted server-side will result in an `InvalidOperationException`.
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_subscribe-to-events-csharp.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_subscribe-to-events-csharp.mdx
new file mode 100644
index 0000000000..aab2b72201
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_subscribe-to-events-csharp.mdx
@@ -0,0 +1,465 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Events** allow users to perform custom actions in response to operations made in
+  a `Document Store` or a `Session`.
+
+* An event is invoked when the selected action is executed on an entity, or when querying is performed.
+
+* Subscribing to an event within a `Session` is valid only for this session.
+
+  E.g., to invoke an event after SaveChanges() is called by **this session** only, use -
+  `session.Advanced.OnAfterSaveChanges += OnAfterSaveChangesEvent;`
+
+* Subscribing to an event at the `DocumentStore` level subscribes to this
+  event in all subsequent sessions.
+
+  E.g., to invoke an event after SaveChanges() is called by **any subsequent session**, use -
+  `store.OnAfterSaveChanges += OnAfterSaveChangesEvent;`
+
+  Read more about `DocumentStore` events [here](../../../client-api/how-to/subscribe-to-store-events.mdx).
+
+* In this Page:
+  * [OnBeforeStore](../../../client-api/session/how-to/subscribe-to-events.mdx#onbeforestore)
+  * [OnBeforeDelete](../../../client-api/session/how-to/subscribe-to-events.mdx#onbeforedelete)
+  * [OnAfterSaveChanges](../../../client-api/session/how-to/subscribe-to-events.mdx#onaftersavechanges)
+  * [OnBeforeQuery](../../../client-api/session/how-to/subscribe-to-events.mdx#onbeforequery)
+  * [OnBeforeConversionToDocument](../../../client-api/session/how-to/subscribe-to-events.mdx#onbeforeconversiontodocument)
+  * [OnAfterConversionToDocument](../../../client-api/session/how-to/subscribe-to-events.mdx#onafterconversiontodocument)
+  * [OnBeforeConversionToEntity](../../../client-api/session/how-to/subscribe-to-events.mdx#onbeforeconversiontoentity)
+  * [OnAfterConversionToEntity](../../../client-api/session/how-to/subscribe-to-events.mdx#onafterconversiontoentity)
+  * [OnSessionDisposing](../../../client-api/session/how-to/subscribe-to-events.mdx#onsessiondisposing)
+
+
+
+## OnBeforeStore
+
+This event is invoked as part of `SaveChanges`, but before the changes are actually sent to the server.
+It should be defined with this signature:
+
+
+
+{`private void OnBeforeStoreEvent(object sender, BeforeStoreEventArgs args);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **sender** | `IDocumentSession` | The session on which `SaveChanges()` has been called, triggering this event |
+| **args** | `BeforeStoreEventArgs` | `args` contains the session on which `SaveChanges()` has been called, the ID of the document being stored, the document's metadata, and the document itself. |
+
+The class `BeforeStoreEventArgs`:
+
+
+
+{`public class BeforeStoreEventArgs
+\{
+    public InMemoryDocumentSessionOperations Session;
+    public string DocumentId;
+    public object Entity;
+    public IMetadataDictionary DocumentMetadata;
+\}
+`}
+
+
+
+### Example
+
+Say we want to discontinue all of the products that are not in stock.
+
+
+
+{`private void OnBeforeStoreEvent(object sender, BeforeStoreEventArgs args)
+\{
+    var product = args.Entity as Product;
+    if (product?.UnitsInStock == 0)
+    \{
+        product.Discontinued = true;
+    \}
+\}
+`}
+
+
+
+After we subscribe to the event, every stored entity will invoke the method.
+
+
+
+{`// Subscribe to the event
+session.Advanced.OnBeforeStore += OnBeforeStoreEvent;
+
+session.Store(new Product
+\{
+    Name = "RavenDB v3.5",
+    UnitsInStock = 0
+\});
+session.Store(new Product
+\{
+    Name = "RavenDB v4.0",
+    UnitsInStock = 1000
+\});
+
+session.SaveChanges(); // Here the method is invoked
+`}
+
+
+
+
+
+## OnBeforeDelete
+
+This event is invoked by `Delete(id)` or `Delete(entity)`. It is only executed when `SaveChanges()`
+is called, but before the commands are actually sent to the server.
+It should be defined with this signature:
+
+
+
+{`private void OnBeforeDeleteEvent(object sender, BeforeDeleteEventArgs args);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **sender** | `IDocumentSession` | The session on which `SaveChanges()` has been called, triggering this event |
+| **args** | `BeforeDeleteEventArgs` | `args` contains the session on which `SaveChanges()` has been called, the ID of the document being deleted, the document's metadata, and the document itself. |
+
+The class `BeforeDeleteEventArgs`:
+
+
+
+{`public class BeforeDeleteEventArgs
+\{
+    public InMemoryDocumentSessionOperations Session;
+    public string DocumentId;
+    public object Entity;
+    public IMetadataDictionary DocumentMetadata;
+\}
+`}
+
+
+
+### Example
+
+To prevent anyone from deleting entities we can create a method as follows:
+
+
+
+{`private void OnBeforeDeleteEvent(object sender, BeforeDeleteEventArgs args)
+\{
+    throw new NotSupportedException();
+\}
+`}
+
+
+
+and subscribe it to the session:
+
+
+
+{`// Subscribe to the event
+session.Advanced.OnBeforeDelete += OnBeforeDeleteEvent;
+
+var product = session.Load<Product>("products/1-A");
+var product2 = session.Load<Product>("products/2-A");
+
+// OnBeforeDelete is triggered whether you
+// call Delete() on an entity or on its ID
+session.Delete(product);
+session.SaveChanges(); // NotSupportedException will be thrown
+
+session.Delete("products/2-A");
+session.SaveChanges(); // NotSupportedException will be thrown
+`}
+
+
+
+
+
+## OnAfterSaveChanges
+
+This event is invoked after `SaveChanges` returns.
+It should be defined with this signature:
+
+
+
+{`private void OnAfterSaveChangesEvent(object sender, AfterSaveChangesEventArgs args);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **sender** | `IDocumentSession` | The session on which `SaveChanges()` has been called, triggering this event |
+| **args** | `AfterSaveChangesEventArgs` | `args` contains the session on which `SaveChanges()` has been called, the ID of the document being saved, and the document itself. |
+
+The class `AfterSaveChangesEventArgs`:
+
+
+
+{`public class AfterSaveChangesEventArgs
+\{
+    public InMemoryDocumentSessionOperations Session;
+    public string DocumentId;
+    public object Entity;
+\}
+`}
+
+
+
+### Example
+
+If we want to log each entity that was saved, we can create a method as follows:
+
+
+
+{`private void OnAfterSaveChangesEvent(object sender, AfterSaveChangesEventArgs args)
+\{
+    if (Log.IsInfoEnabled)
+        Log.Info($"Document '\{args.DocumentId\}' was saved.");
+\}
+`}
+
+
+
+
+
+## OnBeforeQuery
+
+This event is invoked just before the query is sent to the server.
+It should be defined with this signature:
+
+
+
+{`private void OnBeforeQueryEvent(object sender, BeforeQueryEventArgs args);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **sender** | `IDocumentSession` | The session issuing the query, triggering this event |
+| **args** | `BeforeQueryEventArgs` | `args` contains the session issuing the query, and the query's [query customizations](../../../client-api/session/querying/how-to-customize-query.mdx). |
+
+The class `BeforeQueryEventArgs`:
+
+
+
+{`public class BeforeQueryEventArgs
+\{
+    public InMemoryDocumentSessionOperations Session;
+    public IDocumentQueryCustomization QueryCustomization;
+\}
+`}
+
+
+
+### Example I
+
+If you want to disable caching of all query results, you can implement the method as follows:
+
+
+
+{`private void OnBeforeQueryEvent(object sender, BeforeQueryEventArgs args)
+\{
+    args.QueryCustomization.NoCaching();
+\}
+`}
+
+
+
+### Example II
+
+If you want each query to [wait for non-stale results](../../../indexes/stale-indexes.mdx) you can create an event as follows:
+
+
+
+{`private void OnBeforeQueryEvent(object sender, BeforeQueryEventArgs args)
+\{
+    args.QueryCustomization.WaitForNonStaleResults(TimeSpan.FromSeconds(30));
+\}
+`}
+
+
+
+
+
+## OnBeforeConversionToDocument
+
+This event is invoked before an entity is converted to a blittable JSON document, e.g. when sending a document to the server.
+It should be defined with this signature:
+
+
+
+{`private void OnBeforeConversionToDocumentEvent(object sender, BeforeConversionToDocumentEventArgs args);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **sender** | `IDocumentSession` | The session on which `SaveChanges()` has been called, triggering this event |
+| **args** | `BeforeConversionToDocumentEventArgs` | `args` contains the session on which `SaveChanges()` has been called, the ID of the document being converted, and the document itself. |
+
+The class `BeforeConversionToDocumentEventArgs`:
+
+
+
+{`public class BeforeConversionToDocumentEventArgs
+\{
+    public InMemoryDocumentSessionOperations Session;
+    public string DocumentId;
+    public object Entity;
+\}
+`}
+
+
+
+### Example
+
+
+
+{`private void OnBeforeConversionToDocument(object sender, BeforeConversionToDocumentEventArgs args)
+\{
+    if (args.Entity is Item item)
+        item.Before = true;
+\}
+`}
+
+
+
+
+
+## OnAfterConversionToDocument
+
+This event is invoked after an entity is converted to a blittable JSON document.
+It should be defined with this signature:
+
+
+
+{`private void OnAfterConversionToDocumentEvent(object sender, AfterConversionToDocumentEventArgs args);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **sender** | `IDocumentSession` | The session on which `SaveChanges()` has been called, triggering this event |
+| **args** | `AfterConversionToDocumentEventArgs` | `args` contains the session on which `SaveChanges()` has been called, the ID of the document being converted, and the document itself. |
+
+The class `AfterConversionToDocumentEventArgs`:
+
+
+
+{`public class AfterConversionToDocumentEventArgs
+\{
+    public InMemoryDocumentSessionOperations Session;
+    public string DocumentId;
+    public object Entity;
+\}
+`}
+
+
+
+### Example
+
+
+
+{`private void OnAfterConversionToDocument(object sender, AfterConversionToDocumentEventArgs args)
+\{
+    if (args.Entity is Item item)
+    \{
+        if (args.Document.Modifications == null)
+            args.Document.Modifications = new DynamicJsonValue();
+
+        args.Document.Modifications["After"] = true;
+        args.Document = args.Session.Context.ReadObject(args.Document, args.Id);
+
+        item.After = true;
+    \}
+\}
+`}
+
+
+
+
+
+## OnBeforeConversionToEntity
+
+This event is invoked before a JSON document is converted to an entity, e.g. when loading a document.
+
+It takes the argument `BeforeConversionToEntityEventArgs`, which consists of the JSON document, its ID and type, and the session instance.
+
+
+
+{`private void OnBeforeConversionToEntity(object sender, BeforeConversionToEntityEventArgs args)
+\{
+    var document = args.Document;
+    if (document.Modifications == null)
+        document.Modifications = new DynamicJsonValue();
+
+    document.Modifications["Before"] = true;
+    args.Document = args.Session.Context.ReadObject(document, args.Id);
+\}
+`}
+
+
+
+
+
+## OnAfterConversionToEntity
+
+This event is invoked after a JSON document is converted to an entity. It takes the argument `AfterConversionToEntityEventArgs`, which consists of the JSON document, its ID, the session instance, and the converted entity.
+
+
+
+{`private void OnAfterConversionToEntity(object sender, AfterConversionToEntityEventArgs args)
+\{
+    if (args.Entity is Item item)
+        item.After = true;
+\}
+`}
+
+
+
+
+
+## OnSessionDisposing
+
+This event is invoked when a session is about to be disposed of, i.e. **before** the disposal actually takes place.
+Keeping track of session disposal allows you, among other uses, to verify that sessions
+that are no longer needed are indeed disposed of.
+
+
+
+{`private void OnSessionDisposingEvent(object sender, SessionDisposingEventArgs args);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **sender** | `IDocumentSession` | The session whose disposal triggered this event |
+| **args** | `SessionDisposingEventArgs` | `args` contains the session that is being disposed of. |
+
+The class `SessionDisposingEventArgs`:
+
+
+
+{`public class SessionDisposingEventArgs : EventArgs
+\{
+    public InMemoryDocumentSessionOperations Session \{ get; \}
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_subscribe-to-events-java.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_subscribe-to-events-java.mdx
new file mode 100644
index 0000000000..33f83356a8
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_subscribe-to-events-java.mdx
@@ -0,0 +1,516 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* **Events** allow users to perform custom actions in response to operations made in
+  a `Document Store` or a `Session`.
+
+* An event is invoked when the selected action is executed on an entity, or when querying is performed.
+
+* Subscribing to an event in a `Session` is valid only for this session.
+
+* Subscribing to an event at the `DocumentStore` level subscribes to this
+  event in all subsequent sessions.
+  Read more about `DocumentStore` events [here](../../../client-api/how-to/subscribe-to-store-events.mdx).
+
+## beforeStoreListener
+
+This event is invoked as part of `saveChanges`, but before the changes are actually sent to the server.
+It should be defined with this signature:
+
+
+
+{`public void addBeforeStoreListener(EventHandler<BeforeStoreEventArgs> handler);
+
+public void removeBeforeStoreListener(EventHandler<BeforeStoreEventArgs> handler);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **handler** | `EventHandler<BeforeStoreEventArgs>` | The handler that processes this event |
+
+The class `BeforeStoreEventArgs`:
+
+
+
+{`public BeforeStoreEventArgs(InMemoryDocumentSessionOperations session, String documentId, Object entity);
+`}
+
+
+
+
+
+{`public class BeforeStoreEventArgs
+\{
+    private IMetadataDictionary _documentMetadata;
+    private final InMemoryDocumentSessionOperations session;
+    private final String documentId;
+    private final Object entity;
+
+    //getters and setters (omitted for brevity)
+\}
+`}
+
+
+
+### Example
+
+Say we want to discontinue all of the products that are not in stock.
+
+
+
+{`private void onBeforeStoreEvent(Object sender, BeforeStoreEventArgs args) \{
+    if (args.getEntity() instanceof Product) \{
+        Product product = (Product) args.getEntity();
+        if (product.getUnitsInStock() == 0) \{
+            product.setDiscontinued(true);
+        \}
+    \}
+\}
+`}
+
+
+
+After we subscribe to the event, every stored entity will invoke the method.
+
+
+
+{`// subscribe to the event
+store.addBeforeStoreListener(this::onBeforeStoreEvent);
+
+try (IDocumentSession session = store.openSession()) \{
+    Product product1 = new Product();
+    product1.setName("RavenDB v3.5");
+    product1.setUnitsInStock(0);
+
+    session.store(product1);
+
+    Product product2 = new Product();
+    product2.setName("RavenDB v4.0");
+    product2.setUnitsInStock(1000);
+    session.store(product2);
+
+    session.saveChanges(); // Here the method is invoked
+\}
+`}
+
+
+
+
+
+## beforeDeleteListener
+
+This event is invoked by `delete(id)` or `delete(entity)`. It is only executed when `saveChanges()`
+is called, but before the commands are actually sent to the server.
+It should be defined with this signature:
+
+
+
+{`public void addBeforeDeleteListener(EventHandler<BeforeDeleteEventArgs> handler);
+
+public void removeBeforeDeleteListener(EventHandler<BeforeDeleteEventArgs> handler);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **handler** | `EventHandler<BeforeDeleteEventArgs>` | The handler that processes this event |
+
+The class `BeforeDeleteEventArgs`:
+
+
+
+{`public BeforeDeleteEventArgs(InMemoryDocumentSessionOperations session, String documentId, Object entity);
+`}
+
+
+
+
+
+{`public class BeforeDeleteEventArgs
+\{
+    private IMetadataDictionary _documentMetadata;
+    private final InMemoryDocumentSessionOperations session;
+    private final String documentId;
+    private final Object entity;
+
+    //getters and setters (omitted for brevity)
+\}
+`}
+
+
+
+### Example
+
+To prevent anyone from deleting entities we can create a method as follows:
+
+
+
+{`private void onBeforeDeleteEvent(Object sender, BeforeDeleteEventArgs args) \{
+    throw new NotImplementedException("Sample");
+\}
+`}
+
+
+
+and subscribe it to the session:
+
+
+
+{`// subscribe to the event
+store.addBeforeDeleteListener(this::onBeforeDeleteEvent);
+
+// open a session and delete entity
+try (IDocumentSession session = store.openSession()) \{
+    Product product = session.load(Product.class, "products/1-A");
+
+    session.delete(product);
+    session.saveChanges(); // NotImplementedException will be thrown here
+\}
+`}
+
+
+
+
+
+## afterSaveChangesListener
+
+This event is invoked after `saveChanges` returns.
+It should be defined with this signature:
+
+
+
+{`public void addAfterSaveChangesListener(EventHandler<AfterSaveChangesEventArgs> handler);
+
+public void removeAfterSaveChangesListener(EventHandler<AfterSaveChangesEventArgs> handler);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **handler** | `EventHandler<AfterSaveChangesEventArgs>` | The handler that processes this event |
+
+
+The class `AfterSaveChangesEventArgs`:
+
+
+
+
+{`public AfterSaveChangesEventArgs(InMemoryDocumentSessionOperations session, String documentId, Object entity);
+`}
+
+
+
+
+
+{`public class AfterSaveChangesEventArgs
+\{
+    private IMetadataDictionary _documentMetadata;
+    private final InMemoryDocumentSessionOperations session;
+    private final String documentId;
+    private final Object entity;
+
+    //getters and setters (omitted for brevity)
+\}
+`}
+
+
+
+### Example
+
+If we want to log each entity that was saved, we can create a method as follows:
+
+
+
+{`private void onAfterSaveChangesEvent(Object sender, AfterSaveChangesEventArgs args) \{
+    if (log.isLoggable(Level.INFO)) \{
+        log.info("Document " + args.getDocumentId() + " was saved");
+    \}
+\}
+`}
+
+
+
+
+
+## beforeQueryListener
+
+This event is invoked just before the query is sent to the server.
+It should be defined with this signature:
+
+
+
+{`public void addBeforeQueryListener(EventHandler<BeforeQueryEventArgs> handler);
+
+public void removeBeforeQueryListener(EventHandler<BeforeQueryEventArgs> handler);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **handler** | `EventHandler<BeforeQueryEventArgs>` | The handler that processes this event |
+
+The class `BeforeQueryEventArgs`:
+
+
+
+{`public class BeforeQueryEventArgs
+\{
+    private final InMemoryDocumentSessionOperations session;
+    private final IDocumentQueryCustomization queryCustomization;
+
+    //getters (omitted for brevity)
+\}
+`}
+
+
+
+### Example I
+
+If you want to disable caching of all query results, you can implement the method as follows:
+
+
+
+{`private void onBeforeQueryEvent(Object sender, BeforeQueryEventArgs args) \{
+    args.getQueryCustomization().noCaching();
+\}
+`}
+
+
+
+### Example II
+
+If you want each query to [wait for non-stale results](../../../indexes/stale-indexes.mdx) you can create an event as follows:
+
+
+
+{`private void onBeforeQueryEvent(Object sender, BeforeQueryEventArgs args) \{
+    args.getQueryCustomization().waitForNonStaleResults(Duration.ofSeconds(30));
+\}
+`}
+
+
+
+
+
+## beforeConversionToDocumentListener
+
+This event is invoked before an entity is converted to a blittable JSON document, e.g. when sending a document to the server.
+It should be defined with this signature:
+
+
+
+{`public void addBeforeConversionToDocumentListener(EventHandler<BeforeConversionToDocumentEventArgs> handler);
+
+public void removeBeforeConversionToDocumentListener(EventHandler<BeforeConversionToDocumentEventArgs> handler);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **handler** | `EventHandler<BeforeConversionToDocumentEventArgs>` | The handler that processes this event |
+
+The class `BeforeConversionToDocumentEventArgs`:
+
+
+
+{`public BeforeConversionToDocumentEventArgs(InMemoryDocumentSessionOperations session, String id, Object entity);
+`}
+
+
+
+
+
+
+{`public class BeforeConversionToDocumentEventArgs
+\{
+    private String _id;
+    private Object _entity;
+    private InMemoryDocumentSessionOperations _session;
+
+    //getters (omitted for brevity)
+\}
+`}
+
+
+
+### Example
+
+
+
+{`private void onBeforeConversionToDocument(Object sender, BeforeConversionToDocumentEventArgs args) \{
+    if (args.getEntity() instanceof Item) \{
+        Item item = (Item) args.getEntity();
+        item.setBefore(true);
+    \}
+\}
+`}
+
+
+
+
+
+## afterConversionToDocumentListener
+
+This event is invoked after an entity is converted to a blittable JSON document.
+It should be defined with this signature:
+
+
+
+{`public void addAfterConversionToDocumentListener(EventHandler<AfterConversionToDocumentEventArgs> handler);
+
+public void removeAfterConversionToDocumentListener(EventHandler<AfterConversionToDocumentEventArgs> handler);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **handler** | `EventHandler<AfterConversionToDocumentEventArgs>` | The handler that processes this event |
+
+The class `AfterConversionToDocumentEventArgs`:
+
+
+
+{`public AfterConversionToDocumentEventArgs(InMemoryDocumentSessionOperations session, String id, Object entity, Reference<ObjectNode> document);
+`}
+
+
+
+
+
+{`public class AfterConversionToDocumentEventArgs
+\{
+    private String _id;
+    private Object _entity;
+    private Reference<ObjectNode> _document;
+    private InMemoryDocumentSessionOperations _session;
+
+    //getters (omitted for brevity)
+\}
+`}
+
+
+
+### Example
+
+
+
+{`private void onAfterConversionToDocument(Object sender, AfterConversionToDocumentEventArgs args) \{
+    if (args.getEntity() instanceof Item)
+    \{
+        Item item = (Item) args.getEntity();
+        item.setAfter(true);
+    \}
+\}
+`}
+
+
+
+
+
+## beforeConversionToEntityListener
+
+This event is invoked before a JSON document is converted to an entity, e.g. when loading a document.
+
+It takes the argument `BeforeConversionToEntityEventArgs`, which consists of the JSON document, its ID and type, and the session instance.
+
+
+
+{`public void addBeforeConversionToEntityListener(EventHandler<BeforeConversionToEntityEventArgs> handler);
+
+public void removeBeforeConversionToEntityListener(EventHandler<BeforeConversionToEntityEventArgs> handler);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **handler** | `EventHandler<BeforeConversionToEntityEventArgs>` | The handler that processes this event |
+
+
+
+
+{`public class BeforeConversionToEntityEventArgs \{
+
+    private String _id;
+    private Class<?> _type;
+    private Reference<ObjectNode> _document;
+    private InMemoryDocumentSessionOperations _session;
+
+    //getters (omitted for brevity)
+\}
+`}
+
+
+
+
+
+{`private void onBeforeConversionToEntity(Object sender, BeforeConversionToEntityEventArgs args) \{
+    if ("item/1-A".equals(args.getId()))
+    \{
+        if (log.isLoggable(Level.INFO)) \{
+            log.info("Document " + args.getId() + " was found");
+        \}
+    \}
+\}
+`}
+
+
+
+
+
+## afterConversionToEntityListener
+
+This event is invoked after a JSON document is converted to an entity. It takes the argument `AfterConversionToEntityEventArgs`, which consists of the JSON document, its ID, the session instance, and the converted entity.
+
+
+
+{`public void addAfterConversionToEntityListener(EventHandler<AfterConversionToEntityEventArgs> handler);
+
+public void removeAfterConversionToEntityListener(EventHandler<AfterConversionToEntityEventArgs> handler);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **handler** | `EventHandler<AfterConversionToEntityEventArgs>` | The handler that processes this event |
+
+
+
+{`public class AfterConversionToEntityEventArgs \{
+
+    private String _id;
+    private ObjectNode _document;
+    private Object _entity;
+    private InMemoryDocumentSessionOperations _session;
+
+    //getters (omitted for brevity)
+\}
+`}
+
+
+
+
+
+{`private void onAfterConversionToEntity(Object sender, AfterConversionToEntityEventArgs args) \{
+    if (args.getEntity() instanceof Item) \{
+        Item item = (Item) args.getEntity();
+        item.setAfter(true);
+    \}
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/_subscribe-to-events-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/how-to/_subscribe-to-events-nodejs.mdx
new file mode 100644
index 0000000000..7352c4887a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/_subscribe-to-events-nodejs.mdx
@@ -0,0 +1,345 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* **Events** allow users to perform custom actions in response to operations made in
+  a `Document Store` or a `Session`.
+
+* An event is invoked when the selected action is executed on an entity, or when querying is performed.
+
+* Subscribing to an event in a `Session` is valid only for this session.
+
+* Subscribing to an event at the `DocumentStore` level subscribes to this
+  event in all subsequent sessions.
+  Read more about `DocumentStore` events [here](../../../client-api/how-to/subscribe-to-store-events.mdx).
+
+## OnBeforeStore
+
+This event is invoked as part of `saveChanges()`, but before the changes are actually sent to the server.
+It should be defined with this signature:
+
+
+
+{`store.addSessionListener("beforeStore", event => onBeforeStore(event));
+`}
+
+
+
+The usage of `SessionBeforeStoreEventArgs`:
+
+
+
+{`const beforeStoreEventArgs = new SessionBeforeStoreEventArgs(session, documentId, entity);
+`}
+
+
+
+### Example
+
+Say we want to discontinue all of the products that are not in stock.
+
+
+
+{`function onBeforeStore(args) \{
+    if (args.getEntity() instanceof Product) \{
+        const product = args.getEntity();
+        if (product.unitsInStock === 0) \{
+            product.discontinued = true;
+        \}
+    \}
+\}
+`}
+
+
+
+After we subscribe to the event, every stored entity will invoke the method.
+
+
+
+{`store.addSessionListener("beforeStore", event => onBeforeStore(event));
+
+session = store.openSession();
+await session.store(new Product(
+    \{
+        name: "RavenDB v3.5",
+        unitsInStock: 0
+    \})
+)
+
+await session.store(new Product(
+    \{
+        name: "RavenDB v4.0",
+        unitsInStock: 1000
+    \})
+)
+
+await session.saveChanges(); // Here the method is invoked
+`}
+
+
+
+
+
+## OnBeforeDelete
+
+This event is invoked by `delete(id)` or `delete(entity)`. It is only executed when `saveChanges()`
+is called, but before the commands are actually sent to the server.
+It should be defined with this signature:
+
+
+
+{`store.addSessionListener("beforeDelete", event => onBeforeDelete(event));
+`}
+
+
+
+The usage of `SessionBeforeDeleteEventArgs`:
+
+
+
+{`const beforeDeleteEventArgs = new SessionBeforeDeleteEventArgs(session, documentId, entity);
+`}
+
+
+
+### Example
+
+To prevent anyone from deleting entities we can create a method as follows:
+
+
+
+{`function onBeforeDelete(args) \{
+    throw new Error("Not implemented");
+\}
+`}
+
+
+
+and subscribe it to the session:
+
+
+
+{`store.addSessionListener("beforeDelete", event => onBeforeDelete(event));
+
+session = store.openSession();
+let product = await session.load("products/1-A", Product);
+let product2 = await session.load("products/2-A", Product);
+
+// onBeforeDelete is triggered whether you
+// call delete() on an entity or on its ID
+await session.delete(product);
+await session.saveChanges(); // the 'Not implemented' error will be thrown
+
+await session.delete("products/2-A");
+await session.saveChanges(); // the 'Not implemented' error will be thrown
+`}
+
+
+
+
+
+## OnAfterSaveChanges
+
+This event is invoked after `saveChanges()` returns.
+It should be defined with this signature:
+
+
+
+{`store.addSessionListener("afterSave", event => onAfterSaveChanges(event));
+`}
+
+
+
+
+The usage of `SessionAfterSaveChangesEventArgs`:
+
+
+
+{`const afterSaveChangesEventArgs = new SessionAfterSaveChangesEventArgs(session, documentId, entity);
+`}
+
+
+
+
+### Example
+
+If we want to log each entity that was saved, we can create a method as follows:
+
+
+
+{`function onAfterSaveChanges(args) \{
+    console.log("Document " + args.documentId + " was saved.");
+\}
+`}
+
+
+
+
+
+## OnBeforeQuery
+
+This event is invoked just before the query is sent to the server.
+It should be defined with this signature:
+
+
+
+{`store.addSessionListener("beforeQuery", event => onBeforeQuery(event));
+`}
+
+
+
+The usage of `SessionBeforeQueryEventArgs`:
+
+
+
+{`const beforeQueryEventArgs = new SessionBeforeQueryEventArgs(session, queryCustomization);
+`}
+
+
+
+### Example I
+
+If you want to disable caching of all query results, you can implement the method as follows:
+
+
+
+{`function onBeforeQuery(args) \{
+    args.queryCustomization.noCaching();
+\}
+`}
+
+
+
+### Example II
+
+If you want each query to [wait for non-stale results](../../../indexes/stale-indexes.mdx) you can create an event as follows:
+
+
+
+{`function onBeforeQuery(args) \{
+    args.queryCustomization.waitForNonStaleResults(30);
+\}
+`}
+
+
+
+
+
+## OnBeforeConversionToDocument
+
+This event is invoked before an entity is converted to a JSON document, e.g. when sending a document to the server.
+It should be defined with this signature:
+
+
+
+{`store.addSessionListener("beforeConversionToDocument", event => onBeforeConversionToDocument(event));
+`}
+
+
+
+The usage of `BeforeConversionToDocumentEventArgs`:
+
+
+
+{`const beforeConversionToDocumentEventArgs = new BeforeConversionToDocumentEventArgs(session, documentId, entity);
+`}
+
+
+
+### Example
+
+
+
+{`function onBeforeConversionToDocument(args) \{
+    if (args.getEntity() instanceof Product) \{
+        const product = args.getEntity();
+        product.before = true;
+    \}
+\}
+`}
+
+
+
+
+
+## OnAfterConversionToDocument
+
+This event is invoked after an entity is converted to a JSON document.
+It should be defined with this signature:
+
+
+
+{`store.addSessionListener("afterConversionToDocument", event => onAfterConversionToDocument(event));
+`}
+
+
+
+The usage of `AfterConversionToDocumentEventArgs`:
+
+
+
+{`const afterConversionToDocument = new AfterConversionToDocumentEventArgs(session, documentId, entity, document);
+`}
+
+
+
+
+### Example
+
+
+
+{`function onAfterConversionToDocument(args) \{
+    if (args.getEntity() instanceof Product) \{
+        const product = args.getEntity();
+        if (product.document.after == null) \{
+            product.document.after = true;
+        \}
+    \}
+\}
+`}
+
+
+
+
+
+## OnBeforeConversionToEntity
+
+This event is invoked before a JSON document is converted to an entity, e.g. when loading a document.
+
+It takes the argument `BeforeConversionToEntityEventArgs`, which consists of the JSON document, its ID and type, and the session instance.
+
+
+
+{`store.addSessionListener("beforeConversionToEntity", (event: BeforeConversionToEntityEventArgs) => \{
+    const document = ObjectUtil.clone(event.document);
+
+    document.before = true;
+    event.document = document;
+\});
+`}
+
+
+
+
+
+
+## OnAfterConversionToEntity
+
+This event is invoked after a JSON document is converted to an entity. It takes the argument `AfterConversionToEntityEventArgs`, which consists of the JSON document, its ID, the session instance, and the converted entity.
+
+
+
+{`store.addSessionListener("afterConversionToEntity", (event: AfterConversionToEntityEventArgs) => \{
+    if (event.entity instanceof Item) \{
+        const item = event.entity;
+        item.after = true;
+    \}
+\});
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/check-if-attachment-exists.mdx b/versioned_docs/version-7.1/client-api/session/how-to/check-if-attachment-exists.mdx
new file mode 100644
index 0000000000..9c140ba87f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/check-if-attachment-exists.mdx
@@ -0,0 +1,52 @@
+---
+title: "How to Check if an Attachment Exists"
+hide_table_of_contents: true
+sidebar_label: ...check if attachment exists
+sidebar_position: 16
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import CheckIfAttachmentExistsCsharp from './_check-if-attachment-exists-csharp.mdx';
+import CheckIfAttachmentExistsJava from './_check-if-attachment-exists-java.mdx';
+import CheckIfAttachmentExistsPython from './_check-if-attachment-exists-python.mdx';
+import CheckIfAttachmentExistsPhp from './_check-if-attachment-exists-php.mdx';
+import CheckIfAttachmentExistsNodejs from './_check-if-attachment-exists-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/how-to/check-if-document-exists.mdx b/versioned_docs/version-7.1/client-api/session/how-to/check-if-document-exists.mdx
new file mode 100644
index 0000000000..ba5d30960a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/how-to/check-if-document-exists.mdx
@@ -0,0 +1,52 @@
+---
+title: "How to Check if a Document Exists"
+hide_table_of_contents: true
+sidebar_label: ...check if document exists
+sidebar_position: 15
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import CheckIfDocumentExistsCsharp from
'./_check-if-document-exists-csharp.mdx'; +import CheckIfDocumentExistsJava from './_check-if-document-exists-java.mdx'; +import CheckIfDocumentExistsPython from './_check-if-document-exists-python.mdx'; +import CheckIfDocumentExistsPhp from './_check-if-document-exists-php.mdx'; +import CheckIfDocumentExistsNodejs from './_check-if-document-exists-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/check-if-entity-has-changed.mdx b/versioned_docs/version-7.1/client-api/session/how-to/check-if-entity-has-changed.mdx new file mode 100644 index 0000000000..169e0c9eb1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/check-if-entity-has-changed.mdx @@ -0,0 +1,50 @@ +--- +title: "How to Check for Entity Changes" +hide_table_of_contents: true +sidebar_label: ...check for entity changes +sidebar_position: 14 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CheckIfEntityHasChangedCsharp from './_check-if-entity-has-changed-csharp.mdx'; +import CheckIfEntityHasChangedJava from './_check-if-entity-has-changed-java.mdx'; +import CheckIfEntityHasChangedPython from './_check-if-entity-has-changed-python.mdx'; +import CheckIfEntityHasChangedPhp from './_check-if-entity-has-changed-php.mdx'; +import CheckIfEntityHasChangedNodejs from './_check-if-entity-has-changed-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx b/versioned_docs/version-7.1/client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx new file mode 100644 index 0000000000..1b47c27e69 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/check-if-there-are-any-changes-on-a-session.mdx @@ -0,0 +1,50 @@ +--- +title: "How to Check for Session Changes" +hide_table_of_contents: true +sidebar_label: ...check for session changes +sidebar_position: 13 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CheckIfThereAreAnyChangesOnASessionCsharp from './_check-if-there-are-any-changes-on-a-session-csharp.mdx'; +import CheckIfThereAreAnyChangesOnASessionJava from './_check-if-there-are-any-changes-on-a-session-java.mdx'; +import CheckIfThereAreAnyChangesOnASessionPython from './_check-if-there-are-any-changes-on-a-session-python.mdx'; +import CheckIfThereAreAnyChangesOnASessionPhp from './_check-if-there-are-any-changes-on-a-session-php.mdx'; +import CheckIfThereAreAnyChangesOnASessionNodejs from './_check-if-there-are-any-changes-on-a-session-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/clear-a-session.mdx b/versioned_docs/version-7.1/client-api/session/how-to/clear-a-session.mdx new file mode 100644 index 0000000000..014b91eb9b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/clear-a-session.mdx @@ -0,0 +1,50 @@ +--- +title: "How to Clear a 
Session" +hide_table_of_contents: true +sidebar_label: ...clear a session +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ClearASessionCsharp from './_clear-a-session-csharp.mdx'; +import ClearASessionJava from './_clear-a-session-java.mdx'; +import ClearASessionPython from './_clear-a-session-python.mdx'; +import ClearASessionPhp from './_clear-a-session-php.mdx'; +import ClearASessionNodejs from './_clear-a-session-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/defer-operations.mdx b/versioned_docs/version-7.1/client-api/session/how-to/defer-operations.mdx new file mode 100644 index 0000000000..e39ca93278 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/defer-operations.mdx @@ -0,0 +1,49 @@ +--- +title: "How to Defer Commands" +hide_table_of_contents: true +sidebar_label: ...defer commands +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeferOperationsCsharp from './_defer-operations-csharp.mdx'; +import DeferOperationsJava from './_defer-operations-java.mdx'; +import DeferOperationsPython from './_defer-operations-python.mdx'; +import DeferOperationsPhp from './_defer-operations-php.mdx'; +import DeferOperationsNodejs from './_defer-operations-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/evict-entity-from-a-session.mdx b/versioned_docs/version-7.1/client-api/session/how-to/evict-entity-from-a-session.mdx new file mode 100644 index 0000000000..a2880af610 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/evict-entity-from-a-session.mdx @@ -0,0 +1,48 @@ +--- +title: "Evict a Single Entity from a Session" +hide_table_of_contents: true +sidebar_label: ...evict a single entity from a session +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import EvictEntityFromASessionCsharp from './_evict-entity-from-a-session-csharp.mdx'; +import EvictEntityFromASessionJava from './_evict-entity-from-a-session-java.mdx'; +import EvictEntityFromASessionPython from './_evict-entity-from-a-session-python.mdx'; +import EvictEntityFromASessionPhp from './_evict-entity-from-a-session-php.mdx'; +import EvictEntityFromASessionNodejs from './_evict-entity-from-a-session-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/how-to/get-and-modify-entity-metadata.mdx b/versioned_docs/version-7.1/client-api/session/how-to/get-and-modify-entity-metadata.mdx new file mode 100644 index 0000000000..a10e511c44 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/get-and-modify-entity-metadata.mdx @@ -0,0 +1,42 @@ +--- +title: "Session: How to Get and Modify Entity Metadata" +hide_table_of_contents: true +sidebar_label: ...get and modify entity metadata 
+sidebar_position: 7 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetAndModifyEntityMetadataCsharp from './_get-and-modify-entity-metadata-csharp.mdx'; +import GetAndModifyEntityMetadataJava from './_get-and-modify-entity-metadata-java.mdx'; +import GetAndModifyEntityMetadataNodejs from './_get-and-modify-entity-metadata-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/get-current-session-node.mdx b/versioned_docs/version-7.1/client-api/session/how-to/get-current-session-node.mdx new file mode 100644 index 0000000000..963b4edb9f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/get-current-session-node.mdx @@ -0,0 +1,46 @@ +--- +title: "Session: How to Get the Current Session Node" +hide_table_of_contents: true +sidebar_label: ...get current session node +sidebar_position: 11 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetCurrentSessionNodeCsharp from './_get-current-session-node-csharp.mdx'; +import GetCurrentSessionNodeJava from './_get-current-session-node-java.mdx'; +import GetCurrentSessionNodePhp from './_get-current-session-node-php.mdx'; +import GetCurrentSessionNodeNodejs from './_get-current-session-node-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/get-entity-change-vector.mdx b/versioned_docs/version-7.1/client-api/session/how-to/get-entity-change-vector.mdx new file mode 100644 index 0000000000..b01b2bdf2e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/get-entity-change-vector.mdx @@ -0,0 +1,49 @@ +--- +title: "Session: How to Get Entity Change-Vector" +hide_table_of_contents: true +sidebar_label: ...get entity change vector +sidebar_position: 8 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetEntityChangeVectorCsharp from './_get-entity-change-vector-csharp.mdx'; +import GetEntityChangeVectorJava from './_get-entity-change-vector-java.mdx'; +import GetEntityChangeVectorPython from './_get-entity-change-vector-python.mdx'; +import GetEntityChangeVectorPhp from './_get-entity-change-vector-php.mdx'; +import GetEntityChangeVectorNodejs from './_get-entity-change-vector-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/get-entity-counters.mdx b/versioned_docs/version-7.1/client-api/session/how-to/get-entity-counters.mdx new file mode 100644 index 0000000000..7a2d7e9750 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/get-entity-counters.mdx @@ -0,0 +1,46 @@ +--- +title: "How to Get Entity Counters" +hide_table_of_contents: true +sidebar_label: ...get entity counters +sidebar_position: 10 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import 
GetEntityCountersCsharp from './_get-entity-counters-csharp.mdx'; +import GetEntityCountersPython from './_get-entity-counters-python.mdx'; +import GetEntityCountersPhp from './_get-entity-counters-php.mdx'; +import GetEntityCountersNodejs from './_get-entity-counters-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/get-entity-id.mdx b/versioned_docs/version-7.1/client-api/session/how-to/get-entity-id.mdx new file mode 100644 index 0000000000..df1e4dfa1b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/get-entity-id.mdx @@ -0,0 +1,46 @@ +--- +title: "Session: How to Get Entity ID" +hide_table_of_contents: true +sidebar_label: ...get entity id +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetEntityIdCsharp from './_get-entity-id-csharp.mdx'; +import GetEntityIdJava from './_get-entity-id-java.mdx'; +import GetEntityIdNodejs from './_get-entity-id-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/get-entity-last-modified.mdx b/versioned_docs/version-7.1/client-api/session/how-to/get-entity-last-modified.mdx new file mode 100644 index 0000000000..d6867f6eaa --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/get-entity-last-modified.mdx @@ -0,0 +1,49 @@ +--- +title: "Session: How to Get Entity Last Modified" +hide_table_of_contents: true +sidebar_label: ...get entity last modified +sidebar_position: 9 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetEntityLastModifiedCsharp from './_get-entity-last-modified-csharp.mdx'; +import GetEntityLastModifiedJava from './_get-entity-last-modified-java.mdx'; +import GetEntityLastModifiedPython from './_get-entity-last-modified-python.mdx'; +import GetEntityLastModifiedPhp from './_get-entity-last-modified-php.mdx'; +import GetEntityLastModifiedNodejs from './_get-entity-last-modified-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/get-tracked-entities.mdx b/versioned_docs/version-7.1/client-api/session/how-to/get-tracked-entities.mdx new file mode 100644 index 0000000000..b16fbc629a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/get-tracked-entities.mdx @@ -0,0 +1,35 @@ +--- +title: "How to Get Tracked Entities" +hide_table_of_contents: true +sidebar_label: ...get tracked entities +sidebar_position: 12 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetTrackedEntitiesCsharp from './_get-tracked-entities-csharp.mdx'; +import GetTrackedEntitiesNodejs from './_get-tracked-entities-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/ignore-entity-changes.mdx 
b/versioned_docs/version-7.1/client-api/session/how-to/ignore-entity-changes.mdx new file mode 100644 index 0000000000..b47f00743f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/ignore-entity-changes.mdx @@ -0,0 +1,51 @@ +--- +title: "How to Ignore Entity Changes" +hide_table_of_contents: true +sidebar_label: ...ignore entity changes +sidebar_position: 17 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IgnoreEntityChangesCsharp from './_ignore-entity-changes-csharp.mdx'; +import IgnoreEntityChangesJava from './_ignore-entity-changes-java.mdx'; +import IgnoreEntityChangesPython from './_ignore-entity-changes-python.mdx'; +import IgnoreEntityChangesPhp from './_ignore-entity-changes-php.mdx'; +import IgnoreEntityChangesNodejs from './_ignore-entity-changes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/perform-operations-lazily.mdx b/versioned_docs/version-7.1/client-api/session/how-to/perform-operations-lazily.mdx new file mode 100644 index 0000000000..b878e20eec --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/perform-operations-lazily.mdx @@ -0,0 +1,43 @@ +--- +title: "Perform requests lazily" +hide_table_of_contents: true +sidebar_label: ...perform requests lazily +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PerformOperationsLazilyCsharp from './_perform-operations-lazily-csharp.mdx'; +import PerformOperationsLazilyPython from './_perform-operations-lazily-python.mdx'; +import PerformOperationsLazilyPhp from './_perform-operations-lazily-php.mdx'; +import PerformOperationsLazilyNodejs from './_perform-operations-lazily-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/refresh-entity.mdx b/versioned_docs/version-7.1/client-api/session/how-to/refresh-entity.mdx new file mode 100644 index 0000000000..2617d386e9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/how-to/refresh-entity.mdx @@ -0,0 +1,50 @@ +--- +title: "Session: How to Refresh an Entity" +hide_table_of_contents: true +sidebar_label: ...refresh entity +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import RefreshEntityCsharp from './_refresh-entity-csharp.mdx'; +import RefreshEntityJava from './_refresh-entity-java.mdx'; +import RefreshEntityPython from './_refresh-entity-python.mdx'; +import RefreshEntityPhp from './_refresh-entity-php.mdx'; +import RefreshEntityNodejs from './_refresh-entity-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/how-to/subscribe-to-events.mdx b/versioned_docs/version-7.1/client-api/session/how-to/subscribe-to-events.mdx new file mode 100644 index 0000000000..11f84aa436 --- /dev/null +++ 
b/versioned_docs/version-7.1/client-api/session/how-to/subscribe-to-events.mdx
@@ -0,0 +1,41 @@
+---
+title: "Session: Subscribing to Session Events"
+hide_table_of_contents: true
+sidebar_label: ...subscribe to events
+sidebar_position: 5
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import SubscribeToEventsCsharp from './_subscribe-to-events-csharp.mdx';
+import SubscribeToEventsJava from './_subscribe-to-events-java.mdx';
+import SubscribeToEventsNodejs from './_subscribe-to-events-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/loading-entities.mdx b/versioned_docs/version-7.1/client-api/session/loading-entities.mdx
new file mode 100644
index 0000000000..c0dc487488
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/loading-entities.mdx
@@ -0,0 +1,57 @@
+---
+title: "Session: Loading Entities"
+hide_table_of_contents: true
+sidebar_label: Loading Entities
+sidebar_position: 4
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import LoadingEntitiesCsharp from './_loading-entities-csharp.mdx';
+import LoadingEntitiesJava from './_loading-entities-java.mdx';
+import LoadingEntitiesPython from './_loading-entities-python.mdx';
+import LoadingEntitiesPhp from './_loading-entities-php.mdx';
+import LoadingEntitiesNodejs from './_loading-entities-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/opening-a-session.mdx b/versioned_docs/version-7.1/client-api/session/opening-a-session.mdx
new file mode 100644
index 0000000000..6acaa4ec4f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/opening-a-session.mdx
@@ -0,0 +1,64 @@
+---
+title: "Open a Session"
+hide_table_of_contents: true
+sidebar_label: Open a Session
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import OpeningASessionCsharp from './_opening-a-session-csharp.mdx';
+import OpeningASessionJava from './_opening-a-session-java.mdx';
+import OpeningASessionPython from './_opening-a-session-python.mdx';
+import OpeningASessionPhp from './_opening-a-session-php.mdx';
+import OpeningASessionNodejs from './_opening-a-session-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_category_.json b/versioned_docs/version-7.1/client-api/session/querying/_category_.json
new file mode 100644
index 0000000000..5155f8a760
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 8,
+  "label": "Querying"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-csharp.mdx
new file mode 100644
index 0000000000..13d7de932b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-csharp.mdx
b/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-csharp.mdx @@ -0,0 +1,178 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The following options are available to **count query results**: + + * [Count](../../../client-api/session/querying/how-to-count-query-results.mdx#count) + + * [LongCount](../../../client-api/session/querying/how-to-count-query-results.mdx#longcount) + + * [Get number of results from query stats](../../../client-api/session/querying/how-to-count-query-results.mdx#get-count-from-query-stats) + + +## Count + +* When the number of resulting items is expected to be an **`Int32`** variable, + use `Count` in a synchronous session (or `CountAsync` in an async session). + +* `Count` is implemented in `System.Linq`. + `CountAsync` is implemented in `Raven.Client.Documents`. + +* An `OverflowException` will be thrown if the number of items exceeds **`Int32.MaxValue`**. + + + + + + +{`// using System.Linq; +// ================== + +int numberOfOrders = session + .Query() + .Where(order => order.ShipTo.Country == "UK") + // Calling 'Count' from System.Linq + .Count(); + +// The query returns the NUMBER of orders shipped to UK (Int32) +`} + + + + +{`// using Raven.Client.Documents; +// using Raven.Client.Documents.Linq; +// ================================== + +int numberOfOrders = await asyncSession + .Query() + // Calling 'Where' from Raven.Client.Documents.Linq + .Where(order => order.ShipTo.Country == "UK") + // Calling 'CountAsync' from Raven.Client.Documents + .CountAsync(); + +// The query returns the NUMBER of orders shipped to UK (Int32) +`} + + + + +{`// using Raven.Client.Documents.Session; +// ===================================== + +int numberOfOrders = session.Advanced + .DocumentQuery() + .WhereEquals(order => order.ShipTo.Country, "UK") + // Calling 'Count' from Raven.Client.Documents.Session + .Count(); + +// The query returns the NUMBER of orders shipped to UK (Int32) +`} + + + + +{`from "Orders" +where ShipTo.Country == "UK" limit 0, 0 + +// The RQL generated will trigger query execution +// however, no documents are returned (limit is set 0) +`} + + + + + + + + +## LongCount + +* When the number of resulting items is expected to be an **`Int64`** variable, + use `LongCount` in a synchronous session (or `LongCountAsync` in an async session). + +* `LongCount` is implemented in both `Raven.Client.Documents` & `System.Linq` (use as needed). + `LongCountAsync` is implemented in `Raven.Client.Documents`. 
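+
+Before the tabbed examples below, a minimal side-by-side sketch (using the same `Order` entity as the rest of this page) showing that the two methods differ only in their return type:
+
+```csharp
+// 'Count' returns an Int32 and will throw an OverflowException
+// if the number of results exceeds Int32.MaxValue
+int smallCount = session.Query<Order>().Count();
+
+// 'LongCount' returns an Int64 and is safe for very large result sets
+long largeCount = session.Query<Order>().LongCount();
+```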
+ + + + + + +{`// using Raven.Client.Documents; +// using Raven.Client.Documents.Linq; +// ================================== + +long numberOfOrders = session + .Query() + // Calling 'Where' from Raven.Client.Documents.Linq + .Where(order => order.ShipTo.Country == "UK") + // Calling 'LongCount' from Raven.Client.Documents + .LongCount(); + +// The query returns the NUMBER of orders shipped to UK (Int64) +`} + + + + +{`// using Raven.Client.Documents; +// using Raven.Client.Documents.Linq; +// ================================== + +long numberOfOrders = await asyncSession + .Query() + // Calling 'Where' from Raven.Client.Documents.Linq + .Where(order => order.ShipTo.Country == "UK") + // Calling 'LongCountAsync' from Raven.Client.Documents + .LongCountAsync(); + +// The query returns the NUMBER of orders shipped to UK (Int64) +`} + + + + +{`// using Raven.Client.Documents.Session; +// ===================================== + +long numberOfOrders = session.Advanced + .DocumentQuery() + .WhereEquals(order => order.ShipTo.Country, "UK") + // Calling 'LongCount' from Raven.Client.Documents.Session + .LongCount(); + +// The query returns the NUMBER of orders shipped to UK (Int64) +`} + + + + +{`from "Orders" +where ShipTo.Country == "UK" limit 0, 0 + +// The RQL generated will trigger query execution +// however, no documents are returned (limit is set 0) +`} + + + + + + + + +## Get count from query stats + +* When executing a query, you can retrieve the query statistics, which include the total number of results. + +* The total number of results is available in the `TotalResults` property of the `QueryStatistics` object. + Learn more in [Get Query Statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx). + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-nodejs.mdx new file mode 100644 index 0000000000..f48dfc3954 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-nodejs.mdx @@ -0,0 +1,63 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The following options are available to **count query results**: + + * [Count](../../../client-api/session/querying/how-to-count-query-results.mdx#count) + + * [Get number of results from query stats](../../../client-api/session/querying/how-to-count-query-results.mdx#get-count-from-query-stats) + + +## Count + +* Call `count` to get the number of items in the query results. + +* Method `longCount`, which is also available, produces exactly the same results as `count`. + It is only included to be consistent with the .NET client. + + + + + + +{`const numberOfOrders = await session + .query({ collection: "Orders" }) + .whereEquals("ShipTo.Country", "UK") + // Call 'count' to get the number of results + .count(); + +// The query returns the NUMBER of orders shipped to UK +`} + + + + +{`from "Orders" +where ShipTo.Country == "UK" limit 0, 0 + +// The RQL generated will trigger query execution +// however, no documents are returned (limit is set 0) +`} + + + + + + + + +## Get count from query stats + + +* When executing a query, you can retrieve the query statistics, which include the total number of results. + +* The total number of results is available in the `totalResults` property of the `QueryStatistics` object. 
+ Learn more in [Get Query Statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx). + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-php.mdx new file mode 100644 index 0000000000..131ed81ddc --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-php.mdx @@ -0,0 +1,53 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The following options are available to **count query results**: + + * [`Count`](../../../client-api/session/querying/how-to-count-query-results.mdx#count) + * [Get number of results from query stats](../../../client-api/session/querying/how-to-count-query-results.mdx#get-count-from-query-stats) + + +## `Count` + +Count query results using the `Count` method. + + + + +{`/** @var int $numberOfOrders */ +$numberOfOrders = $session->advanced() + ->documentQuery(Order::class) + ->whereEquals("ship_to.country", "UK") + // Calling 'Count' from Raven.Client.Documents.Session + ->Count(); + +// The query returns the NUMBER of orders shipped to UK (int) +`} + + + + +{`from "Orders" +where ShipTo.Country == "UK" limit 0, 0 + +// The RQL generated will trigger query execution +// however, no documents are returned (limit is set 0) +`} + + + + + + +## Get count from query stats + +When executing a query, you can retrieve the query statistics that include the total number of results. +Learn more in [Get Query Statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx). + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-python.mdx new file mode 100644 index 0000000000..3f0029b289 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-count-query-results-python.mdx @@ -0,0 +1,76 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The following options are available to **count query results**: + + * [`count`](../../../client-api/session/querying/how-to-count-query-results.mdx#count) + * [Get number of results from query stats](../../../client-api/session/querying/how-to-count-query-results.mdx#get-count-from-query-stats) + + +## `count` + +Count query results using the `count` method. + + + + +{`number_of_orders = ( + session.advanced.document_query(object_type=Order).where_equals("ship_to.country", "UK").count() +) +# The query returns the NUMBER of orders shipped to UK (int) +`} + + + + +{`from "Orders" +where ship_to.country == "UK" limit 0, 0 + +// The RQL generated will trigger query execution +// however, no documents are returned (limit is set 0) +`} + + + + + + +## Get count from query stats + +When executing a query, you can retrieve query statistics that include the total number of results. +To do this, define a callback function that takes `QueryStatistics` as an argument and applies whatever +logic you want to apply. 
+
+
+{`def statistics_callback(statistics: QueryStatistics) -> None:
+    # Read and interact with QueryStatistics here
+    total_results = statistics.total_results
+    duration_milliseconds = statistics.duration_in_ms
+    ...
+`}
+
+
+
+Then pass your function as an argument to the `query.statistics` method and use the retrieved `QueryStatistics` object.
+
+
+
+{`employees = list(
+    session.query(object_type=Employee)
+    .where_equals("first_name", "Robert")
+    .statistics(statistics_callback)
+)
+`}
+
+
+
+Learn more in [Get Query Statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx).
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-csharp.mdx
new file mode 100644
index 0000000000..21daef22cb
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-csharp.mdx
@@ -0,0 +1,885 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `Customize()` to set the following customization options on a specific [Query](../../../client-api/session/querying/how-to-query.mdx).
+  Can be set for both **dynamic** and **index** queries.
+
+* Each such customization can also be implemented via the [DocumentQuery](../../../client-api/session/querying/document-query/what-is-document-query.mdx) API.
+
+* A query can also be customized on the Store or Session level by subscribing to `OnBeforeQuery`.
+  Learn more in [Subscribing to Events](../../../client-api/session/how-to/subscribe-to-events.mdx).
+
+* Customization methods available:
+
+  - [BeforeQueryExecuted](../../../client-api/session/querying/how-to-customize-query.mdx#beforequeryexecuted)
+  - [AfterQueryExecuted](../../../client-api/session/querying/how-to-customize-query.mdx#afterqueryexecuted)
+  - [AfterStreamExecuted](../../../client-api/session/querying/how-to-customize-query.mdx#afterstreamexecuted)
+  - [NoCaching](../../../client-api/session/querying/how-to-customize-query.mdx#nocaching)
+  - [NoTracking](../../../client-api/session/querying/how-to-customize-query.mdx#notracking)
+  - [Projection](../../../client-api/session/querying/how-to-customize-query.mdx#projection)
+  - [RandomOrdering](../../../client-api/session/querying/how-to-customize-query.mdx#randomordering)
+  - [Timings](../../../client-api/session/querying/how-to-customize-query.mdx#timings)
+  - [WaitForNonStaleResults](../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults)
+
+* [Methods return value](../../../client-api/session/querying/how-to-customize-query.mdx#methods-return-value)
+
+
+
+## BeforeQueryExecuted
+
+* Use `BeforeQueryExecuted` to customize the query just before it is executed.
+
+
+
+**Example**
+
+
+
+
+{`List<Employee> results = session
+    .Query<Employee>()
+    // Call 'Customize' with 'BeforeQueryExecuted'
+    .Customize(x => x.BeforeQueryExecuted(query =>
+    {
+        // Can modify query parameters
+        query.SkipDuplicateChecking = true;
+        // Can apply any needed action, e.g.
write to log + _logger.Info($"Query to be executed is: {query.Query}"); + })) + .Where(x => x.FirstName == "Robert") + .ToList(); +`} + + + + +{`List results = await asyncSession + .Query() + // Call 'Customize' with 'BeforeQueryExecuted' + .Customize(x => x.BeforeQueryExecuted(query => + { + // Can modify query parameters + query.SkipDuplicateChecking = true; + // Can apply any needed action, e.g. write to log + _logger.Info($"Query to be executed is: {query.Query}"); + })) + .Where(x => x.FirstName == "Robert") + .ToListAsync(); +`} + + + + +{`List results = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Robert") + // Call 'BeforeQueryExecuted' + .BeforeQueryExecuted(query => + { + // Can modify query parameters + query.SkipDuplicateChecking = true; + // Can apply any needed action, e.g. write to log + _logger.Info($"Query to be executed is: {query.Query}"); + }) + .ToList(); +`} + + + + +{`List results = session.Advanced + .RawQuery("from 'Employees' where FirstName == 'Robert'") + // Call 'BeforeQueryExecuted' + .BeforeQueryExecuted(query => + { + // Can modify query parameters + query.SkipDuplicateChecking = true; + // Can apply any needed action, e.g. write to log + _logger.Info($"Query to be executed is: {query.Query}"); + }) + .ToList(); +`} + + + + + + + + +**Syntax** + + + +{`IDocumentQueryCustomization BeforeQueryExecuted(Action action); +`} + + + +| Parameters | Type | Description | +|------------| ---- |---------------------------------------------------------------------------------------------------------------------------------| +| **action** | `Action` | An _Action_ method that operates on the query.
The query is passed in the [IndexQuery](../../../glossary/index-query.mdx) param. | + +
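+
+For instance, a minimal sketch (mirroring the `setPageSize` examples in the Java and PHP versions of this page) that uses `BeforeQueryExecuted` to cap the page size of each query execution:
+
+```csharp
+List<Employee> results = session
+    .Query<Employee>()
+    // Limit every execution of this query to at most 10 results
+    .Customize(x => x.BeforeQueryExecuted(query => query.PageSize = 10))
+    .ToList();
+```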
+ + + +## AfterQueryExecuted + +* Use `AfterQueryExecuted` to access the raw query result after it is executed. + + + +**Example** + + + + +{`List results = session + .Query() + // Call 'Customize' with 'AfterQueryExecuted' + .Customize(x => x.AfterQueryExecuted(rawResult => + { + // Can access the raw query result + var queryDuration = rawResult.DurationInMs; + // Can apply any needed action, e.g. write to log + _logger.Info($"{rawResult.LastQueryTime}"); + })) + .ToList(); +`} + + + + +{`List results = await asyncSession + .Query() + // Call 'Customize' with 'AfterQueryExecuted' + .Customize(x => x.AfterQueryExecuted(rawResult => + { + // Can access the raw query result + var queryDuration = rawResult.DurationInMs; + // Can apply any needed action, e.g. write to log + _logger.Info($"{rawResult.LastQueryTime}"); + })) + .ToListAsync(); +`} + + + + +{`List results = session.Advanced + .DocumentQuery() + // Call 'AfterQueryExecuted' + .AfterQueryExecuted(rawResult => + { + // Can access the raw query result + var queryDuration = rawResult.DurationInMs; + // Can apply any needed action, e.g. write to log + _logger.Info($"{rawResult.LastQueryTime}"); + }) + .ToList(); +`} + + + + +{`List results = session.Advanced + .RawQuery("from 'Employees'") + // Call 'AfterQueryExecuted' + .AfterQueryExecuted(rawResult => + { + // Can access the raw query result + var queryDuration = rawResult.DurationInMs; + // Can apply any needed action, e.g. write to log + _logger.Info($"{rawResult.LastQueryTime}"); + }) + .ToList(); +`} + + + + + + + + +**Syntax** + + + +{`IDocumentQueryCustomization AfterQueryExecuted(Action action); +`} + + + +| Parameters | Type | Description | +|------------| ---- |-----------------------------------------------------------------------------------------------------------------------------| +| **action** | `Action` | An _Action_ method that receives the raw query result.
The query result is passed in the [QueryResult](../../../glossary/query-result.mdx) param. | + +
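+
+As a short usage sketch (a variation on the examples above, assuming the same query is executed repeatedly), the callback can also accumulate statistics across runs:
+
+```csharp
+TimeSpan totalDuration = TimeSpan.Zero;
+
+List<Employee> results = session
+    .Query<Employee>()
+    // Add each execution's server-side duration to a running total
+    .Customize(x => x.AfterQueryExecuted(rawResult =>
+        totalDuration += TimeSpan.FromMilliseconds(rawResult.DurationInMs)))
+    .ToList();
+```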
+ + + +## AfterStreamExecuted + +* Use `AfterStreamExecuted` to retrieve a raw (blittable) result of the streaming query. + +* Learn more in [how to stream query results](../../../client-api/session/querying/how-to-stream-query-results.mdx). + + + +**Example** + + + + +{`long totalStreamedResultsSize = 0; + +// Define the query +var query = session + .Query() + // Call 'Customize' with 'AfterStreamExecuted' + .Customize(x => x.AfterStreamExecuted(streamResult => + // Can access the stream result + totalStreamedResultsSize += streamResult.Size)); + +// Call 'Stream' to execute the query +var streamResults = session.Advanced.Stream(query); +`} + + + + +{`long totalStreamedResultsSize = 0; + +// Define the query +var query = asyncSession + .Query() + // Call 'Customize' with 'AfterStreamExecuted' + .Customize(x => x.AfterStreamExecuted(streamResult => + // Can access the stream result + totalStreamedResultsSize += streamResult.Size)); + +// Call 'Stream' to execute the query +var streamResults = await asyncSession.Advanced.StreamAsync(query); +`} + + + + +{`long totalStreamedResultsSize = 0; + +// Define the document query +var query = session.Advanced + .DocumentQuery() + // Call 'AfterStreamExecuted' + .AfterStreamExecuted(streamResult => + // Can access the stream result + totalStreamedResultsSize += streamResult.Size); + +// Call 'Stream' to execute the document query +var streamResults = session.Advanced.Stream(query); +`} + + + + +{`long totalStreamedResultsSize = 0; + +// Define the raw query +var query = session.Advanced + .RawQuery("from 'Employees'") + // Call 'AfterStreamExecuted' + .AfterStreamExecuted(streamResult => + // Can access the stream result + totalStreamedResultsSize += streamResult.Size); + +// Call 'Stream' to execute the document query +var streamResults = session.Advanced.Stream(query); +`} + + + + + + + + +**Syntax** + + + +{`IDocumentQueryCustomization AfterStreamExecuted(Action action); +`} + + + +| Parameters | Type | Description | +|------------| ---- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **action** | `Action` | An _Action_ method that recieves a single stream query result.
The stream result is passed in the [BlittableJsonReaderObject](../../../glossary/blittable-json-reader-object.mdx) param. | + +
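+
+A brief usage sketch (based on the examples above) showing that the callback fires for each item as the stream is consumed:
+
+```csharp
+long totalStreamedResultsSize = 0;
+
+var query = session
+    .Query<Employee>()
+    // Accumulate the size of each raw (blittable) stream result
+    .Customize(x => x.AfterStreamExecuted(streamResult =>
+        totalStreamedResultsSize += streamResult.Size));
+
+// The callback runs per result while the stream is iterated
+using (var streamResults = session.Advanced.Stream(query))
+{
+    while (streamResults.MoveNext())
+    {
+        Employee employee = streamResults.Current.Document;
+    }
+}
+```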
+ + + +## NoCaching + +* By default, query results are cached. + +* You can use the `NoCaching` customization to disable query caching. + +* Learn more in [disable caching per session](../../../client-api/session/configuration/how-to-disable-caching.mdx). + + + +**Example** + + + + +{`List results = session + .Query() + // Call 'Customize' with 'NoCaching' + .Customize(x => x.NoCaching()) + .Where(x => x.FirstName == "Robert") + .ToList(); +`} + + + + +{`List results = await asyncSession + .Query() + // Call 'Customize' with 'NoCaching' + .Customize(x => x.NoCaching()) + .Where(x => x.FirstName == "Robert") + .ToListAsync(); +`} + + + + +{`List results = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Robert") + // Call 'NoCaching' + .NoCaching() + .ToList(); +`} + + + + +{`List results = session.Advanced + .RawQuery("from 'Employees' where FirstName == 'Robert'") + // Call 'NoCaching' + .NoCaching() + .ToList(); +`} + + + + + + + + +**Syntax** + + + +{`IDocumentQueryCustomization NoCaching(); +`} + + + + + + + +## NoTracking + +* By default, the [Session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) tracks all changes made to all entities that it has either loaded, stored, or queried for. + +* You can use the `NoTracking` customization to disable entity tracking. + +* See [disable entity tracking](../../../client-api/session/configuration/how-to-disable-tracking.mdx) for all other options. + + + +**Example** + + + + +{`List results = session + .Query() + // Call 'Customize' with 'NoTracking' + .Customize(x => x.NoTracking()) + .Where(x => x.FirstName == "Robert") + .ToList(); +`} + + + + +{`List results = await asyncSession + .Query() + // Call 'Customize' with 'NoTracking' + .Customize(x => x.NoTracking()) + .Where(x => x.FirstName == "Robert") + .ToListAsync(); +`} + + + + +{`List results = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Robert") + // Call 'NoTracking' + .NoTracking() + .ToList(); +`} + + + + +{`List results = session.Advanced + .RawQuery("from 'Employees' where FirstName == 'Robert'") + // Call 'NoTracking' + .NoTracking() + .ToList(); +`} + + + + + + + + +**Syntax** + + + +{`IDocumentQueryCustomization NoTracking(); +`} + + + + + + + +## Projection + +* By default, when [querying an index](../../../indexes/querying/query-index.mdx) and projecting query results, + the server will try to retrieve field values from the fields [stored in the index](../../../indexes/storing-data-in-index.mdx). + + Projecting means the query returns only specific document fields instead of the full document. + + +* If the index does Not store these fields, the field values will be retrieved from the documents. + +* Use the `Projection` method to customize and modify this behavior. + +* Note: + Entities resulting from a projecting query are Not tracked by the session. 
+ Learn more about projections in: + + * [Project index query results](../../../indexes/querying/projections.mdx) + * [Project dynamic query results](../../../client-api/session/querying/how-to-project-query-results.mdx) + + + +**Example** + + + + +{`List results = session + .Query() + // Call 'Customize' + // Pass the requested projection behavior to the 'Projection' method + .Customize(x => x.Projection(ProjectionBehavior.FromDocumentOrThrow)) + // Select the fields that will be returned by the projection + .Select(x => x.FullName) + .ToList(); +`} + + + + +{`List results = await asyncSession + .Query() + // Call 'Customize' + // Pass the requested projection behavior to the 'Projection' method + .Customize(x => x.Projection(ProjectionBehavior.FromDocumentOrThrow)) + // Select the fields that will be returned by the projection + .Select(x => x.FullName) + .ToListAsync(); +`} + + + + +{`List results = session.Advanced + .DocumentQuery("Employees/ByFullName") + // Pass the requested projection behavior to the 'SelectFields' method + // and specify the field that will be returned by the projection + .SelectFields(ProjectionBehavior.FromDocumentOrThrow, "FullName") + .ToList(); +`} + + + + +{`List results = session.Advanced + // Define an RQL query that returns a projection + .RawQuery(@"from index 'Employees/ByFullName' select FullName") + // Pass the requested projection behavior to the 'Projection' method + .Projection(ProjectionBehavior.FromDocumentOrThrow) + .ToList(); +`} + + + + +{`public class Employees_ByFullName : AbstractIndexCreationTask +{ + // The IndexEntry class defines the index-fields. + public class IndexEntry + { + public string FullName { get; set; } + } + + public Employees_ByFullName() + { + // The 'Map' function defines the content of the index-fields + Map = employees => from employee in employees + select new IndexEntry + { + FullName = $"{employee.FirstName} {employee.LastName}" + }; + + // Store field 'FullName' in the index + Store(x => x.FullName, FieldStorage.Yes); + } +} +`} + + + + +In the above example: + + * Field _'FullName'_ is stored in the index (see index definition in the rightmost tab). + However, the server will try to fetch the value from the document since the default behavior was modified to `FromDocumentOrThrow`. + * An exception will be thrown since an _'Employee'_ document does not contain the property _'FullName'_. + (based on the Northwind sample data). + + + + + +**Syntax** + + + +{`IDocumentQueryCustomization Projection(ProjectionBehavior projectionBehavior); + +public enum ProjectionBehavior \{ + Default, + FromIndex, + FromIndexOrThrow, + FromDocument, + FromDocumentOrThrow +\} +`} + + + +* `Default` + Retrieve values from the stored index fields when available. + If fields are not stored then get values from the document, + a field that is not found in the document is skipped. +* `FromIndex` + Retrieve values from the stored index fields when available. + A field that is not stored in the index is skipped. +* `FromIndexOrThrow` + Retrieve values from the stored index fields when available. + An exception is thrown if the index does not store the requested field. +* `FromDocument` + Retrieve values directly from the documents store. + A field that is not found in the document is skipped. +* `FromDocumentOrThrow` + Retrieve values directly from the documents store. + An exception is thrown if the document does not contain the requested field. + + + + + +## RandomOrdering + +* Use `RandomOrdering` to order the query results randomly. 
+ +* More ordering options are available in this [Sorting](../../../client-api/session/querying/sort-query-results.mdx) article. + + + +**Example** + + + + +{`List results = session + .Query() + // Call 'Customize' with 'RandomOrdering' + .Customize(x => x.RandomOrdering()) + .ToList(); +`} + + + + +{`List results = await asyncSession + .Query() + // Call 'Customize' with 'RandomOrdering' + .Customize(x => x.RandomOrdering()) + .ToListAsync(); +`} + + + + +{`List results = session.Advanced + .DocumentQuery() + // Call 'RandomOrdering' + .RandomOrdering() + .ToList(); +`} + + + + +{`List results = session.Advanced + // Define an RQL query that orders the results randomly + .RawQuery("from 'Employees' order by random()") + .ToList(); +`} + + + + + + + + +**Syntax** + + + +{`IDocumentQueryCustomization RandomOrdering(); +IDocumentQueryCustomization RandomOrdering(string seed); +`} + + + +| Parameters | Type | Description | +|------------| ------------- |-------------------------------------------------------------------------------------------------| +| **seed** | `string` | Order the search results randomly using this seed.
Useful when executing repeated random queries. | + +
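+
+For example, passing a seed makes repeated "random" queries reproducible; a minimal sketch (the seed value here is arbitrary):
+
+```csharp
+List<Employee> results = session
+    .Query<Employee>()
+    // The same seed yields the same "random" order on every execution
+    .Customize(x => x.RandomOrdering("123"))
+    .ToList();
+```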
+ + + +## Timings + +* Use `Timings` to get detailed stats of the time spent by the server on each part of the query. + +* The timing statistics will be included in the query results. + +* Learn more in [how to include query timings](../../../client-api/session/querying/debugging/query-timings.mdx). + + + +**Example** + + + + +{`List results = session + .Query() + // Call 'Customize' with 'Timings' + // Provide an out param for the timings results + .Customize(x => x.Timings(out QueryTimings timings)) + .Where(x => x.FirstName == "Robert") + .ToList(); +`} + + + + +{`List results = await asyncSession + .Query() + // Call 'Customize' with 'Timings' + // Provide an out param for the timings results + .Customize(x => x.Timings(out QueryTimings timings)) + .Where(x => x.FirstName == "Robert") + .ToListAsync(); +`} + + + + +{`List results = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Robert") + // Call 'Timings'. + // Provide an out param for the timings results + .Timings(out QueryTimings timings) + .ToList(); +`} + + + + +{`List results = session.Advanced + .RawQuery("from 'Employees' where FirstName == 'Robert'") + // Call 'Timings'. + // Provide an out param for the timings results + .Timings(out QueryTimings timings) + .ToList(); +`} + + + + + + + + +**Syntax** + + + +{`IDocumentQueryCustomization Timings(out QueryTimings timings); +`} + + + +| Parameters | Type | Description | +|------------| ------------- | ----- | +| **timings** | `QueryTimings` | An out param that will be filled with the timings results | + + + + + +## WaitForNonStaleResults + +* All queries in RavenDB provide results using an index, even when you don't specify one. + See detailed explanation in [Queries always provide results using an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). + +* Use `WaitForNonStaleResults` to instruct the query to wait for non-stale results from the index. + +* A `TimeoutException` will be thrown if the query is not able to return non-stale results within the specified + (or default) timeout. + +* Note: This feature is Not available when [streaming the query results](../../../client-api/session/querying/how-to-stream-query-results.mdx). + Calling _WaitForNonStaleResults_ with a streaming query will throw an exception. + +* Learn more about stale results in [stale indexes](../../../indexes/stale-indexes.mdx). + + + +**Example** + + + + +{`List results = session + .Query() + // Call 'Customize' with 'WaitForNonStaleResults' + .Customize(x => x.WaitForNonStaleResults(TimeSpan.FromSeconds(10))) + .Where(x => x.FirstName == "Robert") + .ToList(); +`} + + + + +{`List results = await asyncSession + .Query() + // Call 'Customize' with 'WaitForNonStaleResults' + .Customize(x => x.WaitForNonStaleResults(TimeSpan.FromSeconds(10))) + .Where(x => x.FirstName == "Robert") + .ToListAsync(); +`} + + + + +{`List results = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Robert") + // Call 'WaitForNonStaleResults' + .WaitForNonStaleResults(TimeSpan.FromSeconds(10)) + .ToList(); +`} + + + + +{`List results = session.Advanced + .RawQuery("from 'Employees' where FirstName == 'Robert'") + // Call 'WaitForNonStaleResults' + .WaitForNonStaleResults(TimeSpan.FromSeconds(10)) + .ToList(); +`} + + + + + + + + +**Syntax** + + + +{`IDocumentQueryCustomization WaitForNonStaleResults(TimeSpan? 
waitTimeout); +`} + + + +| Parameters | Type | Description | +|------------| ------------- |-----------| +| **waitTimeout** | `TimeSpan?` | Time to wait for non-stale results.
Default is 15 seconds. | + +
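+
+Since exceeding the timeout raises a `TimeoutException`, a caller will typically wrap the query in a try/catch; a minimal sketch:
+
+```csharp
+try
+{
+    List<Employee> results = session
+        .Query<Employee>()
+        // Wait up to 10 seconds for the index to become non-stale
+        .Customize(x => x.WaitForNonStaleResults(TimeSpan.FromSeconds(10)))
+        .Where(x => x.FirstName == "Robert")
+        .ToList();
+}
+catch (TimeoutException)
+{
+    // The index could not return non-stale results within 10 seconds
+}
+```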
+ + + +## Methods return value + +All of the above customization methods return the following: + +| `Query` return value | | +|-----------------------------| ----- | +| IDocumentQueryCustomization | Returns self for easier method chaining. | + +| `DocumentQuery` return value | | +|---------------------------------| ----- | +| IQueryBase | Returns self for easier method chaining. | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-java.mdx new file mode 100644 index 0000000000..790f28f186 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-java.mdx @@ -0,0 +1,331 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +The following query customization options are available in the `IDocumentQueryCustomization` interface: + +- [BeforeQueryExecuted](../../../client-api/session/querying/how-to-customize-query.mdx#beforequeryexecuted) +- [AfterQueryExecuted](../../../client-api/session/querying/how-to-customize-query.mdx#afterqueryexecuted) +- [AfterStreamExecuted](../../../client-api/session/querying/how-to-customize-query.mdx#afterstreamexecuted) +- [NoCaching](../../../client-api/session/querying/how-to-customize-query.mdx#nocaching) +- [NoTracking](../../../client-api/session/querying/how-to-customize-query.mdx#notracking) +- [ProjectionBehavior](../../../client-api/session/querying/how-to-customize-query.mdx#projectionbehavior) +- [RandomOrdering](../../../client-api/session/querying/how-to-customize-query.mdx#randomordering) +- [WaitForNonStaleResults](../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults) + + +## BeforeQueryExecuted + +Allows you to modify the index query just before it's executed. + + + +{`IDocumentQueryCustomization addBeforeQueryExecutedListener(Consumer action); +IDocumentQueryCustomization removeBeforeQueryExecutedListener(Consumer action); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **action** | Consumer<IndexQuery> | Action that will modify IndexQuery. | + +| Return Value | | +| ------------- | ----- | +| IDocumentQueryCustomization | Returns self for easier method chaining. | + +### Example + + + +{`session.advanced().addBeforeQueryListener( + (sender, event) -> event.getQueryCustomization().addBeforeQueryExecutedListener( + // set 'pageSize' to 10 + q -> q.setPageSize(10))); + +session.query(Employee.class).toList(); +`} + + + + + +## AfterQueryExecuted + +Allows you to retrieve a raw query result after it's executed. + + + +{`IDocumentQueryCustomization addAfterQueryExecutedListener(Consumer action); +IDocumentQueryCustomization removeAfterQueryExecutedListener(Consumer action); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **action** | Consumer<QueryResult> | Action that has the query result. | + +| Return Value | | +| ------------- | ----- | +| IDocumentQueryCustomization | Returns self for easier method chaining. | + +### Example + + + +{`AtomicReference queryDuration = new AtomicReference<>(); + +session.query(Employee.class) + .addAfterQueryExecutedListener(result -> \{ + queryDuration.set(Duration.ofMillis(result.getDurationInMs())); + \}) + .toList(); +`} + + + + + +## AfterStreamExecuted + +Allows you to retrieve a raw result of the streaming query. 
+ + + +{`IDocumentQueryCustomization addAfterStreamExecutedListener(Consumer action); +IDocumentQueryCustomization removeAfterStreamExecutedListener(Consumer action); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **action** | Consumer<ObjectNode> | Action that has the single query result. | + +| Return Value | | +| ------------- | ----- | +| IDocumentQueryCustomization | Returns self for easier method chaining. | + +### Example + + + +{`Reference totalStreamedResultsSize = new Reference(0L); + +session.query(Employee.class) + .addAfterStreamExecutedListener(result -> \{ + totalStreamedResultsSize.value += result.size(); + \}) + .toList(); +`} + + + + + +## NoCaching + +By default, queries are cached. To disable query caching use the `noCaching` customization. + + + +{`IDocumentQueryCustomization noCaching(); +`} + + + +| Return Value | | +| ------------- | ----- | +| IDocumentQueryCustomization | Returns self for easier method chaining. | + +### Example + + + +{`session.advanced().addBeforeQueryListener( + ((sender, event) -> event.getQueryCustomization().noCaching())); + +List results = session.query(Employee.class) + .whereEquals("FirstName", "Robert") + .toList(); +`} + + + + + +## NoTracking + +To disable entity tracking by `session` use `noTracking`. Usage of this option will prevent holding the query results in memory. + + + +{`IDocumentQueryCustomization noTracking(); +`} + + + +| Return Value | | +| ------------- | ----- | +| IDocumentQueryCustomization | Returns self for easier method chaining. | + +### Example + + + +{`session.advanced().addBeforeQueryListener( + ((sender, event) -> event.getQueryCustomization().noTracking())); + +List results = session.query(Employee.class) + .whereEquals("FirstName", "Robert") + .toList(); +`} + + + + + +## ProjectionBehavior + +By default, queries are satisfied with the values stored in the index. If the index +doesn't contain the requested values, they are retrieved from the documents +themselves. + +This behavior can be configured using the `projection` option, which takes a +`ProjectionBehavior`: + + + +{`IDocumentQueryCustomization projection(ProjectionBehavior projectionBehavior); + +public enum ProjectionBehavior \{ + DEFAULT, + FROM_INDEX, + FROM_INDEX_OR_THROW, + FROM_DOCUMENT, + FROM_DOCUMENT_OR_THROW +\} +`} + + + +* `Default` - query will be satisfied with indexed data when possible, and directly +from the document when it is not. +* `FromIndex` - query will be satisfied with indexed data when possible, and when +it is not, the field is skipped. +* `FromIndexOrThrow` - query will be satisfied with indexed data. If the index does +not contain the requested data, an exception is thrown. +* `FromDocument` - query will be satisfied with document data when possible, and +when it is not, the field is skipped. +* `FromDocumentOrThrow` - query will be satisfied with document data. If the +document does not contain the requested data, an exception is thrown. + +### Example + + + +{`session.advanced().addBeforeQueryListener((sender, event) + -> event.getQueryCustomization().projection(ProjectionBehavior.DEFAULT)); + +List results = session.query(Employee.class) + .selectFields(Employee.class,"name") + .toList(); +`} + + + + + +## RandomOrdering + +To order results randomly, use the `randomOrdering` method. 
+ + + +{`IDocumentQueryCustomization randomOrdering(); + +IDocumentQueryCustomization randomOrdering(String seed); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **seed** | String | Seed used for ordering. Useful when repeatable random queries are needed. | + +| Return Value | | +| ------------- | ----- | +| IDocumentQueryCustomization | Returns self for easier method chaining. | + +### Example + + + +{`session.advanced().addBeforeQueryListener( + (sender, event) -> event.getQueryCustomization().randomOrdering()); + +//result will be ordered randomly each time +List results = session.query(Employee.class) + .whereEquals("FirstName", "Robert") + .toList(); +`} + + + + + +## WaitForNonStaleResults + +Queries can be 'instructed' to wait for non-stale results for a specified amount of time using the `waitForNonStaleResults` method. If the query won't be able to return +non-stale results within the specified (or default) timeout, then a `TimeoutException` is thrown. + +Note: This feature is Not available when [streaming the query results](../../../client-api/session/querying/how-to-stream-query-results.mdx). +Calling _waitForNonStaleResults_ with a streaming query will throw an exception. + + +If a query sent to the server specifies that it needs to wait for non-stale results, then RavenDB sets the cutoff Etag for the staleness check. +It is the Etag of the last document (or document tombstone), from the collection(s) processed by the index, as of the query arrived to the server. +This way the server won't be waiting forever for the non-stale results even though documents are constantly updated meanwhile. + +If the last Etag processed by the index is greater than the cutoff then the results are considered as non-stale. + + + + + +{`IDocumentQueryCustomization waitForNonStaleResults(); + +IDocumentQueryCustomization waitForNonStaleResults(Duration waitTimeout); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **waitTimeout** | Duration | Time to wait for an index to return non-stale results. The default is 15 seconds. | + +| Return Value | | +| ------------- | ----- | +| IDocumentQueryCustomization | Returns self for easier method chaining. | + +### Example + + + +{`session.advanced().addBeforeQueryListener( + (sender, event) -> event.getQueryCustomization().waitForNonStaleResults()); + +List results = session.query(Employee.class) + .whereEquals("FirstName", "Robert") + .toList(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-nodejs.mdx new file mode 100644 index 0000000000..088475b077 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-nodejs.mdx @@ -0,0 +1,504 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The following query customization methods that are available for the **.NET client** under `IDocumentQueryCustomization` + are also available in the **Node.js client**. + +* These methods can be used for both a dynamic-query and an index-query. + +* A [query](../../../client-api/session/querying/how-to-query.mdx) can also be customized on the Store or Session level by subscribing to the `beforeQuery` event. + Learn more in [Subscribing to Events](../../../client-api/session/how-to/subscribe-to-events.mdx). 
+ +* Customization methods available: + + - [on ("beforeQueryExecuted")](../../../client-api/session/querying/how-to-customize-query.mdx#on-("beforequeryexecuted")) + - [on ("afterQueryExecuted")](../../../client-api/session/querying/how-to-customize-query.mdx#on-("afterqueryexecuted")) + - [noCaching](../../../client-api/session/querying/how-to-customize-query.mdx#nocaching) + - [noTracking](../../../client-api/session/querying/how-to-customize-query.mdx#notracking) + - [projectionBehavior](../../../client-api/session/querying/how-to-customize-query.mdx#projectionbehavior) + - [randomOrdering](../../../client-api/session/querying/how-to-customize-query.mdx#randomordering) + - [timings](../../../client-api/session/querying/how-to-customize-query.mdx#timings) + - [waitForNonStaleResults](../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults) + + +## on (beforeQueryExecuted) + +* Use `on("beforeQueryExecuted")` to customize the query just before it is executed. + + + +**Example** + + + + +{`const results = await session + // Query an index + .query(BlogPost, BlogPosts_ByTag) + // Provide a callback for the 'beforeQueryExecuted' event + .on("beforeQueryExecuted", query => { + // Can modify query parameters + query.skipDuplicateChecking = true; + // Can apply any needed action, e.g. write to log/console + console.log(\`Query to be executed is: \${query.query}\`); + }) + .all(); +`} + + + + +{`class BlogPosts_ByTag extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map(BlogPost, b => { + const result = []; + + b.tags.forEach(item => { + result.push({ + tag: item + }); + }); + + return result; + + // This fanout index outputs multiple index entries per each document, + // (an index-entry per tag in from the tags list). + // The query can be customized to return the documents without the duplicates, + // (see the query example in the first tab). + }) + } +} + +class BlogPost { + constructor() { + this.id = null; + this.title = null; + this.body = null; + this.tags = null; + } +} + +class TagResult { + constructor() { + this.tag = null; + } +} +`} + + + + + + + + +**Syntax** + + + +{`on("beforeQueryExecuted", eventHandler); +`} + + + +| Parameters | Type | Description | +|------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------------| +| **eventHandler** | (query) => void | A callback method that is invoked when the `beforeQueryExecuted` event is emitted.
The passed query param is of type `IndexQuery`. | + +
+ + + +## on (afterQueryExecuted) + +* Use `on("afterQueryExecuted")` to access the raw query result after it is executed. + + + +**Example** + + + +{`let queryDuration = 0; + +const results = await session + .query(\{ collection: "employees" \}) + // Provide a callback for the 'afterQueryExecuted' event + .on("afterQueryExecuted", rawResult => \{ + // Can access the raw query result + queryDuration = rawResult.durationInMs + // Can apply any needed action, e.g. write to log/console + console.log(\`$\{rawResult.lastQueryTime\}\`); + \}) + .all(); +`} + + + + + + + +**Syntax** + + + +{`on("afterQueryExecuted", eventHandler); +`} + + + +| Parameters | Type | Description | +|--------------------------|-----------------------|----------------------------------------------------------------------------------------------------------------------------------------| +| **eventHandler** | (queryResult) => void | A callback method that is invoked when the `afterQueryExecuted` event is emitted.
The passed query param is of type `QueryResult`. | + +
+ + + +## noCaching + +* By default, query results are cached. + +* You can use the `noCaching` customization to disable query caching. + + + +**Example** + + + +{`const results = await session + .query(\{ collection: "employees" \}) + .whereEquals("firstName", "Robert") + // Add a call to 'noCaching' + .noCaching() + .all(); +`} + + + + + + + +**Syntax** + + + +{`noCaching(); +`} + + + + + + + +## noTracking + +* By default, the [Session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) tracks all changes made to all entities that it has either loaded, stored, or queried for. + +* You can use the `noTracking` customization to disable entity tracking. + +* See [disable entity tracking](../../../client-api/session/configuration/how-to-disable-tracking.mdx) for all other options. + + + +**Example** + + + +{`const results = await session + .query(\{ collection: "employees" \}) + .whereEquals("firstName", "Robert") + // Add a call to 'noTrcking' + .noTracking() + .all(); +`} + + + + + + + +**Syntax** + + + +{`noTracking(); +`} + + + + + + + +## projectionBehavior + +* By default, when [querying an index](../../../indexes/querying/query-index.mdx), and projecting query results + (projecting means the query returns only specific document fields instead of the full document) + then the server will try to retrieve the fields' values from the fields [stored in the index](../../../indexes/storing-data-in-index.mdx). + +* If the index does Not store those fields then the fields' values will be retrieved from the documents store. + +* Use the `selectFields` method to customize and modify this behavior for the specified fields. + +* Note: + Entities resulting from a projecting query are Not tracked by the session. + Learn more about projections in: + * [Projections](../../../indexes/querying/projections.mdx) + * [How to project query results](../../../client-api/session/querying/how-to-project-query-results.mdx) + + + +**Example** + + + + +{`// The projection class: +class EmployeeProjectedDetails { + constructor() { + this.fullName = null; + } +} + +// Define a query with a projection +const query = session + // Query an index that has stored fields + .query(Employee, Employee_ByFullName) + // Use 'selectFields' to project the query results + // Pass the requested projection behavior (3'rd param) + .selectFields(["fullName"], EmployeeProjectedDetails, "FromDocumentOrThrow") + .all(); + +// * Field 'fullName' is stored in the index. +// However, the server will try to fetch the value from the document +// since the default behavior was modified to \`FromDocumentOrThrow\`. + +// * An exception will be thrown - +// since an 'Employee' document does not contain the property 'fullName'. +// (based on the Northwind sample data). 
+`} + + + + +{`class Employee_ByFullName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map(Employee, e => { + return { + fullName: e.firstName + " " + e.lastName + } + }) + + // Store field 'fullName' in the index + this.store("fullName", "Yes"); + } +} +`} + + + + + + + + +**Syntax** + + + +{`selectFields(properties, projectionClass, projectionBehavior); +`} + + + +| Parameters | Type | Description | +|------------------------|----------|----------------------------------------------------------------| +| **properties** | string[] | Fields' names for which to fetch values | +| **projectionClass** | object | The projected results class | +| **projectionBehavior** | string | The requested projection behavior, see available options below | + +* `Default` + Retrieve values from the stored index fields when available. + If fields are not stored then get values from the document, + a field that is not found in the document is skipped. +* `FromIndex` + Retrieve values from the stored index fields when available. + A field that is not stored in the index is skipped. +* `FromIndexOrThrow` + Retrieve values from the stored index fields when available. + An exception is thrown if the index does not store the requested field. +* `FromDocument` + Retrieve values directly from the documents store. + A field that is not found in the document is skipped. +* `FromDocumentOrThrow` + Retrieve values directly from the documents store. + An exception is thrown if the document does not contain the requested field. + + + + + +## randomOrdering + +* Use `RandomOrdering` to order the query results randomly. + +* More ordering options are available in this [Sorting](../../../client-api/session/querying/sort-query-results.mdx) article. + + + +**Example** + + + +{`const results = await session + .query(\{ collection: "employees" \}) + // Add a call to 'randomOrdering', can pass a seed + .randomOrdering("123") + .all(); +`} + + + + + + + +**Syntax** + + + +{`randomOrdering(); +randomOrdering(seed); +`} + + + +| Parameters | Type | Description | +|------------|--------|--------------------------------------------------------------------------------------------------------| +| **seed** | string | Order the search results randomly using this seed.
Useful when executing repeated random queries. | + +
+ + + +## timings + +* Use `Timings` to get detailed stats of the time spent by the server on each part of the query. + +* The timing statistics will be included in the query results. + +* Learn more in [how to include query timings](../../../client-api/session/querying/debugging/query-timings.mdx). + + + +**Example** + + + +{`let timingsResults; + +const results = await session.query(\{ collection: "Products" \}) + .whereEquals("firstName", "Robert") + // Call 'timings', pass a callback function + // Output param 'timingsResults' will be filled with the timings details when query returns + .timings(t => timingsResults = t) + .all(); +`} + + + + + + + +**Syntax** + + + +{`timings(timingsCallback) +`} + + + +| Parameters | Type | Description | +| - |----------------| - | +| **timings** | `QueryTimings` | An _out_ param that will be filled with the timings results | + +| `QueryTimings` | | | +| - |-----------------------------------|---------------------------------------------------| +| **DurationInMs** | long | Total duration | +| **Timings** | IDictionary<string, QueryTimings> | Dictionary with _QueryTimings_ info per time part | + + + + + + +## waitForNonStaleResults + +* All queries in RavenDB provide results using an index, even when you don't specify one. + See detailed explanation in [Queries always provide results using an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). + +* Use `waitForNonStaleResults` to instruct the query to wait for non-stale results from the index. + +* A `TimeoutException` will be thrown if the query is not able to return non-stale results within the specified + (or default) timeout. + +* Note: This feature is Not available when [streaming the query results](../../../client-api/session/querying/how-to-stream-query-results.mdx). + Calling _waitForNonStaleResults_ with a streaming query will throw an exception. + +* Learn more about stale results in [stale indexes](../../../indexes/stale-indexes.mdx). + + + +**Example** + + + +{`const results = await session.query(\{ collection: "Products" \}) + .whereEquals("firstName", "Robert") + // Call 'waitForNonStaleResults', + // Optionally, pass the time to wait. Default is 15 seconds. + .waitForNonStaleResults(10_000) + .all(); +`} + + + + + + + +**Syntax** + + + +{`waitForNonStaleResults(); +waitForNonStaleResults(waitTimeout); +`} + + + +| Parameters | Type | Description | +|-----------------|--------|---------------------------------------------------------------------------------------| +| **waitTimeout** | number | Time (ms) to wait for an index to return non-stale results.
Default is 15 seconds. | + +
+ + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-php.mdx new file mode 100644 index 0000000000..24c0313d65 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-php.mdx @@ -0,0 +1,349 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the below methods to set customization options on a specific [query](../../../client-api/session/querying/how-to-query.mdx). + Customizations can be set for both **dynamic** and **index** queries. + +* A query can also be customized on the store or session level by + [subscribing to `OnBeforeQuery`](../../../client-api/session/how-to/subscribe-to-events.mdx). + +* Available customization methods : + - [`addBeforeQueryExecutedListener` and `removeBeforeQueryExecutedListener`](../../../client-api/session/querying/how-to-customize-query.mdx#addbeforequeryexecutedlistener-and-removebeforequeryexecutedlistener) + - [`addAfterQueryExecutedListener` and `removeAfterQueryExecutedListener`](../../../client-api/session/querying/how-to-customize-query.mdx#addafterqueryexecutedlistener-and-removeafterqueryexecutedlistener) + - [`noCaching`](../../../client-api/session/querying/how-to-customize-query.mdx#nocaching) + - [`noTracking`](../../../client-api/session/querying/how-to-customize-query.mdx#notracking) + - [Projection](../../../client-api/session/querying/how-to-customize-query.mdx#randomordering) + - [`randomOrdering`](../../../client-api/session/querying/how-to-customize-query.mdx#randomordering) + - [`waitForNonStaleResults`](../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults) +* [Methods return value](../../../client-api/session/querying/how-to-customize-query.mdx#methods-return-value) + + + +## `addBeforeQueryExecutedListener` and `removeBeforeQueryExecutedListener` + +* Use these methods to customize the query just before it is executed. + +**Example** + + + +{`$session->advanced()->addBeforeQueryListener(function ($sender, BeforeQueryEventArgs $event) \{ + $event->getQueryCustomization() + ->addBeforeQueryExecutedListener(function($q) \{ + // set 'pageSize' to 10 + $q->setPageSize(10); + \}); +\}); + +$session->query(Employee::class)->toList(); +`} + + + + + +**Syntax** + + + +{`public function addBeforeQueryExecutedListener(Closure $action): DocumentQueryCustomizationInterface; +public function removeBeforeQueryExecutedListener(Closure $action): DocumentQueryCustomizationInterface; +`} + + + +| Parameters | Type | Description | +|------------| ---- |------------------------------------------------------| +| **$action** | `Closure` | An _$action_ method that operates on the query | + + + + + +## `addAfterQueryExecutedListener` and `removeAfterQueryExecutedListener` + +* Use these methods to access the raw query result after it is executed. 
+ +**Example** + + + +{`$queryDuration = null; + +$session->query(Employee::class) + ->addAfterQueryExecutedListener(function($result) use (&$queryDuration) \{ + $queryDuration = Duration::ofMillis($result->getDurationInMs()); + \}) + ->toList(); +`} + + + + + +**Syntax** + + + +{`public function addAfterQueryExecutedListener(Closure $action): DocumentQueryCustomizationInterface; +public function removeAfterQueryExecutedListener(Closure $action): DocumentQueryCustomizationInterface; +`} + + + +| Parameters | Type | Description | +|------------| ---- |-------------| +| **$action** | `Closure` | An _Action_ method that receives the raw query result | + + + + + +## `noCaching` + +* By default, query results are cached. + +* You can use the `noCaching` customization to disable query caching. + +* Learn more in [disable caching per session](../../../client-api/session/configuration/how-to-disable-caching.mdx). + +**Example** + + + +{`$session->advanced() + ->addBeforeQueryListener(function($sender, BeforeQueryEventArgs $event) \{ + $event->getQueryCustomization()->noCaching(); + \}); + +$results = $session->query(Employee::class) + ->whereEquals("FirstName", "Robert") + ->toList(); +`} + + + +**Syntax** + + + +{`public function noCaching(): DocumentQueryCustomizationInterface +`} + + + + + +## `noTracking` + +* By default, the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) tracks all changes made to all entities that it has either loaded, stored, or queried for. + +* You can use the `noTracking` customization to disable entity tracking. + +* See [disable entity tracking](../../../client-api/session/configuration/how-to-disable-tracking.mdx) for all other options. + +**Example** + + + +{`$session->advanced() + ->addBeforeQueryListener(function($sender, BeforeQueryEventArgs $event) \{ + $event->getQueryCustomization()->noTracking(); + \}); + +$results = $session->query(Employee::class) + ->whereEquals("FirstName", "Robert") + ->toList(); +`} + + + +**Syntax** + + + +{`public function noTracking(): DocumentQueryCustomizationInterface +`} + + + + + +## Projection + +* By default, when [querying an index](../../../indexes/querying/query-index.mdx) and projecting query results, + the server will try to retrieve field values from the fields [stored in the index](../../../indexes/storing-data-in-index.mdx). + + Projecting means the query returns only specific document fields instead of the full document. + + +* If the index does Not store these fields, the field values will be retrieved from the documents. + +* Use the `projection` method to customize and modify this behavior. + +* Note: + Entities resulting from a projecting query are Not tracked by the session. 
+ Learn more about projections in: + + * [Project index query results](../../../indexes/querying/projections.mdx) + * [Project dynamic query results](../../../client-api/session/querying/how-to-project-query-results.mdx) + +**Example** + + + +{`$session->advanced() + ->addBeforeQueryListener(function($sender, BeforeQueryEventArgs $event) \{ + $event->getQueryCustomization()->projection(ProjectionBehavior::default()); + \}); + +$results = $session->query(Employee::class) + ->selectFields(Employee::class, "name") + ->toList(); +`} + + + +**Syntax** + + + +{`public function projection(ProjectionBehavior $projectionBehavior): DocumentQueryCustomizationInterface; + +class ProjectionBehavior +\{ + public static function default(): ProjectionBehavior; + public static function fromIndex(): ProjectionBehavior; + public static function fromIndexOrThrow(): ProjectionBehavior; + public static function fromDocument(): ProjectionBehavior; + public static function fromDocumentOrThrow(): ProjectionBehavior; +\} +`} + + + +* `default` + Retrieve values from the stored index fields when available. + If fields are not stored then get values from the document, + a field that is not found in the document is skipped. +* `fromIndex` + Retrieve values from the stored index fields when available. + A field that is not stored in the index is skipped. +* `fromIndexOrThrow` + Retrieve values from the stored index fields when available. + An exception is thrown if the index does not store the requested field. +* `fromDocument` + Retrieve values directly from the documents store. + A field that is not found in the document is skipped. +* `fromDocumentOrThrow` + Retrieve values directly from the documents store. + An exception is thrown if the document does not contain the requested field. + + + +## `randomOrdering` + +* Use `randomOrdering` to order the query results randomly. + +* More ordering options are available in this [Sorting](../../../client-api/session/querying/sort-query-results.mdx) article. + +**Example** + + + +{`$session->advanced() + ->addBeforeQueryListener(function($sender, BeforeQueryEventArgs $event) \{ + $event->getQueryCustomization()->randomOrdering(); + \}); + +// Result will be ordered randomly each time +$results = $session->query(Employee::class) + ->whereEquals("FirstName", "Robert") + ->toList(); +`} + + + +**Syntax** + + + +{`public function randomOrdering(): DocumentQueryCustomizationInterface; +public function randomOrdering(?string $seed): DocumentQueryCustomizationInterface; +`} + + + +| Parameters | Type | Description | +|------------| ------------- |---------------------------------------------------------------------------------------------------| +| **$seed** | `?string` | Order the search results randomly using this seed.
Useful when executing repeated random queries. | + + + +## `waitForNonStaleResults` + +* All queries in RavenDB provide results using an index, even when you don't specify one. + See detailed explanation in [Queries always provide results using an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). + +* Use `waitForNonStaleResults` to instruct the query to wait for non-stale results from the index. + +* A `TimeoutException` will be thrown if the query is not able to return non-stale results within the specified + (or default) timeout. + +* Learn more about stale results in [stale indexes](../../../indexes/stale-indexes.mdx). + + + +**Example** + + + +{`$session->advanced() + ->addBeforeQueryListener(function($sender, BeforeQueryEventArgs $event) \{ + $event->getQueryCustomization()->waitForNonStaleResults(); + \}); + +$results = $session->query(Employee::class) + ->whereEquals("FirstName", "Robert") + ->toList(); +`} + + + + + + + +**Syntax** + + + +{`public function waitForNonStaleResults(): DocumentQueryCustomizationInterface; +public function waitForNonStaleResults(Duration $waitTimeout): DocumentQueryCustomizationInterface; +`} + + + +| Parameters | Type | Description | +|------------| ------------- |-----------| +| **$waitTimeout** | `Duration` | Time to wait for non-stale results.
Default is 15 seconds. | + +
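The example above relies on the default timeout. To set an explicit timeout, pass a `Duration` to the overload from the syntax above. A minimal sketch, assuming the client's `Duration` helper exposes `ofSeconds` alongside the `ofMillis` used earlier on this page:

{`$session->advanced()
    ->addBeforeQueryListener(function($sender, BeforeQueryEventArgs $event) \{
        // Wait up to 30 seconds for the index to return non-stale results
        // (Duration::ofSeconds is an assumption, mirroring Duration::ofMillis used above)
        $event->getQueryCustomization()
            ->waitForNonStaleResults(Duration::ofSeconds(30));
    \});

$results = $session->query(Employee::class)
    ->whereEquals("FirstName", "Robert")
    ->toList();
`}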
+ + + +## Methods return value + +All of the above customization methods return the following: + +| Return value | | +|-----------------------------| ----- | +| `DocumentQueryCustomizationInterface` | Returns self for easier method chaining | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-python.mdx new file mode 100644 index 0000000000..35a7f6b5b2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-customize-query-python.mdx @@ -0,0 +1,603 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the below customization methods over a specific [query](../../../client-api/session/querying/how-to-query.mdx). + +* The customization methods can be set for both **dynamic** and **index** queries. + +* A query can be customized on the Store or Session level by calling various methods from the rich API. + Learn more in [Subscribing to Events](../../../client-api/session/how-to/subscribe-to-events.mdx). + +* Customization methods available: + + - [before_query_executed](../../../client-api/session/querying/how-to-customize-query.mdx#before_query_executed) + - [after_query_executed](../../../client-api/session/querying/how-to-customize-query.mdx#after_query_executed) + - [after_stream_executed](../../../client-api/session/querying/how-to-customize-query.mdx#after_stream_executed) + - [no_caching](../../../client-api/session/querying/how-to-customize-query.mdx#no_caching) + - [no_tracking](../../../client-api/session/querying/how-to-customize-query.mdx#no_tracking) + - [projection](../../../client-api/session/querying/how-to-customize-query.mdx#projection) + - [random_ordering](../../../client-api/session/querying/how-to-customize-query.mdx#random_ordering) + - [timings](../../../client-api/session/querying/how-to-customize-query.mdx#timings) + - [wait_for_non_stale_results](../../../client-api/session/querying/how-to-customize-query.mdx#wait_for_non_stale_results) + +* [Methods return value](../../../client-api/session/querying/how-to-customize-query.mdx#methods-return-value) + + + +## `before_query_executed` + +* Use `before_query_executed` to customize the query just before it is executed. + + + +**Example** + + + + +{`def __before_query_executed_callback(query: IndexQuery): + # Can modify query parameters + query.skip_duplicate_checking = True + # Can apply any needed action, e.g. write to log + logger.info(f"Query to be executed is: {query.query}") + +results = list( + session.query(object_type=Employee) + # Call 'add_before_query_executed_listener' + .add_before_query_executed_listener(__before_query_executed_callback).where_equals( + "first_name", "Robert" + ) +) +`} + + + + +{`def __before_query_executed_callback(query: IndexQuery): + # Can modify query parameters + query.skip_duplicate_checking = True + # Can apply any needed action, e.g. write to log + logger.info(f"Query to be executed is: {query.query}") + +results = list( + session.advanced.raw_query("from 'Employees' where FirstName == 'Robert'") + # Call 'add_before_query_executed_listener' + .add_before_query_executed_listener(__before_query_executed_callback) +) +`} + + + + + + + + +**Syntax** + + + +{`def add_before_query_executed_listener(self, action: Callable[[IndexQuery], None]) -> DocumentQuery[_T]: ... 
+`} + + + +| Parameters | Type | Description | +|------------| ---- |---------------------------------------------------------------------------------------------------------------------------------| +| **action** | `Callable[[IndexQuery], None]` | An _action_ method that operates on the query.
The query is passed in the [IndexQuery](../../../glossary/index-query.mdx) param. | + +
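The callback receives the full `IndexQuery` object, so other query parameters can be adjusted the same way. A minimal sketch, assuming `page_size` is a settable `IndexQuery` attribute (an assumption, mirroring `skip_duplicate_checking` above):

{`def __limit_page_size_callback(query: IndexQuery):
    # Cap the number of results this query returns
    # ('page_size' is assumed to be a settable IndexQuery attribute)
    query.page_size = 10

results = list(
    session.query(object_type=Employee)
    # Call 'add_before_query_executed_listener'
    .add_before_query_executed_listener(__limit_page_size_callback)
    .where_equals("first_name", "Robert")
)
`}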
+ + + +## `after_query_executed` + +* Use `after_query_executed` to access the raw query result after it is executed. + + + +**Example** + + + + +{`def __after_query_executed_callback(raw_result: QueryResult): + # Can access the raw query result + query_duration = raw_result.duration_in_ms + # Can apply any needed action, e.g. write to log + logger.info(f"{raw_result.last_query_time}") + +results = list( + session.query(object_type=Employee) + # Call 'add_after_query_executed_listener' + .add_after_query_executed_listener(__after_query_executed_callback) +) +`} + + + + +{`def __after_query_executed_callback(raw_result: QueryResult): + # Can access the raw query result + query_duration = raw_result.duration_in_ms + # Can apply any needed action, e.g. write to log + logger.info(f"{raw_result.last_query_time}") + +result = list( + session.advanced.raw_query("from 'Employees'") + # Call 'add_after_query_executed_listener' + .add_after_query_executed_listener(__after_query_executed_callback) +) +`} + + + + + + + + +**Syntax** + + + +{`def add_after_query_executed_listener(self, action: Callable[[QueryResult], None]) -> DocumentQuery[_T]: ... +`} + + + +| Parameters | Type | Description | +|------------| ---- |-----------------------------------------------------------------------------------------------------------------------------| +| **action** | `Callable[[QueryResult], None]` | An _action_ method that receives the raw query results.
The query result is passed in the [QueryResult](../../../glossary/query-result.mdx) param. | + +
+ + + +## `after_stream_executed` + +* Use `after_stream_executed` to retrieve a raw (blittable) result of the streaming query. + +* Learn more in [how to stream query results](../../../client-api/session/querying/how-to-stream-query-results.mdx). + + + +**Syntax** + + + +{`def add_after_stream_executed_listener(self, action: Callable[[dict], None]) -> DocumentQuery[_T]: ...
`} + + + +| Parameters | Type | Description | +|------------| ---- |----------------------------------------------------------------------------------------------------------------| +| **action** | `Callable[[dict], None]` | An _action_ method that receives a single stream query result.
The stream result is passed in the `dict` param. | + +
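A minimal usage sketch: the listener is attached to a query that is then streamed. The `session.advanced.stream` call is an assumption based on the streaming article linked above:

{`def __after_stream_executed_callback(stream_result: dict):
    # Each raw (blittable) stream result is passed to the callback as a dict
    print(stream_result.get("@metadata", \{\}).get("@id"))

query = (
    session.advanced.document_query(object_type=Employee)
    # Call 'add_after_stream_executed_listener'
    .add_after_stream_executed_listener(__after_stream_executed_callback)
)

# Streaming the query triggers the callback once per raw result
# ('session.advanced.stream' is an assumption - see the streaming article linked above)
for result in session.advanced.stream(query):
    pass
`}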
+ + + +## `no_caching` + +* By default, query results are cached. + +* You can use the `no_caching` customization method to disable query caching. + +* Learn more in [disable caching per session](../../../client-api/session/configuration/how-to-disable-caching.mdx). + + + +**Example** + + + + +{`results = list( + session.query(object_type=Employee) + # Call 'no_caching' + .no_caching().where_equals("first_name", "Robert") +) +`} + + + + +{`results = list( + session.advanced.raw_query("from 'Employees' where first_name == 'Robert'") + # Call 'no_caching' + .no_caching() +) +`} + + + + + + + + +**Syntax** + + + +{`def no_caching(self) -> DocumentQuery[_T]: ... +`} + + + + + + + +## `no_tracking` + +* By default, the [Session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) tracks all changes made to all entities that it has either loaded, stored, or queried for. + +* You can use the `no_tracking` customization to disable entity tracking. + +* See [disable entity tracking](../../../client-api/session/configuration/how-to-disable-tracking.mdx) for all other options. + + + +**Example** + + + + +{`results = list( + session.query(object_type=Employee) + # Call 'no_tracking\` + .no_tracking().where_equals("first_name", "Robert") +) +`} + + + + +{`results = list( + session.advanced.raw_query("from 'Employees' where first_name == 'Robert'") + # Call 'no_tracking' + .no_tracking() +) +`} + + + + + + + + +**Syntax** + + + +{`def no_tracking(self) -> DocumentQuery[_T]: ... +`} + + + + + + + +## `projection` + +* By default, when [querying an index](../../../indexes/querying/query-index.mdx) and projecting query results, + the server will try to retrieve field values from the fields [stored in the index](../../../indexes/storing-data-in-index.mdx). + + Projecting means the query returns only specific document fields instead of the full document. + + +* If these fields are not stored in the index, the field values will be retrieved from the documents. + +* Use the `select_fields` method to customize and modify this behavior. + +* Note: + Entities resulting from a projecting query are Not tracked by the session. + Learn more about projections in: + * [Project index query results](../../../indexes/querying/projections.mdx) + * [Project dynamic query results](../../../client-api/session/querying/how-to-project-query-results.mdx) + + + +**Example** + + + + +{`results = list( + session.query_index_type(Employees_ByFullName, Employees_ByFullName.IndexEntry) + # Pass the requested projection behavior to the 'select_fields' method + # and specify the field that will be returned to the projection + .select_fields( + Employees_ByFullName.IndexEntry, + "full_name", + projection_behavior=ProjectionBehavior.FROM_INDEX_OR_THROW, + ) +) +`} + + + + +{`results = list( + session.advanced.raw_query( + "from index 'Employees/ByFullName' select full_name", Employees_ByFullName.IndexEntry + ).projection(ProjectionBehavior.FROM_DOCUMENT_OR_THROW) +) +`} + + + + +{`class Employees_ByFullName(AbstractIndexCreationTask): + class IndexEntry: + def __init__(self, full_name: str = None): + self.full_name = full_name + + def __init__(self): + super().__init__() + self.map = ( + "docs.Employees.Select(employee => new { " + " full_name = (employee.first_name + ' ' + employee.last_name)" + "})" + ) + self._store("full_name", FieldStorage.YES) +`} + + + + +In the above example: + + * The _'full_name'_ field is stored in the index (see index definition in the rightmost tab). 
+ However, the server will try to fetch the value from the document since the default behavior was modified to `FROM_DOCUMENT_OR_THROW`. + * An exception will be thrown since an _'Employee'_ document does not contain the _'full_name'_ property. + (based on the Northwind sample data). + + + + + + + + +## `random_ordering` + +* Use `random_ordering` to order the query results randomly. + +* Learn [here](../../../client-api/session/querying/sort-query-results.mdx) about additional ordering options. + + + +**Example** + + + + +{`results = list( + session.query(object_type=Employee) + # Call 'random_ordering' + .random_ordering() +) +`} + + + + +{`results = list( + session.advanced + # Define an RQL query that orders the results randomly + .raw_query("from 'Employees' order by random()", Employee) +) +`} + + + + + + + + +**Syntax** + + + +{`def random_ordering(self, seed: str = None) -> DocumentQuery[_T]: ... +`} + + + +| Parameters | Type | Description | +|------------| ------------- |-------------------------------------------------------------------------------------------------| +| **seed** | `str` | Order the search results randomly using this seed.
Useful when executing repeated random queries. | + +
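Passing a seed string makes the pseudo-random order repeatable across identical queries, e.g. for stable tests. A minimal sketch (the seed value itself is arbitrary):

{`results = list(
    session.query(object_type=Employee)
    # Identical queries using the same seed string
    # return results in the same pseudo-random order
    .random_ordering("some-fixed-seed")
)
`}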
+ + + +## `timings` + +* When executing a query, you can retrieve query statistics that include the time spent by the server on each part of the query. + +* To do this, define a callback function that takes `QueryTimings` as an argument and applies whatever + logic you want to apply. + +* Then pass your function as an argument to the `query.timings` method and use the retrieved `QueryTimings` object. + +* Learn more in [how to include query timings](../../../client-api/session/querying/debugging/query-timings.mdx). + + + +**Example** + + + + +{`def __timings_callback(timings: QueryTimings): + logger.log(logging.DEBUG, timings.duration_in_ms) + +results = list( + session.query(object_type=Employee).where_equals("first_name", "Robert") + # Call 'timings'. + # Provide a callback for the timings result - interact with QueryTimings inside the callback + .timings(__timings_callback) +) +`} + + + + +{`def __timings_callback(timings: QueryTimings): + logger.log(logging.DEBUG, timings.duration_in_ms) + +results = list( + session.advanced.raw_query("from 'Employees' where first_name == 'Robert'") + # Call 'timings'. + # Provide a callback for the timings result - interact with QueryTimings inside the callback + .timings(__timings_callback) +) +`} + + + + + + + + +**Syntax** + + + +{`def timings(self, timings_callback: Callable[[QueryTimings], None]) -> DocumentQuery[_T]: ... +`} + + + +| Parameters | Type | Description | +|------------| ------------- | ----- | +| **timings_callback** | `Callable[[QueryTimings], None]` | An _action_ that will be called with the timings results | + + + + + +## `wait_for_non_stale_results` + +* All RavenDB queries provide results using an index, even when an index is not specified. + See detailed explanation in [Queries always provide results using an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). + +* If `wait_for_non_stale_results` is used, the query will wait for non-stale results from the index. + +* A `TimeoutException` will be thrown if the query is unable to return non-stale results within the specified + (or default) timeout. + +* Note: This feature is Not available when [streaming the query results](../../../client-api/session/querying/how-to-stream-query-results.mdx). + Calling `wait_for_non_stale_results` with a streaming query will throw an exception. + +* Learn more about stale results in [stale indexes](../../../indexes/stale-indexes.mdx). + + + +**Example** + + + + +{`results = list( + session.query(object_type=Employee) + # Call 'wait_for_non_stale_results\` + .wait_for_non_stale_results(timedelta(seconds=10)).where_equals("first_name", "Robert") +) +`} + + + + +{`results = list( + session.advanced.raw_query("from 'Employees' where first_name == 'Robert'", Employee) + # Call 'wait_for_non_stale_results\` + .wait_for_non_stale_results(timedelta(seconds=10)) +) +`} + + + + + + + + +**Syntax** + + + +{`def wait_for_non_stale_results(self, wait_timeout: timedelta = None) -> DocumentQuery[_T]: ... +`} + + + +| Parameters | Type | Description | +|------------| ------------- |-----------| +| **wait_timeout** | `timedelta` | Time to wait for non-stale results.
Default: 15 seconds | + +
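Calling the method without an argument waits for up to the default timeout listed above. A minimal sketch:

{`results = list(
    session.query(object_type=Employee)
    # With no explicit timeout, the query waits up to the default 15 seconds
    .wait_for_non_stale_results()
    .where_equals("first_name", "Robert")
)
`}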
+ + + +## Methods return value + +All of the above customization methods return the following: + +| `document_query` return value | | +|-------------------------------| ----- | +| `DocumentQuery[_T]` | Returns self for easier method chaining | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-csharp.mdx new file mode 100644 index 0000000000..98199e8ba1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-csharp.mdx @@ -0,0 +1,121 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To query for documents that contain a particular field, + use the extension method `WhereExists()`, which is accessible from [DocumentQuery](../../../client-api/session/querying/document-query/what-is-document-query.mdx). + +* A document that doesn't contain the specified field will be excluded from the query results. + +* In this page: + * [Filter by field name](../../../client-api/session/querying/how-to-filter-by-field.mdx#filter-by-field-name) + * [Filter by field path](../../../client-api/session/querying/how-to-filter-by-field.mdx#filter-by-field-path) + * [Syntax](../../../client-api/session/querying/how-to-filter-by-field.mdx#syntax) + + +## Filter by field name + + + + +{`// Only documents that contain field 'FirstName' will be returned

List<Employee> results = session
    .Advanced
    .DocumentQuery<Employee>()
    .WhereExists("FirstName")
    // Or use lambda expression: .WhereExists(x => x.FirstName)
    .ToList();
`} + + + + +{`// Only documents that contain field 'FirstName' will be returned

List<Employee> results = await asyncSession
    .Advanced
    .AsyncDocumentQuery<Employee>()
    .WhereExists("FirstName")
    // Or use lambda expression: .WhereExists(x => x.FirstName)
    .ToListAsync();
`} + + + + +{`// Only documents that contain field 'FirstName' will be returned

from Employees
where exists("FirstName")
`} + + + + + + +## Filter by field path + + + + +{`// Only documents that contain the 'Latitude' property in the specified path will be returned

List<Employee> results = session
    .Advanced
    .DocumentQuery<Employee>()
    .WhereExists("Address.Location.Latitude")
    // Or use lambda expression: .WhereExists(x => x.Address.Location.Latitude)
    .ToList();
`} + + + + +{`// Only documents that contain the 'Latitude' property in the specified path will be returned

List<Employee> results = await asyncSession
    .Advanced
    .AsyncDocumentQuery<Employee>()
    .WhereExists("Address.Location.Latitude")
    // Or use lambda expression: .WhereExists(x => x.Address.Location.Latitude)
    .ToListAsync();
`} + + + + +{`// Only documents that contain the 'Latitude' property in the specified path will be returned

from Employees
where exists("Address.Location.Latitude")
`} + + + + + + +## Syntax + + + +{`IDocumentQuery<T> WhereExists(string fieldName);

IDocumentQuery<T> WhereExists(Expression<Func<T, object>> propertySelector);
`} + + + +| Parameters | Type | Description | +|----------------------|------------------------------|-----------------------------------------------------------------------| +| **fieldName** | `string` | The name / path of the document field to filter by | +| **propertySelector** | `Expression<Func<T, object>>` | Lambda expression with name / path of the document field to filter by | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-java.mdx
b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-java.mdx new file mode 100644 index 0000000000..e45c6de519 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-java.mdx @@ -0,0 +1,82 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To filter documents by whether they contain a particular field, use the `whereExists()` method, accessible +from the [Document Query](../../../client-api/session/querying/document-query/what-is-document-query.mdx). + +* If a document doesn't contain the specified field, it is excluded from the query results. + +* In this page: + * [Syntax](../../../client-api/session/querying/how-to-filter-by-field.mdx#syntax) + * [Examples](../../../client-api/session/querying/how-to-filter-by-field.mdx#examples) + + +## Syntax + + + +{`IDocumentQuery<T> whereExists(String fieldName);
`} + + + +| Parameters | Type | Description | +| - | - | - | +| **fieldName** | `String` | The name or path to the field you want to filter by | + + + +## Examples + +#### Filter by Field Name + + + + +{`List<Employee> results = session
    .advanced()
    .documentQuery(Employee.class)
    .whereExists("FirstName")
    .toList();
`} + + + + +{`from Employees
where exists("FirstName")
`} + + + + + +#### Filter by the Path to a Field + + + + +{`List<Employee> results = session
    .advanced()
    .documentQuery(Employee.class)
    .whereExists("Address.Location.Latitude")
    .toList();
`} + + + + +{`from Employees
where exists("Address.Location.Latitude")
`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-nodejs.mdx new file mode 100644 index 0000000000..aaac6c470f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-nodejs.mdx @@ -0,0 +1,85 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `whereExists()` method to query for documents that contain a particular field. + +* A document that doesn't contain the specified field will be excluded from the query results.
+ +* In this page: + * [Filter by field name](../../../client-api/session/querying/how-to-filter-by-field.mdx#filter-by-field-name) + * [Filter by field path](../../../client-api/session/querying/how-to-filter-by-field.mdx#filter-by-field-path) + * [Syntax](../../../client-api/session/querying/how-to-filter-by-field.mdx#syntax) + + +## Filter by field name + + + + +{`// Only documents that contain field 'firstName' will be returned

await session.query({ collection: "Employees" })
    .whereExists("firstName")
    .all();
`} + + + + +{`// Only documents that contain field 'firstName' will be returned

from Employees
where exists("firstName")
`} + + + + + + +## Filter by field path + + + + +{`// Only documents that contain the 'latitude' property in the specified path will be returned

await session.query({ collection: "Employees" })
    .whereExists("address.location.latitude")
    .all();
`} + + + + +{`// Only documents that contain the 'latitude' property in the specified path will be returned

from Employees
where exists("address.location.latitude")
`} + + + + + + +## Syntax + + + +{`whereExists(fieldName);
`} + + + +| Parameters | Type | Description | +|----------------------|------------------------------|----------------------------------------------------| +| **fieldName** | `string` | The name / path of the document field to filter by | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-php.mdx new file mode 100644 index 0000000000..64d6cb2c74 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-php.mdx @@ -0,0 +1,86 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To query for documents that contain a particular field, use the `whereExists` method. + +* A document that doesn't contain the specified field will be excluded from the query results.
+ +* In this page: + * [Filter by field name](../../../client-api/session/querying/how-to-filter-by-field.mdx#filter-by-field-name) + * [Filter by field path](../../../client-api/session/querying/how-to-filter-by-field.mdx#filter-by-field-path) + * [Syntax](../../../client-api/session/querying/how-to-filter-by-field.mdx#syntax) + + +## Filter by field name + + + + +{`/** @var array $results */ +$results = $session + ->advanced() + ->documentQuery(Employee::class) + ->whereExists("FirstName") + ->toList(); +`} + + + + +{`// Only documents that contain field 'FirstName' will be returned + +from Employees +where exists("FirstName") +`} + + + + + + +## Filter by field path + + + + +{`$results = $session + ->advanced() + ->documentQuery(Employee::class) + ->whereExists("Address.Location.Latitude") + ->toList(); +`} + + + + +{`// Only documents that contain the 'Latitude' property in the specified path will be returned + +from Employees +where exists("Address.Location.Latitude") +`} + + + + + + +## Syntax + + + +{`public function whereExists(?string $fieldName): DocumentQueryInterface; +`} + + + +| Parameters | Type | Description | +|----------------|-----------|----------------------------------------------------| +| **$fieldName** | `?string` | The name / path of the document field to filter by | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-python.mdx new file mode 100644 index 0000000000..fe4a571e5c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-field-python.mdx @@ -0,0 +1,82 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To search for documents that contain a specific field by the field **name** or **path**, + use the [document_query](../../../client-api/session/querying/document-query/what-is-document-query.mdx) + `where_exists` method. + Documents that do not contain the specified field will be excluded from the query results. + +* In this page: + * [Filter by Field Name or Path](../../../client-api/session/querying/how-to-filter-by-field.mdx#filter-by-field-name-or-path) + * [Syntax](../../../client-api/session/querying/how-to-filter-by-field.mdx#syntax) + * [Examples](../../../client-api/session/querying/how-to-filter-by-field.mdx#examples) + + +## Filter by Field Name or Path + +To search for documents that contain a specific field by the field's **name** or **path**, +pass the name or the path to `where_exists` as demonstrated below. +### Syntax + + + +{`def where_exists(self, field_name: str) -> DocumentQuery[_T]: ... +`} + + + +| Parameters | Type | Description | +|----------------------|-------------|-------------------------------------------| +| **field_name** | `str` | Field Name or Path to filter documents by | +### Examples + +Pass `where_exists` a string containing the field name or path. 
+ +* **Pass a Field Name**: + + + +{`# Only documents that contain field 'first_name' will be returned +results = list(session.advanced.document_query(object_type=Employee).where_exists("first_name")) +`} + + + + +{`// Only documents that contain the 'FirstName' field will be returned + +from Employees +where exists("FirstName") +`} + + + + +* **Pass a Field Path**: + + + +{`results = list( + session.advanced.document_query(object_type=Employee).where_exists("address.location.latitude") +) +`} + + + + +{`// Only documents that contain the 'Latitude' property in the specified path will be returned + +from Employees +where exists("Address.Location.Latitude") +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-csharp.mdx new file mode 100644 index 0000000000..4977ffe79c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-csharp.mdx @@ -0,0 +1,204 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* There are situations where new fields are added to some documents in a collection over time. + +* To find the documents that are missing the newly added fields you can either: + * [Query the collection (dynamic query)](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#query-the-collection-(dynamic-query)) + * [Query a static index](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#query-a-static-index) + * [Use Studio to Run an RQL Query](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#use-studio-to-run-an-rql-query) + +---- + + +## Query the collection (dynamic query) + +To run a dynamic query over a collection and find which documents are missing a specified field, +use the `Not` and `WhereExists` extension methods, accessible from the [DocumentQuery](../../../client-api/session/querying/document-query/what-is-document-query.mdx) API, +as shown below. + +This will either create a new auto-index or add the queried field to an existing auto-index. +Learn more about the dynamic query flow [here](../../../client-api/session/querying/how-to-query.mdx#dynamicquery). + +**Example** + + + + +{`List ordersWithoutFreightField = session + .Advanced + // Define a DocumentQuery on 'Orders' collection + .DocumentQuery() + // Search for documents that do Not contain field 'Freight' + .Not.WhereExists("Freight") + // Execute the query + .ToList(); + +// Results will be only the documents that do Not contain the 'Freight' field in 'Orders' collection +`} + + + + +{`List ordersWithoutFreightField = await asyncSession + .Advanced + // Define a DocumentQuery on 'Orders' collection + .AsyncDocumentQuery() + // Search for documents that do Not contain field 'Freight' + .Not.WhereExists("Freight") + // Execute the query + .ToListAsync(); + +// Results will be only the documents that do Not contain the 'Freight' field in 'Orders' collection +`} + + + + +{`from "Orders" +where true and not exists("Freight") +// \`not\` cannot be used immediately after \`where\`, thus we use \`where true\`. +`} + + + + + + +## Query a static index + +Documents with missing fields can be searched by querying a static index. 
+ +The index definition must contain the following document fields indexed: + +* A document field that **exists** in **all** documents of the queried collection, e.g. the _Id_ field. + Indexing this field will ensure that all the documents of this collection are indexed. +* A document field that is suspected to be **missing** from some documents of the queried collection. + +**Example** + + + +{`// Define a static index on the 'Orders' collection +// ================================================ + +public class Orders_ByFreight : AbstractIndexCreationTask +\{ + public class IndexEntry + \{ + // Define the index-fields + public decimal Freight \{ get; set; \} + public string Id \{ get; set; \} + \} + + public Orders_ByFreight() + \{ + // Define the index Map function + Map = orders => from doc in orders + select new IndexEntry + \{ + // Index a field that might be missing in SOME documents + Freight = doc.Freight, + // Index a field that exists in ALL documents in the collection + Id = doc.Id + \}; + \} +\} +`} + + + + + + +{`// Query the index +// =============== + +List ordersWithoutFreightField = session + .Advanced + // Define a DocumentQuery on the index + .DocumentQuery() + // Verify the index is not stale (optional) + .WaitForNonStaleResults() + // Search for documents that do Not contain field 'Freight' + .Not.WhereExists(x => x.Freight) + // Execute the query + .ToList(); + +// Results will be only the documents that do Not contain the 'Freight' field in 'Orders' collection +`} + + + + +{`// Query the index +// =============== + +List ordersWithoutFreightField = await asyncSession + .Advanced + // Define a DocumentQuery on the index + .AsyncDocumentQuery() + // Verify the index is not stale (optional) + .WaitForNonStaleResults() + // Search for documents that do Not contain field 'Freight' + .Not.WhereExists(x => x.Freight) + // Execute the query + .ToListAsync(); + +// Results will be only the documents that do Not contain the 'Freight' field in 'Orders' collection +`} + + + + +{`from index "Orders/ByFreight" +where true and not exists("Freight") +// \`not\` cannot come immediately after \`where\`, thus we use \`where true\`. +`} + + + + + + +## Use Studio to Run an RQL Query + +* Documents can be searched by missing fields using Studio's [Query view](../../../studio/database/queries/query-view.mdx). + +* Use an [RQL](../../../client-api/session/querying/what-is-rql.mdx) expression such as: + + +{`from "Orders" +where exists("Company") and not exists("Freight") +`} + + + +* In the `where` clause: + First search for a field that **exists** in **all** documents of the queried collection, e.g. the _Id_ field. + Then search for a field that **may be missing** from some documents of the queried collection. + + ![List Documents Without a Specified Field](./assets/non-existing-field-studio-rql.png) + + 1. **Indexes** + Click to see the Indexes menu. + 2. **Query** + Select to open the Query view. + 3. **Query editor** + Write the RQL query. + 4. **Run Query** + Click to run the query. + 5. **Index used** + The name of the auto-index created to serve this query. + You can click it to see the available Studio options for this index. + 6. **Results** + This is the list of documents that do not contain the specified 'Freight' field. + (the "Freight" Field was removed from these Northwind documents for this example.) 
+ + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-nodejs.mdx new file mode 100644 index 0000000000..9f98fcbdd0 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-nodejs.mdx @@ -0,0 +1,159 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* There are situations where new fields are added to some documents in a collection over time. + +* To find the documents that are missing the newly added fields you can either: + * [Query the collection (dynamic query)](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#query-the-collection-(dynamic-query)) + * [Query a static index](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#query-a-static-index) + * [Use Studio to Run an RQL Query](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#use-studio-to-run-an-rql-query) + +---- + + +## Query the collection (dynamic query) + +To run a dynamic query over a collection and find which documents are missing a specified field, +use the `not` and `whereExists` extension methods, accessible from the [query](../../../client-api/session/querying/how-to-query.mdx) API, +as shown below. + +This will either create a new auto-index or add the queried field to an existing auto-index. +Learn more about the dynamic query flow [here](../../../client-api/session/querying/how-to-query.mdx#dynamicquery). + +**Example** + + + + +{`const ordersWithoutFreightField = await session + // Define a query on 'orders' collection + .query({ collection: "orders" }) + // Search for documents that do Not contain field 'freight' + .not() + .whereExists("freight") + // Execute the query + .all(); + +// Results will be only the documents that do Not contain the 'freight' field in 'orders' collection +`} + + + + +{`from "orders" +where true and not exists("freight") +// \`not\` cannot be used immediately after \`where\`, thus we use \`where true\`. +`} + + + + + + +## Query a static index + +Documents with missing fields can be searched by querying a static index. + +The index definition must contain the following document fields indexed: + +* A document field that **exists** in **all** documents of the queried collection, e.g. the _Id_ field. + Indexing this field will ensure that all the documents of this collection are indexed. +* A document field that is suspected to be **missing** from some documents of the queried collection. 
+ +**Example** + + + +{`// Define a static index on the 'orders' collection
// ================================================

class Orders_ByFreight extends AbstractJavaScriptIndexCreationTask \{

    constructor() \{
        super();

        // Define the index-fields
        this.map("orders", o => (\{
            // Index a field that might be missing in SOME documents
            freight: o.freight,
            // Index a field that exists in ALL documents in the collection
            id: o.id
        \}));
    \}
\}
`} + + + + + + +{`// Query the index
// ===============

const ordersWithoutFreightField = await session
    // Define a query on the index
    .query({ indexName: "Orders/ByFreight" })
    // Search for documents that do Not contain field 'freight'
    .not()
    .whereExists("freight")
    // Execute the query
    .all();

// Results will be only the documents that do Not contain the 'freight' field in 'orders' collection
`} + + + + +{`from index "Orders/ByFreight"
where true and not exists("freight")
// \`not\` cannot come immediately after \`where\`, thus we use \`where true\`.
`} + + + + + + +## Use Studio to Run an RQL Query + +* Documents can be searched by missing fields using Studio's [Query view](../../../studio/database/queries/query-view.mdx). + +* Use an [RQL](../../../client-api/session/querying/what-is-rql.mdx) expression such as: + + +{`from "Orders"
where exists("Company") and not exists("Freight")
`} + + + +* In the `where` clause: + First search for a field that **exists** in **all** documents of the queried collection, e.g. the _Id_ field. + Then search for a field that **may be missing** from some documents of the queried collection. + + ![List Documents Without a Specified Field](./assets/non-existing-field-studio-rql.png) + + 1. **Indexes** + Click to see the Indexes menu. + 2. **Query** + Select to open the Query view. + 3. **Query editor** + Write the RQL query. + 4. **Run Query** + Click to run the query. + 5. **Index used** + The name of the auto-index created to serve this query. + You can click it to see the available Studio options for this index. + 6. **Results** + This is the list of documents that do not contain the specified 'Freight' field. + (the "Freight" field was removed from these Northwind documents for this example.) + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-php.mdx new file mode 100644 index 0000000000..5e09aa7b61 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-php.mdx @@ -0,0 +1,190 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* There are situations where new fields are added to some documents in a collection over time.
+* To find documents that are missing the newly added fields you can either: + * [Query the collection (dynamic query)](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#query-the-collection-(dynamic-query)) + * [Query a static index](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#query-a-static-index) + * [Use Studio to Run an RQL Query](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#use-studio-to-run-an-rql-query) + +---- + + +## Query the collection (dynamic query) + +To run a dynamic query over a collection and find which documents are missing a specified field, +use the `not` and `whereExists` methods, accessible from the [documentQuery](../../../client-api/session/querying/document-query/what-is-document-query.mdx) API, +as shown below. + +This will either create a new auto-index or add the queried field to an existing auto-index. +Learn more about the dynamic query flow [here](../../../client-api/session/querying/how-to-query.mdx#dynamicquery). + +**Example** + + + + +{`/** @var array $ordersWithoutFreightField */
$ordersWithoutFreightField = $session
    ->advanced()
    // Define a DocumentQuery on 'Orders' collection
    ->documentQuery(Order::class)
    // Search for documents that do Not contain field 'Freight'
    ->not()
    ->whereExists("Freight")
    // Execute the query
    ->toList();

// Results will be only the documents that do Not contain the 'Freight' field in 'Orders' collection
`} + + + + +{`from "Orders"
where true and not exists("Freight")
// \`not\` cannot be used immediately after \`where\`, thus we use \`where true\`.
`} + + + + + + +## Query a static index + +Documents with missing fields can be searched by querying a static index. + +The index definition must contain the following document fields indexed: + +* A document field that **exists** in **all** documents of the queried collection, e.g. the _Id_ field. + Indexing this field will ensure that all the documents of this collection are indexed. +* A document field that is suspected to be **missing** from some documents of the queried collection. + +**Example** + + + +{`// Define a static index on the 'Orders' collection
// ================================================

class IndexEntry
\{
    // Define the index-fields
    public ?float $freight = null;
    public ?string $id = null;

    public function getFreight(): float
    \{
        return $this->freight;
    \}

    public function setFreight(float $freight): void
    \{
        $this->freight = $freight;
    \}

    public function getId(): ?string
    \{
        return $this->id;
    \}

    public function setId(?string $id): void
    \{
        $this->id = $id;
    \}
\}

class Orders_ByFreight extends AbstractIndexCreationTask
\{
    public function __construct()
    \{
        parent::__construct();
        // Define the index Map function
        $this->map = "orders => from doc in orders select new \{\\n" .
            "    freight = doc.freight,\\n" .
            "    id = doc.id\\n" .
            "\}";
    \}
\}
`} + + + + + + +{`// Query the index
// ===============

/** @var array $ordersWithoutFreightField */
$ordersWithoutFreightField = $session
    ->advanced()
    // Define a DocumentQuery on the index
    ->documentQuery(IndexEntry::class, Orders_ByFreight::class)
    // Verify the index is not stale (optional)
    ->waitForNonStaleResults()
    // Search for documents that do Not contain field 'Freight'
    ->not()
    ->whereExists("Freight")
    // Execute the query
    ->toList();

// Results will be only the documents that do Not contain the 'Freight' field in 'Orders' collection
`} + + + + +{`from index "Orders/ByFreight"
where true and not exists("Freight")
// \`not\` cannot come immediately after \`where\`, thus we use \`where true\`.
`} + + + + + + +## Use Studio to Run an RQL Query + +* Documents can be searched by missing fields using Studio's [Query view](../../../studio/database/queries/query-view.mdx). + +* Use an [RQL](../../../client-api/session/querying/what-is-rql.mdx) expression such as: + + +{`from "Orders"
where exists("Company") and not exists("Freight")
`} + + + +* In the `where` clause: + First search for a field that **exists** in **all** documents of the queried collection, e.g. the _Id_ field. + Then search for a field that **may be missing** from some documents of the queried collection. + + ![List Documents Without a Specified Field](./assets/non-existing-field-studio-rql.png) + + 1. **Indexes** + Click to see the Indexes menu. + 2. **Query** + Select to open the Query view. + 3. **Query editor** + Write the RQL query. + 4. **Run Query** + Click to run the query. + 5. **Index used** + The name of the auto-index created to serve this query. + You can click it to see the available Studio options for this index. + 6. **Results** + This is the list of documents that do not contain the specified 'Freight' field. + (the "Freight" field was removed from these Northwind documents for this example.) + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-python.mdx new file mode 100644 index 0000000000..bb534523e7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-filter-by-non-existing-field-python.mdx @@ -0,0 +1,157 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* There are situations where new fields are added to some documents in a collection over time. + +* To find the documents that are missing the newly added fields you can either: + * [Query the collection (dynamic query)](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#query-the-collection-(dynamic-query)) + * [Query a static index](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#query-a-static-index) + * [Use Studio to Run an RQL Query](../../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx#use-studio-to-run-an-rql-query) + +---- + + +## Query the collection (dynamic query) + +To run a dynamic query over a collection and find which documents are missing a specified field, +use the `not_` and `where_exists` methods, accessible from the [document_query](../../../client-api/session/querying/document-query/what-is-document-query.mdx) +API, as demonstrated below.
+ +This will either create a new auto-index or add the queried field to an existing auto-index. +Learn more about the dynamic query flow [here](../../../client-api/session/querying/how-to-query.mdx#dynamicquery). + +**Example** + + + + +{`orders_without_freight_field = list( + session + # Define a DocumentQuery on 'Orders' collection + .document_query(object_type=Order) + # Search for documents that do not contain field 'freight' + .not_().where_exists("freight") +) + +# Results will be only the documents that do not contain the 'freight' field in 'Orders' collection +`} + + + + +{`from "Orders" +where true and not exists("Freight") +// \`not\` cannot be used immediately after \`where\`, thus we use \`where true\`. +`} + + + + + + +## Query a static index + +Documents with missing fields can be searched by querying a static index. + +The index definition must contain the following document fields indexed: + +* A document field that **exists** in **all** documents of the queried collection, e.g. the _Id_ field. + Indexing this field will ensure that all the documents of this collection are indexed. +* A document field that is suspected to be **missing** from some documents of the queried collection. + +**Example** + + + +{`# Define a static index on the 'Orders' collection +# ================================================ + + +class Orders_ByFreight(AbstractIndexCreationTask): + class IndexEntry: + def __init__(self, freight: int = None, Id: str = None): + self.freight = freight + self.Id = Id + + def __init__(self): + # Call super().__init__() to initialize your index class + super().__init__() + # Define the index Map function + self.map = "from o in docs.Orders select new \{ freight = o.freight, Id = o.Id \}" +`} + + + + + + +{`# Query the index +# =============== +fields = list(session.query_index_type(Orders_ByFreight, Orders_ByFreight.IndexEntry)) +orders_without_freight_field = list( + session + # Define a DocumentQuery on the index + .query_index_type(Orders_ByFreight, Orders_ByFreight.IndexEntry) + # Verify the index is not stale (optional) + .wait_for_non_stale_results() + # Search for documents that do not contain field 'freight' + .not_().where_exists("freight") +) + +# Results will be only the documents that do not contain the 'freight' field in 'Orders' collection +`} + + + + +{`from index "Orders/ByFreight" +where true and not exists("Freight") +// \`not\` cannot come immediately after \`where\`, thus we use \`where true\`. +`} + + + + + + +## Use Studio to Run an RQL Query + +* Documents can be searched by missing fields using Studio's [Query view](../../../studio/database/queries/query-view.mdx). + +* Use an [RQL](../../../client-api/session/querying/what-is-rql.mdx) expression such as: + + +{`from "Orders" +where exists("Company") and not exists("Freight") +`} + + + +* In the `where` clause: + First search for a field that **exists** in **all** documents of the queried collection, e.g. the _Id_ field. + Then search for a field that **may be missing** from some documents of the queried collection. + + ![List Documents Without a Specified Field](./assets/non-existing-field-studio-rql.png) + + 1. **Indexes** + Click to see the Indexes menu. + 2. **Query** + Select to open the Query view. + 3. **Query editor** + Write the RQL query. + 4. **Run Query** + Click to run the query. + 5. **Index used** + The name of the auto-index created to serve this query. + You can click it to see the available Studio options for this index. + 6. 
**Results** + This is the list of documents that do not contain the specified 'Freight' field. + (the "Freight" Field was removed from these Northwind documents for this example.) + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-csharp.mdx new file mode 100644 index 0000000000..8e2f60380b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-csharp.mdx @@ -0,0 +1,136 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Detailed **query statistics** can be retrieved for every executed query using the `Statistics` method. + +* Stats such as query duration, number of results, index name used in the query, and more, + are returned in the `QueryStatistics` object. + +* In This Page: + * [Get query statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx#get-query-statistics) + * [Syntax](../../../client-api/session/querying/how-to-get-query-statistics.mdx#syntax) + + +## Get query statistics + + + + +{`List employees = session + .Query() + .Where(x => x.FirstName == "Anne") + // Get query stats: + // * Call 'Statistics' + // * Pass an out 'QueryStatistics' param for getting the stats + .Statistics(out QueryStatistics stats) + .ToList(); + +long numberOfResults = stats.TotalResults; // Get results count +long queryDuration = stats.DurationInMs; // Get query duration +string indexNameUsed = stats.IndexName; // Get index name used in query +// ... +`} + + + + +{`List employees = await asyncSession + .Query() + .Where(x => x.FirstName == "Anne") + // Get query stats: + // * Call 'Statistics' + // * Pass an out 'QueryStatistics' param for getting the stats + .Statistics(out QueryStatistics stats) + .ToListAsync(); + +long numberOfResults = stats.TotalResults; // Get results count +long queryDuration = stats.DurationInMs; // Get query duration +string indexNameUsed = stats.IndexName; // Get index name used in query +// ... +`} + + + + +{`List employees = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Anne") + // Get query stats: + // * Call 'Statistics' + // * Pass an out 'QueryStatistics' param for getting the stats + .Statistics(out QueryStatistics stats) + .ToList(); + +long numberOfResults = stats.TotalResults; // Get results count +long queryDuration = stats.DurationInMs; // Get query duration +string indexNameUsed = stats.IndexName; // Get index name used in query +// ... +`} + + + + +{`from "Employees" where FirstName == "Anne" +`} + + + + + + +## Syntax + + + +{`IRavenQueryable Statistics(out QueryStatistics stats); +`} + + + +| Parameter | Type | Description | +|-----------|-------------------|-------------------------------------------------| +| **stats** | `QueryStatistics` | An 'out' param for getting the query statistics | + +
+ + + +{`public class QueryStatistics +\{ + public bool IsStale \{ get; set; \} + public long DurationInMs \{ get; set; \} + public long TotalResults \{ get; set; \} + public long SkippedResults \{ get; set; \} + public long? ScannedResults \{ get; set; \} + public DateTime Timestamp \{ get; set; \} + public string IndexName \{ get; set; \} + public DateTime IndexTimestamp \{ get; set; \} + public DateTime LastQueryTime \{ get; set; \} + public long? ResultEtag \{ get; set; \} + public string NodeTag \{ get; set; \} +\} +`} + + + +| Property | Type | Description | +|--------------------|------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **IsStale** | `bool` | Are the results returned by the query potentially stale | +| **DurationInMs** | `long` | Query duration on the server side in Milliseconds | +| **TotalResults** | `long` | The total count of results that matched the query as `Int32`.
Matching query results can also be counted as `Int32` using [Count](../../../client-api/session/querying/how-to-count-query-results.mdx#count). | +| **SkippedResults** | `long` | The number of results skipped by the server.
Learn more in [paging through tampered results](../../../indexes/querying/paging.mdx#paging-through-tampered-results). | +| **ScannedResults** | `long?` | The number of results scanned by the query.
Relevant only when using a filter clause in the query. |
+| **Timestamp** | `DateTime` | The time when the query results became non-stale |
+| **IndexName** | `string` | The name of the queried index |
+| **IndexTimestamp** | `DateTime` | The timestamp of the queried index |
+| **LastQueryTime** | `DateTime` | The timestamp of the last time the index was queried |
+| **ResultEtag** | `long?` | Results Etag |
+| **NodeTag** | `string` | Tag of the cluster node that responded to the query |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-java.mdx
new file mode 100644
index 0000000000..9ba80a6e77
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-java.mdx
@@ -0,0 +1,75 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Query statistics provide important information about a query, such as its duration, the total number of results, and staleness information.
+To access the statistics, use the `statistics` method.
+
+## Example
+
+
+
+{`Reference<QueryStatistics> stats = new Reference<>();
+
+List<Employee> employees = session.query(Employee.class)
+    .whereEquals("FirstName", "Robert")
+    .statistics(stats)
+    .toList();
+
+int totalResults = stats.value.getTotalResults();
+long durationInMs = stats.value.getDurationInMs();
+`}
+
+
+
+## Syntax
+
+
+
+{`IDocumentQuery<T> statistics(Reference<QueryStatistics> stats);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **stats** | `QueryStatistics` | Statistics for the query. |
+
+
+
+{`public class QueryStatistics \{
+
+    private boolean isStale;
+    private long durationInMs;
+    private int totalResults;
+    private long longTotalResults;
+    private int skippedResults;
+    private Date timestamp;
+    private String indexName;
+    private Date indexTimestamp;
+    private Date lastQueryTime;
+    private Long resultEtag;
+    private String nodeTag;
+
+\}
+`}
+
+
+
+
+| Property | Type | Description |
+|----------------------------------------|----------------|------------------------------------------------------------------|
+| **isStale** | `boolean` | Whether the results returned by the query are potentially stale |
+| **durationInMs** | `long` | Query duration on the server side, in milliseconds |
+| **totalResults**, **longTotalResults** | `int` / `long` | The total count of results that matched the query |
+| **skippedResults** | `int` | The number of results skipped by the server.
Learn more in [paging through tampered results](../../../indexes/querying/paging.mdx#paging-through-tampered-results). |
+| **scannedResults** | `long` | The number of results scanned by the query.
Relevant only when using a filter clause in the query. |
+| **timestamp** | `Date` | The time when the query results became non-stale |
+| **indexName** | `String` | The name of the queried index |
+| **indexTimestamp** | `Date` | The timestamp of the queried index |
+| **lastQueryTime** | `Date` | The timestamp of the last time the index was queried |
+| **resultEtag** | `Long` | Results Etag |
+| **nodeTag** | `String` | Tag of the cluster node that responded to the query |
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-nodejs.mdx
new file mode 100644
index 0000000000..e113cefe0c
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-nodejs.mdx
@@ -0,0 +1,81 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Detailed **query statistics** can be retrieved for every executed query using the `statistics` method.
+
+* Stats such as the query duration, the number of results, the index name used in the query, and more
+  are returned in the `QueryStatistics` object.
+
+* In this page:
+    * [Get query statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx#get-query-statistics)
+    * [Syntax](../../../client-api/session/querying/how-to-get-query-statistics.mdx#syntax)
+
+
+## Get query statistics
+
+
+
+
+{`// Define an output param for getting the query stats
+let stats;
+
+const employees = await session
+    .query({ collection: "Employees" })
+    .whereEquals("FirstName", "Anne")
+    // Get query stats:
+    // * Call 'statistics', pass a callback function
+    // * Output param 'stats' will be filled with the stats when the query returns
+    .statistics(s => stats = s)
+    .all();
+
+const numberOfResults = stats.totalResults; // Get results count
+const queryDuration = stats.durationInMs;   // Get query duration
+const indexNameUsed = stats.indexName;      // Get index name used in query
+// ...
+`}
+
+
+
+
+{`from "Employees" where FirstName == "Anne"
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`query.statistics(statsCallback);
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------|-------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **statsCallback** | `(stats) => void` | <ul><li>A callback function with an output parameter.</li><li>The parameter passed to the callback will be filled with the `QueryStatistics` object when the query returns.</li></ul> |
+
+| `QueryStatistics` | | |
+|---------------------|-----------|------------------------------------------------------------------|
+| **isStale** | `boolean` | Whether the results returned by the query are potentially stale |
+| **durationInMs** | `number` | Query duration on the server side, in milliseconds |
+| **totalResults** | `number` | The total count of results that matched the query |
+| **skippedResults** | `number` | The number of results skipped by the server.
Learn more in [paging through tampered results](../../../indexes/querying/paging.mdx#paging-through-tampered-results). |
+| **scannedResults** | `number` | The number of results scanned by the query.
Relevant only when using a filter clause in the query. |
+| **timestamp** | `Date` | The time when the query results became non-stale |
+| **indexName** | `string` | The name of the queried index |
+| **indexTimestamp** | `Date` | The timestamp of the queried index |
+| **lastQueryTime** | `Date` | The timestamp of the last time the index was queried |
+| **resultEtag** | `number` | Results Etag |
+| **nodeTag** | `string` | Tag of the cluster node that responded to the query |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-php.mdx
new file mode 100644
index 0000000000..96ffb43074
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-php.mdx
@@ -0,0 +1,96 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Detailed **query statistics** can be retrieved for every executed query using the `statistics` method.
+
+* Stats such as the query duration, the number of results, the index name used in the query, and more
+  are returned in the `QueryStatistics` object.
+
+* In this page:
+    * [Get query statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx#get-query-statistics)
+    * [Syntax](../../../client-api/session/querying/how-to-get-query-statistics.mdx#syntax)
+
+
+## Get query statistics
+
+
+
+
+{`$stats = new QueryStatistics();
+
+$employees = $session->query(Employee::class)
+    ->whereEquals("FirstName", "Robert")
+    ->statistics($stats)
+    ->toList();
+
+$totalResults = $stats->getTotalResults();
+$durationInMs = $stats->getDurationInMs();
+`}
+
+
+
+
+{`from "Employees" where FirstName == "Robert"
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public function statistics(QueryStatistics &$stats): DocumentQueryInterface;
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|-------------------|-------------------------------------------------|
+| **$stats** | `QueryStatistics` | An 'out' param for getting the query statistics |
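+
+Once the query has executed, the individual statistics can be read from the `QueryStatistics` object through its getters.
+A minimal sketch (the getter names used here are assumptions inferred from the fields listed next):
+
+
+
+{`$stats = new QueryStatistics();
+
+$session->query(Employee::class)
+    ->statistics($stats)
+    ->toList();
+
+// Assumed getters, named after the corresponding QueryStatistics fields:
+$isStale = $stats->isStale();        // Whether the results are potentially stale
+$indexName = $stats->getIndexName(); // The name of the queried index
+`}
+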
+
+
+
+{`class QueryStatistics
+\{
+    private bool $isStale = false;
+    private int $durationInMs = 0;
+    private int $totalResults = 0;
+    private int $longTotalResults = 0;
+    private int $skippedResults = 0;
+    private ?int $scannedResults = null;
+    private ?DateTimeInterface $timestamp = null;
+    private ?string $indexName = null;
+    private ?DateTimeInterface $indexTimestamp = null;
+    private ?DateTimeInterface $lastQueryTime = null;
+    private ?int $resultEtag = null;
+    private ?string $nodeTag = null;
+\}
+`}
+
+
+
+| Property | Type | Description |
+|---------------------------------------------|----------------------|------------------------------------------------------------------|
+| **$isStale** | `bool` | Whether the results returned by the query are potentially stale |
+| **$durationInMs** | `int` | Query duration on the server side, in milliseconds |
+| **$totalResults**,
**$longTotalResults** | `int` | The total count of results that matched the query.
Matching query results can also be counted using [Count](../../../client-api/session/querying/how-to-count-query-results.mdx#count). | +| **$skippedResults** | `int` | The number of results skipped by the server.
Learn more in [paging through tampered results](../../../indexes/querying/paging.mdx#paging-through-tampered-results). |
+| **$timestamp** | `?DateTimeInterface` | The time when the query results became non-stale |
+| **$indexName** | `?string` | The name of the queried index |
+| **$indexTimestamp** | `?DateTimeInterface` | The timestamp of the queried index |
+| **$lastQueryTime** | `?DateTimeInterface` | The timestamp of the last time the index was queried |
+| **$resultEtag** | `?int` | Results Etag |
+| **$nodeTag** | `?string` | Tag of the cluster node that responded to the query |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-python.mdx
new file mode 100644
index 0000000000..30ed404e8f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-get-query-statistics-python.mdx
@@ -0,0 +1,91 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Detailed **query statistics** can be retrieved for every executed query using the `statistics` method.
+
+* Stats such as the query duration, the number of results, the index name used in the query, and more
+  are returned in the `QueryStatistics` object.
+
+* In this page:
+    * [Get query statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx#get-query-statistics)
+    * [Syntax](../../../client-api/session/querying/how-to-get-query-statistics.mdx#syntax)
+
+
+## Get query statistics
+
+
+
+
+{`def __statistics_callback(statistics: QueryStatistics) -> None:
+    # Read and interact with QueryStatistics here
+    total_results = statistics.total_results
+    duration_milliseconds = statistics.duration_in_ms
+    ...
+
+employees = list(
+    session.query(object_type=Employee)
+    .where_equals("first_name", "Robert")
+    .statistics(__statistics_callback)
+)
+`}
+
+
+
+
+{`from "Employees" where FirstName == "Robert"
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`def statistics(self, stats_callback: Callable[[QueryStatistics], None]) -> DocumentQuery[_T]: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|--------------------|-------------------------------------|------------------------------------------------------------------|
+| **stats_callback** | `Callable[[QueryStatistics], None]` | A callback that will be invoked with the query statistics object |
+
+| Property | Type | Description |
+|---------------------|------------|------------------------------------------------------------------|
+| **is_stale** | `bool` | Whether the results returned by the query are potentially stale |
+| **duration_in_ms** | `int` | Query duration on the server side, in milliseconds |
+| **total_results** | `int` | The total count of results that matched the query
Matching query results can also be counted using [count](../../../client-api/session/querying/how-to-count-query-results.mdx#count). | +| **skipped_results** | `int` | The number of results skipped by the server.
Learn more in [paging through tampered results](../../../indexes/querying/paging.mdx#paging-through-tampered-results). |
+| **timestamp** | `datetime` | The time when the query results became non-stale |
+| **index_name** | `str` | The name of the queried index |
+| **index_timestamp** | `datetime` | The timestamp of the queried index |
+| **last_query_time** | `datetime` | The timestamp of the last time the index was queried |
+| **result_etag** | `int` | Results Etag |
+| **node_tag** | `str` | Tag of the cluster node that responded to the query |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-csharp.mdx
new file mode 100644
index 0000000000..76701c04da
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-csharp.mdx
@@ -0,0 +1,962 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Documents that contain spatial data can be queried with spatial queries that employ geographical criteria.
+  You can use either a _Dynamic spatial query_ or a _Spatial index query_.
+
+    * **Dynamic spatial query**
+      Make a dynamic spatial query on a collection (described below).
+      An auto-index will be created by the server.
+
+    * **Spatial index query**
+      Index your documents' spatial data in a static-index (see [indexing spatial data](../../../indexes/indexing-spatial-data.mdx))
+      and then make a spatial query on this index (see [query a spatial index](../../../indexes/querying/spatial.mdx)).
+
+* To perform a spatial search,
+  use the `Spatial` method, which provides a wide range of spatial functionalities.
+
+* When making a dynamic spatial query from Studio,
+  results are also displayed on the global map. See [spatial queries map view](../../../studio/database/queries/spatial-queries-map-view.mdx).
+* In this page: + + * [Search by radius](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-radius) + * [Search by shape](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape) + * [Circle](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#circle) + * [Polygon](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#polygon) + * [Spatial sorting](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting) + * [Order by distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#order-by-distance) + * [Order by distance descending](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#order-by-distance-descending) + * [Sort results by rounded distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#sort-results-by-rounded-distance) + * [Get resulting distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#get-resulting-distance) + * [Spatial API](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-api) + * [`Spatial`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section) + * [`DynamicSpatialFieldFactory`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section-1) + * [`SpatialCriteriaFactory`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section-2) + * [`OrderByDistance`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section-3) + * [`OrderByDistanceDescending`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section-4) + + +## Search by radius + +Use the `WithinRadius` method to search for all documents containing spatial data that is located +within the specified distance from the given center point. + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Define a dynamic query on Employees collection +List employeesWithinRadius = session + .Query() + // Call 'Spatial' method + .Spatial( + // Call 'Point' + // Pass the path to the document fields containing the spatial data + pointField => pointField.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude), + // Set the geographical area in which to search for matching documents + // Call 'WithinRadius', pass the radius and the center points coordinates + criteria => criteria.WithinRadius(20, 47.623473, -122.3060097)) + .ToList(); +`} + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Define a dynamic query on Employees collection +List employeesWithinRadius = await asyncSession + .Query() + // Call 'Spatial' method + .Spatial( + // Call 'Point' + // Pass the path to the document fields containing the spatial data + pointField => pointField.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude), + // Set the geographical area in which to search for matching documents + // Call 'WithinRadius', pass the radius and the center points coordinates + criteria => criteria.WithinRadius(20, 47.623473, -122.3060097)) + .ToListAsync(); +`} + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). 
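+// Note: the radius is measured in kilometers by default;
+// pass a 'radiusUnits' argument to 'WithinRadius' to use miles instead.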
+ +// Define a dynamic query on Employees collection +List employeesWithinRadius = session.Advanced + .DocumentQuery() + // Call 'Spatial' method + .Spatial( + // Call 'Point' + // Pass the path to the document fields containing the spatial data + pointField => pointField.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude), + // Set the geographical area in which to search for matching documents + // Call 'WithinRadius', pass the radius and the center points coordinates + criteria => criteria.WithinRadius(20, 47.623473, -122.3060097)) + .ToList(); +`} + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +from Employees +where spatial.within( + spatial.point(Address.Location.Latitude, Address.Location.Longitude), + spatial.circle(20, 47.623473, -122.3060097) +) +`} + + + + + + +## Search by shape + +* Use the `RelatesToShape` method to search for all documents containing spatial data that is located + in the specified relation to the given shape. + +* The shape is specified as either a **circle** or a **polygon** in a [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry) format. + +* The relation to the shape can be one of: `Within`, `Contains`, `Disjoint`, `Intersects`. +#### Circle + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Define a dynamic query on Employees collection +List employeesWithinShape = session + .Query() + // Call 'Spatial' method + .Spatial( + // Call 'Point' + // Pass the path to the document fields containing the spatial data + factory => factory.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude), + // Set the geographical search criteria, call 'RelatesToShape' + criteria => criteria.RelatesToShape( + // Specify the WKT string. Note: longitude is written FIRST + shapeWkt: "CIRCLE(-122.3060097 47.623473 d=20)", + // Specify the relation between the WKT shape and the documents spatial data + relation: SpatialRelation.Within, + // Optional: customize radius units (default is Kilometers) + units: SpatialUnits.Miles)) + .ToList(); +`} + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Define a dynamic query on Employees collection +List employeesWithinShape = await asyncSession + .Query() + // Call 'Spatial' method + .Spatial( + // Call 'Point' + // Pass the path to the document fields containing the spatial data + factory => factory.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude), + // Set the geographical search criteria, call 'RelatesToShape' + criteria => criteria.RelatesToShape( + // Specify the WKT string. Note: longitude is written FIRST + shapeWkt: "CIRCLE(-122.3060097 47.623473 d=20)", + // Specify the relation between the WKT shape and the documents spatial data + relation: SpatialRelation.Within, + // Optional: customize radius units (default is Kilometers) + units: SpatialUnits.Miles)) + .ToListAsync(); +`} + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). 
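+// Note: in the WKT string below, 'd=20' sets the circle's radius;
+// its unit is determined by the 'units' parameter (miles in this example).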
+ +// Define a dynamic query on Employees collection +List employeesWithinShape = session.Advanced + .DocumentQuery() + // Call 'Spatial' method + .Spatial( + // Call 'Point' + // Pass the path to the document fields containing the spatial data + factory => factory.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude), + // Set the geographical search criteria, call 'RelatesToShape' + criteria => criteria.RelatesToShape( + // Specify the WKT string. Note: longitude is written FIRST + shapeWkt: "CIRCLE(-122.3060097 47.623473 d=20)", + // Specify the relation between the WKT shape and the documents spatial data + relation: SpatialRelation.Within, + // Optional: customize radius units (default is Kilometers) + units: SpatialUnits.Miles)) + .ToList(); +`} + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +from Employees +where spatial.within( + spatial.point(Address.Location.Latitude, Address.Location.Longitude), + spatial.wkt("CIRCLE(-122.3060097 47.623473 d=20)", "miles") +) +`} + + + +#### Polygon + + + + +{`// This query will return all matching company entities +// that are located within the specified polygon. + +// Define a dynamic query on Companies collection +List companiesWithinShape = session + .Query() + // Call 'Spatial' method + .Spatial( + // Call 'Point' + // Pass the path to the document fields containing the spatial data + factory => factory.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude), + // Set the geographical search criteria, call 'RelatesToShape' + criteria => criteria.RelatesToShape( + // Specify the WKT string + shapeWkt: @"POLYGON (( + -118.6527948 32.7114894, + -95.8040242 37.5929338, + -102.8344151 53.3349629, + -127.5286633 48.3485664, + -129.4620208 38.0786067, + -118.7406746 32.7853769, + -118.6527948 32.7114894 + ))", + // Specify the relation between the WKT shape and the documents spatial data + relation: SpatialRelation.Within)) + .ToList(); +`} + + + + +{`// This query will return all matching company entities +// that are located within the specified polygon. + +// Define a dynamic query on Companies collection +List companiesWithinShape = await asyncSession + .Query() + // Call 'Spatial' method + .Spatial( + // Call 'Point' + // Pass the path to the document fields containing the spatial data + factory => factory.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude), + // Set the geographical search criteria, call 'RelatesToShape' + criteria => criteria.RelatesToShape( + // Specify the WKT string + shapeWkt: @"POLYGON (( + -118.6527948 32.7114894, + -95.8040242 37.5929338, + -102.8344151 53.3349629, + -127.5286633 48.3485664, + -129.4620208 38.0786067, + -118.7406746 32.7853769, + -118.6527948 32.7114894 + ))", + // Specify the relation between the WKT shape and the documents spatial data + relation: SpatialRelation.Within)) + .ToListAsync(); +`} + + + + +{`// This query will return all matching company entities +// that are located within the specified polygon. 
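+// Note: the polygon's vertices must be listed in counterclockwise order,
+// and the first and last vertices must mark the same location to close the shape.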
+ +// Define a dynamic query on Companies collection +List companiesWithinShape = session.Advanced + .DocumentQuery() + // Call 'Spatial' method + .Spatial( + // Call 'Point' + // Pass the path to the document fields containing the spatial data + factory => factory.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude), + // Set the geographical search criteria, call 'RelatesToShape' + criteria => criteria.RelatesToShape( + // Specify the WKT string + shapeWkt: @"POLYGON (( + -118.6527948 32.7114894, + -95.8040242 37.5929338, + -102.8344151 53.3349629, + -127.5286633 48.3485664, + -129.4620208 38.0786067, + -118.7406746 32.7853769, + -118.6527948 32.7114894 + ))", + // Specify the relation between the WKT shape and the documents spatial data + relation: SpatialRelation.Within)) + .ToList(); +`} + + + + +{`// This query will return all matching company entities +// that are located within the specified polygon. + +from companies +where spatial.within( + spatial.point(Address.Location.Latitude, Address.Location.Longitude), + spatial.wkt("POLYGON (( + -118.6527948 32.7114894, + -95.8040242 37.5929338, + -102.8344151 53.3349629, + -127.5286633 48.3485664, + -129.4620208 38.0786067, + -118.7406746 32.7853769, + -118.6527948 32.7114894))") +) +`} + + + + + + +* The polygon's coordinates must be provided in counterclockwise order. + +* The first and last coordinates must mark the same location to form a closed region. + +![WKT polygon](./assets/spatial_1.png) + + + + + +## Spatial sorting + +* Use `OrderByDistance` or `OrderByDistanceDescending` to sort the results by distance from a given point. + +* By default, distance is measured by RavenDB in **kilometers**. + The distance can be rounded to a specific range. +#### Order by distance + + + + +{`// Return all matching employee entities located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Sort the results by their distance from a specified point, +// the closest results will be listed first. + +List employeesSortedByDistance = session + .Query() + // Provide the query criteria: + .Spatial( + pointField => pointField.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude + ), + criteria => criteria.WithinRadius(20, 47.623473, -122.3060097)) + // Call 'OrderByDistance' + .OrderByDistance( + factory => factory.Point( + // Pass the path to the document fields containing the spatial data + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude + ), + // Sort the results by their distance from this point: + 47.623473, -122.3060097) + .ToList(); +`} + + + + +{`// Return all matching employee entities located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Sort the results by their distance from a specified point, +// the closest results will be listed first. 
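+// Note: by default, the distance is measured in kilometers.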
+ +List employeesSortedByDistance = await asyncSession + .Query() + // Provide the query criteria: + .Spatial( + pointField => pointField.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude + ), + criteria => criteria.WithinRadius(20, 47.623473, -122.3060097)) + // Call 'OrderByDistance' + .OrderByDistance( + factory => factory.Point( + // Pass the path to the document fields containing the spatial data + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude + ), + // Sort the results by their distance from this point: + 47.623473, -122.3060097) + .ToListAsync(); +`} + + + + +{`// Return all matching employee entities located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Sort the results by their distance from a specified point, +// the closest results will be listed first. + +List employeesSortedByDistance = session.Advanced + .DocumentQuery() + // Provide the query criteria: + .Spatial( + pointField => pointField.Point( + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude + ), + criteria => criteria.WithinRadius(20, 47.623473, -122.3060097)) + // Call 'OrderByDistance' + .OrderByDistance( + factory => factory.Point( + // Pass the path to the document fields containing the spatial data + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude + ), + // Sort the results by their distance from this point: + 47.623473, -122.3060097) + .ToList(); +`} + + + + +{`// Return all matching employee entities located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Sort the results by their distance from a specified point, +// the closest results will be listed first. + +from Employees +where spatial.within( + spatial.point(Address.Location.Latitude, Address.Location.Longitude), + spatial.circle(20, 47.623473, -122.3060097) +) +order by spatial.distance( + spatial.point(Address.Location.Latitude, Address.Location.Longitude), + spatial.point(47.623473, -122.3060097) +) +`} + + + +#### Order by distance descending + + + + +{`// Return all employee entities sorted by their distance from a specified point. +// The farthest results will be listed first. + +List employeesSortedByDistanceDesc = session + .Query() + // Call 'OrderByDistanceDescending' + .OrderByDistanceDescending( + factory => factory.Point( + // Pass the path to the document fields containing the spatial data + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude + ), + // Sort the results by their distance (descending) from this point: + 47.623473, -122.3060097) + .ToList(); +`} + + + + +{`// Return all employee entities sorted by their distance from a specified point. +// The farthest results will be listed first. + +List employeesSortedByDistanceDesc = await asyncSession + .Query() + // Call 'OrderByDistanceDescending' + .OrderByDistanceDescending( + factory => factory.Point( + // Pass the path to the document fields containing the spatial data + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude + ), + // Sort the results by their distance (descending) from this point: + 47.623473, -122.3060097) + .ToListAsync(); +`} + + + + +{`// Return all employee entities sorted by their distance from a specified point. +// The farthest results will be listed first. 
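+// Note: the distance of each result from the specified point is available
+// in the '@spatial' metadata property (see 'Get resulting distance' below).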
+ +List employeesSortedByDistanceDesc = session.Advanced + .DocumentQuery() + // Call 'OrderByDistanceDescending' + .OrderByDistanceDescending( + factory => factory.Point( + // Pass the path to the document fields containing the spatial data + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude + ), + // Sort the results by their distance (descending) from this point: + 47.623473, -122.3060097) + .ToList(); +`} + + + + +{`// Return all employee entities sorted by their distance from a specified point. +// The farthest results will be listed first. + +from Employees +order by spatial.distance( + spatial.point(Address.Location.Latitude, Address.Location.Longitude), + spatial.point(47.623473, -122.3060097) +) desc +`} + + + +#### Sort results by rounded distance + + + + +{`// Return all employee entities. +// Results are sorted by their distance to a specified point rounded to the nearest 100 km interval. +// A secondary sort can be applied within the 100 km range, e.g. by field LastName. + +List employeesSortedByRoundedDistance = session + .Query() + // Call 'OrderByDistance' + .OrderByDistance( + factory => factory.Point( + // Pass the path to the document fields containing the spatial data + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude) + // Round up distance to 100 km + .RoundTo(100), + // Sort the results by their distance from this point: + 47.623473, -122.3060097) + // A secondary sort can be applied + .ThenBy(x => x.LastName) + .ToList(); +`} + + + + +{`// Return all employee entities. +// Results are sorted by their distance to a specified point rounded to the nearest 100 km interval. +// A secondary sort can be applied within the 100 km range, e.g. by field LastName. + +List employeesSortedByRoundedDistance = await asyncSession + .Query() + // Call 'OrderByDistance' + .OrderByDistance( + factory => factory.Point( + // Pass the path to the document fields containing the spatial data + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude) + // Round up distance to 100 km + .RoundTo(100), + // Sort the results by their distance from this point: + 47.623473, -122.3060097) + // A secondary sort can be applied + .ThenBy(x => x.LastName) + .ToListAsync(); +`} + + + + +{`// Return all employee entities. +// Results are sorted by their distance to a specified point rounded to the nearest 100 km interval. +// A secondary sort can be applied within the 100 km range, e.g. by field LastName. + +List employeesSortedByRoundedDistance = session.Advanced + .DocumentQuery() + // Call 'OrderByDistance' + .OrderByDistance( + factory => factory.Point( + // Pass the path to the document fields containing the spatial data + x => x.Address.Location.Latitude, + x => x.Address.Location.Longitude) + // Round up distance to 100 km + .RoundTo(100), + // Sort the results by their distance from this point: + 47.623473, -122.3060097) + // A secondary sort can be applied + .OrderBy(x => x.LastName) + .ToList(); +`} + + + + +{`// Return all employee entities. +// Results are sorted by their distance to a specified point rounded to the nearest 100 km interval. +// A secondary sort can be applied within the 100 km range, e.g. by field LastName. + +from Employees +order by spatial.distance( + spatial.point(Address.Location.Latitude, Address.Location.Longitude), + spatial.point(47.623473, -122.3060097), + 100 +), LastName +`} + + + +#### Get resulting distance + +* The distance is available in the `@spatial` metadata property within each result. 
+* Note the following difference between the underlying search engines:
+    * When using **Lucene**:
+      This metadata property is always available in the results.
+    * When using **Corax**:
+      To enhance performance, this property is not included in the results by default.
+      To get this metadata property you must set the [Indexing.Corax.IncludeSpatialDistance](../../../server/configuration/indexing-configuration.mdx#indexingcoraxincludespatialdistance) configuration value to _true_.
+      Learn about the available methods for setting an indexing configuration key in the [indexing configuration](../../../server/configuration/indexing-configuration.mdx) article.
+
+
+
+{`// Get the distance of the results:
+// ================================
+
+// Call 'GetMetadataFor', pass an entity from the resulting employees list
+var metadata = session.Advanced.GetMetadataFor(employeesSortedByDistance[0]);
+
+// The distance is available in the '@spatial' metadata property
+var spatialResults = (IDictionary<string, object>)metadata[Constants.Documents.Metadata.SpatialResult];
+
+var distance = spatialResults["Distance"];   // The distance of the entity from the queried location
+var latitude = spatialResults["Latitude"];   // The entity's latitude value
+var longitude = spatialResults["Longitude"]; // The entity's longitude value
+`}
+
+
+
+
+
+## Spatial API
+
+#### `Spatial`
+
+
+
+{`IRavenQueryable<T> Spatial<T>(
+    Expression<Func<T, object>> path,
+    Func<SpatialCriteriaFactory, SpatialCriteria> clause);
+
+IRavenQueryable<T> Spatial<T>(
+    string fieldName,
+    Func<SpatialCriteriaFactory, SpatialCriteria> clause);
+
+IRavenQueryable<T> Spatial<T>(
+    Func<DynamicSpatialFieldFactory<T>, DynamicSpatialField> field,
+    Func<SpatialCriteriaFactory, SpatialCriteria> clause);
+
+IRavenQueryable<T> Spatial<T>(
+    DynamicSpatialField field,
+    Func<SpatialCriteriaFactory, SpatialCriteria> clause);
+`}
+
+
+
+| Parameters | Type | Description |
+|---------------|---------------------------------------------------------------|------------------------------------------------------|
+| **path** | `Expression<Func<T, object>>` | Path to spatial field in an index
(when querying an index) | +| **fieldName** | `string` | Path to spatial field in an index
(when querying an index) |
+| **field** | `Func<DynamicSpatialFieldFactory<T>, DynamicSpatialField>`
or
`DynamicSpatialField` | Factory or field that points to a document field
(when making a dynamic query).
Either `PointField` or `WktField`. |
+| **clause** | `Func<SpatialCriteriaFactory, SpatialCriteria>` | Spatial criteria that will be executed on a given spatial field |
+#### `DynamicSpatialFieldFactory`
+
+
+
+{`PointField Point(
+    Expression<Func<T, object>> latitudePath,
+    Expression<Func<T, object>> longitudePath);
+
+WktField Wkt(Expression<Func<T, object>> wktPath);
+`}
+
+
+
+| Parameters | Type | Description |
+|----------------------------------------------------|-------------------------------|------------------------------------------------------------------------------------|
+| **latitudePath** / **longitudePath** / **wktPath** | `Expression<Func<T, object>>` | Path to the field in a document containing either the longitude, latitude, or WKT |
+#### `SpatialCriteriaFactory`
+
+
+
+{`SpatialCriteria RelatesToShape(
+    string shapeWkt,
+    SpatialRelation relation,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria RelatesToShape(
+    string shapeWkt,
+    SpatialRelation relation,
+    SpatialUnits units,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria Intersects(
+    string shapeWkt,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria Intersects(
+    string shapeWkt,
+    SpatialUnits units,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria Contains(
+    string shapeWkt,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria Contains(
+    string shapeWkt,
+    SpatialUnits units,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria Disjoint(
+    string shapeWkt,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria Disjoint(
+    string shapeWkt,
+    SpatialUnits units,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria Within(
+    string shapeWkt,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria Within(
+    string shapeWkt,
+    SpatialUnits units,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+
+SpatialCriteria WithinRadius(
+    double radius,
+    double latitude,
+    double longitude,
+    SpatialUnits? radiusUnits = null,
+    double distErrorPercent = Constants.Documents.Indexing.Spatial.DefaultDistanceErrorPct);
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------------------------------|-------------------|---------------------------------------------------------------------------------------------------------------------|
+| **shapeWkt** | `string` | [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry)-based shape used in query criteria |
+| **relation** | `SpatialRelation` | Relation of the shape to the spatial data in the document/index.
Can be `Within`, `Contains`, `Disjoint`, `Intersects`. |
+| **distErrorPercent** | `double` | Maximum distance error tolerance, as a percentage. Default: 0.025 |
+| **radius** / **latitude** / **longitude** | `double` | Used to define the radius of a circle |
+| **radiusUnits** / **units** | `SpatialUnits` | Determines whether the circle or shape should be calculated in `Kilometers` or `Miles`.
By default, distances are measured in kilometers. | +#### `OrderByDistance` + + + +{`// From point +IOrderedQueryable OrderByDistance( + Func, DynamicSpatialField> field, + double latitude, + double longitude); + +IOrderedQueryable OrderByDistance( + DynamicSpatialField field, + double latitude, + double longitude); + +IOrderedQueryable OrderByDistance( + Expression> path, + double latitude, + double longitude); + +IOrderedQueryable OrderByDistance( + string fieldName, + double latitude, + double longitude); + +// From center of WKT shape +IOrderedQueryable OrderByDistance( + Func, DynamicSpatialField> field, + string shapeWkt); + +IOrderedQueryable OrderByDistance( + DynamicSpatialField field, + string shapeWkt); + +IOrderedQueryable OrderByDistance( + Expression> path, + string shapeWkt); + +IOrderedQueryable OrderByDistance( + string fieldName, + string shapeWkt); + +// Rounding +IOrderedQueryable OrderByDistance( + Expression> path, + double latitude, + double longitude, + double roundFactor); + +IOrderedQueryable OrderByDistance( + string fieldName, + double latitude, + double longitude, + double roundFactor); + +IOrderedQueryable OrderByDistance( + Expression> path, + string shapeWkt, + double roundFactor); + +IOrderedQueryable OrderByDistance( + string fieldName, + string shapeWkt, + double roundFactor); +`} + + +#### `OrderByDistanceDescending` + + + +{`// From point +IOrderedQueryable OrderByDistanceDescending( + Func, DynamicSpatialField> field, + double latitude, + double longitude); + +IOrderedQueryable OrderByDistanceDescending( + DynamicSpatialField field, + double latitude, + double longitude); + +IOrderedQueryable OrderByDistanceDescending( + Expression> path, + double latitude, + double longitude); + +IOrderedQueryable OrderByDistanceDescending( + string fieldName, + double latitude, + double longitude); + +// From center of WKT shape +IOrderedQueryable OrderByDistanceDescending( + Func, DynamicSpatialField> field, + string shapeWkt); + +IOrderedQueryable OrderByDistanceDescending( + DynamicSpatialField field, + string shapeWkt); + +IOrderedQueryable OrderByDistanceDescending( + Expression> path, + string shapeWkt); + +IOrderedQueryable OrderByDistanceDescending( + string fieldName, + string shapeWkt); + +// Rounding +IOrderedQueryable OrderByDistanceDescending( + Expression> path, + double latitude, + double longitude, + double roundFactor); + +IOrderedQueryable OrderByDistanceDescending( + string fieldName, + double latitude, + double longitude, + double roundFactor); + +IOrderedQueryable OrderByDistanceDescending( + Expression> path, + string shapeWkt, + double roundFactor); + +IOrderedQueryable OrderByDistanceDescending( + string fieldName, + string shapeWkt, + double roundFactor); +`} + + + +| Parameter | Type | Description | +|------------------------------|--------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **path** | `Expression>` | Path to spatial field in index
(when querying an index) | +| **fieldName** | `string` | Path to spatial field in index
(when querying an index) | +| **field** | `Func, DynamicSpatialField>`
or
`DynamicSpatialField` | Factory or field that points to a document field
(when making a dynamic query).
Either `PointField` or `WktField`. | +| **shapeWkt** | `string` | [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry)-based shape to be used as a point from which distance will be measured. If the shape is not a single point, then the center of the shape will be used as a reference. | +| **latitude** / **longitude** | `double` | Used to define a point from which distance will be measured | +| **roundFactor** | `double` | A distance interval in kilometers.
The distance from the point is rounded up to the nearest interval.
The results within the same interval can be sorted by a secondary order.
If no other order was specified, then by ascending order of document Id. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-java.mdx
new file mode 100644
index 0000000000..0394bb5a15
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-java.mdx
@@ -0,0 +1,272 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Spatial indexes can be queried using the `spatial` method, which provides a full spectrum of spatial functionality. This article covers the following methods:
+
+- [Spatial](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial)
+- [OrderByDistance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance)
+- [OrderByDistanceDescending](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedescending)
+
+## Spatial
+
+
+
+{`IDocumentQuery<T> spatial(String fieldName, Function<SpatialCriteriaFactory, SpatialCriteria> clause);
+
+IDocumentQuery<T> spatial(DynamicSpatialField field, Function<SpatialCriteriaFactory, SpatialCriteria> clause);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **fieldName** | String | Path to spatial field in an index |
+| **field** | DynamicSpatialField | Field that points to a dynamic field (used with auto-indexes). Either `PointField` or `WktField` |
+| **clause** | Function<SpatialCriteriaFactory, SpatialCriteria> | Spatial criteria |
+
+### DynamicSpatialField
+
+
+
+{`public PointField(String latitude, String longitude)
+
+public WktField(String wkt)
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **latitude** or **longitude** or **wkt** | String | Path to the field in a document containing either the longitude, latitude, or WKT |
+
+### SpatialCriteriaFactory
+
+
+
+{`SpatialCriteria relatesToShape(String shapeWkt, SpatialRelation relation);
+
+SpatialCriteria relatesToShape(String shapeWkt, SpatialRelation relation, double distErrorPercent);
+
+SpatialCriteria intersects(String shapeWkt);
+
+SpatialCriteria intersects(String shapeWkt, double distErrorPercent);
+
+SpatialCriteria contains(String shapeWkt);
+
+SpatialCriteria contains(String shapeWkt, double distErrorPercent);
+
+SpatialCriteria disjoint(String shapeWkt);
+
+SpatialCriteria disjoint(String shapeWkt, double distErrorPercent);
+
+SpatialCriteria within(String shapeWkt);
+
+SpatialCriteria within(String shapeWkt, double distErrorPercent);
+
+SpatialCriteria withinRadius(double radius, double latitude, double longitude);
+
+SpatialCriteria withinRadius(double radius, double latitude, double longitude, SpatialUnits radiusUnits);
+
+SpatialCriteria withinRadius(double radius, double latitude, double longitude, SpatialUnits radiusUnits, double distErrorPercent);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **shapeWkt** | String | [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry)-based shape to be used in the operation |
+| **relation** | SpatialRelation | Shape relation. Can be `WITHIN`, `CONTAINS`, `DISJOINT`, `INTERSECTS` |
+| **distErrorPercent** | double | Maximum distance error tolerance, as a percentage.
Default: 0.025 |
+| **radius** or **latitude** or **longitude** | double | Used to define the radius of a circle |
+| **radiusUnits** | SpatialUnits | Determines whether the circle should be calculated in `KILOMETERS` or `MILES` units |
+
+
+When using `spatial.wkt()` to define a **polygon**, the vertices (points that form the corners of the polygon) must be listed
+in a counter-clockwise order:
+
+
+
+![NoSQL DB - Query a Spatial Index](./assets/spatial_1.png)
+
+
+### Example I
+
+
+
+
+{`// return all matching entities
+// within 10 kilometers radius
+// from 32.1234 latitude and 23.4321 longitude coordinates
+List<House> results = session
+    .query(House.class)
+    .spatial(
+        new PointField("latitude", "longitude"),
+        f -> f.withinRadius(10, 32.1234, 23.4321))
+    .toList();
+`}
+
+
+
+
+{`from Houses
+where spatial.within(spatial.point(Latitude, Longitude), spatial.circle(10, 32.1234, 23.4321))
+`}
+
+
+
+
+### Example II
+
+
+
+
+{`// return all matching entities
+// within 10 kilometers radius
+// from 32.1234 latitude and 23.4321 longitude coordinates
+// this is equivalent to withinRadius(10, 32.1234, 23.4321)
+List<House> results = session
+    .query(House.class)
+    .spatial(
+        new PointField("latitude", "longitude"),
+        f -> f.relatesToShape("Circle(32.1234 23.4321 d=10.0000)", SpatialRelation.WITHIN)
+    )
+    .toList();
+`}
+
+
+
+
+{`from Houses
+where spatial.within(spatial.point(Latitude, Longitude), spatial.wkt('Circle(32.1234 23.4321 d=10.0000)'))
+`}
+
+
+
+
+
+
+## OrderByDistance
+
+To sort by distance from a given point, use the `orderByDistance` method. The closest results will come first.
+
+
+
+{`IDocumentQuery<T> orderByDistance(DynamicSpatialField field, double latitude, double longitude);
+
+IDocumentQuery<T> orderByDistance(DynamicSpatialField field, String shapeWkt);
+
+IDocumentQuery<T> orderByDistance(String fieldName, double latitude, double longitude);
+
+IDocumentQuery<T> orderByDistance(String fieldName, String shapeWkt);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **fieldName** | String | Path to spatial field in index |
+| **field** | DynamicSpatialField | Field that points to a dynamic field (used with auto-indexes). Either `PointField` or `WktField` |
+| **shapeWkt** | String | [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry)-based shape to be used as a point from which distance will be measured. If the shape is not a single point, then the center of the shape will be used as a reference. |
+| **latitude** or **longitude** | double | Used to define a point from which distance will be measured |
+
+### Example
+
+
+
+
+{`// return all matching entities
+// within 10 kilometers radius
+// from 32.1234 latitude and 23.4321 longitude coordinates
+// sort results by distance from 32.1234 latitude and 23.4321 longitude point
+List<House> results = session
+    .query(House.class)
+    .spatial(
+        new PointField("latitude", "longitude"),
+        f -> f.withinRadius(10, 32.1234, 23.4321)
+    )
+    .orderByDistance(
+        new PointField("latitude", "longitude"),
+        32.1234, 23.4321)
+    .toList();
+`}
+
+
+
+
+{`from Houses
+where spatial.within(spatial.point(Latitude, Longitude), spatial.circle(10, 32.1234, 23.4321))
+order by spatial.distance(spatial.point(Latitude, Longitude), spatial.point(32.1234, 23.4321))
+`}
+
+
+
+
+
+
+## OrderByDistanceDescending
+
+To sort by distance from a given point, use the `orderByDistanceDescending` method. The farthest results will come first.
+
+
+
+{`IDocumentQuery<T> orderByDistanceDescending(DynamicSpatialField field, double latitude, double longitude);
+
+IDocumentQuery<T> orderByDistanceDescending(DynamicSpatialField field, String shapeWkt);
+
+IDocumentQuery<T> orderByDistanceDescending(String fieldName, double latitude, double longitude);
+
+IDocumentQuery<T> orderByDistanceDescending(String fieldName, String shapeWkt);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **fieldName** | String | Path to spatial field in index |
+| **field** | DynamicSpatialField | Field that points to a dynamic field (used with auto-indexes). Either `PointField` or `WktField` |
+| **shapeWkt** | String | WKT-based shape to be used as a point from which distance will be measured. If the shape is not a single point, then the center of the shape will be used as a reference. |
+| **latitude** or **longitude** | double | Used to define a point from which distance will be measured |
+
+### Example
+
+
+
+
+{`// return all matching entities
+// within 10 kilometers radius
+// from 32.1234 latitude and 23.4321 longitude coordinates
+// sort results by distance from 32.1234 latitude and 23.4321 longitude point
+List<House> results = session
+    .query(House.class)
+    .spatial(
+        new PointField("latitude", "longitude"),
+        f -> f.withinRadius(10, 32.1234, 23.4321)
+    )
+    .orderByDistanceDescending(
+        new PointField("latitude", "longitude"),
+        32.1234, 23.4321)
+    .toList();
+`}
+
+
+
+
+{`from Houses
+where spatial.within(spatial.point(Latitude, Longitude), spatial.circle(10, 32.1234, 23.4321))
+order by spatial.distance(spatial.point(Latitude, Longitude), spatial.point(32.1234, 23.4321)) desc
+`}
+
+
+
+
+
+
+## Remarks
+
+
+By default, distances are measured in **kilometers**.
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-nodejs.mdx
new file mode 100644
index 0000000000..f63a3d807a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-nodejs.mdx
@@ -0,0 +1,514 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Documents that contain spatial data can be queried with spatial queries that employ geographical criteria.
+  You can use either a _Dynamic spatial query_ or a _Spatial index query_.
+
+    * **Dynamic spatial query**
+      Make a dynamic spatial query on a collection (described below).
+      An auto-index will be created by the server.
+
+    * **Spatial index query**
+      Index your documents' spatial data in a static-index (see [indexing spatial data](../../../indexes/indexing-spatial-data.mdx))
+      and then make a spatial query on this index (see [query a spatial index](../../../indexes/querying/spatial.mdx)).
+
+* To perform a spatial search,
+  use the `spatial` method, which provides a wide range of spatial functionalities.
+
+* When making a dynamic spatial query from Studio,
+  results are also displayed on the global map. See [spatial queries map view](../../../studio/database/queries/spatial-queries-map-view.mdx).
+* In this page: + + * [Search by radius](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-radius) + * [Search by shape](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape) + * [Circle](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#circle) + * [Polygon](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#polygon) + * [Spatial sorting](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting) + * [Order by distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance) + * [Order by distance desc](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedesc) + * [Rounded distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#roundeddistance) + * [Get resulting distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#getresultingdistance) + * [Spatial API](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-api) + + +## Search by radius + +Use the `withinRadius` method to search for all documents containing spatial data that is located +within the specified distance from the given center point. + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Define a dynamic query on 'employees' collection +const employeesWithinRadius = await session + .query({ collection: "employees" }) + // Call 'spatial' method + .spatial( + // Specify the document fields containing the spatial data + new PointField("address.location.latitude", "address.location.longitude"), + // Set the geographical area in which to search for matching documents + // Call 'withinRadius', pass the radius and the center points coordinates + criteria => criteria.withinRadius(20, 47.623473, -122.3060097)) + .all(); +`} + + + + +{`// This query will return all matching employee entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +from "employees" +where spatial.within( + spatial.point(address.location.latitude, address.location.longitude), + spatial.circle(20, 47.623473, -122.3060097) +) +`} + + + + + + +## Search by shape + +* Use the `relatesToShape` method to search for all documents containing spatial data that is located + in the specified relation to the given shape. + +* The shape is specified as either a __circle__ or a __polygon__ in a [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry) format. + +* The relation to the shape can be one of: `Within`, `Contains`, `Disjoint`, `Intersects`. + + + +
__Circle__:
+
+
+
+
+{`// This query will return all matching employee entities
+// that are located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+// Define a dynamic query on 'employees' collection
+const employeesWithinShape = await session
+    .query({ collection: "employees" })
+    // Call 'spatial' method
+    .spatial(
+        // Specify the document fields containing the spatial data
+        new PointField("address.location.latitude", "address.location.longitude"),
+        // Set the geographical search criteria, call 'relatesToShape'
+        criteria => criteria.relatesToShape(
+            // Specify the WKT string. Note: longitude is written FIRST
+            "CIRCLE(-122.3060097 47.623473 d=20)",
+            // Specify the relation between the WKT shape and the documents spatial data
+            "Within",
+            // Customize radius units (default is Kilometers) and error percentage (Optional)
+            "Miles",
+            0))
+    .all();
+`}
+
+
+
+
+{`// This query will return all matching employee entities
+// that are located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+from "employees"
+where spatial.within(
+    spatial.point(address.location.latitude, address.location.longitude),
+    spatial.wkt("CIRCLE(-122.3060097 47.623473 d=20)", "miles")
+)
+`}
+
+
+
+
+
+
+
+
+__Polygon__:
+
+
+
+
+{`// This query will return all matching company entities
+// that are located within the specified polygon.
+
+// Define a dynamic query on 'companies' collection
+const companiesWithinShape = await session
+    .query({ collection: "companies" })
+    // Call 'spatial' method
+    .spatial(
+        // Specify the document fields containing the spatial data
+        new PointField("address.location.latitude", "address.location.longitude"),
+        // Set the geographical search criteria, call 'relatesToShape'
+        criteria => criteria.relatesToShape(
+            // Specify the WKT string
+            \`POLYGON ((
+                -118.6527948 32.7114894,
+                -95.8040242 37.5929338,
+                -102.8344151 53.3349629,
+                -127.5286633 48.3485664,
+                -129.4620208 38.0786067,
+                -118.7406746 32.7853769,
+                -118.6527948 32.7114894
+            ))\`,
+            // Specify the relation between the WKT shape and the documents spatial data
+            "Within"))
+    .all();
+`}
+
+
+
+
+{`// This query will return all matching company entities
+// that are located within the specified polygon.
+
+from "companies"
+where spatial.within(
+    spatial.point(address.location.latitude, address.location.longitude),
+    spatial.wkt("POLYGON ((
+        -118.6527948 32.7114894,
+        -95.8040242 37.5929338,
+        -102.8344151 53.3349629,
+        -127.5286633 48.3485664,
+        -129.4620208 38.0786067,
+        -118.7406746 32.7853769,
+        -118.6527948 32.7114894))")
+)
+`}
+
+
+
+
+
+
+__Polygon rules__:
+
+* The polygon's coordinates must be provided in counterclockwise order.
+
+* The first and last coordinates must mark the same location to form a closed region.
+
+![WKT polygon](./assets/spatial_1.png)
+
+
+
+
+
+
+
+## Spatial sorting
+
+* Use `orderByDistance` or `orderByDistanceDescending` to sort the results by distance from a given point.
+
+* By default, distance is measured by RavenDB in **kilometers**.
+  The distance can be rounded to a specific range.
+
+
+
+__Order by distance__:
+
+
+
+
+{`// Return all matching employee entities located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+// Sort the results by their distance from a specified point,
+// the closest results will be listed first.
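+// Note: by default, the distance is measured in kilometers.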
+
+const employeesSortedByDistance = await session
+    .query({ collection: "employees" })
+    // Provide the query criteria:
+    .spatial(
+        new PointField("address.location.latitude", "address.location.longitude"),
+        criteria => criteria.withinRadius(20, 47.623473, -122.3060097))
+    // Call 'orderByDistance'
+    .orderByDistance(
+        // Specify the document fields containing the spatial data
+        new PointField("address.location.latitude", "address.location.longitude"),
+        // Sort the results by their distance from this point:
+        47.623473, -122.3060097)
+    .all();
+`}
+
+
+
+{`// Return all matching employee entities located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+// Sort the results by their distance from a specified point,
+// the closest results will be listed first.
+
+from "employees"
+where spatial.within(
+    spatial.point(address.location.latitude, address.location.longitude),
+    spatial.circle(20, 47.623473, -122.3060097)
+)
+order by spatial.distance(
+    spatial.point(address.location.latitude, address.location.longitude),
+    spatial.point(47.623473, -122.3060097)
+)
+`}
+
+
+
+
+
+
+
+
+    __Order by distance descending__:
+
+
+
+
+{`// Return all employee entities sorted by their distance from a specified point.
+// The farthest results will be listed first.
+
+const employeesSortedByDistanceDesc = await session
+    .query({ collection: "employees" })
+    // Call 'orderByDistanceDescending'
+    .orderByDistanceDescending(
+        // Specify the document fields containing the spatial data
+        new PointField("address.location.latitude", "address.location.longitude"),
+        // Sort the results by their distance (descending) from this point:
+        47.623473, -122.3060097)
+    .all();
+`}
+
+
+
+{`// Return all employee entities sorted by their distance from a specified point.
+// The farthest results will be listed first.
+
+from "employees"
+order by spatial.distance(
+    spatial.point(address.location.latitude, address.location.longitude),
+    spatial.point(47.623473, -122.3060097)
+) desc
+`}
+
+
+
+
+
+
+
+
+    __Sort results by rounded distance__:
+
+
+
+
+{`// Return all employee entities.
+// Results are sorted by their distance to a specified point rounded to the nearest 100 km interval.
+// A secondary sort can be applied within the 100 km range, e.g. by field lastName.
+
+const employeesSortedByRoundedDistance = await session
+    .query({ collection: "employees" })
+    // Call 'orderByDistance'
+    .orderByDistance(
+        // Specify the document fields containing the spatial data
+        new PointField("address.location.latitude", "address.location.longitude")
+            // Round up distance to 100 km
+            .roundTo(100),
+        // Sort the results by their rounded distance from this point:
+        47.623473, -122.3060097)
+    // A secondary sort can be applied
+    .orderBy("lastName")
+    .all();
+`}
+
+
+
+{`// Return all employee entities.
+// Results are sorted by their distance to a specified point rounded to the nearest 100 km interval.
+// A secondary sort can be applied within the 100 km range, e.g. by field lastName.
+
+from "employees"
+order by spatial.distance(
+    spatial.point(address.location.latitude, address.location.longitude),
+    spatial.point(47.623473, -122.3060097),
+    100
+), lastName
+`}
+
+
+
+
+
+
+
+
+    __Get resulting distance__:
+
+* The distance is available in the `@spatial` metadata property within each result.
+
+* Note the following difference between the underlying search engines:
+
+  * When using __Lucene__:
+    This metadata property is always available in the results.
+
+  * When using __Corax__:
+    In order to enhance performance, this property is not included in the results by default.
+    To get this metadata property you must set the [Indexing.Corax.IncludeSpatialDistance](../../../server/configuration/indexing-configuration.mdx#indexingcoraxincludespatialdistance) configuration value to _true_.
+    Learn about the available methods for setting an indexing configuration key in the [indexing configuration](../../../server/configuration/indexing-configuration.mdx) article.
+
+
+
+{`// Get the distance of the results:
+// ================================
+
+// Call 'getMetadataFor', pass an entity from the resulting employees list
+const metadata = session.advanced.getMetadataFor(employeesSortedByDistance[0]);
+
+// The distance is available in the '@spatial' metadata property
+const spatialResults = metadata["@spatial"];
+
+const distance = spatialResults.Distance;   // The distance of the entity from the queried location
+const latitude = spatialResults.Latitude;   // The entity's latitude value
+const longitude = spatialResults.Longitude; // The entity's longitude value
+`}
+
+
+
+
+
+
+
+## Spatial API
+
+#### spatial
+
+
+
+
+{`spatial(fieldName, clause);
+spatial(field, clause);
+`}
+
+
+
+| Parameters    | Type                                           | Description |
+|---------------|------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|
+| __fieldName__ | `string` | Path to spatial field in an index
(when querying an index). | +| __field__ | `DynamicSpatialField` | Object that contains the document's spatial fields,
either `PointField` or `WktField`
(when making a dynamic query). |
+| __clause__    | `(SpatialCriteriaFactory) => SpatialCriteria` | Spatial criteria that will be executed on a given spatial field. |
+
+#### DynamicSpatialField
+
+
+
+
+{`class PointField \{
+    latitude;
+    longitude;
+\}
+
+class WktField \{
+    wkt;
+\}
+`}
+
+
+
+| Parameters    | Type     | Description                                              |
+|---------------|----------|----------------------------------------------------------|
+| __latitude__  | `string` | Path to the document field that contains the latitude    |
+| __longitude__ | `string` | Path to the document field that contains the longitude   |
+| __wkt__       | `string` | Path to the document field that contains the WKT string  |
+
+#### SpatialCriteriaFactory
+
+
+
+
+{`relatesToShape(shapeWkt, relation);
+relatesToShape(shapeWkt, relation, units, distErrorPercent);
+intersects(shapeWkt);
+intersects(shapeWkt, units);
+intersects(shapeWkt, distErrorPercent);
+intersects(shapeWkt, units, distErrorPercent);
+contains(shapeWkt);
+contains(shapeWkt, units);
+contains(shapeWkt, distErrorPercent);
+contains(shapeWkt, units, distErrorPercent);
+disjoint(shapeWkt);
+disjoint(shapeWkt, units);
+disjoint(shapeWkt, distErrorPercent);
+disjoint(shapeWkt, units, distErrorPercent);
+within(shapeWkt);
+within(shapeWkt, units);
+within(shapeWkt, distErrorPercent);
+within(shapeWkt, units, distErrorPercent);
+withinRadius(radius, latitude, longitude);
+withinRadius(radius, latitude, longitude, radiusUnits);
+withinRadius(radius, latitude, longitude, radiusUnits, distErrorPercent);
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------------------------------------|----------|-------------------------------------------------------------------------------------------------------------------------------------|
+| __shapeWkt__ | `string` | [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry)-based shape used in query criteria |
+| __relation__ | `string` | Relation of the shape to the spatial data in the document/index.
Can be `Within`, `Contains`, `Disjoint`, `Intersects`. |
+| __distErrorPercent__                       | `number` | Maximum distance error tolerance in percent. Default: 0.025 |
+| __radius__ / __latitude__ / __longitude__  | `number` | Used to define the radius of a circle |
+| __radiusUnits__ / __units__                | `string` | Determines whether the circle or shape should be calculated in `Kilometers` or `Miles`.
By default, distances are measured in kilometers. | + +#### orderByDistance + + + + +{`orderByDistance(field, latitude, longitude); +orderByDistance(field, shapeWkt); +orderByDistance(fieldName, latitude, longitude); +orderByDistance(fieldName, latitude, longitude, roundFactor: number); +orderByDistance(fieldName, shapeWkt); +`} + + + +#### orderByDistanceDescending + + + + +{`orderByDistanceDescending(field, latitude, longitude); +orderByDistanceDescending(field, shapeWkt); +orderByDistanceDescending(fieldName, latitude, longitude); +orderByDistanceDescending(fieldName, latitude, longitude, roundFactor); +orderByDistanceDescending(fieldName, shapeWkt); +`} + + + +| Parameter | Type | Description | +|------------------------------|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| __fieldName__ | `string` | Path to spatial field in index
(when querying an index). | +| __field__ | `DynamicSpatialField` | Object that contains the document's spatial fields,
either `PointField` or `WktField`
(when making a dynamic query). | +| __shapeWkt__ | `string` | [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry)-based shape to be used as a point from which distance will be measured. If the shape is not a single point, then the center of the shape will be used as a reference. | +| __latitude__ / __longitude__ | `number` | Used to define a point from which distance will be measured | +| __roundFactor__ | `number` | A distance interval in kilometers.
The distance from the point is rounded up to the nearest interval.
The results within the same interval can be sorted by a secondary order.
If no other order was specified, then by ascending order of document Id. | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-python.mdx new file mode 100644 index 0000000000..f970fa79dd --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-make-a-spatial-query-python.mdx @@ -0,0 +1,539 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Documents that contain spatial data can be queried by spatial queries that employ geographical criteria. + You can use either _Dynamic spatial query_ or _Spatial index query_. + + * **Dynamic spatial query** + Make a dynamic spatial query on a collection (described below). + An auto-index will be created by the server. + + * **Spatial index query** + Index your documents' spatial data in a static-index (see [indexing spatial data](../../../indexes/indexing-spatial-data.mdx)) + and then make a spatial query on this index (see [query a spatial index](../../../indexes/querying/spatial.mdx)). + +* To perform a spatial search, + use the `spatial` method, which provides a wide range of spatial functionalities. + +* When making a dynamic spatial query from Studio, + results are also displayed on the global map. See [spatial queries map view](../../../studio/database/queries/spatial-queries-map-view.mdx). +* In this page: + + * [Search by radius](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-radius) + * [Search by shape](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape) + * [Circle](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#circle) + * [Polygon](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#polygon) + * [Spatial sorting](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting) + * [Order by distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#order-by-distance) + * [Order by distance descending](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#order-by-distance-descending) + * [Sort results by rounded distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#sort-results-by-rounded-distance) + * [Get resulting distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#get-resulting-distance) + * [Spatial API](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-api) + * [`spatial`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section) + * [`DynamicSpatialField`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section-1) + * [`SpatialCriteria`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section-2) + * [`order_by_distance`, `order_by_distance_wkt`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section-3) + * [`order_by_distance_descending`, `order_by_distance_descending_wkt`](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#section-4) + + +## Search by radius + +Use the `within_radius` method to search for all documents containing spatial data that is located +within the specified distance from the given center point. 
+
+
+
+
+{`# This query will return all matching employee entities
+# that are located within 20 kilometers radius
+# from point (47.623473 latitude, -122.3060097 longitude).
+
+# Define a query on Employees collection
+employees_within_radius = list(
+    session.query(object_type=Employee)
+    # Call 'spatial' method
+    .spatial(
+        # Create 'PointField'
+        # Pass the path to document fields containing the spatial data
+        PointField("Address.Location.Latitude", "Address.Location.Longitude"),
+        # Set the geographical area in which to search for matching documents
+        # Call 'within_radius', pass the radius and the center point's coordinates
+        lambda criteria: criteria.within_radius(20, 47.623473, -122.3060097),
+    )
+)
+`}
+
+
+
+{`// This query will return all matching employee entities
+// that are located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+from Employees
+where spatial.within(
+    spatial.point(Address.Location.Latitude, Address.Location.Longitude),
+    spatial.circle(20, 47.623473, -122.3060097)
+)
+`}
+
+
+
+
+
+## Search by shape
+
+* Use the `relates_to_shape` method to search for all documents containing spatial data that is located
+  in the specified relation to the given shape.
+
+* The shape is specified as either a **circle** or a **polygon** in a [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry) format.
+
+* The relation to the shape can be one of: `WITHIN`, `CONTAINS`, `DISJOINT`, `INTERSECTS`.
+
+#### Circle
+
+
+
+
+{`# This query will return all matching employee entities
+# that are located within 20 miles radius
+# from point (47.623473 latitude, -122.3060097 longitude).
+
+# Define a query on Employees collection
+employees_within_shape = list(
+    session.query(object_type=Employee)
+    # Call 'spatial' method
+    .spatial(
+        # Create 'PointField'
+        # Pass the path to document fields containing the spatial data
+        PointField("Address.Location.Latitude", "Address.Location.Longitude"),
+        # Set the geographical search criteria, call 'relates_to_shape'
+        lambda criteria: criteria.relates_to_shape(
+            # Specify the WKT string. Note: longitude is written FIRST
+            shape_wkt="CIRCLE(-122.3060097 47.623473 d=20)",
+            # Specify the relation between the WKT shape and the document's spatial data
+            relation=SpatialRelation.WITHIN,
+            # Optional: customize radius units (default is Kilometers)
+            units=SpatialUnits.MILES,
+        ),
+    )
+)
+`}
+
+
+
+{`// This query will return all matching employee entities
+// that are located within 20 miles radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+from Employees
+where spatial.within(
+    spatial.point(Address.Location.Latitude, Address.Location.Longitude),
+    spatial.wkt("CIRCLE(-122.3060097 47.623473 d=20)", "miles")
+)
+`}
+
+
+
+#### Polygon
+
+
+
+
+{`# This query will return all matching company entities
+# that are located within the specified polygon.
+
+# Define a query on Companies collection
+companies_within_shape = list(
+    session.query(object_type=Company)
+    # Call 'spatial' method
+    .spatial(
+        # Create 'PointField'
+        # Pass the path to document fields containing the spatial data
+        PointField("Address.Location.Latitude", "Address.Location.Longitude"),
+        # Set the geographical search criteria, call 'relates_to_shape'
+        lambda criteria: criteria.relates_to_shape(
+            # Specify the WKT string. Note: longitude is written FIRST
+            shape_wkt="""POLYGON ((
+                -118.6527948 32.7114894,
+                -95.8040242 37.5929338,
+                -102.8344151 53.3349629,
+                -127.5286633 48.3485664,
+                -129.4620208 38.0786067,
+                -118.7406746 32.7853769,
+                -118.6527948 32.7114894
+            ))""",
+            # Specify the relation between the WKT shape and the document's spatial data
+            relation=SpatialRelation.WITHIN,
+        ),
+    )
+)
+`}
+
+
+
+{`// This query will return all matching company entities
+// that are located within the specified polygon.
+
+from companies
+where spatial.within(
+    spatial.point(Address.Location.Latitude, Address.Location.Longitude),
+    spatial.wkt("POLYGON ((
+        -118.6527948 32.7114894,
+        -95.8040242 37.5929338,
+        -102.8344151 53.3349629,
+        -127.5286633 48.3485664,
+        -129.4620208 38.0786067,
+        -118.7406746 32.7853769,
+        -118.6527948 32.7114894))")
+)
+`}
+
+
+
+
+
+
+* The polygon's coordinates must be provided in counterclockwise order.
+
+* The first and last coordinates must mark the same location to form a closed region.
+
+![WKT polygon](./assets/spatial_1.png)
+
+
+
+
+
+## Spatial sorting
+
+* Use `order_by_distance` or `order_by_distance_descending` to sort the results by distance from a given point.
+
+* By default, distance is measured by RavenDB in **kilometers**.
+  The distance can be rounded to a specific range.
+
+#### Order by distance
+
+
+
+
+{`# Return all matching employee entities located within 20 kilometers radius
+# from point (47.623473 latitude, -122.3060097 longitude)
+
+# Sort the results by their distance from a specified point,
+# the closest results will be listed first.
+
+employees_sorted_by_distance = list(
+    session.query(object_type=Employee)
+    # Provide the query criteria:
+    .spatial(
+        PointField("Address.Location.Latitude", "Address.Location.Longitude"),
+        lambda criteria: criteria.within_radius(20, 47.623473, -122.3060097),
+    )
+    # Call 'order_by_distance'
+    .order_by_distance(
+        PointField("Address.Location.Latitude", "Address.Location.Longitude"), 47.623473, -122.3060097
+    )
+)
+`}
+
+
+
+{`// Return all matching employee entities located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+// Sort the results by their distance from a specified point,
+// the closest results will be listed first.
+
+from Employees
+where spatial.within(
+    spatial.point(Address.Location.Latitude, Address.Location.Longitude),
+    spatial.circle(20, 47.623473, -122.3060097)
+)
+order by spatial.distance(
+    spatial.point(Address.Location.Latitude, Address.Location.Longitude),
+    spatial.point(47.623473, -122.3060097)
+)
+`}
+
+
+
+#### Order by distance descending
+
+
+
+
+{`# Return all employee entities sorted by their distance from a specified point.
+# The farthest results will be listed first.
+
+employees_sorted_by_distance_desc = list(
+    session.query(object_type=Employee)
+    # Call 'order_by_distance_descending'
+    .order_by_distance_descending(
+        # Pass the path to document fields containing the spatial data
+        PointField("Address.Location.Latitude", "Address.Location.Longitude"),
+        # Sort the results by their distance (descending) from this point:
+        47.623473,
+        -122.3060097,
+    )
+)
+`}
+
+
+
+{`// Return all employee entities sorted by their distance from a specified point.
+// The farthest results will be listed first.
+
+from Employees
+order by spatial.distance(
+    spatial.point(Address.Location.Latitude, Address.Location.Longitude),
+    spatial.point(47.623473, -122.3060097)
+) desc
+`}
+
+
+
+#### Sort results by rounded distance
+
+
+
+
+{`# Return all employee entities.
+# Results are sorted by their distance to a specified point rounded to the nearest 100 km interval.
+# A secondary sort can be applied within the 100 km range, e.g. by field LastName.
+
+employees_sorted_by_rounded_distance = list(
+    session.query(object_type=Employee)
+    # Call 'order_by_distance'
+    .order_by_distance(
+        # Pass the path to the document fields containing the spatial data
+        PointField("Address.Location.Latitude", "Address.Location.Longitude")
+        # Round up distance to 100 km
+        .round_to(100),
+        # Sort the results by their rounded distance from this point:
+        47.623473,
+        -122.3060097,
+    )
+    # A secondary sort can be applied within the 100 km range
+    .order_by("LastName")
+)
+`}
+
+
+
+{`// Return all employee entities.
+// Results are sorted by their distance to a specified point rounded to the nearest 100 km interval.
+// A secondary sort can be applied within the 100 km range, e.g. by field LastName.
+
+from Employees
+order by spatial.distance(
+    spatial.point(Address.Location.Latitude, Address.Location.Longitude),
+    spatial.point(47.623473, -122.3060097),
+    100
+), LastName
+`}
+
+
+
+#### Get resulting distance
+
+* The distance is available in the `@spatial` metadata property within each result.
+* Note the following difference between the underlying search engines:
+  * When using **Lucene**:
+    This metadata property is always available in the results.
+  * When using **Corax**:
+    In order to enhance performance, this property is not included in the results by default.
+    To get this metadata property you must set the [Indexing.Corax.IncludeSpatialDistance](../../../server/configuration/indexing-configuration.mdx#indexingcoraxincludespatialdistance) configuration value to _true_.
+    Learn about the available methods for setting an indexing configuration key in the [indexing configuration](../../../server/configuration/indexing-configuration.mdx) article.
+
+
+
+{`# Get the distance of the results:
+# ================================
+
+# Call 'get_metadata_for', pass an entity from the resulting employees list
+metadata = session.advanced.get_metadata_for(employees_sorted_by_distance[0])
+
+# The distance is available in the '@spatial' metadata property
+spatial_results = metadata["@spatial"]
+
+distance = spatial_results["Distance"]    # The distance of the entity from the queried location
+latitude = spatial_results["Latitude"]    # The entity's latitude value
+longitude = spatial_results["Longitude"]  # The entity's longitude value
+`}
+
+
+
+
+## Spatial API
+
+#### `spatial`
+
+
+
+{`def spatial(
+    self,
+    field_name_or_field: Union[str, DynamicSpatialField],
+    clause: Callable[[SpatialCriteriaFactory], SpatialCriteria],
+): ...
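+
+# Example usage (dynamic query), mirroring the 'Search by radius' sample above:
+#   employees = list(
+#       session.query(object_type=Employee).spatial(
+#           PointField("Address.Location.Latitude", "Address.Location.Longitude"),
+#           lambda criteria: criteria.within_radius(20, 47.623473, -122.3060097),
+#       )
+#   )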
+
+`}
+
+
+
+| Parameters | Type | Description |
+|---------------|--------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|
+| **field_name_or_field** | `Union[str, DynamicSpatialField]` | `str` - Path to spatial field in an index (when querying an index)

**-or-**

`DynamicSpatialField` - Object that contains the document's spatial fields, either `PointField` or `WktField` (when making a dynamic query). | 
+| **clause** | `Callable[[SpatialCriteriaFactory], SpatialCriteria]` | Callback that receives a `SpatialCriteriaFactory` and uses it to build the `SpatialCriteria` for the query. |
+
+#### `DynamicSpatialField`
+
+
+
+{`class PointField(DynamicSpatialField):
+    def __init__(self, latitude: str, longitude: str): ...
+
+class WktField(DynamicSpatialField):
+    def __init__(self, wkt: str): ...
+`}
+
+
+
+| Parameters | Type | Description |
+|---------------|-------|-------------------------------------------------------------|
+| **latitude**  | `str` | Path to a document point field that contains the latitude   |
+| **longitude** | `str` | Path to a document point field that contains the longitude  |
+| **wkt**       | `str` | Path to a document wkt field that contains the WKT string   |
+
+#### `SpatialCriteria`
+
+
+
+{`def relates_to_shape(
+    self,
+    shape_wkt: str,
+    relation: SpatialRelation,
+    units: SpatialUnits = None,
+    dist_error_percent: Optional[float] = constants.Documents.Indexing.Spatial.DEFAULT_DISTANCE_ERROR_PCT,
+) -> SpatialCriteria: ...
+
+def intersects(
+    self,
+    shape_wkt: str,
+    units: Optional[SpatialUnits] = None,
+    dist_error_percent: Optional[float] = constants.Documents.Indexing.Spatial.DEFAULT_DISTANCE_ERROR_PCT,
+) -> SpatialCriteria: ...
+
+def contains(
+    self,
+    shape_wkt: str,
+    units: Optional[SpatialUnits] = None,
+    dist_error_percent: Optional[float] = constants.Documents.Indexing.Spatial.DEFAULT_DISTANCE_ERROR_PCT,
+) -> SpatialCriteria: ...
+
+def disjoint(
+    self,
+    shape_wkt: str,
+    units: Optional[SpatialUnits] = None,
+    dist_error_percent: Optional[float] = constants.Documents.Indexing.Spatial.DEFAULT_DISTANCE_ERROR_PCT,
+) -> SpatialCriteria: ...
+
+def within(
+    self,
+    shape_wkt: str,
+    units: Optional[SpatialUnits] = None,
+    dist_error_percent: Optional[float] = constants.Documents.Indexing.Spatial.DEFAULT_DISTANCE_ERROR_PCT,
+) -> SpatialCriteria: ...
+
+def within_radius(
+    self,
+    radius: float,
+    latitude: float,
+    longitude: float,
+    radius_units: Optional[SpatialUnits] = None,
+    dist_error_percent: Optional[float] = constants.Documents.Indexing.Spatial.DEFAULT_DISTANCE_ERROR_PCT,
+) -> SpatialCriteria: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|-------|--------------------|
+| **shape_wkt** | `str` | [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry)-based shape used in query criteria |
+| **relation** | `SpatialRelation` | Relation of the shape to the spatial data in the document/index.
Can be `WITHIN`, `CONTAINS`, `DISJOINT`, `INTERSECTS` | +| **units** / **radius_units** | `SpatialUnits` | Determines if circle or shape should be calculated in `KILOMETERS` or `MILES`.
By default, distances are measured in kilometers. |
+| **dist_error_percent** (Optional) | `float` | Maximum distance error tolerance in percent.
**Default: 0.025** |
+| **radius** / **latitude** / **longitude** | `float` | Used to define the radius of a circle |
+
+#### `order_by_distance`, `order_by_distance_wkt`
+
+
+
+{`# From point & rounding
+
+def order_by_distance(
+    self,
+    field_or_field_name: Union[str, DynamicSpatialField],
+    latitude: float,
+    longitude: float,
+    round_factor: Optional[float] = 0.0,
+) -> DocumentQuery[_T]: ...
+
+# From center of WKT shape
+
+def order_by_distance_wkt(
+    self, field_or_field_name: Union[str, DynamicSpatialField], shape_wkt: str
+) -> DocumentQuery[_T]: ...
+`}
+
+
+#### `order_by_distance_descending`, `order_by_distance_descending_wkt`
+
+
+
+{`# From point & rounding
+
+def order_by_distance_descending(
+    self,
+    field_or_field_name: Union[str, DynamicSpatialField],
+    latitude: float,
+    longitude: float,
+    round_factor: Optional[float] = 0.0,
+) -> DocumentQuery[_T]: ...
+
+# From center of WKT shape
+
+def order_by_distance_descending_wkt(
+    self, field_or_field_name: Union[str, DynamicSpatialField], shape_wkt: str
+) -> DocumentQuery[_T]: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------------------|-----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **field_or_field_name** | `Union[str, DynamicSpatialField]` | `str` - Path to spatial field in an index (when querying an index)

**-or-**

`DynamicSpatialField` - Object that contains the document's spatial fields, either `PointField` or `WktField` (when making a dynamic query). |
+| **latitude** | `float` | The latitude of the point from which the distance is measured |
+| **longitude** | `float` | The longitude of the point from which the distance is measured |
+| **round_factor** (Optional) | `float` | A distance interval in kilometers.
The distance from the point is rounded up to the nearest interval.
The results within the same interval can be sorted by a secondary order.
If no other order was specified, then by ascending order of document Id. |
+| **shape_wkt** | `str` | [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry)-based shape used in query criteria |
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-csharp.mdx new file mode 100644 index 0000000000..314f55705c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-csharp.mdx @@ -0,0 +1,15 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A **Faceted Search** provides an efficient way to explore and navigate through large datasets or search results.
+
+* To make a faceted search,
+  a static-index must be defined for the fields you want to query and apply facets on.
+  Please refer to the **Query by Facets** article under [Indexes > Querying > Faceted search](../../../indexes/querying/faceted-search.mdx).
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-java.mdx new file mode 100644 index 0000000000..7efcdb11b5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-java.mdx @@ -0,0 +1,338 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To execute a facet (aggregation) query using the session `query` method, use the `aggregateBy` or `aggregateUsing` methods. This will scope you to the aggregation query builder, where you can define single or multiple facets for the query using a straightforward and fluent API.
+
+## Syntax
+
+
+
+{`IAggregationDocumentQuery aggregateBy(Consumer> builder);
+
+IAggregationDocumentQuery aggregateBy(FacetBase facet);
+
+IAggregationDocumentQuery aggregateBy(Facet... facet);
+
+IAggregationDocumentQuery aggregateUsing(String facetSetupDocumentId);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **facet** | FacetBase | `FacetBase` implementation defining the scope of the facet and its options (either `Facet` or `RangeFacet`) |
+| **facets** | `FacetBase...` | Items containing `FacetBase` implementations |
+| **builder** | `Consumer>` | Builder with a fluent API that constructs a `FacetBase` instance |
+| **facetSetupDocumentId** | String | ID of a document containing `FacetSetup` |
+
+### Facet & RangeFacet
+
+
+`RangeFacet` allows you to split the results of the calculations into several ranges, in contrast to `Facet`, where the whole spectrum of results will be used to generate a single outcome.
+
+
+
+
+{`public class Facet {
+    private String fieldName;
+    private FacetOptions options;
+    private Map aggregations;
+    private String displayFieldName;
+
+    public String getFieldName() {
+        return fieldName;
+    }
+
+    public void setFieldName(String fieldName) {
+        this.fieldName = fieldName;
+    }
+
+    public FacetOptions getOptions() {
+        return options;
+    }
+
+    public void setOptions(FacetOptions options) {
+        this.options = options;
+    }
+
+    public Map getAggregations() {
+        return aggregations;
+    }
+
+    public void setAggregations(Map aggregations) {
+        this.aggregations = aggregations;
+    }
+
+    public String getDisplayFieldName() {
+        return displayFieldName;
+    }
+
+    public void setDisplayFieldName(String displayFieldName) {
+        this.displayFieldName = displayFieldName;
+    }
+}
+`}
+
+
+
+{`public class RangeFacet {
+    private List ranges;
+    private FacetOptions options;
+    private Map aggregations;
+    private String displayFieldName;
+
+    public List getRanges() {
+        return ranges;
+    }
+
+    public void setRanges(List ranges) {
+        this.ranges = ranges;
+    }
+
+    public FacetOptions getOptions() {
+        return options;
+    }
+
+    public void setOptions(FacetOptions options) {
+        this.options = options;
+    }
+
+    public Map getAggregations() {
+        return aggregations;
+    }
+
+    public void setAggregations(Map aggregations) {
+        this.aggregations = aggregations;
+    }
+
+    public String getDisplayFieldName() {
+        return displayFieldName;
+    }
+
+    public void setDisplayFieldName(String displayFieldName) {
+        this.displayFieldName = displayFieldName;
+    }
+}
+`}
+
+
+
+{`public enum FacetAggregation {
+    NONE,
+    MAX,
+    MIN,
+    AVERAGE,
+    SUM;
+}
+`}
+
+
+
+### Builder
+
+
+
+{`IFacetOperations byRanges(RangeBuilder range, RangeBuilder... ranges);
+
+IFacetOperations byField(String fieldName);
+
+IFacetOperations withDisplayName(String displayName);
+
+IFacetOperations withOptions(FacetOptions options);
+
+IFacetOperations sumOn(String path);
+IFacetOperations minOn(String path);
+IFacetOperations maxOn(String path);
+IFacetOperations averageOn(String path);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **path** | String | Points to the document field that should be used for the aggregation (`sumOn`, `minOn`, `maxOn`, `averageOn`) |
+| **fieldName** | String | Points to the index field that should be used for the operation (`byRanges`, `byField`) |
+| **displayName** | String | If set, results of a facet will be returned under this name |
+| **options** | `FacetOptions` | Non-default options that should be used for the operation |
+
+### Options
+
+
+
+{`private FacetTermSortMode termSortMode = FacetTermSortMode.VALUE_ASC;
+private boolean includeRemainingTerms;
+private int start;
+private int pageSize = Integer.MAX_VALUE;
+
+//getters and setters
+`}
+
+
+
+| Options | | |
+| ------------- | ------------- | ----- |
+| **termSortMode** | `FacetTermSortMode` | Indicates how terms should be sorted (`VALUE_ASC`, `VALUE_DESC`, `COUNT_ASC`, `COUNT_DESC`) |
+| **includeRemainingTerms** | boolean | Indicates if remaining terms should be included in results |
+| **start** | int | Used to skip a given number of facet results in the outcome |
+| **pageSize** | int | Used to limit facet results to the given value |
+
+## Example I
+
+
+
+
+{`FacetOptions facetOptions = new FacetOptions();
+facetOptions.setTermSortMode(FacetTermSortMode.COUNT_DESC);
+
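+// Facet over the index field 'manufacturer', using the sort options defined above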
+Facet facet1 = new Facet(); +facet1.setFieldName("manufacturer"); +facet1.setOptions(facetOptions); + +RangeFacet facet2 = new RangeFacet(); +facet2.setRanges(Arrays.asList( + "cost < 200", + "cost between 200 and 400", + "cost between 400 and 600", + "cost between 600 and 800", + "cost >= 800" +)); +facet2.setAggregations(Collections.singletonMap(FacetAggregation.AVERAGE, "cost")); + +RangeFacet facet3 = new RangeFacet(); +facet3.setRanges(Arrays.asList( + "megapixels < 3", + "megapixels between 3 and 7", + "megapixels between 7 and 10", + "megapixels >= 10" +)); + +Map facets = session + .query(Camera.class, Query.index("Camera/Costs")) + .aggregateBy(facet1) + .andAggregateBy(facet2) + .andAggregateBy(facet3) + .execute(); +`} + + + + +{`from index 'Camera/Costs' +select +facet(manufacturer), +facet(cost < 200, cost >= 200 AND cost < 400, cost >= 400 AND cost < 600, cost >= 600 AND cost < 800, cost >= 800), +facet(megapixels < 3, megapixels >= 3 AND megapixels < 7, megapixels >= 7 AND megapixels < 10, megapixels >= 10) +`} + + + + +## Example II + + + + +{`FacetOptions options = new FacetOptions(); +options.setTermSortMode(FacetTermSortMode.COUNT_DESC); + +RangeBuilder costBuilder = RangeBuilder.forPath("cost"); +RangeBuilder megapixelsBuilder = RangeBuilder.forPath("megapixels"); + +Map facetResult = session + .query(Camera.class, Query.index("Camera/Costs")) + .aggregateBy(builder -> builder + .byField("manufacturer") + .withOptions(options)) + .andAggregateBy(builder -> builder + .byRanges( + costBuilder.isLessThan(200), + costBuilder.isGreaterThanOrEqualTo(200).isLessThan(400), + costBuilder.isGreaterThanOrEqualTo(400).isLessThan(600), + costBuilder.isGreaterThanOrEqualTo(600).isLessThan(800), + costBuilder.isGreaterThanOrEqualTo(800)) + .averageOn("cost")) + .andAggregateBy(builder -> builder + .byRanges( + megapixelsBuilder.isLessThan(3), + megapixelsBuilder.isGreaterThanOrEqualTo(3).isLessThan(7), + megapixelsBuilder.isGreaterThanOrEqualTo(7).isLessThan(10), + megapixelsBuilder.isGreaterThanOrEqualTo(10) + )) + .execute(); +`} + + + + +{`from index 'Camera/Costs' +select +facet(manufacturer), +facet(cost < 200, cost >= 200 AND cost < 400, cost >= 400 AND cost < 600, cost >= 600 AND cost < 800, cost >= 800), +facet(megapixels < 3, megapixels >= 3 AND megapixels < 7, megapixels >= 7 AND megapixels < 10, megapixels >= 10) +`} + + + + +## Example III + + + + +{`FacetSetup facetSetup = new FacetSetup(); + +Facet facetManufacturer = new Facet(); +facetManufacturer.setFieldName("manufacturer"); +facetSetup.setFacets(Arrays.asList(facetManufacturer)); + +RangeFacet cameraFacet = new RangeFacet(); +cameraFacet.setRanges(Arrays.asList( + "cost < 200", + "cost between 200 and 400", + "cost between 400 and 600", + "cost between 600 and 800", + "cost >= 800" +)); + +RangeFacet megapixelsFacet = new RangeFacet(); +megapixelsFacet.setRanges(Arrays.asList( + "megapixels < 3", + "megapixels between 3 and 7", + "megapixels between 7 and 10", + "megapixels >= 10" +)); + +facetSetup.setRangeFacets(Arrays.asList(cameraFacet, megapixelsFacet)); + +session.store(facetSetup, "facets/CameraFacets"); +session.saveChanges(); + +Map facets = session + .query(Camera.class, Query.index("Camera/Costs")) + .aggregateUsing("facets/CameraFacets") + .execute(); +`} + + + + +{`from index 'Camera/Costs' +select facet(id('facets/CameraFacets')) +`} + + + + +## Remarks + + +`aggregateBy` only supports aggregation by a single field. 
If you want to aggregate by multiple fields, you need to emit a single field that contains all values.
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-nodejs.mdx new file mode 100644 index 0000000000..314f55705c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-nodejs.mdx @@ -0,0 +1,15 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A **Faceted Search** provides an efficient way to explore and navigate through large datasets or search results.
+
+* To make a faceted search,
+  a static-index must be defined for the fields you want to query and apply facets on.
+  Please refer to the **Query by Facets** article under [Indexes > Querying > Faceted search](../../../indexes/querying/faceted-search.mdx).
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-php.mdx new file mode 100644 index 0000000000..2b59ba75d7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-php.mdx @@ -0,0 +1,15 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A **Faceted Search** provides an efficient way to explore and navigate through large datasets or search results.
+
+* To make a faceted search,
+  a static-index must be defined for the fields you want to query and apply facets on.
+  Please refer to the **Query by Facets** article under [Indexes > Querying > Faceted search](../../../indexes/querying/faceted-search.mdx).
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-python.mdx new file mode 100644 index 0000000000..4ec465d428 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-a-faceted-search-python.mdx @@ -0,0 +1,277 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To execute a facet (aggregation) query via the session `query` method, use `aggregate_by`,
+`aggregate_by_facets`, or `aggregate_using`.
+This will scope you to the aggregation query builder, where you can define
+single or multiple facets for the query using a straightforward and fluent API.
+
+## Syntax
+
+
+
+{`def aggregate_by(
+    self, builder_or_facet: Union[Callable[[FacetBuilder], None], FacetBase]
+) -> AggregationDocumentQuery[_T]: ...
+
+def aggregate_by_facets(self, facets: List[FacetBase]) -> AggregationDocumentQuery[_T]: ...
+
+def aggregate_using(self, facet_setup_document_id: str) -> AggregationDocumentQuery[_T]: ...
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **builder_or_facet** | `Union[Callable[[FacetBuilder], None], FacetBase]` | **Builder** with a fluent API that constructs a `FacetBase` instance
**-or-**
**FacetBase** implementation defining the scope of the facet and its options (either `Facet` or `RangeFacet`) | +| **facets** | `List[FacetBase]` | Items containing `FacetBase` implementations | +| **facet_setup_document_id** | `str` | ID of a document containing `FacetSetup` | + +### Facet & RangeFacet + + +`RangeFacet` allows you to split the results of the calculations into several ranges, in contrast to `Facet` where whole spectrum of results will be used to generate a single outcome. + + + + + +{`class FacetBase(ABC): + def __init__(self): + self.display_field_name: Union[None, str] = None + self.options: Union[None, FacetOptions] = None + self.aggregations: Dict[FacetAggregation, Set[FacetAggregationField]] = {} + +class Facet(FacetBase): + def __init__(self, field_name: str = None): + super().__init__() + self.field_name = field_name +`} + + + + +{`class RangeFacet(FacetBase): + def __init__(self, parent: Optional[FacetBase] = None): + super().__init__() + self.ranges: List[str] = [] +`} + + + + +{`class FacetAggregation(enum.Enum): + NONE = "None" + MAX = "Max" + MIN = "Min" + AVERAGE = "Average" + SUM = "Sum" +`} + + + + +### Builder + + + +{`def by_ranges(self, range_: RangeBuilder, *ranges: RangeBuilder) -> FacetOperations[_T]: ... + +def by_field(self, field_name: str) -> FacetOperations[_T]: ... + +def with_display_name(self, display_name: str) -> FacetOperations[_T]: ... + +def with_options(self, options: FacetOptions) -> FacetOperations[_T]: ... + +def sum_on(self, path: str, display_name: Optional[str] = None) -> FacetOperations[_T]: ... + +def min_on(self, path: str, display_name: Optional[str] = None) -> FacetOperations[_T]: ... + +def max_on(self, path: str, display_name: Optional[str] = None) -> FacetOperations[_T]: ... + +def average_on(self, path: str, display_name: Optional[str] = None) -> FacetOperations[_T]: ... 
+`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| ***ranges** | `RangeBuilder` | A list of aggregated ranges | +| **field_name** | `str` | Points to the index field that should be used for operation (`by_ranges`, `by_field`) or to document field that should be used for aggregation (`sum_on`, `min_on`, `max_on`, `average_on`) | +| **display_name** | `str` | If set, results of a facet will be returned under this name | +| **options** | `FacetOptions` | Non-default options that should be used for operation | +| **path** | `str` | Points to the index field that should be used for operation (`by_ranges`, `by_field`) or to document field that should be used for aggregation (`sum_on`, `min_on`, `max_on`, `average_on`) | + +### Options + + + +{`def __init__(self): + self.page_size: int = constants.int_max + self.start: Union[None, int] = None + self.term_sort_mode: FacetTermSortMode = FacetTermSortMode.VALUE_ASC + self.include_remaining_terms: bool = False +`} + + + +| Options | | | +| ------------- | ------------- | ----- | +| **term_sort_mode** | `FacetTermSortMode` | Indicates how terms should be sorted (`VALUE_ASC`, `VALUE_DESC`, `COUNT_ASC`, `COUNT_DESC`) | +| **include_remaining_terms** | `bool` | Indicates if remaining terms should be included in results | +| **start** | `Union[None, int]` | Used to skip given number of facet results in the outcome | +| **page_size** | `int` | Used to limit facet results to the given value | + +## Example I + + + + +{`facet_options = FacetOptions.default_options() +facet_options.term_sort_mode = FacetTermSortMode.COUNT_DESC +facet_options.start = 0 + +facet1 = Facet("manufacturer") +facet1.options = facet_options + +facet2 = RangeFacet() +facet2.ranges = [ + "cost < 200", + "cost between 200 and 400", + "cost between 400 and 600", + "cost between 600 and 800", + "cost >= 800", +] +facet2.aggregations = {FacetAggregation.AVERAGE: {FacetAggregationField("cost")}} + +facet3 = RangeFacet() +facet3.ranges = [ + "megapixels < 3", + "megapixels between 3 and 7", + "megapixels between 7 and 10", + "megapixels >= 10", +] + +facets = ( + session.query_index("Camera/Costs", Camera) + .aggregate_by(facet1) + .and_aggregate_by(facet2) + .and_aggregate_by(facet3) + .execute() +) +`} + + + + +{`from index 'Camera/Costs' +select +facet(manufacturer), +facet(cost < 200, cost >= 200 AND cost < 400, cost >= 400 AND cost < 600, cost >= 600 AND cost < 800, cost >= 800), +facet(megapixels < 3, megapixels >= 3 AND megapixels < 7, megapixels >= 7 AND megapixels < 10, megapixels >= 10) +`} + + + + +## Example II + + + + +{`options = FacetOptions() +options.start = 0 +options.term_sort_mode = FacetTermSortMode.COUNT_DESC + +cost_builder = RangeBuilder.for_path("cost") +megapixels_builder = RangeBuilder.for_path("megapixels") + +facet_result = ( + session.query_index("Camera/Costs", Camera) + .aggregate_by(lambda builder: builder.by_field("manufacturer").with_options(options)) + .and_aggregate_by( + lambda builder: builder.by_ranges( + cost_builder.is_less_than(200), + cost_builder.is_greater_than_or_equal_to(200).is_less_than(400), + cost_builder.is_greater_than_or_equal_to(400).is_less_than(600), + cost_builder.is_greater_than_or_equal_to(600).is_less_than(800), + cost_builder.is_greater_than_or_equal_to(800), + ).average_on("cost") + ) + .and_aggregate_by( + lambda builder: builder.by_ranges( + megapixels_builder.is_less_than(3), + megapixels_builder.is_greater_than_or_equal_to(3).is_less_than(7), + 
megapixels_builder.is_greater_than_or_equal_to(7).is_less_than(10), + megapixels_builder.is_greater_than_or_equal_to(10), + ) + ) +).execute() +`} + + + + +{`from index 'Camera/Costs' +select +facet(manufacturer), +facet(cost < 200, cost >= 200 AND cost < 400, cost >= 400 AND cost < 600, cost >= 600 AND cost < 800, cost >= 800), +facet(megapixels < 3, megapixels >= 3 AND megapixels < 7, megapixels >= 7 AND megapixels < 10, megapixels >= 10) +`} + + + + +## Example III + + + + +{`facet_setup = FacetSetup() + +facet_manufacturer = Facet() +facet_manufacturer.field_name = "manufacturer" +facet_setup.facets = [facet_manufacturer] + +camera_facet = RangeFacet() +camera_facet.ranges = [ + "cost < 200", + "cost between 200 and 400", + "cost between 400 and 600", + "cost between 600 and 800", + "cost >= 800", +] + +megapixels_facet = RangeFacet() +megapixels_facet.ranges = [ + "megapixels < 3", + "megapixels between 3 and 7", + "megapixels between 7 and 10", + "megapixels >= 10", +] + +facet_setup.range_facets = [camera_facet, megapixels_facet] + +session.store(facet_setup, "facets/CameraFacets") +session.save_changes() + +facets = session.query_index("Camera/Costs", Camera).aggregate_using("facets/CameraFacets").execute() +`} + + + + +{`from index 'Camera/Costs' +select facet(id('facets/CameraFacets')) +`} + + + + +`aggregate_by` only supports aggregation by a single field. +If you want to aggregate by multiple fields, emit a single field that contains all values. + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-csharp.mdx new file mode 100644 index 0000000000..3bd757b831 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-csharp.mdx @@ -0,0 +1,508 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Since RavenDB 4.0, the query optimizer supports dynamic group by queries and automatically creates auto map-reduce indexes. + +You can create a dynamic query that does an aggregation by using the LINQ `GroupBy()` method or `group by into` syntax. 
+
+The supported aggregation operations are:
+
+- `Count`
+- `Sum`
+
+## Group By Single Field
+
+
+
+
+{`var results = (from o in session.Query()
+               group o by o.ShipTo.Country
+               into g
+               select new
+               {
+                   Country = g.Key,
+                   OrderedQuantity = g.Sum(order => order.Lines.Sum(line => line.Quantity))
+               })
+    .ToList();
+`}
+
+
+
+{`var results = await (from o in asyncSession.Query()
+                     group o by o.ShipTo.Country
+                     into g
+                     select new
+                     {
+                         Country = g.Key,
+                         OrderedQuantity = g.Sum(order => order.Lines.Sum(line => line.Quantity))
+                     })
+    .ToListAsync();
+`}
+
+
+
+{`from Orders
+group by ShipTo.Country
+select ShipTo.Country as Country, sum(Lines[].Quantity) as TotalQuantity
+`}
+
+
+
+
+
+## Group By Multiple Fields
+
+
+
+
+{`var results = session.Query()
+    .GroupBy(x => new
+    {
+        x.Employee,
+        x.Company
+    })
+    .Select(x => new
+    {
+        EmployeeIdentifier = x.Key.Employee,
+        x.Key.Company,
+        Count = x.Count()
+    })
+    .ToList();
+`}
+
+
+
+{`var results = await asyncSession.Query()
+    .GroupBy(x => new
+    {
+        x.Employee,
+        x.Company
+    })
+    .Select(x => new
+    {
+        EmployeeIdentifier = x.Key.Employee,
+        x.Key.Company,
+        Count = x.Count()
+    })
+    .ToListAsync();
+`}
+
+
+
+{`from Orders
+group by Employee, Company
+select Employee as EmployeeIdentifier, Company, count()
+`}
+
+
+
+
+
+## Select Composite GroupBy Key
+
+
+
+
+{`var results = session.Query()
+    .GroupBy(x => new EmployeeAndCompany
+    {
+        Employee = x.Employee,
+        Company = x.Company
+    })
+    .Select(x => new CountOfEmployeeAndCompanyPairs
+    {
+        EmployeeCompanyPair = x.Key,
+        Count = x.Count()
+    })
+    .ToList();
+`}
+
+
+
+{`var results = await asyncSession.Query()
+    .GroupBy(x => new EmployeeAndCompany
+    {
+        Employee = x.Employee,
+        Company = x.Company
+    })
+    .Select(x => new CountOfEmployeeAndCompanyPairs
+    {
+        EmployeeCompanyPair = x.Key,
+        Count = x.Count()
+    })
+    .ToListAsync();
+`}
+
+
+
+{`from Orders
+group by Employee, Company
+select key() as EmployeeCompanyPair, count()
+`}
+
+
+
+
+
+## Group By Array
+
+### By Array Values
+
+In order to group by the values of an array, you need to use `GroupByArrayValues`. The following query will group by the `Product` property from the `Lines` collection
+and calculate the count per ordered product. Under the hood, a fanout auto map-reduce index will be created to handle such a query.
+
+
+
+
+{`var results = session.Query()
+    .GroupByArrayValues(x => x.Lines.Select(y => y.Product))
+    .Select(x => new
+    {
+        Count = x.Count(),
+        Product = x.Key
+    })
+    .ToList();
+`}
+
+
+
+{`var results = await asyncSession.Query()
+    .GroupByArrayValues(x => x.Lines.Select(y => y.Product))
+    .Select(x => new
+    {
+        Count = x.Count(),
+        Product = x.Key
+    })
+    .ToListAsync();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product
+select Lines[].Product, count()
+`}
+
+
+
+
+Inside a single group by statement you can mix collection values and the value of another property.
That's supported by `DocumentQuery` only: + + + + +{`var results = session.Advanced.DocumentQuery() + .GroupBy("Lines[].Product", "ShipTo.Country") + .SelectKey("Lines[].Product", "Product") + .SelectKey("ShipTo.Country", "Country") + .SelectCount() + .OfType() + .ToList(); +`} + + + + +{`var results = await asyncSession.Advanced.AsyncDocumentQuery() + .GroupBy("Lines[].Product", "ShipTo.Country") + .SelectKey("Lines[].Product", "Product") + .SelectKey("ShipTo.Country", "Country") + .SelectCount() + .OfType() + .ToListAsync(); +`} + + + + +{`from Orders +group by Lines[].Product, ShipTo.Country +select Lines[].Product as Product, ShipTo.Country as Country, count() +`} + + + + +Grouping by multiple values from **the same** collection is supported as well: + + + + +{`var results = session.Query() + .GroupByArrayValues(x => x.Lines.Select(y => new + { + y.Product, + y.Quantity + })) + .Select(x => new ProductInfo + { + Count = x.Count(), + Product = x.Key.Product, + Quantity = x.Key.Quantity + }) + .ToList(); +`} + + + + +{`var results = await asyncSession.Query() + .GroupByArrayValues(x => x.Lines.Select(y => new + { + y.Product, + y.Quantity + })) + .Select(x => new ProductInfo + { + Count = x.Count(), + Product = x.Key.Product, + Quantity = x.Key.Quantity + }) + .ToListAsync(); +`} + + + + +{`from Orders +group by Lines[].Product, Lines[].Quantity +select Lines[].Product as Product, Lines[].Quantity as Quantity, count() +`} + + + + +### By Array Content + +Another option is to group by array content. The reduction key will be calculated based on all values of a collection specified in `GroupBy`. +The client API exposes the `GroupByArrayContent` extension method for that purpose. + + + + +{`var results = session.Query() + .GroupByArrayContent(x => x.Lines.Select(y => y.Product)) + .Select(x => new ProductsInfo + { + Count = x.Count(), + Products = x.Key + }) + .ToList(); +`} + + + + +{`var results = await asyncSession.Query() + .GroupByArrayContent(x => x.Lines.Select(y => y.Product)) + .Select(x => new ProductsInfo + { + Count = x.Count(), + Products = x.Key + }) + .ToListAsync(); +`} + + + + +{`from Orders +group by array(Lines[].Product) +select key() as Products, count() +`} + + + + +Grouping by array content and a value of another property is supported by `DocumentQuery`: + + + + +{`var results = session.Advanced.DocumentQuery() + .GroupBy(("Lines[].Product", GroupByMethod.Array), ("ShipTo.Country", GroupByMethod.None)) + .SelectKey("Lines[].Product", "Products") + .SelectKey("ShipTo.Country", "Country") + .SelectCount() + .OfType() + .ToList(); +`} + + + + +{`var results = await asyncSession.Advanced.AsyncDocumentQuery() + .GroupBy(("Lines[].Product", GroupByMethod.Array), ("ShipTo.Country", GroupByMethod.None)) + .SelectKey("Lines[].Product", "Products") + .SelectKey("ShipTo.Country", "Country") + .SelectCount() + .OfType() + .ToListAsync(); +`} + + + + +{`from Orders +group by array(Lines[].Product), ShipTo.Country +select Lines[].Product as Products, ShipTo.Country as Country, count() +`} + + + + +Grouping by multiple values from **the same** collection is also supported by `DocumentQuery`: + + + + +{`var results = session.Advanced.DocumentQuery() + .GroupBy(("Lines[].Product", GroupByMethod.Array), ("Lines[].Quantity", GroupByMethod.Array)) + .SelectKey("Lines[].Product", "Products") + .SelectKey("Lines[].Quantity", "Quantities") + .SelectCount() + .OfType() + .ToList(); +`} + + + + +{`var results = await asyncSession.Advanced.AsyncDocumentQuery() + .GroupBy(("Lines[].Product", 
GroupByMethod.Array), ("Lines[].Quantity", GroupByMethod.Array))
+    .SelectKey("Lines[].Product", "Products")
+    .SelectKey("Lines[].Quantity", "Quantities")
+    .SelectCount()
+    .OfType()
+    .ToListAsync();
+`}
+
+
+
+{`from Orders
+group by array(Lines[].Product), array(Lines[].Quantity)
+select Lines[].Product as Products, Lines[].Quantity as Quantities, count()
+`}
+
+
+
+
+
+In order to use the above extension methods you need to add the following **using** statement:
+
+
+
+{`using Raven.Client.Documents;
+`}
+
+
+
+
+
+
+## Sorting
+
+Results of dynamic group by queries can be sorted by an aggregation function used in the query. Since the available aggregation operations are `Count` and `Sum`,
+you can use either of them to order the results.
+
+#### By Count
+
+
+
+
+{`var results = session.Query()
+    .GroupBy(x => x.Employee)
+    .Select(x => new
+    {
+        Employee = x.Key,
+        Count = x.Count()
+    })
+    .OrderBy(x => x.Count)
+    .ToList();
+`}
+
+
+
+{`var results = await asyncSession.Query()
+    .GroupBy(x => x.Employee)
+    .Select(x => new
+    {
+        Employee = x.Key,
+        Count = x.Count()
+    })
+    .OrderBy(x => x.Count)
+    .ToListAsync();
+`}
+
+
+
+{`from Orders
+group by Employee
+order by count() as long
+select Employee, count()
+`}
+
+
+
+#### By Sum
+
+
+
+
+{`var results = session.Query()
+    .GroupBy(x => x.Employee)
+    .Select(x => new
+    {
+        Employee = x.Key,
+        Sum = x.Sum(y => y.Freight)
+    })
+    .OrderBy(x => x.Sum)
+    .ToList();
+`}
+
+
+
+{`var results = await asyncSession.Query()
+    .GroupBy(x => x.Employee)
+    .Select(x => new
+    {
+        Employee = x.Key,
+        Sum = x.Sum(y => y.Freight)
+    })
+    .OrderBy(x => x.Sum)
+    .ToListAsync();
+`}
+
+
+
+{`from Orders
+group by Employee
+order by sum(Freight) as double
+select key() as Employee, sum(Freight) as Sum
+`}
+
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-java.mdx new file mode 100644 index 0000000000..1b817b8cc6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-java.mdx @@ -0,0 +1,264 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Since RavenDB 4.0, the query optimizer supports dynamic group by queries and automatically creates auto map-reduce indexes.
+
+You can create a dynamic query that does an aggregation by using the `groupBy()` method.
+
+The supported aggregation operations are:
+
+- `count`
+- `sum`
+
+## Group By Single Field
+
+
+
+{`List<CountryAndQuantity> orders = session.query(Order.class)
+    .groupBy("ShipTo.Country")
+    .selectKey("ShipTo.Country", "Country")
+    .selectSum(new GroupByField("Lines[].Quantity", "OrderedQuantity"))
+    .ofType(CountryAndQuantity.class)
+    .toList();
+`}
+
+
+
+{`from Orders
+group by ShipTo.Country
+select ShipTo.Country as Country, sum(Lines[].Quantity) as OrderedQuantity
+`}
+
+
+
+
+
+## Group By Multiple Fields
+
+
+
+{`List<CountByCompanyAndEmployee> results = session.query(Order.class)
+    .groupBy("Employee", "Company")
+    .selectKey("Employee", "EmployeeIdentifier")
+    .selectKey("Company")
+    .selectCount()
+    .ofType(CountByCompanyAndEmployee.class)
+    .toList();
+`}
+
+
+
+{`from Orders
+group by Employee, Company
+select Employee as EmployeeIdentifier, Company, count()
+`}
+
+
+
+
+
+## Select Composite GroupBy Key
+
+
+
+{`List<CountOfEmployeeAndCompanyPairs> orders = session.query(Order.class)
+    .groupBy("Employee", "Company")
+    .selectKey("key()", "EmployeeCompanyPair")
+    .selectCount("Count")
+    .ofType(CountOfEmployeeAndCompanyPairs.class)
+    .toList();
+`}
+
+
+
+{`from Orders
+group by Employee, Company
+select key() as EmployeeCompanyPair, count()
+`}
+
+
+
+
+
+## Group By Array
+
+### By Array Values
+
+To group by the values of an array, use `groupBy(array(...))`. The following query groups by the `Product` field of the `Lines` collection
+and calculates the count per ordered product. Underneath, a fanout auto map-reduce index will be created to handle such a query.
+
+
+
+{`List<ProductsInfo> products = session.query(Order.class)
+    .groupBy(array("Lines[].Product"))
+    .selectKey("key()", "Products")
+    .selectCount()
+    .ofType(ProductsInfo.class)
+    .toList();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product
+select Lines[].Product, count()
+`}
+
+
+
+Inside a single group-by statement you can mix collection values and the value of another property. This is supported by `DocumentQuery` only:
+
+
+
+{`List<ProductInfo> results = session.advanced().documentQuery(Order.class)
+    .groupBy("Lines[].Product", "ShipTo.Country")
+    .selectKey("Lines[].Product", "Product")
+    .selectKey("ShipTo.Country", "Country")
+    .selectCount()
+    .ofType(ProductInfo.class)
+    .toList();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product, ShipTo.Country
+select Lines[].Product as Product, ShipTo.Country as Country, count()
+`}
+
+
+
+Grouping by multiple values from **the same** collection is supported as well:
+
+
+
+{`List<ProductInfo> results = session.query(Order.class)
+    .groupBy(array("Lines[].Product"), array("Lines[].Quantity"))
+    .selectKey("Lines[].Product", "Product")
+    .selectKey("Lines[].Quantity", "Quantity")
+    .selectCount()
+    .ofType(ProductInfo.class)
+    .toList();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product, Lines[].Quantity
+select Lines[].Product as Product, Lines[].Quantity as Quantity, count()
+`}
+
+
+
+### By Array Content
+
+Another option is to group by array content. The reduction key will be calculated based on all values of the collection specified in `groupBy`.
+To group this way, pass the array field to `groupBy` using the `array(...)` method.
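+
+The examples on this page project the reduce results into small classes such as `ProductInfo` and `ProductsInfo` via `ofType()`. These classes are not shown in the samples; a minimal sketch of what such a class could look like (an assumption, with field names matching the `selectKey()`/`selectCount()` aliases) is:
+
+{`import java.util.List;
+
+// Hypothetical result class - not part of the official samples
+public class ProductsInfo {
+    private List<String> products; // bound to the "Products" key alias
+    private long count;            // bound to the count() aggregation
+
+    public List<String> getProducts() { return products; }
+    public void setProducts(List<String> products) { this.products = products; }
+    public long getCount() { return count; }
+    public void setCount(long count) { this.count = count; }
+}
+`}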
+ + + + +{`List results = session.query(Order.class) + .groupBy(array("Lines[].Product")) + .selectKey("key()", "Products") + .selectCount() + .ofType(ProductsInfo.class) + .toList(); +`} + + + + +{`from Orders +group by array(Lines[].Product) +select key() as Products, count() +`} + + + + +Grouping by array content and a value of another property is supported by `DocumentQuery`: + + + + +{`List results = session.query(Order.class) + .groupBy(array("Lines[].Product"), field("ShipTo.Country")) + .selectKey("Lines[].Product", "Products") + .selectKey("ShipTo.Country", "Country") + .selectCount() + .ofType(ProductsInfo.class) + .toList(); +`} + + + + +{`from Orders +group by array(Lines[].Product), ShipTo.Country +select Lines[].Product as Products, ShipTo.Country as Country, count() +`} + + + + +Grouping by multiple values from **the same** collection is also supported by `DocumentQuery`: + + + + +{`List results = session.query(Order.class) + .groupBy(array("Lines[].Product"), array("Lines[].Quantity")) + .selectKey("Lines[].Product", "Products") + .selectKey("Lines[].Quantity", "Quantities") + .selectCount() + .ofType(ProductsInfo.class) + .toList(); +`} + + + + +{`from Orders +group by array(Lines[].Product), array(Lines[].Quantity) +select Lines[].Product as Products, Lines[].Quantity as Quantities, count() +`} + + + + + +In order to use the above methods you need to add the following **static import** statement: + + + +{`import static net.ravendb.client.documents.queries.GroupBy.array; +import static net.ravendb.client.documents.queries.GroupBy.field; +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-nodejs.mdx new file mode 100644 index 0000000000..004ac76abb --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-nodejs.mdx @@ -0,0 +1,252 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Since RavenDB 4.0, the query optimizer supports dynamic group by queries and automatically creates auto map-reduce indexes. + +You can create a dynamic query that does an aggregation by using the `groupBy()` method. 
+
+The supported aggregation operations are:
+
+- `count`
+- `sum`
+
+## Group By Single Field
+
+
+
+{`const orders = await session.query({ collection: "Orders" })
+    .groupBy("ShipTo.Country")
+    .selectKey("ShipTo.Country", "Country")
+    .selectSum(new GroupByField("Lines[].Quantity", "OrderedQuantity"))
+    .ofType(CountryAndQuantity)
+    .all();
+`}
+
+
+
+{`from Orders
+group by ShipTo.Country
+select ShipTo.Country as Country, sum(Lines[].Quantity) as OrderedQuantity
+`}
+
+
+
+
+
+## Group By Multiple Fields
+
+
+
+{`const results = await session.query({ collection: "Orders" })
+    .groupBy("Employee", "Company")
+    .selectKey("Employee", "EmployeeIdentifier")
+    .selectKey("Company")
+    .selectCount()
+    .ofType(CountByCompanyAndEmployee)
+    .all();
+`}
+
+
+
+{`from Orders
+group by Employee, Company
+select Employee as EmployeeIdentifier, Company, count() as Count
+`}
+
+
+
+
+
+## Select Composite GroupBy Key
+
+
+
+{`const orders = await session.query({ collection: "Orders" })
+    .groupBy("Employee", "Company")
+    .selectKey("key()", "EmployeeCompanyPair")
+    .selectCount("Count")
+    .ofType(CountOfEmployeeAndCompanyPairs)
+    .all();
+`}
+
+
+
+{`from Orders
+group by Employee, Company
+select key() as EmployeeCompanyPair, count() as Count
+`}
+
+
+
+
+
+## Group By Array
+
+### By Array Values
+
+To group by the values of an array, use `groupBy(GroupBy.array(...))`. The following query groups by the `Product` field of the `Lines` collection
+and calculates the count per ordered product. Underneath, a fanout auto map-reduce index will be created to handle such a query.
+
+
+
+{`const products = await session.query({ collection: "Orders" })
+    .groupBy(GroupBy.array("Lines[].Product"))
+    .selectKey("key()", "Products")
+    .selectCount()
+    .ofType(ProductsInfo)
+    .all();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product
+select Lines[].Product, count() as Count
+`}
+
+
+
+Inside a single group-by statement you can mix collection values and the value of another property. This is supported by `documentQuery` only:
+
+
+
+{`const results = await session.advanced.documentQuery({ collection: "Orders" })
+    .groupBy("Lines[].Product", "ShipTo.Country")
+    .selectKey("Lines[].Product", "Product")
+    .selectKey("ShipTo.Country", "Country")
+    .selectCount()
+    .ofType(ProductInfo)
+    .all();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product, ShipTo.Country
+select Lines[].Product as Product, ShipTo.Country as Country, count() as Count
+`}
+
+
+
+Grouping by multiple values from **the same** collection is supported as well:
+
+
+
+{`const results = await session.query({ collection: "Orders" })
+    .groupBy(GroupBy.array("Lines[].Product"), GroupBy.array("Lines[].Quantity"))
+    .selectKey("Lines[].Product", "Product")
+    .selectKey("Lines[].Quantity", "Quantity")
+    .selectCount()
+    .ofType(ProductInfo)
+    .all();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product, Lines[].Quantity
+select Lines[].Product as Product, Lines[].Quantity as Quantity, count() as Count
+`}
+
+
+
+### By Array Content
+
+Another option is to group by array content. The reduction key will be calculated based on all values of the collection specified in `groupBy`.
+To group this way, pass the array field to `groupBy` using `GroupBy.array(...)`.
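+
+The examples on this page project the reduce results into classes such as `ProductInfo` and `ProductsInfo` via `ofType()`. These classes are not shown in the samples; a minimal sketch (an assumption, with property names matching the `selectKey()`/`selectCount()` aliases) could be:
+
+{`// Hypothetical result class - not part of the official samples
+class ProductsInfo {
+    constructor() {
+        this.products = null; // bound to the "Products" key alias
+        this.count = 0;       // bound to the count() aggregation
+    }
+}
+`}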
+ + + + +{`const results = await session.query({ collection: "Orders" }) + .groupBy(GroupBy.array("Lines[].Product")) + .selectKey("key()", "Products") + .selectCount() + .ofType(ProductsInfo) + .all(); +`} + + + + +{`from Orders +group by array(Lines[].Product) +select key() as Products, count() as Count +`} + + + + +Grouping by array content and a value of another property is supported by `DocumentQuery`: + + + + +{`const results = await session.query({ collection: "Orders" }) + .groupBy(GroupBy.array("Lines[].Product"), GroupBy.field("ShipTo.Country")) + .selectKey("Lines[].Product", "Products") + .selectKey("ShipTo.Country", "Country") + .selectCount() + .ofType(ProductsInfo) + .toList(); +`} + + + + +{`from Orders +group by array(Lines[].Product), ShipTo.Country +select Lines[].Product as Products, ShipTo.Country as Country, count() as Count +`} + + + + +Grouping by multiple values from **the same** collection is also supported by `DocumentQuery`: + + + + +{`const results = await session.query({ collection: "Orders" }) + .groupBy(GroupBy.array("Lines[].Product"), GroupBy.array("Lines[].Quantity")) + .selectKey("Lines[].Product", "Products") + .selectKey("Lines[].Quantity", "Quantities") + .selectCount() + .ofType(ProductsInfo) + .all(); +`} + + + + +{`from Orders +group by array(Lines[].Product), array(Lines[].Quantity) +select Lines[].Product as Products, Lines[].Quantity as Quantities, count() as Count +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-php.mdx new file mode 100644 index 0000000000..ad56903cf3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-php.mdx @@ -0,0 +1,262 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Since RavenDB 4.0, the query optimizer supports dynamic group by queries and automatically creates auto map-reduce indexes. + +You can create a dynamic query that does an aggregation using the `groupBy` method. 
+
+The supported aggregation operations are:
+
+- `count` (via `selectCount`)
+- `sum` (via `selectSum`)
+
+## Group By Single Field
+
+
+
+{`/** @var array $orders */
+$orders = $session->query(Order::class)
+    ->groupBy("ShipTo.Country")
+    ->selectKey("ShipTo.Country", "Country")
+    ->selectSum(new GroupByField("Lines[].Quantity", "OrderedQuantity"))
+    ->ofType(CountryAndQuantity::class)
+    ->toList();
+`}
+
+
+
+{`from Orders
+group by ShipTo.Country
+select ShipTo.Country as Country, sum(Lines[].Quantity) as OrderedQuantity
+`}
+
+
+
+
+
+## Group By Multiple Fields
+
+
+
+{`$results = $session->query(Order::class)
+    ->groupBy("Employee", "Company")
+    ->selectKey("Employee", "EmployeeIdentifier")
+    ->selectKey("Company")
+    ->selectCount()
+    ->ofType(CountByCompanyAndEmployee::class)
+    ->toList();
+`}
+
+
+
+{`from Orders
+group by Employee, Company
+select Employee as EmployeeIdentifier, Company, count()
+`}
+
+
+
+
+
+## Select Composite GroupBy Key
+
+
+
+{`/** @var array $orders */
+$orders = $session->query(Order::class)
+    ->groupBy("Employee", "Company")
+    ->selectKey("key()", "EmployeeCompanyPair")
+    ->selectCount("Count")
+    ->ofType(CountOfEmployeeAndCompanyPairs::class)
+    ->toList();
+`}
+
+
+
+{`from Orders
+group by Employee, Company
+select key() as EmployeeCompanyPair, count()
+`}
+
+
+
+
+
+## Group By Array
+
+### By Array Values
+
+The following query groups by the `Product` property of the `Lines` collection,
+and calculates the count per ordered product.
+Underneath, a fanout auto map-reduce index will be created to handle such a query.
+
+
+
+{`/** @var array $products */
+$products = $session->query(Order::class)
+    ->groupBy(GroupBy::array("Lines[].Product"))
+    ->selectKey("key()", "Products")
+    ->selectCount()
+    ->ofType(ProductsInfo::class)
+    ->toList();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product
+select Lines[].Product, count()
+`}
+
+
+
+Inside a single group-by statement you can mix collection values and the value of another property.
+
+
+
+{`/** @var array $results */
+$results = $session->advanced()->documentQuery(Order::class)
+    ->groupBy("Lines[].Product", "ShipTo.Country")
+    ->selectKey("Lines[].Product", "Product")
+    ->selectKey("ShipTo.Country", "Country")
+    ->selectCount()
+    ->ofType(ProductInfo::class)
+    ->toList();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product, ShipTo.Country
+select Lines[].Product as Product, ShipTo.Country as Country, count()
+`}
+
+
+
+Grouping by multiple values from **the same** collection is supported as well:
+
+
+
+{`/** @var array $results */
+$results = $session->query(Order::class)
+    ->groupBy(GroupBy::array("Lines[].Product"), GroupBy::array("Lines[].Quantity"))
+    ->selectKey("Lines[].Product", "Product")
+    ->selectKey("Lines[].Quantity", "Quantity")
+    ->selectCount()
+    ->ofType(ProductInfo::class)
+    ->toList();
+`}
+
+
+
+{`from Orders
+group by Lines[].Product, Lines[].Quantity
+select Lines[].Product as Product, Lines[].Quantity as Quantity, count()
+`}
+
+
+
+### By Array Content
+
+Another option is to group by array content.
+The reduction key will be calculated based on all values of the collection specified in `groupBy`.
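+
+The examples on this page project the reduce results into classes such as `ProductInfo` and `ProductsInfo` via `ofType()`. These classes are not shown in the samples; a minimal sketch (an assumption, with property names matching the `selectKey()`/`selectCount()` aliases) could be:
+
+{`// Hypothetical result class - not part of the official samples
+class ProductsInfo
+{
+    public ?array $products = null; // bound to the "Products" key alias
+    public ?int $count = null;      // bound to the count() aggregation
+}
+`}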
+
+
+
+
+{`/** @var array $results */
+$results = $session->query(Order::class)
+    ->groupBy(GroupBy::array("Lines[].Product"))
+    ->selectKey("key()", "Products")
+    ->selectCount()
+    ->ofType(ProductsInfo::class)
+    ->toList();
+`}
+
+
+
+{`from Orders
+group by array(Lines[].Product)
+select key() as Products, count()
+`}
+
+
+
+Grouping by array content and a value of another property is supported as well:
+
+
+
+{`/** @var array $results */
+$results = $session->query(Order::class)
+    ->groupBy(GroupBy::array("Lines[].Product"), GroupBy::field("ShipTo.Country"))
+    ->selectKey("Lines[].Product", "Products")
+    ->selectKey("ShipTo.Country", "Country")
+    ->selectCount()
+    ->ofType(ProductsInfo::class)
+    ->toList();
+`}
+
+
+
+{`from Orders
+group by array(Lines[].Product), ShipTo.Country
+select Lines[].Product as Products, ShipTo.Country as Country, count()
+`}
+
+
+
+Grouping by multiple values from **the same** collection is also supported:
+
+
+
+{`/** @var array $results */
+$results = $session->query(Order::class)
+    ->groupBy(GroupBy::array("Lines[].Product"), GroupBy::array("Lines[].Quantity"))
+    ->selectKey("Lines[].Product", "Products")
+    ->selectKey("Lines[].Quantity", "Quantities")
+    ->selectCount()
+    ->ofType(ProductsInfo::class)
+    ->toList();
+`}
+
+
+
+{`from Orders
+group by array(Lines[].Product), array(Lines[].Quantity)
+select Lines[].Product as Products, Lines[].Quantity as Quantities, count()
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-python.mdx
new file mode 100644
index 0000000000..1b496ef59a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-group-by-query-python.mdx
@@ -0,0 +1,328 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To run a dynamic query that aggregates data, use the `group_by()` method.
+
+* Data can be grouped by a single field or by multiple fields, and further aggregated
+  by **sum** or **count**.
+
+* RavenDB's query optimizer supports dynamic group-by queries by automatically
+  creating auto map-reduce indexes.
+ +* In This Page: + * [Group By a Single Field](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#group-by-a-single-field) + * [Group By Multiple Fields](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#group-by-multiple-fields) + * [Select Composite GroupBy Key](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#select-composite-groupby-key) + * [Group By Array](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#group-by-array) + * [By Array Values](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-values) + * [By Array Content](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-content) + * [Sorting](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#sorting) + * [By Count](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-count) + * [By Sum](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-sum) + + +## Group By a Single Field + + + + +{`orders = list( + session.query(object_type=Order) + .group_by("ship_to.country") + .select_key("ship_to.country", "country") + .select_sum(GroupByField("lines[].quantity", "ordered_quantity")) + .of_type(CountryAndQuantity) +) +`} + + + + +{`from Orders +group by ShipTo.City +select ShipTo.City as Country, sum(Lines[].Quantity) as TotalQuantity +`} + + + + + + +## Group By Multiple Fields + + + + +{`results = list( + session.query(object_type=Order) + .group_by("employee", "company") + .select_key("employee", "employee_identifier") + .select_key("company") + .select_count() + .of_type(CountByCompanyAndEmployee) +) +`} + + + + +{`from Orders +group by Employee, Company +select Employee as EmployeeIdentifier, Company, count() +`} + + + + + + +## Select Composite GroupBy Key + + + + +{`orders = list( + session.query(object_type=Order) + .group_by("employee", "company") + .select_key("key()", "employee_company_pair") + .select_count("count") + .of_type(CountOfEmployeeAndCompanyPairs) +) +`} + + + + +{`from Orders +group by Employee, Company +select key() as EmployeeCompanyPair, count() +`} + + + + + + +## Group By Array + +### By Array Values + +The following query will group by the `lines[]` array `product` property +and calculate the count per product. +Behind the scenes, an auto map-reduce index is created to handle the query. + + + +{`products = list( + session.query(object_type=Order) + .group_by(GroupBy.array("lines[].product")) + .select_key("key()", "products") + .select_count() + .of_type(ProductsInfo) +) +`} + + + + +{`from Orders +group by Lines[].Product +select Lines[].Product, count() +`} + + + + +It is possible to group by the values of an array field **and** by the value of an additional property. + + + +{`products = list( + session.advanced.document_query(object_type=Order) + .group_by("lines[].product", "ship_to.country") + .select_key("lines[].product", "product") + .select_key("ship_to.country", "country") + .select_count() + .of_type(ProductInfo) +) +`} + + + + +{`from Orders +group by Lines[].Product, ShipTo.Country +select Lines[].Product as Product, ShipTo.Country as Country, count() +`} + + + + +Grouping by values of multiple fields of **the same** array is supported as well. 
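+
+The result classes used with `of_type()` in these queries are not defined on this page; a minimal sketch of what they might look like (an assumption, with attribute names matching the `select_key()`/`select_count()` aliases):
+
+{`# Hypothetical result classes - not part of the official samples
+class ProductInfo:
+    def __init__(self, count: int = None, product: str = None, quantity: int = None):
+        self.count = count          # bound to the count() aggregation
+        self.product = product      # bound to the "product" key alias
+        self.quantity = quantity    # bound to the "quantity" key alias
+
+class ProductsInfo:
+    def __init__(self, count: int = None, products: list = None):
+        self.count = count          # bound to the count() aggregation
+        self.products = products    # bound to the "products" key alias
+`}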
+ + + +{`results = list( + session.query(object_type=Order) + .group_by(GroupBy.array("lines[].product"), GroupBy.array("lines[].quantity")) + .select_key("lines[].product", "product") + .select_key("lines[].quantity", "quantity") + .select_count() + .of_type(ProductInfo) +) +`} + + + + +{`from Orders +group by Lines[].Product, Lines[].Quantity +select Lines[].Product as Product, Lines[].Quantity as Quantity, count() +`} + + + + +### By Array Content + +Another option is to group by array **content**. +The creation of the reduction key will be based on the content of the array field specified by `group_by`. + + + +{`results = list( + session.query(object_type=Order) + .group_by(GroupBy.array("lines[].product")) + .select_key("key()", "products") + .select_count() + .of_type(ProductsInfo) +) +`} + + + + +{`from Orders +group by array(Lines[].Product) +select key() as Products, count() +`} + + + + +It is possible to group by the content of an array field **and** by that of a field of an additional property. + + + +{`results = list( + session.query(object_type=Order) + .group_by(GroupBy.array("lines[].product"), GroupBy.field("ship_to.country")) + .select_key("lines[].product", "products") + .select_key("ship_to.country", "country") + .select_count() + .of_type(ProductsInfo) +) +`} + + + + +{`from Orders +group by array(Lines[].Product), ShipTo.Country +select Lines[].Product as Products, ShipTo.Country as Country, count() +`} + + + + +Grouping by multiple fields of **the same** array is also supported. + + + +{`results = list( + session.query(object_type=Order) + .group_by(GroupBy.array("lines[].product"), GroupBy.array("lines[].quantity")) + .select_key("lines[].product", "products") + .select_key("lines[].quantity", "quantities") + .select_count() + .of_type(ProductsInfo) +) +`} + + + + +{`from Orders +group by array(Lines[].Product), array(Lines[].Quantity) +select Lines[].Product as Products, Lines[].Quantity as Quantities, count() +`} + + + + + + +## Sorting + +The results of a dynamic group_by query can be sorted by an aggregation function used in the query. +The results can be ordered by the aggregation operations `count` and `sum`. + +#### By Count + + + + +{`results = list( + session.query(object_type=Order) + .group_by("employee") + .select_key("key()", "employee") + .select_count() + .order_by("count") +) +`} + + + + +{`from Orders +group by Employee +order by count() as long +select Employee, count() +`} + + + + +#### By Sum + + + + +{`results = list( + session.query(object_type=Order) + .group_by("employee") + .select_key("key()", "employee") + .select_sum(GroupByField("freight", "sum")) + .order_by("sum") +) +`} + + + + +{`from Orders +group by Employee +order by sum(Freight) as double +select key() as Employee, sum(Freight) as Sum +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-csharp.mdx new file mode 100644 index 0000000000..5b9cc17729 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-csharp.mdx @@ -0,0 +1,337 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Query execution can be deferred: the query can be defined as **Lazy**, and executed + at a later time, when its results are actually needed. 
+ +* This article contains examples for lazy queries. Prior to reading it, please refer + to [perform requests lazily](../../../client-api/session/how-to/perform-operations-lazily.mdx) + for general knowledge about RavenDB's lazy behavior and other request types that can be + executed lazily within a session. + +* In this page: + * [Lazy query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-query) + * [Lazy count query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-count-query) + * [Lazy suggestions query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-suggestions-query) + * [Lazy facets query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-facets-query) + * [Syntax](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#syntax) + + +## Lazy query + + + + +{`// Define a lazy query +Lazy> lazyEmployees = session + .Query() + .Where(x => x.FirstName == "Robert") + // Add a call to 'Lazily' + .Lazily(); + +IEnumerable employees = lazyEmployees.Value; // Query is executed here +`} + + + + +{`// Define a lazy query +Lazy>> lazyEmployees = asyncSession + .Query() + .Where(x => x.FirstName == "Robert") + // Add a call to 'LazilyAsync' + .LazilyAsync(); + +IEnumerable employees = await lazyEmployees.Value; // Query is executed here +`} + + + + +{`// Define a lazy DocumentQuery +Lazy> lazyEmployees = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Robert") + // Add a call to 'Lazily' + .Lazily(); + +IEnumerable employees = lazyEmployees.Value; // DocumentQuery is executed here +`} + + + + +* Learn more about queries in this [query overview](../../../client-api/session/querying/how-to-query.mdx). + + + +## Lazy count query + + + + +{`// Define a lazy count query +Lazy lazyCount = session + .Query() + .Where(x => x.FirstName == "Robert") + // Add a call to 'CountLazily' + .CountLazily(); + +int count = lazyCount.Value; // Query is executed here +`} + + + + +{`// Define a lazy count query +Lazy> lazyCount = asyncSession + .Query() + .Where(x => x.FirstName == "Robert") + // Add a call to 'CountLazilyAsync' + .CountLazilyAsync(); + +int count = await lazyCount.Value; // Query is executed here +`} + + + + +{`// Define a lazy DocumentQuery +Lazy lazyCount = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Robert") + // Add a call to 'CountLazily' + .CountLazily(); + +int count = lazyCount.Value; // DocumentQuery is executed here +`} + + + + + + +## Lazy suggestions query + + + + +{`// Define a lazy suggestion query +Lazy> lazySuggestions = session + .Query() + .SuggestUsing(builder => builder.ByField(x => x.Name, "chaig")) + // Add a call to 'ExecuteLazy' + .ExecuteLazy(); + +Dictionary suggest = lazySuggestions.Value; // Query is executed here +List suggestions = suggest["Name"].Suggestions; +`} + + + + +{`// Define a lazy suggestion query +Lazy>> lazySuggestions = asyncSession + .Query() + .SuggestUsing(builder => builder.ByField("Name", "chaig")) + // Add a call to 'ExecuteLazyAsync' + .ExecuteLazyAsync(); + +Dictionary suggest = await lazySuggestions.Value; // Query is executed here +List suggestions = suggest["Name"].Suggestions; +`} + + + + +{`// Define a lazy DocumentQuery +Lazy> lazySuggestions = session.Advanced + .DocumentQuery() + .SuggestUsing(builder => builder.ByField("Name", "chaig")) + // Add a call to 'ExecuteLazy' + .ExecuteLazy(); + +Dictionary suggest = lazySuggestions.Value; // DocumentQuery is executed here 
+List<string> suggestions = suggest["Name"].Suggestions;
+`}
+
+
+
+
+* Learn more about suggestions in [query for suggestions](../../../client-api/session/querying/how-to-work-with-suggestions.mdx).
+
+
+
+## Lazy facets query
+
+
+
+
+{`// Define a lazy facets query
+Lazy<Dictionary<string, FacetResult>> lazyFacets = session
+    .Query<Products_ByCategoryAndPrice.IndexEntry, Products_ByCategoryAndPrice>()
+    .AggregateBy(facetsDefinition)
+    // Add a call to 'ExecuteLazy'
+    .ExecuteLazy();
+
+Dictionary<string, FacetResult> facets = lazyFacets.Value; // Query is executed here
+
+FacetResult categoryResults = facets["Product Category"];
+FacetResult priceResults = facets["Price per Unit"];
+`}
+
+
+
+
+{`// Define a lazy facets query
+Lazy<Task<Dictionary<string, FacetResult>>> lazyFacets = asyncSession
+    .Query<Products_ByCategoryAndPrice.IndexEntry, Products_ByCategoryAndPrice>()
+    .AggregateBy(facetsDefinition)
+    // Add a call to 'ExecuteLazyAsync'
+    .ExecuteLazyAsync();
+
+Dictionary<string, FacetResult> facets = await lazyFacets.Value; // Query is executed here
+
+FacetResult categoryResults = facets["Product Category"];
+FacetResult priceResults = facets["Price per Unit"];
+`}
+
+
+
+
+{`// Define a lazy DocumentQuery
+Lazy<Dictionary<string, FacetResult>> lazyFacets = session.Advanced
+    .DocumentQuery<Products_ByCategoryAndPrice.IndexEntry, Products_ByCategoryAndPrice>()
+    .AggregateBy(facetsDefinition)
+    // Add a call to 'ExecuteLazy'
+    .ExecuteLazy();
+
+Dictionary<string, FacetResult> facets = lazyFacets.Value; // DocumentQuery is executed here
+
+FacetResult categoryResults = facets["Product Category"];
+FacetResult priceResults = facets["Price per Unit"];
+`}
+
+
+
+
+{`// The facets definition used in the facets query:
+List<FacetBase> facetsDefinition = new List<FacetBase>
+{
+    new Facet
+    {
+        FieldName = "CategoryName",
+        DisplayFieldName = "Product Category"
+    },
+    new RangeFacet<Products_ByCategoryAndPrice.IndexEntry>
+    {
+        Ranges =
+        {
+            product => product.PricePerUnit < 25,
+            product => product.PricePerUnit >= 25 && product.PricePerUnit < 50,
+            product => product.PricePerUnit >= 50 && product.PricePerUnit < 100,
+            product => product.PricePerUnit >= 100
+        },
+        DisplayFieldName = "Price per Unit"
+    }
+};
+`}
+
+
+
+
+{`// The index definition used in the facets query:
+public class Products_ByCategoryAndPrice :
+    AbstractIndexCreationTask<Product, Products_ByCategoryAndPrice.IndexEntry>
+{
+    // The IndexEntry class defines the index-fields
+    public class IndexEntry
+    {
+        public string CategoryName { get; set; }
+        public decimal PricePerUnit { get; set; }
+    }
+
+    public Products_ByCategoryAndPrice()
+    {
+        // The 'Map' function defines the content of the index-fields
+        Map = products => from product in products
+                          select new IndexEntry
+                          {
+                              CategoryName = LoadDocument<Category>(product.Category).Name,
+                              PricePerUnit = product.PricePerUnit
+                          };
+    }
+}
+`}
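+
+A session accumulates all pending lazy operations and sends them to the server in a single request when the first `Value` is accessed. They can also be dispatched explicitly with `session.Advanced.Eagerly.ExecuteAllPendingLazyOperations()`. A short sketch (not part of the samples above):
+
+{`// Define several lazy requests in the same session
+Lazy<IEnumerable<Employee>> lazyEmployees = session
+    .Query<Employee>()
+    .Where(x => x.FirstName == "Robert")
+    .Lazily();
+
+Lazy<int> lazyCount = session
+    .Query<Employee>()
+    .Where(x => x.FirstName == "Robert")
+    .CountLazily();
+
+// Both pending lazy operations are executed here, in one round trip:
+session.Advanced.Eagerly.ExecuteAllPendingLazyOperations();
+
+IEnumerable<Employee> employees = lazyEmployees.Value; // No further server call
+int count = lazyCount.Value;                           // No further server call
+`}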
+ + + +## Syntax + + + +{`// Lazy query overloads: +Lazy> Lazily(); +Lazy> Lazily(Action> onEval); + +Lazy>> LazilyAsync(); +Lazy>> LazilyAsync(Action> onEval); +`} + + + + +{`// Lazy count query overloads: +Lazy CountLazily(); +Lazy LongCountLazily(); + +Lazy> CountLazilyAsync(CancellationToken token = default(CancellationToken)); +Lazy> LongCountLazilyAsync(CancellationToken token = default(CancellationToken)); +`} + + + + +{`// Lazy suggestions query overloads: +Lazy> + ExecuteLazy(Action> onEval = null); + +Lazy>> + ExecuteLazyAsync(Action> onEval = null, + CancellationToken token = default); +`} + + + + +{`// Lazy facets query overloads: +Lazy> + ExecuteLazy(Action> onEval = null); + +Lazy>> + ExecuteLazyAsync(Action> onEval = null, + CancellationToken token = default); +`} + + + +| Parameters | Type | Description | +|------------|-----------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------| +| **onEval** | `Action>`
`Action>`
`Action>` | An action that will be performed on the query results
when the query is executed. | + +| Return Value | | +|----------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------| +| `Lazy>`
`Lazy`
`Lazy>`
`Lazy>` | A lazy instance that will evaluate the query only when needed. | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-java.mdx new file mode 100644 index 0000000000..e221c2dd26 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-java.mdx @@ -0,0 +1,136 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +In some situations, query execution must be delayed. To cover such a scenario, `lazily` and many other query extensions have been introduced. + +## Lazily + + + +{`Lazy> lazily(); + +Lazy> lazily(Consumer> onEval); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **onEval** | Consumer<List<TResult>> | An action that will be performed on the query results. | + +| Return Value | | +| ------------- | ----- | +| Lazy<List<TResult>> | Lazy query initializer returning query results. | + +### Example + + + +{`Lazy> employeesLazy = session + .query(Employee.class) + .whereEquals("FirstName", "Robert") + .lazily(); + +List employees = employeesLazy.getValue(); // query will be executed here +`} + + + + + +## Counts + + + +{`Lazy countLazily(); +`} + + + +| Return Value | | +| ------------- | ----- | +| Lazy<Integer> | Lazy query initializer returning a count of matched documents. | + +### Example + + + +{`Lazy countLazy = session + .query(Employee.class) + .whereEquals("FirstName", "Robert") + .countLazily(); + +Integer count = countLazy.getValue(); // query will be executed here +`} + + + + + +## Suggestions + + + +{`Lazy> executeLazy(); + +Lazy> executeLazy(Consumer> onEval); +`} + + + +| Return Value | | +| ------------- | ----- | +| Lazy<Map<String, SuggestionResult>> | Lazy query initializer containing a map with suggestions for matching executed query | + +### Example + + + +{`Lazy> suggestLazy = session + .query(Employee.class, Query.index("Employees_ByFullName")) + .suggestUsing(builder -> builder.byField("FullName", "johne")) + .executeLazy(); + +Map suggest = suggestLazy.getValue(); // query will be executed here +List suggestions = suggest.get("FullName").getSuggestions(); +`} + + + + + +## Facets + + + +{`Lazy> executeLazy(); + +Lazy> executeLazy(Consumer> onEval); +`} + + + +| Return Value | | +| ------------- | ----- | +| Lazy<Map<String, FacetResult>> | Lazy query initializer containing a map with facet results matching executed query | + +### Example + + + +{`Lazy> facetsLazy = session + .query(Camera.class, Query.index("Camera/Costs")) + .aggregateUsing("facets/CameraFacets") + .executeLazy(); + +Map facets = facetsLazy.getValue(); // query will be executed here +FacetResult results = facets.get("manufacturer"); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-nodejs.mdx new file mode 100644 index 0000000000..54ae3a0f85 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-nodejs.mdx @@ -0,0 +1,171 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Query execution can be deferred: the query can be 
defined as **Lazy**, and executed + at a later time, when its results are actually needed. + +* This article contains examples for lazy queries. Prior to reading it, please refer + to [perform requests lazily](../../../client-api/session/how-to/perform-operations-lazily.mdx) + for general knowledge about RavenDB's lazy behavior and other request types that can be + executed lazily within a session. + +* In this page: + * [Lazy query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-query) + * [Lazy count query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-count-query) + * [Lazy suggestions query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-suggestions-query) + * [Lazy facets query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-facets-query) + * [Syntax](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#syntax) + + +## Lazy query + + + +{`// Define a lazy query +const lazyEmployees = session + .query(\{ collection: "Employees" \}) + .whereEquals("FirstName", "Robert") + // Add a call to 'lazily' + .lazily(); + +const employees = await lazyEmployees.getValue(); // Query is executed here +`} + + + +* Learn more about queries in this [query overview](../../../client-api/session/querying/how-to-query.mdx). + + + +## Lazy count query + + + +{`// Define a lazy count query +const lazyCount = session + .query(\{ collection: "Employees" \}) + .whereEquals("FirstName", "Robert") + // Add a call to 'countLazily' + .countLazily(); + +const count = await lazyCount.getValue(); // Query is executed here +`} + + + + + +## Lazy suggestions query + + + +{`// Define a lazy suggestion query +const lazySuggestions = session + .query(\{ collection: "Products" \}) + .suggestUsing(builder => builder.byField("Name", "chaig")) + // Add a call to 'executeLazy' + .executeLazy(); + +const suggestResult = await lazySuggestions.getValue(); // Query is executed here +const suggestions = suggestResult["Name"].suggestions; +`} + + + +* Learn more about suggestions in [query for suggestions](../../../client-api/session/querying/how-to-work-with-suggestions.mdx). 
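+
+As in the other clients, all lazy requests pending on a session are sent together, in a single round trip, when the first `getValue()` is awaited. The sketch below assumes the Node.js client's `session.advanced.eagerly.executeAllPendingLazyOperations()` method for dispatching them explicitly:
+
+{`// Define several lazy requests in the same session
+const lazyEmployees = session
+    .query({ collection: "Employees" })
+    .whereEquals("FirstName", "Robert")
+    .lazily();
+
+const lazyCount = session
+    .query({ collection: "Employees" })
+    .whereEquals("FirstName", "Robert")
+    .countLazily();
+
+// Both pending lazy operations are executed here, in one round trip:
+await session.advanced.eagerly.executeAllPendingLazyOperations();
+
+const employees = await lazyEmployees.getValue(); // No further server call
+const count = await lazyCount.getValue();         // No further server call
+`}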
+
+
+
+## Lazy facets query
+
+
+
+
+{`// The facets definition used in the facets query:
+// ===============================================
+const categoryNameFacet = new Facet();
+
+categoryNameFacet.fieldName = "categoryName";
+categoryNameFacet.displayFieldName = "Product Category";
+
+const rangeFacet = new RangeFacet();
+rangeFacet.ranges = [
+    "pricePerUnit < " + 25,
+    "pricePerUnit >= " + 25 + " and pricePerUnit < " + 50,
+    "pricePerUnit >= " + 50 + " and pricePerUnit < " + 100,
+    "pricePerUnit >= " + 100
+];
+rangeFacet.displayFieldName = 'Price per Unit';
+
+const facetsDefinition = [categoryNameFacet, rangeFacet];
+
+// The lazy facets query:
+// ======================
+const lazyFacets = session
+    .query({ indexName: "Products/ByCategoryAndPrice" })
+    .aggregateBy(...facetsDefinition)
+    // Add a call to 'executeLazy'
+    .executeLazy();
+
+const facets = await lazyFacets.getValue(); // Query is executed here
+
+const categoryResults = facets["Product Category"];
+const priceResults = facets["Price per Unit"];
+`}
+
+
+
+
+{`// The index definition used in the facets query:
+class Products_ByCategoryAndPrice extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        const { load } = this.mapUtils();
+
+        this.map("Products", product => {
+            return {
+                categoryName: load(product.Category, "Categories").Name,
+                pricePerUnit: product.PricePerUnit
+            }
+        });
+    }
+}
+`}
+
+
+
+
+* Learn more about facets in [perform faceted search](../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx).
+
+
+
+## Syntax
+
+
+
+{`lazily();
+lazily(onEval);
+
+countLazily();
+
+executeLazy();
+`}
+
+
+
+| Parameters | Type | Description |
+|------------|----------------------|-------------|
+| **onEval** | `(object[]) => void` | An action that will be performed on the query results
when the query is executed. | + +| Return Value | | +|--------------|------------------------------------------------------------------| +| **object** | A `Lazy` instance that will evaluate the query only when needed. | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-php.mdx new file mode 100644 index 0000000000..4d2bd2d089 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-php.mdx @@ -0,0 +1,143 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Query execution can be deferred: the query can be defined as **Lazy**, and executed + at a later time, when its results are actually needed. + +* This article contains examples for lazy queries. Prior to reading it, please refer + to [perform requests lazily](../../../client-api/session/how-to/perform-operations-lazily.mdx) + for general knowledge about RavenDB's lazy behavior and other request types that can be + executed lazily within a session. + +* Learn more about queries in this [query overview](../../../client-api/session/querying/how-to-query.mdx). + +* In this page: + * [Lazy query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-query) + * [Lazy count query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-count-query) + * [Lazy suggestions query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-suggestions-query) + * [Lazy Aggregation](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-aggregation) + * [Syntax](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#syntax) + + +## Lazy query + + + +{`/** @var Lazy> $employeesLazy */ +$employeesLazy = $session + ->query(Employee::class) + ->whereEquals("FirstName", "Robert") + ->lazily(); + +/** @var array $employees */ +$employees = $employeesLazy->getValue(); // query will be executed here +`} + + + + + +## Lazy count query + + + +{`/** @var Lazy $countLazy */ +$countLazy = $session + ->query(Employee::class) + ->whereEquals("FirstName", "Robert") + ->countLazily(); + +/** @var int $count */ +$count = $countLazy->getValue(); // query will be executed here +`} + + + + + +## Lazy suggestions query + + + +{`/** @var Lazy> $suggestLazy */ +$suggestLazy = $session + ->query(Employee::class, Query::index("Employees_ByFullName")) + ->suggestUsing(function($builder) \{ $builder->byField("FullName", "johne"); \}) + ->executeLazy(); + +/** @var array $suggest */ +$suggest = $suggestLazy->getValue(); // query will be executed here + +/** @var array $suggestions */ +$suggestions = $suggest["FullName"]->getSuggestions(); +`} + + + +* Learn more about suggestions in [query for suggestions](../../../client-api/session/querying/how-to-work-with-suggestions.mdx). 
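+
+Pending lazy operations defined on a session are sent to the server together, in a single round trip. The sketch below assumes the PHP client mirrors the Java client's `executeAllPendingLazyOperations()` API for dispatching them explicitly:
+
+{`// Define several lazy requests in the same session
+$employeesLazy = $session
+    ->query(Employee::class)
+    ->whereEquals("FirstName", "Robert")
+    ->lazily();
+
+$countLazy = $session
+    ->query(Employee::class)
+    ->whereEquals("FirstName", "Robert")
+    ->countLazily();
+
+// Assumed API: both pending lazy operations are executed here, in one round trip
+$session->advanced()->eagerly()->executeAllPendingLazyOperations();
+
+$employees = $employeesLazy->getValue(); // No further server call
+$count = $countLazy->getValue();         // No further server call
+`}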
+ + + +## Lazy aggregation + + + +{`/** @var Lazy> $facetsLazy */ +$facetsLazy = $session + ->query(Camera::class, Query::index("Camera/Costs")) + ->aggregateUsing("facets/CameraFacets") + ->executeLazy(); + +/** @var array $facets */ +$facets = $facetsLazy->getValue(); // query will be executed here + +/** @var FacetResult $results */ +$results = $facets["manufacturer"]; +`} + + + + + +## Syntax + + + +{`/** + * Usage + * - lazily(); + * - lazily(Closure $onEval) + */ +function lazily(?Closure $onEval = null): Lazy; +`} + + + + +{`function countLazily(): Lazy; +`} + + + + +{`/** + * Usage + * - executeLazy(); + * - executeLazy(Closure $onEval) + */ +public function executeLazy(?Closure $onEval = null): Lazy; +`} + + + +| Parameter | Type | Description | +|-------------|------------|-----------------------------------------------------------------------| +| **$onEval** | `?Closure` | An action to perform on the query results when the query is executed | + +| Return Value | Description | +|--------------|---------------------------------------------------------| +| `Lazy` | A lazy instance that will evaluate the query only when needed | diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-python.mdx new file mode 100644 index 0000000000..d0f31fe02e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-perform-queries-lazily-python.mdx @@ -0,0 +1,218 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Query execution can be deferred: the query can be defined as **Lazy**, and executed + at a later time, when its results are actually needed. + +* This article contains examples for lazy queries. Prior to reading it, please refer + to [perform requests lazily](../../../client-api/session/how-to/perform-operations-lazily.mdx) + for general knowledge about RavenDB's lazy behavior and other request types that can be + executed lazily within a session. + +* In this page: + * [Lazy query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-query) + * [Lazy count query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-count-query) + * [Lazy suggestions query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-suggestions-query) + * [Lazy facets query](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-facets-query) + * [Syntax](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#syntax) + + +## Lazy query + +To run a [regular query](../../../client-api/session/querying/how-to-query.mdx) +(not a [suggestions](../../../client-api/session/querying/how-to-work-with-suggestions.mdx) +query or a [faceted search](../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx)) +lazily, use `.lazily()` as follows. + + + +{`# Define a lazy query +lazy_employees = ( + session.query(object_type=Employee).where_equals("first_name", "Robert") + # Add a call to 'Lazily' + .lazily() +) + +employees = lazy_employees.value # Query is executed here +`} + + + + + +## Lazy count query + +To count query results lazily, use `count_lazily()` as follows. 
+
+
+{`lazy_count = session.query(object_type=Employee).where_equals("first_name", "Robert").count_lazily()
+
+count = lazy_count.value # Query is executed here
+`}
+
+
+
+
+
+## Lazy suggestions query
+
+To run a suggestions search lazily, use [suggest_using](../../../client-api/session/querying/how-to-work-with-suggestions.mdx)
+along with `.execute_lazy()` as shown below.
+
+
+
+{`lazy_suggestions = (
+    session.query(object_type=Product).suggest_using(lambda builder: builder.by_field("name", "chaig"))
+    # Add a call to 'execute_lazy'
+    .execute_lazy()
+)
+
+suggest = lazy_suggestions.value # Query is executed here
+suggestions = suggest["name"].suggestions
+`}
+
+
+
+
+
+## Lazy facets query
+
+To run a faceted query lazily:
+
+* Search a predefined static index (see _Index_definition_ below)
+* Aggregate the results using a facets definition (see _Facets_definition_ below)
+* Apply `.execute_lazy()` (see _Query_ below)
+
+
+
+
+{`# Define a lazy facets query
+lazy_facets = (
+    session.query_index_type(
+        Products_ByCategoryAndPrice, Products_ByCategoryAndPrice.IndexEntry
+    ).aggregate_by_facets(facets_definition)
+    # Add a call to 'execute_lazy'
+    .execute_lazy()
+)
+
+facets = lazy_facets.value # Query is executed here
+
+category_results = facets["Product Category"]
+price_results = facets["Price per Unit"]
+`}
+
+
+
+
+{`# The facets definition used in the facets query
+facet = Facet(field_name="CategoryName")
+facet.display_field_name = "Product Category"
+
+range_facet = RangeFacet()
+range_facet.ranges = [
+    "price_per_unit < 200",
+    "price_per_unit between 200 and 400",
+    "price_per_unit between 400 and 600",
+    "price_per_unit between 600 and 800",
+    "price_per_unit >= 800",
+]
+range_facet.display_field_name = "Price per Unit"
+
+facets_definition = [facet, range_facet]
+`}
+
+
+
+
+{`# The index definition used in the facets query
+class Products_ByCategoryAndPrice(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from p in docs.Products select new {"
+            'CategoryName = LoadDocument(p.Category, "Categories").Name, PricePerUnit = p.PricePerUnit'
+            "}"
+        )
+
+    class IndexEntry:
+        def __init__(self, category_name: str, price_per_unit: int):
+            self.category_name = category_name
+            self.price_per_unit = price_per_unit
+`}
+
+
+
+
+
+Learn more about facets in [perform faceted search](../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx).
+
+
+
+
+## Syntax
+
+
+
+{`# Lazy query
+def lazily(self, on_eval: Callable[[List[_T]], None] = None) -> Lazy[List[_T]]: ...
+`}
+
+
+
+
+{`# Lazy count query
+def count_lazily(self) -> Lazy[int]: ...
+`}
+
+
+
+
+{`# Lazy suggestions query
+def execute_lazy(
+    self, on_eval: Optional[Callable[[Dict[str, SuggestionResult]], None]] = None
+) -> "Lazy[Dict[str, SuggestionResult]]": ...
+`}
+
+
+
+
+{`# Lazy facet query
+def execute_lazy(
+    self, on_eval: Optional[Callable[[Dict[str, FacetResult]], None]] = None
+) -> Lazy[Dict[str, FacetResult]]: ...
+`} + + + +| Parameters | Type | Description | +|------------|-----------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------| +| **on_eval** | `Callable[[List[_T]], None]`
`Callable[[Dict[str, SuggestionResult]], None]` (optional)<br/>`Callable[[Dict[str, FacetResult]], None]` (optional) | An action that will be performed on the query results when the query is executed. |

| Return Value | |
|--------------|--|
| `Lazy[List[_T]]`<br/>`Lazy[Dict[str, SuggestionResult]]`<br/>
`Lazy[Dict[str, FacetResult]]` | A lazy instance that will evaluate the query only when needed. | +| `Lazy[int]` | The results of a lazy count query | + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-csharp.mdx new file mode 100644 index 0000000000..f89da8202f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-csharp.mdx @@ -0,0 +1,825 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Applying a projection in a query allows you to shape the query results to meet specific requirements, + delivering just the data needed instead of the original full document content. + +* This article provides examples of projecting query results when making a **dynamic-query**. + For projecting results when querying a **static-index** see [project index query results](../../../indexes/querying/projections.mdx). + +* In this article: + * [Projections overview](../../../client-api/session/querying/how-to-project-query-results.mdx#projections-overview) + * [Projection Methods](../../../client-api/session/querying/how-to-project-query-results.mdx#projection-methods) + * [Select](../../../client-api/session/querying/how-to-project-query-results.mdx#select) + * [ProjectInto](../../../client-api/session/querying/how-to-project-query-results.mdx#projectinto) + * [SelectFields](../../../client-api/session/querying/how-to-project-query-results.mdx#selectfields) + * [Single projection per query](../../../client-api/session/querying/how-to-project-query-results.mdx#single-projection-per-query) + + +## Projections overview +**What are projections**: + +* A projection refers to the **transformation of query results** into a customized structure, + modifying the shape of the data returned by the server. + +* Instead of retrieving the full document from the server and then picking relevant data from it on the client, + you can request a subset of the data, specifying the document fields you want to get from the server. + +* The query can load [related documents](../../../indexes/indexing-related-documents.mdx#what-are-related-documents) and have their data merged into the projection results. + +* Objects and arrays, including content from nested structures, can be projected. + An alias name can be given to the projected fields, and any calculations can be applied within the projection. +**When to use projections**: + +* Projections allow you to tailor the query results specifically to your needs. + Getting specific details to display can be useful when presenting data to users or populating user interfaces. + Projection queries are also useful with [subscriptions](../../../client-api/data-subscriptions/what-are-data-subscriptions.mdx) + since all transformation work is done on the server side without having to send a lot of data over the wire. + +* Returning partial document data from the server reduces network traffic, + as clients receive only relevant data required for a particular task, enhancing overall performance. + +* Savings can be significant if you need to show just a bit of information from a large document. For example: + the size of the result when querying for all "Orders" documents where "Company" is "companies/65-A" is 19KB. 
+ Performing the same query and projecting only the "Employee" and "OrderedAt" fields results in only 5KB. + +* However, when you need to actively work with the complete set of data on the client side, + then do use a query without a projection to retrieve the full document from the server. +**Projections are not tracked by the session**: + +* On the client side, the resulting projected entities returned by the query are Not tracked by the Session. + +* Any modification made to a projection entity will not modify the corresponding document on the server when _SaveChanges_ is called. +**Projections are the final stage in the query pipeline**: + +* Projections are applied as the last stage in the query pipeline, + after the query has been processed, filtered, sorted, and paged. + +* This means that the projection does Not apply to all the documents in the collection, + but only to the documents matching the query predicate. + +* Within the projection you can only filter what data will be returned from the matching documents, + but you cannot filter which documents will be returned. That has already been determined earlier in the query pipeline. + +* Only a single projection request can be made per Query (and DocumentQuery). + Learn more in [single projection per query](../../../client-api/session/querying/how-to-project-query-results.mdx#single-projection-per-query). +**The cost of projections**: + +* Queries in RavenDB do not allow any computation to occur during the query phase. + However, you can perform any [calculations](../../../client-api/session/querying/how-to-project-query-results.mdx#example---projection-with-calculations) + inside the projection. + +* But while calculations within a projection are allowed, having a very complex logic can impact query performance. + So RavenDB limits the total time it will spend processing a query and its projections. + Exceeding this time limit will fail the query. This is configurable, see the following configuration keys: + * [Databases.QueryTimeoutInSec](../../../server/configuration/database-configuration.mdx#databasesquerytimeoutinsec) + * [Databases.QueryOperationTimeoutInSec](../../../server/configuration/database-configuration.mdx#databasesqueryoperationtimeoutinsec) + + + +## Projection Methods + +## Select + +* The most common way to perform a query with a projection is to use the `Select` method. + +* You can specify what fields from the document you want to retrieve and even provide a complex expression. + + + +##### Example I - Projecting individual fields of the document: + + + + +{`var projectedResults = session + // Make a dynamic query on the Companies collection + .Query() + // Call Select to define the new structure that will be returned per Company document + .Select(x => new + { + Name = x.Name, + City = x.Address.City, + Country = x.Address.Country + }) + .ToList(); + +// Each resulting object in the list is Not a 'Company' entity, +// it is a new object containing ONLY the fields specified in the Select. +`} + + + + +{`var projectedResults = await asyncSession + // Make a dynamic query on the Companies collection + .Query() + // Call Select to define the new structure that will be returned per Company document + .Select(x => new {Name = x.Name, City = x.Address.City, Country = x.Address.Country}) + .ToListAsync(); + +// Each resulting object in the list is Not a 'Company' entity, +// it is a new object containing ONLY the fields specified in the Select. 
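+
+// Note (per the overview above): projection results such as these are NOT
+// tracked by the session - modifying them and calling SaveChanges will not
+// change the corresponding documents on the server.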
+`} + + + + +{`from "Companies" +select Name, Address.City as City, Address.Country as Country +`} + + + + + + + +##### Example II - Projecting arrays and objects: + + + + +{`var projectedResults = session + .Query() + .Select(x => new + { + ShipTo = x.ShipTo, + // Retrieve all product names from the Lines array in an Order document + ProductNames = x.Lines.Select(y => y.ProductName) + }) + .ToList(); +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Select(x => new + { + ShipTo = x.ShipTo, + // Retrieve all product names from the Lines array in an Order document + ProductNames = x.Lines.Select(y => y.ProductName) + }) + .ToListAsync(); +`} + + + + +{`// Using simple expression: +from "Orders" +select ShipTo, Lines[].ProductName as ProductNames + +// Using JavaScript object literal syntax: +from "Orders" as x +select { + ShipTo: x.ShipTo, + ProductNames: x.Lines.map(y => y.ProductName) +} +`} + + + + + + + +##### Example III - Projection with expression: + + + + +{`var projectedResults = session + .Query() + .Select(x => new + { + // Any expression can be provided for the projected content + FullName = x.FirstName + " " + x.LastName + }) + .ToList(); +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Select(x => new + { + // Any expression can be provided for the projected content + FullName = x.FirstName + " " + x.LastName + }) + .ToListAsync(); +`} + + + + +{`from "Employees" as e +select { + FullName: e.FirstName + " " + e.LastName +} +`} + + + + + + + +##### Example IV - Projection with calculations: + + + + +{`var projectedResults = session + .Query() + .Select(x => new + { + // Any calculations can be done within a projection + TotalProducts = x.Lines.Count, + TotalDiscountedProducts = x.Lines.Count(x => x.Discount > 0), + TotalPrice = x.Lines.Sum(l => l.PricePerUnit * l.Quantity) + }) + .ToList(); +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Select(x => new + { + // Any calculations can be done within a projection + TotalProducts = x.Lines.Count, + TotalDiscountedProducts = x.Lines.Count(x => x.Discount > 0), + TotalPrice = x.Lines.Sum(l => l.PricePerUnit * l.Quantity) + }) + .ToListAsync(); +`} + + + + +{`from "Orders" as x +select { + TotalProducts: x.Lines.length, + TotalDiscountedProducts: x.Lines.filter(x => x.Discount > 0).length, + TotalPrice: x.Lines + .map(l => l.PricePerUnit * l.Quantity) + .reduce((a, b) => a + b, 0) +} +`} + + + + + + + +##### Example V - Projecting using functions: + + + + +{`// Use LINQ query syntax notation +var projectedResults = (from e in session.Query() + // Define a function + let format = (Func)(p => p.FirstName + " " + p.LastName) + select new + { + // Call the function from the projection + FullName = format(e) + }) + .ToList(); +`} + + + + +{`// Use LINQ query syntax notation +var projectedResults = await (from e in asyncSession.Query() + // Define a function + let format = (Func)(p => p.FirstName + " " + p.LastName) + select new + { + // Call the function from the projection + FullName = format(e) + }) + .ToListAsync(); +`} + + + + +{`declare function output(e) { + var format = p => p.FirstName + " " + p.LastName; + return { FullName: format(e) }; +} +from "Employees" as e select output(e) +`} + + + + + + + +##### Example VI - Projecting using a loaded document: + + + + +{`// Use LINQ query syntax notation +var projectedResults = (from o in session.Query() + // Use RavenQuery.Load to load the related Company document + let c = RavenQuery.Load(o.Company) + select new + { + 
+
+
+
+## Projection Methods
+
+## Select
+
+* The most common way to perform a query with a projection is to use the `Select` method.
+
+* You can specify what fields from the document you want to retrieve and even provide a complex expression.
+
+
+
+##### Example I - Projecting individual fields of the document:
+
+
+
+{`var projectedResults = session
+    // Make a dynamic query on the Companies collection
+    .Query<Company>()
+    // Call Select to define the new structure that will be returned per Company document
+    .Select(x => new
+    {
+        Name = x.Name,
+        City = x.Address.City,
+        Country = x.Address.Country
+    })
+    .ToList();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is a new object containing ONLY the fields specified in the Select.
+`}
+
+
+
+{`var projectedResults = await asyncSession
+    // Make a dynamic query on the Companies collection
+    .Query<Company>()
+    // Call Select to define the new structure that will be returned per Company document
+    .Select(x => new {Name = x.Name, City = x.Address.City, Country = x.Address.Country})
+    .ToListAsync();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is a new object containing ONLY the fields specified in the Select.
+`}
+
+
+
+{`from "Companies"
+select Name, Address.City as City, Address.Country as Country
+`}
+
+
+
+
+
+##### Example II - Projecting arrays and objects:
+
+
+
+{`var projectedResults = session
+    .Query<Order>()
+    .Select(x => new
+    {
+        ShipTo = x.ShipTo,
+        // Retrieve all product names from the Lines array in an Order document
+        ProductNames = x.Lines.Select(y => y.ProductName)
+    })
+    .ToList();
+`}
+
+
+
+{`var projectedResults = await asyncSession
+    .Query<Order>()
+    .Select(x => new
+    {
+        ShipTo = x.ShipTo,
+        // Retrieve all product names from the Lines array in an Order document
+        ProductNames = x.Lines.Select(y => y.ProductName)
+    })
+    .ToListAsync();
+`}
+
+
+
+{`// Using simple expression:
+from "Orders"
+select ShipTo, Lines[].ProductName as ProductNames
+
+// Using JavaScript object literal syntax:
+from "Orders" as x
+select {
+    ShipTo: x.ShipTo,
+    ProductNames: x.Lines.map(y => y.ProductName)
+}
+`}
+
+
+
+
+
+##### Example III - Projection with expression:
+
+
+
+{`var projectedResults = session
+    .Query<Employee>()
+    .Select(x => new
+    {
+        // Any expression can be provided for the projected content
+        FullName = x.FirstName + " " + x.LastName
+    })
+    .ToList();
+`}
+
+
+
+{`var projectedResults = await asyncSession
+    .Query<Employee>()
+    .Select(x => new
+    {
+        // Any expression can be provided for the projected content
+        FullName = x.FirstName + " " + x.LastName
+    })
+    .ToListAsync();
+`}
+
+
+
+{`from "Employees" as e
+select {
+    FullName: e.FirstName + " " + e.LastName
+}
+`}
+
+
+
+
+
+##### Example IV - Projection with calculations:
+
+
+
+{`var projectedResults = session
+    .Query<Order>()
+    .Select(x => new
+    {
+        // Any calculations can be done within a projection
+        TotalProducts = x.Lines.Count,
+        TotalDiscountedProducts = x.Lines.Count(l => l.Discount > 0),
+        TotalPrice = x.Lines.Sum(l => l.PricePerUnit * l.Quantity)
+    })
+    .ToList();
+`}
+
+
+
+{`var projectedResults = await asyncSession
+    .Query<Order>()
+    .Select(x => new
+    {
+        // Any calculations can be done within a projection
+        TotalProducts = x.Lines.Count,
+        TotalDiscountedProducts = x.Lines.Count(l => l.Discount > 0),
+        TotalPrice = x.Lines.Sum(l => l.PricePerUnit * l.Quantity)
+    })
+    .ToListAsync();
+`}
+
+
+
+{`from "Orders" as x
+select {
+    TotalProducts: x.Lines.length,
+    TotalDiscountedProducts: x.Lines.filter(x => x.Discount > 0).length,
+    TotalPrice: x.Lines
+        .map(l => l.PricePerUnit * l.Quantity)
+        .reduce((a, b) => a + b, 0)
+}
+`}
+
+
+
+
+
+##### Example V - Projecting using functions:
+
+
+
+{`// Use LINQ query syntax notation
+var projectedResults = (from e in session.Query<Employee>()
+                        // Define a function
+                        let format = (Func<Employee, string>)(p => p.FirstName + " " + p.LastName)
+                        select new
+                        {
+                            // Call the function from the projection
+                            FullName = format(e)
+                        })
+    .ToList();
+`}
+
+
+
+{`// Use LINQ query syntax notation
+var projectedResults = await (from e in asyncSession.Query<Employee>()
+                              // Define a function
+                              let format = (Func<Employee, string>)(p => p.FirstName + " " + p.LastName)
+                              select new
+                              {
+                                  // Call the function from the projection
+                                  FullName = format(e)
+                              })
+    .ToListAsync();
+`}
+
+
+
+{`declare function output(e) {
+    var format = p => p.FirstName + " " + p.LastName;
+    return { FullName: format(e) };
+}
+from "Employees" as e select output(e)
+`}
+
+
+
+
+##### Example VI - Projecting using a loaded document:
+
+
+
+{`// Use LINQ query syntax notation
+var projectedResults = (from o in session.Query<Order>()
+                        // Use RavenQuery.Load to load the related Company document
+                        let c = RavenQuery.Load<Company>(o.Company)
+                        select new
+                        {
+                            CompanyName = c.Name,   // info from the related Company document
+                            ShippedAt = o.ShippedAt // info from the Order document
+                        })
+    .ToList();
+`}
+
+
+
+{`// Use LINQ query syntax notation
+var projectedResults = await (from o in asyncSession.Query<Order>()
+                              // Use RavenQuery.Load to load the related Company document
+                              let c = RavenQuery.Load<Company>(o.Company)
+                              select new
+                              {
+                                  CompanyName = c.Name,   // info from the related Company document
+                                  ShippedAt = o.ShippedAt // info from the Order document
+                              })
+    .ToListAsync();
+`}
+
+
+
+{`from "Orders" as o
+load o.Company as c
+select {
+    CompanyName: c.Name,
+    ShippedAt: o.ShippedAt
+}
+`}
+
+
+
+
+
+##### Example VII - Projection with dates:
+
+
+
+{`var projectedResults = session
+    .Query<Employee>()
+    .Select(e => new
+    {
+        DayOfBirth = e.Birthday.Day,
+        MonthOfBirth = e.Birthday.Month,
+        Age = DateTime.Today.Year - e.Birthday.Year
+    })
+    .ToList();
+`}
+
+
+
+{`var projectedResults = await asyncSession
+    .Query<Employee>()
+    .Select(e => new
+    {
+        DayOfBirth = e.Birthday.Day,
+        MonthOfBirth = e.Birthday.Month,
+        Age = DateTime.Today.Year - e.Birthday.Year
+    })
+    .ToListAsync();
+`}
+
+
+
+{`from "Employees" as e
+select {
+    DayOfBirth: new Date(Date.parse(e.Birthday)).getDate(),
+    MonthOfBirth: new Date(Date.parse(e.Birthday)).getMonth() + 1,
+    Age: new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear()
+}
+`}
+
+
+
+
+
+##### Example VIII - Projection with raw JavaScript code:
+
+
+
+{`var projectedResults = session.Query<Employee>()
+    .Select(e => new
+    {
+        // Provide a JavaScript expression to the RavenQuery.Raw method
+        Date = RavenQuery.Raw<DateTime>("new Date(Date.parse(e.Birthday))"),
+        Name = RavenQuery.Raw(e.FirstName, "substr(0, 3)")
+    })
+    .ToList();
+`}
+
+
+
+{`var projectedResults = await asyncSession.Query<Employee>()
+    .Select(e => new
+    {
+        // Provide a JavaScript expression to the RavenQuery.Raw method
+        Date = RavenQuery.Raw<DateTime>("new Date(Date.parse(e.Birthday))"),
+        Name = RavenQuery.Raw(e.FirstName, "substr(0, 3)")
+    })
+    .ToListAsync();
+`}
+
+
+
+{`from "Employees" as e
+select {
+    Date: new Date(Date.parse(e.Birthday)),
+    Name: e.FirstName.substr(0, 3)
+}
+`}
+
+
+
+
+
+##### Example IX - Projection with metadata:
+
+
+
+{`var projectedResults = session.Query<Employee>()
+    .Select(e => new
+    {
+        Name = e.FirstName,
+        Metadata = RavenQuery.Metadata(e) // Get the metadata
+    })
+    .ToList();
+`}
+
+
+
+{`var projectedResults = await asyncSession.Query<Employee>()
+    .Select(e => new
+    {
+        Name = e.FirstName,
+        Metadata = RavenQuery.Metadata(e) // Get the metadata
+    })
+    .ToListAsync();
+`}
+
+
+
+{`from "Employees" as e
+select {
+    Name: e.FirstName,
+    Metadata: getMetadata(e)
+}
+`}
+
+
+
+
+
+##### Example X - Projection with include:
+
+When using `Include` in a projection query,
+RavenDB includes the related document only if the included path is one of the fields in the projection.
+
+
+
+{`var projectedResults = session
+    .Query<Product>()
+    // NOTE:
+    // While the following 'Include' line compiles,
+    // the related Supplier document will NOT BE INCLUDED in the query results,
+    // because 'Supplier' is not one of the projected fields in the 'Select' clause.
+    .Include(x => x.Supplier)
+    .Select(x => new
+    {
+        Name = x.Name,
+        ProductCategory = x.Category
+    })
+    // The related Category document WILL BE INCLUDED in the query results,
+    // since 'ProductCategory' is one of the projected fields.
+    .Include(x => x.ProductCategory)
+    .ToList();
+`}
+
+
+
+{`var projectedResults = await asyncSession
+    .Query<Product>()
+    // NOTE:
+    // While the following 'Include' line compiles,
+    // the related Supplier document will NOT BE INCLUDED in the query results,
+    // because 'Supplier' is not one of the projected fields in the 'Select' clause.
+    .Include(x => x.Supplier)
+    .Select(x => new
+    {
+        Name = x.Name,
+        ProductCategory = x.Category
+    })
+    // The related Category document WILL BE INCLUDED in the query results,
+    // since 'ProductCategory' is one of the projected fields.
+    .Include(x => x.ProductCategory)
+    .ToListAsync();
+`}
+
+
+
+{`from "Products"
+select Name, Category as ProductCategory
+include Supplier, ProductCategory
+
+// NOTE:
+// Only the related Category document WILL BE INCLUDED in the query results.
+
+// The related Supplier document will NOT BE INCLUDED in the query results,
+// because 'Supplier' is Not one of the projected fields in the 'select' clause.
+`}
+
+
+
+
+## ProjectInto
+
+* Instead of `Select`, you can use `ProjectInto` to project all public fields of a generic type.
+
+* The results will be projected into objects of the specified projection class.
+
+
+
+{`var projectedResults = session
+    .Query<Company>()
+    // Call 'ProjectInto' instead of using 'Select'
+    // Pass the projection class
+    .ProjectInto<ContactDetails>()
+    .ToList();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is an object of type 'ContactDetails'.
+`}
+
+
+
+{`var projectedResults = await asyncSession
+    .Query<Company>()
+    // Call 'ProjectInto' instead of using 'Select'
+    // Pass the projection class
+    .ProjectInto<ContactDetails>()
+    .ToListAsync();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is an object of type 'ContactDetails'.
+`}
+
+
+
+{`public class ContactDetails
+{
+    // The projection class contains field names from the 'Company' document
+    public string Name { get; set; }
+    public string Phone { get; set; }
+    public string Fax { get; set; }
+}
+`}
+
+
+
+{`from "Companies"
+select Name, Phone, Fax
+`}
+
+
+
+
+## SelectFields
+
+The `SelectFields` method can only be used by a [Document Query](../../../client-api/session/querying/document-query/what-is-document-query.mdx).
+It has two overloads:
+
+
+
+{`// 1) Select fields to project by the projection class type
+IDocumentQuery<TProjection> SelectFields<TProjection>();
+
+// 2) Select specific fields to project
+IDocumentQuery<TProjection> SelectFields<TProjection>(params string[] fields);
+`}
+
+
+
+##### Using projection class type
+
+The projection class fields are the fields that you want to project from the document class.
+
+
+
+{`// Make a dynamic DocumentQuery
+var projectedResults = session.Advanced
+    .DocumentQuery<Company>()
+    // Call 'SelectFields'
+    // Pass the projection class type
+    .SelectFields<ContactDetails>()
+    .ToList();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is an object of type 'ContactDetails'.
+`}
+
+
+
+{`// Make a dynamic DocumentQuery
+var projectedResults = await asyncSession.Advanced
+    .AsyncDocumentQuery<Company>()
+    // Call 'SelectFields'
+    // Pass the projection class type
+    .SelectFields<ContactDetails>()
+    .ToListAsync();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is an object of type 'ContactDetails'.
+`}
+
+
+
+{`public class ContactDetails
+{
+    // The projection class contains field names from the 'Company' document
+    public string Name { get; set; }
+    public string Phone { get; set; }
+    public string Fax { get; set; }
+}
+`}
+
+
+
+{`from "Companies"
+select Name, Phone, Fax
+`}
+
+
+
+
+
+##### Using specific fields
+
+The fields specified are the fields that you want to project from the projection class.
+
+
+
+{`// Define an array with the field names that will be projected
+var projectionFields = new string[]
+{
+    // Fields from 'ContactDetails' class:
+    "Name", "Phone"
+};
+
+// Make a dynamic DocumentQuery
+var projectedResults = session.Advanced
+    .DocumentQuery<Company>()
+    // Call 'SelectFields'
+    // Pass the projection class type & the fields to be projected from it
+    .SelectFields<ContactDetails>(projectionFields)
+    .ToList();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is an object of type 'ContactDetails' containing data ONLY for the specified fields.
+`}
+
+
+
+{`// Define an array with the field names that will be projected
+var projectionFields = new string[]
+{
+    // Fields from 'ContactDetails' class:
+    "Name", "Phone"
+};
+
+// Make a dynamic DocumentQuery
+var projectedResults = await asyncSession.Advanced
+    .AsyncDocumentQuery<Company>()
+    // Call 'SelectFields'
+    // Pass the projection class type & the fields to be projected from it
+    .SelectFields<ContactDetails>(projectionFields)
+    .ToListAsync();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is an object of type 'ContactDetails' containing data ONLY for the specified fields.
+`}
+
+
+
+{`public class ContactDetails
+{
+    // The projection class contains field names from the 'Company' document
+    public string Name { get; set; }
+    public string Phone { get; set; }
+    public string Fax { get; set; }
+}
+`}
+
+
+
+{`from "Companies"
+select Name, Phone
+`}
+
+
+
+
+
+## Single projection per query
+
+* As of RavenDB v6.0, only a single projection request can be made per Query (and DocumentQuery).
+
+* Attempting multiple projection executions in the same query will result in an exception.
+
+  * Query:
+    Multiple `Select` calls or a combination of `ProjectInto` with a `Select` call will result in an exception.
+
+  * DocumentQuery:
+    Multiple `SelectFields` calls will result in an exception.
+
+
+
+{`// For example:
+try
+\{
+    var projectedResults = session
+        .Query<Company>()
+        // Make first projection
+        .ProjectInto<ContactDetails>()
+        // A second projection is not supported and will throw
+        .Select(x => new \{Name = x.Name\})
+        .ToList();
+\}
+catch (Exception e)
+\{
+    // The following exception will be thrown:
+    // "Projection is already done. You should not project your result twice."
+\}
+`}
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-java.mdx
 new file mode 100644
 index 0000000000..e11ca72cc7
 --- /dev/null
 +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-java.mdx
 @@ -0,0 +1,316 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Instead of pulling full documents in query results you can just grab some pieces of data from documents. You can also transform the projected
+results.
+The projections are defined with the usage of:
+
+- [SelectFields](../../../client-api/session/querying/how-to-project-query-results.mdx#selectfields)
+- [OfType](../../../client-api/session/querying/how-to-project-query-results.mdx#oftype---simple-projection)
+
+## SelectFields
+
+The most common way to perform a query with projection is to use the `selectFields` method. You can specify what fields from a document you want to retrieve.
+
+### Example I - Projecting Individual Fields of the Document
+
+
+
+{`// request name, city and country for all entities from 'Companies' collection
+QueryData queryData = new QueryData(
+    new String[] { "Name", "Address.city", "Address.country"},
+    new String[] { "Name", "City", "Country"});
+List<NameCityAndCountry> results = session
+    .query(Company.class)
+    .selectFields(NameCityAndCountry.class, queryData)
+    .toList();
+`}
+
+
+
+{`from Companies
+select Name, Address.City as City, Address.Country as Country
+`}
+
+
+
+### Example II - Projecting Arrays and Objects
+
+
+
+{`QueryData queryData = new QueryData(new String[]{ "ShipTo", "Lines[].ProductName" },
+    new String[]{"ShipTo", "Products"});
+
+List<ShipToAndProducts> results = session.query(Order.class)
+    .selectFields(ShipToAndProducts.class, queryData)
+    .toList();
+`}
+
+
+
+{`from Orders
+select ShipTo, Lines[].ProductName as Products
+`}
+
+
+
+### Example III - Projection with Expression
+
+
+
+{`List<FullName> results = session.advanced().rawQuery(FullName.class, "from Employees as e " +
+    "select {" +
+    " FullName : e.FirstName + \" \" + e.LastName " +
+    "}").toList();
+`}
+
+
+
+{`from Employees as e
+select {
+    FullName : e.FirstName + " " + e.LastName
+}
+`}
+
+
+
+### Example IV - Projection with `declared function`
+
+
+
+{`List<Employee> results = session.advanced().rawQuery(Employee.class, "declare function output(e) { " +
+    " var format = function(p){ return p.FirstName + \" \" + p.LastName; }; " +
+    " return { FullName : format(e) }; " +
+    "} " +
+    "from Employees as e select output(e)").toList();
+`}
+
+
+
+{`declare function output(e) {
+    var format = function(p){ return p.FirstName + " " + p.LastName; };
+    return { FullName : format(e) };
+}
+from Employees as e select output(e)
+`}
+
+
+
+### Example V - Projection with Calculation
+
+
+
+{`List<Total> results = session.advanced().rawQuery(Total.class, "from Orders as o " +
+    "select { " +
+    " Total : o.Lines.reduce( " +
+    " (acc , l) => acc += l.PricePerUnit * l.Quantity, 0) " +
+    "}").toList();
+`}
+
+
+
+{`from Orders as o
+select {
+    Total : o.Lines.reduce(
+        (acc , l) => acc += l.PricePerUnit * l.Quantity, 0)
+}
+`}
+
+
+
+### Example VI - Projection Using a Loaded Document
+
+
+
+{`List<OrderProjection> results = session.advanced().rawQuery(OrderProjection.class, "from Orders as o " +
+    "load o.Company as c " +
+    "select { " +
+    " CompanyName: c.Name," +
+    " ShippedAt: o.ShippedAt" +
+    "}").toList();
+`}
+
+
+
+{`from Orders as o
+load o.Company as c
+select {
+    CompanyName: c.Name,
+    ShippedAt: o.ShippedAt
+}
+`}
+
+
+
+### Example VII - Projection with Dates
+
+
+
+{`List<EmployeeProjection> results = session.advanced().rawQuery(EmployeeProjection.class, "from Employees as e " +
+    "select { " +
+    " DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(), " +
+    " MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1, " +
+    " Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear() " +
+    "}").toList();
+`}
+
+
+
+{`from Employees as e
+select {
+    DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(),
+    MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1,
+    Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear()
+}
+`}
+
+
+
+### Example VIII - Projection with Raw JavaScript Code
+
+
+
+{`List<EmployeeProjection> results = session.advanced().rawQuery(EmployeeProjection.class, "from Employees as e " +
+    "select { " +
+    " Date : new Date(Date.parse(e.Birthday)), " +
+    " Name : e.FirstName.substr(0,3) " +
+    "}").toList();
+`}
+
+
+
+{`from Employees as e
+select {
+    Date : new Date(Date.parse(e.Birthday)),
+    Name : e.FirstName.substr(0,3)
+}
+`}
+
+
+
+### Example IX - Projection with Metadata
+
+
+
+{`List<Employee> results = session.advanced().rawQuery(Employee.class, "from Employees as e " +
+    "select {" +
+    " Name : e.FirstName, " +
+    " Metadata : getMetadata(e)" +
+    "}").toList();
+`}
+
+
+
+{`from Employees as e
+select {
+    Name : e.FirstName,
+    Metadata : getMetadata(e)
+}
+`}
+
+
+
+This `selectFields` overload retrieves all public fields of the class passed as the generic parameter and uses them to perform the projection to the requested type.
+You can use it instead of calling `selectFields` with all the fields of the projection class listed explicitly.
+
+### Example X
+
+
+
+{`List<ContactDetails> results = session.query(Company.class, Companies_ByContact.class)
+    .selectFields(ContactDetails.class)
+    .toList();
+`}
+
+
+
+{`from index 'Companies/ByContact'
+select Name, Phone
+`}
+
+
+
+{`private class Companies_ByContact extends AbstractIndexCreationTask {
+    public Companies_ByContact() {
+
+        map = "from c in docs.Companies select new { Name = c.Contact.Name, Phone = c.Phone } ";
+
+        storeAllFields(FieldStorage.YES); // name and phone fields can be retrieved directly from index
+    }
+}
+`}
+
+
+
+
+
+## OfType - simple projection
+
+`ofType` is a client-side projection. The easiest way to explain how it works: it takes the results that the server returns and maps them to the given type. This may become useful when querying an index that contains fields that are not available in the mapped type.
+
+### Example
+
+
+
+{`// query index 'Products_BySupplierName'
+// return documents from collection 'Products' that have a supplier 'Norske Meierier'
+// project them to 'Product'
+List<Product> results = session.query(Products_BySupplierName.Result.class, Products_BySupplierName.class)
+    .whereEquals("Name", "Norske Meierier")
+    .ofType(Product.class)
+    .toList();
+`}
+
+
+
+
+
+Projected entities (even named types) are not tracked by the session.
+
+
+
+If the projected fields are stored inside the index itself (`FieldStorage.YES` in the index definition), then the query results will be created directly from there instead of retrieving documents in order to project.
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-nodejs.mdx
 new file mode 100644
 index 0000000000..f57a0d47bd
 --- /dev/null
 +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-nodejs.mdx
 @@ -0,0 +1,533 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Applying a projection in a query allows you to shape the query results to meet specific requirements,
+  delivering just the data needed instead of the original full document content.
+ +* This article provides examples of projecting query results when making a **dynamic-query**. + For projecting results when querying a **static-index** see [project index query results](../../../indexes/querying/projections.mdx). + +* In this article: + * [Projections overview](../../../client-api/session/querying/how-to-project-query-results.mdx#projections-overview) + * [SelectFields](../../../client-api/session/querying/how-to-project-query-results.mdx#selectfields) + * [Projecting nested object types](../../../client-api/session/querying/how-to-project-query-results.mdx#projecting-nested-object-types) + * [Syntax](../../../client-api/session/querying/how-to-project-query-results.mdx#syntax) + + +## Projections overview +**What are projections**: + +* A projection refers to the **transformation of query results** into a customized structure, + modifying the shape of the data returned by the server. + +* Instead of retrieving the full document from the server and then picking relevant data from it on the client, + you can request a subset of the data, specifying the document fields you want to get from the server. + +* The query can load [related documents](../../../indexes/indexing-related-documents.mdx#what-are-related-documents) and have their data merged into the projection results. + +* Content from inner objects and arrays can be projected in addition to [projecting the nested object types](../../../client-api/session/querying/how-to-project-query-results.mdx#projecting-nested-object-types). + +* An alias name can be given to the projected fields, and any calculations can be applied within the projection. +**When to use projections**: + +* Projections allow you to tailor the query results specifically to your needs. + Getting specific details to display can be useful when presenting data to users or populating user interfaces. + Projection queries are also useful with [subscriptions](../../../client-api/data-subscriptions/what-are-data-subscriptions.mdx) + since all transformation work is done on the server side without having to send a lot of data over the wire. + +* Returning partial document data from the server reduces network traffic, + as clients receive only relevant data required for a particular task, enhancing overall performance. + +* Savings can be significant if you need to show just a bit of information from a large document. For example: + the size of the result when querying for all "Orders" documents where "Company" is "companies/65-A" is 19KB. + Performing the same query and projecting only the "Employee" and "OrderedAt" fields results in only 5KB. + +* However, when you need to actively work with the complete set of data on the client side, + then do use a query without a projection to retrieve the full document from the server. +**Projections are not tracked by the session**: + +* On the client side, the resulting projected entities returned by the query are Not tracked by the Session. + +* Any modification made to a projection entity will not modify the corresponding document on the server when _SaveChanges_ is called. +**Projections are the final stage in the query pipeline**: + +* Projections are applied as the last stage in the query pipeline, + after the query has been processed, filtered, sorted, and paged. + +* This means that the projection does Not apply to all the documents in the collection, + but only to the documents matching the query predicate. 
+
+* Within the projection you can only filter what data will be returned from the matching documents,
+  but you cannot filter which documents will be returned. That has already been determined earlier in the query pipeline.
+
+**The cost of projections**:
+
+* Queries in RavenDB do not allow any computation to occur during the query phase.
+  However, you can perform any [calculations](../../../client-api/session/querying/how-to-project-query-results.mdx#example-v---projection-with-calculations)
+  inside the projection.
+
+* While calculations within a projection are allowed, very complex logic can impact query performance.
+  So RavenDB limits the total time it will spend processing a query and its projections.
+  Exceeding this time limit will fail the query. This is configurable, see the following configuration keys:
+  * [Databases.QueryTimeoutInSec](../../../server/configuration/database-configuration.mdx#databasesquerytimeoutinsec)
+  * [Databases.QueryOperationTimeoutInSec](../../../server/configuration/database-configuration.mdx#databasesqueryoperationtimeoutinsec)
+
+
+
+## SelectFields
+
+* Use `selectFields` to specify which fields should be returned per document that matches the query criteria.
+
+* Complex projection expressions can be provided directly with RQL via the `rawQuery` syntax;
+  see the examples below.
+
+
+
+##### Example I - Projecting individual fields of the document
+
+
+
+{`// Make a dynamic query on the Companies collection
+const projectedResults = await session.query({ collection: "companies" })
+    // Call 'selectFields'
+    // Pass a list of fields that will be returned per Company document
+    .selectFields([ "Name", "Address.City", "Address.Country"])
+    .all();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is a new object containing ONLY the fields specified in the selectFields method.
+`}
+
+
+
+{`from "companies"
+select Name, Address.City, Address.Country
+`}
+
+
+
+
+
+##### Example II - Projecting individual fields with alias
+
+
+
+{`// Define a QueryData object that will be used in the selectFields method
+const queryData = new QueryData(
+    // Specify the document-fields you want to project from the document
+    [ "Name", "Address.City", "Address.Country"],
+    // Provide an ALIAS name for each document-field
+    [ "CompanyName", "City", "Country"]);
+
+const projectedResults = await session.query({ collection: "companies" })
+    // Call 'selectFields', pass the queryData object
+    .selectFields(queryData)
+    .all();
+
+// Each resulting object in the list is Not a 'Company' entity,
+// it is a new object containing ONLY the fields specified in the selectFields method
+// using their corresponding alias names.
+`} + + + + +{`from "companies" +select Name as CompanyName, Address.City as City, Address.Country as Country +`} + + + + + + + +##### Example III - Projecting arrays and objects + + + + +{`// Define the projection with QueryData if you wish to use alias names +const queryData = new QueryData( + // Project the 'ShipTo' object and all product names from the Lines array in the document + [ "ShipTo", "Lines[].ProductName" ], + // Assign alias names + [ "ShipTo", "ProductNames" ]); + +const projectedResults = await session.query({ collection: "orders" }) + .selectFields(queryData) + .all(); +`} + + + + +{`// Using simple expression: +from "orders" +select ShipTo, Lines[].ProductName as ProductNames + +// Using JavaScript object literal syntax: +from "Orders" as x +select { + ShipTo: x.ShipTo, + ProductNames: x.Lines.map(y => y.ProductName) +} +`} + + + + + + + +##### Example IV - Projection with expression + + + + +{`// Define the projected data expression within a custom function. +// Any expression can be provided for the projected content. +const queryData = QueryData.customFunction("e", \`{ + FullName: e.FirstName + " " + e.LastName +}\`); + +const projectedResults = await session.query({ collection: "employees" }) + .selectFields(queryData) + .all(); +`} + + + + +{`from "employees" as e +select { + FullName: e.FirstName + " " + e.LastName +} +`} + + + + + + + +##### Example V - Projection with calculations + + + + +{`const projectedResults = await session.advanced + // Can provide an RQL query via the 'rawQuery' method + .rawQuery(\`from "Orders" as x + // Using JavaScript object literal syntax: + select { + // Any calculations can be done within the projection + TotalProducts: x.Lines.length, + TotalDiscountedProducts: x.Lines.filter(x => x.Discount > 0).length, + TotalPrice: x.Lines + .map(l => l.PricePerUnit * l.Quantity) + .reduce((a, b) => a + b, 0) }\`) + .all(); +`} + + + + +{`from "orders" as x +select { + TotalProducts: x.Lines.length, + TotalDiscountedProducts: x.Lines.filter(x => x.Discount > 0).length, + TotalPrice: x.Lines + .map(l => l.PricePerUnit * l.Quantity) + .reduce((a, b) => a + b, 0) +} +`} + + + + + + + +##### Example VI - Projecting using functions + + + + +{`const projectedResults = await session.advanced + .rawQuery(\`// Declare a function + declare function output(e) { + var format = p => p.FirstName + " " + p.LastName; + return { + FullName: format(e) + }; + } + // Call the function from the projection + from "employees" as e select output(e)\`) + .all(); +`} + + + + +{`declare function output(e) { + var format = p => p.FirstName + " " + p.LastName; + return { FullName: format(e) }; +} +from "employees" as e select output(e) +`} + + + + + + + +##### Example VII - Projecting using a loaded document + + + + +{`const projectedResults = await session.advanced + .rawQuery(\`from "Orders" as o + load o.Company as c // load the related Company document + select { + CompanyName: c.Name, // info from the related Company document + ShippedAt: o.ShippedAt // info from the Order document + }\`) + .all(); +`} + + + + +{`from "orders" as o +load o.Company as c +select { + CompanyName: c.Name, + ShippedAt: o.ShippedAt +} +`} + + + + + + + +##### Example VIII - Projection with dates + + + + +{`const projectedResults = await session.advanced + .rawQuery(\`from "employees" as e + select { + DayOfBirth: new Date(Date.parse(e.Birthday)).getDate(), + MonthOfBirth: new Date(Date.parse(e.Birthday)).getMonth() + 1, + Age: new Date().getFullYear() - new 
Date(Date.parse(e.Birthday)).getFullYear() + }\`) + .all(); +`} + + + + +{`from "employees" as e +select { + DayOfBirth: new Date(Date.parse(e.Birthday)).getDate(), + MonthOfBirth: new Date(Date.parse(e.Birthday)).getMonth() + 1, + Age: new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear() +} +`} + + + + + + + +##### Example IX - Projection with metadata + + + + +{`const projectedResults = await session.advanced + .rawQuery(\`from "employees" as e + select { + Name: e.FirstName, + Metadata: getMetadata(e) // Get the metadata + }\`) + .all(); +`} + + + + +{`from "employees" as e +select { + Name: e.FirstName, + Metadata: getMetadata(e) +} +`} + + + + + + + +##### Example X - Projection with include: + +When using `include` in a projection query, +RavenDB includes the related document only if the included path is one of the fields in the projection. + + + + +{`const projectedResults = await session.query({ collection: "products" }) + // NOTE: + // While the following 'include' line compiles, + // the related Supplier document will NOT BE INCLUDED in the query results, + // because 'Supplier' is not one of the projected fields in the 'selectFields' clause. + .include("Supplier") + .selectFields([ "Name", "Category"]) + // The related Category document WILL BE INCLUDED in the query results, + // since 'Category' is one of the projected fields. + .include("Category") + .all(); +`} + + + + +{`from "products" +select Name, Category +include Supplier, Category + +// NOTE: +// Only the related Category document WILL BE INCLUDED in the query results. + +// The related Supplier document will NOT BE INCLUDED in the query results, +// because 'Supplier' is Not one of the projected fields in the 'select' clause. +`} + + + + + + + +## Projecting nested object types + +In the Node.js client, when projecting query results using the `selectFields` method (not via the `rawQuery` syntax), +the metadata field `@nested-object-types` from the document will be automatically added to the list of fields to project in the generated RQL that is sent to the server. 
+
+
+
+{`// For example - Create a document with nested objects:
+// ====================================================
+
+class User \{
+    constructor(firstName, lastName, jobDetails, lastLogin) \{
+        this.firstName = firstName;
+        this.lastName = lastName;
+        this.jobDetails = jobDetails;
+        this.lastLogin = lastLogin;
+    \}
+\}
+
+class Job \{
+    constructor(company, title) \{
+        this.company = company;
+        this.title = title;
+    \}
+\}
+
+const job = new Job("RavenDB", "CEO");
+const user = new User("Ayende", "Rahien", job, new Date(2023, 11, 12));
+
+await session.store(user, "users/1");
+await session.saveChanges();
+
+// Query the users collection:
+// ===========================
+
+class Projection \{
+    constructor(jobDetails, lastLogin) \{
+        this.jobDetails = jobDetails;
+        this.lastLogin = lastLogin;
+    \}
+\}
+
+const projectedResults = await session.query(\{ collection: "users" \})
+    // Project selected fields:
+    .selectFields(["jobDetails", "lastLogin"], Projection)
+    .all();
+`}
+
+
+
+{`// The following RQL is generated by the Node.js client:
+// =====================================================
+
+from "users"
+select jobDetails, lastLogin, @metadata.@nested-object-types as __PROJECTED_NESTED_OBJECT_TYPES__
+`}
+
+
+
+{`// Query results will include the following projected fields:
+// ==========================================================
+
+// \{
+//     jobDetails = \{ "company": "RavenDB", "title": "CEO" \}
+//     lastLogin = "2023-12-11T22:00:00.000Z"
+//     __PROJECTED_NESTED_OBJECT_TYPES__ = \{ "jobDetails": "Job", "lastLogin": "date" \} // Nested field types
+// \}
+`}
+
+
+
+
+## Syntax
+
+
+
+{`selectFields(property);
+selectFields(properties);
+
+selectFields(property, projectionClass);
+selectFields(properties, projectionClass);
+selectFields(properties, projectionClass, projectionBehavior);
+
+selectFields(queryData, projectionClass);
+selectFields(queryData, projectionClass, projectionBehavior);
+`}
+
+
+
+| Parameter              | Type        | Description |
+|------------------------|-------------|-------------|
+| **property**           | `string`    | Field name to project |
+| **properties**         | `string[]`  | List of field names to project |
+| **queryData**          | `QueryData` | Object with projection query definitions |
+| **projectionClass**    | `object`    | The class type of the projected fields |
+| **projectionBehavior** | `string`    | Projection behavior is useful when querying a static-index. Learn more in [projection behavior with indexes](../../../indexes/querying/projections.mdx). |
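+
+As a usage sketch for the overloads listed above (the `ContactDetails` class here is a hypothetical local
+projection class, not part of the client API):
+
+{`class ContactDetails {
+    constructor() {
+        // Field names match the document-fields being projected
+        this.Name = null;
+        this.Phone = null;
+    }
+}
+
+const projectedResults = await session.query({ collection: "companies" })
+    // Pass the field names to project, and the class to instantiate the results as
+    .selectFields([ "Name", "Phone" ], ContactDetails)
+    .all();
+`}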
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-php.mdx
 new file mode 100644
 index 0000000000..e5ce0691aa
 --- /dev/null
 +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-php.mdx
 @@ -0,0 +1,363 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Applying a projection in a query allows you to shape the query results to meet specific requirements,
+  delivering just the data needed instead of the original full document content.
+
+* This article provides examples of projecting query results when making a **dynamic-query**.
+  For projecting results when querying a **static-index** see [project index query results](../../../indexes/querying/projections.mdx).
+
+* In this page:
+
+  * [Projections overview](../../../client-api/session/querying/how-to-project-query-results.mdx#projections-overview)
+
+  * [Projection Methods](../../../client-api/session/querying/how-to-project-query-results.mdx#projection-methods)
+      * [`select`, `selectFields`](../../../client-api/session/querying/how-to-project-query-results.mdx#select,-selectfields)
+      * [`ofType`](../../../client-api/session/querying/how-to-project-query-results.mdx#oftype)
+
+  * [Single projection per query](../../../client-api/session/querying/how-to-project-query-results.mdx#single-projection-per-query)
+
+
+## Projections overview
+
+### What are projections:
+
+* A projection refers to the **transformation of query results** into a customized structure,
+  modifying the shape of the data returned by the server.
+
+* Instead of retrieving the full document from the server and then picking relevant data from it on the client,
+  you can request a subset of the data, specifying the document fields you want to get from the server.
+
+* The query can load [related documents](../../../indexes/indexing-related-documents.mdx#what-are-related-documents) and have their data merged into the projection results.
+
+* Content from inner objects and arrays can be projected, an alias name can be given to the projected fields,
+  and any calculations can be made within the projection.
+
+### When to use projections:
+
+* Projections allow you to tailor the query results specifically to your needs.
+  Getting specific details to display can be useful when presenting data to users or populating user interfaces.
+  Projection queries are also useful with [subscriptions](../../../client-api/data-subscriptions/what-are-data-subscriptions.mdx)
+  since all transformation work is done on the server side without having to send a lot of data over the wire.
+
+* Returning partial document data from the server reduces network traffic,
+  as clients receive only relevant data required for a particular task, enhancing overall performance.
+
+* Savings can be significant if you need to show just a bit of information from a large document. For example:
+  the size of the result when querying for all "Orders" documents where "Company" is "companies/65-A" is 19KB.
+  Performing the same query and projecting only the "Employee" and "OrderedAt" fields results in only 5KB.
+
+* However, when you need to actively work with the complete set of data on the client side,
+  then do use a query without a projection to retrieve the full document from the server.
+
+### Projections are not tracked by the session:
+
+* On the client side, the resulting projected entities returned by the query are Not tracked by the Session.
+
+* Any modification made to a projection entity will not modify the corresponding document on the server when _saveChanges_ is called.
+
+### Projections are the final stage in the query pipeline:
+
+* Projections are applied as the last stage in the query pipeline,
+  after the query has been processed, filtered, sorted, and paged.
+
+* This means that the projection does Not apply to all the documents in the collection,
+  but only to the documents matching the query predicate.
+
+* Within the projection you can only filter what data will be returned from the matching documents,
+  but you cannot filter which documents will be returned. That has already been determined earlier in the query pipeline.
+
+* Only a [single projection request](../../../client-api/session/querying/how-to-project-query-results.mdx#single-projection-per-query) can be made per query.
+
+### The cost of projections:
+
+* Queries in RavenDB do not allow any computation to occur during the query phase.
+  However, you can perform any [calculations](../../../client-api/session/querying/how-to-project-query-results.mdx#example---projection-with-calculations)
+  inside the projection.
+
+* While calculations within a projection are allowed, very complex logic can impact query performance.
+  So RavenDB limits the total time it will spend processing a query and its projections.
+  Exceeding this time limit will fail the query. This is configurable, see the following configuration keys:
+  * [Databases.QueryTimeoutInSec](../../../server/configuration/database-configuration.mdx#databasesquerytimeoutinsec)
+  * [Databases.QueryOperationTimeoutInSec](../../../server/configuration/database-configuration.mdx#databasesqueryoperationtimeoutinsec)
+
+
+
+## Projection Methods
+
+## `select`, `selectFields`
+
+Projections are commonly performed in PHP using the `select` and `selectFields` methods.
+You can specify what fields from a document you want to retrieve and even provide a complex expression.
+
+### Example - Projecting Individual Fields of the Document:
+
+
+
+{`// request name, city and country for all entities from 'Companies' collection
+$queryData = new QueryData(
+    [ "Name", "Address.city", "Address.country"],
+    [ "Name", "City", "Country"]
+);
+
+/** @var array $results */
+$results = $session
+    ->query(Company::class)
+    ->selectFields(NameCityAndCountry::class, $queryData)
+    ->toList();
+`}
+
+
+
+{`from Companies
+select Name, Address.City as City, Address.Country as Country
+`}
+
+
+
+### Example - Projecting Arrays and Objects:
+
+
+
+{`$queryData = new QueryData(
+    [ "ShipTo", "Lines[].ProductName" ],
+    [ "ShipTo", "Products" ]
+);
+
+/** @var array $results */
+$results = $session->query(Order::class)
+    ->selectFields(ShipToAndProducts::class, $queryData)
+    ->toList();
+`}
+
+
+
+{`from Orders
+select ShipTo, Lines[].ProductName as Products
+`}
+
+
+
+### Example - Projection with expression:
+
+
+
+{`/** @var array $results */
+$results = $session->advanced()->rawQuery(FullName::class, "from Employees as e " .
+    "select {" .
+ " FullName : e.FirstName + \\" \\" + e.LastName " . + "}") + ->toList(); +`} + + + + +{`from "Employees" as e +select { + FullName: e.FirstName + " " + e.LastName +} +`} + + + + +### Example - Projection with Calculation: + + + + +{`/** @var array $results */ +$results = $session->advanced()->rawQuery(Total::class, "from Orders as o " . + "select { " . + " Total : o.Lines.reduce( " . + " (acc , l) => acc += l.PricePerUnit * l.Quantity, 0) " . + "}") + ->toList(); +`} + + + + +{`from "Orders" as o +select { + Total : o.Lines.reduce( + (acc , l) => acc += l.PricePerUnit * l.Quantity, 0) +} +`} + + + + +### Example - Projection Using a Loaded Document: + + + + +{`/** @var array $results */ +$results = $session->advanced()->rawQuery(OrderProjection::class, "from Orders as o " . + "load o.Company as c " . + "select { " . + " CompanyName: c.Name," . + " ShippedAt: o.ShippedAt" . + "}") +->toList(); +`} + + + + +{`from "Orders" as o +load o.Company as c +select { + CompanyName: c.Name, + ShippedAt: o.ShippedAt +} +`} + + + + +### Example - Projection with Dates: + + + + +{`/** @var array $results */ +$results = $session->advanced()->rawQuery(EmployeeProjection::class, "from Employees as e " . + "select { " . + " DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(), " . + " MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1, " . + " Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear() " . + "}") +->toList(); +`} + + + + +{`from "Employees" as e +select { + DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(), + MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1, + Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear() +} +`} + + + + +### Example - Projection with Raw JavaScript Code: + + + + +{`/** @var array $results */ +$results = $session->advanced()->rawQuery(EmployeeProjection::class, "from Employees as e " . + "select { " . + " Date : new Date(Date.parse(e.Birthday)), " . + " Name : e.FirstName.substr(0,3) " . + "}") +->toList(); +`} + + + + +{`from "Employees" as e +select { + Date : new Date(Date.parse(e.Birthday)), + Name : e.FirstName.substr(0,3) +} +`} + + + + +### Example - Projection with Metadata: + + + + +{`/** @var array $results */ +$results = $session->advanced()->rawQuery(Employee::class, "from Employees as e " . + "select {" . + " Name : e.FirstName, " . + " Metadata : getMetadata(e)" . + "}") + ->toList(); +`} + + + + +{`from "Employees" as e +select { + Name : e.FirstName, + Metadata : getMetadata(e) +} +`} + + + + + + +## `ofType` + +`ofType` is a client-side projection that maps results that the server returns into a given type. +This may become useful when querying an index that contains fields that are not available in the mapped type. + +### Example + + + + +{`// query index 'Products_BySupplierName' +// return documents from collection 'Products' that have a supplier 'Norske Meierier' +// project them to 'Products' +/** @var array $results */ +$results = $session->query(Products_BySupplierName_Result::class, Products_BySupplierName::class) + ->whereEquals("Name", "Norske Meierier") + ->ofType(Product::class) + ->toList(); +`} + + + + +{`class Products_BySupplierName extends AbstractIndexCreationTask +{ +} + +class Products_BySupplierName_Result +{ +} +`} + + + + + + +## Single projection per query + +As of RavenDB v6.0, only a single projection request can be made per query. +Attempting multiple projection executions in the same query will result in an exception. 
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-python.mdx
 new file mode 100644
 index 0000000000..034ba60217
 --- /dev/null
 +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-project-query-results-python.mdx
 @@ -0,0 +1,416 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Applying a projection in a query allows you to shape the query results to meet specific requirements,
+  delivering just the data needed instead of the original full document content.
+
+* This article provides examples of projecting query results when making a **dynamic-query**.
+  For projecting results when querying a **static-index** see [project index query results](../../../indexes/querying/projections.mdx).
+
+* In this page:
+
+  * [Projections overview](../../../client-api/session/querying/how-to-project-query-results.mdx#projections-overview)
+
+  * [Projection Methods](../../../client-api/session/querying/how-to-project-query-results.mdx#projection-methods)
+      * [select_fields_query_data](../../../client-api/session/querying/how-to-project-query-results.mdx#select_fields_query_data)
+      * [raw_query with `select`](../../../client-api/session/querying/how-to-project-query-results.mdx#raw_query-with-select)
+      * [select_fields](../../../client-api/session/querying/how-to-project-query-results.mdx#select_fields)
+
+  * [Single projection per query](../../../client-api/session/querying/how-to-project-query-results.mdx#single-projection-per-query)
+
+
+## Projections overview
+
+### What are projections:
+
+* A projection refers to the **transformation of query results** into a customized structure,
+  modifying the shape of the data returned by the server.
+
+* Instead of retrieving the full document from the server and then picking relevant data from it on the client,
+  you can request a subset of the data, specifying the document fields you want to get from the server.
+
+* The query can load [related documents](../../../indexes/indexing-related-documents.mdx#what-are-related-documents) and have their data merged into the projection results.
+
+* Content from inner objects and arrays can be projected, an alias name can be given to the projected fields,
+  and any calculations can be made within the projection.
+
+### When to use projections:
+
+* Projections allow you to tailor the query results specifically to your needs.
+  Getting specific details to display can be useful when presenting data to users or populating user interfaces.
+  Projection queries are also useful with [subscriptions](../../../client-api/data-subscriptions/what-are-data-subscriptions.mdx)
+  since all transformation work is done on the server side without having to send a lot of data over the wire.
+
+* Returning partial document data from the server reduces network traffic,
+  as clients receive only relevant data required for a particular task, enhancing overall performance.
+
+* Savings can be significant if you need to show just a bit of information from a large document. For example:
+  the size of the result when querying for all "Orders" documents where "Company" is "companies/65-A" is 19KB.
+  Performing the same query and projecting only the "Employee" and "OrderedAt" fields results in only 5KB.
+
+* However, when you need to actively work with the complete set of data on the client side,
+  then do use a query without a projection to retrieve the full document from the server.
+
+### Projections are not tracked by the session:
+
+* On the client side, the resulting projected entities returned by the query are Not tracked by the Session.
+
+* Any modification made to a projection entity will not modify the corresponding document on the server when `save_changes` is called.
+
+### Projections are the final stage in the query pipeline:
+
+* Projections are applied as the last stage in the query pipeline,
+  after the query has been processed, filtered, sorted, and paged.
+
+* This means that the projection does Not apply to all the documents in the collection,
+  but only to the documents matching the query predicate.
+
+* Within the projection you can only filter what data will be returned from the matching documents,
+  but you cannot filter which documents will be returned. That has already been determined earlier in the query pipeline.
+
+* Only a single projection request can be made per query.
+  Learn more in [single projection per query](../../../client-api/session/querying/how-to-project-query-results.mdx#single-projection-per-query).
+
+### The cost of projections:
+
+* Queries in RavenDB do not allow any computation to occur during the query phase.
+  However, you can perform any [calculations](../../../client-api/session/querying/how-to-project-query-results.mdx#example---projection-with-calculations)
+  inside the projection.
+
+* While calculations within a projection are allowed, very complex logic can impact query performance.
+  So RavenDB limits the total time it will spend processing a query and its projections.
+  Exceeding this time limit will fail the query. This is configurable, see the following configuration keys:
+  * [Databases.QueryTimeoutInSec](../../../server/configuration/database-configuration.mdx#databasesquerytimeoutinsec)
+  * [Databases.QueryOperationTimeoutInSec](../../../server/configuration/database-configuration.mdx#databasesqueryoperationtimeoutinsec)
+
+
+
+
+## Projection Methods
+
+## select_fields_query_data
+
+* The most common way to perform a query with a projection is to use the `select_fields` or `select_fields_query_data` method.
+
+* You can specify what fields from the document you want to retrieve and even provide a complex expression.
+ +### Example - Projecting individual fields of the document: + + + + +{`class CompanyNameCityAndCountry: + def __init__(self, name: str = None, city: str = None, country: str = None): + self.name = name + self.city = city + self.country = country + +query_data = QueryData(["name", "address.city", "address.country"], ["name", "city", "country"]) +results = list( + session.query(object_type=Company).select_fields_query_data(CompanyNameCityAndCountry, query_data) +) + +# Each resulting object in the list is not a 'Company' entity, it is a new object containing ONLY the +# fields specified in the query_data +`} + + + + +{`from "Companies" +select Name, Address.City as City, Address.Country as Country +`} + + + + +### Example - Projecting arrays and objects: + + + + +{`class OrderShippingAddressAndProductNames: + def __init__(self, ship_to: str = None, product_names: List[str] = None): + self.ship_to = ship_to + self.product_names = product_names + +# Retrieve all product names from the Lines array in an Order document +query_data = QueryData(["ship_to", "lines[].product_name"], ["ship_to", "product_names"]) + +projected_results = list( + session.query(object_type=Order).select_fields_query_data( + OrderShippingAddressAndProductNames, query_data + ) +) +`} + + + + +{`// Using simple expression: +from "Orders" +select ShipTo, Lines[].ProductName as ProductNames + +// Using JavaScript object literal syntax: +from "Orders" as x +select { + ShipTo: x.ShipTo, + ProductNames: x.Lines.map(y => y.ProductName) +} +`} + + + + +### Example - Projection with expression: + + + + +{`class EmployeeFullName: + def __init__(self, full_name: str): + self.full_name = full_name + +# Use custom function in query data or raw query +query_data = QueryData.custom_function("o", "{ full_name: o.first_name + ' ' + o.last_name }") +projected_results = list( + session.query(object_type=Employee).select_fields_query_data(EmployeeFullName, query_data) +) +`} + + + + +{`from "Employees" as e +select { + FullName: e.FirstName + " " + e.LastName +} +`} + + + + +### Example - Projection with calculations: + + + + +{`class ProductsRaport: + def __init__( + self, total_products: int = None, total_discounted_products: int = None, total_price: int = None + ): + self.total_products = total_products + self.total_discounted_products = total_discounted_products + self.total_price = total_price + +# Use custom function in query data or raw query +query_data = QueryData.custom_function( + "o", + "{" + "total_products: o.lines.length," + " total_discounted_products: o.lines.filter((line) => line.discount > 0).length," + " total_price: o.lines.reduce(" + "(accumulator, line) => accumulator + line.price_per_unit * line.quantity, 0) " + "}", +) +projected_results = list( + session.query(object_type=Order).select_fields_query_data(ProductsRaport, query_data) +) +`} + + + + +{`from "Orders" as x +select { + TotalProducts: x.Lines.length, + TotalDiscountedProducts: x.Lines.filter(x => x.Discount > 0).length, + TotalPrice: x.Lines + .map(l => l.PricePerUnit * l.Quantity) + .reduce((a, b) => a + b, 0) +} +`} + + + + + + +## raw_query with `select` + +Data can be projected by sending the server raw RQL with the `select` keyword using the `raw_query` method. 
+
+### Example - Projection with dates:
+
+
+
+{`class EmployeeAgeDetails:
+    def __init__(self, day_of_birth: str = None, month_of_birth: str = None, age: str = None):
+        self.day_of_birth = day_of_birth
+        self.month_of_birth = month_of_birth
+        self.age = age
+
+# Use custom function in query data or raw query
+results = session.advanced.raw_query(
+    "from Employees as e select {"
+    " day_of_birth : new Date(Date.parse(e.birthday)).getDate(),"
+    " month_of_birth : new Date(Date.parse(e.birthday)).getMonth() + 1,"
+    " age : new Date().getFullYear() - new Date(Date.parse(e.birthday)).getFullYear()"
+    "}",
+    EmployeeAgeDetails,
+)
+`}
+
+
+
+{`from "Employees" as e
+select {
+    DayOfBirth: new Date(Date.parse(e.Birthday)).getDate(),
+    MonthOfBirth: new Date(Date.parse(e.Birthday)).getMonth() + 1,
+    Age: new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear()
+}
+`}
+
+
+
+### Example - Projection with raw JavaScript code:
+
+
+
+{`class EmployeeBirthdayAndName:
+    def __init__(self, date: str = None, name: str = None):
+        self.date = date
+        self.name = name
+
+# Use custom function in query data or raw query
+results = list(
+    session.advanced.raw_query(
+        "from Employees as e select {"
+        "date: new Date(Date.parse(e.birthday)),"
+        "name: e.first_name.substr(0,3)"
+        "}",
+        EmployeeBirthdayAndName,
+    )
+)
+`}
+
+
+
+{`from "Employees" as e
+select {
+    Date: new Date(Date.parse(e.Birthday)),
+    Name: e.FirstName.substr(0, 3)
+}
+`}
+
+
+
+### Example - Projection with metadata:
+
+
+
+{`projected_results = list(
+    session.advanced.raw_query(
+        "from Employees as e "
+        + "select {"
+        + " name : e.first_name, "
+        + " metadata : getMetadata(e)"
+        + "}",
+        EmployeeNameAndMetadata,
+    )
+)
+`}
+
+
+
+{`from "Employees" as e
+select {
+    Name: e.FirstName,
+    Metadata: getMetadata(e)
+}
+`}
+
+
+
+
+
+## select_fields
+
+The projected fields can also be specified using the `select_fields` method.
+
+
+
+{`# Let's define an array with the field names that will be projected
+# (it's optional, you can also pass the field names as loose args)
+projection_fields = ["name", "phone"]
+# Make a query
+projected_results = list(
+    session.advanced.document_query(object_type=Company)
+    # Call 'select_fields'
+    # Pass the projection class type & the fields to be projected from it
+    .select_fields(ContactDetails, *projection_fields)
+)
+
+# Each resulting object in the list is not a 'Company' entity
+# it is an object of type 'ContactDetails' containing data ONLY for the specified fields
+`}
+
+
+
+{`from "Companies"
+select Name, Phone
+`}
+
+
+
+
+
+## Single projection per query
+
+* As of RavenDB v6.0, only a single projection request can be made per query.
+
+* Attempting multiple projection executions in the same query, e.g. by calling
+  `select_fields_query_data` multiple times or by calling `select_fields_query_data`
+  and then `select_fields`, will result in an exception.
+
+
+
+{`# For example:
+query_data = QueryData(["name"], ["funny_name"])
+try:
+    projected_results = list(
+        session.query(object_type=Company)
+        # Make a first projection
+        .select_fields(ContactDetails)
+        # A second projection is not supported and will raise an error
+        .select_fields_query_data(CompanyNameCityAndCountry, query_data)
+    )
+except Exception as e:
+    pass
+    # The following exception will be raised:
+    # "Projection is already done. You should not project your result twice."
+`}
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-csharp.mdx
 new file mode 100644
 index 0000000000..1848229eb8
 --- /dev/null
 +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-csharp.mdx
 @@ -0,0 +1,670 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Queries in RavenDB can be written with any of the following:
+  * **LINQ** - when querying with the session's `Query` method.
+  * **Low-level API** - when querying with the session's `DocumentQuery` method.
+  * **RQL**:
+      * when querying with the session's `RawQuery` method.
+      * when querying from the [Query view](../../../studio/database/queries/query-view.mdx) in Studio.
+
+* Queries defined with `Query` or `DocumentQuery` are translated by the RavenDB client to [RQL](../../../client-api/session/querying/what-is-rql.mdx)
+  when sent to the server.
+
+* All queries in RavenDB use an **index** to provide results, even when you don't specify one.
+  Learn more [below](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index).
+
+* Queries that do Not specify which index to use are called **Dynamic Queries**.
+  This article displays examples of dynamic queries only.
+  For examples showing how to query an index see [querying an index](../../../indexes/querying/query-index.mdx).
+
+* The entities returned by the query are 'loaded' and **tracked** by the [Session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx).
+  Entities will Not be tracked when:
+  * Query returns a [projection](../../../client-api/session/querying/how-to-project-query-results.mdx)
+  * Tracking is [disabled](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results)
+
+* Query results are **cached** by default. To disable query caching see [NoCaching](../../../client-api/session/querying/how-to-customize-query.mdx#nocaching).
+
+* Queries are timed out after a configurable time period. See [query timeout](../../../server/configuration/database-configuration.mdx#databasesquerytimeoutinsec).
+
+* In this page:
+  * [Queries always provide results using an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index)
+  * [Session.Query](../../../client-api/session/querying/how-to-query.mdx#sessionquery)
+  * [Session.Advanced.DocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery)
+  * [Session.Advanced.RawQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery)
+  * [Custom methods and extensions for LINQ](../../../client-api/session/querying/how-to-query.mdx#custom-methods-and-extensions-for-linq)
+  * [Syntax](../../../client-api/session/querying/how-to-query.mdx#syntax)
+
+
+## Queries always provide results using an index
+
+* Queries always use an index to provide fast results regardless of the size of your data.
+
+* When a query reaches a RavenDB instance, the instance calls its query optimizer to analyze the query
+  and determine which index should be used to retrieve the requested data.
+
+* Indexes make it possible to provide query results without scanning the entire dataset each and every time.
+ * Learn more about indexes, their general concept, and the different **index types** in this [indexes overview](../../../studio/database/indexes/indexes-overview.mdx) article. + * You can choose the underlying **search engine** that will be used by the RavenDB indexes. + Learn more in [selecting the search engine](../../../indexes/search-engine/corax.mdx#selecting-the-search-engine). + + + +We differentiate between the following **3 query scenarios**: + + 1. [Index query](../../../client-api/session/querying/how-to-query.mdx#indexquery) + 2. [Dynamic query](../../../client-api/session/querying/how-to-query.mdx#dynamicquery) + 3. [Full collection query](../../../client-api/session/querying/how-to-query.mdx#collectionquery) + +For each scenario, a different index type is used, as described below. + + + + +
+**1. Query an existing index**:
+
+* **Query type**: Index query
+  **Index used**: Static-index
+
+* You can specify which **STATIC-index** the query will use.
+
+* Static indexes are defined by the user, as opposed to auto-indexes that are created by the server
+  when querying a collection with some filtering applied. See [Static-index vs Auto-index](../../../studio/database/indexes/indexes-overview.mdx#auto-indexes--vs--static-indexes).
+
+* Example RQL:   `from index "Employees/ByFirstName" where FirstName == "Laura"`
+  See more examples in [querying an index](../../../indexes/querying/query-index.mdx).
+
+
+
+
+**2. Query a collection - with filtering**:
+
+* **Query type**: Dynamic Query
+  **Index used**: Auto-index
+
+* When querying a collection without specifying an index and with some filtering condition
+  (other than just the document ID), the query-optimizer will analyze the query to see if an **AUTO-index**
+  that can answer the query already exists, i.e. an auto-index on the queried collection with index-fields that match those queried.
+
+* If such an auto-index (Not a static one) is found, it will be used to fetch the results.
+
+* Otherwise, if no relevant auto-index is found,
+  the query-optimizer will create a new auto-index with fields that match the query criteria.
+  At this time, and only at this time, the query will wait for the auto-indexing process to complete.
+  Subsequent queries that target this auto-index will be served immediately.
+
+* Note: if an auto-index is already defined on the queried collection
+  but indexes a different field than the one queried on,
+  the query-optimizer will create a new auto-index that merges
+  the fields from the existing auto-index with the newly queried fields.
+
+* Once the newly created auto-index is done indexing the data,
+  the old auto-index is removed in favor of the new one.
+
+* Over time, an optimal set of indexes is generated by the query optimizer to answer your queries.
+
+* Example RQL:   `from Employees where FirstName == "Laura"`
+  See more examples [below](../../../client-api/session/querying/how-to-query.mdx#sessionquery).
+
+* Note: Counters and Time series are an exception to this flow.
+  Dynamic queries on counters and time series values don't create auto-indexes.
+  However, a static-index can be defined on [Time series](../../../document-extensions/timeseries/indexing.mdx) and [Counters](../../../document-extensions/counters/indexing.mdx).
+
+
+
+
+**3. Query a collection - query full collection | query by ID**:
+
+* **Query type**: Full collection Query
+  **Index used**: The raw collection (internal storage indexes)
+
+* Full collection query:
+
+  * When querying a collection without specifying an index and with no filtering condition,
+    then all documents from the specified collection are returned.
+
+  * RavenDB uses the raw collection documents in its **internal storage indexes** as the source for this query.
+    No auto-index is created.
+
+  * Example RQL:   `from Employees`
+
+* Query by document ID:
+
+  * When querying a collection only by document ID or IDs,
+    then similar to the full collection query, no auto-index is created.
+
+  * RavenDB uses the raw collection documents as the source for this query.
+
+  * Example RQL:   `from Employees where id() == "employees/1-A"`
+    See more examples [below](../../../client-api/session/querying/how-to-query.mdx#sessionquery).
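+
+To make the three scenarios concrete, here is a minimal side-by-side sketch.
+It is shown in RavenDB's Python client syntax for brevity; the `query_index` and
+`where_equals` method names are assumptions, so treat this as an illustration
+rather than a definitive recipe:
+
+{`# 1. Index query - explicitly targets the static-index from the RQL example above
+results = list(session.query_index("Employees/ByFirstName", Employee))
+
+# 2. Dynamic query - filtering on a document field; served by an auto-index
+results = list(session.query(object_type=Employee).where_equals("first_name", "Laura"))
+
+# 3. Full collection query - no filtering; no auto-index is created
+all_employees = list(session.query(object_type=Employee))
+`}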
+
+
+
+## Session.Query
+
+* The simplest way to issue a query is by using the session's `Query` method, which supports LINQ.
+  Both the LINQ method syntax and the LINQ query syntax are supported.
+
+* The following examples show **dynamic queries** that do not specify which index to use.
+  Please refer to [querying an index](../../../indexes/querying/query-index.mdx) for other examples.
+
+* Querying can be enhanced using these [extension methods](../../../client-api/session/querying/how-to-query.mdx#custom-methods-and-extensions-for-linq).
+
+
+
+**Query collection - no filtering**
+
+
+
+{`// This is a Full Collection Query
+// No auto-index is created since no filtering is applied
+
+List<Employee> allEmployees = session
+    .Query<Employee>() // Query for all documents from 'Employees' collection
+    .ToList();         // Execute the query
+
+// All 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// This is a Full Collection Query
+// No auto-index is created since no filtering is applied
+
+List<Employee> allEmployees = await asyncSession
+    .Query<Employee>() // Query for all documents from 'Employees' collection
+    .ToListAsync();    // Execute the query
+
+// All 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// This is a Full Collection Query
+// No auto-index is created since no filtering is applied
+
+// Query for all documents from 'Employees' collection
+IRavenQueryable<Employee> query = from employee in session.Query<Employee>()
+                                  select employee;
+// Execute the query
+List<Employee> allEmployees = query.ToList();
+
+// All 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// This is a Full Collection Query
+// No auto-index is created since no filtering is applied
+
+// Query for all documents from 'Employees' collection
+IRavenQueryable<Employee> query = from employee in asyncSession.Query<Employee>()
+                                  select employee;
+// Execute the query
+List<Employee> allEmployees = await query.ToListAsync();
+
+// All 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// This RQL is a Full Collection Query
+// No auto-index is created since no filtering is applied
+
+from "Employees"
+`}
+
+
+
+
+
+**Query collection - by ID**
+
+
+
+{`// Query collection by document ID
+// No auto-index is created when querying only by ID
+
+Employee employee = session
+    .Query<Employee>()
+    .Where(x => x.Id == "employees/1-A") // Query for specific document from 'Employees' collection
+    .FirstOrDefault();                   // Execute the query
+
+// The resulting 'Employee' entity is loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection by document ID
+// No auto-index is created when querying only by ID
+
+Employee employee = await asyncSession
+    .Query<Employee>()
+    .Where(x => x.Id == "employees/1-A") // Query for specific document from 'Employees' collection
+    .FirstOrDefaultAsync();              // Execute the query
+
+// The resulting 'Employee' entity is loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection by document ID
+// No auto-index is created when querying only by ID
+
+// Query for specific document from 'Employees' collection
+IRavenQueryable<Employee> query = from employee in session.Query<Employee>()
+                                  where employee.Id == "employees/1-A"
+                                  select employee;
+// Execute the query
+Employee employeeResult = query.FirstOrDefault();
+
+// The resulting 'Employee' entity is loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection by document ID
+// No auto-index is created when querying only by ID
+
+// Query for specific document from 'Employees' collection
+IRavenQueryable<Employee> query = from employee in asyncSession.Query<Employee>()
+                                  where employee.Id == "employees/1-A"
+                                  select employee;
+// Execute the query
+Employee employeeResult = await query.FirstOrDefaultAsync();
+
+// The resulting 'Employee' entity is loaded and will be tracked by the session
+`}
+
+
+
+{`// This RQL queries the 'Employees' collection by ID
+// No auto-index is created when querying only by ID
+
+from "Employees" where id() == "employees/1-A"
+`}
+
+
+
+
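+
+As a side note, when the document ID is already known, loading it directly has the
+same effect as the query above - the entity is returned and tracked, and no auto-index
+is created either way. A minimal sketch (shown in Python client syntax for brevity;
+the `load` call shape is an assumption):
+
+{`# Load by ID instead of querying by ID - no auto-index is created either way
+employee = session.load("employees/1-A", Employee)
+`}
+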
+
+**Query collection - with filtering**
+
+
+
+{`// Query collection - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+List<Employee> employees = session
+    .Query<Employee>()
+    .Where(x => x.FirstName == "Robert") // Query for all 'Employee' documents that match this predicate
+    .ToList();                           // Execute the query
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+List<Employee> employees = await asyncSession
+    .Query<Employee>()
+    .Where(x => x.FirstName == "Robert") // Query for all 'Employee' documents that match this predicate
+    .ToListAsync();                      // Execute the query
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+// Query for all 'Employee' documents that match the requested predicate
+IRavenQueryable<Employee> query = from employee in session.Query<Employee>()
+                                  where employee.FirstName == "Robert"
+                                  select employee;
+// Execute the query
+List<Employee> employees = query.ToList();
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+// Query for all 'Employee' documents that match the requested predicate
+IRavenQueryable<Employee> query = from employee in asyncSession.Query<Employee>()
+                                  where employee.FirstName == "Robert"
+                                  select employee;
+// Execute the query
+List<Employee> employees = await query.ToListAsync();
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes the requested field
+
+from "Employees" where FirstName == "Robert"
+`}
+
+
+
+
+
+**Query collection - with paging**
+
+
+
+{`// Query collection - page results
+// No auto-index is created since no filtering is applied
+
+List<Product> products = session
+    .Query<Product>()
+    .Skip(5)   // Skip first 5 results
+    .Take(10)  // Load up to 10 entities from 'Products' collection
+    .ToList(); // Execute the query
+
+// The resulting 'Product' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - page results
+// No auto-index is created since no filtering is applied
+
+List<Product> products = await asyncSession
+    .Query<Product>()
+    .Skip(5)        // Skip first 5 results
+    .Take(10)       // Load up to 10 entities from 'Products' collection
+    .ToListAsync(); // Execute the query
+
+// The resulting 'Product' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - page results
+// No auto-index is created since no filtering is applied
+
+IRavenQueryable<Product> query = (from product in session.Query<Product>()
+                                  select product)
+    .Skip(5)   // Skip first 5 results
+    .Take(10); // Load up to 10 entities from 'Products' collection
+// Execute the query
+List<Product> products = query.ToList();
+
+// The resulting 'Product' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - page results
+// No auto-index is created since no filtering is applied
+
+IRavenQueryable<Product> query = (from product in asyncSession.Query<Product>()
+                                  select product)
+    .Skip(5)   // Skip first 5 results
+    .Take(10); // Load up to 10 entities from 'Products' collection
+// Execute the query
+List<Product> products = await query.ToListAsync();
+
+// The resulting 'Product' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - page results
+// No auto-index is created since no filtering is applied
+
+from "Products" limit 5, 10 // skip 5, take 10
+`}
+
+
+
+* By default, if the page size is not specified, all matching records will be retrieved from the database.
+
+
+
+
+## Session.Advanced.DocumentQuery
+
+* `DocumentQuery` provides a full spectrum of low-level querying capabilities,
+  giving you more flexibility and control when making complex queries.
+
+* Below is a simple _DocumentQuery_ usage.
+  For a full description and more examples see:
+  * [What is a document query](../../../client-api/session/querying/document-query/what-is-document-query.mdx)
+  * [Query -vs- DocumentQuery](../../../client-api/session/querying/document-query/query-vs-document-query.mdx)
+
+**Example**:
+
+
+
+{`// Query with DocumentQuery - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+List<Employee> employees = session
+    .Advanced.DocumentQuery<Employee>()      // Use DocumentQuery
+    .WhereEquals(x => x.FirstName, "Robert") // Query for all 'Employee' documents that match this predicate
+    .ToList();                               // Execute the query
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query with DocumentQuery - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+List<Employee> employees = await asyncSession
+    .Advanced.AsyncDocumentQuery<Employee>() // Use DocumentQuery
+    .WhereEquals(x => x.FirstName, "Robert") // Query for all 'Employee' documents that match this predicate
+    .ToListAsync();                          // Execute the query
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes the requested field
+
+from "Employees" where FirstName = "Robert"
+`}
+
+
+
+
+
+## Session.Advanced.RawQuery
+
+* Queries defined with [Query](../../../client-api/session/querying/how-to-query.mdx#sessionquery) or [DocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery) are translated by the RavenDB client to [RQL](../../../client-api/session/querying/what-is-rql.mdx)
+  when sent to the server.
+
+* The session also gives you a way to express the query directly in RQL using the `RawQuery` method.
+
+**Example**:
+
+
+
+{`// Query with RawQuery - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+List<Employee> employees = session
+    // Provide RQL to RawQuery
+    .Advanced.RawQuery<Employee>("from 'Employees' where FirstName = 'Robert'")
+    // Execute the query
+    .ToList();
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query with RawQuery - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+List<Employee> employees = await asyncSession
+    // Provide RQL to AsyncRawQuery
+    .Advanced.AsyncRawQuery<Employee>("from 'Employees' where FirstName = 'Robert'")
+    // Execute the query
+    .ToListAsync();
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+
+
+## Custom methods and extensions for LINQ
+
+* Available custom methods and extensions for the session's [Query](../../../client-api/session/querying/how-to-query.mdx#sessionquery) method:
+
+  * [AggregateBy](../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx)
+  * AnyAsync
+  * [Count](../../../client-api/session/querying/how-to-count-query-results.mdx)
+  * [CountLazily](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
+  * [Customize](../../../client-api/session/querying/how-to-customize-query.mdx)
+  * FirstAsync
+  * FirstOrDefaultAsync
+  * [GroupByArrayValues](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-values)
+  * [GroupByArrayContent](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-content)
+  * [Highlight](../../../client-api/session/querying/text-search/highlight-query-results.mdx)
+  * [Include](../../../client-api/how-to/handle-document-relationships.mdx)
+  * [Intersect](../../../client-api/session/querying/how-to-use-intersect.mdx)
+  * [Lazily](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
+  * [LazilyAsync](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
+  * [LongCount](../../../client-api/session/querying/how-to-count-query-results.mdx)
+  * [MoreLikeThis](../../../client-api/session/querying/how-to-use-morelikethis.mdx)
+  * [OfType](../../../client-api/session/querying/how-to-project-query-results.mdx#oftype-(as)---simple-projection)
+  * [OrderByDistance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance)
+  * [OrderByDistanceDescending](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedesc)
+  * [OrderByScore](../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+  * [OrderByScoreDescending](../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+  * [ProjectInto](../../../client-api/session/querying/how-to-project-query-results.mdx)
+  * [Search](../../../client-api/session/querying/text-search/full-text-search.mdx)
+  * SingleAsync
+  * SingleOrDefaultAsync
+  * [Spatial](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx)
+  * [Statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx)
+  * [SuggestUsing](../../../client-api/session/querying/how-to-work-with-suggestions.mdx)
+  * ToListAsync
+  * ToArrayAsync
+  * [VectorSearch](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx)
+
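+
+For instance, methods such as `Count` and `Statistics` from the list above compose with
+the same dynamic queries shown earlier. A minimal sketch (in Python client syntax for
+brevity; the method names are assumed to mirror their C# counterparts):
+
+{`# Count the matching documents without loading the entities themselves
+matches = (
+    session.query(object_type=Employee)
+    .where_equals("first_name", "Robert")
+    .count()
+)
+`}
+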
+
+## Syntax
+
+
+
+{`// Overloads for querying a collection OR an index:
+// ================================================
+
+IRavenQueryable<T> Query<T>(string indexName = null,
+    string collectionName = null, bool isMapReduce = false);
+
+IDocumentQuery<T> DocumentQuery<T>(string indexName = null,
+    string collectionName = null, bool isMapReduce = false);
+
+// Overloads for querying an index:
+// ================================
+
+IRavenQueryable<T> Query<T, TIndexCreator>();
+
+IDocumentQuery<T> DocumentQuery<T, TIndexCreator>();
+
+// RawQuery:
+// =========
+
+IRawDocumentQuery<T> RawQuery<T>(string query);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| **T** | object | <ul><li>The type of entities queried</li></ul> |
+| **TIndexCreator** | class | <ul><li>The index class type</li></ul> |
+| **collectionName** | string | <ul><li>Name of a collection to query</li><li>No need to provide this param when specifying `T`</li><li>Specify the collection name when querying a collection that is created on the fly, i.e. when querying [Artificial Documents](../../../studio/database/indexes/create-map-reduce-index.mdx#saving-map-reduce-results-in-a-collection-(artificial-documents))</li><li>Mutually exclusive with _indexName_</li></ul> |
+| **indexName** | string | <ul><li>Name of index to query</li><li>Mutually exclusive with _collectionName_</li></ul> |
+| **isMapReduce** | bool | <ul><li>Whether querying a map-reduce index</li></ul> |
+| **query** | string | <ul><li>The RQL query string</li></ul> |
+
+| Return Value | |
+| - | - |
+| `IRavenQueryable<T>` <br/> `IDocumentQuery<T>` <br/> `IRawDocumentQuery<T>` | Instances exposing additional query methods and [extensions](../../../client-api/session/querying/how-to-query.mdx#custom-methods-and-extensions-for-linq) |
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-nodejs.mdx
new file mode 100644
index 0000000000..43197c6171
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-nodejs.mdx
@@ -0,0 +1,488 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Queries in RavenDB can be written with:
+  * The session's `query` method - a rich API is provided
+  * The session's `rawQuery` method - using RQL
+  * The [Query view](../../../studio/database/queries/query-view.mdx) in Studio - using RQL
+
+* Queries defined with the `query` method are translated by the RavenDB client to [RQL](../../../client-api/session/querying/what-is-rql.mdx)
+  when sent to the server.
+
+* All queries in RavenDB use an **index** to provide results, even when you don't specify one.
+  Learn more [below](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index).
+
+* Queries that do Not specify which index to use are called **Dynamic Queries**.
+  This article displays examples of dynamic queries only.
+  For examples showing how to query an index see [querying an index](../../../indexes/querying/query-index.mdx).
+
+* The entities returned by the query are 'loaded' and **tracked** by the [Session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx).
+  Entities will Not be tracked when:
+  * Query returns a [projection](../../../client-api/session/querying/how-to-project-query-results.mdx)
+  * Tracking is [disabled](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results)
+
+* Query results are **cached** by default. To disable query caching see [noCaching](../../../client-api/session/querying/how-to-customize-query.mdx#nocaching).
+
+* Queries are timed out after a configurable time period. See [query timeout](../../../server/configuration/database-configuration.mdx#databasesquerytimeoutinsec).
+
+* In this page:
+  * [Queries always provide results using an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index)
+  * [session.query](../../../client-api/session/querying/how-to-query.mdx#sessionquery)
+  * [session.advanced.rawQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery)
+  * [query API](../../../client-api/session/querying/how-to-query.mdx#query-api)
+  * [Syntax](../../../client-api/session/querying/how-to-query.mdx#syntax)
+
+
+## Queries always provide results using an index
+
+* Queries always use an index to provide fast results regardless of the size of your data.
+
+* When a query reaches a RavenDB instance, the instance calls its query optimizer to analyze the query
+  and determine which index should be used to retrieve the requested data.
+
+* Indexes allow RavenDB to provide query results without scanning the entire dataset each and every time.
+  * Learn more about indexes, their general concept, and the different **index types** in this [indexes overview](../../../studio/database/indexes/indexes-overview.mdx) article.
+ * You can choose the underlying **search engine** that will be used by the RavenDB indexes. + Learn more in [selecting the search engine](../../../indexes/search-engine/corax.mdx#selecting-the-search-engine). + + + +We differentiate between the following **3 query scenarios**: + +1. [Index query](../../../client-api/session/querying/how-to-query.mdx#indexquery) +2. [Dynamic query](../../../client-api/session/querying/how-to-query.mdx#dynamicquery) +3. [Full collection query](../../../client-api/session/querying/how-to-query.mdx#collectionquery) + +For each scenario, a different index type is used, as described below. + + + + +
+**1. Query an existing index**:
+
+* **Query type**: Index query
+  **Index used**: Static-index
+
+* You can specify which **STATIC-index** the query will use.
+
+* Static indexes are defined by the user, as opposed to auto-indexes that are created by the server
+  when querying a collection with some filtering applied. See [Static-index vs Auto-index](../../../studio/database/indexes/indexes-overview.mdx#auto-indexes--vs--static-indexes).
+
+* Example RQL:   `from index "Employees/ByFirstName" where FirstName == "Laura"`
+  See more examples in [querying an index](../../../indexes/querying/query-index.mdx).
+
+
+
+
+**2. Query a collection - with filtering (Dynamic Query)**:
+
+* **Query type**: Dynamic Query
+  **Index used**: Auto-index
+
+* When querying a collection without specifying an index and with some filtering condition
+  (other than just the document ID), the query-optimizer will analyze the query to see if an **AUTO-index**
+  that can answer the query already exists, i.e. an auto-index on the queried collection with index-fields that match those queried.
+
+* If such an auto-index (Not a static one) is found, it will be used to fetch the results.
+
+* Otherwise, if no relevant auto-index is found,
+  the query-optimizer will create a new auto-index with fields that match the query criteria.
+  At this time, and only at this time, the query will wait for the auto-indexing process to complete.
+  Subsequent queries that target this auto-index will be served immediately.
+
+* Note: if an auto-index is already defined on the queried collection
+  but indexes a different field than the one queried on,
+  the query-optimizer will create a new auto-index that merges
+  the fields from the existing auto-index with the newly queried fields.
+
+* Once the newly created auto-index is done indexing the data,
+  the old auto-index is removed in favor of the new one.
+
+* Over time, an optimal set of indexes is generated by the query optimizer to answer your queries.
+
+* Example RQL:   `from Employees where FirstName == "Laura"`
+  See more examples [below](../../../client-api/session/querying/how-to-query.mdx#sessionquery).
+
+* Note: Counters and Time series are an exception to this flow.
+  Dynamic queries on counters and time series values don't create auto-indexes.
+  However, a static-index can be defined on [Time series](../../../document-extensions/timeseries/indexing.mdx) and [Counters](../../../document-extensions/counters/indexing.mdx).
+
+
+
+
+**3. Query a collection - query full collection | query by ID**:
+
+* **Query type**: Full collection Query
+  **Index used**: The raw collection (internal storage indexes)
+
+* Full collection query:
+
+  * When querying a collection without specifying an index and with no filtering condition,
+    then all documents from the specified collection are returned.
+
+  * RavenDB uses the raw collection documents in its **internal storage indexes** as the source for this query.
+    No auto-index is created.
+
+  * Example RQL:   `from Employees`
+
+* Query by document ID:
+
+  * When querying a collection only by document ID or IDs,
+    then similar to the full collection query, no auto-index is created.
+
+  * RavenDB uses the raw collection documents as the source for this query.
+
+  * Example RQL:   `from Employees where id() == "employees/1-A"`
+    See more examples [below](../../../client-api/session/querying/how-to-query.mdx#sessionquery).
+
+
+
+
+## session.query
+
+* The simplest way to issue a query is by using the session's `query` method.
+ Customize your query with these [API methods](../../../client-api/session/querying/how-to-query.mdx#query-api). + +* The following examples show **dynamic queries** that do not specify which index to use. + Please refer to [querying an index](../../../indexes/querying/query-index.mdx) for other examples. + + + +**Query collection - no filtering** + + + + +{`// This is a Full Collection Query +// No auto-index is created since no filtering is applied + +// Query for all documents from 'employees' collection +const employees = await session.query({ collection: "employees" }) + // Execute the query + .all(); + +// All 'employee' entities are loaded and will be tracked by the session +`} + + + + +{`// This is a Full Collection Query +// No auto-index is created since no filtering is applied + +// Query for all documents from 'employees' collection +const employees = await session.query(Employee) + // Execute the query + .all(); + +// All 'employee' entities are loaded and will be tracked by the session +`} + + + + +{`// This RQL is a Full Collection Query +// No auto-index is created since no filtering is applied + +from "employees" +`} + + + + + + + + +**Query collection - by ID** + + + + +{`// Query collection by document ID +// No auto-index is created when querying only by ID + +const employee = await session.query({ collection: "employees" }) + .whereEquals("id()", "employees/1-A") // Query for specific document from 'employees' collection + .first(); // Execute the query + +// The resulting 'employee' entity is loaded and will be tracked by the session +`} + + + + +{`// Query collection by document ID +// No auto-index is created when querying only by ID + +const employee = await session.query(Employee) + .whereEquals("id()", "employees/1-A") // Query for specific document from 'employees' collection + .first(); // Execute the query + +// The resulting 'employee' entity is loaded and will be tracked by the session +`} + + + + +{`// This RQL queries the 'Employees' collection by ID +// No auto-index is created when querying only by ID + +from "employees" where id() == "employees/1-A" +`} + + + + + + + + +**Query collection - with filtering** + + + + +{`// Query collection - filter by document field + +// An auto-index will be created if there isn't already an existing auto-index +// that indexes this document field + +const employees = await session.query({ collection: "employees" }) + .whereEquals("firstName", "Robert") // Query for all 'employee' documents that match this predicate + .all(); // Execute the query + +// The resulting 'employee' entities are loaded and will be tracked by the session +`} + + + + +{`// Query collection - filter by document field + +// An auto-index will be created if there isn't already an existing auto-index +// that indexes this document field + +const employees = await session.query(Employee) + .whereEquals("firstName", "Robert") // Query for all 'employee' documents that match this predicate + .all(); // Execute the query + +// The resulting 'employee' entities are loaded and will be tracked by the session +`} + + + + +{`// Query collection - filter by document field + +// An auto-index will be created if there isn't already an existing auto-index +// that indexes the requested field + +from "employees" where firstName == "Robert" +`} + + + + + + + + +**Query collection - with paging** + + + + +{`// Query collection - page results +// No auto-index is created since no filtering is applied + +const products = await session.query({ collection: "products" 
})
+    .skip(5)  // Skip first 5 results
+    .take(10) // Load up to 10 entities from 'products' collection
+    .all();   // Execute the query
+
+// The resulting 'product' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - page results
+// No auto-index is created since no filtering is applied
+
+const products = await session.query(Product)
+    .skip(5)  // Skip first 5 results
+    .take(10) // Load up to 10 entities from 'products' collection
+    .all();   // Execute the query
+
+// The resulting 'product' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - page results
+// No auto-index is created since no filtering is applied
+
+from "products" limit 5, 10 // skip 5, take 10
+`}
+
+
+
+* By default, if the page size is not specified, all matching records will be retrieved from the database.
+
+
+
+
+## session.advanced.rawQuery
+
+* Queries defined with [query](../../../client-api/session/querying/how-to-query.mdx#sessionquery) are translated by the RavenDB client to [RQL](../../../client-api/session/querying/what-is-rql.mdx) when sent to the server.
+
+* The session also gives you a way to express the query directly in RQL using the `rawQuery` method.
+
+**Example**:
+
+
+
+{`// Query with rawQuery - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+const employees = await session.advanced
+    // Provide RQL to rawQuery
+    .rawQuery("from employees where firstName = 'Robert'")
+    // Execute the query
+    .all();
+
+// The resulting 'employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+
+## query API
+
+Available methods for the session's [query](../../../client-api/session/querying/how-to-query.mdx#sessionquery) method:
+
+- addOrder
+- addParameter
+- aggregateBy
+- aggregateUsing
+- andAlso
+- any
+- [boost](../../../client-api/session/querying/text-search/boost-search-results.mdx)
+- closeSubclause
+- containsAll
+- containsAny
+- [count](../../../client-api/session/querying/how-to-count-query-results.mdx)
+- [countLazily](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-count-query)
+- distinct
+- first
+- firstOrNull
+- fuzzy
+- getIndexQuery
+- getQueryResult
+- [groupBy](../../../client-api/session/querying/how-to-perform-group-by-query.mdx)
+- [highlight](../../../client-api/session/querying/text-search/highlight-query-results.mdx)
+- include
+- [includeExplanations](../../../client-api/session/querying/debugging/include-explanations.mdx)
+- [intersect](../../../client-api/session/querying/how-to-use-intersect.mdx)
+- lazily
+- [longCount](../../../client-api/session/querying/how-to-count-query-results.mdx)
+- [moreLikeThis](../../../client-api/session/querying/how-to-use-morelikethis.mdx)
+- negateNext
+- [noCaching](../../../client-api/session/querying/how-to-customize-query.mdx#nocaching)
+- [noTracking](../../../client-api/session/querying/how-to-customize-query.mdx#notracking)
+- not
+- [ofType](../../../client-api/session/querying/how-to-project-query-results.mdx#oftype)
+- [on("afterQueryExecuted")](../../../client-api/session/querying/how-to-customize-query.mdx#on-("afterqueryexecuted"))
+- [on("beforeQueryExecuted")](../../../client-api/session/querying/how-to-customize-query.mdx#on-("beforequeryexecuted"))
+- openSubclause
+- [orderBy](../../../client-api/session/querying/sort-query-results.mdx)
- [orderByDescending](../../../client-api/session/querying/sort-query-results.mdx)
+- [orderByDistance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance)
+- [orderByDistanceDescending](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedesc)
+- [orderByScore](../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+- [orderByScoreDescending](../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+- orElse
+- proximity
+- [randomOrdering](../../../client-api/session/querying/how-to-customize-query.mdx#randomordering)
+- relatesToShape
+- search
+- [selectFields](../../../indexes/querying/projections.mdx#selectfields)
+- selectTimeSeries
+- single
+- singleOrNull
+- skip
+- spatial
+- [statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx)
+- [suggestUsing](../../../client-api/session/querying/how-to-work-with-suggestions.mdx)
+- take
+- timings
+- usingDefaultOperator
+- [waitForNonStaleResults](../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults)
+- whereBetween
+- [whereEndsWith](../../../client-api/session/querying/text-search/ends-with-query.mdx)
+- whereEquals
+- [whereExists](../../../client-api/session/querying/how-to-filter-by-field.mdx)
+- whereGreaterThan
+- whereGreaterThanOrEqual
+- whereIn
+- whereLessThan
+- whereLessThanOrEqual
+- [whereLucene](../../../client-api/session/querying/document-query/how-to-use-lucene.mdx)
+- whereNotEquals
+- [whereRegex](../../../client-api/session/querying/text-search/using-regex.mdx)
+- [whereStartsWith](../../../client-api/session/querying/text-search/starts-with-query.mdx)
+- withinRadiusOf
+
+
+
+## Syntax
+
+
+
+{`// Overload for querying a collection:
+session.query(documentType);
+
+// Overload for querying an index:
+session.query(documentType, index);
+
+// Overload for querying a collection OR an index:
+session.query(opts);
+
+// rawQuery:
+session.rawQuery(query);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| **documentType** | object | The type of entities queried |
+| **index** | object | The index class |
+| **opts** | `DocumentQueryOptions` object | Query options |
+| **query** | string | The RQL query string |
+
+| `DocumentQueryOptions` | | |
+| - | - | - |
+| **collection** | string | <ul><li>Collection name queried</li></ul> |
+| **indexName** | string | <ul><li>Index name queried</li></ul> |
+| **index** | object | <ul><li>Index object queried</li><li>Note: `indexName` & `index` are mutually exclusive with `collection`. See examples in [querying an index](../../../indexes/querying/query-index.mdx).</li></ul> |
+
+| Return Value | |
+| - | - |
+| `object` | Instance implementing `IDocumentQuery` exposing the additional [query methods](../../../client-api/session/querying/how-to-query.mdx#query-api). |
+
+* Note:
+  Use `await` when executing the query, e.g. when calling `.all`, `.single`, `.first`, `.count`, etc.
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-php.mdx
new file mode 100644
index 0000000000..f3f981b467
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-php.mdx
@@ -0,0 +1,431 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* PHP queries can be written with:
+  * The session's `query` method
+  * The session's `documentQuery` method
+  * **RQL**:
+    * via the session's `rawQuery` method
+    * via the Studio's [Query view](../../../studio/database/queries/query-view.mdx)
+
+* Queries defined using `query` or `documentQuery` are translated by the RavenDB client
+  to [RQL](../../../client-api/session/querying/what-is-rql.mdx) when sent to the server.
+
+* All RavenDB queries use an **index** to provide results, even when an index is not explicitly defined.
+  Learn more [below](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index).
+
+* Queries that do Not specify which index to use are called **Dynamic Queries**.
+  This article shows examples of dynamic queries only.
+  For index query examples see [querying an index](../../../indexes/querying/query-index.mdx).
+
+* The entities returned by the query are 'loaded' and **tracked** by the [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx).
+  Entities will Not be tracked when:
+  * Query returns a [projection](../../../client-api/session/querying/how-to-project-query-results.mdx)
+  * Tracking is [disabled](../../../client-api/session/querying/how-to-customize-query.mdx#notracking)
+
+* Query results are **cached** by default.
+  To disable query caching see [NoCaching](../../../client-api/session/querying/how-to-customize-query.mdx#nocaching).
+
+* Queries are timed out after a configurable time period.
+  See [query timeout](../../../server/configuration/database-configuration.mdx#databasesquerytimeoutinsec).
+
+* In this page:
+  * [Queries always provide results using an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index)
+  * [session.query](../../../client-api/session/querying/how-to-query.mdx#sessionquery)
+  * [session.advanced.documentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery)
+  * [session.advanced.rawQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery)
+  * [Custom methods](../../../client-api/session/querying/how-to-query.mdx#custom-methods)
+  * [Syntax](../../../client-api/session/querying/how-to-query.mdx#syntax)
+
+
+## Queries always provide results using an index
+
+* Queries always use an index to provide fast results regardless of the size of your data.
+
+* When a query reaches a RavenDB instance, the instance calls its query optimizer to analyze the query
+  and determine which index should be used to retrieve the requested data.
+
+* Indexes allow RavenDB to provide query results without scanning the entire dataset each and every time.
+  * Learn more about indexes, their general concept, and the different **index types** in this [indexes overview](../../../studio/database/indexes/indexes-overview.mdx) article.
+  * You can choose the underlying **search engine** that will be used by the RavenDB indexes.
+    Learn more in [selecting the search engine](../../../indexes/search-engine/corax.mdx#selecting-the-search-engine).
+
+
+
+We differentiate between the following **3 query scenarios**:
+
+1. [Index query](../../../client-api/session/querying/how-to-query.mdx#indexquery)
+2. [Dynamic query](../../../client-api/session/querying/how-to-query.mdx#dynamicquery)
+3. [Full collection query](../../../client-api/session/querying/how-to-query.mdx#collectionquery)
+
+For each scenario, a different index type is used, as described below.
+
+
+**1. Query an existing index**:
+
+* **Query type**: Index query
+  **Index used**: Static-index
+
+* You can specify which **STATIC-index** the query will use.
+
+* Static indexes are defined by the user, as opposed to auto-indexes that are created by the server
+  when querying a collection with some filtering applied. See [Static-index vs Auto-index](../../../studio/database/indexes/indexes-overview.mdx#auto-indexes--vs--static-indexes).
+
+* Example RQL:   `from index "Employees/ByFirstName" where FirstName == "Laura"`
+  See more examples in [querying an index](../../../indexes/querying/query-index.mdx).
+
+
+
+
+**2. Query a collection - with filtering**:
+
+* **Query type**: Dynamic Query
+  **Index used**: Auto-index
+
+* When querying a collection without specifying an index and with some filtering condition
+  (other than just the document ID), the query-optimizer will analyze the query to see if an **AUTO-index**
+  that can answer the query already exists, i.e. an auto-index on the queried collection with index-fields that match those queried.
+
+* If such an auto-index (Not a static one) is found, it will be used to fetch the results.
+
+* Otherwise, if no relevant auto-index is found,
+  the query-optimizer will create a new auto-index with fields that match the query criteria.
+  At this time, and only at this time, the query will wait for the auto-indexing process to complete.
+  Subsequent queries that target this auto-index will be served immediately.
+
+* Note: if an auto-index is already defined on the queried collection
+  but indexes a different field than the one queried on,
+  the query-optimizer will create a new auto-index that merges
+  the fields from the existing auto-index with the newly queried fields.
+
+* Once the newly created auto-index is done indexing the data,
+  the old auto-index is removed in favor of the new one.
+
+* Over time, an optimal set of indexes is generated by the query optimizer to answer your queries.
+
+* RQL Example:   `from Employees where FirstName == "Laura"`
+  See more examples [below](../../../client-api/session/querying/how-to-query.mdx#sessionquery).
+
+* Note: Counters and Time series are an exception to this flow.
+  Dynamic queries on counters and time series values don't create auto-indexes.
+  However, a static-index can be defined on [Time series](../../../document-extensions/timeseries/indexing.mdx)
+  and [Counters](../../../document-extensions/counters/indexing.mdx).
+
+
+
+
+**3. Query a collection - query full collection | query by ID**:
+
+* **Query type**: Full collection Query
+  **Index used**: The raw collection (internal storage indexes)
+
+* Full collection query:
+
+  * When querying a collection without specifying an index and with no filtering condition,
+    then all documents from the specified collection are returned.
+
+  * RavenDB uses the raw collection documents in its **internal storage indexes** as the source for this query.
+    No auto-index is created.
+
+  * Example RQL:   `from Employees`
+
+* Query by document ID:
+
+  * When querying a collection only by document ID or IDs,
+    then similar to the full collection query, no auto-index is created.
+
+  * RavenDB uses the raw collection documents as the source for this query.
+
+  * Example RQL:   `from Employees where id() == "employees/1-A"`
+    See more examples [below](../../../client-api/session/querying/how-to-query.mdx#sessionquery).
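+
+As an illustration of the auto-index merging described above, consider two consecutive
+dynamic queries on different fields of the same collection. A minimal sketch (in Python
+client syntax for brevity; the auto-index names are assumed, following RavenDB's usual
+`Auto/...` naming):
+
+{`# May create the auto-index 'Auto/Employees/ByFirstName'
+by_first = list(session.query(object_type=Employee).where_equals("first_name", "Laura"))
+
+# May then create a merged auto-index such as 'Auto/Employees/ByFirstNameAndLastName',
+# which replaces the previous one once it is done indexing
+by_last = list(session.query(object_type=Employee).where_equals("last_name", "Callahan"))
+`}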
+
+
+
+## session.query
+
+* The simplest way to issue a query is by using the session's `query` method.
+
+* The following examples show **dynamic queries** that do not specify which index to use.
+  Please refer to [querying an index](../../../indexes/querying/query-index.mdx) for other examples.
+
+* Querying can be enhanced using these [custom methods](../../../client-api/session/querying/how-to-query.mdx#custom-methods).
+
+
+
+**Query collection - no filtering**
+
+
+
+{`// This is a Full Collection Query
+// No auto-index is created since no filtering is applied
+
+$allEmployees = $session
+    ->query(Employee::class) // Query for all documents from 'Employees' collection
+    ->toList();              // Execute the query
+
+// All 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// This RQL is a Full Collection Query
+// No auto-index is created since no filtering is applied
+
+from "Employees"
+`}
+
+
+
+
+
+**Query collection - by ID**
+
+
+
+{`// Query collection by document ID
+// No auto-index is created when querying only by ID
+
+$employee = $session
+    ->query(Employee::class)
+    ->whereEquals("Id", "employees/1-A") // Query for specific document from 'Employees' collection
+    ->firstOrDefault();                  // Execute the query
+
+// The resulting 'Employee' entity is loaded and will be tracked by the session
+`}
+
+
+
+{`// This RQL queries the 'Employees' collection by ID
+// No auto-index is created when querying only by ID
+
+from "Employees" where id() == "employees/1-A"
+`}
+
+
+
+
+
+**Query collection - with filtering**
+
+
+
+{`// Query collection - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+$employees = $session
+    ->query(Employee::class)
+    ->whereEquals("FirstName", "Robert") // Query for all 'Employee' documents that match this predicate
+    ->toList();                          // Execute the query
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes the requested field
+
+from "Employees" where FirstName == "Robert"
+`}
+
+
+
+
+
+**Query collection - with paging**
+
+
+
+{`// Query collection - page results
+// No auto-index is created since no filtering is applied
+
+$products = $session
+    ->query(Product::class)
+    ->skip(5)   // Skip first 5 results
+    ->take(10)  // Load up to 10 entities from 'Products' collection
+    ->toList(); // Execute the query
+
+// The resulting 'Product' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - page results
+// No auto-index is created since no filtering is applied
+
+from "Products" limit 5, 10 // skip 5, take 10
+`}
+
+
+
+* By default, if the page size is not specified, all matching records will be retrieved from the database.
+
+
+
+
+## session.advanced.documentQuery
+
+* `documentQuery` provides a full spectrum of low-level querying capabilities,
+  giving you more flexibility and control when making complex queries.
+
+* Below is a simple _documentQuery_ usage.
+  For a full description and more examples see:
+  * [What is a document query](../../../client-api/session/querying/document-query/what-is-document-query.mdx)
+  * [query -vs- documentQuery](../../../client-api/session/querying/document-query/query-vs-document-query.mdx)
+
+**Example**:
+
+
+
+{`// Query with documentQuery - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+$employees = $session
+    ->advanced()->documentQuery(Employee::class) // Use documentQuery
+    ->whereEquals("FirstName", "Robert")         // Query for all 'Employee' documents that match this predicate
+    ->toList();                                  // Execute the query
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+{`// Query collection - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes the requested field
+
+from "Employees" where FirstName = "Robert"
+`}
+
+
+
+
+
+## session.advanced.rawQuery
+
+* Queries defined with [query](../../../client-api/session/querying/how-to-query.mdx#sessionquery)
+  or [documentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery)
+  are translated by the RavenDB client to [RQL](../../../client-api/session/querying/what-is-rql.mdx)
+  when sent to the server.
+
+* The session also gives you a way to express the query directly in RQL using the `rawQuery` method.
+
+**Example**:
+
+
+
+{`// Query with rawQuery - filter by document field
+
+// An auto-index will be created if there isn't already an existing auto-index
+// that indexes this document field
+
+$employees = $session
+    // Provide RQL to rawQuery
+    ->advanced()->rawQuery(Employee::class, "from 'Employees' where FirstName = 'Robert'")
+    // Execute the query
+    ->toList();
+
+// The resulting 'Employee' entities are loaded and will be tracked by the session
+`}
+
+
+
+
+## Custom methods
+
+Available custom methods for the session's [query](../../../client-api/session/querying/how-to-query.mdx#sessionquery) method:
+
+- [aggregateBy](../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx)
+- [count](../../../client-api/session/querying/how-to-count-query-results.mdx)
+- [countLazily](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
+- [customize](../../../client-api/session/querying/how-to-customize-query.mdx)
+- [highlight](../../../client-api/session/querying/text-search/highlight-query-results.mdx)
+- [include](../../../client-api/how-to/handle-document-relationships.mdx)
+- [intersect](../../../client-api/session/querying/how-to-use-intersect.mdx)
+- [lazily](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
+- [longCount](../../../client-api/session/querying/how-to-count-query-results.mdx)
+- [moreLikeThis](../../../client-api/session/querying/how-to-use-morelikethis.mdx)
+- [ofType](../../../client-api/session/querying/how-to-project-query-results.mdx#oftype-(as)---simple-projection)
+- [orderByDistance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance)
+- [orderByDistanceDescending](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedesc)
+- [orderByScore](../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+- [orderByScoreDescending](../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
- [projectInto](../../../client-api/session/querying/how-to-project-query-results.mdx)
+- [search](../../../client-api/session/querying/text-search/full-text-search.mdx)
+- [spatial](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx)
+- [statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx)
+- [suggestUsing](../../../client-api/session/querying/how-to-work-with-suggestions.mdx)
+
+
+
+## Syntax
+
+
+
+{`// Methods for querying a collection OR an index:
+// ===============================================
+
+public function query(?string $className, $collectionOrIndexName = null): DocumentQueryInterface;
+
+public function documentQuery(?string $className, $indexNameOrClass = null, ?string $collectionName = null, bool $isMapReduce = false): DocumentQueryInterface;
+
+// RawQuery:
+// =========
+public function rawQuery(?string $className, string $query): RawDocumentQueryInterface;
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| **$className** | `?string` | The name of the class to query |
+| **$collectionOrIndexName** | `?string` | The name of the collection or index to query |
+| **$indexNameOrClass** | `?string` | The name or class of the index to query |
+| **$collectionName** | `?string` | The name of the collection to query |
+| **$isMapReduce** | `bool` | Whether querying a map-reduce index |
+| **$query** | `string` | The RQL query string |
+
+| Return Value | |
+| - | - |
+| `DocumentQueryInterface` <br/> `RawDocumentQueryInterface` | Interfaces exposing additional query methods and [extensions](../../../client-api/session/querying/how-to-query.mdx#custom-methods) |
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-python.mdx
new file mode 100644
index 0000000000..f927548615
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-query-python.mdx
@@ -0,0 +1,515 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Queries in RavenDB can be written with either of the following:
+  * Using a rich API via the session's `query` method
+  * Using the [document_query](../../../client-api/session/querying/document-query/query-vs-document-query.mdx) method
+  * Using **RQL** -
+    - when querying via the session's `raw_query` method
+    - when querying from the Studio's [Query view](../../../studio/database/queries/query-view.mdx)
+
+* Queries defined with `query` or `document_query` are translated by the RavenDB client to [RQL](../../../client-api/session/querying/what-is-rql.mdx)
+  when sent to the server.
+
+* All queries in RavenDB use an **index** to provide results, even when you don't specify one.
+  Learn more [below](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index).
+
+* Queries that do Not specify which index to use are called **Dynamic Queries**.
+  This article displays examples of dynamic queries only.
+  For examples showing how to query an index see [querying an index](../../../indexes/querying/query-index.mdx).
+
+* The entities returned by the query are 'loaded' and **tracked** by the [Session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx).
+  Entities will Not be tracked when:
+  * Query returns a [projection](../../../client-api/session/querying/how-to-project-query-results.mdx)
+  * Tracking is [disabled](../../../client-api/session/configuration/how-to-disable-tracking.mdx#disable-tracking-query-results)
+
+* Query results are **cached** by default. To disable query caching see [NoCaching](../../../client-api/session/querying/how-to-customize-query.mdx#nocaching).
+
+* Queries are timed out after a configurable time period. See [query timeout](../../../server/configuration/database-configuration.mdx#databasesquerytimeoutinsec).
+
+* In this page:
+  * [Queries always provide results using an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index)
+  * [session.query](../../../client-api/session/querying/how-to-query.mdx#sessionquery)
+  * [session.advanced.document_query](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocument_query)
+  * [session.advanced.raw_query](../../../client-api/session/querying/how-to-query.mdx#sessionadvancedraw_query)
+  * [Custom methods](../../../client-api/session/querying/how-to-query.mdx#custom-methods)
+  * [Syntax](../../../client-api/session/querying/how-to-query.mdx#syntax)
+
+
+## Queries always provide results using an index
+
+* Queries always use an index to provide fast results regardless of the size of your data.
+
+* When a query reaches a RavenDB instance, the instance calls its query optimizer to analyze the query
+  and determine which index should be used to retrieve the requested data.
+ +* Indexes allow to provide query results without scanning the entire dataset each and every time. + * Learn more about indexes, their general concept, and the different **index types** in this [indexes overview](../../../studio/database/indexes/indexes-overview.mdx) article. + * You can choose the underlying **search engine** that will be used by the RavenDB indexes. + Learn more in [selecting the search engine](../../../indexes/search-engine/corax.mdx#selecting-the-search-engine). + + + +We differentiate between the following **3 query scenarios**: + +1. [Index query](../../../client-api/session/querying/how-to-query.mdx#indexquery) +2. [Dynamic query](../../../client-api/session/querying/how-to-query.mdx#dynamicquery) +3. [Full collection query](../../../client-api/session/querying/how-to-query.mdx#collectionquery) + +For each scenario, a different index type is used, as described below. + + + + +
+**1. Query an existing index**: + +* **Query type**: Index query + **Index used**: Static-index + +* You can specify which **STATIC-index** the query will use. + +* Static indexes are defined by the user, as opposed to auto-indexes that are created by the server + when querying a collection with some filtering applied. See [Static-index vs Auto-index](../../../studio/database/indexes/indexes-overview.mdx#auto-indexes--vs--static-indexes). + +* Example RQL:   `from index "Employees/ByFirstName" where FirstName == "Laura"` + See more examples in [querying an index](../../../indexes/querying/query-index.mdx). + + + + + +**2. Query a collection - with filtering**: + +* **Query type**: Dynamic Query + **Index used**: Auto-index + +* When querying a collection without specifying an index and with some filtering condition + (other than just the document ID) the query-optimizer will analyze the query to see if an **AUTO-index** + that can answer the query already exists, i.e. an auto-index on the collection queried with index-fields that match those queried. + +* If such auto-index (Not a static one...) is found, it will be used to fetch the results. + +* Else, if no relevant auto-index is found, + the query-optimizer will create a new auto-index with fields that match the query criteria. + At this time, and only at this time, the query will wait for the auto-indexing process to complete. + Subsequent queries that target this auto-index will be served immediately. + +* Note: if there exists an auto-index that is defined on the collection queried + but is indexing a different field than the one queried on, + then the query-optimizer will create a new auto-index that merges both the + fields from the existing auto-index and the new fields queried. + +* Once the newly created auto-index is done indexing the data, + the old auto-index is removed in favor of the new one. + +* Over time, an optimal set of indexes is generated by the query optimizer to answer your queries. + +* Example RQL:   `from Employees where FirstName == "Laura"` + See more examples [below](../../../client-api/session/querying/how-to-query.mdx#sessionquery). +* Note: Counters and Time series are an exception to this flow. + Dynamic queries on counters and time series values don't create auto-indexes. + However, a static-index can be defined on [Time series](../../../document-extensions/timeseries/indexing.mdx) and [Counters](../../../document-extensions/counters/indexing.mdx). + + + + + +**3. Query a collection - query full collection | query by ID**: + +* **Query type**: Full collection Query + **Index used**: The raw collection (internal storage indexes) + +* Full collection query: + + * When querying a collection without specifying an index and with no filtering condition, + then all documents from the specified collection are returned. + + * RavenDB uses the raw collection documents in its **internal storage indexes** as the source for this query. + No auto-index is created. + + * Example RQL:   `from Employees` + +* Query by document ID: + + * When querying a collection only by document ID or IDs, + then similar to the full collection query, no auto-index is created. + + * RavenDB uses the raw collection documents as the source for this query. + + * Example RQL:   `from Employees where id() == "employees/1-A"` + See more examples [below](../../../client-api/session/querying/how-to-query.mdx#sessionquery). + + + + +## session.query + +* The simplest way to issue a query is using session's `query` method. 
+ +* The following examples show **dynamic queries** that do not specify which index to use. + Please refer to [querying an index](../../../indexes/querying/query-index.mdx) for other examples. + +* Querying can be enhanced using these [extension methods](../../../client-api/session/querying/how-to-query.mdx#custom-methods-and-extensions-for-linq). + + + +**Query collection - no filtering** + + + + +{`# This is a Full Collection Query +# No auto-index is created since no filtering is applied + +all_employees = list( # Execute the query + session.query(object_type=Employee) # Query for all documents from 'Employees' collection +) +`} + + + + +{`# This is a Full Collection Query +# No auto-index is created since no filtering is applied + +# Query for all documents from 'Employees' collection +query = session.query(object_type=Employee) + +# Execute the query +all_employees = list(query) + +# All 'Employee' entities are loaded and will be tracked by the session +`} + + + + +{`// This RQL is a Full Collection Query +// No auto-index is created since no filtering is applied + +from "Employees" +`} + + + + + + + + +**Query collection - by ID** + + + + +{`# Query collection by document ID +# No auto-index is created when querying only by ID + +employee = ( + session.query(object_type=Employee) + .where_equals("Id", "employees/1-A") # Query for specific document from 'Employees' collection + .first() # Execute the query +) + +# The resulting 'Employee' entity is loaded and will be tracked by the session +`} + + + + +{`# Query collection by document ID +# No auto-index is created when querying only by ID + +# Query for specific document from 'Employees' collection +query = session.query(object_type=Employee).where_equals("Id", "employees/1-A") + +# Execute the query +employee_result = query.first() + +# The resulting 'Employee' entity is loaded and will be tracked by the session +`} + + + + +{`// This RQL queries the 'Employees' collection by ID +// No auto-index is created when querying only by ID + +from "Employees" where id() == "employees/1-A" +`} + + + + + + + + +**Query collection - with filtering** + + + + +{`# Query collection - filter by document field + +# An auto-index will be created if there isn't already an existing auto-index +# that indexes this document field + +employees = list( # Execute the query + session.query(object_type=Employee).where_equals( + "first_name", "Robert" + ) # Query for all 'Employee' documents that match this predicate +) + +# The resulting 'Employee' entities are loaded and will be tracked by the session +`} + + + + +{`# Query collection - filter by document field + +# An auto-index will be created if there isn't already an existing auto-index +# that indexes this document field + +# Query for all 'Employee' documents that match this predicate +query = session.query(object_type=Employee).where_equals("first_name", "Robert") + +# Execute the query +employees = list(query) + +# The resulting 'Employee' entities are loaded and will be tracked by the session +`} + + + + +{`// Query collection - filter by document field + +// An auto-index will be created if there isn't already an existing auto-index +// that indexes the requested field + +from "Employees" where FirstName == "Robert" +`} + + + + + + + + +**Query collection - with paging** + + + + +{`# Query collection - page results +# No auto-index is created since no filtering is applied + +products = list() # Execute the query + +# The resulting 'Product' entities are loaded and will be tracked by the session 
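+
+# The complete paging call - skip the first 5 results, then take up to 10
+# (the same query as in the next tab):
+products = list(                        # Execute the query
+    session.query(object_type=Product)  # Query the 'Products' collection
+    .skip(5)                            # Skip first 5 results
+    .take(10)                           # Load up to 10 entities
+)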
+`} + + + + +{`# Query collection - page results +# No auto-index is created since no filtering is applied + +query = ( + session.query(object_type=Product).skip(5).take(10) # Skip first 5 results +) # Load up to 10 entities from 'Products' collection + +# Execute the query +products = list(query) + +# The resulting 'Product' entities are loaded and will be tracked by the session +`} + + + + +{`// Query collection - page results +// No auto-index is created since no filtering is applied + +from "Products" limit 5, 10 // skip 5, take 10 +`} + + + + +* By default, if the page size is not specified, all matching records will be retrieved from the database. + + + + + +## session.advanced.document_query + +* Below is a simple `document_query` usage sample and its RQL equivalent. + For a full description and more examples see: + * [What is a document query](../../../client-api/session/querying/document-query/what-is-document-query.mdx) + * [query -vs- document_query](../../../client-api/session/querying/document-query/query-vs-document-query.mdx) + +**Example**: + + + + +{`# Query with document_query - filter by document field + +# An auto-index will be created if there isn't already an existing auto-index +# that indexes this document field + +employees = list( # Execute the query + session.advanced.document_query(object_type=Employee).where_equals( # Use document_query + "first_name", "Robert" + ) # Query for all 'Employee' documents that match this predicate +) + +# The resulting 'Employee' entities are loaded and will be tracked by the session +`} + + + + +{`// Query collection - filter by document field + +// An auto-index will be created if there isn't already an existing auto-index +// that indexes the requested field + +from "Employees" where FirstName = "Robert" +`} + + + + + + +## session.advanced.raw_query + +* Queries defined with [query](../../../client-api/session/querying/how-to-query.mdx#sessionquery) or [document_query](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocument_query) are translated by the RavenDB client to [RQL](../../../client-api/session/querying/what-is-rql.mdx) + when sent to the server. + +* The session also gives you a way to express the query directly in RQL using the `raw_query` method. 
+ +**Example**: + + + + +{`# Query with RawQuery - filter by document field + +# An auto-index will be created if there isn't already an existing auto-index +# that indexes this document field + +employees = list( # Execute the query + session.advanced.raw_query( + "from 'Employees' where first_name = 'Robert'", object_type=Employee + ) # Provide RQL to RawQuery +) +# The resulting 'Employee' entities are loaded and will be tracked by the session +`} + + + + + + +## Custom methods + +Available custom methods for session's [query](../../../client-api/session/querying/how-to-query.mdx#sessionquery) method: + +- [aggregate_by](../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx) +- [count](../../../client-api/session/querying/how-to-count-query-results.mdx) +- [count_lazily](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx) +- [customize](../../../client-api/session/querying/how-to-customize-query.mdx) +- [highlight](../../../client-api/session/querying/text-search/highlight-query-results.mdx) +- [include](../../../client-api/how-to/handle-document-relationships.mdx) +- [intersect](../../../client-api/session/querying/how-to-use-intersect.mdx) +- [lazily](../../../client-api/session/querying/how-to-perform-queries-lazily.mdx) +- [long_count](../../../client-api/session/querying/how-to-count-query-results.mdx) +- [more_like_this](../../../client-api/session/querying/how-to-use-morelikethis.mdx) +- [of_type](../../../client-api/session/querying/how-to-project-query-results.mdx#oftype-(as)---simple-projection) +- [order_by_distance](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance) +- [order_by_distance_descending](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedesc) +- [order_by_score](../../../client-api/session/querying/sort-query-results.mdx#order-by-score) +- [order_by_score_descending](../../../client-api/session/querying/sort-query-results.mdx#order-by-score) +- [project_into](../../../client-api/session/querying/how-to-project-query-results.mdx) +- [search](../../../client-api/session/querying/text-search/full-text-search.mdx) +- [spatial](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx) +- [statistics](../../../client-api/session/querying/how-to-get-query-statistics.mdx) +- [suggest_using](../../../client-api/session/querying/how-to-work-with-suggestions.mdx) + + + +## Syntax + + + +{`# Overloads for querying a collection OR an index: +# ================================================ + +def query( + self, source: Optional[Query] = None, object_type: Optional[Type[_T]] = None +) -> DocumentQuery[_T]: + ... + +def query_collection( + self, collection_name: str, object_type: Optional[Type[_T]] = None +) -> DocumentQuery[_T]: + ... + +def query_index(self, index_name: str, object_type: Optional[Type[_T]] = None) -> DocumentQuery[_T]: + ... + +def document_query( + self, + index_name: str = None, + collection_name: str = None, + object_type: Type[_T] = None, + is_map_reduce: bool = False, +) -> DocumentQuery[_T]: + ... + +# Overloads for querying an index: +# ================================ +def query_index_type( + self, index_type: Type[_TIndex], object_type: Optional[Type[_T]] = None +) -> DocumentQuery[_T]: + ... + +def document_query_from_index_type( + self, index_type: Type[_TIndex], object_type: Type[_T] +) -> DocumentQuery[_T]: + ... 
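+
+# Hypothetical usage of the overloads above, for illustration only:
+#   session.query_collection("Employees", Employee)         # dynamic collection query
+#   session.query_index("Employees/ByFirstName", Employee)  # query a static index by name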
+ +# RawQuery +# ================================ +def raw_query(self, query: str, object_type: Optional[Type[_T]] = None) -> RawDocumentQuery[_T]: + ... +`} + + + + +| Parameter | Type | Description | +|--------------------|--------|------------------------------------------| +| **object_type** | `Type[_T]` | Queried entities type | +| **collection_name** | `str` | Queried collection name | +| **query** | `str` | RQL query string | +| **index_name** | `str` | Queried index name | +| **index_type** | `Type[_TIndex]` | Queried index type | +| **is_map_reduce** | `bool` | Is a map-reduce index queried | + +| Return Value | | +| - | - | +| `DocumentQuery[_T]`
`RawDocumentQuery[_T]` | Instances exposing additional query methods and [extensions](../../../client-api/session/querying/how-to-query.mdx#custom-methods-and-extensions-for-linq) | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-stream-query-results-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-stream-query-results-csharp.mdx new file mode 100644 index 0000000000..11463e30f4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-stream-query-results-csharp.mdx @@ -0,0 +1,606 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB supports __streaming data__ from the server to the client. + Streaming is useful when processing a large number of results. + +* The data streamed can be a result of a dynamic query, a static index query, or just filtered by a prefix. + +* To stream results, use the `Stream` method from the `Advanced` session operations. + +* In this page: + + * [Streaming overview](../../../client-api/session/querying/how-to-stream-query-results.mdx#streaming-overview) + + * [Stream by query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-by-query) + * [Stream a dynamic query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-a-dynamic-query) + * [Stream a dynamic raw query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-a-dynamic-raw-query) + * [Stream a projected query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-a-projected-query) + * [Stream an index query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-an-index-query) + * [Stream related documents](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-related-documents) + * [By query syntax](../../../client-api/session/querying/how-to-stream-query-results.mdx#by-query-syntax) + + * [Stream by prefix](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-by-prefix) + * [Stream results by prefix](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-results-by-prefix) + * [By prefix syntax](../../../client-api/session/querying/how-to-stream-query-results.mdx#by-prefix-syntax) + + +## Streaming overview + +* __Immediate processing__: + Neither the client nor the server holds the full response in memory. + Instead, as soon as the server has a single result, it sends it to the client. + Thus, your application can start processing results before the server sends them all. + +* __No tracking__: + The stream results are Not tracked by the session. + Changes made to the resulting entities will not be sent to the server when _SaveChanges_ is called. + +* __A snapshot of the data__: + The stream results are a snapshot of the data at the time when the query is computed by the server. + Results that match the query after it was already processed are Not streamed to the client. + +* __Query limitations:__: + + * A streaming query does not wait for indexing by design. + So calling [WaitForNonStaleResults](../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults) is Not supported and will result in an exception. + + * Using [Include](../../../client-api/how-to/handle-document-relationships.mdx#includes) to load a related document to the session in a streaming query is Not supported. 
+ Learn how to __stream related documents__ here [below](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-related-documents). + + + +## Stream by query + + +#### Stream a dynamic query + + + + +{`// Define a query on a collection +IRavenQueryable query = session + .Query() + .Where(x => x.FirstName == "Robert"); + +// Call 'Stream' to execute the query +// Optionally, pass an 'out param' for getting the query stats +IEnumerator> streamResults = + session.Advanced.Stream(query, out StreamQueryStatistics streamQueryStats); + +// Read from the stream +while (streamResults.MoveNext()) +{ + // Process the received result + StreamResult currentResult = streamResults.Current; + + // Get the document from the result + // This entity will Not be tracked by the session + Employee employee = currentResult.Document; + + // The currentResult item also provides the following: + var employeeId = currentResult.Id; + var documentMetadata = currentResult.Metadata; + var documentChangeVector = currentResult.ChangeVector; + + // Can get info from the stats, i.e. get number of total results + int totalResults = streamQueryStats.TotalResults; + // Get the Auto-Index that was used/created with this dynamic query + string indexUsed = streamQueryStats.IndexName; +} +`} + + + + +{`// Define a query on a collection +IRavenQueryable query = asyncSession + .Query() + .Where(x => x.FirstName == "Robert"); + +// Call 'StreamAsync' to execute the query +// Optionally, pass an 'out param' for getting the query stats +await using (IAsyncEnumerator> streamResults = + await asyncSession.Advanced.StreamAsync(query, out StreamQueryStatistics streamQueryStats)) +{ + // Read from the stream + while (await streamResults.MoveNextAsync()) + { + // Process the received result + StreamResult currentResult = streamResults.Current; + + // Get the document from the result + // This entity will Not be tracked by the session + Employee employee = currentResult.Document; + + // The currentResult item also provides the following: + var employeeId = currentResult.Id; + var documentMetadata = currentResult.Metadata; + var documentChangeVector = currentResult.ChangeVector; + + // Can get info from the stats, i.e. get number of total results + int totalResults = streamQueryStats.TotalResults; + // Get the Auto-Index that was used/created with this dynamic query + string indexUsed = streamQueryStats.IndexName; + } +} +`} + + + + +{`// Define a document query on a collection +IDocumentQuery query = session + .Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Robert"); + +// Call 'Stream' to execute the query +// Optionally, add an out param for getting the query stats +IEnumerator> streamResults = + session.Advanced.Stream(query, out StreamQueryStatistics streamQueryStats); + +// Read from the stream +while (streamResults.MoveNext()) +{ + // Process the received result + StreamResult currentResult = streamResults.Current; + + // Get the document from the result + // This entity will Not be tracked by the session + Employee employee = currentResult.Document; + + // The currentResult item also provides the following: + var employeeId = currentResult.Id; + var documentMetadata = currentResult.Metadata; + var documentChangeVector = currentResult.ChangeVector; + + // Can get info from the stats, i.e. 
get number of total results + int totalResults = streamQueryStats.TotalResults; + // Get the Auto-Index that was used/created with this dynamic query + string indexUsed = streamQueryStats.IndexName; +} +`} + + + + +{`// Define a document query on a collection +IAsyncDocumentQuery query = asyncSession + .Advanced + .AsyncDocumentQuery() + .WhereEquals(x => x.FirstName, "Robert"); + +// Call 'StreamAsync' to execute the query +// Optionally, add an out param for getting the query stats +await using (IAsyncEnumerator> streamResults = + await asyncSession.Advanced.StreamAsync(query, out StreamQueryStatistics streamQueryStats)) +{ + // Read from the stream + while (await streamResults.MoveNextAsync()) + { + // Process the received result + StreamResult currentResult = streamResults.Current; + + // Get the document from the result + // This entity will Not be tracked by the session + Employee employee = currentResult.Document; + + // The currentResult item also provides the following: + var employeeId = currentResult.Id; + var documentMetadata = currentResult.Metadata; + var documentChangeVector = currentResult.ChangeVector; + + // Can get info from the stats, i.e. get number of total results + int totalResults = streamQueryStats.TotalResults; + // Get the Auto-Index that was used/created with this dynamic query + string indexUsed = streamQueryStats.IndexName; + } +} +`} + + + + + + + +#### Stream a dynamic raw query + + + + +{`// Define a raw query using RQL +IRawDocumentQuery query = session + .Advanced + .RawQuery("from Employees where FirstName = 'Robert'"); + +// Call 'Stream' to execute the query +IEnumerator> streamResults = session.Advanced.Stream(query); + +while (streamResults.MoveNext()) +{ + StreamResult currentResult = streamResults.Current; + Employee employee = streamResults.Current.Document; +} +`} + + + + +{`// Define a raw query using RQL +IAsyncRawDocumentQuery query = asyncSession + .Advanced + .AsyncRawQuery("from Employees where FirstName = 'Robert'"); + +// Call 'StreamAsync' to execute the query +await using (IAsyncEnumerator> streamResults = + await asyncSession.Advanced.StreamAsync(query)) +{ + while (await streamResults.MoveNextAsync()) + { + StreamResult currentResult = streamResults.Current; + Employee employee = streamResults.Current.Document; + } +} +`} + + + + + + + +#### Stream a projected query + + + + +{`// Define a query with projected results +// Each query result is not an Emplyee document but an entity of type 'NameProjection'. +IRavenQueryable query = session + .Query() + .ProjectInto(); + +// Call 'Stream' to execute the query +IEnumerator> streamResults = session.Advanced.Stream(query); + +while (streamResults.MoveNext()) +{ + StreamResult currentResult = streamResults.Current; + NameProjection employeeName = streamResults.Current.Document; +} +`} + + + + +{`// Define a query with projected results +// Each query result is not an Employee document but an entity of type 'NameProjection'. 
+IRavenQueryable query = asyncSession + .Query() + .ProjectInto(); + +// Call 'StreamAsync' to execute the query +await using (IAsyncEnumerator> streamResults = + await asyncSession.Advanced.StreamAsync(query)) +{ + while (await streamResults.MoveNextAsync()) + { + StreamResult currentResult = streamResults.Current; + NameProjection employeeName = streamResults.Current.Document; + } +} +`} + + + + +{`// Each query result will be of this class type +public class NameProjection +{ + public string FirstName { get; set; } + public string LastName { get; set; } +} +`} + + + + + + + +#### Stream an index query + + + + +{`// Define a query on an index +IQueryable query = session.Query() + .Where(employee => employee.FirstName == "Robert"); + +// Call 'Stream' to execute the query +IEnumerator> streamResults = session.Advanced.Stream(query); + +while (streamResults.MoveNext()) +{ + StreamResult currentResult = streamResults.Current; + Employee employee = streamResults.Current.Document; +} +`} + + + + +{`// Define a query on an index +IQueryable query = asyncSession.Query() + .Where(employee => employee.FirstName == "Robert"); + +// Call 'StreamAsync' to execute the query +await using (IAsyncEnumerator> streamResults = + await asyncSession.Advanced.StreamAsync(query)) +{ + while (await streamResults.MoveNextAsync()) + { + StreamResult currentResult = streamResults.Current; + Employee employee = streamResults.Current.Document; + } +} +`} + + + + +{`// The index: +public class Employees_ByFirstName : AbstractIndexCreationTask +{ + public Employees_ByFirstName() + { + Map = employees => from employee in employees + select new + { + FirstName = employee.FirstName + }; + } +} +`} + + + + + + + +#### Stream related documents +__Why streaming query results does not support 'include'__: + +* A document can reference [related documents](../../../indexes/indexing-related-documents.mdx#what-are-related-documents). +* An [Include](../../../client-api/how-to/handle-document-relationships.mdx#includes) clause in a non-streamed query loads these related documents to the session + so that they can be accessed without an additional query to the server. +* Those included documents are sent to the client at the end of the query results. + This does not mesh well with streaming, which is designed to allow transferring massive amounts of data, + possibly over a significant amount of time. + +__How to stream related documents__: + +* Instead of using _include_, define the query so that it will return a [projection](../../../indexes/querying/projections.mdx). +* The projected query results will not be just the documents from the queried collection. + Instead, each result will be an entity containing the related document entities in addition to the original queried document. +* On the client side, you need to define a class that matches the projected query result. + +__Example__: + +* The below example uses RawQuery. + However, the same logic can be applied to a Query, DocumentQuery, or when querying an index. +* Note: + The projected class in the example contains the full related documents. + However, you can project just the needed properties from the related documents. + + + + +{`// Define a query with a 'select' clause to project the results. + +// The related Company & Employee documents are 'loaded', +// and returned in the projection together with the Order document itself. + +// Each query result is not an Order document but an entity of type 'AllDocsProjection'. 
+ +IRawDocumentQuery query = session + .Advanced + .RawQuery(@"from Orders as o + where o.ShipTo.City = 'London' + load o.Company as c, o.Employee as e + select { + Order: o, + Company: c, + Employee: e + }"); + +// Call 'Stream' to execute the query +IEnumerator> streamResults = session.Advanced.Stream(query); + +while (streamResults.MoveNext()) +{ + StreamResult currentResult = streamResults.Current; + AllDocsProjection projection = streamResults.Current.Document; + + Order theOrderDoc = projection.Order; + Company theRelatedCompanyDoc = projection.Company; + Employee theRelatedEmployeeDoc = projection.Employee; +} +`} + + + + +{`// Define a query with a 'select' clause to project the results. + +// The related Company & Employee documents are 'loaded', +// and returned in the projection together with the Order document itself. + +// Each query result is not an Order document but an entity of type 'AllDocsProjection'. + +IAsyncRawDocumentQuery query = asyncSession + .Advanced + .AsyncRawQuery(@"from Orders as o + where o.ShipTo.City = 'London' + load o.Company as c, o.Employee as e + select { + Order: o, + Company: c, + Employee: e + }"); + +// Call 'StreamAsync' to execute the query +await using (IAsyncEnumerator> streamResults = + await asyncSession.Advanced.StreamAsync(query)) +{ + while (await streamResults.MoveNextAsync()) + { + StreamResult currentResult = streamResults.Current; + AllDocsProjection projection = streamResults.Current.Document; + + Order theOrderDoc = projection.Order; + Company theRelatedCompanyDoc = projection.Company; + Employee theRelatedEmployeeDoc = projection.Employee; + } +} +`} + + + + +{`// Each query result will be of this class type +public class AllDocsProjection +{ + public Order Order { get; set; } + public Employee Employee { get; set; } + public Company Company { get; set; } +} +`} + + + + + + + + + +#### By query syntax + + + +{`// Stream by query: +IEnumerator> Stream(IQueryable query); +IEnumerator> Stream(IQueryable query, out StreamQueryStatistics streamQueryStats); + +IEnumerator> Stream(IDocumentQuery query); +IEnumerator> Stream(IDocumentQuery query, out StreamQueryStatistics streamQueryStats); + +IEnumerator> Stream(IRawDocumentQuery query); +IEnumerator> Stream(IRawDocumentQuery query, out StreamQueryStatistics streamQueryStats); +`} + + + +| Parameters | type | description | +| - | - | - | +| **query** | [IQueryable](../../../client-api/session/querying/how-to-query.mdx#sessionquery), [IDocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery) or [IRawDocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) | The query for which to stream results | +| `out` **streamQueryStats** | [StreamQueryStatistics](../../../glossary/stream-query-statistics.mdx) | Information about performed query | + +| Return Value | | +| - | - | +| IEnumerator<[StreamResult<T>](../../../glossary/stream-result.mdx)> | Enumerator with resulting entities | + + + +## Stream by prefix + + +#### Stream results by prefix +* Streamed data can also be filtered by an __ID prefix__ and by other __filtering options__, see syntax below. +* Note: No auto-index is created when streaming results by a prefix. 
+ + + + +{`string idPrefix = "Orders/"; +string matches = "*25-A|77?-A"; + +// Filter streamed results by the passing 'prefix' and an optional 'matches' string +IEnumerator> streamResults = session.Advanced.Stream(idPrefix, matches); + +while (streamResults.MoveNext()) +{ + // Documents that will be returned are only those matching the following: + // * Document ID starts with "Orders/" + // * The rest of the ID (after prefix) must match the 'matches' string + // e.g. "Orders/325-A" or Orders/772-A", etc. + + StreamResult currentResult = streamResults.Current; + Order order = currentResult.Document; +} +`} + + + + +{`string idPrefix = "Orders/"; +string matches = "*25-A|77?-A"; + +// Filter streamed results by the passing 'prefix' and an optional 'matches' string +await using (IAsyncEnumerator> streamResults = + await asyncSession.Advanced.StreamAsync(idPrefix, matches)) +{ + while (await streamResults.MoveNextAsync()) + { + // Documents that will be returned are only those matching the following: + // * Document ID starts with "Orders/" + // * The rest of the ID (after prefix) must match the 'matches' string + // e.g. "Orders/325-A" or Orders/772-A", etc. + + StreamResult currentResult = streamResults.Current; + Order order = currentResult.Document; + } +} +`} + + + + + + + +#### By prefix syntax + + + +{`// Stream by prefix: +IEnumerator> Stream(string startsWith, string matches = null, + int start = 0, int pageSize = int.MaxValue, string startAfter = null); +`} + + + +| Parameters | type | description | +| - | - | - | +| **startsWith** | `string` | Stream documents with this ID prefix | +| **matches** | `string` | Filter the ID part that comes after the specified prefix.
Use '?' for any single character, '*' for any characters.
Use '|' to separate rules. | +| **start** | `int` | Number of documents to skip | +| **pageSize** | `int` | Maximum number of documents to retrieve | +| **startAfter** | `string` | Skip fetching documents until this ID is found.
Only return documents after this ID (default: null). | + +| Return Value | | +| - | - | +| IEnumerator<[StreamResult<T>](../../../glossary/stream-result.mdx)> | Enumerator with resulting entities | + +
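+For example, a minimal sketch (the prefix, page size, and ID values are illustrative)
+that streams the 'Orders' documents that come after a given ID, 100 documents at a time:
+
+
+
+{`IEnumerator<StreamResult<Order>> streamResults =
+    session.Advanced.Stream<Order>(startsWith: "Orders/",
+                                   pageSize: 100,
+                                   startAfter: "Orders/500-A");
+
+while (streamResults.MoveNext())
+{
+    Order order = streamResults.Current.Document;
+}
+`}
+
+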
+ + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-stream-query-results-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-stream-query-results-java.mdx new file mode 100644 index 0000000000..bb387bef4c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-stream-query-results-java.mdx @@ -0,0 +1,112 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +RavenDB supports __streaming data__ from the server to the client. +Streaming is useful when processing a large number of results. + +The data streamed can be a result of a dynamic query, a static index query, or just filtered by a prefix. + +To stream results, use the `stream` method from the `advanced` session operations. +## Streaming overview + +* __Immediate processing__: + Neither the client nor the server holds the full response in memory. + Instead, as soon as the server has a single result, it sends it to the client. + Thus, your application can start processing results before the server sends them all. + +* __No tracking__: + The stream results are Not tracked by the session. + Changes made to the resulting entities will not be sent to the server when _saveChanges_ is called. + +* __A snapshot of the data__: + The stream results are a snapshot of the data at the time when the query is computed by the server. + Results that match the query after it was already processed are Not streamed to the client. + +* __Query limitations:__: + + * A streaming query does not wait for indexing by design. + So calling [waitForNonStaleResults](../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults) is Not supported and will result in an exception. + + * Using [include](../../../client-api/session/loading-entities.mdx#load-with-includes) to load a related document to the session in a streaming query is Not supported. + + + +## Syntax + + + +{` CloseableIterator> stream(IDocumentQuery query); + + CloseableIterator> stream(IDocumentQuery query, Reference streamQueryStats); + + CloseableIterator> stream(IRawDocumentQuery query); + + CloseableIterator> stream(IRawDocumentQuery query, Reference streamQueryStats); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **query** | [IDocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery) or [IRawDocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) | Query to stream results for. | +| `Reference` **streamQueryStats** | StreamQueryStatistics | Information about performed query. | + +| Return Value | | +| ------------- | ----- | +| CloseableIterator<StreamResult> | Iterator with entities. 
| + +## Example I - Using Static Index + + + +{`IDocumentQuery query = session + .query(Employee.class, Employees_ByFirstName.class) + .whereEquals("FirstName", "Robert"); + +CloseableIterator> results = session.advanced().stream(query); + +while (results.hasNext()) \{ + StreamResult employee = results.next(); +\} +`} + + + +## Example II - Dynamic Document Query + + + +{`IDocumentQuery query = session + .advanced() + .documentQuery(Employee.class) + .whereEquals("FirstName", "Robert"); + +Reference streamQueryStatsRef = new Reference<>(); +CloseableIterator> results = session.advanced().stream(query, streamQueryStatsRef); + +while (results.hasNext()) \{ + StreamResult employee = results.next(); +\} +`} + + + +## Example III - Dynamic Raw Query + + + +{`IRawDocumentQuery query = session.advanced() + .rawQuery(Employee.class, "from Employees where FirstName = 'Robert'"); + +CloseableIterator> results = session.advanced().stream(query); + +while (results.hasNext()) \{ + StreamResult employee = results.next(); +\} +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-stream-query-results-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-stream-query-results-nodejs.mdx new file mode 100644 index 0000000000..c457190edb --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-stream-query-results-nodejs.mdx @@ -0,0 +1,390 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB supports __streaming data__ from the server to the client. + Streaming is useful when processing a large number of results. + +* The data streamed can be a result of a dynamic query, a static index query, or just filtered by a prefix. + +* To stream results, use the `stream` method from the `advanced` session operations. + +* In this page: + + * [Streaming overview](../../../client-api/session/querying/how-to-stream-query-results.mdx#streaming-overview) + + * [Stream by query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-by-query) + * [Stream a dynamic query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-a-dynamic-query) + * [Stream a dynamic raw query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-a-dynamic-raw-query) + * [Stream a projected query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-a-projected-query) + * [Stream an index query](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-an-index-query) + * [Stream related documents](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-related-documents) + * [By query syntax](../../../client-api/session/querying/how-to-stream-query-results.mdx#by-query-syntax) + + * [Stream by prefix](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-by-prefix) + * [Stream results by prefix](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-results-by-prefix) + * [By prefix syntax](../../../client-api/session/querying/how-to-stream-query-results.mdx#by-prefix-syntax) + + +## Streaming overview + +* __Immediate processing__: + Neither the client nor the server holds the full response in memory. + Instead, as soon as the server has a single result, it sends it to the client. + Thus, your application can start processing results before the server sends them all. 
+ +* __No tracking__: + The stream results are Not tracked by the session. + Changes made to the resulting entities will not be sent to the server when _saveChanges_ is called. + +* __A snapshot of the data__: + The stream results are a snapshot of the data at the time when the query is computed by the server. + Results that match the query after it was already processed are Not streamed to the client. + +* __Query limitations:__: + + * A streaming query does not wait for indexing by design. + So calling [waitForNonStaleResults](../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults) is Not supported and will result in an exception. + + * Using [include](../../../client-api/session/loading-entities.mdx#load-with-includes) to load a related document to the session in a streaming query is Not supported. + Learn how to __stream related documents__ here [below](../../../client-api/session/querying/how-to-stream-query-results.mdx#stream-related-documents). + + + +## Stream by query + + +#### Stream a dynamic query + + + +{`// Define a query on a collection +const query = session.query(\{ collection: "employees" \}) + .whereEquals('FirstName', 'Robert'); + +// Call stream() to execute the query, it returns a Node.js ReadableStream. +// Parms: pass the query and an optional callback for getting the query stats. +let streamQueryStats; +const queryStream = await session.advanced.stream(query, s => streamQueryStats = s); + +// Two options to get query stats: +// * Pass a callback to stream() with an 'out param' that will be filled with query stats. +// This param can then be accessed in the 'end' event. +// * Or: Use an event listener, listen to the 'stats' event, as described below. + +// Handle stream events with callback functions: + +// Process the item received: +queryStream.on("data", resultItem => \{ + // Get the employee entity from the result item. + // Note: This entity will Not be tracked by the session. + const employee = resultItem.document; + + // The resultItem also provides the following: + const employeeId = resultItem.id; + const documentMetadata = resultItem.metadata; + const documentChangeVector = resultItem.changeVector; +\}); + +// Can get query stats by using an event listener: +queryStream.once("stats", queryStats => \{ + // Get number of total results + const totalResults = queryStats.totalResults; + // Get the Auto-Index that was used/created with this dynamic query + const indexUsed = queryStats.indexName; +\}); + +// Stream emits an 'end' event when there is no more data to read: +queryStream.on("end", () => \{ + // Get info from 'streamQueryStats', the stats object + const totalResults = streamQueryStats.totalResults; + const indexUsed = streamQueryStats.indexName; +\}); + +queryStream.on("error", err => \{ + // Handle errors +\}); +`} + + + + + + +#### Stream a dynamic raw query + + + +{`// Define a raw query using RQL +const rawQuery = session.advanced + .rawQuery("from Employees where FirstName = 'Robert'"); + +// Call stream() to execute the query +const queryStream = await session.advanced.stream(rawQuery); + +// Handle stats & stream events as described in the dynamic query example above. +`} + + + + + + +#### Stream a projected query + + + +{`// Define a query with projected results +// Each query result is not an Employee document but an entity containing selected fields only. 
+const projectedQuery = session.query(\{collection: 'employees'\}) + .selectFields(['FirstName', 'LastName']); + +// Call stream() to execute the query +const queryStream = await session.advanced.stream(projectedQuery); + +queryStream.on("data", resultItem => \{ + // entity contains only the projected fields + const employeeName = resultItem.document; +\}); + +// Handle stats & stream events as described in the dynamic query example above. +`} + + + + + + +#### Stream an index query + + + + +{`// Define a query on an index +const query = session.query({ indexName: "Employees/ByFirstName" }) + .whereEquals("FirstName", "Robert"); + +// Call stream() to execute the query +const queryStream = await session.advanced.stream(query); + +// Can get info about the index used from the stats +queryStream.once("stats", queryStats => { + const indexUsed = queryStats.indexName; + const isIndexStale = queryStats.stale; + const lastTimeIndexWasUpdated = queryStats.indexTimestamp; +}); + +// Handle stats & stream events as described in the dynamic query example above. +`} + + + + +{`// The index: +class Employees_ByFirstName extends AbstractJavaScriptIndexCreationTask { + + constructor () { + super(); + + this.map("Employees", employee => { + return { + firstName: employee.FirstName + } + }); + } +} +`} + + + + + + + +#### Stream related documents +__Why streaming query results does not support 'include'__: + +* A document can reference [related documents](../../../indexes/indexing-related-documents.mdx#what-are-related-documents). +* An [include](../../../client-api/session/loading-entities.mdx#load-with-includes) clause in a non-streamed query loads these related documents to the session + so that they can be accessed without an additional query to the server. +* Those included documents are sent to the client at the end of the query results. + This does not mesh well with streaming, which is designed to allow transferring massive amounts of data, + possibly over a significant amount of time. + +__How to stream related documents__: + +* Instead of using _include_, define the query so that it will return a [projection](../../../indexes/querying/projections.mdx). +* The projected query results will not be just the documents from the queried collection. + Instead, each result will be an entity containing the related document entities in addition to the original queried document. +* On the client side, you need to define a class that matches the projected query result. + +__Example__: + +* The below example uses RawQuery. + However, the same logic can be applied to a Query, DocumentQuery, or when querying an index. +* Note: + The projected class in the example contains the full related documents. + However, you can project just the needed properties from the related documents. + + + +{`// Define a query with a 'select' clause to project the results. + +// The related Company & Employee documents are 'loaded', +// and returned in the projection together with the Order document itself. + +// Each query result is not an Order document +// but an entity containing the document & the related documents. 
+const rawQuery = session.advanced + .rawQuery(\`from Orders as o + where o.ShipTo.City = 'London' + load o.Company as c, o.Employee as e + select \{ + order: o, + company: c, + employee: e + \}\`); + +// Call stream() to execute the query +const queryStream = await session.advanced.stream(rawQuery); + +queryStream.on("data", resultItem => \{ + const theOrderDocument = resultItem.document.order; + const theCompanyDocument = resultItem.document.company; + const theEmployeeDocument = resultItem.document.employee; +\}); + +// Handle stats & stream events as described in the dynamic query example above. +`} + + + + + + +#### By query syntax + + + +{`await session.advanced.stream(query, [statsCallback]); +`} + + + +| Parameters | type | description | +| - | - | - | +| **query** | `IDocumentQuery` or `IRawDocumentQuery` | The query for which to stream results | +| **statsCallback** | `(streamStats) => void` | <ul><li>An optional callback function with an output parameter.</li><li>The parameter passed to the callback will be filled with the `StreamQueryStatistics` object when query returns.</li></ul> | + +| Return Value | | +| - | - | +| `Promise` | A `Promise` resolving to readable stream with query results | + +| `StreamQueryStatistics` | | | +| - | - | - | +| __totalResults__ | `number` | Total number of results | +| __resultEtag__ | `number` | An Etag that is specific for the query results | +| __indexName__ | `string` | Name of index that was used for the query | +| __indexTimestamp__ | `object` | Time when index was last updated | +| __stale__ | `boolean` | `true` if index is stale | + + + + + +## Stream by prefix + + +#### Stream results by prefix +* Streamed data can also be filtered by an __ID prefix__ and by some __filtering options__, see below. +* Note: No auto-index is created when streaming results by a prefix. + + + +{`const idPrefix = "Order"; + +// Filter streamed results by passing an ID prefix +const streamResults = await session.advanced.stream(idPrefix); + +queryStream.on("data", resultItem => \{ + // Only documents with ID that starts with 'Order' + const resultDocument = resultItem.document; +\}); + +queryStream.on("end", () => \{ + // Stream ended, no more data +\}); + +queryStream.on("error", err => \{ + // Handle errors +\}); +`} + + + + +{`const idPrefix = "Orders/"; +const options = \{ + matches: "*25-A|77?-A" +\} + +// Filter streamed results by ID prefix and by options +const streamResults = await session.advanced.stream(idPrefix, options); + +queryStream.on("data", resultItem => \{ + // Documents that will be returned are only those matching the following: + // * Document ID starts with "Orders/" + // * The rest of the ID (after prefix) must match the 'matches' string + // e.g. "Orders/325-A" or Orders/772-A", etc. 
+ + const resultDocument = resultItem.document; +\}); + +queryStream.on("end", () => \{ + // Stream ended, no more data +\}); + +queryStream.on("error", err => \{ + // Handle errors +\}); +`} + + + + + + +#### By prefix syntax + + + +{`await session.advanced.stream(idPrefix); +await session.advanced.stream(idPrefix, options); +`} + + + +| Parameters | type | description | +| - | - | - | +| **idPrefix** | `string` | Stream documents with this ID prefix | +| **options** | `StartingWithOptions` | More filtering options, see description below | + +| Return Value | | +| - | - | +| `Promise` | A `Promise` resolving to readable stream with query results | + +| `StartingWithOptions` | | | +| - | - | - | +| __matches__ | `number` | Filter the ID part that comes after the specified prefix.
Use '?' for any single character, '*' for any characters.
Use '|' to separate rules. |
+| __start__ | `number` | Number of documents to skip |
+| __pageSize__ | `number` | Maximum number of documents to retrieve |
+| __exclude__ | `string` | Pipe ('|')-separated values that the ID part following the prefix should Not match |
+| __startAfter__ | `string` | Skip fetching documents until this ID is found.
Only return documents after this ID (default: null). | + +
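+For example, a minimal sketch (the option values are illustrative) that streams the
+'Orders' documents that come after a given ID, 100 documents at a time:
+
+
+
+{`const options = \{
+    pageSize: 100,               // Read up to 100 documents
+    startAfter: "Orders/500-A"   // Only return documents that come after this ID
+\};
+
+const streamResults = await session.advanced.stream("Orders/", options);
+
+streamResults.on("data", resultItem => \{
+    const order = resultItem.document;
+\});
+`}
+
+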
+ + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-csharp.mdx new file mode 100644 index 0000000000..4d78c9567d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-csharp.mdx @@ -0,0 +1,90 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To return only documents that match **all** provided sub-queries, use the `Intersect` extension which enables RavenDB to perform server-side intersection queries. + +## Syntax + + + +{`IRavenQueryable Intersect(); +`} + + + +## Example + + + + +{`// return all T-shirts that are manufactured by 'Raven' +// and contain both 'Small Blue' and 'Large Gray' types +IList tshirts = session.Query() + .Where(x => x.Manufacturer == "Raven") + .Intersect() + .Where(x => x.Color == "Blue" && x.Size == "Small") + .Intersect() + .Where(x => x.Color == "Gray" && x.Size == "Large") + .OfType() + .ToList(); +`} + + + + +{`// return all T-shirts that are manufactured by 'Raven' +// and contain both 'Small Blue' and 'Large Gray' types +IList tshirts = await asyncSession.Query() + .Where(x => x.Manufacturer == "Raven") + .Intersect() + .Where(x => x.Color == "Blue" && x.Size == "Small") + .Intersect() + .Where(x => x.Color == "Gray" && x.Size == "Large") + .OfType() + .ToListAsync(); +`} + + + + +{`public class TShirts_ByManufacturerColorSizeAndReleaseYear : AbstractIndexCreationTask +{ + public class Result + { + public string Manufacturer { get; set; } + + public string Color { get; set; } + + public string Size { get; set; } + + public int ReleaseYear { get; set; } + } + + public TShirts_ByManufacturerColorSizeAndReleaseYear() + { + Map = tshirts => from tshirt in tshirts + from type in tshirt.Types + select new + { + Manufacturer = tshirt.Manufacturer, + Color = type.Color, + Size = type.Size, + ReleaseYear = tshirt.ReleaseYear + }; + } +} +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(Manufacturer = 'Raven', Color = 'Blue' and Size = 'Small', Color = 'Gray' and Size = 'Large') +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-java.mdx new file mode 100644 index 0000000000..1079435636 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-java.mdx @@ -0,0 +1,102 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To return only documents that match **all** provided sub-queries, use the `intersect` method which enables RavenDB to perform server-side intersection queries. 
+ +## Syntax + + + +{`IDocumentQuery intersect(); +`} + + + +## Example + + + + +{`// return all T-shirts that are manufactured by 'Raven' +// and contain both 'Small Blue' and 'Large Gray' types +List tShirts = session + .query(TShirt.class, TShirts_ByManufacturerColorSizeAndReleaseYear.class) + .whereEquals("manufacturer", "Raven") + .intersect() + .whereEquals("color", "Blue") + .andAlso() + .whereEquals("size", "Small") + .intersect() + .whereEquals("color", "Gray") + .andAlso() + .whereEquals("size", "Large") + .toList(); +`} + + + + +{`public static class TShirts_ByManufacturerColorSizeAndReleaseYear extends AbstractIndexCreationTask { + public static class Result { + private String manufacturer; + private String color; + private String size; + private int releaseYear; + + public String getManufacturer() { + return manufacturer; + } + + public void setManufacturer(String manufacturer) { + this.manufacturer = manufacturer; + } + + public String getColor() { + return color; + } + + public void setColor(String color) { + this.color = color; + } + + public String getSize() { + return size; + } + + public void setSize(String size) { + this.size = size; + } + + public int getReleaseYear() { + return releaseYear; + } + + public void setReleaseYear(int releaseYear) { + this.releaseYear = releaseYear; + } + } + + public TShirts_ByManufacturerColorSizeAndReleaseYear() { + map = "docs.TShirts.SelectMany(tshirt => tshirt.types, (tshirt, type) => new {" + + " manufacturer = tshirt.manufacturer," + + " color = type.color," + + " size = type.size," + + " releaseYear = tshirt.releaseYear" + + "})"; + } +} +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(Manufacturer = 'Raven', Color = 'Blue' and Size = 'Small', Color = 'Gray' and Size = 'Large') +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-nodejs.mdx new file mode 100644 index 0000000000..df4eefc667 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-nodejs.mdx @@ -0,0 +1,75 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To return only documents that match *all* provided sub-queries, use the `intersect()` method which enables RavenDB to perform server-side intersection queries. 
+ +## Syntax + + + +{`query.intersect(); +`} + + + +## Example + + + + +{`// return all T-shirts that are manufactured by 'Raven' +// and contain both 'Small Blue' and 'Large Gray' types +const tShirts = await session + .query({ indexName: "TShirts/ByManufacturerColorSizeAndReleaseYear" }) + .whereEquals("manufacturer", "Raven") + .intersect() + .whereEquals("color", "Blue") + .andAlso() + .whereEquals("size", "Small") + .intersect() + .whereEquals("color", "Gray") + .andAlso() + .whereEquals("size", "Large") + .toList(); +`} + + + + +{`class TShirts_ByManufacturerColorSizeAndReleaseYearResult { + constructor(data) { + this.manufacturer = data.manufacturer; + this.color = data.color; + this.size = data.size; + this.releaseYear = data.releaseYear; + } +} + +class TShirts_ByManufacturerColorSizeAndReleaseYear extends AbstractIndexCreationTask { + + constructor() { + super(); + + this.map = \`docs.TShirts.SelectMany(tshirt => tshirt.types, (tshirt, type) => new { + manufacturer = tshirt.manufacturer, + color = type.color, + size = type.size, + releaseYear = tshirt.releaseYear + })\`; + } +} +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(manufacturer = 'Raven', color = 'Blue' and size = 'Small', color = 'Gray' and size = 'Large') +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-php.mdx new file mode 100644 index 0000000000..a033200887 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-php.mdx @@ -0,0 +1,50 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To return only documents that match **all** provided sub-queries, use the +`intersect` extension to allow RavenDB to perform server-side intersection queries. + +## Syntax + + + +{`public function intersect(): DocumentQueryInterface; +`} + + + +## Example + + + + +{`// return all T-shirts that are manufactured by 'Raven' +// and contain both 'Small Blue' and 'Large Gray' types +/** @var array $tShirts */ +$tShirts = $session + ->query(TShirt::class, TShirts_ByManufacturerColorSizeAndReleaseYear::class) + ->whereEquals("manufacturer", "Raven") + ->intersect() + ->whereEquals("color", "Blue") + ->andAlso() + ->whereEquals("size", "Small") + ->intersect() + ->whereEquals("color", "Gray") + ->andAlso() + ->whereEquals("size", "Large") + ->toList(); +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(Manufacturer = 'Raven', Color = 'Blue' and Size = 'Small', Color = 'Gray' and Size = 'Large') +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-python.mdx new file mode 100644 index 0000000000..dad32c8b64 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-intersect-python.mdx @@ -0,0 +1,73 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To return only documents that match **all** provided sub-queries, use the `intersect` method +which enables RavenDB to perform server-side intersection queries. + +## Syntax + + + +{`def intersect(self) -> DocumentQuery[_T]: ... 
+`} + + + +## Example + + + + +{`# return all T-shirts that are manufactured by 'Raven' +# and contain both 'Small Blue' and 'Large Gray' types +tshirts = list( + session.query_index("TShirts/ByManufacturerColorSizeAndReleaseYear") + .where_equals("manufacturer", "Raven") + .intersect() + .where_equals("color", "Blue") + .and_also() + .where_equals("size", "Small") + .intersect() + .where_equals("color", "Gray") + .and_also() + .where_equals("size", "Large") + .of_type(TShirt) +) +`} + + + + +{`class TShirts_ByManufacturerColorSizeAndReleaseYear(AbstractIndexCreationTask): + class Result: + def __init__(self, manufacturer: str = None, color: str = None, size: str = None, release_year: int = None): + self.manufacturer = manufacturer + self.color = color + self.size = size + self.release_year = release_year + + def __init__(self): + super().__init__() + self.map = ( + "from tshirt in docs.TShirts from type in tshirt.types select new {" + " manufacturer = tshirt.manufacturer," + " color = tshirt.color," + " size = type.size," + " release_year = tshirt.release_year" + "}" + ) +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(Manufacturer = 'Raven', Color = 'Blue' and Size = 'Small', Color = 'Gray' and Size = 'Large') +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-csharp.mdx new file mode 100644 index 0000000000..a8aa1e5e01 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-csharp.mdx @@ -0,0 +1,194 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`MoreLikeThis` is available through query extension methods and will return similar documents according to the provided criteria and options. + +## Syntax + + + +{`IRavenQueryable MoreLikeThis(MoreLikeThisBase moreLikeThis); + +IRavenQueryable MoreLikeThis(Action> builder); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **moreLikeThis** | `MoreLikeThisBase` | Defines the type of MoreLikeThis that should be executed | +| **builder** | `Action>` | Builder with fluent API that constructs the `MoreLikeThisBase` instance | + +### Builder + + + +{`IMoreLikeThisOperations UsingAnyDocument(); + +IMoreLikeThisOperations UsingDocument(string documentJson); + +IMoreLikeThisOperations UsingDocument(Expression> predicate); + +IMoreLikeThisOperations WithOptions(MoreLikeThisOptions options); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **documentJson** | string | Inline JSON document that will be used as a base for operation | +| **predicate** | `Expression>` | Filtering expression utilized to find a document that will be used as a base for operation | +| **options** | `MoreLikeThisOptions` | Non-default options that should be used for operation | + +### Options + + + +{`public int? MinimumTermFrequency \{ get; set; \} = 2; + +public int? MaximumQueryTerms \{ get; set; \} = 25; + +public int? MaximumNumberOfTokensParsed \{ get; set; \} = 5000; + +public int? MinimumWordLength \{ get; set; \} = 0; + +public int? MaximumWordLength \{ get; set; \} = 0; + +public int? MinimumDocumentFrequency \{ get; set; \} = 5; + +public int? MaximumDocumentFrequency \{ get; set; \} = int.MaxValue; + +public int? 
MaximumDocumentFrequencyPercentage \{ get; set; \} + +public bool? Boost \{ get; set; \} = false; + +public float? BoostFactor \{ get; set; \} = 1; + +public string StopWordsDocumentId \{ get; set; \} + +public string[] Fields \{ get; set; \} +`} + + + +| Options | | | +| ------------- | ------------- | ----- | +| **MinimumTermFrequency** | int? | Ignores terms with less than this frequency in the source doc | +| **MaximumQueryTerms** | int? | Returns a query with no more than this many terms | +| **MaximumNumberOfTokensParsed** | int? | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support | +| **MinimumWordLength** | int? | Ignores words less than this length or, if 0, then this has no effect | +| **MaximumWordLength** | int? | Ignores words greater than this length or if 0 then this has no effect | +| **MinimumDocumentFrequency** | int? | Ignores words which do not occur in at least this many documents | +| **MaximumDocumentFrequency** | int? | Ignores words which occur in more than this many documents | +| **MaximumDocumentFrequencyPercentage** | int? | Ignores words which occur in more than this percentage of documents | +| **Boost** | bool? | Boost terms in query based on score | +| **BoostFactor** | float? | Boost factor when boosting based on score | +| **StopWordsDocumentId** | string | Document ID containing custom stop words | +| **Fields** | string[] | Fields to compare | + +## Example I + + + + +{`// Search for similar articles to 'articles/1' +// using 'Articles/MoreLikeThis' index and search only field 'Body' +List
<Article> articles = session
+    .Query<Article>("Articles/MoreLikeThis")
+    .MoreLikeThis(builder => builder
+        .UsingDocument(x => x.Id == "articles/1")
+        .WithOptions(new MoreLikeThisOptions
+        {
+            Fields = new[] { "Body" }
+        }))
+    .ToList();
+`}
+
+
+
+
+{`// Search for similar articles to 'articles/1'
+// using 'Articles/MoreLikeThis' index and search only field 'Body'
+List<Article>
articles = await asyncSession
+    .Query<Article>("Articles/MoreLikeThis")
+    .MoreLikeThis(builder => builder
+        .UsingDocument(x => x.Id == "articles/1")
+        .WithOptions(new MoreLikeThisOptions
+        {
+            Fields = new[] { "Body" }
+        }))
+    .ToListAsync();
+`}
+
+
+
+
+{`from index 'Articles/MoreLikeThis'
+where morelikethis(id() = 'articles/1', '{ "Fields" : [ "Body" ] }')
+`}
+
+
+
+
+## Example II
+
+
+
+
+{`// Search for similar articles to 'articles/1'
+// using 'Articles/MoreLikeThis' index and search only field 'Body'
+// where article category is 'IT'
+List<Article>
articles = session
+    .Query<Article>("Articles/MoreLikeThis")
+    .MoreLikeThis(builder => builder
+        .UsingDocument(x => x.Id == "articles/1")
+        .WithOptions(new MoreLikeThisOptions
+        {
+            Fields = new[] { "Body" }
+        }))
+    .Where(x => x.Category == "IT")
+    .ToList();
+`}
+
+
+
+
+{`// Search for similar articles to 'articles/1'
+// using 'Articles/MoreLikeThis' index and search only field 'Body'
+// where article category is 'IT'
+List<Article>
articles = await asyncSession
+    .Query<Article>
("Articles/MoreLikeThis") + .MoreLikeThis(builder => builder + .UsingDocument(x => x.Id == "articles/1") + .WithOptions(new MoreLikeThisOptions + { + Fields = new[] { "Body" } + })) + .Where(x => x.Category == "IT") + .ToListAsync(); +`} + + + + +{`from index 'Articles/MoreLikeThis' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "Body" ] }') and Category == 'IT' +`} + + + + +## Remarks + +Do not forget to add the following **using** statement which contains necessary extensions: + + + +{`using Raven.Client.Documents; +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-java.mdx new file mode 100644 index 0000000000..c0a38ada35 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-java.mdx @@ -0,0 +1,139 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`MoreLikeThis` is available through query methods and will return similar documents according to the provided criteria and options. + +## Syntax + + + +{`IDocumentQuery moreLikeThis(MoreLikeThisBase moreLikeThis); + +IDocumentQuery moreLikeThis(Consumer> builder); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **moreLikeThis** | `MoreLikeThisBase` | Defines the type of MoreLikeThis that should be executed | +| **builder** | `Consumer>` | Builder with fluent API that constructs the `MoreLikeThisBase` instance | + +### Builder + + + +{`IMoreLikeThisOperations usingAnyDocument(); + +IMoreLikeThisOperations usingDocument(String documentJson); + +IMoreLikeThisOperations usingDocument(Consumer>> builder); + +IMoreLikeThisOperations withOptions(MoreLikeThisOptions options); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **documentJson** | String | Inline JSON document that will be used as a base for operation | +| **builder** | `Consumer>>` | Filtering expression utilized to find a document that will be used as a base for operation | +| **options** | `MoreLikeThisOptions` | Non-default options that should be used for operation | + +### Options + + + +{`private Integer minimumTermFrequency = 2; +private Integer maximumQueryTerms = 25; +private Integer maximumNumberOfTokensParsed = 5000; +private Integer minimumWordLength = 0; +private Integer maximumWordLength = 0; +private Integer minimumDocumentFrequency = 5; +private Integer maximumDocumentFrequency = Integer.MAX_VALUE; +private Integer maximumDocumentFrequencyPercentage; +private Boolean boost = false; +private Float boostFactor = 1f; +private String stopWordsDocumentId; +private String[] fields; + +// getters and setters +`} + + + +| Options | | | +| ------------- | ------------- | ----- | +| **MinimumTermFrequency** | Integer | Ignores terms with less than this frequency in the source doc | +| **MaximumQueryTerms** | Integer | Returns a query with no more than this many terms | +| **MaximumNumberOfTokensParsed** | Integer | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support | +| **MinimumWordLength** | Integer | Ignores words less than this length or, if 0, then this has no effect | +| **MaximumWordLength** | Integer | Ignores words greater than this length or if 0 then this has no effect | +| **MinimumDocumentFrequency** | Integer | Ignores words which do not occur in at 
least this many documents | +| **MaximumDocumentFrequency** | Integer | Ignores words which occur in more than this many documents | +| **MaximumDocumentFrequencyPercentage** | Integer | Ignores words which occur in more than this percentage of documents | +| **Boost** | Boolean | Boost terms in query based on score | +| **BoostFactor** | Float | Boost factor when boosting based on score | +| **StopWordsDocumentId** | String | Document ID containing custom stop words | +| **Fields** | String[] | Fields to compare | + +## Example I + + + + +{`// Search for similar articles to 'articles/1' +// using 'Articles/MoreLikeThis' index and search only field 'body' +MoreLikeThisOptions options = new MoreLikeThisOptions(); +options.setFields(new String[]{ "body" }); + +List
<Article> articles = session
+    .query(Article.class, Query.index("Articles/MoreLikeThis"))
+    .moreLikeThis(builder -> builder
+        .usingDocument(x -> x.whereEquals("id()", "articles/1"))
+        .withOptions(options))
+    .toList();
+`}
+
+
+
+
+{`from index 'Articles/MoreLikeThis'
+where morelikethis(id() = 'articles/1', '{ "Fields" : [ "body" ] }')
+`}
+
+
+
+
+## Example II
+
+
+
+
+{`// Search for similar articles to 'articles/1'
+// using 'Articles/MoreLikeThis' index and search only field 'body'
+// where article category is 'IT'
+MoreLikeThisOptions options = new MoreLikeThisOptions();
+options.setFields(new String[]{ "body" });
+List<Article>
articles = session + .query(Article.class, Query.index("Articles/MoreLikeThis")) + .moreLikeThis(builder -> builder + .usingDocument(x -> x.whereEquals("id()", "articles/1")) + .withOptions(options)) + .whereEquals("category", "IT") + .toList(); +`} + + + + +{`from index 'Articles/MoreLikeThis' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "body" ] }') and category == 'IT' +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-nodejs.mdx new file mode 100644 index 0000000000..60f81aa50b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-nodejs.mdx @@ -0,0 +1,112 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`MoreLikeThis` is available through query methods and will return similar documents according to the provided criteria and options. + +## Syntax + + + +{`query.moreLikeThis(moreLikeThis); + +query.moreLikeThis(builder); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **moreLikeThis** | `MoreLikeThisBase` | Defines the type of MoreLikeThis that should be executed | +| **builder** | `function` | Builder with fluent API that constructs the `MoreLikeThisBase` instance | + +### Builder + + + +{`builder.usingAnyDocument(); + +builder.usingDocument(documentJson); + +builder.usingDocument(filterBuilder); + +builder.withOptions(options); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **documentJson** | string | Inline JSON document that will be used as a base for operation | +| **builder** | `(filterBuilder) => void` | Filtering expression utilized to find a document that will be used as a base for operation | +| **options** | object | Non-default options that should be used for operation | +|    *minimumTermFrequency* | number | Ignores terms with less than this frequency in the source doc | +|    *maximumQueryTerms* | number | Returns a query with no more than this many terms | +|    *maximumNumberOfTokensParsed* | number | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support | +|    *minimumWordLength* | number | Ignores words less than this length or, if 0, then this has no effect | +|    *maximumWordLength* | number | Ignores words greater than this length or if 0 then this has no effect | +|    *minimumDocumentFrequency* | number | Ignores words which do not occur in at least this many documents | +|    *maximumDocumentFrequency* | number | Ignores words which occur in more than this many documents | +|    *maximumDocumentFrequencyPercentage* | number | Ignores words which occur in more than this percentage of documents | +|    *boost* | boolean | Boost terms in query based on score | +|    *boostFactor* | number | Boost factor when boosting based on score | +|    *stopWordsDocumentId* | string | Document ID containing custom stop words | +|    *fields* | string[] | Fields to compare | + +## Example I + + + + +{`// Search for similar articles to 'articles/1' +// using 'Articles/MoreLikeThis' index and search only field 'body' +const options = { fields: [ "body" ] }; + +const articles = await session + .query({ indexName: "Articles/MoreLikeThis" }) + .moreLikeThis(builder => builder + .usingDocument(x => x.whereEquals("id()", "articles/1")) + 
.withOptions(options)) + .all(); +`} + + + + +{`from index 'Articles/MoreLikeThis' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "body" ] }') +`} + + + + +## Example II + + + + +{`// Search for similar articles to 'articles/1' +// using 'Articles/MoreLikeThis' index and search only field 'body' +// where article category is 'IT' +const options = { fields: [ "body" ] }; +const articles = await session + .query({ indexName: "Articles/MoreLikeThis" }) + .moreLikeThis(builder => builder + .usingDocument(x => x.whereEquals("id()", "articles/1")) + .withOptions(options)) + .whereEquals("category", "IT") + .all(); +`} + + + + +{`from index 'Articles/MoreLikeThis' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "body" ] }') and category == 'IT' +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-php.mdx new file mode 100644 index 0000000000..f8f95c0689 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-php.mdx @@ -0,0 +1,172 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`moreLikeThis` is available through query extension methods and will return similar documents according to the provided criteria and options. + +## Syntax + + + +{`/** + * Usage: + * - moreLikeThis(MoreLikeThisBase $moreLikeThis); + * - moreLikeThis(function(MoreLikeThisBuilder($builder) \{...\}); + * + * @param MoreLikeThisBase|Closure|null $moreLikeThisOrBuilder + * @return DocumentQueryInterface + */ +public function moreLikeThis(null|MoreLikeThisBase|Closure $moreLikeThisOrBuilder): DocumentQueryInterface; +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **$moreLikeThisOrBuilder** | `null`
<br/>`MoreLikeThisBase`<br/>
`Closure` | Defines the type of MoreLikeThis that should be executed | + +### Builder + + + +{`/** + * Usage: + * - usingDocument(); + * - usingDocument(string $documentJson); + * - usingDocument(function(MoreLikeThisBuilder $build) \{...\}); + * + * @param string|Closure|null $documentJsonOrBuilder + * @return MoreLikeThisOperationsInterface + */ +function usingDocument(null|string|Closure $documentJsonOrBuilder): MoreLikeThisOperationsInterface; + +function usingDocumentWithJson(?string $documentJson): MoreLikeThisOperationsInterface; // same as calling usingDocument(string $documentJson) + +function usingDocumentWithBuilder(?Closure $builder): MoreLikeThisOperationsInterface; // same as calling usingDocument(function($builder) \{...\}); + +function withOptions(MoreLikeThisOptions $options): MoreLikeThisOperationsInterface; +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **$documentJsonOrBuilder** | `null`
<br/>`string`<br/>
`Closure` | Builder or Inline JSON document to be used as a base for the operation | +| **$documentJson** | ?string | Inline JSON document to be used as a base for the operation | +| **$builder** | `?Closure` | Builder with fluent API that constructs the `MoreLikeThisOperationsInterface` instance | +| **$options** | `MoreLikeThisOptions` | Available operation options (see below) | + +### Options + + + +{`public const DEFAULT_MAXIMUM_NUMBER_OF_TOKENS_PARSED = 5000; +public const DEFAULT_MINIMUM_TERM_FREQUENCY = 2; +public const DEFAULT_MINIMUM_DOCUMENT_FREQUENCY = 5; +public const DEFAULT_MAXIMUM_DOCUMENT_FREQUENCY = PhpClient::INT_MAX_VALUE; +public const DEFAULT_BOOST = false; +public const DEFAULT_BOOST_FACTOR = 1; +public const DEFAULT_MINIMUM_WORD_LENGTH = 0; +public const DEFAULT_MAXIMUM_WORD_LENGTH = 0; +public const DEFAULT_MAXIMUM_QUERY_TERMS = 25; + +private ?int $minimumTermFrequency = null; +private ?int $maximumQueryTerms = null; +private ?int $maximumNumberOfTokensParsed = null; +private ?int $minimumWordLength = null; +private ?int $maximumWordLength = null; +private ?int $minimumDocumentFrequency = null; +private ?int $maximumDocumentFrequency = null; +private ?int $maximumDocumentFrequencyPercentage = null; +private ?bool $boost = null; +private ?float $boostFactor = null; +private ?string $stopWordsDocumentId = null; +private ?array $fields = null; +`} + + + +| Options | | | +| ------------- | ------------- | ----- | +| **$minimumTermFrequency** | `?int` | Ignores terms with less than this frequency in the source doc | +| **$maximumQueryTerms** | `?int` | Returns a query with no more than this many terms | +| **$maximumNumberOfTokensParsed** | `?int` | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support | +| **$minimumWordLength** | `?int` | Ignores words less than this length or, if 0, then this has no effect | +| **$maximumWordLength** | `?int` | Ignores words greater than this length or if 0 then this has no effect | +| **$minimumDocumentFrequency** | `?int` | Ignores words which do not occur in at least this many documents | +| **$maximumDocumentFrequency** | `?int` | Ignores words which occur in more than this many documents | +| **$maximumDocumentFrequencyPercentage** | `int?` | Ignores words which occur in more than this percentage of documents | +| **$boost** | `?bool` | Boost terms in query based on score | +| **$boostFactor** | `?float` | Boost factor when boosting based on score | +| **$stopWordsDocumentId** | `?string` | Document ID containing custom stop words | +| **$fields** | `?array` | Fields to compare | + +## Example I + + + + +{`// Search for similar articles to 'articles/1' +// using 'Articles/MoreLikeThis' index and search only field 'body' +$options = new MoreLikeThisOptions(); +$options->setFields([ "body" ]); + +/** @var array
<Article> $articles */
+$articles = $session
+    ->query(Article::class, Query::index("Articles/MoreLikeThis"))
+    ->moreLikeThis(function($builder) use ($options) {
+        $builder
+            ->usingDocument(function ($x) {
+                $x->whereEquals("id()", "articles/1");
+            })
+            ->withOptions($options);
+    })
+    ->toList();
+`}
+
+
+
+
+{`from index 'Articles/MoreLikeThis'
+where morelikethis(id() = 'articles/1', '{ "Fields" : [ "Body" ] }')
+`}
+
+
+
+
+## Example II
+
+
+
+
+{`// Search for similar articles to 'articles/1'
+// using 'Articles/MoreLikeThis' index and search only field 'body'
+// where article category is 'IT'
+$options = new MoreLikeThisOptions();
+$options->setFields([ "body" ]);
+/** @var array<Article>
$articles */ +$articles = $session + ->query(Article::class, Query::index("Articles/MoreLikeThis")) + ->moreLikeThis(function($builder) use ($options) { + $builder + ->usingDocument(function ($x) { + $x->whereEquals("id()", "articles/1"); + }) + ->withOptions($options); + }) + ->whereEquals("category", "IT") + ->toList(); +`} + + + + +{`from index 'Articles/MoreLikeThis' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "Body" ] }') and Category == 'IT' +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-python.mdx new file mode 100644 index 0000000000..b8667dc97a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-use-morelikethis-python.mdx @@ -0,0 +1,140 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`more_like_this` is available through query methods, and will return similar documents according +to the provided criteria and options. + +## Syntax + + + +{`def more_like_this( + self, more_like_this_or_builder: Union[MoreLikeThisBase, Callable[[MoreLikeThisBuilder[_T]], None]] +) -> DocumentQuery[_T]: ... +`} + + + +| `more_like_this_or_builder` parameter | Description | +| ------------- | ------------- | +| `MoreLikeThisBase` | Defines the type of MoreLikeThis that should be executed | +| `Callable[[MoreLikeThisBuilder[_T]], None]]` | Builder with fluent API that constructs the `MoreLikeThisBase` instance | + +### Builder + + + +{`def using_any_document(self) -> MoreLikeThisOperations[_T]: ... + +def using_document( + self, document_json_or_builder: Union[str, Callable[[DocumentQuery[_T]], None]] +) -> MoreLikeThisOperations[_T]: ... + +def with_options(self, options: MoreLikeThisOptions) -> MoreLikeThisOperations[_T]: ... +`} + + + +| Builder method | Parameter | Type | Description | +| ------------- | ------------- | ----- | ----- | +| **using_any_document** | | | | +| **using_document** | `document_json_or_builder` (Union) | `str` | Inline JSON document that will be used for the operation | +| **using_document** | `document_json_or_builder` (Union) | `Callable[[DocumentQuery[_T]], None]` | Filtering expression to find a document that will be used for the operation | +| **with_options** | `options` | `MoreLikeThisOptions` | Non-default options to be used by the operation | + +### Options + + + +{`def __init__( + self, + minimum_term_frequency: int = None, + maximum_query_terms: int = None, + maximum_number_of_tokens_parsed: int = None, + minimum_word_length: int = None, + maximum_word_length: int = None, + minimum_document_frequency: int = None, + maximum_document_frequency: int = None, + maximum_document_frequency_percentage: int = None, + boost: bool = None, + boost_factor: float = None, + stop_words_document_id: str = None, + fields: List[str] = None, +): ... 
+`} + + + +| Options | | | +| ------------- | ------------- | ----- | +| **minimum_term_frequency** | int | Ignores terms with less than this frequency in the source doc | +| **maximum_query_terms** | int | Returns a query with no more than this many terms | +| **maximum_number_of_tokens_parsed** | int | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support | +| **minimum_word_length** | int | Ignores words less than this length or, if 0, then this has no effect | +| **maximum_word_length** | int | Ignores words greater than this length or if 0 then this has no effect | +| **minimum_document_frequency** | int | Ignores words which do not occur in at least this many documents | +| **maximum_document_frequency** | int | Ignores words which occur in more than this many documents | +| **maximum_document_frequency_percentage** | int | Ignores words which occur in more than this percentage of documents | +| **boost** | bool | Boost terms in query based on score | +| **boost_factor** | float | Boost factor when boosting based on score | +| **stop_words_document_id** | str | Document ID containing custom stop words | +| **Fields** | List[str] | Fields to compare | + +## Example I + + + + +{`# Search for similar articles to 'articles/1' +# using 'Articles/MoreLikeThis' index and search only field 'body' +articles = list( + session.query_index_type(Articles_MoreLikeThis, Article).more_like_this( + lambda builder: builder.using_document( + lambda x: x.where_equals("Id", "articles/1") + ).with_options(MoreLikeThisOptions(fields=["body"])) + ) +) +`} + + + + +{`from index 'Articles/MoreLikeThis' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "Body" ] }') +`} + + + + +## Example II + + + + +{`# Search for similar articles to 'articles/1' +# using 'Articles/MoreLikeThis' index and search only field 'body' +# where article category is 'IT' +articles = list( + session.query_index_type(Articles_MoreLikeThis, Article) + .more_like_this( + lambda builder: builder.using_document( + lambda x: x.where_equals("Id", "articles/1") + ).with_options(MoreLikeThisOptions(fields=["body"])) + ) + .where_equals("category", "IT") +) +`} + + + + +{`from index 'Articles/MoreLikeThis' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "Body" ] }') and Category == 'IT' +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-csharp.mdx new file mode 100644 index 0000000000..2767086d97 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-csharp.mdx @@ -0,0 +1,621 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Given a string term, the Suggestion feature will offer **similar terms** from your data. + +* Word similarities are found using string distance algorithms. + +* Examples in this article demonstrate getting suggestions with a **dynamic-query**. + For getting suggestions with an **index-query** see [query for suggestions with index](../../../indexes/querying/suggestions.mdx). 
+* In this page: + + * Overview: + * [What are terms](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#what-are-terms) + * [When to use suggestions](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#when-to-use-suggestions) + + * Examples: + * [Suggest terms - for single term](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-single-term) + * [Suggest terms - for multiple terms](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-multiple-terms) + * [Suggest terms - for multiple fields](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-multiple-fields) + * [Suggest terms - customize options and display name](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---customize-options-and-display-name) + + * [The auto-index terms in Studio](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#the-auto-index-terms-in-studio) + * [Syntax](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#syntax) + + +## What are terms + +* All queries in RavenDB use an index - learn more about that [here](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). + Whether making a dynamic query which generates an auto-index or using a static index, + the data from your documents is 'broken' into **terms** that are kept in the index. + +* This tokenization process (what terms will be generated) depends on the analyzer used, + various analyzers differ in the way they split the text stream. Learn more in [Analyzers](../../../indexes/using-analyzers.mdx). + +* The terms can then be queried to retrieve matching documents that contain them. + + + +## When to use suggestions + +Querying for suggestions is useful in the following scenarios: + + * **When query has no results**: + + * When searching for documents that match some condition on a given string term, + if the term is misspelled then you will Not get any results. + You can then ask RavenDB to suggest similar terms that do exist in the index. + + * The suggested terms can then be used in a new query to retrieve matching documents, + or simply presented to the user asking what they meant to query. + + * **When looking for alternative terms**: + + * When simply searching for additional alternative terms for a term that does exist. + + + +The resulting suggested terms will Not include the term for which you search, +they will only contain the similar terms. + + + + + +## Suggest terms - for single term + +Consider this example: +Based on the Northwind sample data, the following query has no resulting documents, +as no document in the Products collection contains the term `chaig` in its `Name` field. + + + +{`// This dynamic query on the 'Products' collection has NO resulting documents +List products = session + .Query() + .Where(x => x.Name == "chaig") + .ToList(); +`} + + + +* Executing the above query will generate the auto-index `Auto/Products/ByName`. + This auto-index will contain a list of all available terms from the document field `Name`. + The generated terms are visible in the Studio - see image [below](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#the-auto-index-terms-in-studio). + +* If you suspect that the term `chaig` in the query criteria is written incorrectly, + you can ask RavenDB to suggest **existing terms** that are similar to `chaig`, as follows:. 
+ + + + +{`// Query for suggested terms for single term: +// ========================================== + +Dictionary suggestions = session + // Make a dynamic query on collection 'Products' + .Query() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + // Request to get terms from field 'Name' that are similar to 'chaig' + .ByField(x => x.Name, "chaig")) + .Execute(); +`} + + + + +{`// Query for suggested terms for single term: +// ========================================== + +Dictionary suggestions = await asyncSession + // Make a dynamic query on collection 'Products' + .Query() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + // Request to get terms from field 'Name' that are similar to 'chaig' + .ByField(x => x.Name, "chaig")) + .ExecuteAsync(); +`} + + + + +{`// Define the suggestion request for single term +var suggestionRequest = new SuggestionWithTerm("Name") +{ + // Looking for terms from field 'Name' that are similar to term 'chaig' + Term = "chaig" +}; + +// Query for suggestions +Dictionary suggestions = session + .Query() + // Call 'SuggestUsing' - pass the suggestion request + .SuggestUsing(suggestionRequest) + .Execute(); +`} + + + + +{`// Query for suggested terms for single term: +// ========================================== + +Dictionary suggestions = session.Advanced + // Make a dynamic document-query on collection 'Products' + .DocumentQuery() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + // Request to get terms from field 'Name' that are similar to 'chaig' + .ByField(x => x.Name, "chaig")) + .Execute(); +`} + + + + +{`// Query for terms from field 'Name' that are similar to 'chaig' +from "Products" +select suggest(Name, "chaig") +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +Console.WriteLine("Suggested terms in field 'Name' that are similar to 'chaig':"); +foreach (string suggestedTerm in suggestions["Name"].Suggestions) +\{ + Console.WriteLine("\\t\{0\}", suggestedTerm); +\} + +// Suggested terms in field 'Name' that are similar to 'chaig': +// chai +// chang +`} + + + + + +## Suggest terms - for multiple terms + + + + +{`// Query for suggested terms for multiple terms: +// ============================================= + +Dictionary suggestions = session + // Make a dynamic query on collection 'Products' + .Query() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + // Request to get terms from field 'Name' that are similar to 'chaig' OR 'tof' + .ByField(x => x.Name, new[] { "chaig", "tof" })) + .Execute(); +`} + + + + +{`// Query for suggested terms for multiple terms: +// ============================================= + +Dictionary suggestions = await asyncSession + // Make a dynamic query on collection 'Products' + .Query() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + // Request to get terms from field 'Name' that are similar to 'chaig' OR 'tof' + .ByField(x => x.Name, new[] { "chaig", "tof" })) + .ExecuteAsync(); +`} + + + + +{`// Define the suggestion request for multiple terms +var suggestionRequest = new SuggestionWithTerms("Name") +{ + // Looking for terms from field 'Name' that are similar to terms 'chaig' OR 'tof' + Terms = new[] { "chaig", "tof"} +}; + +// Query for suggestions +Dictionary suggestions = session + .Query() + // Call 'SuggestUsing' - pass the suggestion request + .SuggestUsing(suggestionRequest) + .Execute(); +`} + + + + +{`// Query for suggested terms for multiple terms: +// ============================================= + 
+Dictionary suggestions = session.Advanced + // Make a dynamic document-query on collection 'Products' + .DocumentQuery() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + // Request to get terms from field 'Name' that are similar to 'chaig' OR 'tof' + .ByField(x => x.Name, new[] { "chaig", "tof" })) + .Execute(); +`} + + + + +{`// Query for terms from field 'Name' that are similar to 'chaig' OR 'tof' +from "Products" select suggest(Name, $p0) +{ "p0" : ["chaig", "tof"] } +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +// Suggested terms in field 'Name' that are similar to 'chaig' OR to 'tof': +// chai +// chang +// tofu +`} + + + + + +## Suggest terms - for multiple fields + + + + +{`// Query for suggested terms in multiple fields: +// ============================================= + +Dictionary suggestions = session + // Make a dynamic query on collection 'Companies' + .Query() + // Call 'SuggestUsing' to get suggestions for terms that are + // similar to 'chop-soy china' in first document field (e.g. 'Name') + .SuggestUsing(builder => builder + .ByField(x => x.Name, "chop-soy china")) + // Call 'AndSuggestUsing' to get suggestions for terms that are + // similar to 'maria larson' in an additional field (e.g. 'Contact.Name') + .AndSuggestUsing(builder => builder + .ByField(x => x.Contact.Name, "maria larson")) + .Execute(); +`} + + + + +{`// Query for suggested terms in multiple fields: +// ============================================= + +Dictionary suggestions = await asyncSession + // Make a dynamic query on collection 'Companies' + .Query() + // Call 'SuggestUsing' to get suggestions for terms that are + // similar to 'chop-soy china' in first document field (e.g. 'Name') + .SuggestUsing(builder => builder + .ByField(x => x.Name, "chop-soy china")) + // Call 'AndSuggestUsing' to get suggestions for terms that are + // similar to 'maria larson' in an additional field (e.g. 'Contact.Name') + .AndSuggestUsing(builder => builder + .ByField(x => x.Contact.Name, "maria larson")) + .ExecuteAsync(); +`} + + + + +{`// Define suggestion requests for multiple fields: + +var request1 = new SuggestionWithTerm("Name") +{ + // Looking for terms from field 'Name' that are similar to 'chop-soy china' + Term = "chop-soy china" +}; + +var request2 = new SuggestionWithTerm("Contact.Name") +{ + // Looking for terms from nested field 'Contact.Name' that are similar to 'maria larson' + Term = "maria larson" +}; + +Dictionary suggestions = session + .Query() + // Call 'SuggestUsing' - pass the suggestion request for the first field + .SuggestUsing(request1) + // Call 'AndSuggestUsing' - pass the suggestion request for the second field + .AndSuggestUsing(request2) + .Execute(); +`} + + + + +{`// Query for suggested terms in multiple fields: +// ============================================= + +Dictionary suggestions = session.Advanced + // Make a dynamic document-query on collection 'Companies' + .DocumentQuery() + // Call 'SuggestUsing' to get suggestions for terms that are + // similar to 'chop-soy china' in first document field (e.g. 'Name') + .SuggestUsing(builder => builder + .ByField(x => x.Name, "chop-soy china")) + // Call 'AndSuggestUsing' to get suggestions for terms that are + // similar to 'maria larson' in an additional field (e.g. 
'Contact.Name') + .AndSuggestUsing(builder => builder + .ByField(x => x.Contact.Name, "maria larson")) + .Execute(); +`} + + + + +{`// Query for suggested terms from field 'Name' and field 'Contact.Name' +from "Companies" +select suggest(Name, "chop-soy china"), suggest(Contact.Name, "maria larson") +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +// Suggested terms in field 'Name' that is similar to 'chop-soy china': +// chop-suey chinese + +// Suggested terms in field 'Contact.Name' that are similar to 'maria larson': +// maria larsson +// marie bertrand +// aria cruz +// paula wilson +// maria anders +`} + + + + + +## Suggest terms - customize options and display name + + + + +{`// Query for suggested terms - customize options and display name: +// =============================================================== + +Dictionary suggestions = session + // Make a dynamic query on collection 'Products' + .Query() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + .ByField(x => x.Name, "chaig") + // Customize suggestions options + .WithOptions(new SuggestionOptions + { + Accuracy = 0.4f, + PageSize = 5, + Distance = StringDistanceTypes.JaroWinkler, + SortMode = SuggestionSortMode.Popularity + }) + // Customize display name for results + .WithDisplayName("SomeCustomName")) + .Execute(); +`} + + + + +{`// Query for suggested terms - customize options and display name: +// =============================================================== + +Dictionary suggestions = await asyncSession + // Make a dynamic query on collection 'Products' + .Query() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + .ByField(x => x.Name, "chaig") + // Customize suggestions options + .WithOptions(new SuggestionOptions + { + Accuracy = 0.4f, + PageSize = 5, + Distance = StringDistanceTypes.JaroWinkler, + SortMode = SuggestionSortMode.Popularity + }) + // Customize display name for results + .WithDisplayName("SomeCustomName")) + .ExecuteAsync(); +`} + + + + +{`// Define the suggestion request +var suggestionRequest = new SuggestionWithTerm("Name") +{ + // Looking for terms from field 'Name' that are similar to term 'chaig' + Term = "chaig", + // Customize options + Options = new SuggestionOptions + { + Accuracy = 0.4f, + PageSize = 5, + Distance = StringDistanceTypes.JaroWinkler, + SortMode = SuggestionSortMode.Popularity + }, + // Customize display name + DisplayField = "SomeCustomName" +}; + +// Query for suggestions +Dictionary suggestions = session + .Query() + // Call 'SuggestUsing' - pass the suggestion request + .SuggestUsing(suggestionRequest) + .Execute(); +`} + + + + +{`// Query for suggested terms - customize options and display name: +// =============================================================== + +Dictionary suggestions = session.Advanced + // Make a dynamic query on collection 'Products' + .DocumentQuery() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + .ByField(x => x.Name, "chaig") + // Customize suggestions options + .WithOptions(new SuggestionOptions + { + Accuracy = 0.4f, + PageSize = 5, + Distance = StringDistanceTypes.JaroWinkler, + SortMode = SuggestionSortMode.Popularity + }) + // Customize display name for results + .WithDisplayName("SomeCustomName")) + .Execute(); +`} + + + + +{`// Query for suggested terms - customize options and display name +from "Products" +select suggest( + Name, + 'chaig', + '{ "Accuracy" : 0.4, "PageSize" : 5, "Distance" : "JaroWinkler", "SortMode" : "Popularity" }' +) as "SomeCustomName" +`} 
+ + + + + + +{`// The resulting suggested terms: +// ============================== + +Console.WriteLine("Suggested terms:"); +// Results are available under the custom name entry +foreach (string suggestedTerm in suggestions["SomeCustomName"].Suggestions) +\{ + Console.WriteLine("\\t\{0\}", suggestedTerm); +\} + +// Suggested terms: +// chai +// chang +// chartreuse verte +`} + + + + + +## The auto-index terms in Studio + +Based on the Northwind sample data, these are the terms generated for index `Auto/Products/ByName`: + +![Figure 1. Auto-index terms](./assets/auto-index-terms.png) + +1. **The field name** - derived from the document field that was used in the dynamic-query. + In this example the field name is `Name`. + +2. **The terms** generated from the data that the Products collection documents have in their `Name` field. + + + +## Syntax + +**Suggest using**: + + + +{`// Overloads for requesting suggestions for term(s) in a field: +ISuggestionQuery SuggestUsing(SuggestionBase suggestion); +ISuggestionQuery SuggestUsing(Action> builder); + +// Overloads requesting suggestions for term(s) in another field in the same query: +ISuggestionQuery AndSuggestUsing(SuggestionBase suggestion); +ISuggestionQuery AndSuggestUsing(Action> builder); +`} + + + +| Parameter | Type | Description | +|----------------|----------------------------------------------|-------------------------------------------------------------------------------| +| **suggestion** | `SuggestionWithTerm` / `SuggestionWithTerms` | An instance of `SuggestionBase`.
Defines the type of suggestion requested. |
+| **builder** | `Action<ISuggestionBuilder<T>>` | Builder with a fluent API that constructs a `SuggestionBase` instance. |
+
+**Builder operations**:
+
+
+
+{`ISuggestionOperations<T> ByField(string fieldName, string term);
+ISuggestionOperations<T> ByField(string fieldName, string[] terms);
+ISuggestionOperations<T> ByField(Expression<Func<T, object>> path, string term);
+ISuggestionOperations<T> ByField(Expression<Func<T, object>> path, string[] terms);
+
+ISuggestionOperations<T> WithDisplayName(string displayName);
+ISuggestionOperations<T> WithOptions(SuggestionOptions options);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------|-------------------------------|---------------------------------------------------------|
+| **fieldName** | `string` | The index field in which to search for similar terms |
+| **path** | `Expression<Func<T, object>>` | The index field in which to search for similar terms |
+| **term** | `string` | The term for which to get suggested similar terms |
+| **terms** | `string[]` | List of terms for which to get suggested similar terms |
+| **displayName** | `string` | A custom name for the suggestions result (optional). |
+| **options** | `SuggestionOptions` | Non-default options to use in the operation (optional). |
+
+**Suggestions options**:
+
+
+
+{`public int PageSize \{ get; set; \}
+public StringDistanceTypes? Distance \{ get; set; \}
+public float? Accuracy \{ get; set; \}
+public SuggestionSortMode SortMode \{ get; set; \}
+`}
+
+
+
+| Option | Type | Description |
+|--------------|-----------------------|-------------|
+| **PageSize** | `int` | <br/>  • Maximum number of suggested terms that will be returned.<br/>  • Default is 15<br/> |
+| **Distance** | `StringDistanceTypes` | <br/>  • String distance algorithm to use.<br/>  • `None` / `Levenshtein` / `JaroWinkler` / `NGram`<br/>  • Default is Levenshtein.<br/> |
+| **Accuracy** | `float?` | <br/>  • Suggestion accuracy.<br/>  • Default is 0.5f<br/> |
+| **SortMode** | `SuggestionSortMode` | <br/>  • Indicates the order by which results are returned.<br/>  • `None` / `Popularity`<br/>  • Default is Popularity.<br/>
| diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-java.mdx new file mode 100644 index 0000000000..8b0c012ec9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-java.mdx @@ -0,0 +1,121 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The `suggestion` feature is available through query extension methods. It gives you the ability to find word similarities using string distance algorithms. + +## Syntax + + + +{`ISuggestionDocumentQuery suggestUsing(SuggestionBase suggestion); + +ISuggestionDocumentQuery suggestUsing(Consumer> builder); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **suggestion** | `SuggestionBase` | Defines the type of suggestion that should be executed | +| **builder** | `Consumer>` | Builder with a fluent API that constructs a `SuggestionBase` instance | + +### Builder + + + +{`ISuggestionOperations byField(String fieldName, String term); + +ISuggestionOperations byField(String fieldName, String[] terms); + +ISuggestionOperations withOptions(SuggestionOptions options); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **fieldName** | String | Points to the index field that should be used for operation | +| **term** | String | Term that will be used as a basis of the suggestions | +| **terms** | String[] | Terms that will be used as a basis of the suggestions | +| **options** | `SuggestionOptions` | Non-default options that should be used for operation | + +### Options + + + +{`private int pageSize = 15; + +private StringDistanceTypes distance = StringDistanceTypes.LEVENSHTEIN; + +private Float accuracy = 0.5f; + +private SuggestionSortMode sortMode = SuggestionSortMode.POPULARITY; + +// getters and setters for fields listed above +`} + + + +| Options | | | +| ------------- | ------------- | ----- | +| **pageSize** | int | Maximum number of suggestions that will be returned | +| **distance** | `StringDistanceTypes` | String distance algorithm to use (`NONE`, `LEVENSTEIN`, `JARO_WINKLER`, `N_GRAM`) | +| **accuracy** | Float | Suggestion accuracy | +| **sortMode** | `SuggestionSortMode` | Indicates in what order the results should be returned (`None`, `Popularity`) | + +## Example I + + + + +{`SuggestionOptions options = new SuggestionOptions(); +options.setAccuracy(0.4f); +options.setPageSize(5); +options.setDistance(StringDistanceTypes.JARO_WINKLER); +options.setSortMode(SuggestionSortMode.POPULARITY); + +Map suggestions = session + .query(Employee.class, Employees_ByFullName.class) + .suggestUsing(builder -> + builder.byField("FullName", "johne") + .withOptions(options)) + .execute(); +`} + + + + +{`from index 'Employees/ByFullName' +select suggest('FullName', 'johne', '{ "Accuracy" : 0.4, "PageSize" : 5, "Distance" : "JaroWinkler", "SortMode" : "Popularity" }') +`} + + + + +## Example II + + + + +{`SuggestionWithTerm suggestionWithTerm = new SuggestionWithTerm("FullName"); +suggestionWithTerm.setTerm("johne"); + +Map suggestions = session + .query(Employee.class, Employees_ByFullName.class) + .suggestUsing(suggestionWithTerm) + .execute(); +`} + + + + +{`from index 'Employees/ByFullName' +select suggest('FullName', 'johne') +`} + + + + + diff --git 
a/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-nodejs.mdx new file mode 100644 index 0000000000..6b6e6f9cc3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-nodejs.mdx @@ -0,0 +1,358 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Given a string term, the Suggestion feature will offer **similar terms** from your data. + +* Word similarities are found using string distance algorithms. + +* Examples in this article demonstrate getting suggestions with a **dynamic-query**. + For getting suggestions with an **index-query** see [query for suggestions with index](../../../indexes/querying/suggestions.mdx). +* In this page: + + * Overview: + * [What are terms](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#what-are-terms) + * [When to use suggestions](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#when-to-use-suggestions) + + * Examples: + * [Suggest terms - for single term](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-single-term) + * [Suggest terms - for multiple terms](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-multiple-terms) + * [Suggest terms - for multiple fields](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-multiple-fields) + * [Suggest terms - customize options and display name](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---customize-options-and-display-name) + + * [The auto-index terms in Studio](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#the-auto-index-terms-in-studio) + * [Syntax](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#syntax) + + +## What are terms + +* All queries in RavenDB use an index - learn more about that [here](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). + Whether making a dynamic query which generates an auto-index or using a static index, + the data from your documents is 'broken' into **terms** that are kept in the index. + +* This tokenization process (what terms will be generated) depends on the analyzer used, + various analyzers differ in the way they split the text stream. Learn more in [Analyzers](../../../indexes/using-analyzers.mdx). + +* The terms can then be queried to retrieve matching documents that contain them. + + + +## When to use suggestions + +Querying for suggestions is useful in the following scenarios: + + * **When query has no results**: + + * When searching for documents that match some condition on a given string term, + if the term is misspelled then you will Not get any results. + You can then ask RavenDB to suggest similar terms that do exist in the index. + + * The suggested terms can then be used in a new query to retrieve matching documents, + or simply presented to the user asking what they meant to query. + + * **When looking for alternative terms**: + + * When simply searching for additional alternative terms for a term that does exist. + + + +The resulting suggested terms will Not include the term for which you search, +they will only contain the similar terms. 
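+
+As a quick illustration of that flow (a sketch only, reusing the same Northwind 'Products' collection and the `suggestUsing` / `byField` calls shown in the examples below), a suggested term can be fed straight back into a follow-up query:
+
+{`// Sketch: ask for terms similar to the misspelled 'chaig', then
+// re-query with the first suggested term (e.g. 'chai'), if any exists.
+const suggestionResults = await session
+    .query({ collection: "Products" })
+    .suggestUsing(x => x.byField("Name", "chaig"))
+    .execute();
+
+const [firstSuggestion] = suggestionResults["Name"].suggestions;
+if (firstSuggestion) {
+    const products = await session
+        .query({ collection: "Products" })
+        .whereEquals("Name", firstSuggestion)
+        .all();
+}
+`}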
+ + + + + +## Suggest terms - for single term + +Consider this example: +Based on the **Northwind sample data**, the following query has no resulting documents, +as no document in the Products collection contains the term `chaig` in its `Name` field. + + + +{`// This dynamic query on the 'Products' collection has NO resulting documents +const products = await session + .query(\{ collection: "Products" \}) + .whereEquals("Name", "Chai") + .all(); +`} + + + +* Executing the above query will generate the auto-index `Auto/Products/ByName`. + This auto-index will contain a list of all available terms from the document field `Name`. + The generated terms are visible in the Studio - see image [below](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#the-auto-index-terms-in-studio). + +* If you suspect that the term `chaig` in the query criteria is written incorrectly, + you can ask RavenDB to suggest **existing terms** that are similar to `chaig`, as follows:. + + + + +{`// Query for suggested terms for single term: +// ========================================== + +const suggestions = await session + // Make a dynamic query on collection 'Products' + .query({ collection: "Products" }) + // Call 'suggestUsing' + .suggestUsing(x => x + // Request to get terms from field 'Name' that are similar to 'chaig' + .byField("Name", "chaig")) + .execute(); +`} + + + + +{`// Query for terms from field 'Name' that are similar to 'chaig' +from "Products" +select suggest(Name, "chaig") +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +console.log("Suggested terms in field 'Name' that are similar to 'chaig':"); +suggestions["Name"].suggestions.forEach(suggestedTerm => \{ + console.log("\\t" + suggestedTerm); +\}); + +// Suggested terms in field 'Name' that are similar to 'chaig': +// chai +// chang +`} + + + + + +## Suggest terms - for multiple terms + + + + +{`// Query for suggested terms for multiple terms: +// ============================================= + +const suggestions = await session + // Make a dynamic query on collection 'Products' + .query({ collection: "Products" }) + // Call 'suggestUsing' + .suggestUsing(x => x + // Request to get terms from field 'Name' that are similar to 'chaig' OR 'tof' + .byField("Name", ["chaig", "tof"])) + .execute(); +`} + + + + +{`// Query for terms from field 'Name' that are similar to 'chaig' OR 'tof' +from "Products" select suggest(Name, $p0) +{ "p0" : ["chaig", "tof"] } +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +// Suggested terms in field 'Name' that are similar to 'chaig' OR to 'tof': +// chai +// chang +// tofu +`} + + + + + +## Suggest terms - for multiple fields + + + + +{`// Query for suggested terms in multiple fields: +// ============================================= + +const suggestions = await session + // Make a dynamic query on collection 'Companies' + .query({ collection: "Companies" }) + // Call 'suggestUsing' to get suggestions for terms that are + // similar to 'chop-soy china' in first document field (e.g. 'Name') + .suggestUsing(x => x + .byField("Name", "chop-soy china")) + // Call 'AndSuggestUsing' to get suggestions for terms that are + // similar to 'maria larson' in an additional field (e.g. 
'Contact.Name') + .andSuggestUsing(x => x + .byField("Contact.Name", "maria larson")) + .execute(); +`} + + + + +{`// Query for suggested terms from field 'Name' and field 'Contact.Name' +from "Companies" +select suggest(Name, "chop-soy china"), suggest(Contact.Name, "maria larson") +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +// Suggested terms in field 'Name' that is similar to 'chop-soy china': +// chop-suey chinese + +// Suggested terms in field 'Contact.Name' that are similar to 'maria larson': +// maria larsson +// marie bertrand +// aria cruz +// paula wilson +// maria anders +`} + + + + + +## Suggest terms - customize options and display name + + + + +{`// Query for suggested terms - customize options and display name: +// =============================================================== + +const suggestions = await session + // Make a dynamic query on collection 'Products' + .query({ collection: "Products" }) + // Call 'suggestUsing' + .suggestUsing(x => x + .byField("Name", "chaig") + // Customize suggestions options + .withOptions({ + accuracy: 0.4, + pageSize: 5, + distance: "JaroWinkler", + sortMode: "Popularity" + }) + // Customize display name for results + .withDisplayName("SomeCustomName")) + .execute(); +`} + + + + +{`// Query for suggested terms - customize options and display name +from "Products" +select suggest( + Name, + 'chaig', + '{ "Accuracy" : 0.4, "PageSize" : 5, "Distance" : "JaroWinkler", "SortMode" : "Popularity" }' +) as "SomeCustomName" +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +console.log("Suggested terms:"); +// Results are available under the custom name entry +suggestions["SomeCustomName"].suggestions.forEach(suggestedTerm => \{ + console.log("\\t" + suggestedTerm); +\}); + +// Suggested terms: +// chai +// chang +// chartreuse verte +`} + + + + + +## The auto-index terms in Studio + +Based on the Northwind sample data, these are the terms generated for index `Auto/Products/ByName`: + +![Figure 1. Auto-index terms](./assets/auto-index-terms.png) + +1. **The field name** - derived from the document field that was used in the dynamic-query. + In this example the field name is `Name`. + +2. **The terms** generated from the data that the Products collection documents have in their `Name` field. + + + +## Syntax + +**Suggest using**: + + + +{`// Requesting suggestions for term(s) in a field: +suggestUsing(action); + +// Requesting suggestions for term(s) in another field in the same query: +andSuggestUsing(action); +`} + + + +| Parameter | Type | Description | +|-------------|---------------------|---------------------------------------------------------------------------------| +| **action** | `(builder) => void` | Builder function with a fluent API that constructs a `SuggestionBase` instance. | + +**Builder operations**: + + + +{`byField(fieldName, term); +byField(fieldName, terms); + +withDisplayName(displayName); +withOptions(options); +`} + + + +| Parameter | Type | Description | +|-----------------|------------|----------------------------------------------------------------------------------------------------| +| **fieldName** | `string` | The index field in which to search for similar terms | +| **term** | `string` | The term for which to get suggested similar terms | +| **terms** | `string[]` | List of terms for which to get suggested similar terms | +| **displayName** | `string` | A custom name for the suggestions result (optional). 
| **options** | `object` | Non-default suggestion options to use in the operation (optional).<br/>See available options below. |

**Suggestions options**:

| Option | Type | Description |
|--------------|----------|-------------|
| **pageSize** | `number` | Maximum number of suggested terms that will be returned.<br/>Default is `15`. |
| **distance** | `string` | String distance algorithm to use.<br/>`None` / `Levenshtein` / `JaroWinkler` / `NGram`<br/>Default is `Levenshtein`. |
| **accuracy** | `number` | Suggestion accuracy.<br/>Default is `0.5`. |
| **sortMode** | `string` | Indicates the order by which results are returned.<br/>`None` / `Popularity`<br/>Default is `Popularity`.
| diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-php.mdx new file mode 100644 index 0000000000..dbcb968285 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-php.mdx @@ -0,0 +1,270 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Given a string term, the Suggestion feature will offer **similar terms** from your data. + +* Word similarities are found using string distance algorithms. + +* Examples in this article demonstrate getting suggestions with a **dynamic-query**. + For getting suggestions with an **index-query** see [query for suggestions with index](../../../indexes/querying/suggestions.mdx). +* In this page: + + * Overview: + * [What are terms](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#what-are-terms) + * [When to use suggestions](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#when-to-use-suggestions) + + * Examples: + * [Suggest terms - for a single term](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-a-single-term) + * [Suggest terms - for multiple terms](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-multiple-terms) + + * [The auto-index terms in Studio](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#the-auto-index-terms-in-studio) + * [Syntax](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#syntax) + + +## What are terms + +* All queries in RavenDB use an index - learn more about that [here](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). + Whether making a dynamic query which generates an auto-index or using a static index, + the data from your documents is 'broken' into **terms** that are kept in the index. + +* This tokenization process (what terms will be generated) depends on the analyzer used, + various analyzers differ in the way they split the text stream. Learn more in [Analyzers](../../../indexes/using-analyzers.mdx). + +* The terms can then be queried to retrieve matching documents that contain them. + + + +## When to use suggestions + +Querying for suggestions is useful in the following scenarios: + + * **When query has no results**: + + * When searching for documents that match some condition on a given string term, + if the term is misspelled then you will Not get any results. + You can then ask RavenDB to suggest similar terms that do exist in the index. + + * The suggested terms can then be used in a new query to retrieve matching documents, + or simply presented to the user asking what they meant to query. + + * **When looking for alternative terms**: + + * When simply searching for additional alternative terms for a term that does exist. + + + +The resulting suggested terms will Not include the term for which you search, +they will only contain the similar terms. + + + + + +## Suggest terms - for a single term + +Consider this example: +Based on the Northwind sample data, the following query has no resulting documents, +as no document in the Products collection contains the term `chaig` in its `Name` field. 
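Below is a minimal sketch of such a query in PHP, assuming the Northwind `Product` entity class and the session query API used elsewhere in this article:

{`// This dynamic query on the Products collection has NO resulting documents
$products = $session
    ->query(Product::class)
    ->whereEquals("Name", "chaig")
    ->toList();
`}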
+ + + +{`/** + * Usage: + * - suggestUsing(SuggestionBase $suggestion); + * - suggestUsing(Closure $suggestionBuilder); + * + * @param SuggestionBase|Closure|null $suggestionOrBuilder + * @return SuggestionDocumentQueryInterface + */ +public function suggestUsing(null|SuggestionBase|Closure $suggestionOrBuilder): SuggestionDocumentQueryInterface; +`} + + + +* Executing the above query will generate the auto-index `Auto/Products/ByName`. + This auto-index will contain a list of all available terms from the document field `Name`. + The generated terms are visible in the Studio - see image [below](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#the-auto-index-terms-in-studio). + +* If you suspect that the term `chaig` in the query criteria is written incorrectly, + you can ask RavenDB to suggest **existing terms** that are similar to `chaig`, as follows:. + + + + +{`/** + * Usage: + * - byField("fieldName", "term"); + * - byField("fieldName", ["term1", "term2"]); + */ +function byField(?string $fieldName, null|string|StringArray|array $terms): SuggestionOperationsInterface; + +function withOptions(?SuggestionOptions $options): SuggestionOperationsInterface; +`} + + + + +{`$options = new SuggestionOptions(); +$options->setAccuracy(0.4); +$options->setPageSize(5); +$options->setDistance(StringDistanceTypes::jaroWinkler()); +$options->setSortMode(SuggestionSortMode::popularity()); + +/** @var array $suggestions */ +$suggestions = $session + ->query(Employee::class, Employees_ByFullName::class) + ->suggestUsing(function($builder) use ($options) { + $builder->byField("FullName", "johne") + ->withOptions($options); + }) + ->execute(); +`} + + + + +{`// Query for terms from field 'Name' that are similar to 'chaig' +from "Products" +select suggest(Name, "chaig") +`} + + + + + + +## Suggest terms - for multiple terms + + + + +{`private int $pageSize = 15; +private ?StringDistanceTypes $distance = null; +private float $accuracy = 0.5; +private ?SuggestionSortMode $sortMode = null; + +public function __construct() +{ + $distance = StringDistanceTypes::levenshtein(); + $sortMode = SuggestionSortMode::popularity(); + ... +} + +// getters and setters for fields listed above +`} + + + + +{`$suggestionWithTerm = new SuggestionWithTerm("FullName"); +$suggestionWithTerm->setTerm("johne"); + +/** @var array $suggestions */ +$suggestions = $session + ->query(Employee::class, Employees_ByFullName::class) + ->suggestUsing($suggestionWithTerm) + ->execute(); +`} + + + + +{`// Query for terms from field 'Name' that are similar to 'chaig' OR 'tof' +from "Products" select suggest(Name, $p0) +{ "p0" : ["chaig", "tof"] } +`} + + + + + + +## The auto-index terms in Studio + +Based on the Northwind sample data, these are the terms generated for index `Auto/Products/ByName`: + +![Figure 1. Auto-index terms](./assets/auto-index-terms.png) + +1. **The field name** - derived from the document field that was used in the dynamic-query. + In this example the field name is `Name`. + +2. **The terms** generated from the data that the Products collection documents have in their `Name` field. 
+ + + +## Syntax + +**Suggest using**: + + + +{`/** + * Usage: + * - suggestUsing(SuggestionBase $suggestion); + * - suggestUsing(Closure $suggestionBuilder); + * + * @param SuggestionBase|Closure|null $suggestionOrBuilder + * @return SuggestionDocumentQueryInterface + */ +public function suggestUsing(null|SuggestionBase|Closure $suggestionOrBuilder): SuggestionDocumentQueryInterface; +`} + + + +| Parameter | Type | Description | +|----------------|----------------------------------------------|-------------------------------------------------------------| +| **$suggestionOrBuilder** | `SuggestionBase` | An instance of `SuggestionBase`.
Defines the type of suggestion requested. |
| **$suggestionOrBuilder** | `Closure` | Builder with a fluent API that constructs a `SuggestionBase` instance. |

**Builder operations**:

{`/**
 * Usage:
 * - byField("fieldName", "term");
 * - byField("fieldName", ["term1", "term2"]);
 */
function byField(?string $fieldName, null|string|StringArray|array $terms): SuggestionOperationsInterface;

function withOptions(?SuggestionOptions $options): SuggestionOperationsInterface;
`}

| Parameter | Type | Description |
|-----------------|---------------------------------|------------------------------------------------------|
| **$fieldName** | `?string` | The index field in which to search for similar terms |
| **$terms** | `null` / `string` / `StringArray` /
`array` | List of terms to get suggested similar terms for | +| **$options** | `?SuggestionOptions` | Non-default options to use in the operation (optional) | + +**Suggestions options**: + + + +{`/** + * Usage: + * - byField("fieldName", "term"); + * - byField("fieldName", ["term1", "term2"]); + */ +function byField(?string $fieldName, null|string|StringArray|array $terms): SuggestionOperationsInterface; + +function withOptions(?SuggestionOptions $options): SuggestionOperationsInterface; +`} + + + +| Option | Type | Description | +|--------------|-------------------------|---------------------------------------------| +| **$pageSize** | `int` | Maximum number of suggested terms to return | +| **$distance** | `?StringDistanceTypes` | String distance algorithm to use | +| **$accuracy** | `float` | Suggestion accuracy | +| **$sortMode** | `?SuggestionSortMode` | Order to return results by | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-python.mdx new file mode 100644 index 0000000000..e9e99cf398 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_how-to-work-with-suggestions-python.mdx @@ -0,0 +1,462 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Given a string term, the Suggestion feature will offer **similar terms** from your data. + +* Word similarities are found using string distance algorithms. + +* Examples in this article demonstrate getting suggestions with a **dynamic-query**. + For getting suggestions with an **index-query** see [query for suggestions with index](../../../indexes/querying/suggestions.mdx). +* In this page: + + * Overview: + * [What are terms](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#what-are-terms) + * [When to use suggestions](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#when-to-use-suggestions) + + * Examples: + * [Suggest terms - for single term](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-single-term) + * [Suggest terms - for multiple terms](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-multiple-terms) + * [Suggest terms - for multiple fields](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---for-multiple-fields) + * [Suggest terms - customize options and display name](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#suggest-terms---customize-options-and-display-name) + + * [The auto-index terms in Studio](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#the-auto-index-terms-in-studio) + * [Syntax](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#syntax) + + +## What are terms + +* All queries in RavenDB use an index - learn more about that [here](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). + Whether making a dynamic query which generates an auto-index or using a static index, + the data from your documents is 'broken' into **terms** that are kept in the index. + +* This tokenization process (what terms will be generated) depends on the analyzer used, + various analyzers differ in the way they split the text stream. 
Learn more in [Analyzers](../../../indexes/using-analyzers.mdx). + +* The terms can then be queried to retrieve matching documents that contain them. + + + +## When to use suggestions + +Querying for suggestions is useful in the following scenarios: + + * **When query has no results**: + + * When searching for documents that match some condition on a given string term, + if the term is misspelled then you will Not get any results. + You can then ask RavenDB to suggest similar terms that do exist in the index. + + * The suggested terms can then be used in a new query to retrieve matching documents, + or simply presented to the user asking what they meant to query. + + * **When looking for alternative terms**: + + * When simply searching for additional alternative terms for a term that does exist. + + + +The resulting suggested terms will Not include the term for which you search, +they will only contain the similar terms. + + + + + +## Suggest terms - for single term + +Consider this example: +Based on the Northwind sample data, the following query has no resulting documents, +as no document in the Products collection contains the term `chaig` in its `Name` field. + + + +{`# This dynamic query on the 'Products' collection has NO documents +products = list(session.query(object_type=Product).where_equals("name", "chaig")) +`} + + + +* Executing the above query will generate the auto-index `Auto/Products/ByName`. + This auto-index will contain a list of all available terms from the document field `Name`. + The generated terms are visible in the Studio - see image [below](../../../client-api/session/querying/how-to-work-with-suggestions.mdx#the-auto-index-terms-in-studio). + +* If you suspect that the term `chaig` in the query criteria is written incorrectly, + you can ask RavenDB to suggest **existing terms** that are similar to `chaig`, as follows:. 
+ + + + +{`# Query for suggested terms for single term: +# ========================================== +suggestions = ( + session.query(object_type=Product) + .suggest_using(lambda builder: builder.by_field("name", "chaig")) + .execute() +) +`} + + + + +{`# Define the suggestion request for single term +suggestion_request = SuggestionWithTerm("name") +suggestion_request.term = "chaig" + +# Query for suggestions +suggestions = ( + session.query(object_type=Product) + # Call 'suggest_using' - pass the suggestion request + .suggest_using(suggestion_request).execute() +) +`} + + + + +{`// Query for terms from field 'Name' that are similar to 'chaig' +from "Products" +select suggest(Name, "chaig") +`} + + + + + + +{`# The resulting suggested terms: +# ============================== + +print("Suggested terms in field 'name' that are similar to 'chaig':") +for suggested_term in suggestions["name"].suggestions: + print(f"\\t\{suggested_term\}") + +# Suggested terms in field 'Name' that are similar to 'chaig': +# chai +# chang +`} + + + + + +## Suggest terms - for multiple terms + + + + +{`# Query for suggested terms for multiple terms: +# ============================================= + +suggestions = ( + session + # Make a dynamic query on collection 'Products' + .query(object_type=Product) + # Call 'suggest_using' + .suggest_using( + lambda builder: builder + # Request to get terms from field 'name' that are similar to 'chaig' OR 'tof' + .by_field("name", ["chaig", "tof"]) + ).execute() +) +`} + + + + +{`# Define the suggestion request for multiple terms +suggestion_request = SuggestionWithTerms("name") +# Looking for terms from field 'name' that are similar to 'chaig' OR 'tof' +suggestion_request.terms = ["chaig", "tof"] + +# Query for suggestions +suggestions = ( + session.query(object_type=Product) + # Call 'suggest_using' - pass the suggestion request + .suggest_using(suggestion_request).execute() +) +`} + + + + +{`// Query for terms from field 'Name' that are similar to 'chaig' OR 'tof' +from "Products" select suggest(Name, $p0) +{ "p0" : ["chaig", "tof"] } +`} + + + + + + +{`# The resulting suggested terms: +# ============================== +# +# Suggested terms in field 'Name' that are similar to 'chaig' OR to 'tof': +# chai +# chang +# tofu +`} + + + + + +## Suggest terms - for multiple fields + + + + +{`# Query for suggested terms in multiple fields: +# ============================================= + +suggestions = ( + session + # Make a dynamic query on collection 'Companies' + .query(object_type=Company) + # Call 'suggest_using' to get suggestions for terms that are + # similar to 'chop-soy china' in first document field (e.g. 'name') + .suggest_using(lambda builder: builder.by_field("name", "chop-soy china")) + # Call 'and_suggest_using' to get suggestions for terms that are + # similar to 'maria larson' in an additional field (e.g. 
'Contact.Name') + .and_suggest_using(lambda builder: builder.by_field("contact.name", "maria larson")).execute() +) +`} + + + + +{`# Define suggestion requests for multiple fields: + +request1 = SuggestionWithTerm("name") +# Looking for terms from field 'Name' that are similar to 'chop-soy china' +request1.term = "chop-soy china" + +request2 = SuggestionWithTerm("contact.name") +# Looking for terms from nested field 'Contact.Name' that are similar to 'maria larson' +request2.term = ["maria larson"] + +suggestions = ( + session.query(object_type=Company) + # Call 'suggest_using' - pass the suggestion request for the first field + .suggest_using(request1) + # Call 'and_suggest_using' - pass the suggestion request for the second field + .and_suggest_using(request2).execute() +) +`} + + + + +{`// Query for suggested terms from field 'Name' and field 'Contact.Name' +from "Companies" +select suggest(Name, "chop-soy china"), suggest(Contact.Name, "maria larson") +`} + + + + + + +{`# The resulting suggested terms: +# ============================== +# +# Suggested terms in field 'name' that is similar to 'chop-soy china': +# chop-suey chinese +# +# Suggested terms in field 'contact.name' that are similar to 'maria larson': +# maria larsson +# marie bertrand +# aria cruz +# paula wilson +# maria anders +`} + + + + + +## Suggest terms - customize options and display name + + + + +{`# Query for suggested terms - customize options and display name: +# =============================================================== +suggestions = ( + session + # Make a dynamic query on collection 'Products' + .query(object_type=Product) + # Call 'suggest_using' + .suggest_using( + lambda builder: builder.by_field("name", "chaig") + # Customize suggestion options + .with_options( + SuggestionOptions( + accuracy=0.4, + page_size=5, + distance=StringDistanceTypes.JARO_WINKLER, + sort_mode=SuggestionSortMode.POPULARITY, + ) + ) + # Customize display name for results + .with_display_name("SomeCustomName") + ).execute() +) +`} + + + + +{`# Define the suggestion request +suggestion_request = SuggestionWithTerm("name") +# Looking for terms from field 'Name' that are similar to term 'chaig' +suggestion_request.term = "chaig" +# Customize options +suggestion_request.options = SuggestionOptions( + accuracy=5, + page_size=5, + distance=StringDistanceTypes.JARO_WINKLER, + sort_mode=SuggestionSortMode.POPULARITY, +) +# Customize display name +suggestion_request.display_field = "SomeCustomName" + +# Query for suggestions +suggestions = ( + session.query(object_type=Product) + # Call 'suggest_using' - pass the suggestion request + .suggest_using(suggestion_request).execute() +) +`} + + + + +{`// Query for suggested terms - customize options and display name +from "Products" +select suggest( + Name, + 'chaig', + '{ "Accuracy" : 0.4, "PageSize" : 5, "Distance" : "JaroWinkler", "SortMode" : "Popularity" }' +) as "SomeCustomName" +`} + + + + + + +{`# The resulting suggested terms: +# ============================== + +print("Suggested terms:") +# Results are available under the custom name entry +for suggested_term in suggestions["SomeCustomName"].suggestions: + print(f"\\t\{suggested_term\}") + +# Suggested terms: +# chai +# chang +# chartreuse verte +`} + + + + + +## The auto-index terms in Studio + +Based on the Northwind sample data, these are the terms generated for index `Auto/Products/ByName`: + +![Figure 1. Auto-index terms](./assets/auto-index-terms.png) + +1. 
**The field name** - derived from the document field that was used in the dynamic-query. + In this example the field name is `Name`. + +2. **The terms** generated from the data that the Products collection documents have in their `Name` field. + + + +## Syntax + +**Suggest using**: + + + +{`# Method for requesting suggestions for term(s) in a field: +def suggest_using( + self, suggestion_or_builder: Union[SuggestionBase, Callable[[SuggestionBuilder[_T]], None]] +) -> SuggestionDocumentQuery[_T]: ... + +# Method for requesting suggestions for term(s) in another field in the same query: +def and_suggest_using( + self, suggestion_or_builder: Union[SuggestionBase, Callable[[SuggestionBuilder[_T]], None]] +) -> SuggestionDocumentQuery[_T]: ... +`} + + + +| Parameter | Type | Description | +|----------------|----------------------------------------------|--------------| +| **suggestion_or_builder**
(Union) | `SuggestionBase` | **Suggestion instance**<br/>Pass `suggest_using` a `SuggestionBase` instance (`SuggestionWithTerm` or `SuggestionWithTerms`) holding the term or terms it will generate suggestions for. |
| | `Callable[[SuggestionBuilder[_T]], None]` | **Suggestion builder**<br/>Use `suggest_using`'s fluent API by passing it a method that takes a `SuggestionBuilder` parameter and builds a suggestion definition that matches your needs. |

| Return type | Description |
|----------------|--------------|
| `SuggestionDocumentQuery[_T]` | The generated suggestions query, which can now be executed using `execute()` or further altered.<br/>
When `execute()` is called, it will return the suggestions in a `Dict[str, SuggestionResult]` dictionary. | + + +**Builder operations**: + + + +{`def by_field(self, field_name: str, term_or_terms: Union[str, List[str]]) -> SuggestionOperations[_T]: ... + +def with_display_name(self, display_name: str) -> SuggestionOperations[_T]: ... +def with_options(self, options: SuggestionOptions) -> SuggestionOperations[_T]: ... +`} + + + +| Parameter | Type | Description | +|-----------------|--------------------------------|---------------------------------------------| +| **field_name** | `str` | The index field to search for similar terms | +| **term_or_terms** (Union) | `str` or `List[str]` | Term or List of terms to get suggested similar terms for | +| **display_name** | `str` | A custom name for the suggestions result | +| **options** | `SuggestionOptions` | Non-default options to use in the operation | + +**Suggestion options**: + + + +{`DEFAULT_ACCURACY = 0.5 +DEFAULT_PAGE_SIZE = 15 +DEFAULT_DISTANCE = StringDistanceTypes.LEVENSHTEIN +DEFAULT_SORT_MODE = SuggestionSortMode.POPULARITY + +def __init__( + self, + page_size: int = DEFAULT_PAGE_SIZE, + distance: StringDistanceTypes = DEFAULT_DISTANCE, + accuracy: float = DEFAULT_ACCURACY, + sort_mode: SuggestionSortMode = DEFAULT_SORT_MODE, +): + self.page_size = page_size + self.distance = distance + self.accuracy = accuracy + self.sort_mode = sort_mode +`} + + + +| Parameter | Type | Description | +|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------| +| **page_size** | `int` | Maximum number of suggested terms that will be returned
Default: **15** |
| **distance** | `StringDistanceTypes` | String distance algorithm to use (`NONE` / `LEVENSHTEIN` / `JAROWINKLER` / `NGRAM`)<br/>Default: **LEVENSHTEIN** |
| **accuracy** | `float` | Suggestion accuracy<br/>Default: **0.5** |
| **sort_mode** | `SuggestionSortMode` | Indicates the order by which results are returned (`NONE` / `POPULARITY`)<br/>
Default: **POPULARITY** | diff --git a/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-csharp.mdx new file mode 100644 index 0000000000..aa15e1f854 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-csharp.mdx @@ -0,0 +1,760 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, the server will return the results __sorted__ only if explicitly requested by the query. + If no sorting method is specified when issuing the query then results will not be sorted. + + * Note: An exception to the above rule is when [Boosting](../../../indexes/boosting.mdx) is involved in the query. + Learn more in [Automatic score-based ordering](../../../indexes/boosting.mdx#automatic-score-based-ordering). + +* Sorting is applied by the server after the query filtering stage. + Applying filtering is recommended as it reduces the number of results RavenDB needs to sort + when querying a large dataset. + +* Multiple sorting actions can be chained. + +* This article provides examples of sorting query results when making a __dynamic-query__. + For sorting results when querying a __static-index__ see [sort index query results](../../../indexes/querying/sorting.mdx). + +* In this page: + * [Order by field value](../../../client-api/session/querying/sort-query-results.mdx#order-by-field-value) + + * [Order by score](../../../client-api/session/querying/sort-query-results.mdx#order-by-score) + * [Get resulting score](../../../client-api/session/querying/sort-query-results.mdx#get-resulting-score) + + * [Order by random](../../../client-api/session/querying/sort-query-results.mdx#order-by-random) + + * [Order by spatial](../../../client-api/session/querying/sort-query-results.mdx#order-by-spatial) + + * [Order by count (aggregation query)](../../../client-api/session/querying/sort-query-results.mdx#order-by-count-(aggregation-query)) + + * [Order by sum (aggregation query)](../../../client-api/session/querying/sort-query-results.mdx#order-by-sum-(aggregation-query)) + + * [Force ordering type](../../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) + + * [Chain ordering](../../../client-api/session/querying/sort-query-results.mdx#chain-ordering) + + * [Custom sorters](../../../client-api/session/querying/sort-query-results.mdx#custom-sorters) + + * [Syntax](../../../client-api/session/querying/sort-query-results.mdx#syntax) + + +## Order by field value + +* Use `OrderBy` or `OrderByDescending` to order the results by the specified document-field. + + + + +{`List products = session + // Make a dynamic query on the Products collection + .Query() + // Apply filtering (optional) + .Where(x => x.UnitsInStock > 10) + // Call 'OrderBy', pass the document-field by which to order the results + .OrderBy(x => x.UnitsInStock) + .ToList(); + +// Results will be sorted by the 'UnitsInStock' value in ascending order, +// with smaller values listed first. 
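// Note: to list larger values first, 'OrderByDescending' can be used
// instead (see the Syntax section below).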
+`} + + + + +{`List products = await asyncSession + // Make a dynamic query on the Products collection + .Query() + // Apply filtering (optional) + .Where(x => x.UnitsInStock > 10) + // Call 'OrderBy', pass the document-field by which to order the results + .OrderBy(x => x.UnitsInStock) + .ToListAsync(); + +// Results will be sorted by the 'UnitsInStock' value in ascending order, +// with smaller values listed first. +`} + + + + +{`List products = session.Advanced + // Make a DocumentQuery on the Products collection + .DocumentQuery() + // Apply filtering (optional) + .WhereGreaterThan(x => x.UnitsInStock, 10) + // Call 'OrderBy', pass the document-field by which to order the results + .OrderBy(x => x.UnitsInStock) + .ToList(); + +// Results will be sorted by the 'UnitsInStock' value in ascending order, +// with smaller values listed first. +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by UnitsInStock as long +`} + + + + + + +__Ordering Type__: + +* By default, the `OrderBy` methods will determine the `OrderingType` from the property path expression + and specify that ordering type in the generated RQL that is sent to the server. + +* E.g. in the above example, ordering by `x => x.UnitsInStock` will result in `OrderingType.Long` + because that property data type is an integer. + +* Different ordering can be forced - see [Force ordering type](../../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) below. + + + + + +## Order by score + +* When querying with some filtering conditions, a basic score is calculated for each item in the results + by the underlying indexing engine. + +* The higher the score value the better the match. + +* Use `OrderByScore` or `OrderByScoreDescending` to order by this score. + + + + +{`List products = session + .Query() + // Apply filtering + .Where(x => x.UnitsInStock < 5 || x.Discontinued) + // Call 'OrderByScore' + .OrderByScore() + .ToList(); + +// Results will be sorted by the score value +// with best matching documents (higher score values) listed first. +`} + + + + +{`List products = await asyncSession + .Query() + // Apply filtering + .Where(x => x.UnitsInStock < 5 || x.Discontinued) + // Call 'OrderByScore' + .OrderByScore() + .ToListAsync(); + +// Results will be sorted by the score value +// with best matching documents (higher score values) listed first. +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Apply filtering + .WhereLessThan(x => x.UnitsInStock, 5) + .OrElse() + .WhereEquals(x => x.Discontinued, true) + // Call 'OrderByScore' + .OrderByScore() + .ToList(); + +// Results will be sorted by the score value +// with best matching documents (higher score values) listed first. +`} + + + + +{`from "Products" +where UnitsInStock < 5 or Discontinued == true +order by score() +`} + + + + + + +#### Get resulting score: +The score details can be retrieved by either: + +* __Request to include explanations__: + You can get the score details and see how it was calculated by requesting to include explanations in the query. + Currently, this is only available when using Lucene as the underlying indexing engine. + Learn more in [Include query explanations](../../../client-api/session/querying/debugging/include-explanations.mdx). + +* __Get score from metadata__: + + * The score is available in the `@index-score` metadata property within each result. 
+ Note the following difference between the underlying indexing engines: + + * When using __Lucene__: + This metadata property is always available in the results. + Read more about Lucene scoring [here](https://lucene.apache.org/core/3_3_0/scoring.html). + + * When using __Corax__: + In order to enhance performance, this metadata property is Not included in the results by default. + To get this metadata property you must set the [Indexing.Corax.IncludeDocumentScore](../../../server/configuration/indexing-configuration.mdx#indexingcoraxincludedocumentscore) configuration value to _true_. + Learn about the available methods for setting an indexing configuration key in this [indexing-configuration](../../../server/configuration/indexing-configuration.mdx) article. + + * The following example shows how to get the score from the metadata of the resulting entities that were loaded to the session: + + + +{`// Make a query: +// ============= + +List employees = session + .Query() + .Search(x => x.Notes, "English") + .Search(x => x.Notes, "Italian", boost: 10) + .ToList(); + +// Get the score: +// ============== + +// Call 'GetMetadataFor', pass an entity from the resulting employees list +var metadata = session.Advanced.GetMetadataFor(employees[0]); + +// Score is available in the '@index-score' metadata property +var score = metadata[Constants.Documents.Metadata.IndexScore]; +`} + + + + + + + +## Order by random + +* Use `RandomOrdering` to randomize the order of the query results. + +* An optional seed parameter can be passed. + + + + +{`List products = session + .Query() + .Where(x => x.UnitsInStock > 10) + // Call 'Customize' with 'RandomOrdering' + .Customize(x => x.RandomOrdering()) + // An optional seed can be passed, e.g.: + // .Customize(x => x.RandomOrdering('someSeed')) + .ToList(); + +// Results will be randomly ordered. +`} + + + + +{`List products = await asyncSession + .Query() + .Where(x => x.UnitsInStock > 10) + // Call 'Customize' with 'RandomOrdering' + .Customize(x => x.RandomOrdering()) + // An optional seed can be passed, e.g.: + // .Customize(x => x.RandomOrdering('someSeed')) + .ToListAsync(); + +// Results will be randomly ordered. +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + .WhereGreaterThan(x => x.UnitsInStock, 10) + // Call 'RandomOrdering' + .RandomOrdering() + // An optional seed can be passed, e.g.: + // .RandomOrdering('someSeed') + .ToList(); + +// Results will be randomly ordered. +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by random() +// order by random(someSeed) +`} + + + + + + +## Order by spatial + +* If your data contains geographical locations, + spatial query results can be sorted based on their distance from a specific point. + +* See detailed explanation in [Spatial Sorting](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting). + + + +## Order by count (aggregation query) + +* The results of a [group-by query](../../../client-api/session/querying/how-to-perform-group-by-query.mdx) can be sorted by the `Count` aggregation operation used in the query. 
+ + + + +{`var numberOfProductsPerCategory = session + .Query() + // Make an aggregation query + .GroupBy(x => x.Category) + .Select(x => new + { + // Group by Category + Category = x.Key, + // Count the number of product documents per category + Count = x.Count() + }) + // Order by the Count value + .OrderBy(x => x.Count) + .ToList(); + +// Results will contain the number of Product documents per category +// ordered by that count in ascending order. +`} + + + + +{`var numberOfProductsPerCategory = await asyncSession + .Query() + // Make an aggregation query + .GroupBy(x => x.Category) + .Select(x => new + { + // Group by Category + Category = x.Key, + // Count the number of product documents per category + Count = x.Count() + }) + // Order by the Count value + .OrderBy(x => x.Count) + .ToListAsync(); + +// Results will contain the number of Product documents per category +// ordered by that count in ascending order. +`} + + + + +{`var numberOfProductsPerCategory = session.Advanced + .DocumentQuery() + // Group by Category + .GroupBy("Category") + .SelectKey("Category") + // Count the number of product documents per category + .SelectCount() + // Order by the Count value + // Here you need to specify the ordering type explicitly + .OrderBy("Count", OrderingType.Long) + .ToList(); + +// Results will contain the number of Product documents per category +// ordered by that count in ascending order. +`} + + + + +{`from "Products" +group by Category +order by count() as long +select key() as "Category", count() +`} + + + + + + +## Order by sum (aggregation query) + +* The results of a [group-by query](../../../client-api/session/querying/how-to-perform-group-by-query.mdx) can be sorted by the `Sum` aggregation operation used in the query. + + + + +{`var numberOfUnitsInStockPerCategory = session + .Query() + // Make an aggregation query + .GroupBy(x => x.Category) + .Select(x => new + { + // Group by Category + Category = x.Key, + // Sum the number of units in stock per category + Sum = x.Sum(x => x.UnitsInStock) + }) + // Order by the Sum value + .OrderBy(x => x.Sum) + .ToList(); + +// Results will contain the total number of units in stock per category +// ordered by that number in ascending order. +`} + + + + +{`var numberOfUnitsInStockPerCategory = await asyncSession + .Query() + // Make an aggregation query + .GroupBy(x => x.Category) + .Select(x => new + { + // Group by Category + Category = x.Key, + // Sum the number of units in stock per category + Sum = x.Sum(x => x.UnitsInStock) + }) + // Order by the Sum value + .OrderBy(x => x.Sum) + .ToListAsync(); + +// Results will contain the total number of units in stock per category +// ordered by that number in ascending order. +`} + + + + +{`var numberOfUnitsInStockPerCategory = session.Advanced + .DocumentQuery() + // Group by Category + .GroupBy("Category") + .SelectKey("Category") + // Sum the number of units in stock per category + .SelectSum(new GroupByField + { + FieldName = "UnitsInStock", + ProjectedName = "Sum" + }) + // Order by the Sum value + // Here you need to specify the ordering type explicitly + .OrderBy("Sum", OrderingType.Long) + .ToList(); + +// Results will contain the total number of units in stock per category +// ordered by that number in ascending order. 
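// Note: the "Sum" string passed to 'OrderBy' refers to the 'ProjectedName'
// assigned to the aggregated field above.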
+`} + + + + +{`from "Products" +group by Category +order by Sum as long +select key() as 'Category', sum(UnitsInStock) as Sum +`} + + + + + + +## Force ordering type + +* By default, the `OrderBy` methods will determine the `OrderingType` from the property path expression + and specify that ordering type in the generated RQL that is sent to the server. + +* A different ordering can be forced by passing the ordering type explicitly to `OrderBy` or `OrderByDescending`. + +* The following ordering types are available: + + * `OrderingType.Long` + * `OrderingType.Double` + * `OrderingType.AlphaNumeric` + * `OrderingType.String` (lexicographic ordering) + +* When using RQL directly, if no ordering type is specified, then the server defaults to lexicographic ordering. + + + +__Using alphanumeric ordering example__: + +* When ordering mixed-character strings by the default lexicographical ordering + then comparison is done character by character based on the Unicode values. + For example, "Abc9" will come after "Abc10" since 9 is greater than 1. + +* If you want the digit characters to be ordered as numbers then use alphanumeric ordering + where "Abc10" will result after "Abc9". + + + + +{`List products = session + .Query() + // Call 'OrderBy', order by field 'QuantityPerUnit' + // Pass a second param, requesting to order the text alphanumerically + .OrderBy(x => x.QuantityPerUnit, OrderingType.AlphaNumeric) + .ToList(); +`} + + + + +{`List products = await asyncSession + .Query() + // Call 'OrderBy', order by field 'QuantityPerUnit' + // Pass a second param, requesting to order the text alphanumerically + .OrderBy(x => x.QuantityPerUnit, OrderingType.AlphaNumeric) + .ToListAsync(); +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Call 'OrderBy', order by field 'QuantityPerUnit' + // Pass a second param, requesting to order the text alphanumerically + .OrderBy(x => x.QuantityPerUnit, OrderingType.AlphaNumeric) + .ToList(); +`} + + + + +{`from "Products" +order by QuantityPerUnit as alphanumeric +`} + + + + + + +{`// Running the above query on the NorthWind sample data, +// would produce the following order for the QuantityPerUnit field: +// ================================================================ + +// "1 kg pkg." +// "1k pkg." +// "2 kg box." +// "4 - 450 g glasses" +// "5 kg pkg." +// ... + +// While running with the default Lexicographical ordering would have produced: +// ============================================================================ + +// "1 kg pkg." +// "10 - 200 g glasses" +// "10 - 4 oz boxes" +// "10 - 500 g pkgs." +// "10 - 500 g pkgs." +// ... +`} + + + + + + + +## Chain ordering + +* It is possible to chain multiple orderings in the query. + Any combination of secondary sorting is possible as the fields are indexed independently of one another. + +* There is no limit on the number of sorting actions that can be chained. + +* This is achieved by using the `ThenBy` (`ThenByDescending`) and `ThenByScore` (`ThenByScoreDescending`) methods. 
+ + + + +{`List products = session + .Query() + .Where(x => x.UnitsInStock > 10) + // Apply the primary sort by 'UnitsInStock' + .OrderByDescending(x => x.UnitsInStock) + // Apply a secondary sort by the score (for products with the same # of units in stock) + .ThenByScore() + // Apply another sort by 'Name' (for products with same # of units in stock and same score) + .ThenBy(x => x.Name) + .ToList(); + +// Results will be sorted by the 'UnitsInStock' value (descending), +// then by score, +// and then by 'Name' (ascending). +`} + + + + +{`List products = await asyncSession + .Query() + .Where(x => x.UnitsInStock > 10) + // Apply the primary sort by 'UnitsInStock' + .OrderByDescending(x => x.UnitsInStock) + // Apply a secondary sort by the score (for products with the same # of units in stock) + .ThenByScore() + // Apply another sort by 'Name' (for products with same # of units in stock and same score) + .ThenBy(x => x.Name) + .ToListAsync(); + +// Results will be sorted by the 'UnitsInStock' value (descending), +// then by score, +// and then by 'Name' (ascending). +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + .WhereGreaterThan(x => x.UnitsInStock, 10) + // Apply the primary sort by 'UnitsInStock' + .OrderByDescending(x => x.UnitsInStock) + // Apply a secondary sort by the score + .OrderByScore() + // Apply another sort by 'Name' + .OrderBy(x => x.Name) + .ToList(); + +// Results will be sorted by the 'UnitsInStock' value (descending), +// then by score, +// and then by 'Name' (ascending). +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by UnitsInStock as long desc, score(), Name +`} + + + + + + +## Custom sorters + +* The Lucene indexing engine allows you to create your own custom sorters. + Custom sorters are not supported by [Corax](../../../indexes/search-engine/corax.mdx). + +* Custom sorters can be deployed to the server by either: + + * Sending the [Put Sorters Operation](../../../client-api/operations/maintenance/sorters/put-sorter.mdx) from your code. + + * Uploading a custom sorter from Studio, see [Custom Sorters View](../../../studio/database/settings/custom-sorters.mdx). + +* Once the custom sorter is deployed, you can sort the query results with it. 
+ + + + +{`List products = session + .Query() + .Where(x => x.UnitsInStock > 10) + // Order by field 'UnitsInStock', pass the name of your custom sorter class + .OrderBy(x => x.UnitsInStock, "MySorter") + .ToList(); + +// Results will be sorted by the 'UnitsInStock' value +// according to the logic from 'MySorter' class +`} + + + + +{`List products = await asyncSession + .Query() + .Where(x => x.UnitsInStock > 10) + // Order by field 'UnitsInStock', pass the name of your custom sorter class + .OrderBy(x => x.UnitsInStock, "MySorter") + .ToListAsync(); + +// Results will be sorted by the 'UnitsInStock' value +// according to the logic from 'MySorter' class +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + .WhereGreaterThan(x => x.UnitsInStock, 10) + // Order by field 'UnitsInStock', pass the name of your custom sorter class + .OrderBy(x => x.UnitsInStock, "MySorter") + .ToList(); + +// Results will be sorted by the 'UnitsInStock' value +// according to the logic from 'MySorter' class +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by custom(UnitsInStock, "MySorter") +`} + + + + + + +## Syntax + + + +{`// OrderBy overloads: +IOrderedQueryable OrderBy(string path, OrderingType ordering); +IOrderedQueryable OrderBy(Expression> path, OrderingType ordering); +IOrderedQueryable OrderBy(string path, string sorterName); +IOrderedQueryable OrderBy(Expression> path, string sorterName); + +// OrderByDescending overloads: +IOrderedQueryable OrderByDescending(string path, OrderingType ordering); +IOrderedQueryable OrderByDescending(Expression> path, OrderingType ordering); +IOrderedQueryable OrderByDescending(string path, string sorterName); +IOrderedQueryable OrderByDescending(Expression> path, string sorterName); +`} + + + +| Parameter | Type | Description | +|----------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| __path__ | `string` | The name of the field to sort by | +| __path__ | `Expression>` | A lambda expression to the field by which to sort | +| __ordering__ | `QueryStatistics` | The ordering type that will be used to sort the results:
`OrderingType.Long`<br/>`OrderingType.Double`<br/>`OrderingType.AlphaNumeric`<br/>
`OrderingType.String` (default) | +| __sorterName__ | `string` | The name of your custom sorter class | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-nodejs.mdx new file mode 100644 index 0000000000..d68b699fbc --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-nodejs.mdx @@ -0,0 +1,478 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, the server will return the results __sorted__ only if explicitly requested by the query. + If no sorting method is specified when issuing the query then results will not be sorted. + + * Note: An exception to the above rule is when [Boosting](../../../indexes/boosting.mdx) is involved in the query. + Learn more in [Automatic score-based ordering](../../../indexes/boosting.mdx#automatic-score-based-ordering). + +* Sorting is applied by the server after the query filtering stage. + Applying filtering is recommended as it reduces the number of results RavenDB needs to sort + when querying a large dataset. + +* Multiple sorting actions can be chained. + +* This article provides examples of sorting query results when making a __dynamic-query__. + For sorting results when querying a __static-index__ see [sort index query results](../../../indexes/querying/sorting.mdx). + +* In this page: + * [Order by field value](../../../client-api/session/querying/sort-query-results.mdx#order-by-field-value) + + * [Order by score](../../../client-api/session/querying/sort-query-results.mdx#order-by-score) + * [Get resulting score](../../../client-api/session/querying/sort-query-results.mdx#get-resulting-score) + + * [Order by random](../../../client-api/session/querying/sort-query-results.mdx#order-by-random) + + * [Order by spatial](../../../client-api/session/querying/sort-query-results.mdx#order-by-spatial) + + * [Order by count (aggregation query)](../../../client-api/session/querying/sort-query-results.mdx#order-by-count-(aggregation-query)) + + * [Order by sum (aggregation query)](../../../client-api/session/querying/sort-query-results.mdx#order-by-sum-(aggregation-query)) + + * [Force ordering type](../../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) + + * [Chain ordering](../../../client-api/session/querying/sort-query-results.mdx#chain-ordering) + + * [Custom sorters](../../../client-api/session/querying/sort-query-results.mdx#custom-sorters) + + * [Syntax](../../../client-api/session/querying/sort-query-results.mdx#syntax) + + +## Order by field value + +* Use `orderBy` or `orderByDescending` to order the results by the specified document-field. + + + + +{`const products = await session + // Make a dynamic query on the 'products' collection + .query({ collection: "products" }) + // Apply filtering (optional) + .whereGreaterThan("UnitsInStock", 10) + // Call 'orderBy' + // Pass the document-field by which to order the results and the ordering type + .orderBy("UnitsInStock", "Long") + .all(); + +// Results will be sorted by the 'UnitsInStock' value in ascending order, +// with smaller values listed first. 
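// Note: passing "Long" forces numeric ordering; without an ordering type
// the server applies the default lexicographical ordering
// (see 'Force ordering type' below).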
+`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by UnitsInStock as long +`} + + + + + + +__Ordering Type__: + +* If no ordering type is specified in the query then the server will apply the default lexicographical ordering. + +* In the above example, the ordering type was set to `Long`. + +* Different ordering can be forced - see [Force ordering type](../../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) below. + + + + + +## Order by score + +* When querying with some filtering conditions, a basic score is calculated for each item in the results + by the underlying indexing engine. + +* The higher the score value the better the match. + +* Use `orderByScore` or `orderByScoreDescending` to order by this score. + + + + +{`const products = await session + .query({ collection: "products" }) + // Apply filtering + .whereLessThan("UnitsInStock", 5) + .orElse() + .whereEquals("Discontinued", true) + // Call 'orderByScore' + .orderByScore() + .all(); + +// Results will be sorted by the score value +// with best matching documents (higher score values) listed first. +`} + + + + +{`from "Products" +where UnitsInStock < 5 or Discontinued == true +order by score() +`} + + + + + + +#### Get resulting score: +The score details can be retrieved by either: + +* __Request to include explanations__: + You can get the score details and see how it was calculated by requesting to include explanations in the query. + Currently, this is only available when using Lucene as the underlying indexing engine. + Learn more in [Include query explanations](../../../client-api/session/querying/debugging/include-explanations.mdx). + +* __Get score from metadata__: + + * The score is available in the `@index-score` metadata property within each result. + Note the following difference between the underlying indexing engines: + + * When using __Lucene__: + This metadata property is always available in the results. + Read more about Lucene scoring [here](https://lucene.apache.org/core/3_3_0/scoring.html). + + * When using __Corax__: + In order to enhance performance, this metadata property is Not included in the results by default. + To get this metadata property you must set the [Indexing.Corax.IncludeDocumentScore](../../../server/configuration/indexing-configuration.mdx#indexingcoraxincludedocumentscore) configuration value to _true_. + Learn about the available methods for setting an indexing configuration key in this [indexing-configuration](../../../server/configuration/indexing-configuration.mdx) article. + + * The following example shows how to get the score from the metadata of the resulting entities that were loaded to the session: + + + +{`// Make a query: +// ============= + +const employees = await session + .query(\{ collection: "Employees"\}) + .search('Notes', 'English') + .search('Notes', 'Italian') + .boost(10) + .all(); + +// Get the score: +// ============== + +// Call 'getMetadataFor', pass an entity from the resulting employees list +const metadata = session.advanced.getMetadataFor(employees[0]); + +// Score is available in the '@index-score' metadata property +const score = metadata[CONSTANTS.Documents.Metadata.INDEX_SCORE]; +`} + + + + + + + +## Order by random + +* Use `randomOrdering` to randomize the order of the query results. + +* An optional seed parameter can be passed. 
+ + + + +{`const products = await session + .query({ collection: "products" }) + .whereGreaterThan("UnitsInStock", 10) + // Call 'randomOrdering' + .randomOrdering() + // An optional seed can be passed, e.g.: + // .randomOrdering("someSeed") + .all(); + +// Results will be randomly ordered. +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by random() +// order by random(someSeed) +`} + + + + + + +## Order by spatial + +* If your data contains geographical locations, + spatial query results can be sorted based on their distance from a specific point. + +* See detailed explanation in [Spatial Sorting](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting). + + + +## Order by count (aggregation query) + +* The results of a [group-by query](../../../client-api/session/querying/how-to-perform-group-by-query.mdx) can be sorted by the `count` aggregation operation used in the query. + + + + +{`const numberOfProductsPerCategory = await session + .query({ collection: "products" }) + // Group by category + .groupBy("Category") + .selectKey("Category") + // Count the number of product documents per category + .selectCount() + // Order by the count value + .orderBy("count", "Long") + .all(); + +// Results will contain the number of Product documents per category +// ordered by that count in ascending order. +`} + + + + +{`from "Products" +group by Category +order by count() as long +select key() as "Category", count() +`} + + + + + + +## Order by sum (aggregation query) + +* The results of a [group-by query](../../../client-api/session/querying/how-to-perform-group-by-query.mdx) can be sorted by the `sum` aggregation operation used in the query. + + + + +{`const numberOfUnitsInStockPerCategory = await session + .query({ collection: "products" }) + // Group by category + .groupBy("Category") + .selectKey("Category") + // Sum the number of units in stock per category + .selectSum(new GroupByField("UnitsInStock", "sum")) + // Order by the sum value + .orderBy("sum", "Long") + .all(); + +// Results will contain the total number of units in stock per category +// ordered by that number in ascending order. +`} + + + + +{`from "Products" +group by Category +order by sum as long +select key() as 'Category', sum(UnitsInStock) as sum +`} + + + + + + +## Force ordering type + +* If no ordering type is specified in the query then the server will apply the default lexicographical ordering. + +* A different ordering can be forced by passing the ordering type explicitly to `orderBy` or `orderByDescending`. + +* The following ordering types are available: + + * `Long` + * `Double` + * `AlphaNumeric` + * `String` (lexicographic ordering) + + + +__Using alphanumeric ordering example__: + +* When ordering mixed-character strings by the default lexicographical ordering + then comparison is done character by character based on the Unicode values. + For example, "Abc9" will come after "Abc10" since 9 is greater than 1. + +* If you want the digit characters to be ordered as numbers then use alphanumeric ordering + where "Abc10" will result after "Abc9". 
+ + + + +{`const products = await session + .query({ collection: "products" }) + // Call 'OrderBy', order by field 'QuantityPerUnit' + // Pass a second param, requesting to order the text alphanumerically + .orderBy("QuantityPerUnit", "AlphaNumeric") + .all(); +`} + + + + +{`from "Products" +order by QuantityPerUnit as alphanumeric +`} + + + + + + +{`// Running the above query on the NorthWind sample data, +// would produce the following order for the QuantityPerUnit field: +// ================================================================ + +// "1 kg pkg." +// "1k pkg." +// "2 kg box." +// "4 - 450 g glasses" +// "5 kg pkg." +// ... + +// While running with the default Lexicographical ordering would have produced: +// ============================================================================ + +// "1 kg pkg." +// "10 - 200 g glasses" +// "10 - 4 oz boxes" +// "10 - 500 g pkgs." +// "10 - 500 g pkgs." +// ... +`} + + + + + + + +## Chain ordering + +* It is possible to chain multiple orderings in the query. + Any combination of secondary sorting is possible as the fields are indexed independently of one another. + +* There is no limit on the number of sorting actions that can be chained. + + + + +{`const products = await session + .query({ collection: "products" }) + .whereGreaterThan("UnitsInStock", 10) + // Apply the primary sort by 'UnitsInStock' + .orderByDescending("UnitsInStock", "Long") + // Apply a secondary sort by the score + .orderByScore() + // Apply another sort by 'Name' + .orderBy("Name") + .all(); + +// Results will be sorted by the 'UnitsInStock' value (descending), +// then by score, +// and then by 'Name' (ascending). +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by UnitsInStock as long desc, score(), Name +`} + + + + + + +## Custom sorters + +* The Lucene indexing engine allows you to create your own custom sorters. + Custom sorters are not supported by [Corax](../../../indexes/search-engine/corax.mdx). + +* Custom sorters can be deployed to the server by either: + + * Sending the [Put Sorters Operation](../../../client-api/operations/maintenance/sorters/put-sorter.mdx) from your code. + + * Uploading a custom sorter from Studio, see [Custom Sorters View](../../../studio/database/settings/custom-sorters.mdx). + +* Once the custom sorter is deployed, you can sort the query results with it. + + + + +{`const products = await session + .query({ collection: "products" }) + .whereGreaterThan("UnitsInStock", 10) + // Order by field 'UnitsInStock', pass the name of your custom sorter class + .orderBy("UnitsInStock", { sorterName: "MySorter" }) + .all(); + +// Results will be sorted by the 'UnitsInStock' value +// according to the logic from 'MySorter' class +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by custom(UnitsInStock, "MySorter") +`} + + + + + + +## Syntax + + + +{`// orderBy overloads: +orderBy(field); +orderBy(field, ordering); +orderBy(field, options); + +// orderByDescending overloads: +orderByDescending(field); +orderByDescending(field, ordering); +orderByDescending(field, options); +`} + + + +| Parameter | Type | Description | +|--------------|----------|------------------------------------------------------------------------------------------------------------------------| +| __field__ | `string` | The name of the field to sort by | +| __ordering__ | `string` | The ordering type that will be used to sort the results:
`Long`
`Double`
`AlphaNumeric`
`String` (default) | +| __options__ | `object` | An object that specifies the custom `sorterName` | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-php.mdx new file mode 100644 index 0000000000..386cd45a1e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-php.mdx @@ -0,0 +1,593 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, the server will return the results **sorted** only if explicitly requested by the query. + If no sorting method is specified when issuing the query then results will not be sorted. + + * Note: An exception to the above rule is when [Boosting](../../../indexes/boosting.mdx) is involved in the query. + Learn more in [Automatic score-based ordering](../../../indexes/boosting.mdx#automatic-score-based-ordering). + +* Sorting is applied by the server after the query filtering stage. + Applying filtering is recommended as it reduces the number of results RavenDB needs to sort + when querying a large dataset. + +* Multiple sorting actions can be chained. + +* This article provides examples of sorting query results when making a **dynamic-query**. + For sorting results when querying a **static-index** see [sort index query results](../../../indexes/querying/sorting.mdx). + +* In this page: + * [Order by field value](../../../client-api/session/querying/sort-query-results.mdx#order-by-field-value) + + * [Order by score](../../../client-api/session/querying/sort-query-results.mdx#order-by-score) + * [Get resulting score](../../../client-api/session/querying/sort-query-results.mdx#get-resulting-score) + + * [Order by random](../../../client-api/session/querying/sort-query-results.mdx#order-by-random) + + * [Order by spatial](../../../client-api/session/querying/sort-query-results.mdx#order-by-spatial) + + * [Order by count (aggregation query)](../../../client-api/session/querying/sort-query-results.mdx#order-by-count-(aggregation-query)) + + * [Order by sum (aggregation query)](../../../client-api/session/querying/sort-query-results.mdx#order-by-sum-(aggregation-query)) + + * [Force ordering type](../../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) + + * [Chain ordering](../../../client-api/session/querying/sort-query-results.mdx#chain-ordering) + + * [Custom sorters](../../../client-api/session/querying/sort-query-results.mdx#custom-sorters) + + * [Syntax](../../../client-api/session/querying/sort-query-results.mdx#syntax) + + +## Order by field value + +* Use `orderBy` or `orderByDescending` (see below) to order the results by the specified document field. + + + + +{`/** @var array $products */ +$products = $session + // Make a dynamic query on the Products collection + ->query(Product::class) + // Apply filtering (optional) + ->whereGreaterThan("UnitsInStock", 10) + // Call 'OrderBy', pass the document-field by which to order the results + ->orderBy("UnitsInStock") + ->toList(); + +// Results will be sorted by the 'UnitsInStock' value in ascending order, +// with smaller values listed first. 
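+
+// (To list larger values first, the same query can call
+// ->orderByDescending("UnitsInStock") instead - see the Syntax section
+// at the end of this article.)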
+`} + + + + +{`/** @var array $products */ +$products = $session->advanced() + // Make a DocumentQuery on the Products collection + ->documentQuery(Product::class) + // Apply filtering (optional) + ->whereGreaterThan("UnitsInStock", 10) + // Call 'OrderBy', pass the document-field by which to order the results + ->orderBy("UnitsInStock") + ->toList(); + +// Results will be sorted by the 'UnitsInStock' value in ascending order, +// with smaller values listed first. +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by UnitsInStock as long +`} + + + + + + +**Ordering Type**: + +* By default, the `orderBy` methods will determine the `OrderingType` from the property path expression + and specify that ordering type in the generated RQL that is sent to the server. + +* E.g. in the above example, ordering by `UnitsInStock` will result in `OrderingType::long` + because this property's data type is integer. + +* Different ordering can be forced - see [Force ordering type](../../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) below. + + + + + +## Order by score + +* When querying with some filtering conditions, a basic score is calculated for each item in the results + by the underlying indexing engine. (Read more about Lucene scoring [here](https://lucene.apache.org/core/3_3_0/scoring.html)). + +* The higher the score value the better the match. + +* Use `orderByScore` to order the query results by this score. + + + + +{`/** @var array $products */ +$products = $session + ->query(Product::class) + // Apply filtering + ->whereLessThan("UnitsInStock", 5) + ->orElse() + ->whereEquals("Discontinued", true) + // Call 'orderByScore' + ->orderByScore() + ->toList(); + +// Results will be sorted by the score value +// with best matching documents (higher score values) listed first. +`} + + + + +{`/** @var array $products */ +$products = $session->advanced() + ->documentQuery(Product::class) + // Apply filtering + ->whereLessThan("UnitsInStock", 5) + ->orElse() + ->whereEquals("Discontinued", true) + // Call 'orderByScore' + ->orderByScore() + ->toList(); + +// Results will be sorted by the score value +// with best matching documents (higher score values) listed first. +`} + + + + +{`from "Products" +where UnitsInStock < 5 or Discontinued == true +order by score() +`} + + + + + + +#### Get resulting score: +The score details can be retrieved by either: + + * **Request to include explanations**: + You can get the score details and see how it was calculated by requesting to include explanations in the query. + Currently, this is only available when using Lucene as the underlying indexing engine. + Learn more in [Include query explanations](../../../client-api/session/querying/debugging/include-explanations.mdx). + + * **Get score from metadata**: + The score is available in the `INDEX_SCORE` metadata property within each result. 
+ The following example shows how to get the score from the metadata of the resulting entities that were loaded to the session: + + + +{`// Make a query: +// ============= + +$employees = $session + ->query(Employee::class) + ->search("Notes", "English") + ->search("Notes", "Italian") + ->boost(10) + ->toList(); + +// Get the score: +// ============== + +// Call 'GetMetadataFor', pass an entity from the resulting employees list +$metadata = $session->advanced()->getMetadataFor($employees[0]); + +// Score is available in the 'INDEX_SCORE' metadata property +$score = $metadata[DocumentsMetadata::INDEX_SCORE]; +`} + + + + + + + +## Order by random + +* Use `randomOrdering` to randomize the order of the query results. + +* An optional seed parameter can be passed. + + + + +{`/** @var array $products */ +$products = $session->query(Product::class) + ->whereGreaterThan("UnitsInStock", 10) + // Call 'randomOrdering' + ->randomOrdering() + // An optional seed can be passed, e.g.: + // ->randomOrdering('someSeed') + ->toList(); + +// Results will be randomly ordered. +`} + + + + +{`/** @var array $products */ +$products = $session->advanced() + ->documentQuery(Product::class) + ->whereGreaterThan("UnitsInStock", 10) + // Call 'randomOrdering' + ->randomOrdering() + // An optional seed can be passed, e.g.: + // ->randomOrdering('someSeed') + ->toList(); + +// Results will be randomly ordered. +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by random() +// order by random(someSeed) +`} + + + + + + +## Order by spatial + +* If your data contains geographical locations, + spatial query results can be sorted based on their distance from a specific point. + +* See detailed explanation in [Spatial Sorting](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting). + + + +## Order by count (aggregation query) + +* The results of a [group-by query](../../../client-api/session/querying/how-to-perform-group-by-query.mdx) can be sorted by the `Count` aggregation operation used in the query. + + + + +{`$numberOfProductsPerCategory = $session + ->query(Product::class) + // Make an aggregation query + ->groupBy("Category") + ->selectKey("Category") + // Count the number of product documents per category + ->selectCount() + // Order by the Count value + // Here you need to specify the ordering type explicitly + ->orderBy("Count", OrderingType::long()) + ->toList(); + +// Results will contain the number of Product documents per category +// ordered by that count in ascending order. +`} + + + + +{`$numberOfProductsPerCategory = $session->advanced() + ->documentQuery(Product::class) + // Group by Category + ->groupBy("Category") + ->selectKey("Category") + // Count the number of product documents per category + ->selectCount() + // Order by the Count value + // Here you need to specify the ordering type explicitly + ->orderBy("Count", OrderingType::long()) + ->toList(); + +// Results will contain the number of Product documents per category +// ordered by that count in ascending order. +`} + + + + +{`from "Products" +group by Category +order by count() as long +select key() as "Category", count() +`} + + + + + + +## Order by sum (aggregation query) + +* The results of a [group-by query](../../../client-api/session/querying/how-to-perform-group-by-query.mdx) can be sorted by the `Sum` aggregation operation used in the query. 
+ + + + +{`$numberOfUnitsInStockPerCategory = $session + ->query(Product::class) + // Make an aggregation query + // Group by Category + ->groupBy("Category") + // Order by the Sum value + ->selectKey("Category") + ->selectSum(new GroupByField("UnitsInStock", "Sum")) + ->orderBy("Sum") + ->toList(); + +// Results will contain the total number of units in stock per category +// ordered by that number in ascending order. +`} + + + + +{`$numberOfUnitsInStockPerCategory = $session->advanced() + ->documentQuery(Product::class) + // Group by Category + ->groupBy("Category") + ->selectKey("Category") + // Sum the number of units in stock per category + ->selectSum(new GroupByField("UnitsInStock", "Sum")) + // Order by the Sum value + // Here you need to specify the ordering type explicitly + ->orderBy("Sum", OrderingType::long()) + ->toList(); + +// Results will contain the total number of units in stock per category +// ordered by that number in ascending order. +`} + + + + +{`from "Products" +group by Category +order by Sum as long +select key() as 'Category', sum(UnitsInStock) as Sum +`} + + + + + + +## Force ordering type + +* By default, the `orderBy` methods will determine the `OrderingType` from the property path expression + and specify that ordering type in the generated RQL that is sent to the server. + +* A different ordering can be forced by passing the ordering type explicitly to `orderBy` or `orderByDescending`. + +* The following ordering types are available: + + * `OrderingType::long` + * `OrderingType::double` + * `OrderingType::alphaNumeric` + * `OrderingType::string` (lexicographic ordering) + +* When using RQL directly, if no ordering type is specified, then the server defaults to lexicographic ordering. + + + +**Using alphanumeric ordering example**: + +* When ordering mixed-character strings by the default lexicographical ordering + then comparison is done character by character based on the Unicode values. + For example, "Abc9" will come after "Abc10" since 9 is greater than 1. + +* If you want the digit characters to be ordered as numbers then use alphanumeric ordering + where "Abc10" will result after "Abc9". + + + + +{`/** @var array $products */ +$products = $session + ->query(Product::class) + // Call 'OrderBy', order by field 'QuantityPerUnit' + // Pass a second param, requesting to order the text alphanumerically + ->orderBy("QuantityPerUnit", OrderingType::alphaNumeric()) + ->toList(); +`} + + + + +{`/** @var array $products */ +$products = $session->advanced() + ->documentQuery(Product::class) + // Call 'OrderBy', order by field 'QuantityPerUnit' + // Pass a second param, requesting to order the text alphanumerically + ->orderBy("QuantityPerUnit", OrderingType::alphaNumeric()) + ->toList(); +`} + + + + +{`from "Products" +order by QuantityPerUnit as alphanumeric +`} + + + + + + + + +## Chain ordering + +* It is possible to chain multiple orderings in the query. + Any combination of secondary sorting is possible as the fields are indexed independently of one another. + +* There is no limit on the number of sorting actions that can be chained. 
+ + + + +{`/** @var array $products */ +$products = $session + ->query(Product::class) + ->whereGreaterThan("UnitsInStock", 10) + // Apply the primary sort by 'UnitsInStock' + ->orderByDescending("UnitsInStock") + // Apply a secondary sort by the score (for products with the same # of units in stock) + ->orderByScore() + // Apply another sort by 'Name' (for products with same # of units in stock and same score) + ->orderBy("Name") + ->toList(); + +// Results will be sorted by the 'UnitsInStock' value (descending), +// then by score, +// and then by 'Name' (ascending). +`} + + + + +{`/** @var array $products */ +$products = $session->advanced() + ->documentQuery(Product::class) + ->whereGreaterThan("UnitsInStock", 10) + // Apply the primary sort by 'UnitsInStock' + ->orderByDescending("UnitsInStock") + // Apply a secondary sort by the score + ->orderByScore() + // Apply another sort by 'Name' + ->orderBy("Name") + ->toList(); + +// Results will be sorted by the 'UnitsInStock' value (descending), +// then by score, +// and then by 'Name' (ascending). +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by UnitsInStock as long desc, score(), Name +`} + + + + + + +## Custom sorters + +* The Lucene indexing engine allows you to create your own custom sorters. + Custom sorters can be deployed to the server by either: + + * Sending the [Put Sorters Operation](../../../client-api/operations/maintenance/sorters/put-sorter.mdx) from your code. + + * Uploading a custom sorter from Studio, see [Custom Sorters View](../../../studio/database/settings/custom-sorters.mdx). + +* Once the custom sorter is deployed, you can sort the query results with it. + + + + +{`/** @var array $products */ +$products = $session + ->query(Product::class) + ->whereGreaterThan("UnitsInStock", 10) + // Order by field 'UnitsInStock', pass the name of your custom sorter class + ->orderBy("UnitsInStock", "MySorter") + ->toList(); + +// Results will be sorted by the 'UnitsInStock' value +// according to the logic from 'MySorter' class +`} + + + + +{`/** @var array $products */ +$products = $session->advanced() + ->documentQuery(Product::class) + ->whereGreaterThan("UnitsInStock", 10) + // Order by field 'UnitsInStock', pass the name of your custom sorter class + ->orderBy("UnitsInStock", "MySorter") + ->toList(); + +// Results will be sorted by the 'UnitsInStock' value +// according to the logic from 'MySorter' class +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by custom(UnitsInStock, "MySorter") +`} + + + + + + +## Syntax + + + +{`/** + * Usage: + * - orderBy("lastName"); // same as call: orderBy("lastName", OrderingType::string()) + * - orderBy("lastName", OrderingType::string()); + * + * - orderBy("units_in_stock", "MySorter"); + * // Results will be sorted by the 'units_in_stock' value according to the logic from 'MySorter' class + */ +function orderBy(string $field, $sorterNameOrOrdering = null): DocumentQueryInterface; + +/** + * Usage: + * - orderByDescending("lastName"); // same as call: orderBy("lastName", OrderingType::string()) + * - orderByDescending("lastName", OrderingType::string()); + * + * - orderByDescending("units_in_stock", "MySorter"); + * // Results will be sorted by the 'units_in_stock' value according to the logic from 'MySorter' class + */ +function orderByDescending(string $field, $sorterNameOrOrdering = null): DocumentQueryInterface; +`} + + + +| Parameter | Type | Description | +|------------|----------|----------------------| +| **$field** | `string` | The field to sort by 
| +| **$sorterNameOrOrdering** | `string` or `OrderingType` | The custom sorter class name **-or-**<br/> the ordering type to sort the results by:<br/>
`OrderingType::long`
`OrderingType::double`
`OrderingType::alphaNumeric`
`OrderingType::string` (default) |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-python.mdx new file mode 100644 index 0000000000..adaef66a5a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_sort-query-results-python.mdx @@ -0,0 +1,428 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When making a query, the server will return the results **sorted** only if explicitly requested by the query.
+  If no sorting method is specified when issuing the query then results will not be sorted.
+
+  * Note: An exception to the above rule is when [Boosting](../../../indexes/boosting.mdx) is involved in the query.
+    Learn more in [Automatic score-based ordering](../../../indexes/boosting.mdx#automatic-score-based-ordering).
+
+* Sorting is applied by the server after the query filtering stage.
+  Applying filtering is recommended as it reduces the number of results RavenDB needs to sort
+  when querying a large dataset.
+
+* Multiple sorting actions can be chained.
+
+* This article provides examples of sorting query results when making a **dynamic-query**.
+  For sorting results when querying a **static-index** see [sort index query results](../../../indexes/querying/sorting.mdx).
+
+* In this page:
+  * [Order by field value](../../../client-api/session/querying/sort-query-results.mdx#order-by-field-value)
+
+  * [Order by score](../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+    * [Get resulting score](../../../client-api/session/querying/sort-query-results.mdx#get-resulting-score)
+
+  * [Order by random](../../../client-api/session/querying/sort-query-results.mdx#order-by-random)
+
+  * [Order by spatial](../../../client-api/session/querying/sort-query-results.mdx#order-by-spatial)
+
+  * [Order by count (aggregation query)](../../../client-api/session/querying/sort-query-results.mdx#order-by-count-(aggregation-query))
+
+  * [Order by sum (aggregation query)](../../../client-api/session/querying/sort-query-results.mdx#order-by-sum-(aggregation-query))
+
+  * [Force ordering type](../../../client-api/session/querying/sort-query-results.mdx#force-ordering-type)
+
+  * [Chain ordering](../../../client-api/session/querying/sort-query-results.mdx#chain-ordering)
+
+  * [Custom sorters](../../../client-api/session/querying/sort-query-results.mdx#custom-sorters)
+
+  * [Syntax](../../../client-api/session/querying/sort-query-results.mdx#syntax)
+
+
+## Order by field value
+
+* Use `order_by` or `order_by_descending` to order the results by the specified document field.
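+
+* The full (ascending) example is shown below; for contrast, the descending variant uses the same call shape.
+  A minimal sketch, with imports omitted as in the surrounding examples:
+
+{`products = list(
+    session
+    # Make a dynamic query on the 'Products' collection
+    .query_collection("Products")
+    # Call 'order_by_descending' - same parameters as 'order_by'
+    .order_by_descending("units_in_stock", OrderingType.LONG)
+)
+
+# Results will be sorted by the 'units_in_stock' value in descending order,
+# with larger values listed first
+`}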
+
+
+
+
+{`products = list(
+    session
+    # Make a dynamic query on the 'Products' collection
+    .query_collection("Products")
+    # Apply filtering (optional)
+    .where_greater_than("units_in_stock", 10)
+    # Call 'order_by'
+    # Pass the document-field by which to order the results and the ordering type
+    .order_by("units_in_stock", OrderingType.LONG)
+)
+
+# Results will be sorted by the 'units_in_stock' value in ascending order,
+# with smaller values listed first
+`}
+
+
+
+
+{`from "Products"
+where UnitsInStock > 10
+order by UnitsInStock as long
+`}
+
+
+
+
+
+**Ordering Type**:
+
+* The `order_by` methods specify the requested ordering type in the generated RQL that is sent to the server
+  (`OrderingType.STRING` is the default when no type is passed).
+
+* E.g. in the above example, `units_in_stock` is ordered with `OrderingType.LONG`
+  because that property's data type is an integer.
+
+* Different ordering can be forced - see [Force ordering type](../../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) below.
+
+
+
+
+
+## Order by score
+
+* When querying with some filtering conditions, a basic score is calculated for each item in the results
+  by the underlying indexing engine.
+
+* The higher the score value the better the match.
+
+* Use `order_by_score` or `order_by_score_descending` to order the query results by this score.
+
+
+
+
+{`products = list(
+    session.query_collection("Products")
+    # Apply filtering
+    .where_less_than("units_in_stock", 5)
+    .or_else()
+    .where_equals("discontinued", True)
+    # Call 'order_by_score'
+    .order_by_score()
+)
+
+# Results will be sorted by the score value
+# with best matching documents (higher score values) listed first.
+`}
+
+
+
+
+{`from "Products"
+where UnitsInStock < 5 or Discontinued == true
+order by score()
+`}
+
+
+
+
+
+#### Get resulting score:
+The score details can be retrieved by either:
+
+* **Request to include explanations**:
+  You can get the score details and see how it was calculated by requesting to include explanations in the query.
+  Currently, this is only available when using Lucene as the underlying indexing engine.
+  Learn more in [Include query explanations](../../../client-api/session/querying/debugging/include-explanations.mdx).
+
+* **Get score from metadata**:
+
+  * The score is available in the `@index-score` metadata property within each result.
+    Note the following difference between the underlying indexing engines:
+
+    * When using **Lucene**:
+      This metadata property is always available in the results.
+      Read more about Lucene scoring [here](https://lucene.apache.org/core/3_3_0/scoring.html).
+
+    * When using **Corax**:
+      In order to enhance performance, this metadata property is Not included in the results by default.
+      To get this metadata property you must set the [Indexing.Corax.IncludeDocumentScore](../../../server/configuration/indexing-configuration.mdx#indexingcoraxincludedocumentscore) configuration value to `True`.
+      Learn about the available methods for setting an indexing configuration key in this [indexing-configuration](../../../server/configuration/indexing-configuration.mdx) article.
+
+
+
+
+
+## Order by random
+
+* Use `random_ordering` to randomize the order of the query results.
+
+* An optional seed parameter can be passed.
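+
+* A minimal sketch of the seeded call (reusing the same `query_collection` API shown throughout this article;
+  passing a fixed seed is what makes the "random" order repeatable for that seed). The full example follows:
+
+{`products = list(
+    session.query_collection("Products")
+    # Pass a seed to 'random_ordering'
+    .random_ordering("someSeed")
+)
+`}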
+ + + + +{`products = list( + session.query_collection("Products").where_greater_than("units_in_stock", 10) + # Call 'random_ordering' + .random_ordering() + # An optional seed can be passed, e.g.: + # .random_ordering("someSeed") +) + +# Results will be randomly ordered +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by random() +// order by random(someSeed) +`} + + + + + + +## Order by spatial + +* If your data contains geographical locations, + spatial query results can be sorted based on their distance from a specific point. + +* See detailed explanation in [Spatial Sorting](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting). + + + +## Order by count (aggregation query) + +* The results of a [group_by query](../../../client-api/session/querying/how-to-perform-group-by-query.mdx) can be sorted by the `count` aggregation operation used in the query. + + + + +{`number_of_products_per_category = list( + session.query_collection("Products", Product) + # Group by category + .group_by("category").select_key("category") + # Count the number of product documents per category + .select_count() + # Order by the count value + .order_by("count", OrderingType.LONG) +) + +# Results will contain the number of Product documents per category +# ordered by that count in ascending order. +`} + + + + +{`from "Products" +group by Category +order by count() as long +select key() as "Category", count() +`} + + + + + + +## Order by sum (aggregation query) + +* The results of a [group_by query](../../../client-api/session/querying/how-to-perform-group-by-query.mdx) can be sorted by the `sum` aggregation operation used in the query. + + + + +{`number_of_units_in_stock_per_category = list( + session.query_collection("Products", Product) + # Group by category + .group_by("category").select_key("category") + # Sum the number of units in stock per category + .select_sum(GroupByField("units_in_stock", "sum")) + # Order by the sum value + .order_by("sum", OrderingType.LONG) +) + +# Results will contain the total number of units in stock per category +# ordered by that number in ascending order. +`} + + + + +{`from "Products" +group by Category +order by Sum as long +select key() as 'Category', sum(UnitsInStock) as Sum +`} + + + + + + +## Force ordering type + +* By default, the `order_by` methods will determine the `OrderingType` from the property path expression + and specify that ordering type in the generated RQL that is sent to the server. + +* A different ordering can be forced by passing the ordering type explicitly to `order_by` or `order_by_descending`. + +* The following ordering types are available: + + * `OrderingType.LONG` + * `OrderingType.DOUBLE` + * `OrderingType.ALPHA_NUMERIC` + * `OrderingType.STRING` (lexicographic ordering) + +* When using RQL directly, if no ordering type is specified, then the server defaults to lexicographic ordering. + + + +**Using alphanumeric ordering example**: + +* When ordering mixed-character strings by the default lexicographical ordering + then comparison is done character by character based on the Unicode values. + For example, "Abc9" will come after "Abc10" since 9 is greater than 1. + +* If you want the digit characters to be ordered as numbers then use alphanumeric ordering + where "Abc10" will result after "Abc9". 
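+
+* The difference is easy to reproduce with plain Python string sorting, which is also lexicographic.
+  A standalone illustration (independent of the RavenDB client; the RavenDB query follows below):
+
+{`values = ["Abc9", "Abc10", "Abc2"]
+
+# Lexicographic (character-by-character) comparison:
+print(sorted(values))  # ['Abc10', 'Abc2', 'Abc9']
+
+# Alphanumeric ordering instead treats the digit runs as numbers,
+# yielding: ['Abc2', 'Abc9', 'Abc10']
+`}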
+ + + + +{`products = list( + session.query_collection("products") + # Call 'order_by', order by field 'quantity_per_unit' + # Pass a second param, requesting to order the text alphanumerically + .order_by("quantity_per_unit", OrderingType.ALPHA_NUMERIC) +) +`} + + + + +{`from "Products" +order by QuantityPerUnit as alphanumeric +`} + + + + + + + + +## Chain ordering + +* It is possible to chain multiple orderings in the query. + Any combination of secondary sorting is possible as the fields are indexed independently of one another. + +* There is no limit on the number of sorting actions that can be chained. + +* This is achieved by using the `then_by` (`then_by_descending`) and `then_by_score` (`then_by_score_descending`) methods. + + + + +{`products = list( + session.query_collection("Products").where_greater_than("units_in_stock", 10) + # Apply the primary sort by 'units_in_stock' + .order_by_descending("units_in_stock", OrderingType.LONG) + # Apply a secondary sort by the score + .order_by_score() + # Apply another sort by 'Name' + .order_by("name") +) + +# Results will be sorted by the 'units_in_stock' value (descending), +# then by score, +# and then by 'name' (ascending). +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by UnitsInStock as long desc, score(), Name +`} + + + + + + +## Custom sorters + +* The Lucene indexing engine allows you to create your own custom sorters. + Custom sorters are not supported by [Corax](../../../indexes/search-engine/corax.mdx). + +* Custom sorters can be deployed to the server by either: + + * Sending the [Put Sorters Operation](../../../client-api/operations/maintenance/sorters/put-sorter.mdx) from your code. + + * Uploading a custom sorter from Studio, see [Custom Sorters View](../../../studio/database/settings/custom-sorters.mdx). + +* Once the custom sorter is deployed, you can sort the query results with it. + + + + +{`products = list( + session.query(object_type=Product).where_greater_than("units_in_stock", 10) + # Order by field 'units_in_stock', pass the name of your custom sorter class + .order_by("units_in_stock", "MySorter") +) + +# Results will be sorted by the 'units_in_stock' value +# according to the logic from 'MySorter' class +`} + + + + +{`from "Products" +where UnitsInStock > 10 +order by custom(UnitsInStock, "MySorter") +`} + + + + + + +## Syntax + + + +{`# order_by: +def order_by( + self, field: str, sorter_name_or_ordering_type: Union[str, OrderingType] = OrderingType.STRING +) -> DocumentQuery[_T]: ... + +# order_by_descending: +def order_by_descending( + self, field: str, sorter_name_or_ordering_type: Union[str, OrderingType] = OrderingType.STRING +) -> DocumentQuery[_T]: ... +`} + + + +| Parameter | Type | Description | +|----------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **field** | `str` | The name of the field to sort by | +| **sorter_name_or_ordering_type** | `Union[str, OrderingType]` | The custom sorter class name
**-or-**
The results sorting ordering type
Can be:
`OrderingType.LONG`
`OrderingType.DOUBLE`
`OrderingType.ALPHA_NUMERIC`
`OrderingType.STRING` (default) |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/_vector-search-csharp.mdx new file mode 100644 index 0000000000..9f82d9506f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/_vector-search-csharp.mdx @@ -0,0 +1,21 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Vector search enables you to retrieve data based on **contextual relevance**, rather than relying on exact keyword matches.
+
+* In addition to its other capabilities, RavenDB serves as a **vector database** that allows you to efficiently store, index, and search vector representations.
+
+* In your queries, you can perform vector searches to locate documents based on their **content's similarity** to a given search item.
+
+* This feature is covered in detail in the following articles:
+
+  * [RavenDB as a Vector Database](../../../ai-integration/vector-search/ravendb-as-vector-database.mdx)
+  * [Vector Search using a Dynamic Query](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx)
+  * [Vector Search using a Static Index](../../../ai-integration/vector-search/vector-search-using-static-index.mdx)
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/assets/auto-index-terms.png b/versioned_docs/version-7.1/client-api/session/querying/assets/auto-index-terms.png new file mode 100644 index 0000000000..21d961f3ec Binary files /dev/null and b/versioned_docs/version-7.1/client-api/session/querying/assets/auto-index-terms.png differ
diff --git a/versioned_docs/version-7.1/client-api/session/querying/assets/non-existing-field-studio-rql.png b/versioned_docs/version-7.1/client-api/session/querying/assets/non-existing-field-studio-rql.png new file mode 100644 index 0000000000..8d83aca3f0 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/session/querying/assets/non-existing-field-studio-rql.png differ
diff --git a/versioned_docs/version-7.1/client-api/session/querying/assets/snagit/non-existing-field-studio-rql.snag b/versioned_docs/version-7.1/client-api/session/querying/assets/snagit/non-existing-field-studio-rql.snag new file mode 100644 index 0000000000..9bb93bd88c Binary files /dev/null and b/versioned_docs/version-7.1/client-api/session/querying/assets/snagit/non-existing-field-studio-rql.snag differ
diff --git a/versioned_docs/version-7.1/client-api/session/querying/assets/spatial_1.png b/versioned_docs/version-7.1/client-api/session/querying/assets/spatial_1.png new file mode 100644 index 0000000000..697e58d9bf Binary files /dev/null and b/versioned_docs/version-7.1/client-api/session/querying/assets/spatial_1.png differ
diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_category_.json new file mode 100644 index 0000000000..73174d836b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_category_.json @@ -0,0 +1,4 @@
+{
+  "position": 20,
+  "label": "Debugging"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-csharp.mdx new file mode 100644 index
0000000000..ced793543f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-csharp.mdx @@ -0,0 +1,159 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, each document in the query results is assigned a **score**. + This score determines the order by which the documents come back in the results when requesting + to [order by score](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score). + +* Each document in the results includes this score under the `@index-score` property in its metadata. + +* **To get the score details** and see how it was calculated, + use method `IncludeExplanations`, which is available in the [IDocumentQuery](../../../../client-api/session/querying/document-query/what-is-document-query.mdx) interface. + + + * Including explanations is available only when using **Lucene** as the underlying search engine. + * You can configure which search engine will be used. Learn how in [Selecting the search engine](../../../../indexes/search-engine/corax.mdx#selecting-the-search-engine). + +* This article provides examples of including explanations when making a **dynamic-query**. + For including explanations when querying a **static-index** see [Include explanations in index query](../../../../indexes/querying/include-explanations.mdx). + +* In this page: + * [Include explanations in a query](../../../../client-api/session/querying/debugging/include-explanations.mdx#include-explanations-in-a-query) + * [View explanations](../../../../client-api/session/querying/debugging/include-explanations.mdx#view-explanations) + * [Syntax](../../../../client-api/session/querying/debugging/include-explanations.mdx#syntax) + +## Include explanations in a query + + + + +{`var products = session + .Query() + // Convert the IRavenQueryable to IDocumentQuery + // to be able to use 'IncludeExplanations' + .ToDocumentQuery() + // Call IncludeExplanations, provide an out param for the explanations results + .IncludeExplanations(out Explanations explanations) + // Convert back to IRavenQueryable + // to continue building the query using LINQ + .ToQueryable() + // Define query criteria + // e.g. search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager") + // Execute the query + .ToList(); + +// Get the score details for a specific document from the results +// Call GetExplanations on the resulting Explanations object +string[] scoreDetails = explanations.GetExplanations(products[0].Id); +`} + + + + +{`var products = await asyncSession + .Query() + // Convert the IRavenQueryable to IDocumentQuery + // to be able to use 'IncludeExplanations' + .ToAsyncDocumentQuery() + // Call IncludeExplanations, provide an out param for the explanations results + .IncludeExplanations(out Explanations explanations) + // Convert back to IRavenQueryable + // to continue building the query using LINQ + .ToQueryable() + // Define query criteria + // e.g. 
search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager") + // Execute the query + .ToListAsync(); + +// Get the score details for a specific document from the results +// Call GetExplanations on the resulting Explanations object +string[] scoreDetails = explanations.GetExplanations(products[0].Id); +`} + + + + +{`// Query with \`DocumentQuery\` +var products = session.Advanced + .DocumentQuery() + // Call IncludeExplanations, provide an out param for the explanations results + .IncludeExplanations(out Explanations explanations) + // Define query criteria + // e.g. search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager") + // Execute the query + .ToList(); + +// Get the score details for a specific document from the results +// Call GetExplanations on the resulting Explanations object +string[] scoreDetails = explanations.GetExplanations(products[0].Id); +`} + + + + +{`// Query with \`AsyncDocumentQuery\` +var products = await asyncSession.Advanced + .AsyncDocumentQuery() + // Call IncludeExplanations, provide an out param for the explanations results + .IncludeExplanations(out Explanations explanations) + // Define query criteria + // e.g. search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager") + // Execute the query + .ToListAsync(); + +// Get the score details for a specific document from the results +// Call GetExplanations on the resulting Explanations object +string[] scoreDetails = explanations.GetExplanations(products[0].Id); +`} + + + + +{`from "Products" +where search(Name, "Syrup") or search(Name, "Lager") +include explanations() +`} + + + + + + +## View explanations + +* The detailed explanations can be viewed from the **Query view** in Studio. + +* Running a query with `include explanations()` will show an additional **Explanations Tab**. + +![Figure 1. Explanations in the Studio](./assets/include-explanations-1.png) + +* Sample score details: + +![Figure 2. View explanations](./assets/include-explanations-2.png) + + + +## Syntax + + + +{`IDocumentQuery IncludeExplanations(out Explanations explanations); +`} + + + +| Parameter | Type | Description | +|------------------|----------------|------------------------------------------------------------------| +| **explanations** | `Explanations` | An _out_ param that will be filled with the explanations results | + +| `Explanations` | Description | +|------------------------------------------|-------------| +| `string[] GetExplanations(string docId)` |
  • Pass the resulting document ID for which to get score details.
  • Returns a list with all explanations.
| diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-java.mdx new file mode 100644 index 0000000000..10bc4de813 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-java.mdx @@ -0,0 +1,58 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`includeExplanations` allows to investigate details related to score assigned for each query result. + + + +* Including explanations is available only when using **Lucene** as the underlying search engine. +* You can configure which search engine will be used. Learn how in [Selecting the search engine](../../../../indexes/search-engine/corax.mdx#selecting-the-search-engine). + + +## Syntax + + + + +{`IDocumentQuery includeExplanations(Reference explanations); +`} + + + +## Example + + + + +{`Reference explanationRef = new Reference<>(); +List syrups = session.advanced().documentQuery(Product.class) + .includeExplanations(explanationRef) + .search("Name", "Syrup") + .toList(); + +String[] scoreDetails = explanationRef.value.getExplanations(syrups.get(0).getId()); +`} + + + + +{`from "Products" +where search(Name, "Syrup") +include explanations() +`} + + + + +--- +Sample explanation: + +``` +4.650658 = (MATCH) fieldWeight(search(Name):syrup in 2), product of: + 1 = tf(termFreq(search(Name):syrup)=1) + 4.650658 = idf(docFreq=1, maxDocs=77) + 1 = fieldNorm(field=search(Name), doc=2) +``` + diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-nodejs.mdx new file mode 100644 index 0000000000..6c818a97a8 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-nodejs.mdx @@ -0,0 +1,105 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, each document in the query results is assigned a **score**. + This score determines the order by which the documents come back in the results when requesting + to [order by score](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score). + +* Each document in the results includes this score under the `@index-score` property in its metadata. + +* Use `includeExplanations` in your query **to get the score details** and see how it was calculated. + + + * Including explanations is available only when using **Lucene** as the underlying search engine. + * You can configure which search engine will be used. Learn how in [Selecting the search engine](../../../../indexes/search-engine/corax.mdx#selecting-the-search-engine). + +* This article provides examples of including explanations when making a **dynamic-query**. + For including explanations when querying a **static-index** see [Include explanations in index query](../../../../indexes/querying/include-explanations.mdx). 
+ +* In this page: + * [Include explanations in a query](../../../../client-api/session/querying/debugging/include-explanations.mdx#include-explanations-in-a-query) + * [View explanations](../../../../client-api/session/querying/debugging/include-explanations.mdx#view-explanations) + * [Syntax](../../../../client-api/session/querying/debugging/include-explanations.mdx#syntax) + +## Include explanations in a query + + + + +{`// Define an object that will receive the explanations results +let explanationsResults; + +const products = await session.query({ collection: "Products" }) + // Call includeExplanations, pass a callback function + // Output param 'explanationsResults' will be filled with explanations results when query returns + .includeExplanations(e => explanationsResults = e) + // Define query criteria + // i.e. search for docs containing Syrup -or- Lager in their Name field + .search("Name", "Syrup Lager") + // Execute the query + .all(); + +// Get the score details for a specific document from 'explanationsResults' +const id = session.advanced.getDocumentId(products[0]); +const scoreDetails = explanationsResults.explanations[id]; +`} + + + + +{`from "Products" +where search(Name, "Syrup") or search(Name, "Lager") +include explanations() +`} + + + + + + +## View explanations + +* The detailed explanations can be viewed from the **Query view** in Studio. +* Running a query with `include explanations()` will show an additional **Explanations Tab**. + +![Figure 1. Explanations in the Studio](./assets/include-explanations-1.png) + +* Sample score details: + +![Figure 2. View explanations](./assets/include-explanations-2.png) + + + +## Syntax + + + +{`query.includeExplanations(explanationsCallback) +`} + + + +| Parameter | Type | Description | +|--------------------------|---------------------------------|-------------| +| **explanationsCallback** | `(explanationsResults) => void` |
  • A callback function with an output parameter.
  • The parameter passed to the callback will be filled with the `Explanations` object when the query returns.
| + + + +{`// The Explanations object: +// ======================== + +class Explanations \{ + get explanations(): \{ + [key: string]: string[]; // An explanations list per document ID key + \}; +\} +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-php.mdx new file mode 100644 index 0000000000..a875c9f1c3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-php.mdx @@ -0,0 +1,85 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, each document in the query results is assigned a **score**. + This score determines the order by which the documents come back in the results when requesting + to [order by score](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score). + +* Each document in the results includes this score under the `@index-score` property in its metadata. + +* Use `includeExplanations` to get the score details and see how it was calculated. + + + * Including explanations is available only when using **Lucene** as the underlying search engine. + * You can configure which search engine will be used. Learn how in [Selecting the search engine](../../../../indexes/search-engine/corax.mdx#selecting-the-search-engine). + +* In this page: + * [Include explanations in a query](../../../../client-api/session/querying/debugging/include-explanations.mdx#include-explanations-in-a-query) + * [View explanations](../../../../client-api/session/querying/debugging/include-explanations.mdx#view-explanations) + * [Syntax](../../../../client-api/session/querying/debugging/include-explanations.mdx#syntax) + +## Include explanations in a query + + + + +{`$explanations = new Explanations(); + +/** @var array $syrups */ +$syrups = $session->advanced()->documentQuery(Product::class) + ->includeExplanations(null, $explanations) + ->search("Name", "Syrup") + ->toList(); + +$scoreDetails = $explanations->getExplanations($syrups[0]->getId()); +`} + + + + +{`from "Products" +where search(Name, "Syrup") or search(Name, "Lager") +include explanations() +`} + + + + + +Please note that the First parameter is optional. +If you intend to use the default options, just paste `null` instead of the options object. + + + + +## View explanations + +* The detailed explanations can be viewed from the **Query view** in Studio. + +* Running a query with `includeExplanations` will show an additional **Explanations Tab**. + +![Figure 1. Explanations in the Studio](./assets/include-explanations-1.png) + +* Sample score details: + +![Figure 2. View explanations](./assets/include-explanations-2.png) + + + +## Syntax + + + +{`public function includeExplanations(?ExplanationOptions $options, Explanations &$explanations): DocumentQueryInterface; +`} + + + +| Parameter | Type | Description | +|--------------------|-----------------------|-------------| +| **$options** | `?ExplanationOptions` | This object is optional.
If you intend to use the default options, place `null` here. | +| **&$explanations** | `Explanations` |
  • An `Explanations` object passed by reference (an out param).<br/>
  • It is filled with the resulting score details when the query returns, so you can read the explanations after executing the query.<br/>
| diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-python.mdx new file mode 100644 index 0000000000..576dcfe967 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_include-explanations-python.mdx @@ -0,0 +1,102 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When making a query, each document in the query results is assigned a **score**.
+  This score determines the order by which the documents come back in the results when requesting
+  to [order by score](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score).
+
+* Each document in the results includes this score under the `@index-score` property in its metadata.
+
+* Use `include_explanations` **to get the score details** and see how it was calculated.
+
+
+  * Including explanations is available only when using **Lucene** as the underlying search engine.
+  * You can configure which search engine will be used. Learn how in [Selecting the search engine](../../../../indexes/search-engine/corax.mdx#selecting-the-search-engine).
+
+* In this page:
+  * [Include explanations in a query](../../../../client-api/session/querying/debugging/include-explanations.mdx#include-explanations-in-a-query)
+  * [View explanations](../../../../client-api/session/querying/debugging/include-explanations.mdx#view-explanations)
+  * [Syntax](../../../../client-api/session/querying/debugging/include-explanations.mdx#syntax)
+
+
+## Include explanations in a query
+
+
+
+
+{`# Prepare a callback that will receive the explanations results
+explanations_results: Optional[Explanations] = None
+
+def explanations_callback(explanations: Explanations):
+    # Rebind the outer variable defined above
+    global explanations_results
+    explanations_results = explanations
+
+# Query with 'document_query':
+# ============================
+
+# Execute the query
+results = list(
+    # Prepare a query
+    session.advanced.document_query(object_type=Product)
+    # Call include_explanations, pass the callback defined above
+    .include_explanations(explanations_callback=explanations_callback)
+    # Define query criteria
+    # i.e. search for docs containing Syrup -or- Lager in their Name field
+    .search("Name", "Syrup Lager")
+)
+
+# Get the score details for a specific document from the results
+# via the Explanations object that was passed to the callback
+score_details = explanations_results.explanations[results[0].Id]
+`}
+
+
+
+
+{`from "Products"
+where search(Name, "Syrup") or search(Name, "Lager")
+include explanations()
+`}
+
+
+
+
+
+## View explanations
+
+* The detailed explanations can be viewed from the **Query view** in Studio.
+
+* Running a query with `include_explanations` will show an additional **Explanations Tab**.
+
+![Figure 1. Explanations in the Studio](./assets/include-explanations-1.png)
+
+* Sample score details:
+
+![Figure 2. View explanations](./assets/include-explanations-2.png)
+
+
+
+## Syntax
+
+
+
+{`def include_explanations(
+    self,
+    options: Optional[ExplanationOptions] = None,
+    explanations_callback: Callable[[Explanations], None] = None,
+) -> DocumentQuery[_T]: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------------------|----------------------------------|-------------|
+| **explanations_callback** | `Callable[[Explanations], None]` |<br/>
  • A callback function (action) that takes `Explanations` as an argument. It will be called by the client with the resulting `Explanations`.
  • You can interact with the resulting `Explanations` inside your callback.
| +| **options** (Optional) | `ExplanationOptions` | Can be a `group_key` string. | + +| `Explanations` | Description | +|------------------------|-------------| +| `Dict[str, List[str]]` |
  • A dictionary that maps each resulting document ID to its score details.<br/>
  • Look up a document ID key to get the list with all explanations for that result.<br/>
| diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-csharp.mdx new file mode 100644 index 0000000000..90c08b9c95 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-csharp.mdx @@ -0,0 +1,176 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, + you can request to get detailed stats of the time spent by RavenDB on each part of the query. + E.g. duration of search, loading documents, transforming results, total duration, etc. + +* By default, the timings stats are Not included in the query results, to avoid the measuring overhead. + +* __To include the query timings__ in the query results: + add a call to `Timings()` in your query code, or add `include timings()` to an RQL query. + See examples below. + +* In this page: + * [Include timings in a query](../../../../client-api/session/querying/debugging/query-timings.mdx#include-timings-in-a-query) + * [View timings](../../../../client-api/session/querying/debugging/query-timings.mdx#view-timings) + * [Syntax](../../../../client-api/session/querying/debugging/query-timings.mdx#syntax) + * [Timings in a sharded database](../../../../client-api/session/querying/debugging/query-timings.mdx#timings-in-a-sharded-database) + + +## Include timings in a query + + + + +{`// Define an object that will receive the query timings +QueryTimings timings = null; + +var results = session.Query() + // Use the Customize method to Call Timings, provide an out param for the timings results + .Customize(x => x.Timings(out timings)) + // Define query criteria + // i.e. search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager") + // Execute the query + .ToList(); + +// Get total query duration: +// ========================= +var totalQueryDuration = timings.DurationInMs; + +// Get specific parts duration: +// ============================ +IDictionary timingsDictionary = timings.Timings; +var optimizerDuration = timingsDictionary["Optimizer"].DurationInMs; +var luceneDuration = timingsDictionary["Query"].Timings["Lucene"].DurationInMs; +`} + + + + +{`// Define an object that will receive the query timings +QueryTimings timings = null; + +var results = await asyncSession.Query() + // Use the Customize method to Call Timings, provide an out param for the timings results + .Customize(x => x.Timings(out timings)) + // Define the search criteria + // Search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager") + // Execute the query + .ToListAsync(); + +// Get total query duration: +// ========================= +var totalQueryDuration = timings.DurationInMs; + +// Get specific parts duration: +// ============================ +IDictionary timingsDictionary = timings.Timings; +var optimizerDuration = timingsDictionary["Optimizer"].DurationInMs; +var luceneDuration = timingsDictionary["Query"].Timings["Lucene"].DurationInMs; +`} + + + + +{`var results = session.Advanced.DocumentQuery() + // Call Timings, provide an out param for the timings results + .Timings(out QueryTimings timings) + // Define query criteria + // i.e. 
search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager") + // Execute the query + .ToList(); + +// Get total query duration: +// ========================= +var totalQueryDuration = timings.DurationInMs; + +// Get specific parts duration: +// ============================ +IDictionary timingsDictionary = timings.Timings; +var optimizerDuration = timingsDictionary["Optimizer"].DurationInMs; +var luceneDuration = timingsDictionary["Query"].Timings["Lucene"].DurationInMs; +`} + + + + +{`var results = await asyncSession.Advanced.AsyncDocumentQuery() + // Call Timings, provide an out param for the timings results + .Timings(out QueryTimings timings) + // Define the search criteria + // Search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager") + // Execute the query + .ToListAsync(); + +// Get total query duration: +// ========================= +var totalQueryDuration = timings.DurationInMs; + +// Get specific parts duration: +// ============================ +IDictionary timingsDictionary = timings.Timings; +var optimizerDuration = timingsDictionary["Optimizer"].DurationInMs; +var luceneDuration = timingsDictionary["Query"].Timings["Lucene"].DurationInMs; +`} + + + + +{`from "Products" +where search(Name, "Syrup") or search(Name, "Lager") +include timings() +`} + + + + + + +## View timings + +* The detailed timings can be viewed from the [Query view](../../../../studio/database/queries/query-view.mdx) in the Studio. + +* Running an RQL query with `include timings()` will show an additional __Timings Tab__ + with a graphical representation of the time spent in each query part. + +![Figure 1. Include timings graphical results](./assets/include-timings.png) + + + +## Syntax + + + +{`IDocumentQueryCustomization Timings(out QueryTimings timings); +`} + + + +| Parameter | Type | Description | +|-------------|----------------|-------------------------------------------------------------| +| __timings__ | `QueryTimings` | An _out_ param that will be filled with the timings results | + +| `QueryTimings` | | | +|------------------|-------------------------------------|---------------------------------------------------| +| __DurationInMs__ | `long` | Total duration | +| __Timings__ | `IDictionary` | Dictionary with `QueryTimings` info per time part | + + + +## Timings in a sharded database + +* In a sharded database, timings for each part are provided __per shard__. + +* Learn more in [timings in a sharded database](../../../../sharding/querying.mdx#timing-queries). + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-java.mdx new file mode 100644 index 0000000000..82050a7150 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-java.mdx @@ -0,0 +1,88 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, + you can request to get detailed stats of the time spent by RavenDB on each part of the query. + E.g. duration of search, loading documents, transforming results, total duration, etc. + +* By default, the timings stats are Not included in the query results, to avoid the measuring overhead. 
+ +* __To include the query timings__ in the query results: + add a call to `timings()` in your query code, or add `include timings()` to an RQL query. + See examples below. + +* In this page: + * [Include timings in a query](../../../../client-api/session/querying/debugging/query-timings.mdx#include-timings-in-a-query) + * [View timings](../../../../client-api/session/querying/debugging/query-timings.mdx#view-timings) + * [Syntax](../../../../client-api/session/querying/debugging/query-timings.mdx#syntax) + * [Timings in a sharded database](../../../../client-api/session/querying/debugging/query-timings.mdx#timings-in-a-sharded-database) + + ## Include timings in a query + + + + +{`Reference<QueryTimings> timingsRef = new Reference<>(); +List<Product> resultsWithTimings = session.advanced().documentQuery(Product.class) + .timings(timingsRef) + .search("Name", "Syrup") + .toList(); + +Map<String, QueryTimings> timingsMap = timingsRef.value.getTimings(); `} + + + + +{`from "Products" +where search(Name, "Syrup") or search(Name, "Lager") +include timings() `} + + + + + + +## View timings + +* The detailed timings can be viewed from the [Query view](../../../../studio/database/queries/query-view.mdx) in the Studio. + +* Running an RQL query with `include timings()` will show an additional __Timings Tab__ + with a graphical representation of the time spent in each query part. + +![Figure 1. Include timings graphical results](./assets/include-timings.png) + + + +## Syntax + + + +{`IDocumentQueryCustomization timings(Reference<QueryTimings> timings); `} + + + +The `QueryTimings` object will be filled with the timings when the query returns. + +| `QueryTimings` | | | |------------------|-----------------------------|---------------------------------------------------| | __durationInMs__ | `long` | Total duration | | __timings__ | `Map<String, QueryTimings>` | Dictionary with `QueryTimings` info per time part | + + + +## Timings in a sharded database + +* In a sharded database, timings for each part are provided __per shard__. + +* Learn more in [timings in a sharded database](../../../../sharding/querying.mdx#timing-queries). + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-nodejs.mdx new file mode 100644 index 0000000000..2594955761 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-nodejs.mdx @@ -0,0 +1,106 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, + you can request detailed stats of the time spent by RavenDB on each part of the query. + E.g. duration of search, loading documents, transforming results, total duration, etc. + +* By default, the timings stats are Not included in the query results, to avoid the measuring overhead. + +* __To include the query timings__ in the query results: + add a call to `timings()` in your query code, or add `include timings()` to an RQL query. + See examples below. + +* In this page: + * [Include timings in a query](../../../../client-api/session/querying/debugging/query-timings.mdx#include-timings-in-a-query) + * [View timings](../../../../client-api/session/querying/debugging/query-timings.mdx#view-timings) + * [Syntax](../../../../client-api/session/querying/debugging/query-timings.mdx#syntax) + * [Timings in a sharded database](../../../../client-api/session/querying/debugging/query-timings.mdx#timings-in-a-sharded-database) + + ## Include timings in a query + + + + +{`// Define an object that will receive the timings results +let timingsResults; + +const results = await session.query({ collection: "Products" }) + // Call timings, pass a callback function + // Output param 'timingsResults' will be filled with the timings details when the query returns + .timings(t => timingsResults = t) + // Define query criteria + // i.e. search for docs containing Syrup -or- Lager in their Name field + .search("Name", "Syrup Lager") + // Execute the query + .all(); + +// Get total query duration: +// ========================= +const totalQueryDuration = timingsResults.durationInMs; + +// Get specific parts duration: +// ============================ +const optimizerDuration = timingsResults.timings.optimizer.durationInMs; +// or: timingsResults.timings["optimizer"].durationInMs; +const luceneDuration = timingsResults.timings.query.timings.lucene.durationInMs; +// or: timingsResults.timings["query"].timings["lucene"].durationInMs; `} + + + + +{`from "Products" +where search(Name, "Syrup") or search(Name, "Lager") +include timings() `} + + + + + + +## View timings + +* The detailed timings can be viewed from the [Query view](../../../../studio/database/queries/query-view.mdx) in the Studio. + +* Running an RQL query with `include timings()` will show an additional __Timings Tab__ + with a graphical representation of the time spent in each query part. + +![Figure 1. Include timings graphical results](./assets/include-timings.png) + + + +## Syntax + + + +{`query.timings(timingsCallback) `} + + + +| Parameter | Type | Description | |---------------------|-----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | __timingsCallback__ | `(timings) => void` | <ul><li>A callback function with an output parameter.</li><li>The parameter passed to the callback will be filled with the `QueryTimings` object when the query returns.</li></ul> | + +| `QueryTimings` | | | |------------------|--------------------------------|---------------------------------------------------| | __durationInMs__ | `number` | Total duration | | __timings__ | `Record<string, QueryTimings>` | Dictionary with `QueryTimings` info per time part | + + + +## Timings in a sharded database + +* In a sharded database, timings for each part are provided __per shard__. + +* Learn more in [timings in a sharded database](../../../../sharding/querying.mdx#timing-queries).
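
The `QueryTimings` object passed to the callback is recursive: each named part may itself contain nested parts, as in the `query` → `lucene` example above. The following is a minimal TypeScript sketch (not part of the RavenDB client API; the part names and durations in the sample object are invented for illustration) of walking such a tree and printing the duration of every part:

```typescript
// Shape of the timings object, as documented in the tables above
interface QueryTimings {
    durationInMs: number;
    timings: Record<string, QueryTimings>;
}

// Recursively print each part of the timings tree with indentation
function printTimings(node: QueryTimings, name = "Total", indent = ""): void {
    console.log(`${indent}${name}: ${node.durationInMs} ms`);
    for (const [part, child] of Object.entries(node.timings ?? {})) {
        printTimings(child, part, indent + "  ");
    }
}

// Sample data - in practice this is the object captured via `.timings(t => ...)`
const sample: QueryTimings = {
    durationInMs: 18,
    timings: {
        optimizer: { durationInMs: 1, timings: {} },
        query: {
            durationInMs: 12,
            timings: { lucene: { durationInMs: 9, timings: {} } }
        }
    }
};

printTimings(sample);
```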
+ + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-php.mdx new file mode 100644 index 0000000000..447455323e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-php.mdx @@ -0,0 +1,88 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, + you can request detailed stats of the time spent by RavenDB on each part of the query. + E.g. duration of search, loading documents, transforming results, total duration, etc. + +* By default, the timings stats are Not included in the query results, to avoid the measuring overhead. + +* **To include the query timings** in the query results: + add a call to the `timings()` method in your query code, or add `include timings()` to an RQL query. + See examples below. + +* In this page: + * [Include timings in a query](../../../../client-api/session/querying/debugging/query-timings.mdx#include-timings-in-a-query) + * [View timings](../../../../client-api/session/querying/debugging/query-timings.mdx#view-timings) + * [Syntax](../../../../client-api/session/querying/debugging/query-timings.mdx#syntax) + * [Timings in a sharded database](../../../../client-api/session/querying/debugging/query-timings.mdx#timings-in-a-sharded-database) + + ## Include timings in a query + + + + +{`$timings = new QueryTimings(); + +/** @var array $resultsWithTimings */ +$resultsWithTimings = $session->advanced()->documentQuery(Product::class) + ->timings($timings) + ->search("Name", "Syrup") + ->toList(); + +/** @var array $timingsMap */ +$timingsMap = $timings->getTimings(); `} + + + + +{`from "Products" +where search(Name, "Syrup") or search(Name, "Lager") +include timings() `} + + + + + + +## View timings + +* The detailed timings can be viewed from Studio's [Query view](../../../../studio/database/queries/query-view.mdx). + +* Running an RQL query with `include timings()` will show an additional **Timings Tab** + with a graphical representation of the time spent in each query part. + +![Figure 1. Include timings graphical results](./assets/include-timings.png) + + + +## Syntax + + + +{`function timings(QueryTimings &$timings): DocumentQueryInterface; `} + + + +| Parameter | Type | Description | |-------------|----------------|---------------| | **&$timings** | `QueryTimings` | An output argument passed by reference. The `QueryTimings` object you pass in will be filled with the timings results when the query returns, as shown in the example above. | + + + +## Timings in a sharded database + +* In a sharded database, timings for each part are provided **per shard**. + +* Learn more in [timings in a sharded database](../../../../sharding/querying.mdx#timing-queries).
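
Regardless of the client language, a common use for these stats is guarding against performance regressions in tests. A minimal TypeScript sketch, assuming a timings object captured with the callback API shown earlier; the 50 ms budget is an arbitrary example value:

```typescript
const QUERY_BUDGET_MS = 50; // arbitrary example threshold

// Throw if a query exceeded its time budget.
// `timings` is assumed to be the object filled by `.timings(t => timings = t)`.
function assertWithinBudget(timings: { durationInMs: number }): void {
    if (timings.durationInMs > QUERY_BUDGET_MS) {
        throw new Error(
            `Query took ${timings.durationInMs} ms, ` +
            `exceeding the ${QUERY_BUDGET_MS} ms budget`);
    }
}
```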
+ + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-python.mdx new file mode 100644 index 0000000000..a24ff39584 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/_query-timings-python.mdx @@ -0,0 +1,118 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a query, + you can request detailed stats of the time spent by RavenDB on each part of the query. + E.g. duration of search, loading documents, transforming results, total duration, etc. + +* By default, the timings stats are Not included in the query results, to avoid the measuring overhead. + +* **To include the query timings** in the query results: + add a call to the `timings()` method in your query code, or add `include timings()` to an RQL query. + See examples below. + +* In this page: + * [Include timings in a query](../../../../client-api/session/querying/debugging/query-timings.mdx#include-timings-in-a-query) + * [View timings](../../../../client-api/session/querying/debugging/query-timings.mdx#view-timings) + * [Syntax](../../../../client-api/session/querying/debugging/query-timings.mdx#syntax) + * [Timings in a sharded database](../../../../client-api/session/querying/debugging/query-timings.mdx#timings-in-a-sharded-database) + + ## Include timings in a query + + + + +{`# Prepare a mutable holder for the timings - +# assigning to a plain variable inside the callback would only create a local variable +timings_holder: List[QueryTimings] = [] + +# Prepare a callback +def timings_callback(timings_from_server: QueryTimings): + timings_holder.append(timings_from_server) + +results = list( + session.advanced.document_query(object_type=Product) + # Call timings, provide a callback function that will be called with result timings + .timings(timings_callback) + # Define query criteria + # i.e. search for docs containing Syrup -or- Lager in their Name field + .search("Name", "Syrup Lager") + # Execute the query +) + +timings = timings_holder[0] + +# Get total query duration: +# ========================= +total_query_duration = timings.duration_in_ms + +# Get specific parts duration: +# ============================ +timings_dictionary = timings.timings +optimizer_duration = timings_dictionary["Optimizer"].duration_in_ms +lucene_duration = timings_dictionary["Query"].timings["Lucene"].duration_in_ms `} + + + + +{`from "Products" +where search(Name, "Syrup") or search(Name, "Lager") +include timings() `} + + + + + + +## View timings + +* The detailed timings can be viewed from the [Query view](../../../../studio/database/queries/query-view.mdx) in the Studio. + +* Running an RQL query with `include timings()` will show an additional **Timings Tab** + with a graphical representation of the time spent in each query part. + +![Figure 1. Include timings graphical results](./assets/include-timings.png) + + + +## Syntax + + + +{`def timings(self, timings_callback: Callable[[QueryTimings], None]) -> DocumentQuery[_T]: ... `} + + + +| Parameter | Type | Description | |-------------|----------------|---------------| | **timings_callback** | `Callable[[QueryTimings], None]` | A callback function (action) that takes `QueryTimings` as an argument. It will be called by the client with the resulting `QueryTimings`. You can interact with the resulting `QueryTimings` inside your callback.
| + + + +{`class QueryTimings: + def __init__(self, duration_in_ms: int = None, timings: Dict[str, QueryTimings] = None): + self.duration_in_ms = duration_in_ms + self.timings = timings or \{\} +`} + + + +| `QueryTimings` | | | +|------------------|-------------------------------------|---------------------------------------------------| +| **duration_in_ms** | `int` | Total duration | +| **timings** | `Dict[str, QueryTimings]` | Dictionary with `QueryTimings` info per time part | + + + +## Timings in a sharded database + +* In a sharded database, timings for each part are provided **per shard**. + +* Learn more in [timings in a sharded database](../../../../sharding/querying.mdx#timing-queries). + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/assets/include-explanations-1.png b/versioned_docs/version-7.1/client-api/session/querying/debugging/assets/include-explanations-1.png new file mode 100644 index 0000000000..74fa7d8b7c Binary files /dev/null and b/versioned_docs/version-7.1/client-api/session/querying/debugging/assets/include-explanations-1.png differ diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/assets/include-explanations-2.png b/versioned_docs/version-7.1/client-api/session/querying/debugging/assets/include-explanations-2.png new file mode 100644 index 0000000000..82829e1125 Binary files /dev/null and b/versioned_docs/version-7.1/client-api/session/querying/debugging/assets/include-explanations-2.png differ diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/assets/include-timings.png b/versioned_docs/version-7.1/client-api/session/querying/debugging/assets/include-timings.png new file mode 100644 index 0000000000..5477fecf9c Binary files /dev/null and b/versioned_docs/version-7.1/client-api/session/querying/debugging/assets/include-timings.png differ diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/include-explanations.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/include-explanations.mdx new file mode 100644 index 0000000000..4207ae1873 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/include-explanations.mdx @@ -0,0 +1,44 @@ +--- +title: "Include Query Explanations" +hide_table_of_contents: true +sidebar_label: Include Query Explanations +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IncludeExplanationsJava from './_include-explanations-java.mdx'; +import IncludeExplanationsCsharp from './_include-explanations-csharp.mdx'; +import IncludeExplanationsPython from './_include-explanations-python.mdx'; +import IncludeExplanationsPhp from './_include-explanations-php.mdx'; +import IncludeExplanationsNodejs from './_include-explanations-nodejs.mdx'; + +export const supportedLanguages = ["java", "csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/debugging/query-timings.mdx b/versioned_docs/version-7.1/client-api/session/querying/debugging/query-timings.mdx new file mode 100644 index 0000000000..8c72a57a20 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/debugging/query-timings.mdx @@ -0,0 +1,44 @@ +--- +title: "Include Query Timings" +hide_table_of_contents: true +sidebar_label: Include Query Timings +sidebar_position: 
1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import QueryTimingsCsharp from './_query-timings-csharp.mdx'; +import QueryTimingsJava from './_query-timings-java.mdx'; +import QueryTimingsPython from './_query-timings-python.mdx'; +import QueryTimingsPhp from './_query-timings-php.mdx'; +import QueryTimingsNodejs from './_query-timings-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_category_.json b/versioned_docs/version-7.1/client-api/session/querying/document-query/_category_.json new file mode 100644 index 0000000000..034d384f39 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": "DocumentQuery" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-lucene-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-lucene-csharp.mdx new file mode 100644 index 0000000000..3332be07d3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-lucene-csharp.mdx @@ -0,0 +1,58 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Lucene-flavored syntax can be used with the `WhereLucene` method, one of the filtering methods available in `IDocumentQuery`. + +## Syntax + + + +{`IDocumentQuery<T> WhereLucene(string fieldName, string whereClause); `} + + + +| Parameters | | | | ------------- | ------------- | ----- | | **fieldName** | string | Name of a field in an index (default field) | | **whereClause** | string | Lucene-syntax based clause | + +## Example + + + + +{`List<Company> results = session + .Advanced + .DocumentQuery<Company>() + .WhereLucene("Name", "bistro") + .ToList(); `} + + + + +{`List<Company> results = await asyncSession + .Advanced + .AsyncDocumentQuery<Company>() + .WhereLucene("Name", "bistro") + .ToListAsync(); `} + + + + +{`from Companies +where lucene(Name, 'bistro') `} + + + + +## Advanced Usage + +The `fieldName` argument corresponds to Lucene's default field convention, and it is mandatory to pass it to `.WhereLucene`. The `whereClause`, however, can contain a clause that omits the field entirely, giving you the opportunity to pass a complex expression, e.g. `.WhereLucene("Name", "Name:bistro OR Phone:981-443655")`. It is advised to use this approach against a Static Index, where all fields are known, because there is no guarantee that a proper Auto Index will be created or used.
+ + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-lucene-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-lucene-java.mdx new file mode 100644 index 0000000000..689e8fef83 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-lucene-java.mdx @@ -0,0 +1,49 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Lucene flavored syntax can be used with the `whereLucene` method, a part of the filtering methods available in `IDocumentQuery`. + +## Syntax + + + +{`IDocumentQuery whereLucene(String fieldName, String whereClause); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **fieldName** | String | Name of a field in an index (default field) | +| **whereClause** | String | Lucene-syntax based clause | + +## Example + + + + +{`session + .advanced() + .documentQuery(Company.class) + .whereLucene("Name", "bistro") + .toList(); +`} + + + + +{`from Companies +where lucene(Name, 'bistro') +`} + + + + +## Advanced Usage + +The `fieldName` argument corresponds to Lucene's default field convention. It is mandatory to pass it to the `.whereLucene` but the `whereClause` can contain clause that omits the field entirely giving you the opportunity to pass a complex expression e.g. `.whereLucene("Name", "Name:bistro OR Phone:981-443655")`. It is advised to use this approach against Static Index where all fields are known, because there is no guarantee that a proper Auto Index will be created or used. + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-lucene-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-lucene-nodejs.mdx new file mode 100644 index 0000000000..c6b5170f0a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-lucene-nodejs.mdx @@ -0,0 +1,48 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Lucene flavored syntax can be used with the `whereLucene()` method, a part of the filtering methods available in `IDocumentQuery`. + +## Syntax + + + +{`query.whereLucene(fieldName, whereClause, exact); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **fieldName** | string | Name of a field in an index (default field) | +| **whereClause** | string | Lucene-syntax based clause | +| **exact** | boolean | (optional, default false) Match exact | + +## Example + + + + +{`const companies = await session.advanced + .documentQuery({ collection: "Companies" }) + .whereLucene("Name", "bistro") + .all(); +`} + + + + +{`from Companies +where lucene(name, 'bistro') +`} + + + + +## Advanced Usage + +The `fieldName` argument corresponds to Lucene's default field convention. It is mandatory to pass it to the `whereLucene()` but the `whereClause` can contain clause that omits the field entirely giving you the opportunity to pass a complex expression e.g. `.whereLucene("name", "name:bistro OR phone:981-443655")`. It is advised to use this approach against Static Index where all fields are known, because there is no guarantee that a proper Auto Index will be created or used. 
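
To make the "complex expression" advice above concrete, here is a minimal TypeScript sketch using the Node.js client API shown in this article; the server URL, database name, and the `name`/`phone` field names are assumptions for illustration:

```typescript
import { DocumentStore } from "ravendb";

const store = new DocumentStore("http://localhost:8080", "Northwind");
store.initialize();

async function findByNameOrPhone() {
    const session = store.openSession();

    // The whereClause names its fields explicitly,
    // so the mandatory default field ("name") is effectively ignored
    return await session.advanced
        .documentQuery({ collection: "Companies" })
        .whereLucene("name", "name:bistro OR phone:981-443655")
        .all();
}
```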
+ + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-not-operator-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-not-operator-csharp.mdx new file mode 100644 index 0000000000..642e82a87e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-not-operator-csharp.mdx @@ -0,0 +1,68 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`IDocumentQuery` contains `NOT` operator which can be used to negate **next** predicate + + +`NOT` operator cannot be used alone without succeeding predicate. + + +## Example I + + + +{`// load all entities from 'Employees' collection +// where FirstName NOT equals 'Robert' +List employees = session + .Advanced + .DocumentQuery() + .Not + .WhereEquals(x => x.FirstName, "Robert") + .ToList(); +`} + + + +## Example II + + + +{`// load all entities from 'Employees' collection +// where FirstName NOT equals 'Robert' +// and LastName NOT equals 'King' +List employees = session + .Advanced + .DocumentQuery() + .Not + .OpenSubclause() + .WhereEquals(x => x.FirstName, "Robert") + .AndAlso() + .WhereEquals(x => x.LastName, "King") + .CloseSubclause() + .ToList(); +`} + + + +## Example III + + + +{`// load all entities from 'Employees' collection +// where FirstName NOT equals 'Robert' +// and LastName NOT equals 'King' +// identical to 'Example II' but 'WhereNotEquals' is used +List employees = session + .Advanced + .DocumentQuery() + .WhereNotEquals(x => x.FirstName, "Robert") + .AndAlso() + .WhereNotEquals(x => x.LastName, "King") + .ToList(); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-not-operator-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-not-operator-java.mdx new file mode 100644 index 0000000000..cd36431a1b --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-not-operator-java.mdx @@ -0,0 +1,68 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`IDocumentQuery` contains `NOT` operator which can be used to negate **next** predicate + + +`NOT` operator cannot be used alone without succeeding predicate. 
+ + +## Example I + + + +{`// load all entities from 'Employees' collection +// where firstName NOT equals 'Robert' +List employees = session + .advanced() + .documentQuery(Employee.class) + .not() + .whereEquals("FirstName", "Robert") + .toList(); +`} + + + +## Example II + + + +{`// load all entities from 'Employees' collection +// where firstName NOT equals 'Robert' +// and lastName NOT equals 'King' +List employees = session + .advanced() + .documentQuery(Employee.class) + .not() + .openSubclause() + .whereEquals("FirstName", "Robert") + .andAlso() + .whereEquals("LastName", "King") + .closeSubclause() + .toList(); +`} + + + +## Example III + + + +{`// load all entities from 'Employees' collection +// where firstName NOT equals 'Robert' +// and lastName NOT equals 'King' +// identical to 'Example II' but 'whereNotEquals' is used +List employees = session + .advanced() + .documentQuery(Employee.class) + .whereNotEquals("FirstName", "Robert") + .andAlso() + .whereNotEquals("LastName", "King") + .toList(); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-not-operator-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-not-operator-nodejs.mdx new file mode 100644 index 0000000000..f070e948c6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_how-to-use-not-operator-nodejs.mdx @@ -0,0 +1,65 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +`IDocumentQuery` contains `NOT` operator which can be used to negate **next** predicate + + +`NOT` operator cannot be used alone without succeeding predicate. + + +## Example I + + + +{`// load all entities from 'Employees' collection +// where firstName NOT equals 'Robert' +const employees = await session.advanced + .documentQuery(\{ collection: "Employees" \}) + .not() + .whereEquals("FirstName", "Robert") + .all(); +`} + + + +## Example II + + + +{`// load all entities from 'Employees' collection +// where firstName NOT equals 'Robert' +// and lastName NOT equals 'King' +const employees = await session.advanced + .documentQuery(\{ collection: "Employees" \}) + .not() + .openSubclause() + .whereEquals("FirstName", "Robert") + .andAlso() + .whereEquals("LastName", "King") + .closeSubclause() + .all(); +`} + + + +## Example III + + + +{`// load all entities from 'Employees' collection +// where firstName NOT equals 'Robert' +// and lastName NOT equals 'King' +// identical to 'Example II' but 'whereNotEquals' is used +const employees = await session.advanced + .documentQuery(\{ collection: "Employees" \}) + .whereNotEquals("FirstName", "Robert") + .andAlso() + .whereNotEquals("LastName", "King") + .all(); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-csharp.mdx new file mode 100644 index 0000000000..07f1b6e91e --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-csharp.mdx @@ -0,0 +1,136 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB Queries can be executed using `query` or `document_query`, or by passing + 
[RQL](../../../../client-api/session/querying/what-is-rql.mdx) directly to the server + via `raw_query`. + Learn more in [Query Overview](../../../../client-api/session/querying/how-to-query.mdx). + +* The main differences between `Query` and `DocumentQuery` are outlined in this article. + +* In this page: + * [API support](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx#api-support) + * [Immutability](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx#immutability) + * [Default query operator](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx#default-query-operator) + + ## API support + +**Query**: + +* The `Query` API supports LINQ, the essential data access solution in .NET. + +* The API exposed by the _Query_ method is a wrapper of _DocumentQuery_ and is built on top of it. + +* When using _Query_, the query is translated into a _DocumentQuery_ object, + which is then built into an RQL query that is sent to the server. + +* The available _Query_ methods and extensions are listed [here](../../../../client-api/session/querying/how-to-query.mdx#custom-methods-and-extensions-for-linq). + +**DocumentQuery**: + +* `DocumentQuery` does Not support LINQ. + +* It exposes a lower-level API that provides more flexibility and control when building a query. + +* When using _DocumentQuery_, the query is translated into an RQL query that is sent to the server. + +* The available _DocumentQuery_ methods and extensions are listed [here](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#custom-methods-and-extensions). + + +**Note**: + +`Query` and `DocumentQuery` can be converted to one another. +This enables you to take advantage of all available API methods & extensions. +See [Convert between DocumentQuery and Query](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#convert-between-documentquery-and-query). + + + + + +## Immutability + +* `Query` is **immutable** while `DocumentQuery` is **mutable**. + You might get different results if you try to *reuse* a query. +* The usage of the `Query` method in the following example: + + + +{`IRavenQueryable<User> query = session + .Query<User>() + .Where(x => x.Name.StartsWith("A")); + +IRavenQueryable<User> ageQuery = query + .Where(x => x.Age > 21); + +IRavenQueryable<User> eyeQuery = query + .Where(x => x.EyeColor == "blue"); `} + + + + will result in the following RQL queries: + + `query: from Users where startsWith(Name, 'A')` + + `ageQuery: from Users where startsWith(Name, 'A') and Age > 21` + + `eyeQuery: from Users where startsWith(Name, 'A') and EyeColor = 'blue'` +* A similar usage with `DocumentQuery`: + + + +{`IDocumentQuery<User> documentQuery = session + .Advanced + .DocumentQuery<User>() + .WhereStartsWith(x => x.Name, "A"); + +IDocumentQuery<User> ageDocumentQuery = documentQuery + .WhereGreaterThan(x => x.Age, 21); + +IDocumentQuery<User> eyeDocumentQuery = documentQuery + .WhereEquals(x => x.EyeColor, "blue"); + +// Here all of the DocumentQuery variables have the same reference `} + + + + will result in the following RQL queries: + + `documentQuery: from Users where startsWith(Name, 'A')` + (before creating `ageDocumentQuery`) + + `ageDocumentQuery: from Users where startsWith(Name, 'A') and Age > 21` + (before creating `eyeDocumentQuery`) + + `eyeDocumentQuery: from Users where startsWith(Name, 'A') and Age > 21 and EyeColor = 'blue'` + + All of the queries above are the same query (actually the same instance). + This is an important hint to be aware of if you are going to reuse `DocumentQuery`. + + + +## Default Query Operator + +* Starting from version 4.0, both `Query` and `DocumentQuery` use `AND` as the default operator. + (Previously, `Query` used `AND` and `DocumentQuery` used `OR`). + +* This behavior can be modified by calling `UsingDefaultOperator`: + + + +{`session + .Advanced + .DocumentQuery<User>() + .UsingDefaultOperator(QueryOperator.Or); `} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-java.mdx new file mode 100644 index 0000000000..6b9e73291f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-java.mdx @@ -0,0 +1,84 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Unlike the .NET client, the Java client offers a single API for querying documents. + +Below you can find examples of equivalent calls. + + +The `query` method in `DocumentSession` is shorthand for calling `session.advanced().documentQuery`. + + + +{`session.query(Order.class) `} + + + +is equivalent to: + + + +{`session.advanced().documentQuery(Order.class) `} + + + +--- + + + +{`session.query(Order.class, Orders_ByShipToAndLines.class) `} + + + +is equivalent to: + + + +{`session.advanced().documentQuery(Order.class, Orders_ByShipToAndLines.class) `} + + + +--- + + + +{`session.query(Order.class, Query.index("Orders/ByShipToAndLines")) `} + + + +is equivalent to: + + + +{`session.advanced().documentQuery(Order.class, "Orders/ByShipToAndLines", null, false); `} + + + +--- + + + +{`session.query(Order.class, Query.collection("orders")) `} + + + +is equivalent to: + + + +{`session.advanced().documentQuery(Order.class, null, "orders", false); `} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-nodejs.mdx new file mode 100644 index 0000000000..4d8b1f4bc3 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-nodejs.mdx @@ -0,0 +1,65 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Node.js client provides a **unified API** for querying documents via the `session.query()` method. + All available methods for the session's _query_ method are listed [here](../../../../client-api/session/querying/how-to-query.mdx#query-api). + +* The `query` method is essentially a shorthand for invoking the `documentQuery` method. + Examples of those equivalent calls are listed below.
+ + + + + +{`// This collection query: +session.query(Order); + +// is equivalent to this documentQuery: +session.advanced.documentQuery(Order); +`} + + + + +{`// This collection query: +session.query(\{ collection: "orders" \}); + +// is equivalent to this documentQuery +session.advanced.documentQuery(\{ + collection: "orders", + indexName: null, + isMapReduce: false +\}); +`} + + + + +{`// This index query: +session.query(\{ indexName: "Orders/ByShipToAndLines" \}); + +// is equivalent to this documentQuery: +session.advanced.documentQuery(\{ indexName: "Orders/ByShipToAndLines" \}); +`} + + + + +{`// This index query: +session.query(\{ indexName: "Orders/ByShipToAndLines" \}); + +// is equivalent to this documentQuery: +session.advanced.documentQuery(\{ + indexName: "Orders/ByShipToAndLines", + isMapReduce: false, + collection: null +\}); +`} + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-php.mdx new file mode 100644 index 0000000000..9f86e1325d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-php.mdx @@ -0,0 +1,106 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB Queries can be executed using `query` or `document_query`, or by passing + [RQL](../../../../client-api/session/querying/what-is-rql.mdx) directly to the server + via `raw_query`. + Learn more in [Query Overview](../../../../client-api/session/querying/how-to-query.mdx). + +* In the PHP client API, `query` methods and their equivalent `documentQuery` methods + provide the same functionality. (This is different from the C# client implementation, + which often provides different functionality for `Query` methods and their `DocumentQuery` + counterparts.) + The PHP documentation therefore often provides `query` usage samples without adding + `documentQuery` examples as well. + +* In this page: + * [API support](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx#api-support) + * [`query `and `documentQuery` equivalents](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx#queryand-documentquery-equivalents) + + +## API support + +* `query` and `documentQquery` queries are translated to RQL and sent to the server. +* Available _query_ methods are listed [here](../../../../client-api/session/querying/how-to-query.mdx#custom-methods). +* Available _documentQuery_ methods and extensions are listed [here](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#custom-methods-and-extensions). + + + +## `query `and `documentQuery` equivalents + +#### 1. + + + +{`$session->query(Order::class) +`} + + + +is equivalent to: + + + +{`$session->advanced()->documentQuery(Order::class) +`} + + +#### 2. + + + +{`$session->query(Order::class, Orders_ByShipToAndLines::class) +`} + + + +is equivalent to: + + + +{`$session->advanced()->documentQuery(Order::class, Orders_ByShipToAndLines::class) +`} + + +#### 3. + + + +{`$session->query(Order::class, Query::index("Orders/ByShipToAndLines")) +`} + + + +is equivalent to: + + + +{`$session->advanced()->documentQuery(Order::class, "Orders/ByShipToAndLines", null, false); +`} + + +#### 4. 
+ + + +{`$session->query(Order::class, Query::collection("orders")) +`} + + + +is equivalent to: + + + +{`$session->advanced()->documentQuery(Order::class, null, "orders", false); +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-python.mdx new file mode 100644 index 0000000000..9ac51ca4b2 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_query-vs-document-query-python.mdx @@ -0,0 +1,104 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB Queries can be executed using `query` or `document_query`, or by passing + [RQL](../../../../client-api/session/querying/what-is-rql.mdx) directly to the server + via `raw_query`. + Learn more in [Query Overview](../../../../client-api/session/querying/how-to-query.mdx). + +* In the Python client API, `query` methods and their equivalent `document_query` methods + provide the same functionality. (This is different from the C# client implementation, + which often provides different functionality for `Query` methods and their `DocumentQuery` + counterparts.) + The Python documentation therefore often provides `query` usage samples without adding + `document_query` examples as well. + +* In this page: + * [API support](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx#api-support) + * [Mutability](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx#mutability) + * [Default query operator](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx#default-query-operator) + + +## API support + +* `query` and `document_query` queries are translated to RQL and sent to the server. +* Available _query_ methods are listed [here](../../../../client-api/session/querying/how-to-query.mdx#custom-methods). +* Available _document_query_ methods and extensions are listed [here](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#custom-methods-and-extensions). + + + +## Mutability + +* All Python queries (`query` and `document_query`) are **mutable**. + You may get different results if you try to *reuse* a query. 
+ +* The usage of the `query` method in the following example: + + + +{`query = session.query(object_type=User).where_starts_with("name", "A") + +age_query = query.where_greater_than_or_equal("age", 21) + +eye_query = query.where_equals("eye_color", "blue") `} + + + + will result in the following RQL queries: + + `query: from Users where startsWith(name, 'A')` + + `age_query: from Users where startsWith(name, 'A') and age > 21` + + `eye_query: from Users where startsWith(name, 'A') and eye_color = 'blue'` +* A similar usage with `document_query`: + + + +{`document_query = session.advanced.document_query(object_type=User).where_starts_with("name", "A") + +age_document_query = document_query.where_greater_than_or_equal("age", 21) + +eye_document_query = document_query.where_equals("eye_color", "blue") + +# Here all of the document_query variables have the same reference `} + + + + will result in the following RQL queries: + + `document_query: from Users where startsWith(name, 'A')` + (before creating `age_document_query`) + + `age_document_query: from Users where startsWith(name, 'A') and age > 21` + (before creating `eye_document_query`) + + `eye_document_query: from Users where startsWith(name, 'A') and age > 21 and eye_color = 'blue'` + + All of the queries above are the same query (actually the same instance). + This is an important hint to be aware of if you are going to reuse `document_query`. + + + +## Default Query Operator + +* Queries use `AND` as the default operator. + +* The operator can be replaced by calling `using_default_operator`: + + + +{`session.advanced.document_query(object_type=User).using_default_operator(QueryOperator.OR) `} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-csharp.mdx new file mode 100644 index 0000000000..82cabb2198 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-csharp.mdx @@ -0,0 +1,393 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB Queries can be executed via `Query`, `DocumentQuery` or directly using `RQL`. + Learn more in [Query Overview](../../../../client-api/session/querying/how-to-query.mdx). + +* Unlike the `Query` method, the `DocumentQuery` method does Not support LINQ. + However, it gives you more flexibility and control over the process of building a query, + as it provides low-level querying capabilities. See [Query -vs- DocumentQuery](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx) for all differences.
+ +* In this page: + * [DocumentQuery examples](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#documentquery-examples) + * [Convert between DocumentQuery and Query](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#convert-between-documentquery-and-query) + * [Custom Methods and Extensions](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#custom-methods-and-extensions) + * [Syntax](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#syntax) + + +## DocumentQuery examples + +#### Query collection - no filtering + + + + +{`// Query for all documents from 'Employees' collection +List allEmployees = session.Advanced + .DocumentQuery() + .ToList(); +`} + + + + +{`// Query for all documents from 'Employees' collection +List allEmployees = await asyncSession.Advanced + .AsyncDocumentQuery() + .ToListAsync(); +`} + + + + +{`from "Employees" +`} + + + +#### Query collection - by ID + + + + +{`// Query collection by document ID +Employee employee = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.Id, "employees/1-A") + .FirstOrDefault(); +`} + + + + +{`// Query collection by document ID +Employee employee = await asyncSession.Advanced + .AsyncDocumentQuery() + .WhereEquals(x => x.Id, "employees/1-A") + .FirstOrDefaultAsync(); +`} + + + + +{`from "Employees" where id() == "employees/1-A" +`} + + + +#### Query collection - with filtering + + + + +{`// Query collection - filter by document field +List employees = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.FirstName, "Robert") + .ToList(); +`} + + + + +{`// Query collection - filter by document field +List employees = await asyncSession.Advanced + .AsyncDocumentQuery() + .WhereEquals(x => x.FirstName, "Robert") + .ToListAsync(); +`} + + + + +{`from "Employees" where FirstName == "Robert" +`} + + + +#### Query collection - with paging + + + + +{`// Query collection - page results +List products = session.Advanced + .DocumentQuery() + .Skip(5) // Skip first 5 results + .Take(10) // Load up to 10 entities from 'Products' collection + .ToList(); +`} + + + + +{`// Query collection - page results +List products = await asyncSession.Advanced + .AsyncDocumentQuery() + .Skip(5) // Skip first 5 results + .Take(10) // Load up to 10 entities from 'Products' collection + .ToListAsync(); +`} + + + + +{`from "Products" limit 5, 10 // skip 5, take 10 +`} + + + +#### Query an index + +Please refer to [Querying an index](../../../../indexes/querying/query-index.mdx#sessionadvanceddocumentquery) for examples of querying an index using a DocumentQuery. + + + +## Convert between DocumentQuery and Query + +#### DocumentQuery to Query + +A `DocumentQuery` can be converted to a `Query`. +This enables you to take advantage of all available LINQ extensions provided by RavenDB. 
+ + + + +{`// Define a DocumentQuery +var docQuery = session.Advanced + .DocumentQuery(); // 'IDocumentQuery' instance + +// Convert to Query +var query = docQuery.ToQueryable(); // 'IRavenQueryable' instance + +// Apply any 'IRavenQueryable' LINQ extension +var queryResults = query + .Where(x => x.Freight > 25) + .ToList(); +`} + + + + +{`// Define a DocumentQuery +var docQuery = asyncSession.Advanced + .AsyncDocumentQuery(); // 'IAsyncDocumentQuery' instance + +// Convert to Query +var query = docQuery.ToQueryable(); // 'IRavenQueryable' instance + +// Apply any 'IRavenQueryable' LINQ extension +var queryResults = await query + .Where(x => x.Freight > 25) + .ToListAsync(); +`} + + + + +{`from "Orders" where Freight > 25 +`} + + + + +Convert `DocumentQuery` to `Query` when you need to project data from a related document +in a dynamic query. + + + + +{`// Define a DocumentQuery +var docQuery = session.Advanced + .DocumentQuery() + .WhereGreaterThan("Freight", 25); + +// Convert to Query +var query = docQuery.ToQueryable(); + +// Define the projection on the query using LINQ +var projectedQuery = from order in query + // Load the related document + let company = session.Load(order.Company) + // Define the projection + select new + { + Freight = order.Freight, // data from the Order document + CompanyName = company.Name // data from the related Company document + }; + +// Execute the query +var queryResults = projectedQuery.ToList(); +`} + + + + +{`from "Orders" as o +where o.Freight > 25 +load o.Company as c +select { + Freight: o.Freight, + CompanyName: c.Name +} +`} + + + +#### Query to DocumentQuery + +A `Query` can be converted to a `DocumentQuery`. +This enables you to take advantage of the API available only for _DocumentQuery_. + + + + +{`// Define a Query +var query = session + .Query() + .Where(x => x.Freight > 25); + +// Convert to DocumentQuery +var docQuery = query.ToDocumentQuery(); + +// Apply a DocumentQuery method (e.g. IncludeExplanations is Not available on Query) +docQuery.IncludeExplanations(out Explanations exp); + +// Execute the query +var docQueryResults = docQuery.ToList(); +`} + + + + +{`// Define a Query +var query = asyncSession + .Query() + .Where(x => x.Freight > 25); + +// Convert to DocumentQuery +var docQuery = query.ToAsyncDocumentQuery(); + +// Apply a DocumentQuery method (e.g. IncludeExplanations is Not available on Query) +docQuery.IncludeExplanations(out Explanations exp); + +// Execute the query +var docQueryResults = docQuery.ToListAsync(); +`} + + + + +{`from "Orders" +where Freight > 25 +include explanations() +`} + + + + + + +## Custom Methods and Extensions + + + +Several methods share the same functionality as their `Query` counterparts. +Refer to the corresponding documentation articles, marked with links starting with "[Query]" in the list below. 
+ + + +Available custom methods and extensions: + +- AddOrder +- [Query] [AfterQueryExecuted](../../../../client-api/session/querying/how-to-customize-query.mdx#afterqueryexecuted) +- [Query] [AfterStreamExecuted](../../../../client-api/session/querying/how-to-customize-query.mdx#afterstreamexecuted) +- [Query] [AggregateBy](../../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx) +- [Query] [AggregateUsing](../../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx) +- AndAlso +- [Query] [BeforeQueryExecuted](../../../../client-api/session/querying/how-to-customize-query.mdx#beforequeryexecuted) +- [Boost](../../../../client-api/session/querying/text-search/boost-search-results.mdx) +- CloseSubclause +- CmpXchg +- ContainsAll +- ContainsAny +- [Count](../../../../client-api/session/querying/how-to-count-query-results.mdx) +- [CountLazily](../../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-count-query) +- Distinct +- ExplainScores +- First +- FirstOrDefault +- Fuzzy +- GetIndexQuery +- GetQueryResult +- [GroupBy](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx) +- [GroupByArrayValues](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-values) +- [GroupByArrayContent](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-content) +- [Query] [Highlight](../../../../client-api/session/querying/text-search/highlight-query-results.mdx) +- Include +- IncludeExplanations +- Intersect +- InvokeAfterQueryExecuted +- InvokeAfterStreamExecuted +- [Query] [Lazily](../../../../client-api/session/querying/how-to-perform-queries-lazily.mdx) +- [LongCount](../../../../client-api/session/querying/how-to-count-query-results.mdx) +- MoreLikeThis +- NegateNext +- [Not](../../../../client-api/session/querying/document-query/how-to-use-not-operator.mdx) +- [Query] [NoCaching](../../../../client-api/session/querying/how-to-customize-query.mdx#nocaching) +- [Query] [NoTracking](../../../../client-api/session/querying/how-to-customize-query.mdx#notracking) +- OfType +- OpenSubclause +- [OrderBy](../../../../client-api/session/querying/sort-query-results.mdx) +- [OrderByDescending](../../../../client-api/session/querying/sort-query-results.mdx) +- [Query] [OrderByDistance](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance) +- [Query] [OrderByDistanceDescending](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedesc) +- [OrderByScore](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score) +- [OrderByScoreDescending](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score) +- OrElse +- [Query] [Projection](../../../../client-api/session/querying/how-to-customize-query.mdx#projection) +- Proximity +- [Query] [RandomOrdering](../../../../client-api/session/querying/how-to-customize-query.mdx#randomordering) +- [Query] [RelatesToShape](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape) +- [Search](../../../../client-api/session/querying/text-search/full-text-search.mdx) +- SelectFields +- SelectTimeSeries +- Single +- SingleOrDefault +- Skip +- [Query] [Spatial](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx) +- Statistics +- [SuggestUsing](../../../../client-api/session/querying/how-to-work-with-suggestions.mdx) +- Take +- [Query] 
[Timings](../../../../client-api/session/querying/how-to-customize-query.mdx#timings) +- UsingDefaultOperator +- [Query] [WaitForNonStaleResults](../../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults) +- Where +- WhereBetween +- [WhereEndsWith](../../../../client-api/session/querying/text-search/ends-with-query.mdx) +- WhereEquals +- [WhereExists](../../../../client-api/session/querying/how-to-filter-by-field.mdx) +- WhereGreaterThan +- WhereGreaterThanOrEqual +- WhereIn +- WhereLessThan +- WhereLessThanOrEqual +- [WhereLucene](../../../../client-api/session/querying/document-query/how-to-use-lucene.mdx) +- WhereNotEquals +- [WhereRegex](../../../../client-api/session/querying/text-search/using-regex.mdx) +- [WhereStartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx) +- WithinRadiusOf + + + +## Syntax + +The available `DocumentQuery` overloads are listed in this [Syntax section](../../../../client-api/session/querying/how-to-query.mdx#syntax) in the [Query Overview](../../../../client-api/session/querying/how-to-query.mdx). + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-java.mdx new file mode 100644 index 0000000000..b571070327 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-java.mdx @@ -0,0 +1,171 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Querying capabilities can be accessed via the `documentQuery` method in advanced session operations. +`DocumentQuery` gives you more flexibility and control over the process of building a query. + +## Syntax + + + +{` IDocumentQuery documentQuery(Class clazz); + + IDocumentQuery documentQuery(Class clazz, String indexName, String collectionName, boolean isMapReduce); +`} + + + +| Parameters | | | +| ------------- | ------------- |-------------------------------------------------------------------------------------------------------| +| **indexName** | String | Name of an index to perform a query on
(mutually exclusive with **collectionName**) | +| **collectionName** | String | Name of a collection to perform a query on
(mutually exclusive with **indexName**) | +| **isMapReduce** | bool | Indicates if a queried index is a map-reduce index
(modifies how we treat identifier properties) | + +| Return Value | | +| ------------- | ----- | +| **IDocumentQuery** | Instance implementing IDocumentQuery interface containing additional query methods and extensions | + +## Example I - Basic + + + +{`// load all entities from 'Employees' collection +List employees = session + .advanced() + .documentQuery(Employee.class) + .toList(); +`} + + + + + +{`// load all entities from 'Employees' collection +// where firstName equals 'Robert' +List employees = session + .advanced() + .documentQuery(Employee.class) + .whereEquals("FirstName", "Robert") + .toList(); +`} + + + +## Example II - Querying Specified Index + + + +{`// load all entities from 'Employees' collection +// where firstName equals 'Robert' +// using 'My/Custom/Index' +List employees = session + .advanced() + .documentQuery(Employee.class, "My/Custom/Index", null, false) + .whereEquals("FirstName", "Robert") + .toList(); +`} + + + +or + + + +{`// load all entities from 'Employees' collection +// where firstName equals 'Robert' +// using 'My/Custom/Index' +List employees = session + .advanced() + .documentQuery(Employee.class, MyCustomIndex.class) + .whereEquals("FirstName", "Robert") + .toList(); +`} + + + +## Custom Methods and Extensions + + +Functionality of most of the methods match the functionality of their `query` counterparts and therefore will not be described again. Please refer to the appropriate counterpart documentation articles. Links starting with `[query]` are marking those articles. + + +Available methods: + +- addAfterQueryExecutedListener +- addBeforeQueryExecutedListener +- addOrder +- addParameter +- [Query] [aggregateBy](../../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx) +- [Query] [aggregateUsing](../../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx) +- andAlso +- boost +- closeSubclause +- containsAll +- containsAny +- count +- countLazily +- distinct +- first +- firstOrDefault +- fuzzy +- [groupBy](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx) +- [groupByArrayValues](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-values) +- [groupByArrayContent](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-content) +- include +- intersect +- invokeAfterQueryExecuted +- invokeAfterStreamExecuted +- [Query] [lazily](../../../../client-api/session/querying/how-to-perform-queries-lazily.mdx) +- moreLikeThis +- negateNext +- [Query] [noCaching](../../../../client-api/session/querying/how-to-customize-query.mdx#nocaching) +- [not](../../../../client-api/session/querying/document-query/how-to-use-not-operator.mdx) +- [Query] [noTracking](../../../../client-api/session/querying/how-to-customize-query.mdx#notracking) +- ofType +- openSubclause +- orderBy +- orderByDescending +- [Query] [orderByDistance](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance) +- [Query] [orderByDistanceDescending](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedescending) +- orderByScore +- orderByScoreDescending +- orElse +- proximity +- randomOrdering +- [Query] [relatesToShape](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#example-ii) +- search +- selectFields +- single +- singleOrDefault +- skip +- [Query] [spatial](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial) +- statistics +- suggestUsing +- take +- 
+- usingDefaultOperator
+- [Query] [waitForNonStaleResults](../../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults)
+- [Query] [waitForNonStaleResultsAsOf](../../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresultsasof)
+- where
+- whereBetween
+- whereEndsWith
+- whereEquals
+- [whereExists](../../../../client-api/session/querying/how-to-filter-by-field.mdx)
+- whereGreaterThan
+- whereGreaterThanOrEqual
+- whereIn
+- whereLessThan
+- whereLessThanOrEqual
+- [whereLucene](../../../../client-api/session/querying/document-query/how-to-use-lucene.mdx)
+- whereNotEquals
+- [whereRegex](../../../../client-api/session/querying/text-search/using-regex.mdx)
+- whereStartsWith
+- withinRadiusOf
+
+
+## Remarks
+
+By default, if the page size is not specified, all matching records are retrieved from the database.
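+
+A page size can be set explicitly with `take`, typically combined with `skip`
+(a minimal sketch using methods from the list above; the numbers are illustrative):
+
+{`// retrieve only the third page of 10 results,
+// instead of all matching records
+List<Employee> employees = session
+    .advanced()
+    .documentQuery(Employee.class)
+    .skip(20)  // skip the first two pages (2 x 10 results)
+    .take(10)  // set the page size to 10
+    .toList();
+`}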
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-nodejs.mdx
new file mode 100644
index 0000000000..0a56ea9425
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-nodejs.mdx
@@ -0,0 +1,17 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In contrast to the .NET client,
+  the Node.js client provides a **unified API** for querying documents via the `session.query()` method.
+
+* The `query` method is essentially a shorthand for invoking the `documentQuery` method.
+  See examples of those equivalent calls in [query vs documentQuery](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx).
+
+* All available methods for the session's `query` method are listed [here](../../../../client-api/session/querying/how-to-query.mdx#query-api).
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-php.mdx
new file mode 100644
index 0000000000..3623e4fe07
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-php.mdx
@@ -0,0 +1,201 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* RavenDB queries can be executed via `query`, `documentQuery` or directly using `RQL`.
+  Learn more in [Query Overview](../../../../client-api/session/querying/how-to-query.mdx).
+
+* See [Query -vs- documentQuery](../../../../client-api/session/querying/document-query/query-vs-document-query.mdx)
+  for additional details.
+
+* In this page:
+  * [documentQuery examples](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#documentquery-examples)
+  * [Custom methods](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#custom-methods)
+  * [Syntax](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#syntax)
+
+
+## documentQuery examples
+
+#### Query collection - no filtering
+
+
+
+
+{`// load all entities from 'Employees' collection
+$employees = $session
+    ->advanced()
+    ->documentQuery(Employee::class)
+    ->toList();
+`}
+
+
+
+
+{`from "Employees"
+`}
+
+
+
+#### Query collection - with filtering
+
+
+
+
+{`// load all entities from 'Employees' collection
+// where firstName equals 'Robert'
+$employees = $session
+    ->advanced()
+    ->documentQuery(Employee::class)
+    ->whereEquals("FirstName", "Robert")
+    ->toList();
+`}
+
+
+
+
+{`from "Employees" where FirstName == "Robert"
+`}
+
+
+
+#### Query an index
+
+* Using a path string
+
+
+{`// load all entities from 'Employees' collection
+// where firstName equals 'Robert'
+// using 'My/Custom/Index'
+$employees = $session
+    ->advanced()
+    ->documentQuery(Employee::class, "My/Custom/Index", null, false)
+    ->whereEquals("FirstName", "Robert")
+    ->toList();
+`}
+
+
+
+* Using an index class
+
+
+{`// load all entities from 'Employees' collection
+// where firstName equals 'Robert'
+// using MyCustomIndex::class
+$employees = $session
+    ->advanced()
+    ->documentQuery(Employee::class, MyCustomIndex::class)
+    ->whereEquals("FirstName", "Robert")
+    ->toList();
+`}
+
+
+
+
+Please refer to [Querying an index](../../../../indexes/querying/query-index.mdx#sessionadvanceddocumentquery) for examples of querying an index using `documentQuery`.
+
+
+
+
+## Custom Methods
+
+
+
+Several methods share the same functionality as their `query` counterparts.
+Refer to the corresponding documentation articles, marked with links starting with "[query]" in the list below.
+
+
+
+Available custom methods:
+
+- addOrder
+- [query] [afterQueryExecuted](../../../../client-api/session/querying/how-to-customize-query.mdx#afterqueryexecuted)
+- [query] [afterStreamExecuted](../../../../client-api/session/querying/how-to-customize-query.mdx#afterstreamexecuted)
+- [query] [aggregateBy](../../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx)
+- [query] [aggregateUsing](../../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx)
+- andAlso
+- [query] [beforeQueryExecuted](../../../../client-api/session/querying/how-to-customize-query.mdx#beforequeryexecuted)
+- [boost](../../../../client-api/session/querying/text-search/boost-search-results.mdx)
+- closeSubclause
+- cmpXchg
+- containsAll
+- containsAny
+- [count](../../../../client-api/session/querying/how-to-count-query-results.mdx)
+- [countLazily](../../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-count-query)
+- distinct
+- explainScores
+- first
+- firstOrDefault
+- fuzzy
+- getIndexQuery
+- getQueryResult
+- [groupBy](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx)
+- [groupByArrayValues](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-values)
+- [groupByArrayContent](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-content)
+- [query] [highlight](../../../../client-api/session/querying/text-search/highlight-query-results.mdx)
+- include
+- includeExplanations
+- intersect
+- invokeAfterQueryExecuted
+- invokeAfterStreamExecuted
+- [query] [lazily](../../../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
+- [longCount](../../../../client-api/session/querying/how-to-count-query-results.mdx)
+- moreLikeThis
+- negateNext
+- [not](../../../../client-api/session/querying/document-query/how-to-use-not-operator.mdx)
+- [query] [noCaching](../../../../client-api/session/querying/how-to-customize-query.mdx#nocaching)
+- [query] [noTracking](../../../../client-api/session/querying/how-to-customize-query.mdx#notracking)
+- ofType
+- openSubclause
+- [orderBy](../../../../client-api/session/querying/sort-query-results.mdx)
+- [orderByDescending](../../../../client-api/session/querying/sort-query-results.mdx)
+- [query] [orderByDistance](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance)
+- [query] [orderByDistanceDescending](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedesc)
+- [orderByScore](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+- [orderByScoreDescending](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+- orElse
+- [query] [projection](../../../../client-api/session/querying/how-to-customize-query.mdx#projection)
+- proximity
+- [query] [randomOrdering](../../../../client-api/session/querying/how-to-customize-query.mdx#randomordering)
+- [query] [relatesToShape](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape)
+- [search](../../../../client-api/session/querying/text-search/full-text-search.mdx)
+- selectFields
+- selectTimeSeries
+- single
+- singleOrDefault
+- skip
+- [query] [spatial](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx)
+- statistics
+- [suggestUsing](../../../../client-api/session/querying/how-to-work-with-suggestions.mdx)
+- take
+- [query] [timings](../../../../client-api/session/querying/how-to-customize-query.mdx#timings)
+- usingDefaultOperator
+- [query] [waitForNonStaleResults](../../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults)
+- where
+- whereBetween
+- [whereEndsWith](../../../../client-api/session/querying/text-search/ends-with-query.mdx)
+- whereEquals
+- [whereExists](../../../../client-api/session/querying/how-to-filter-by-field.mdx)
+- whereGreaterThan
+- whereGreaterThanOrEqual
+- whereIn
+- whereLessThan
+- whereLessThanOrEqual
+- [whereLucene](../../../../client-api/session/querying/document-query/how-to-use-lucene.mdx)
+- whereNotEquals
+- [whereRegex](../../../../client-api/session/querying/text-search/using-regex.mdx)
+- [whereStartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx)
+- withinRadiusOf
+
+
+
+## Syntax
+
+The definition for `documentQuery` is listed in the [Syntax section](../../../../client-api/session/querying/how-to-query.mdx#syntax)
+of the [Query Overview](../../../../client-api/session/querying/how-to-query.mdx).
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-python.mdx
new file mode 100644
index 0000000000..afbcde05ae
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/_what-is-document-query-python.mdx
@@ -0,0 +1,211 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* RavenDB queries can be executed via `query`, `document_query` or directly using `RQL`.
+  Learn more in [Query Overview](../../../../client-api/session/querying/how-to-query.mdx).
+* In the Python client API, `query` methods and their equivalent `document_query` methods
+  provide the same functionality. (This is different from the C# client implementation,
+  which often provides different functionality for `Query` methods and their `DocumentQuery`
+  counterparts.)
+  The Python documentation therefore often provides `query` usage samples without
+  adding equivalent `document_query` examples.
+
+* In this page:
+  * [`document_query` Examples](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#document_query-examples)
+  * [Convert between `document_query` and `query`](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#convert-between-document_query-and-query)
+  * [Available Custom Methods and Extensions](../../../../client-api/session/querying/document-query/what-is-document-query.mdx#available-custom-methods-and-extensions)
+
+
+## `document_query` Examples
+
+#### Query collection - no filtering
+
+
+
+
+{`all_employees = list(session.advanced.document_query(object_type=Employee))
+`}
+
+
+
+
+{`from "Employees"
+`}
+
+
+
+#### Query collection - by ID
+
+
+
+
+{`# Query collection by document ID
+employee = (
+    session.advanced.document_query(object_type=Employee)
+    .where_equals("Id", "employees/1-A")
+    .first()
+)
+`}
+
+
+
+
+{`from "Employees" where id() == "employees/1-A"
+`}
+
+
+
+#### Query collection - with filtering
+
+
+
+
+{`# Query collection - filter by document field
+employees = list(
+    session.advanced.document_query(object_type=Employee).where_equals("first_name", "Robert")
+)
+`}
+
+
+
+
+{`from "Employees" where FirstName == "Robert"
+`}
+
+
+
+#### Query collection - with paging
+
+
+
+
+{`# Query collection - page results
+products = list(session.advanced.document_query(object_type=Product).skip(5).take(10))
+`}
+
+
+
+
+{`from "Products" limit 5, 10 // skip 5, take 10
+`}
+
+
+
+#### Query an index
+
+Please refer to [Querying an index](../../../../indexes/querying/query-index.mdx#sessionadvanceddocumentquery) for examples of querying an index using `document_query`.
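+
+A minimal sketch of such a query (the `index_name` parameter and the
+'My/Custom/Index' index name are assumptions for illustration; see the
+linked article above for authoritative examples):
+
+{`# Query the index 'My/Custom/Index' directly with document_query
+employees = list(
+    session.advanced.document_query(object_type=Employee, index_name="My/Custom/Index")
+    .where_equals("first_name", "Robert")
+)
+`}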
+
+
+## Convert between `document_query` and `query`
+
+A `document_query` can be converted to a `query`.
+
+
+
+
+{`# Define a document_query
+doc_query = session.advanced.document_query(object_type=Order)  # 'document_query' instance
+
+query_results = list(doc_query.where_greater_than("freight", 25))
+`}
+
+
+
+
+{`from "Orders" where Freight > 25
+`}
+
+
+
+
+
+
+## Available Custom Methods and Extensions
+
+- add_order
+- [query] [after_query_executed](../../../../client-api/session/querying/how-to-customize-query.mdx#afterqueryexecuted)
+- [query] [after_stream_executed](../../../../client-api/session/querying/how-to-customize-query.mdx#afterstreamexecuted)
+- [query] [aggregate_by](../../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx)
+- [query] [aggregate_using](../../../../client-api/session/querying/how-to-perform-a-faceted-search.mdx)
+- and_also
+- [query] [before_query_executed](../../../../client-api/session/querying/how-to-customize-query.mdx#beforequeryexecuted)
+- [boost](../../../../client-api/session/querying/text-search/boost-search-results.mdx)
+- close_subclause
+- cmp_xchg
+- contains_all
+- contains_any
+- [count](../../../../client-api/session/querying/how-to-count-query-results.mdx)
+- [count_lazily](../../../../client-api/session/querying/how-to-perform-queries-lazily.mdx#lazy-count-query)
+- distinct
+- explain_scores
+- first
+- first_or_default
+- fuzzy
+- get_index_query
+- get_query_result
+- [group_by](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx)
+- [group_by_array_values](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-values)
+- [group_by_array_content](../../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-content)
+- [query] [highlight](../../../../client-api/session/querying/text-search/highlight-query-results.mdx)
+- include
+- include_explanations
+- intersect
+- invoke_after_query_executed
+- invoke_after_stream_executed
+- [query] [lazily](../../../../client-api/session/querying/how-to-perform-queries-lazily.mdx)
+- [long_count](../../../../client-api/session/querying/how-to-count-query-results.mdx)
+- more_like_this
+- negate_next
+- [not](../../../../client-api/session/querying/document-query/how-to-use-not-operator.mdx)
+- [query] [no_caching](../../../../client-api/session/querying/how-to-customize-query.mdx#nocaching)
+- [query] [no_tracking](../../../../client-api/session/querying/how-to-customize-query.mdx#notracking)
+- of_type
+- open_subclause
+- [order_by](../../../../client-api/session/querying/sort-query-results.mdx)
+- [order_by_descending](../../../../client-api/session/querying/sort-query-results.mdx)
+- [query] [order_by_distance](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistance)
+- [query] [order_by_distance_descending](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#orderbydistancedesc)
+- [order_by_score](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+- [order_by_score_descending](../../../../client-api/session/querying/sort-query-results.mdx#order-by-score)
+- or_else
+- [query] [projection](../../../../client-api/session/querying/how-to-customize-query.mdx#projection)
+- proximity
+- [query] [random_ordering](../../../../client-api/session/querying/how-to-customize-query.mdx#randomordering)
+- [query] [relates_to_shape](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape)
+- [search](../../../../client-api/session/querying/text-search/full-text-search.mdx)
+- select_fields
+- select_time_series
+- single
+- single_or_default
+- skip
+- [query] [spatial](../../../../client-api/session/querying/how-to-make-a-spatial-query.mdx)
+- statistics
+- [suggest_using](../../../../client-api/session/querying/how-to-work-with-suggestions.mdx)
+- take
+- [query] [timings](../../../../client-api/session/querying/how-to-customize-query.mdx#timings)
+- using_default_operator
+- [query] [wait_for_non_stale_results](../../../../client-api/session/querying/how-to-customize-query.mdx#waitfornonstaleresults)
+- where
+- where_between
+- [where_ends_with](../../../../client-api/session/querying/text-search/ends-with-query.mdx)
+- where_equals
+- [where_exists](../../../../client-api/session/querying/how-to-filter-by-field.mdx)
+- where_greater_than
+- where_greater_than_or_equal
+- where_in
+- where_less_than
+- where_less_than_or_equal
+- [where_lucene](../../../../client-api/session/querying/document-query/how-to-use-lucene.mdx)
+- where_not_equals
+- [where_regex](../../../../client-api/session/querying/text-search/using-regex.mdx)
+- [where_starts_with](../../../../client-api/session/querying/text-search/starts-with-query.mdx)
+- within_radius_of
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/how-to-use-lucene.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/how-to-use-lucene.mdx
new file mode 100644
index 0000000000..1ad1646526
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/how-to-use-lucene.mdx
@@ -0,0 +1,37 @@
+---
+title: "Session: Querying: How to Use Lucene"
+hide_table_of_contents: true
+sidebar_label: How to Use Lucene
+sidebar_position: 3
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import HowToUseLuceneCsharp from './_how-to-use-lucene-csharp.mdx';
+import HowToUseLuceneJava from './_how-to-use-lucene-java.mdx';
+import HowToUseLuceneNodejs from './_how-to-use-lucene-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/how-to-use-not-operator.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/how-to-use-not-operator.mdx
new file mode 100644
index 0000000000..66661013cb
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/how-to-use-not-operator.mdx
@@ -0,0 +1,38 @@
+---
+title: "Session: Querying: How to Use NOT Operator"
+hide_table_of_contents: true
+sidebar_label: How to Use NOT Operator
+sidebar_position: 2
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import HowToUseNotOperatorCsharp from './_how-to-use-not-operator-csharp.mdx';
+import HowToUseNotOperatorJava from './_how-to-use-not-operator-java.mdx';
+import HowToUseNotOperatorNodejs from './_how-to-use-not-operator-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/query-vs-document-query.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/query-vs-document-query.mdx
new file mode 100644
index 0000000000..76c996b36f
--- /dev/null
+++ 
b/versioned_docs/version-7.1/client-api/session/querying/document-query/query-vs-document-query.mdx @@ -0,0 +1,55 @@ +--- +title: "Query vs DocumentQuery" +hide_table_of_contents: true +sidebar_label: Query vs DocumentQuery +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import QueryVsDocumentQueryCsharp from './_query-vs-document-query-csharp.mdx'; +import QueryVsDocumentQueryJava from './_query-vs-document-query-java.mdx'; +import QueryVsDocumentQueryPython from './_query-vs-document-query-python.mdx'; +import QueryVsDocumentQueryPhp from './_query-vs-document-query-php.mdx'; +import QueryVsDocumentQueryNodejs from './_query-vs-document-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/document-query/what-is-document-query.mdx b/versioned_docs/version-7.1/client-api/session/querying/document-query/what-is-document-query.mdx new file mode 100644 index 0000000000..7dad53dadb --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/document-query/what-is-document-query.mdx @@ -0,0 +1,53 @@ +--- +title: "What is a Document Query?" +hide_table_of_contents: true +sidebar_label: What is a Document Query +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import WhatIsDocumentQueryCsharp from './_what-is-document-query-csharp.mdx'; +import WhatIsDocumentQueryJava from './_what-is-document-query-java.mdx'; +import WhatIsDocumentQueryPython from './_what-is-document-query-python.mdx'; +import WhatIsDocumentQueryPhp from './_what-is-document-query-php.mdx'; +import WhatIsDocumentQueryNodejs from './_what-is-document-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-count-query-results.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-count-query-results.mdx new file mode 100644 index 0000000000..b72461e9d6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-count-query-results.mdx @@ -0,0 +1,49 @@ +--- +title: "Count Query Results" +hide_table_of_contents: true +sidebar_label: Count Query Results +sidebar_position: 11 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToCountQueryResultsCsharp from './_how-to-count-query-results-csharp.mdx'; +import HowToCountQueryResultsPython from './_how-to-count-query-results-python.mdx'; +import HowToCountQueryResultsPhp from './_how-to-count-query-results-php.mdx'; +import HowToCountQueryResultsNodejs from './_how-to-count-query-results-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-customize-query.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-customize-query.mdx new file mode 100644 index 0000000000..785654adc4 --- /dev/null +++ 
b/versioned_docs/version-7.1/client-api/session/querying/how-to-customize-query.mdx @@ -0,0 +1,56 @@ +--- +title: "Customize Query" +hide_table_of_contents: true +sidebar_label: Customize Query +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToCustomizeQueryCsharp from './_how-to-customize-query-csharp.mdx'; +import HowToCustomizeQueryJava from './_how-to-customize-query-java.mdx'; +import HowToCustomizeQueryPython from './_how-to-customize-query-python.mdx'; +import HowToCustomizeQueryPhp from './_how-to-customize-query-php.mdx'; +import HowToCustomizeQueryNodejs from './_how-to-customize-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-filter-by-field.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-filter-by-field.mdx new file mode 100644 index 0000000000..69346bcf12 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-filter-by-field.mdx @@ -0,0 +1,51 @@ +--- +title: "Filter by Field Presence" +hide_table_of_contents: true +sidebar_label: Filter by Field Presence +sidebar_position: 12 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToFilterByFieldCsharp from './_how-to-filter-by-field-csharp.mdx'; +import HowToFilterByFieldJava from './_how-to-filter-by-field-java.mdx'; +import HowToFilterByFieldPython from './_how-to-filter-by-field-python.mdx'; +import HowToFilterByFieldPhp from './_how-to-filter-by-field-php.mdx'; +import HowToFilterByFieldNodejs from './_how-to-filter-by-field-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-filter-by-non-existing-field.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-filter-by-non-existing-field.mdx new file mode 100644 index 0000000000..7be6726d09 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-filter-by-non-existing-field.mdx @@ -0,0 +1,54 @@ +--- +title: "Filter by Non-Existing Field" +hide_table_of_contents: true +sidebar_label: Filter by Non-Existing Field +sidebar_position: 13 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToFilterByNonExistingFieldCsharp from './_how-to-filter-by-non-existing-field-csharp.mdx'; +import HowToFilterByNonExistingFieldPython from './_how-to-filter-by-non-existing-field-python.mdx'; +import HowToFilterByNonExistingFieldPhp from './_how-to-filter-by-non-existing-field-php.mdx'; +import HowToFilterByNonExistingFieldNodejs from './_how-to-filter-by-non-existing-field-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-get-query-statistics.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-get-query-statistics.mdx new file mode 100644 index 0000000000..d7dd05dc54 --- 
/dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-get-query-statistics.mdx @@ -0,0 +1,49 @@ +--- +title: "Get Query Statistics" +hide_table_of_contents: true +sidebar_label: Get Query Statistics +sidebar_position: 10 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToGetQueryStatisticsCsharp from './_how-to-get-query-statistics-csharp.mdx'; +import HowToGetQueryStatisticsJava from './_how-to-get-query-statistics-java.mdx'; +import HowToGetQueryStatisticsPython from './_how-to-get-query-statistics-python.mdx'; +import HowToGetQueryStatisticsPhp from './_how-to-get-query-statistics-php.mdx'; +import HowToGetQueryStatisticsNodejs from './_how-to-get-query-statistics-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-make-a-spatial-query.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-make-a-spatial-query.mdx new file mode 100644 index 0000000000..2201b59431 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-make-a-spatial-query.mdx @@ -0,0 +1,53 @@ +--- +title: "Make a Spatial Query" +hide_table_of_contents: true +sidebar_label: Make a Spatial Query +sidebar_position: 9 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToMakeASpatialQueryCsharp from './_how-to-make-a-spatial-query-csharp.mdx'; +import HowToMakeASpatialQueryJava from './_how-to-make-a-spatial-query-java.mdx'; +import HowToMakeASpatialQueryPython from './_how-to-make-a-spatial-query-python.mdx'; +import HowToMakeASpatialQueryNodejs from './_how-to-make-a-spatial-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-perform-a-faceted-search.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-perform-a-faceted-search.mdx new file mode 100644 index 0000000000..840085510f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-perform-a-faceted-search.mdx @@ -0,0 +1,50 @@ +--- +title: "Perform a Faceted (Aggregated) Search" +hide_table_of_contents: true +sidebar_label: Perform Faceted Search +sidebar_position: 8 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToPerformAFacetedSearchCsharp from './_how-to-perform-a-faceted-search-csharp.mdx'; +import HowToPerformAFacetedSearchJava from './_how-to-perform-a-faceted-search-java.mdx'; +import HowToPerformAFacetedSearchPython from './_how-to-perform-a-faceted-search-python.mdx'; +import HowToPerformAFacetedSearchPhp from './_how-to-perform-a-faceted-search-php.mdx'; +import HowToPerformAFacetedSearchNodejs from './_how-to-perform-a-faceted-search-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-perform-group-by-query.mdx 
b/versioned_docs/version-7.1/client-api/session/querying/how-to-perform-group-by-query.mdx new file mode 100644 index 0000000000..a776edcc7a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-perform-group-by-query.mdx @@ -0,0 +1,50 @@ +--- +title: "Perform Dynamic Group By Query" +hide_table_of_contents: true +sidebar_label: Perform Group By Query +sidebar_position: 7 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToPerformGroupByQueryCsharp from './_how-to-perform-group-by-query-csharp.mdx'; +import HowToPerformGroupByQueryJava from './_how-to-perform-group-by-query-java.mdx'; +import HowToPerformGroupByQueryPython from './_how-to-perform-group-by-query-python.mdx'; +import HowToPerformGroupByQueryPhp from './_how-to-perform-group-by-query-php.mdx'; +import HowToPerformGroupByQueryNodejs from './_how-to-perform-group-by-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-perform-queries-lazily.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-perform-queries-lazily.mdx new file mode 100644 index 0000000000..d0964e8a5d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-perform-queries-lazily.mdx @@ -0,0 +1,50 @@ +--- +title: "Perform a Lazy Query" +hide_table_of_contents: true +sidebar_label: Perform a Lazy Query +sidebar_position: 15 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToPerformQueriesLazilyCsharp from './_how-to-perform-queries-lazily-csharp.mdx'; +import HowToPerformQueriesLazilyJava from './_how-to-perform-queries-lazily-java.mdx'; +import HowToPerformQueriesLazilyPython from './_how-to-perform-queries-lazily-python.mdx'; +import HowToPerformQueriesLazilyPhp from './_how-to-perform-queries-lazily-php.mdx'; +import HowToPerformQueriesLazilyNodejs from './_how-to-perform-queries-lazily-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-project-query-results.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-project-query-results.mdx new file mode 100644 index 0000000000..7955817c5c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-project-query-results.mdx @@ -0,0 +1,59 @@ +--- +title: "Project Query Results" +hide_table_of_contents: true +sidebar_label: Project Query Results +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToProjectQueryResultsCsharp from './_how-to-project-query-results-csharp.mdx'; +import HowToProjectQueryResultsJava from './_how-to-project-query-results-java.mdx'; +import HowToProjectQueryResultsPython from './_how-to-project-query-results-python.mdx'; +import HowToProjectQueryResultsPhp from './_how-to-project-query-results-php.mdx'; +import HowToProjectQueryResultsNodejs from './_how-to-project-query-results-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", 
"python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-query.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-query.mdx new file mode 100644 index 0000000000..f2d4e41bc7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-query.mdx @@ -0,0 +1,56 @@ +--- +title: "Query Overview" +hide_table_of_contents: true +sidebar_label: Query Overview +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToQueryCsharp from './_how-to-query-csharp.mdx'; +import HowToQueryPython from './_how-to-query-python.mdx'; +import HowToQueryPhp from './_how-to-query-php.mdx'; +import HowToQueryNodejs from './_how-to-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-stream-query-results.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-stream-query-results.mdx new file mode 100644 index 0000000000..e9f1b23999 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-stream-query-results.mdx @@ -0,0 +1,41 @@ +--- +title: "Stream Query Results" +hide_table_of_contents: true +sidebar_label: Stream Query Results +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToStreamQueryResultsCsharp from './_how-to-stream-query-results-csharp.mdx'; +import HowToStreamQueryResultsJava from './_how-to-stream-query-results-java.mdx'; +import HowToStreamQueryResultsNodejs from './_how-to-stream-query-results-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-use-intersect.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-use-intersect.mdx new file mode 100644 index 0000000000..a2a2cb5454 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-use-intersect.mdx @@ -0,0 +1,50 @@ +--- +title: "Using Intersect" +hide_table_of_contents: true +sidebar_label: Using Intersect +sidebar_position: 14 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToUseIntersectCsharp from './_how-to-use-intersect-csharp.mdx'; +import HowToUseIntersectJava from './_how-to-use-intersect-java.mdx'; +import HowToUseIntersectPython from './_how-to-use-intersect-python.mdx'; +import HowToUseIntersectPhp from './_how-to-use-intersect-php.mdx'; +import HowToUseIntersectNodejs from './_how-to-use-intersect-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-use-morelikethis.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-use-morelikethis.mdx new file mode 100644 index 0000000000..10c0412679 --- /dev/null +++ 
b/versioned_docs/version-7.1/client-api/session/querying/how-to-use-morelikethis.mdx @@ -0,0 +1,50 @@ +--- +title: "Using MoreLikeThis" +hide_table_of_contents: true +sidebar_label: Using MoreLikeThis +sidebar_position: 17 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToUseMorelikethisCsharp from './_how-to-use-morelikethis-csharp.mdx'; +import HowToUseMorelikethisJava from './_how-to-use-morelikethis-java.mdx'; +import HowToUseMorelikethisPython from './_how-to-use-morelikethis-python.mdx'; +import HowToUseMorelikethisPhp from './_how-to-use-morelikethis-php.mdx'; +import HowToUseMorelikethisNodejs from './_how-to-use-morelikethis-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/how-to-work-with-suggestions.mdx b/versioned_docs/version-7.1/client-api/session/querying/how-to-work-with-suggestions.mdx new file mode 100644 index 0000000000..abf48d66b7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/how-to-work-with-suggestions.mdx @@ -0,0 +1,51 @@ +--- +title: "Query for Suggestions" +hide_table_of_contents: true +sidebar_label: Query for Suggestions +sidebar_position: 16 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HowToWorkWithSuggestionsJava from './_how-to-work-with-suggestions-java.mdx'; +import HowToWorkWithSuggestionsCsharp from './_how-to-work-with-suggestions-csharp.mdx'; +import HowToWorkWithSuggestionsPython from './_how-to-work-with-suggestions-python.mdx'; +import HowToWorkWithSuggestionsPhp from './_how-to-work-with-suggestions-php.mdx'; +import HowToWorkWithSuggestionsNodejs from './_how-to-work-with-suggestions-nodejs.mdx'; + +export const supportedLanguages = ["java", "csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/sort-query-results.mdx b/versioned_docs/version-7.1/client-api/session/querying/sort-query-results.mdx new file mode 100644 index 0000000000..4ec757a5e5 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/sort-query-results.mdx @@ -0,0 +1,48 @@ +--- +title: "Sort Query Results" +hide_table_of_contents: true +sidebar_label: Sort Query Results +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SortQueryResultsCsharp from './_sort-query-results-csharp.mdx'; +import SortQueryResultsPython from './_sort-query-results-python.mdx'; +import SortQueryResultsPhp from './_sort-query-results-php.mdx'; +import SortQueryResultsNodejs from './_sort-query-results-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-csharp.mdx new file mode 100644 index 0000000000..5d3341eb3e --- /dev/null +++ 
b/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-csharp.mdx
@@ -0,0 +1,193 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When querying with some filtering conditions,
+  a basic score is calculated for each document in the results by the underlying engine.
+
+* Providing a boost value to some fields allows you to prioritize the resulting documents.
+  The boost value is integrated with the basic score, making the document rank higher.
+
+* Boosting can be achieved in the following ways:
+
+  * **At query time**:
+    Apply a boost factor to searched terms at query time - as described in this article.
+
+  * **Via index definition**:
+    Apply a boost factor in your index definition - see this [boosting](../../../../indexes/boosting.mdx) indexing article.
+
+* The automatic ordering of the results by the score is configurable.
+  Learn more here: [automatic score-based ordering](../../../../indexes/boosting.mdx#automatic-score-based-ordering)
+
+* The calculated score details of the results can be retrieved if needed.
+  Learn more here: [get resulting score](../../../../client-api/session/querying/sort-query-results.mdx#get-resulting-score)
+
+* In this page:
+
+  * [Boost results - when making a full-text search](../../../../client-api/session/querying/text-search/boost-search-results.mdx#boost-results---when-making-a-full-text-search)
+  * [Boost results - when querying with where clause](../../../../client-api/session/querying/text-search/boost-search-results.mdx#boost-results---when-querying-with-where-clause)
+
+
+## Boost results - when making a full-text search
+
+When making a full-text search with the `Search()` method, boosting can be applied
+to both `Query` and `DocumentQuery`.
+
+
+
+
+{`List<Employee> employees = session
+    // Make a dynamic full-text search Query on 'Employees' collection
+    .Query<Employee>()
+    // This search predicate will use the default boost value of 1
+    .Search(x => x.Notes, "English")
+    // * Pass the boost value using the 'boost' parameter
+    // * This search predicate will use a boost value of 10
+    .Search(x => x.Notes, "Italian", boost: 10)
+    .ToList();
+
+// * Results will contain all Employee documents that have
+//   EITHER 'English' OR 'Italian' in their 'Notes' field (case-insensitive).
+//
+// * Matching documents that contain 'Italian' will get a HIGHER score
+//   than those that contain 'English'.
+//
+// * Unless configured otherwise, the resulting documents will be ordered by their score.
+`}
+
+
+
+
+{`List<Employee> employees = await asyncSession
+    // Make a dynamic full-text search Query on 'Employees' collection
+    .Query<Employee>()
+    // This search predicate will use the default boost value of 1
+    .Search(x => x.Notes, "English")
+    // * Pass the boost value using the 'boost' parameter
+    // * This search predicate will use a boost value of 10
+    .Search(x => x.Notes, "Italian", boost: 10)
+    .ToListAsync();
+
+// * Results will contain all Employee documents that have
+//   EITHER 'English' OR 'Italian' in their 'Notes' field (case-insensitive).
+//
+// * Matching documents that contain 'Italian' will get a HIGHER score
+//   than those that contain 'English'.
+//
+// * Unless configured otherwise, the resulting documents will be ordered by their score.
+`}
+
+
+
+{`List<Employee> employees = session.Advanced
+    // Make a dynamic full-text search DocumentQuery on 'Employees' collection
+    .DocumentQuery<Employee>()
+    // This search predicate will use the default boost value of 1
+    .Search(x => x.Notes, "English")
+    // This search predicate will use a boost value of 10
+    .Search(x => x.Notes, "Italian")
+    // Call 'Boost' to set the boost value of the previous 'Search' call
+    .Boost(10)
+    .ToList();
+
+// * Results will contain all Employee documents that have
+//   EITHER 'English' OR 'Italian' in their 'Notes' field (case-insensitive).
+//
+// * Matching documents that contain 'Italian' will get a HIGHER score
+//   than those that contain 'English'.
+//
+// * Unless configured otherwise, the resulting documents will be ordered by their score.
+`}
+
+
+
+
+{`from "Employees" where
+search(Notes, "English") or boost(search(Notes, "Italian"), 10)
+`}
+
+
+
+
+
+
+## Boost results - when querying with where clause
+
+When querying with `Where` clauses (using an OR condition in between), boosting can be applied
+only with `DocumentQuery`.
+
+
+
+
+{`List<Company> companies = session.Advanced
+    // Make a dynamic DocumentQuery on 'Companies' collection
+    .DocumentQuery<Company>()
+    // Define a 'Where' condition
+    .WhereStartsWith(x => x.Name, "O")
+    // Call 'Boost' to set the boost value of the previous 'Where' predicate
+    .Boost(10)
+    // Call 'OrElse' so that OR operator will be used between statements
+    .OrElse()
+    .WhereStartsWith(x => x.Name, "P")
+    .Boost(50)
+    .OrElse()
+    .WhereEndsWith(x => x.Name, "OP")
+    .Boost(90)
+    .ToList();
+
+// * Results will contain all Company documents that either
+//   (start-with 'O') OR (start-with 'P') OR (end-with 'OP') in their 'Name' field (case-insensitive).
+//
+// * Matching documents that end-with 'OP' will get the HIGHEST scores.
+//   Matching documents that start-with 'O' will get the LOWEST scores.
+//
+// * Unless configured otherwise, the resulting documents will be ordered by their score.
+`}
+
+
+
+
+{`List<Company> companies = await asyncSession.Advanced
+    // Make a dynamic DocumentQuery on 'Companies' collection
+    .AsyncDocumentQuery<Company>()
+    // Define a 'Where' condition
+    .WhereStartsWith(x => x.Name, "O")
+    // Call 'Boost' to set the boost value of the previous 'Where' predicate
+    .Boost(10)
+    // Call 'OrElse' so that OR operator will be used between statements
+    .OrElse()
+    .WhereStartsWith(x => x.Name, "P")
+    .Boost(50)
+    .OrElse()
+    .WhereEndsWith(x => x.Name, "OP")
+    .Boost(90)
+    .ToListAsync();
+
+// * Results will contain all Company documents that either
+//   (start-with 'O') OR (start-with 'P') OR (end-with 'OP') in their 'Name' field (case-insensitive).
+//
+// * Matching documents that end-with 'OP' will get the HIGHEST scores.
+//   Matching documents that start-with 'O' will get the LOWEST scores.
+//
+// * Unless configured otherwise, the resulting documents will be ordered by their score.
+`} + + + + +{`from "Companies" where +boost(startsWith(Name, "O"), 10) or +boost(startsWith(Name, "P"), 50) or +boost(endsWith(Name, "OP"), 90) +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-nodejs.mdx new file mode 100644 index 0000000000..172a9dcecd --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-nodejs.mdx @@ -0,0 +1,124 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When querying with some filtering conditions, + a basic score is calculated for each document in the results by the underlying engine. + +* Providing a boost value to some fields allows you to prioritize the resulting documents. + The boost value is integrated with the basic score, making the document rank higher. + +* Boosting can be achieved in the following ways: + + * **At query time**: + Apply a boost factor to searched terms at query time - as described in this article. + + * **Via index definition**: + Apply a boost factor in your index definition - see this [boosting](../../../../indexes/boosting.mdx) indexing article. + +* The automatic ordering of the results by the score is configurable. + Learn more here: [automatic score-based ordering](../../../../indexes/boosting.mdx#automatic-score-based-ordering) + +* The calculated score details of the results can be retrieved if needed. + Learn more here: [get resulting score](../../../../client-api/session/querying/sort-query-results.mdx#get-resulting-score) + +* In this page: + + * [Boost results - when making a full-text search](../../../../client-api/session/querying/text-search/boost-search-results.mdx#boost-results---when-making-a-full-text-search) + * [Boost results - when querying with where clause](../../../../client-api/session/querying/text-search/boost-search-results.mdx#boost-results---when-querying-with-where-clause) + + +## Boost results - when making a full-text search + + + + + + +{`const employees = await session + // Make a dynamic full-text search Query on 'Employees' collection + .query({ collection: "Employees"}) + // This search predicate will use the default boost value of 1 + .search('Notes', 'English') + // This search predicate will use a boost value of 10 + .search('Notes', 'Italian') + // Call 'boost' to set the boost value of the previous 'search' call + .boost(10) + .all(); + +// * Results will contain all Employee documents that have +// EITHER 'English' OR 'Italian' in their 'Notes' field (case-insensitive). +// +// * Matching documents that contain 'Italian' will get a HIGHER score +// than those that contain 'English'. +// +// * Unless configured otherwise, the resulting documents will be ordered by their score. 
+`}
+
+
+
+
+{`from "Employees" where
+search(Notes, "English") or boost(search(Notes, "Italian"), 10)
+{"p0":"English","p1":"Italian"}
+`}
+
+
+
+
+
+
+
+
+## Boost results - when querying with where clause
+
+
+
+
+
+
+{`const companies = await session
+    // Make a dynamic query on 'Companies' collection
+    .query({ collection: "Companies"})
+    // Define a 'where' condition
+    .whereStartsWith("Name", "O")
+    // Call 'boost' to set the boost value of the previous 'where' predicate
+    .boost(10)
+    // Call 'orElse' so that OR operator will be used between statements
+    .orElse()
+    .whereStartsWith("Name", "P")
+    .boost(50)
+    .orElse()
+    .whereEndsWith("Name", "OP")
+    .boost(90)
+    .all();
+
+// * Results will contain all Company documents that either
+//   (start-with 'O') OR (start-with 'P') OR (end-with 'OP') in their 'Name' field (case-insensitive).
+//
+// * Matching documents that end-with 'OP' will get the HIGHEST scores.
+//   Matching documents that start-with 'O' will get the LOWEST scores.
+//
+// * Unless configured otherwise, the resulting documents will be ordered by their score.
+`}
+
+
+
+
+{`from "Companies" where
+boost(startsWith(Name, "O"), 10) or
+boost(startsWith(Name, "P"), 50) or
+boost(endsWith(Name, "OP"), 90)
+`}
+
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-php.mdx
new file mode 100644
index 0000000000..4ccabb6f99
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-php.mdx
@@ -0,0 +1,146 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When querying with some filtering conditions,
+  a basic score is calculated for each document in the results by the underlying engine.
+
+* Providing a boost value to some fields allows you to prioritize the resulting documents.
+  The boost value is integrated with the basic score, making the document rank higher.
+
+* Boosting can be achieved in the following ways:
+
+  * **At query time**:
+    Apply a boost factor to searched terms at query time - as described in this article.
+
+  * **Via index definition**:
+    Apply a boost factor in your index definition - see this [boosting](../../../../indexes/boosting.mdx) indexing article.
+
+* The automatic ordering of the results by the score is configurable.
+  Learn more here: [automatic score-based ordering](../../../../indexes/boosting.mdx#automatic-score-based-ordering)
+
+* The calculated score details of the results can be retrieved if needed.
+  Learn more here: [get resulting score](../../../../client-api/session/querying/sort-query-results.mdx#get-resulting-score)
+
+* In this page:
+
+  * [Boost results - when making a full-text search](../../../../client-api/session/querying/text-search/boost-search-results.mdx#boost-results---when-making-a-full-text-search)
+  * [Boost results - when querying with where clause](../../../../client-api/session/querying/text-search/boost-search-results.mdx#boost-results---when-querying-with-where-clause)
+
+
+## Boost results - when making a full-text search
+
+To apply boosting while running a full-text search, use the
+`boost()` method to prioritize the preceding `search()` results.
+ + + + +{`/** @var array $employees */ +$employees = $session + // Make a dynamic full-text search Query on 'Employees' collection + ->query(Employee::class) + // This search predicate will use the default boost value of 1 + ->search("Notes", "English") + // * This search predicate will use a boost value of 10 + ->search("Notes", "Italian") + // Call 'boost()' to set the boost value of the previous 'search()' call + ->boost(10) + ->toList(); + +// * Results will contain all Employee documents that have +// EITHER 'English' OR 'Italian' in their 'Notes' field (case-insensitive). +// +// * Matching documents that contain 'Italian' will get a HIGHER score +// than those that contain 'English'. +// +// * Unless configured otherwise, the resulting documents will be ordered by their score. +`} + + + + +{`List employees = session.Advanced + // Make a dynamic full-text search DocumentQuery on 'Employees' collection + .DocumentQuery() + // This search predicate will use the default boost value of 1 + .Search(x => x.Notes, "English") + // This search predicate will use a boost value of 10 + .Search(x => x.Notes, "Italian") + // Call 'Boost' to set the boost value of the previous 'Search' call + .Boost(10) + .ToList(); + +// * Results will contain all Employee documents that have +// EITHER 'English' OR 'Italian' in their 'Notes' field (case-insensitive). +// +// * Matching documents that contain 'Italian' will get a HIGHER score +// than those that contain 'English'. +// +// * Unless configured otherwise, the resulting documents will be ordered by their score. +`} + + + + +{`from "Employees" where +search(Notes, "English") or boost(search(Notes, "Italian"), 10) +`} + + + + + + +## Boost results - when querying with where clause + +`boost()` can be used to give different priorities to the results +returned by different `where` clauses. + + + + +{`/** @var array $companies */ +$companies = $session->advanced() + // Make a dynamic DocumentQuery on 'Companies' collection + ->documentQuery(Company::class) + // Define a 'Where' condition + ->WhereStartsWith("Name", "O") + // Call 'Boost' to set the boost value of the previous 'Where' predicate + ->boost(10) + // Call 'OrElse' so that OR operator will be used between statements + ->orElse() + ->whereStartsWith("Name", "P") + ->boost(50) + ->orElse() + ->whereEndsWith("Name", "OP") + ->boost(90) + ->toList(); + +// * Results will contain all Company documents that either +// (start-with 'O') OR (start-with 'P') OR (end-with 'OP') in their 'Name' field (case-insensitive). +// +// * Matching documents that end-with 'OP' will get the HIGHEST scores. +// Matching documents that start-with 'O' will get the LOWEST scores. +// +// * Unless configured otherwise, the resulting documents will be ordered by their score. 
+`}
+
+
+
+{`from "Companies" where
+boost(startsWith(Name, "O"), 10) or
+boost(startsWith(Name, "P"), 50) or
+boost(endsWith(Name, "OP"), 90)
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-python.mdx
new file mode 100644
index 0000000000..28f66286be
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_boost-search-results-python.mdx
@@ -0,0 +1,124 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When querying with some filtering conditions,
+  a basic score is calculated for each document in the results by the underlying engine.
+
+* Providing a boost value to some fields allows you to prioritize the resulting documents.
+  The boost value is integrated with the basic score, making the document rank higher.
+
+* Boosting can be achieved in the following ways:
+
+  * **At query time**:
+    Apply a boost factor to searched terms at query time - as described in this article.
+
+  * **Via index definition**:
+    Apply a boost factor in your index definition - see this [boosting](../../../../indexes/boosting.mdx) indexing article.
+
+* The automatic ordering of the results by the score is configurable.
+  Learn more here: [automatic score-based ordering](../../../../indexes/boosting.mdx#automatic-score-based-ordering)
+
+* The calculated score details of the results can be retrieved if needed.
+  Learn more here: [get resulting score](../../../../client-api/session/querying/sort-query-results.mdx#get-resulting-score)
+
+* In this page:
+
+  * [Boost results - when making a full-text search](../../../../client-api/session/querying/text-search/boost-search-results.mdx#boost-results---when-making-a-full-text-search)
+  * [Boost results - when querying with where clause](../../../../client-api/session/querying/text-search/boost-search-results.mdx#boost-results---when-querying-with-where-clause)
+
+
+## Boost results - when making a full-text search
+
+To apply boosting while running a full-text search, use the
+`boost()` method to prioritize the preceding `search()` results.
+
+
+
+{`employees = list(
+    session
+    # Make a dynamic full-text search query on 'Employees' collection
+    .query(object_type=Employee)
+    # This search predicate will use the default boost value of 1
+    .search("Notes", "English")
+    # This search predicate will use a boost value of 10
+    .search("Notes", "Italian")
+    # Call 'boost' to set the boost value of the previous 'search' call
+    .boost(10)
+)
+
+# * Results will contain all Employee documents that have
+#   EITHER 'English' OR 'Italian' in their 'Notes' field.
+#
+# * Matching documents with 'Italian' will be listed FIRST in the results,
+#   before those with 'English'.
+#
+# * Search is case-insensitive.
+`}
+
+
+
+{`from "Employees" where
+search(Notes, "English") or boost(search(Notes, "Italian"), 10)
+`}
+
+
+
+
+
+## Boost results - when querying with where clause
+
+`boost()` can be used to give different priorities to the results
+returned by different `where` clauses.
+
+
+
+
+{`companies = list(
+    session.advanced
+    # Make a dynamic DocumentQuery on 'Companies' collection
+    .document_query(object_type=Company)
+    # Define a 'where' condition
+    .where_starts_with("Name", "O")
+    # Call 'boost' to set the boost value of the previous 'where' predicate
+    .boost(10)
+    # Call 'or_else' so that the OR operator will be used between statements
+    .or_else()
+    .where_starts_with("Name", "P")
+    .boost(50)
+    .or_else()
+    .where_ends_with("Name", "OP")
+    .boost(90)
+)
+
+# * Results will contain all Company documents whose 'Name' field either
+#   (starts with 'O') OR (starts with 'P') OR (ends with 'OP').
+#
+# * Matching documents that end with 'OP' will be listed FIRST.
+#   Matching documents that start with 'P' will then be listed.
+#   Matching documents that start with 'O' will be listed LAST.
+#
+# * Search is case-insensitive.
+`}
+
+
+
+
+{`from "Companies" where
+boost(startsWith(Name, "O"), 10) or
+boost(startsWith(Name, "P"), 50) or
+boost(endsWith(Name, "OP"), 90)
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_category_.json b/versioned_docs/version-7.1/client-api/session/querying/text-search/_category_.json
new file mode 100644
index 0000000000..16ce9c6eea
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 19,
+  "label": "Text Search"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-csharp.mdx
new file mode 100644
index 0000000000..edbbc22559
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-csharp.mdx
@@ -0,0 +1,191 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can query for documents having a field that ends with some specified string.
+
+* Unless explicitly specified, the string comparisons are case-insensitive by default.
+
+* **Note**:
+  This postfix search causes the server to perform a full index scan.
+  Instead, consider using a static index that indexes the field in reverse order
+  and then query with a [prefix search](../../../../client-api/session/querying/text-search/starts-with-query.mdx), which is much faster.
+ +* In this page: + * [EndsWith](../../../../client-api/session/querying/text-search/ends-with-query.mdx#endswith) + * [EndsWith (case-sensitive)](../../../../client-api/session/querying/text-search/ends-with-query.mdx#endswith-(case-sensitive)) + * [Negate EndsWith](../../../../client-api/session/querying/text-search/ends-with-query.mdx#negate-endswith) + + +## EndsWith + + + + +{`List products = session + .Query() + // Call 'EndsWith' on the field + // Pass the postfix to search by + .Where(x => x.Name.EndsWith("Lager")) + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that ends with any case variation of 'lager' +`} + + + + +{`List products = await asyncSession + .Query() + // Call 'EndsWith' on the field + // Pass the postfix to search by + .Where(x => x.Name.EndsWith("Lager")) + .ToListAsync(); + +// Results will contain only Product documents having a 'Name' field +// that ends with any case variation of 'lager' +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Call 'WhereEndsWith' + // Pass the document field and the postfix to search by + .WhereEndsWith(x => x.Name, "Lager") + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that ends with any case variation of 'lager' +`} + + + + +{`from "Products" +where endsWith(Name, "Lager") +`} + + + + + + +## EndsWith (case-sensitive) + + + + +{`List products = session + .Query() + // Pass 'exact: true' to search for an EXACT postfix match + .Where(x => x.Name.EndsWith("Lager"), exact: true) + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that ends with 'Lager' +`} + + + + +{`List products = await asyncSession + .Query() + // Pass 'exact: true' to search for an EXACT postfix match + .Where(x => x.Name.EndsWith("Lager"), exact: true) + .ToListAsync(); + +// Results will contain only Product documents having a 'Name' field +// that ends with 'Lager' +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Call 'WhereEndsWith' + // Pass 'exact: true' to search for an EXACT postfix match + .WhereEndsWith(x => x.Name, "Lager", exact: true) + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that ends with 'Lager' +`} + + + + +{`from "Products" +where exact(endsWith(Name, "Lager")) +`} + + + + + + +## Negate EndsWith + + + + +{`List products = session + .Query() + // Call 'EndsWith' on the field + // Pass the postfix to search by + .Where(x => x.Name.EndsWith("Lager") == false) + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that does NOT end with 'lager' or any other case variations of it +`} + + + + +{`List products = await asyncSession + .Query() + // Call 'EndsWith' on the field + // Pass the postfix to search by + .Where(x => x.Name.EndsWith("Lager") == false) + .ToListAsync(); + +// Results will contain only Product documents having a 'Name' field +// that does NOT end with 'lager' or any other case variations of it +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Call 'Not' to negate the next predicate + .Not + // Call 'WhereEndsWith' + // Pass the document field and the postfix to search by + .WhereEndsWith(x => x.Name, "Lager") + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that does NOT end with 'lager' or any other case variations of it +`} + + + + +{`from "Products" +where (true and not endsWith(Name, "Lager")) +`} + + + + + + + diff --git 
a/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-nodejs.mdx
new file mode 100644
index 0000000000..6f3a02a586
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-nodejs.mdx
@@ -0,0 +1,130 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `whereEndsWith` to query for documents having a field that ends with some specified string.
+
+* Unless explicitly specified, the string comparisons are case-insensitive by default.
+
+* **Note**:
+  This postfix search causes the server to perform a full index scan.
+  Instead, consider using a static index that indexes the field in reverse order
+  and then query with a [prefix search](../../../../client-api/session/querying/text-search/starts-with-query.mdx), which is much faster.
+
+* In this page:
+   * [whereEndsWith](../../../../client-api/session/querying/text-search/ends-with-query.mdx#whereendswith)
+   * [whereEndsWith (case-sensitive)](../../../../client-api/session/querying/text-search/ends-with-query.mdx#whereendswith-(case-sensitive))
+   * [Negate whereEndsWith](../../../../client-api/session/querying/text-search/ends-with-query.mdx#negate-whereendswith)
+   * [Syntax](../../../../client-api/session/querying/text-search/ends-with-query.mdx#syntax)
+
+
+## whereEndsWith
+
+
+
+
+{`const products = await session
+    .query({ collection: "Products" })
+    // Call 'whereEndsWith'
+    // Pass the document field and the postfix to search by
+    .whereEndsWith("Name", "Lager")
+    .all();
+
+// Results will contain only Product documents having a 'Name' field
+// that ends with any case variation of 'lager'
+`}
+
+
+
+{`from "Products"
+where endsWith(Name, "Lager")
+`}
+
+
+
+
+
+## whereEndsWith (case-sensitive)
+
+
+
+
+{`const products = await session
+    .query({ collection: "Products" })
+    // Call 'whereEndsWith'
+    // Pass 'true' as the 3rd parameter to search for an EXACT postfix match
+    .whereEndsWith("Name", "Lager", true)
+    .all();
+
+// Results will contain only Product documents having a 'Name' field
+// that ends with 'Lager'
+`}
+
+
+
+{`from "Products"
+where exact(endsWith(Name, "Lager"))
+`}
+
+
+
+
+
+## Negate whereEndsWith
+
+
+
+
+{`const products = await session
+    .query({ collection: "Products" })
+    // Call 'not' to negate the next predicate
+    .not()
+    // Call 'whereEndsWith'
+    // Pass the document field and the postfix to search by
+    .whereEndsWith("Name", "Lager")
+    .all();
+
+// Results will contain only Product documents having a 'Name' field
+// that does NOT end with 'lager' or any other case variations of it
+`}
+
+
+
+{`from "Products"
+where exists(Name) and not endsWith(Name, "Lager")
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+whereEndsWith(fieldName, value);
+whereEndsWith(fieldName, value, exact);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|---------|---------------------------------------------------------------------------|
+| **fieldName** | string | The field name in which to search |
+| **value** | string | The **postfix** string to search by |
+| **exact** | boolean | `false` - search is case-insensitive
`true` - search is case-sensitive | + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-php.mdx new file mode 100644 index 0000000000..45329c25a1 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-php.mdx @@ -0,0 +1,155 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can query for documents having a field that ends with some specified string. + +* Unless explicitly specified, the string comparisons are case-insensitive by default. + +* **Note**: + This postfix search causes the server to perform a full index scan. + Instead, consider using a static index that indexes the field in reverse order + and then query with a [prefix search](../../../../client-api/session/querying/text-search/starts-with-query.mdx), which is much faster. + +* In this page: + * [where_ends_with](../../../../client-api/session/querying/text-search/ends-with-query.mdx#where_ends_with) + * [where_ends_with (case-sensitive)](../../../../client-api/session/querying/text-search/ends-with-query.mdx#where_ends_with-(case-sensitive)) + * [Negate where_ends_with](../../../../client-api/session/querying/text-search/ends-with-query.mdx#negate-where_ends_with) + + +## where_ends_with + + + + +{`/** @var array $products */ +$products = $session + ->query(Product::class) + // Call 'whereEndsWith' on the field + // Pass the postfix to search by + ->whereEndsWith("Name", "Lager") + ->toList(); + +// Results will contain only Product documents having a 'Name' field +// that ends with any case variation of 'lager' +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Call 'WhereEndsWith' + // Pass the document field and the postfix to search by + .WhereEndsWith(x => x.Name, "Lager") + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that ends with any case variation of 'lager' +`} + + + + +{`from "Products" +where endsWith(Name, "Lager") +`} + + + + + + +## where_ends_with (case-sensitive) + + + + +{`/** @var array $products */ +$products = $session + ->query(Product::class) + // Pass 'exact: true' to search for an EXACT postfix match + ->whereEndsWith("Name", "Lager", true) + ->toList(); + +// Results will contain only Product documents having a 'Name' field +// that ends with 'Lager' +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Call 'WhereEndsWith' + // Pass 'exact: true' to search for an EXACT postfix match + .WhereEndsWith(x => x.Name, "Lager", exact: true) + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that ends with 'Lager' +`} + + + + +{`from "Products" +where exact(endsWith(Name, "Lager")) +`} + + + + + + +## Negate where_ends_with + + + + +{`/** @var array $products */ +$products = $session + ->query(Product::class) + // Call 'Not' to negate the next predicate + ->not() + // Call 'whereEndsWith' on the field + // Pass the postfix to search by + ->whereEndsWith("Name", "Lager") + ->toList(); + +// Results will contain only Product documents having a 'Name' field +// that does NOT end with 'lager' or any other case variations of it +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Call 'Not' to negate the next predicate + .Not + // Call 'WhereEndsWith' + // 
Pass the document field and the postfix to search by
+    .WhereEndsWith(x => x.Name, "Lager")
+    .ToList();
+
+// Results will contain only Product documents having a 'Name' field
+// that does NOT end with 'lager' or any other case variations of it
+`}
+
+
+
+{`from "Products"
+where (true and not endsWith(Name, "Lager"))
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-python.mdx
new file mode 100644
index 0000000000..24c35f8fc6
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_ends-with-query-python.mdx
@@ -0,0 +1,107 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can query for documents having a field that ends with some specified string.
+
+* Unless explicitly specified, the string comparisons are case-insensitive by default.
+
+* **Note**:
+  This postfix search causes the server to perform a full index scan.
+  Instead, consider using a static index that indexes the field in reverse order
+  and then query with a [prefix search](../../../../client-api/session/querying/text-search/starts-with-query.mdx), which is much faster.
+
+* In this page:
+   * [where_ends_with](../../../../client-api/session/querying/text-search/ends-with-query.mdx#where_ends_with)
+   * [where_ends_with (case-sensitive)](../../../../client-api/session/querying/text-search/ends-with-query.mdx#where_ends_with-(case-sensitive))
+   * [Negate where_ends_with](../../../../client-api/session/querying/text-search/ends-with-query.mdx#negate-where_ends_with)
+
+
+## where_ends_with
+
+
+
+
+{`products = list(
+    session.query(object_type=Product)
+    # Call 'where_ends_with' on the field
+    # Pass the postfix to search by
+    .where_ends_with("Name", "Lager")
+)
+
+# Results will contain only Product documents having a 'Name' field
+# that ends with any case variation of 'lager'
+`}
+
+
+
+{`from "Products"
+where endsWith(Name, "Lager")
+`}
+
+
+
+
+
+## where_ends_with (case-sensitive)
+
+
+
+
+{`products = list(
+    session.query(object_type=Product)
+    # Pass 'exact=True' to search for an EXACT postfix match
+    .where_ends_with("Name", "Lager", exact=True)
+)
+
+# Results will contain only Product documents having a 'Name' field
+# that ends with 'Lager'
+`}
+
+
+
+{`from "Products"
+where exact(endsWith(Name, "Lager"))
+`}
+
+
+
+
+
+## Negate where_ends_with
+
+
+
+
+{`products = list(
+    session.query(object_type=Product)
+    # Negate next statement
+    .not_()
+    # Call 'where_ends_with' on the field
+    # Pass the postfix to search by
+    .where_ends_with("Name", "Lager")
+)
+# Results will contain only Product documents having a 'Name' field
+# that does NOT end with 'lager' or any other case variations of it
+`}
+
+
+
+{`from "Products"
+where (true and not endsWith(Name, "Lager"))
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-csharp.mdx
new file mode 100644
index 0000000000..7a0a8107f4
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-csharp.mdx
@@ -0,0 +1,162 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, when making a query that filters by strings, the string comparisons are **case-insensitive**.
+
+* Use the `exact` parameter to perform a search that is **case-sensitive**.
+
+* When making a dynamic query with an exact match,
+  the auto-index created by the server indexes the text of the document field
+  using the [default exact analyzer](../../../../indexes/using-analyzers.mdx#ravendb) where the casing of the original text is not changed.
+
+* In this page:
+   * [Query with exact match](../../../../client-api/session/querying/text-search/exact-match-query.mdx#query-with-exact-match)
+   * [Query with exact match - nested object](../../../../client-api/session/querying/text-search/exact-match-query.mdx#query-with-exact-match---nested-object)
+   * [Syntax](../../../../client-api/session/querying/text-search/exact-match-query.mdx#syntax)
+
+
+## Query with exact match
+
+
+
+
+
+{`List<Employee> employees = session
+    // Make a dynamic query on 'Employees' collection
+    .Query<Employee>()
+    // Query for all documents where 'FirstName' equals 'Robert'
+    // Pass 'exact: true' for a case-sensitive match
+    .Where(x => x.FirstName == "Robert", exact: true)
+    .ToList();
+`}
+
+
+
+{`List<Employee> employees = await asyncSession
+    // Make a dynamic query on 'Employees' collection
+    .Query<Employee>()
+    // Query for all documents where 'FirstName' equals 'Robert'
+    // Pass 'exact: true' for a case-sensitive match
+    .Where(x => x.FirstName == "Robert", exact: true)
+    .ToListAsync();
+`}
+
+
+
+{`List<Employee> employees = session.Advanced
+    // Make a dynamic DocumentQuery on 'Employees' collection
+    .DocumentQuery<Employee>()
+    // Query for all documents where 'FirstName' equals 'Robert'
+    // Pass 'exact: true' for a case-sensitive match
+    .WhereEquals(x => x.FirstName, "Robert", exact: true)
+    .ToList();
+`}
+
+
+
+{`from "Employees"
+where exact(FirstName == "Robert")
+`}
+
+
+
+
+
+* Executing the above query will generate the auto-index `Auto/Employees/ByExact(FirstName)`.
+
+* This auto-index will contain the following two index-fields:
+
+   * `FirstName`
+     Contains terms with the text from the indexed document field 'FirstName'.
+     Text is lower-cased and not tokenized.
+
+   * `exact(FirstName)`
+     Contains terms with the original text from the indexed document field 'FirstName'.
+     Casing is exactly the same as in the original text, and the text is not tokenized.
+     Making an exact query targets these terms to find matching documents.
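+
+To see the difference the two index-fields make, here is a minimal sketch
+(assuming the sample data used above, where the stored casing is 'Robert')
+contrasting the default match with an exact match:
+
+
+
+{`// Default (case-insensitive) match:
+// runs against the lower-cased terms of the 'FirstName' index-field,
+// so 'ROBERT' still matches documents storing 'Robert'
+List<Employee> anyCasing = session
+    .Query<Employee>()
+    .Where(x => x.FirstName == "ROBERT")
+    .ToList();
+
+// Exact (case-sensitive) match:
+// runs against the original-cased terms of the 'exact(FirstName)' index-field,
+// so 'ROBERT' does NOT match documents storing 'Robert'
+List<Employee> exactCasing = session
+    .Query<Employee>()
+    .Where(x => x.FirstName == "ROBERT", exact: true)
+    .ToList();
+`}
+
+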
+
+
+
+## Query with exact match - nested object
+
+
+
+
+
+{`List<Order> orders = session
+    // Make a dynamic query on 'Orders' collection
+    .Query<Order>()
+    // Query for documents that contain at least one order line with 'Teatime Chocolate Biscuits'
+    .Where(x => x.Lines.Any(p => p.ProductName == "Teatime Chocolate Biscuits"),
+        // Pass 'exact: true' for a case-sensitive match
+        exact: true)
+    .ToList();
+`}
+
+
+
+{`List<Order> orders = await asyncSession
+    // Make a dynamic query on 'Orders' collection
+    .Query<Order>()
+    // Query for documents that contain at least one order line with 'Teatime Chocolate Biscuits'
+    .Where(x => x.Lines.Any(p => p.ProductName == "Teatime Chocolate Biscuits"),
+        // Pass 'exact: true' for a case-sensitive match
+        exact: true)
+    .ToListAsync();
+`}
+
+
+
+{`List<Order> orders = session.Advanced
+    // Make a dynamic DocumentQuery on 'Orders' collection
+    .DocumentQuery<Order>()
+    // Query for documents that contain at least one order line with 'Teatime Chocolate Biscuits'
+    .WhereEquals("Lines.ProductName", "Teatime Chocolate Biscuits",
+        // Pass 'exact: true' for a case-sensitive match
+        exact: true)
+    .ToList();
+`}
+
+
+
+{`from "Orders"
+where exact(Lines.ProductName == "Teatime Chocolate Biscuits")
+`}
+
+
+
+
+
+
+
+
+
+## Syntax
+
+
+
+{`IRavenQueryable<T> Where<T>(Expression<Func<T, bool>> predicate, bool exact);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|---------------------------|---------------------------------------------------------------------------|
+| **predicate** | Expression&lt;Func&lt;T, bool&gt;&gt; | Predicate with match condition |
+| **exact** | bool | `false` - search is case-insensitive
`true` - search is case-sensitive |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-java.mdx
new file mode 100644
index 0000000000..e1000f052d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-java.mdx
@@ -0,0 +1,73 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+By default, the `whereXXX` methods in `query` use a case-insensitive match.
+
+To perform a case-sensitive match, use the `exact` parameter.
+
+### Syntax
+
+
+
+{`IDocumentQuery<T> whereEquals(String fieldName, Object value, boolean exact);
+IDocumentQuery<T> whereNotEquals(String fieldName, Object value, boolean exact);
+
+// ... rest of where methods
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **fieldName** | String | Object field to use |
+| **value** | Object | Predicate value |
+| **exact** | boolean | Indicates if `predicate` should be matched in a case-sensitive manner |
+
+### Example I - Query With Exact Match
+
+
+
+
+{`// load all entities from 'Employees' collection
+// where firstName equals 'Robert' (case-sensitive match)
+List<Employee> employees = session.query(Employee.class)
+    .whereEquals("FirstName", "Robert", true)
+    .toList();
+`}
+
+
+
+{`from Employees where exact(FirstName == 'Robert')
+`}
+
+
+
+### Example II - Query With Inner Exact Match
+
+
+
+
+{`// return all entities from 'Orders' collection
+// which contain at least one order line with
+// 'Singaporean Hokkien Fried Mee' product
+// perform a case-sensitive match
+List<Order> orders = session.query(Order.class)
+    .whereEquals("Lines[].ProductName", "Singaporean Hokkien Fried Mee", true)
+    .toList();
+`}
+
+
+
+{`from Orders
+where exact(Lines[].ProductName == 'Singaporean Hokkien Fried Mee')
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-nodejs.mdx
new file mode 100644
index 0000000000..79a2ef73e0
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-nodejs.mdx
@@ -0,0 +1,118 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, when making a query that filters by strings, the string comparisons are **case-insensitive**.
+
+* Use the `exact` parameter to perform a search that is **case-sensitive**.
+
+* When making a dynamic query with an exact match,
+  the auto-index created by the server indexes the text of the document field
+  using the [default exact analyzer](../../../../indexes/using-analyzers.mdx#ravendb) where the casing of the original text is not changed.
+ +* In this page: + * [Query with exact match](../../../../client-api/session/querying/text-search/exact-match-query.mdx#query-with-exact-match) + * [Query with exact match - nested object](../../../../client-api/session/querying/text-search/exact-match-query.mdx#query-with-exact-match---nested-object) + * [Syntax](../../../../client-api/session/querying/text-search/exact-match-query.mdx#syntax) + + +## Query with exact match + + + + + + +{`const employees = await session + // Make a dynamic query on 'Employees' collection + .query({ collection: "Employees" }) + // Query for all documents where 'FirstName' equals 'Robert' + // Pass 'true' as the 3'rd param for a case-sensitive match + .whereEquals("FirstName", "Robert", true) + .all(); +`} + + + + +{`from "Employees" +where exact(FirstName == "Robert") +`} + + + + + + +* Executing the above query will generate the auto-index `Auto/Employees/ByExact(FirstName)`. + +* This auto-index will contain the following two index-fields: + + * `FirstName` + Contains terms with text from the indexed document field 'FirstName'. + Text is lower-cased and not tokenized. + + * `exact(FirstName)` + Contain terms with the original text from the indexed document field 'FirstName'. + Casing is exactly the same as in the original text, and the text is not tokenized. + Making an exact query targets these terms to find matching documents. + + + +## Query with exact match - nested object + + + + + + +{`const orders = await session + // Make a dynamic query on 'Orders' collection + .query({ collection: "Orders" }) + // Query for documents that contain at least one order line with 'Teatime Chocolate Biscuits' + // Pass 'true' as the 3'rd param for a case-sensitive match + .whereEquals("Lines.ProductName", "Teatime Chocolate Biscuits", true) + .all(); +`} + + + + +{`from "Orders" +where exact(Lines.ProductName == "Teatime Chocolate Biscuits") +`} + + + + + + + + +## Syntax + + + +{`// Available overloads: + +whereEquals(fieldName, value); +whereEquals(fieldName, value, exact); + +whereNotEquals(fieldName, value); +whereNotEquals(fieldName, value, exact); +`} + + + +| Parameter | Type | Description | +|---------------|---------|---------------------------------------------------------------------------| +| **fieldName** | string | Name of field in which to search | +| **value** | any | The value searched for | +| **exact** | boolean | `false` - search is case-insensitive
`true` - search is case-sensitive | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-php.mdx new file mode 100644 index 0000000000..330148ab83 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-php.mdx @@ -0,0 +1,120 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, when querying strings the string comparisons are **case-insensitive**. + +* To perform a **case-sensitive** search, use the `whereEquals` or `whereNotEquals` + method with its `exact` parameter set to `true`. + +* When making a dynamic query with an exact match, the auto-index created by the server indexes + the text of the document field using the [default exact analyzer](../../../../indexes/using-analyzers.mdx#ravendb) + where the casing of the original text is unchanged. + +* In this page: + * [Query with exact match](../../../../client-api/session/querying/text-search/exact-match-query.mdx#query-with-exact-match) + * [Query with exact match - nested object](../../../../client-api/session/querying/text-search/exact-match-query.mdx#query-with-exact-match---nested-object) + * [Syntax](../../../../client-api/session/querying/text-search/exact-match-query.mdx#syntax) + + +## Query with exact match + + + + + + +{`// load all entities from 'Employees' collection +// where FirstName field's contents equals 'Robert' (case sensitive match) + +/** @var array $employees */ +$employees = $session->query(Employee::class) + ->whereEquals("FirstName", "Robert", true) + ->toList(); +`} + + + + +{`from "Employees" +where exact(FirstName == "Robert") +`} + + + + + + +* Executing the above query will generate the auto-index `Auto/Employees/ByExact(FirstName)`. + +* This auto-index will contain the following two index-fields: + + * `FirstName` + Contains terms with the text from the indexed document field 'FirstName'. + Text is lower-cased and not tokenized. + + * `exact(FirstName)` + Contain terms with the original text from the indexed document field 'FirstName'. + Casing is exactly the same as in the original text, and the text is not tokenized. + Making an exact query targets these terms to find matching documents. + + + +## Query with exact match - nested object + + + + + + +{`// return all entities from the 'Orders' collection which contain +// at least one order line with 'Teatime Chocolate Biscuits' product +// perform a case-sensitive match +/** @var array $orders */ +$orders = $session->query(Order::class) + ->whereEquals("Lines[].ProductName", "Teatime Chocolate Biscuits", true) + ->toList(); +`} + + + + +{`from "Orders" +where exact(Lines.ProductName == "Teatime Chocolate Biscuits") +`} + + + + + + + + +## Syntax + + + +{`function whereEquals(string $fieldName, $value, bool $exact = false): DocumentQueryInterface; +function whereNotEquals(string $fieldName, $value, bool $exact = false): DocumentQueryInterface; + +// ... rest of where methods +`} + + + +| Parameter | Type | Description | +|---------------|----------------------------|-------------| +| **$fieldName** | `string` | Search-field name | +| **$value** | `string` | string to match with match condition | +| **$exact** | `bool` | `false` - search is case-insensitive
`true` - search is case-sensitive | + +| Return Type | Description | +| ------------- | ----- | +| `DocumentQueryInterface` | Query results | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-python.mdx new file mode 100644 index 0000000000..5077818573 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_exact-match-query-python.mdx @@ -0,0 +1,124 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, when querying strings the string comparisons are **case-insensitive**. + +* Use the `where_equals` method `exact` parameter to perform a search that is **case-sensitive**. + +* When making a dynamic query with an exact match, the auto-index created by the server indexes + the text of the document field using the [default exact analyzer](../../../../indexes/using-analyzers.mdx#ravendb) + where the casing of the original text is unchanged. + +* In this page: + * [Query with exact match](../../../../client-api/session/querying/text-search/exact-match-query.mdx#query-with-exact-match) + * [Query with exact match - nested object](../../../../client-api/session/querying/text-search/exact-match-query.mdx#query-with-exact-match---nested-object) + * [Syntax](../../../../client-api/session/querying/text-search/exact-match-query.mdx#syntax) + + +## Query with exact match + + + + + + +{`employees = list( + session. + # Make a query on 'Employees' collection + query(object_type=Employee) + # Query for all documents where 'FirstName' equals 'Robert' + # Pass 'exact=True' for a case-sensitive match + .where_equals("FirstName", "Robert", exact=True) +) +`} + + + + +{`from "Employees" +where exact(FirstName == "Robert") +`} + + + + + + +* Executing the above query will generate the auto-index `Auto/Employees/ByExact(FirstName)`. + +* This auto-index will contain the following two index-fields: + + * `FirstName` + Contains terms with the text from the indexed document field 'FirstName'. + Text is lower-cased and not tokenized. + + * `exact(FirstName)` + Contain terms with the original text from the indexed document field 'FirstName'. + Casing is exactly the same as in the original text, and the text is not tokenized. + Making an exact query targets these terms to find matching documents. + + + +## Query with exact match - nested object + + + + + + +{`orders = list( + session + # Make a query on 'Orders' collection + .query(object_type=Order) + # Query for documents that contain at least one order line with 'Teatime Chocolate Biscuits' + .where_equals( + "Lines[].ProductName", + "Teatime Chocolate Biscuits", + # Pass 'exact=True' for a case-sensitive match + exact=True, + ) +) +`} + + + + +{`from "Orders" +where exact(Lines.ProductName == "Teatime Chocolate Biscuits") +`} + + + + + + + + +## Syntax + + + +{`def where_equals( + self, field_name: str, value_or_method: Union[object, MethodCall], exact: Optional[bool] = None +) -> DocumentQuery[_T]: ... +`} + + + +| Parameter | Type | Description | +|---------------|----------------------------|--------------------------------------------------------------| +| **field_name** | `str` | Search-field name | +| **value_or_method** | `Union[object, MethodCall]` | `object` to match with
-or-
`MethodCall` with match condition | +| **exact** (optional) | `bool` | `False` - search is case-insensitive
`True` - search is case-sensitive | + +| Return Type | Description | +| ------------- | ----- | +| `DocumentQuery[_T]` | `where_equals` query results | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-csharp.mdx new file mode 100644 index 0000000000..7cd18d0e1d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-csharp.mdx @@ -0,0 +1,931 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article is about running a full-text search with a **dynamic query**. + To learn how to run a full-text search using a static-index, see [full-text search with index](../../../../indexes/querying/searching.mdx). + +* Use the `Search()` method to query for documents that contain specified term/s + within the text of the specified document field/s. + +* When running a full-text search with a dynamic query, the **auto-index** created by the server + breaks down the text of the searched document field using the [default search analyzer](../../../../indexes/using-analyzers.mdx#ravendb). + All generated terms are lower-cased, so the search is **case-insensitive**. + +* Gain additional control over term tokenization by running a full-text search + using a [static-index](../../../../indexes/querying/searching.mdx), where the used + analyzer is configurable. + + + +* A **boost** value can be set for each search to prioritize results. + Learn more in [boost search results](../../../../client-api/session/querying/text-search/boost-search-results.mdx). + +* User experience can be enhanced by requesting text fragments that **highlight** + the searched terms in the results. Learn more in [highlight search results](../../../../client-api/session/querying/text-search/highlight-query-results.mdx). + +* In this page: + * [Search for single term](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-for-single-term) + * [Search for multiple terms](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-for-multiple-terms) + * [Search in multiple fields](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-in-multiple-fields) + * [Search in complex object](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-in-complex-object) + * [Search operators](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-operators) + * [Search options](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-options) + * [Using wildcards](../../../../client-api/session/querying/text-search/full-text-search.mdx#using-wildcards) + * [Syntax](../../../../client-api/session/querying/text-search/full-text-search.mdx#syntax) + + +## Search for single term + + + + + + +{`List employees = session + // Make a dynamic query on Employees collection + .Query() + // * Call 'Search' to make a Full-Text search + // * Search is case-insensitive + // * Look for documents containing the term 'University' within their 'Notes' field + .Search(x => x.Notes, "University") + .ToList(); + +// Results will contain Employee documents that have +// any case variation of the term 'university' in their 'Notes' field. 
+`} + + + + +{`List employees = await asyncSession + // Make a dynamic query on Employees collection + .Query() + // * Call 'Search' to make a Full-Text search + // * Search is case-insensitive + // * Look for documents containing the term 'University' within their 'Notes' field + .Search(x => x.Notes, "University") + .ToListAsync(); + +// Results will contain Employee documents that have +// any case variation of the term 'university' in their 'Notes' field. +`} + + + + +{`List employees = session.Advanced + // Make a dynamic DocumentQuery on Employees collection + .DocumentQuery() + // * Call 'Search' to make a Full-Text search + // * Search is case-insensitive + // * Look for documents containing the term 'University' within their 'Notes' field + .Search(x => x.Notes, "University") + .ToList(); + +// Results will contain Employee documents that have +// any case variation of the term 'university' in their 'Notes' field. +`} + + + + +{`from "Employees" +where search(Notes, "University") +`} + + + + + + +* Executing the above query will generate the auto-index `Auto/Employees/BySearch(Notes)`. + +* This auto-index will contain the following two index-fields: + + * `Notes` + Contains terms with the original text from the indexed document field 'Notes'. + Text is lower-cased and Not tokenized. + + * `search(Notes)` + Contains __lower-cased terms__ that were tokenized from the 'Notes' field by the [default search analyzer](../../../../indexes/using-analyzers.mdx#ravendb) (RavenStandardAnalyzer). + Calling the `Search()` method targets these terms to find matching documents. + + + +## Search for multiple terms + +* You can search for multiple terms in the __same field__ in a single search method. + +* By default, the logical operator between these terms is 'OR'. + +* This behavior can be modified. See section [Search operators](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-operators). + + + +__Pass terms in a string__: + + + + +{`List employees = session + .Query() + // * Pass multiple terms in a single string, separated by spaces. + // * Look for documents containing either 'University' OR 'Sales' OR 'Japanese' + // within their 'Notes' field + .Search(x => x.Notes, "University Sales Japanese") + .ToList(); + +// * Results will contain Employee documents that have at least one of the specified terms. +// * Search is case-insensitive. +`} + + + + +{`List employees = await asyncSession + .Query() + // * Pass multiple terms in a single string, separated by spaces. + // * Look for documents containing either 'University' OR 'Sales' OR 'Japanese' + // within their 'Notes' field + .Search(x => x.Notes, "University Sales Japanese") + .ToListAsync(); + +// * Results will contain Employee documents that have at least one of the specified terms. +// * Search is case-insensitive. +`} + + + + +{`List employees = session.Advanced + .DocumentQuery() + // * Pass multiple terms in a single string, separated by spaces. + // * Look for documents containing either 'University' OR 'Sales' OR 'Japanese' + // within their 'Notes' field + .Search(x => x.Notes, "University Sales Japanese") + .ToList(); + +// * Results will contain Employee documents that have at least one of the specified terms. +// * Search is case-insensitive. +`} + + + + +{`from "Employees" +where search(Notes, "University Sales Japanese") +`} + + + + + + + + +__Pass terms in a list__: + + + + +{`List employees = session + .Query() + // * Pass terms in IEnumerable. 
+ // * Look for documents containing either 'University' OR 'Sales' OR 'Japanese' + // within their 'Notes' field + .Search(x => x.Notes, new[] { "University", "Sales", "Japanese" }) + .ToList(); + +// * Results will contain Employee documents that have at least one of the specified terms. +// * Search is case-insensitive. +`} + + + + +{`List employees = await asyncSession + .Query() + // * Pass terms in IEnumerable. + // * Look for documents containing either 'University' OR 'Sales' OR 'Japanese' + // within their 'Notes' field + .Search(x => x.Notes, new[] { "University", "Sales", "Japanese" }) + .ToListAsync(); + +// * Results will contain Employee documents that have at least one of the specified terms. +// * Search is case-insensitive. +`} + + + + +{`from "Employees" +where search(Notes, "University Sales Japanese") +`} + + + + + + + + +## Search in multiple fields + +* You can search for terms in __different fields__ by making multiple search calls. + +* By default, the logical operator between consecutive search methods is 'OR'. + +* This behavior can be modified. See section [Search options](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-operators). + + + + + + +{`List employees = session + .Query() + // * Look for documents containing: + // 'French' in their 'Notes' field OR 'President' in their 'Title' field + .Search(x => x.Notes, "French") + .Search(x => x.Title, "President") + .ToList(); + +// * Results will contain Employee documents that have +// at least one of the specified fields with the specified terms. +// * Search is case-insensitive. +`} + + + + +{`List employees = await asyncSession + .Query() + // * Look for documents containing: + // 'French' in their 'Notes' field OR 'President' in their 'Title' field + .Search(x => x.Notes, "French") + .Search(x => x.Title, "President") + .ToListAsync(); + +// * Results will contain Employee documents that have +// at least one of the specified fields with the specified terms. +// * Search is case-insensitive. +`} + + + + +{`List employees = session.Advanced + .DocumentQuery() + // * Look for documents containing: + // 'French' in their 'Notes' field OR 'President' in their 'Title' field + .Search(x => x.Notes, "French") + .Search(x => x.Title, "President") + .ToList(); + +// * Results will contain Employee documents that have +// at least one of the specified fields with the specified terms. +// * Search is case-insensitive. +`} + + + + +{`from "Employees" +where (search(Notes, "French") or search(Title, "President")) +`} + + + + + + + + +## Search in complex object + +* You can search for terms within a complex object. + +* Any nested text field within the object is searchable. + + + + + + +{`List companies = session + .Query() + // * Look for documents that contain: + // the term 'USA' OR 'London' in any field within the complex 'Address' object + .Search(x => x.Address, "USA London") + .ToList(); + +// * Results will contain Company documents that are located either in 'USA' OR in 'London'. +// * Search is case-insensitive. +`} + + + + +{`List companies = await asyncSession + .Query() + // * Look for documents that contain: + // the term 'USA' OR 'London' in any field within the complex 'Address' object + .Search(x => x.Address, "USA London") + .ToListAsync(); + +// * Results will contain Company documents that are located either in 'USA' OR in 'London'. +// * Search is case-insensitive. 
+`}
+
+
+
+{`List<Company> companies = session.Advanced
+    .DocumentQuery<Company>()
+    // * Look for documents that contain:
+    //   the term 'USA' OR 'London' in any field within the complex 'Address' object
+    .Search(x => x.Address, "USA London")
+    .ToList();
+
+// * Results will contain Company documents that are located either in 'USA' OR in 'London'.
+// * Search is case-insensitive.
+`}
+
+
+
+{`from "Companies"
+where search(Address, "USA London")
+`}
+
+
+
+
+
+
+
+## Search operators
+
+* By default, the logical operator between multiple terms within the __same field__ in a search call is __OR__.
+
+* This can be modified using the `@operator` parameter as follows:
+
+
+
+__AND__:
+
+
+
+
+{`List<Employee> employees = session
+    .Query<Employee>()
+    // * Pass \`@operator\` with 'SearchOperator.And'
+    .Search(x => x.Notes, "College German", @operator: SearchOperator.And)
+    .ToList();
+
+// * Results will contain Employee documents that have BOTH 'College' AND 'German'
+//   in their 'Notes' field.
+// * Search is case-insensitive.
+`}
+
+
+
+{`List<Employee> employees = await asyncSession
+    .Query<Employee>()
+    // * Pass \`@operator\` with 'SearchOperator.And'
+    .Search(x => x.Notes, "College German", @operator: SearchOperator.And)
+    .ToListAsync();
+
+// * Results will contain Employee documents that have BOTH 'College' AND 'German'
+//   in their 'Notes' field.
+// * Search is case-insensitive.
+`}
+
+
+
+{`List<Employee> employees = session.Advanced
+    .DocumentQuery<Employee>()
+    // * Pass \`@operator\` with 'SearchOperator.And'
+    .Search(x => x.Notes, "College German", @operator: SearchOperator.And)
+    .ToList();
+
+// * Results will contain Employee documents that have BOTH 'College' AND 'German'
+//   in their 'Notes' field.
+// * Search is case-insensitive.
+`}
+
+
+
+{`from "Employees"
+where search(Notes, "College German", and)
+`}
+
+
+
+
+
+
+
+__OR__:
+
+
+
+
+{`List<Employee> employees = session
+    .Query<Employee>()
+    // * Pass \`@operator\` with 'SearchOperator.Or' (or don't pass this param at all)
+    .Search(x => x.Notes, "College German", @operator: SearchOperator.Or)
+    .ToList();
+
+// * Results will contain Employee documents that have EITHER 'College' OR 'German'
+//   in their 'Notes' field.
+// * Search is case-insensitive.
+`}
+
+
+
+{`List<Employee> employees = await asyncSession
+    .Query<Employee>()
+    // * Pass \`@operator\` with 'SearchOperator.Or' (or don't pass this param at all)
+    .Search(x => x.Notes, "College German", @operator: SearchOperator.Or)
+    .ToListAsync();
+
+// * Results will contain Employee documents that have EITHER 'College' OR 'German'
+//   in their 'Notes' field.
+// * Search is case-insensitive.
+`}
+
+
+
+{`List<Employee> employees = session.Advanced
+    .DocumentQuery<Employee>()
+    // * Pass \`@operator\` with 'SearchOperator.Or' (or don't pass this param at all)
+    .Search(x => x.Notes, "College German", @operator: SearchOperator.Or)
+    .ToList();
+
+// * Results will contain Employee documents that have EITHER 'College' OR 'German'
+//   in their 'Notes' field.
+// * Search is case-insensitive.
+`}
+
+
+
+{`from "Employees"
+where search(Notes, "College German")
+`}
+
+
+
+
+
+
+
+## Search options
+
+* Search options allow you to:
+   * Negate a search criterion.
+   * Specify the logical operator used between __consecutive search calls__.
+
+* When using `Query`: use the `options` parameter.
+  When using `DocumentQuery`: follow the specific syntax in each example below.
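+
+As a baseline for the examples below, here is a minimal sketch (reusing the `Employee` class
+from the examples above) of the default: `SearchOptions.Guess` - the default value listed in
+the Syntax section - lets the server infer the operator from the query context, so passing it
+explicitly is equivalent to passing no option at all:
+
+
+
+{`List<Employee> employees = session
+    .Query<Employee>()
+    .Search(x => x.Notes, "French")
+    // 'SearchOptions.Guess' is the default -
+    // between two consecutive 'Search' calls it resolves to operator OR
+    .Search(x => x.Title, "President", options: SearchOptions.Guess)
+    .ToList();
+
+// Same results as omitting the 'options' parameter entirely
+`}
+
+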
+
+
+
+__Negate search__:
+
+
+
+
+{`List<Company> companies = session
+    .Query<Company>()
+    // Pass 'options' with 'SearchOptions.Not'
+    .Search(x => x.Address, "USA", options: SearchOptions.Not)
+    .ToList();
+
+// * Results will contain Company documents that are NOT located in 'USA'
+// * Search is case-insensitive
+`}
+
+
+
+{`List<Company> companies = await asyncSession
+    .Query<Company>()
+    // Pass 'options' with 'SearchOptions.Not'
+    .Search(x => x.Address, "USA", options: SearchOptions.Not)
+    .ToListAsync();
+
+// * Results will contain Company documents that are NOT located in 'USA'
+// * Search is case-insensitive
+`}
+
+
+
+{`List<Company> companies = session.Advanced
+    .DocumentQuery<Company>()
+    .OpenSubclause()
+    // Call 'Not' to negate the next search call
+    .Not
+    .Search(x => x.Address, "USA")
+    .CloseSubclause()
+    .ToList();
+
+// * Results will contain Company documents that are NOT located in 'USA'
+// * Search is case-insensitive
+`}
+
+
+
+{`from "Companies"
+where (exists(Address) and not search(Address, "USA"))
+`}
+
+
+
+
+
+
+
+__Default behavior between search calls__:
+
+* By default, the logical operator between consecutive search methods is __OR__.
+
+
+
+
+{`List<Company> companies = session
+    .Query<Company>()
+    .Where(x => x.Contact.Title == "Owner")
+    // Operator AND will be used with previous 'Where' predicate
+    .Search(x => x.Address.Country, "France")
+    // Operator OR will be used between the two 'Search' calls by default
+    .Search(x => x.Name, "Markets")
+    .ToList();
+
+// * Results will contain Company documents that have:
+//   ('Owner' as the 'Contact.Title')
+//   AND
+//   (are located in 'France' OR have 'Markets' in their 'Name' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+{`List<Company> companies = await asyncSession
+    .Query<Company>()
+    .Where(x => x.Contact.Title == "Owner")
+    // Operator AND will be used with previous 'Where' predicate
+    .Search(x => x.Address.Country, "France")
+    // Operator OR will be used between the two 'Search' calls by default
+    .Search(x => x.Name, "Markets")
+    .ToListAsync();
+
+// * Results will contain Company documents that have:
+//   ('Owner' as the 'Contact.Title')
+//   AND
+//   (are located in 'France' OR have 'Markets' in their 'Name' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+{`List<Company> companies = session.Advanced
+    .DocumentQuery<Company>()
+    .WhereEquals(x => x.Contact.Title, "Owner")
+    // Operator AND will be used with previous 'Where' predicate
+    // Call 'OpenSubclause' to open predicate block
+    .OpenSubclause()
+    .Search(x => x.Address.Country, "France")
+    // Operator OR will be used between the two 'Search' calls by default
+    .Search(x => x.Name, "Markets")
+    // Call 'CloseSubclause' to close predicate block
+    .CloseSubclause()
+    .ToList();
+
+// * Results will contain Company documents that have:
+//   ('Owner' as the 'Contact.Title')
+//   AND
+//   (are located in 'France' OR have 'Markets' in their 'Name' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+{`from "Companies"
+where Contact.Title == "Owner" and
+(search(Address.Country, "France") or search(Name, "Markets"))
+`}
+
+
+
+
+
+
+
+__AND search calls__:
+
+
+
+
+{`List<Employee> employees = session
+    .Query<Employee>()
+    .Search(x => x.Notes, "French")
+    // * Pass 'options' with 'SearchOptions.And' to the second 'Search'
+    // * Operator AND will be used with the previous 'Search' call
+    .Search(x => x.Title, "Manager", options: SearchOptions.And)
+    .ToList();
+
+// * Results will contain Employee documents that have:
+//   ('French' in their 'Notes' field)
+//   AND
+//   ('Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+{`List<Employee> employees = await asyncSession
+    .Query<Employee>()
+    .Search(x => x.Notes, "French")
+    // * Pass 'options' with 'SearchOptions.And' to this second 'Search'
+    // * Operator AND will be used with the previous 'Search' call
+    .Search(x => x.Title, "Manager", options: SearchOptions.And)
+    .ToListAsync();
+
+// * Results will contain Employee documents that have:
+//   ('French' in their 'Notes' field)
+//   AND
+//   ('Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+{`List<Employee> employees = session.Advanced
+    .DocumentQuery<Employee>()
+    .Search(x => x.Notes, "French")
+    // Call 'AndAlso' so that operator AND will be used with previous 'Search' call
+    .AndAlso()
+    .Search(x => x.Title, "Manager")
+    .ToList();
+
+// * Results will contain Employee documents that have:
+//   ('French' in their 'Notes' field)
+//   AND
+//   ('Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+{`from "Employees"
+where search(Notes, "French") and search(Title, "Manager")
+`}
+
+
+
+
+
+
+
+__Use options as bit flags__:
+
+
+
+
+{`List<Employee> employees = session
+    .Query<Employee>()
+    .Search(x => x.Notes, "French")
+    // Pass logical operators as flags in the 'options' parameter
+    .Search(x => x.Title, "Manager", options: SearchOptions.Not | SearchOptions.And)
+    .ToList();
+
+// * Results will contain Employee documents that have:
+//   ('French' in their 'Notes' field)
+//   AND
+//   (do NOT have 'Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+ + + + + + +{`List employees = session + .Query() + // Use '*' to replace one or more characters + .Search(x => x.Notes, "art*") + .Search(x => x.Notes, "*logy") + .Search(x => x.Notes, "*mark*") + .ToList(); + +// Results will contain Employee documents that have in their 'Notes' field: +// (terms that start with 'art') OR +// (terms that end with 'logy') OR +// (terms that have the text 'mark' in the middle) +// +// * Search is case-insensitive +`} + + + + +{`List employees = await asyncSession + .Query() + // Use '*' to replace one or more characters + .Search(x => x.Notes, "art*") + .Search(x => x.Notes, "*logy") + .Search(x => x.Notes, "*mark*") + .ToListAsync(); + +// Results will contain Employee documents that have in their 'Notes' field: +// (terms that start with 'art') OR +// (terms that end with 'logy') OR +// (terms that have the text 'mark' in the middle) +// +// * Search is case-insensitive +`} + + + + +{`List employees = session.Advanced + .DocumentQuery() + // Use '*' to replace one or more characters + .Search(x => x.Notes, "art*") + .Search(x => x.Notes, "*logy") + .Search(x => x.Notes, "*mark*") + .ToList(); + +// Results will contain Employee documents that have in their 'Notes' field: +// (terms that start with 'art') OR +// (terms that end with 'logy') OR +// (terms that have the text 'mark' in the middle) +// +// * Search is case-insensitive +`} + + + + +{`from "Employees" where +search(Notes, "art*") or +search(Notes, "*logy") or +search(Notes, "*mark*") +`} + + + + + + + + +## Syntax + + + +{`// Query overloads: +// ================ + +IRavenQueryable Search( + Expression> fieldSelector, + string searchTerms, + decimal boost, + SearchOptions options, + SearchOperator @operator); + +IRavenQueryable Search( + Expression> fieldSelector, + IEnumerable searchTerms, + decimal boost, + SearchOptions options, + SearchOperator @operator); + +// DocumentQuery overloads: +// ======================== + +IDocumentQueryBase Search( + string fieldName, + string searchTerms, + SearchOperator @operator); + +IDocumentQueryBase Search( + Expression> propertySelector, + string searchTerms, + SearchOperator @operator); +`} + + + +| Parameter | Type | Description | +|-------------------|--------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| __fieldSelector__ | `Expression>` | Points to the field in which you search. | +| __fieldName__ | string | Name of the field in which you search. | +| __searchTerms__ | string / `IEnumerable` | A string containing the term or terms (separated by spaces) to search for.
Or, can pass an array (or other `IEnumerable<string>`) with terms to search for. |
| __boost__ | decimal | The boost value.
Learn more in [boost search results](../../../../client-api/session/querying/text-search/boost-search-results.mdx).
Default is `1.0` | +| __options__ | `SearchOptions` enum | Logical operator to use between consecutive Search methods.
Can be `Or`, `And`, `Not`, or `Guess`.
Default is `SearchOptions.Guess` | +| __@operator__ | `SearchOperator` enum | Logical operator to use between multiple terms in the same Search method.
Can be `Or` or `And`.
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-nodejs.mdx
new file mode 100644
index 0000000000..354c9f03c4
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-nodejs.mdx
@@ -0,0 +1,417 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article is about running a full-text search with a **dynamic query**.
+  To learn how to run a full-text search using a static-index, see [full-text search with index](../../../../indexes/querying/searching.mdx).
+
+* Use the `search()` method to query for documents that contain specified term/s
+  within the text of the specified document field/s.
+
+* When running a full-text search with a dynamic query, the **auto-index** created by the server
+  breaks down the text of the searched document field using the [default search analyzer](../../../../indexes/using-analyzers.mdx#ravendb).
+  All generated terms are lower-cased, so the search is **case-insensitive**.
+
+* Gain additional control over term tokenization by running a full-text search
+  using a [static-index](../../../../indexes/querying/searching.mdx), where the used
+  analyzer is configurable.
+
+
+
+* A **boost** value can be set for each search to prioritize results.
+  Learn more in [boost search results](../../../../client-api/session/querying/text-search/boost-search-results.mdx).
+
+* User experience can be enhanced by requesting text fragments that **highlight**
+  the searched terms in the results. Learn more in [highlight search results](../../../../client-api/session/querying/text-search/highlight-query-results.mdx).
+
+* In this page:
+  * [Search for single term](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-for-single-term)
+  * [Search for multiple terms](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-for-multiple-terms)
+  * [Search in multiple fields](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-in-multiple-fields)
+  * [Search in complex object](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-in-complex-object)
+  * [Using wildcards](../../../../client-api/session/querying/text-search/full-text-search.mdx#using-wildcards)
+  * [Syntax](../../../../client-api/session/querying/text-search/full-text-search.mdx#syntax)
+
+
+## Search for single term
+
+
+
+
+
+
+{`const employees = await session
+    // Make a dynamic query on 'Employees' collection
+    .query({ collection: "Employees" })
+    // * Call 'search' to make a Full-Text search
+    // * Search is case-insensitive
+    // * Look for documents containing the term 'University' within their 'Notes' field
+    .search("Notes", "University")
+    .all();
+
+// Results will contain Employee documents that have
+// any case variation of the term 'university' in their 'Notes' field.
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "University")
+`}
+
+
+
+
+
+
+* Executing the above query will generate the auto-index `Auto/Employees/BySearch(Notes)`.
+
+* This auto-index will contain the following two index-fields:
+
+  * `Notes`
+    Contains terms with the original text from the indexed document field 'Notes'.
+    Text is lower-cased and Not tokenized.
+
+
+  * `search(Notes)`
+    Contains __lower-cased terms__ that were tokenized from the 'Notes' field by the [default search analyzer](../../../../indexes/using-analyzers.mdx#ravendb) (RavenStandardAnalyzer).
+    Calling the `search()` method targets these terms to find matching documents.
+
+
+
+## Search for multiple terms
+
+* You can search for multiple terms in the __same field__ in a single search method.
+
+* By default, the logical operator between these terms is __OR__.
+  To perform an 'and' operation between these terms, specify __AND__ explicitly.
+
+
+
+__AND__:
+
+
+
+
+{`const employees = await session
+    .query({ collection: "Employees" })
+    // * Pass multiple terms in a single string, separated by spaces.
+    // * Pass 'AND' as the third parameter
+    .search("Notes", "College German", "AND")
+    .all();
+
+// * Results will contain Employee documents that have BOTH 'College' AND 'German'
+// in their 'Notes' field.
+//
+// * Search is case-insensitive.
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "College German", and)
+`}
+
+
+
+
+
+
+
+
+__OR__:
+
+
+
+
+{`const employees = await session
+    .query({ collection: "Employees" })
+    // * Pass multiple terms in a single string, separated by spaces.
+    // * Pass 'OR' as the third parameter (or don't pass this param at all)
+    .search("Notes", "University Sales Japanese", "OR")
+    .all();
+
+// * Results will contain Employee documents that have
+// either 'University' OR 'Sales' OR 'Japanese' within their 'Notes' field
+//
+// * Search is case-insensitive.
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "University Sales Japanese")
+`}
+
+
+
+
+
+
+
+
+## Search in multiple fields
+
+* You can search for terms in __different fields__ by making multiple search calls.
+
+* By default, the logical operator between __consecutive search methods__ is 'OR'.
+  This behavior can be modified. See examples below.
+
+
+
+__Default behavior between search calls__:
+
+
+
+
+{`const employees = await session
+    .query({ collection: "Employees" })
+    .search("Notes", "French")
+    // Operator OR will be used between the two 'search' calls by default
+    .search("Title", "President")
+    .all();
+
+// * Results will contain Employee documents that have:
+// ('French' in their 'Notes' field) OR ('President' in their 'Title' field)
+//
+// * Search is case-insensitive.
+
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "French") or search(Title, "President")
+`}
+
+
+
+
+
+
+
+{`const companies = await session
+    .query({ collection: "Companies" })
+    .whereEquals("Contact.Title", "Owner")
+    // Operator AND will be used with previous 'where' predicate
+    // Call 'openSubclause' to open predicate block
+    .openSubclause()
+    .search("Address.Country", "France")
+    // Operator OR will be used between the two 'search' calls by default
+    .search("Name", "Markets")
+    // Call 'closeSubclause' to close predicate block
+    .closeSubclause()
+    .all();
+
+// * Results will contain Company documents that have:
+// ('Owner' as the 'Contact.Title')
+// AND
+// (are located in 'France' OR have 'Markets' in their 'Name' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`from "Companies"
+where Contact.Title = "Owner" and
+(search(Address.Country, "France") or search(Name, "Markets"))
+`}
+
+
+
+
+
+
+
+
+__AND search calls__:
+
+
+
+
+{`const employees = await session
+    .query({ collection: "Employees" })
+    .search("Notes", "French")
+    // Call 'andAlso' so that operator AND will be used with previous 'search' call
+    .andAlso()
+    .search("Title", "Manager")
+    .all();
+
+// * Results will contain Employee documents that have:
+// ('French' in their 'Notes' field)
+// AND
+// ('Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "French") and search(Title, "Manager")
+`}
+
+
+
+
+
+
+
+
+__Negate search__:
+
+
+
+
+{`const employees = await session
+    .query({ collection: "Employees" })
+    .search("Notes", "French")
+    .andAlso()
+    // Call 'openSubclause' to open predicate block
+    .openSubclause()
+    // Call 'not' to negate the next search call
+    .not()
+    .search("Title", "Manager")
+    // Call 'closeSubclause' to close predicate block
+    .closeSubclause()
+    .all();
+
+// * Results will contain Employee documents that have:
+// ('French' in their 'Notes' field)
+// AND
+// (do NOT have 'Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "French") and
+(exists(Title) and not search(Title, "Manager"))
+`}
+
+
+
+
+
+
+
+
+## Search in complex object
+
+* You can search for terms within a complex object.
+
+* Any nested text field within the object is searchable.
+
+
+
+
+
+
+{`const companies = await session
+    .query({ collection: "Companies" })
+    // * Look for documents that contain:
+    //   the term 'USA' OR 'London' in any field within the complex 'Address' object
+    .search("Address", "USA London")
+    .all();
+
+// * Results will contain Company documents that are located either in 'USA' OR in 'London'.
+// * Search is case-insensitive.
+`}
+
+
+
+
+{`from "Companies"
+where search(Address, "USA London")
+`}
+
+
+
+
+
+
+
+
+## Using wildcards
+
+* Wildcards can be used to replace:
+  * Prefix of a searched term
+  * Postfix of a searched term
+  * Both prefix & postfix
+
+* Note:
+
+  * Searching with a wildcard as the prefix of the term (e.g. `*text`) is not recommended,
+    as it will cause the server to perform a full index scan.
+
+  * Instead, consider using a static-index that indexes the field in reverse order
+    and then query with a wildcard as the postfix, which is much faster.
+
+
+
+
+
+
+{`const employees = await session
+    .query({ collection: "Employees" })
+    // Use '*' to replace one or more characters
+    .search("Notes", "art*")
+    .search("Notes", "*logy")
+    .search("Notes", "*mark*")
+    .all();
+
+// Results will contain Employee documents that have in their 'Notes' field:
+// (terms that start with 'art') OR
+// (terms that end with 'logy') OR
+// (terms that have the text 'mark' in the middle)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`from "Employees" where
+search(Notes, "art*") or
+search(Notes, "*logy") or
+search(Notes, "*mark*")
+`}
+
+
+
+
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+search(fieldName, searchTerms);
+search(fieldName, searchTerms, operator);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------------|----------|-------------|
+| __fieldName__ | string | Name of the field in which you search. |
+| __searchTerms__ | string | A string containing the term or terms (separated by spaces) to search for. |
+| __operator__ | string | Logical operator to use between multiple terms in the same search method.
Can be `AND` or `OR` (default). | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-php.mdx new file mode 100644 index 0000000000..2ace240e8f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-php.mdx @@ -0,0 +1,741 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article is about running a full-text search with a **dynamic query**. + To learn how to run a full-text search using a static-index, see [full-text search with index](../../../../indexes/querying/searching.mdx). + +* Use the `search()` method to query for documents that contain specified term/s + within the text of the specified document field/s. + +* When running a full-text search with a dynamic query, the **auto-index** created by the server + breaks down the text of the searched document field using the [default search analyzer](../../../../indexes/using-analyzers.mdx#ravendb). + All generated terms are lower-cased, so the search is **case-insensitive**. + +* Gain additional control over term tokenization by running a full-text search + using a [static-index](../../../../indexes/querying/searching.mdx), where the used + analyzer is configurable. + + + +* A **boost** value can be set for each search to prioritize results. + Learn more in [boost search results](../../../../client-api/session/querying/text-search/boost-search-results.mdx). + +* User experience can be enhanced by requesting text fragments that **highlight** + the searched terms in the results. Learn more in [highlight search results](../../../../client-api/session/querying/text-search/highlight-query-results.mdx). + +* In this page: + * [Search for single term](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-for-single-term) + * [Search for multiple terms](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-for-multiple-terms) + * [Search in multiple fields](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-in-multiple-fields) + * [Search in complex object](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-in-complex-object) + * [Search operators](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-operators) + * [Search options](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-options) + * [Using wildcards](../../../../client-api/session/querying/text-search/full-text-search.mdx#using-wildcards) + * [Syntax](../../../../client-api/session/querying/text-search/full-text-search.mdx#syntax) + + +## Search for single term + + + + + + +{`/** @var array $employees */ +$employees = $session + // Make a dynamic query on Employees collection + ->query(Employee::class) + // * Call 'Search' to make a Full-Text search + // * Search is case-insensitive + // * Look for documents containing the term 'University' within their 'Notes' field + ->search("Notes", "University") + ->toList(); + +// Results will contain Employee documents that have +// any case variation of the term 'university' in their 'Notes' field. 
+`} + + + + +{`/** @var array $employees */ +$employees = $session->advanced() + // Make a dynamic DocumentQuery on Employees collection + ->documentQuery(Employee::class) + // * Call 'Search' to make a Full-Text search + // * Search is case-insensitive + // * Look for documents containing the term 'University' within their 'Notes' field + ->search("Notes", "University") + ->toList(); + +// Results will contain Employee documents that have +// any case variation of the term 'university' in their 'Notes' field. +`} + + + + +{`from "Employees" +where search(Notes, "University") +`} + + + + + + +* Executing the above query will generate the auto-index `Auto/Employees/BySearch(Notes)`. + +* This auto-index will contain the following two index-fields: + + * `Notes` + Contains terms with the original text from the indexed document field 'Notes'. + Text is lower-cased and Not tokenized. + + * `search(Notes)` + Contains **lower-cased terms** that were tokenized from the 'Notes' field by the [default search analyzer](../../../../indexes/using-analyzers.mdx#ravendb) (RavenStandardAnalyzer). + Calling the `search()` method targets these terms to find matching documents. + + + +## Search for multiple terms + +* You can search for multiple terms in the **same field** in a single search method. + +* By default, the logical operator between these terms is 'OR'. + +* This behavior can be modified. See section [Search operators](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-operators). + + + +**Pass terms in a string**: + + + + +{`/** @var array $employees */ +$employees = $session + ->query(Employee::class) + // * Pass multiple terms in a single string, separated by spaces. + // * Look for documents containing either 'University' OR 'Sales' OR 'Japanese' + // within their 'Notes' field + ->search("Notes", "University Sales Japanese") + ->toList(); + +// * Results will contain Employee documents that have at least one of the specified terms. +// * Search is case-insensitive. +`} + + + + +{`/** @var array $employees */ +$employees = $session->advanced() + ->documentQuery(Employee::class) + // * Pass multiple terms in a single string, separated by spaces. + // * Look for documents containing either 'University' OR 'Sales' OR 'Japanese' + // within their 'Notes' field + ->search("Notes", "University Sales Japanese") + ->toList(); + +// * Results will contain Employee documents that have at least one of the specified terms. +// * Search is case-insensitive. +`} + + + + +{`from "Employees" +where search(Notes, "University Sales Japanese") +`} + + + + + + + + +**Pass terms in a list**: + + + + +{`/** @var array $employees */ +$employees = $session + ->query(Employee::class) + // * Pass terms in array. + // * Look for documents containing either 'University' OR 'Sales' OR 'Japanese' + // within their 'Notes' field + ->search("Notes", ["University", "Sales", "Japanese"]) + ->toList(); + +// * Results will contain Employee documents that have at least one of the specified terms. +// * Search is case-insensitive. +`} + + + + +{`from "Employees" +where search(Notes, "University Sales Japanese") +`} + + + + + + + + +## Search in multiple fields + +* You can search for terms in **different fields** by making multiple search calls. + +* By default, the logical operator between consecutive search methods is 'OR'. + +* This behavior can be modified. See section [Search operators](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-operators). 
+ + + + + + +{`/** @var array $employees */ +$employees = $session + ->query(Employee::class) + // * Look for documents containing: + // 'French' in their 'Notes' field OR 'President' in their 'Title' field + ->search("Notes", "French") + ->search("Title", "President") + ->toList(); + +// * Results will contain Employee documents that have +// at least one of the specified fields with the specified terms. +// * Search is case-insensitive. +`} + + + + +{`/** @var array $employees */ +$employees = $session->advanced() + ->documentQuery(Employee::class) + // * Look for documents containing: + // 'French' in their 'Notes' field OR 'President' in their 'Title' field + ->search("Notes", "French") + ->search("Title", "President") + ->toList(); + +// * Results will contain Employee documents that have +// at least one of the specified fields with the specified terms. +// * Search is case-insensitive. +`} + + + + +{`from "Employees" +where (search(Notes, "French") or search(Title, "President")) +`} + + + + + + + + +## Search in complex object + +* You can search for terms within a complex object. + +* Any nested text field within the object is searchable. + + + + + + +{`/** @var array $companies */ +$companies = $session + ->query(Company::class) + // * Look for documents that contain: + // the term 'USA' OR 'London' in any field within the complex 'Address' object + ->search("Address", "USA London") + ->toList(); + +// * Results will contain Company documents that are located either in 'USA' OR in 'London'. +// * Search is case-insensitive. +`} + + + + +{`/** @var array $companies */ +$companies = $session->advanced() + ->documentQuery(Company::class) + // * Look for documents that contain: + // the term 'USA' OR 'London' in any field within the complex 'Address' object + ->search("Address", "USA London") + ->toList(); + +// * Results will contain Company documents that are located either in 'USA' OR in 'London'. +// * Search is case-insensitive. +`} + + + + +{`from "Companies" +where search(Address, "USA London") +`} + + + + + + + + +## Search operators + +* By default, the logical operator between multiple terms within the **same field** in a search call is **OR**. + +* This can be modified using the `@operator` parameter as follows: + + + +**AND**: + + + + +{`/** @var array $employees */ +$employees = $session + ->query(Employee::class) + // * Pass operator with SearchOperator::and() + ->search("Notes", "College German", SearchOperator::and()) + ->toList(); + +// * Results will contain Employee documents that have BOTH 'College' AND 'German' +// in their 'Notes' field. +// * Search is case-insensitive. +`} + + + + +{`/** @var array $employees */ +$employees = $session->advanced() + ->documentQuery(Employee::class) + // * Pass operator with SearchOperator::and() + ->search("Notes", "College German", SearchOperator::and()) + ->toList(); + +// * Results will contain Employee documents that have BOTH 'College' AND 'German' +// in their 'Notes' field. +// * Search is case-insensitive. +`} + + + + +{`from "Employees" +where search(Notes, "College German", and) +`} + + + + + + + + +**OR**: + + + + +{`/** @var array $employees */ +$employees = $session + ->query(Employee::class) + // * Pass operator with SearchOperator::or() + ->search("Notes", "College German", SearchOperator::or()) + ->toList(); + +// * Results will contain Employee documents that have EITHER 'College' OR 'German' +// in their 'Notes' field. +// * Search is case-insensitive. 
+
+`}
+
+
+
+
+{`/** @var array $employees */
+$employees = $session->advanced()
+    ->documentQuery(Employee::class)
+    // * Pass operator with SearchOperator::or()
+    ->search("Notes", "College German", SearchOperator::or())
+    ->toList();
+
+// * Results will contain Employee documents that have EITHER 'College' OR 'German'
+// in their 'Notes' field.
+// * Search is case-insensitive.
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "College German")
+`}
+
+
+
+
+
+
+
+
+## Search options
+
+Search options allow you to:
+
+* Negate a search criterion.
+* Specify the logical operator used between **consecutive search calls**.
+
+
+
+**Negate search**:
+
+
+
+
+{`/** @var array $companies */
+$companies = $session
+    ->query(Company::class)
+    // Call 'not()' to negate the next search call
+    ->not()
+    ->search("Address", "USA")
+    ->toList();
+
+// * Results will contain Company documents that are NOT located in 'USA'
+// * Search is case-insensitive
+`}
+
+
+
+
+{`/** @var array $companies */
+$companies = $session->advanced()
+    ->documentQuery(Company::class)
+    ->openSubclause()
+    // Call 'not()' to negate the next search call
+    ->not()
+    ->search("Address", "USA")
+    ->closeSubclause()
+    ->toList();
+
+// * Results will contain Company documents that are NOT located in 'USA'
+// * Search is case-insensitive
+`}
+
+
+
+
+{`from "Companies"
+where (exists(Address) and not search(Address, "USA"))
+`}
+
+
+
+
+
+
+
+
+**Default behavior between search calls**:
+
+* By default, the logical operator between consecutive search methods is **OR**.
+
+
+
+
+{`/** @var array $companies */
+$companies = $session
+    ->query(Company::class)
+    ->whereEquals("Contact.Title", "Owner")
+    // Operator AND will be used with the previous 'whereEquals' predicate
+    ->search("Address.Country", "France")
+    // Operator OR will be used between the two 'search' calls by default
+    ->search("Name", "Markets")
+    ->toList();
+
+// * Results will contain Company documents that have:
+// ('Owner' as the 'Contact.Title')
+// AND
+// (are located in 'France' OR have 'Markets' in their 'Name' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`/** @var array $companies */
+$companies = $session->advanced()
+    ->documentQuery(Company::class)
+    ->whereEquals("Contact.Title", "Owner")
+    // Operator AND will be used with the previous 'whereEquals' predicate
+    // Call 'openSubclause()' to open predicate block
+    ->openSubclause()
+    ->search("Address.Country", "France")
+    // Operator OR will be used between the two 'search' calls by default
+    ->search("Name", "Markets")
+    // Call 'closeSubclause()' to close predicate block
+    ->closeSubclause()
+    ->toList();
+
+// * Results will contain Company documents that have:
+// ('Owner' as the 'Contact.Title')
+// AND
+// (are located in 'France' OR have 'Markets' in their 'Name' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`from "Companies"
+where Contact.Title == "Owner" and
+(search(Address.Country, "France") or search(Name, "Markets"))
+`}
+
+
+
+
+
+
+
+**AND search calls**:
+
+
+
+
+{`/** @var array $employees */
+$employees = $session
+    ->query(Employee::class)
+    ->search("Notes", "French")
+    // Call 'andAlso()' so that operator AND will be used with the previous 'search' call
+    ->andAlso()
+    ->search("Title", "Manager")
+    ->toList();
+
+// * Results will contain Employee documents that have:
+// ('French' in their 'Notes' field)
+// AND
+// ('Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`/** @var array $employees */
+$employees = $session->advanced()
+    ->documentQuery(Employee::class)
+    ->search("Notes", "French")
+    // Call 'andAlso()' so that operator AND will be used with the previous 'search' call
+    ->andAlso()
+    ->search("Title", "Manager")
+    ->toList();
+
+// * Results will contain Employee documents that have:
+// ('French' in their 'Notes' field)
+// AND
+// ('Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "French") and search(Title, "Manager")
+`}
+
+
+
+
+
+
+
+
+**Combine AND with negation**:
+
+
+
+
+{`/** @var array $employees */
+$employees = $session
+    ->query(Employee::class)
+    ->search("Notes", "French")
+    // Call 'andAlso()' so that operator AND will be used with the previous 'search' call
+    ->andAlso()
+    ->openSubclause()
+    // Call 'not()' to negate the next search call
+    ->not()
+    ->search("Title", "Manager")
+    ->closeSubclause()
+    ->toList();
+
+// * Results will contain Employee documents that have:
+// ('French' in their 'Notes' field)
+// AND
+// (do NOT have 'Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`/** @var array $employees */
+$employees = $session->advanced()
+    ->documentQuery(Employee::class)
+    ->search("Notes", "French")
+    // Call 'andAlso()' so that operator AND will be used with the previous 'search' call
+    ->andAlso()
+    ->openSubclause()
+    // Call 'not()' to negate the next search call
+    ->not()
+    ->search("Title", "Manager")
+    ->closeSubclause()
+    ->toList();
+
+// * Results will contain Employee documents that have:
+// ('French' in their 'Notes' field)
+// AND
+// (do NOT have 'Manager' in their 'Title' field)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "French") and
+(exists(Title) and not search(Title, "Manager"))
+`}
+
+
+
+
+
+
+
+
+## Using wildcards
+
+* Wildcards can be used to replace:
+  * Prefix of a searched term
+  * Postfix of a searched term
+  * Both prefix & postfix
+
+* Note:
+
+  * Searching with a wildcard as the prefix of the term (e.g. `*text`) is
+    not advised, as it will cause the server to perform a full index scan.
+
+  * Instead, consider using a static-index that indexes the field in reverse order
+    and then query with a wildcard as the postfix, which is much faster.
+
+
+
+
+
+
+{`/** @var array $employees */
+$employees = $session
+    ->query(Employee::class)
+    // Use '*' to replace one or more characters
+    ->search("Notes", "art*")
+    ->search("Notes", "*logy")
+    ->search("Notes", "*mark*")
+    ->toList();
+
+// Results will contain Employee documents that have in their 'Notes' field:
+// (terms that start with 'art') OR
+// (terms that end with 'logy') OR
+// (terms that have the text 'mark' in the middle)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`/** @var array $employees */
+$employees = $session->advanced()
+    ->documentQuery(Employee::class)
+    // Use '*' to replace one or more characters
+    ->search("Notes", "art*")
+    ->search("Notes", "*logy")
+    ->search("Notes", "*mark*")
+    ->toList();
+
+// Results will contain Employee documents that have in their 'Notes' field:
+// (terms that start with 'art') OR
+// (terms that end with 'logy') OR
+// (terms that have the text 'mark' in the middle)
+//
+// * Search is case-insensitive
+`}
+
+
+
+
+{`from "Employees" where
+search(Notes, "art*") or
+search(Notes, "*logy") or
+search(Notes, "*mark*")
+`}
+
+
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public function search(string $fieldName, string $searchTerms, ?SearchOperator $operator = null): DocumentQueryInterface;
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| **$fieldName** | `string` | Name of the searched field. |
+| **$searchTerms** | `string` | A string containing the term or terms (separated by spaces) to search for. |
+| **$operator** | `?SearchOperator` | Logical operator to use between multiple terms in the same search method.<br/>**Can be**: `SearchOperator::or()` or `SearchOperator::and()`<br/>**Default**: `SearchOperator::or()` |
+
+| Return Type | Description |
+| ------------| ----------- |
+| `DocumentQueryInterface` | Query results |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-python.mdx
new file mode 100644
index 0000000000..c631a150e7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_full-text-search-python.mdx
@@ -0,0 +1,456 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article is about running a full-text search with a **dynamic query**.
+  To learn how to run a full-text search using a static-index, see [full-text search with index](../../../../indexes/querying/searching.mdx).
+
+* Use the `search()` method to query for documents that contain specified term/s
+  within the text of the specified document field/s.
+
+* When running a full-text search with a dynamic query, the **auto-index** created by the server
+  breaks down the text of the searched document field using the [default search analyzer](../../../../indexes/using-analyzers.mdx#ravendb).
+  All generated terms are lower-cased, so the search is **case-insensitive**.
+
+* Gain additional control over term tokenization by running a full-text search
+  using a [static-index](../../../../indexes/querying/searching.mdx), where the used
+  analyzer is configurable.
+
+
+
+* A **boost** value can be set for each search to prioritize results.
+  Learn more in [boost search results](../../../../client-api/session/querying/text-search/boost-search-results.mdx).
+
+* User experience can be enhanced by requesting text fragments that **highlight**
+  the searched terms in the results. Learn more in [highlight search results](../../../../client-api/session/querying/text-search/highlight-query-results.mdx).
+
+* In this page:
+  * [Search for single term](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-for-single-term)
+  * [Search for multiple terms](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-for-multiple-terms)
+  * [Search in multiple fields](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-in-multiple-fields)
+  * [Search in complex object](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-in-complex-object)
+  * [Search operators](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-operators)
+  * [Search options](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-options)
+  * [Using wildcards](../../../../client-api/session/querying/text-search/full-text-search.mdx#using-wildcards)
+  * [Syntax](../../../../client-api/session/querying/text-search/full-text-search.mdx#syntax)
+
+
+## Search for single term
+
+
+
+
+
+
+{`employees = list(
+    session
+    # Make a dynamic query on Employees collection
+    .query(object_type=Employee)
+    # * Call 'search' to make a Full-Text search
+    # * Search is case-insensitive
+    # * Look for documents containing the term 'University' within their 'Notes' field
+    .search("Notes", "University")
+)
+# Results will contain Employee documents that have
+# any case variation of the term 'university' in their 'Notes' field.
+`} + + + + +{`from "Employees" +where search(Notes, "University") +`} + + + + + + +* Executing the above query will generate the auto-index `Auto/Employees/BySearch(Notes)`. + +* This auto-index will contain the following two index-fields: + + * `Notes` + Contains terms with the original text from the indexed document field 'Notes'. + Text is lower-cased and Not tokenized. + + * `search(Notes)` + Contains **lower-cased terms** that were tokenized from the 'Notes' field by the [default search analyzer](../../../../indexes/using-analyzers.mdx#ravendb) (RavenStandardAnalyzer). + Calling the `search()` method targets these terms to find matching documents. + + + +## Search for multiple terms + +* You can search for multiple terms in the **same field** in a single search method. + +* By default, the logical operator between these terms is 'OR'. + +* This behavior can be modified. See section [Search operators](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-operators). + + + +**Pass terms in a string**: + + + + +{`employees = list( + session.query(object_type=Employee) + # * Pass multiple terms in a single string, separated by spaces. + # * Look for documents containing either 'University' OR 'Sales' OR 'Japanese' + # within their 'Notes' field + .search("Notes", "University Sales Japanese") +) + +# * Results will contain Employee documents that have at least one of the specified terms. +# * Search is case-insensitive. +`} + + + + +{`from "Employees" +where search(Notes, "University Sales Japanese") +`} + + + + + + + + +## Search in multiple fields + +* You can search for terms in **different fields** by making multiple search calls. + +* By default, the logical operator between consecutive search methods is 'OR'. + +* This behavior can be modified. See section [Search options](../../../../client-api/session/querying/text-search/full-text-search.mdx#search-operators). + + + + + + +{`employees = list( + session.query(object_type=Employee) + # * Look for documents containing: + # 'French' in their 'Notes' field OR 'President' in their 'Title' field + .search("Notes", "French").search("Title", "President") +) +# * Results will contain Employee documents that have +# at least one of the specified fields with the specified terms. +# * Search is case-insensitive. +`} + + + + +{`from "Employees" +where (search(Notes, "French") or search(Title, "President")) +`} + + + + + + + + +## Search in complex object + +* You can search for terms within a complex object. + +* Any nested text field within the object is searchable. + + + + + + +{`companies = list( + session.query(object_type=Company) + # * Look for documents that contain: + # the term 'USA' OR 'London' in any field within the complex 'Address' object + .search("Address", "USA London") +) +`} + + + + +{`from "Companies" +where search(Address, "USA London") +`} + + + + + + + + +## Search operators + +* By default, the logical operator between multiple terms within the **same field** in a search call is **OR**. + +* This can be modified using the `@operator` parameter as follows: + + + +**AND**: + + + + +{`employees = list( + session.query(object_type=Employee) + # * Pass \`@operator\` with 'SearchOperator.AND' + .search("Notes", "College German", operator=SearchOperator.AND) +) +# * Results will contain Employee documents that have BOTH 'College' AND 'German' +# in their 'Notes' field. +# * Search is case-insensitive. 
+
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "College German", and)
+`}
+
+
+
+
+
+
+
+
+**OR**:
+
+
+
+
+{`employees = list(
+    session.query(object_type=Employee)
+    # * Pass \`@operator\` with 'SearchOperator.OR' (or don't pass this param at all)
+    .search("Notes", "College German", operator=SearchOperator.OR)
+)
+# * Results will contain Employee documents that have EITHER 'College' OR 'German'
+# in their 'Notes' field.
+# * Search is case-insensitive.
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "College German")
+`}
+
+
+
+
+
+
+
+
+## Search options
+
+Search options allow you to:
+
+* Negate a search criterion.
+* Specify the logical operator used between **consecutive search calls**.
+
+
+
+**Negate search**:
+
+
+
+
+{`companies = list(
+    session.query(object_type=Company)
+    .open_subclause()
+    # Call 'not_' to negate the next search call
+    .not_()
+    .search("Address", "USA")
+    .close_subclause()
+)
+# * Results will contain Company documents that are NOT located in 'USA'
+# * Search is case-insensitive
+`}
+
+
+
+
+{`from "Companies"
+where (exists(Address) and not search(Address, "USA"))
+`}
+
+
+
+
+
+
+
+
+**Default behavior between search calls**:
+
+* By default, the logical operator between consecutive search methods is **OR**.
+
+
+
+
+{`companies = list(
+    session.query(object_type=Company).where_equals("Contact.Title", "Owner")
+    # Operator AND will be used with previous 'where_equals' predicate
+    .search("Address.Country", "France")
+    # Operator OR will be used between the two 'search' calls by default
+    .search("Name", "Markets")
+)
+
+# * Results will contain Company documents that have:
+# ('Owner' as the 'Contact.Title')
+# AND
+# (are located in 'France' OR have 'Markets' in their 'Name' field)
+#
+# * Search is case-insensitive
+`}
+
+
+
+
+{`from "Companies"
+where Contact.Title == "Owner" and
+(search(Address.Country, "France") or search(Name, "Markets"))
+`}
+
+
+
+
+
+
+
+
+**Combine AND with negation**:
+
+
+
+
+{`employees = list(
+    session.advanced.document_query(object_type=Employee)
+    .search("Notes", "French")
+    # Call 'and_also' so that operator AND will be used with the previous 'search' call
+    .and_also()
+    .open_subclause()
+    # Call 'not_' to negate the next search call
+    .not_()
+    .search("Title", "Manager")
+    .close_subclause()
+)
+
+# * Results will contain Employee documents that have:
+# ('French' in their 'Notes' field)
+# AND
+# (do NOT have 'Manager' in their 'Title' field)
+#
+# * Search is case-insensitive
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "French") and
+(exists(Title) and not search(Title, "Manager"))
+`}
+
+
+
+
+
+
+
+
+## Using wildcards
+
+* Wildcards can be used to replace:
+  * Prefix of a searched term
+  * Postfix of a searched term
+  * Both prefix & postfix
+
+* Note:
+
+  * Searching with a wildcard as the prefix of the term (e.g. `*text`) is
+    not advised, as it will cause the server to perform a full index scan.
+
+  * Instead, consider using a static-index that indexes the field in reverse order
+    and then query with a wildcard as the postfix, which is much faster.
+ + + + + + +{`employees = list( + session.query(object_type=Employee) + # Use '*' to replace one or more characters + .search("Notes", "art*") + .search("Notes", "*logy") + .search("Notes", "*mark*") +) + +# Results will contain Employee documents that have in their 'Notes' field: +# (terms that start with 'art') OR +# (terms that end with 'logy') OR +# (terms that have the text 'mark' in the middle) +# +# * Search is case-insensitive +`} + + + + +{`from "Employees" where +search(Notes, "art*") or +search(Notes, "*logy") or +search(Notes, "*mark*") +`} + + + + + + + + +## Syntax + + + +{`def search(self, field_name: str, search_terms: str, operator: SearchOperator = None) -> DocumentQuery[_T]: ... +`} + + + +| Parameter | Type | Description | +|-------------------|-------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **field_name** | `str` | Name of the searched field. | +| **search_terms** | `str` | A string containing the term or terms (separated by spaces) to search for. | +| **operator** | `SearchOperator ` | Logical operator to use between multiple terms in the same Search method.
**Can be**: `SearchOperator.OR` or `SearchOperator.AND`
**Default**: `SearchOperator.OR` |
+
+| Return Type | Description |
+| ------------------- | ------------- |
+| `DocumentQuery[_T]` | The same object used for the query |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-csharp.mdx
new file mode 100644
index 0000000000..ce72050e5a
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-csharp.mdx
@@ -0,0 +1,82 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A **fuzzy search** retrieves documents containing terms that closely match a given term instead of exact matches, assisting in finding relevant results when the search term is misspelled or has minor variations.
+
+* Fuzzy search is available only via [DocumentQuery](../../../../client-api/session/querying/document-query/what-is-document-query.mdx) or RQL.
+
+* Use the `Fuzzy` method when querying with `WhereEquals`.
+
+* Note: this is a Lucene feature, available only for Lucene-based indexes.
+
+* In this page:
+  * [Fuzzy search example](../../../../client-api/session/querying/text-search/fuzzy-search.mdx#fuzzy-search-example)
+  * [Syntax](../../../../client-api/session/querying/text-search/fuzzy-search.mdx#syntax)
+
+
+## Fuzzy search example
+
+
+
+
+{`List<Company> companies = session.Advanced
+    .DocumentQuery<Company>()
+    // Query with a term that is misspelled
+    .WhereEquals(x => x.Name, "Ernts Hnadel")
+    // Call 'Fuzzy'
+    // Pass the required similarity, a decimal param between 0.0 and 1.0
+    .Fuzzy(0.5m)
+    .ToList();
+
+// Running the above query on the Northwind sample data returns document: companies/20-A
+// which contains "Ernst Handel" in its Name field.
+`}
+
+
+
+
+{`List<Company> companies = await asyncSession.Advanced
+    .AsyncDocumentQuery<Company>()
+    // Query with a term that is misspelled
+    .WhereEquals(x => x.Name, "Ernts Hnadel")
+    // Call 'Fuzzy'
+    // Pass the required similarity, a decimal param between 0.0 and 1.0
+    .Fuzzy(0.5m)
+    .ToListAsync();
+
+// Running the above query on the Northwind sample data returns document: companies/20-A
+// which contains "Ernst Handel" in its Name field.
+`}
+
+
+
+
+{`from "Companies"
+where fuzzy(Name = "Ernts Hnadel", 0.5)
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`IDocumentQuery<T> Fuzzy(decimal fuzzy);
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------|-----------|-------------|
+| **fuzzy** | `decimal` | A value between `0.0` and `1.0`.<br/>With a value closer to `1.0`, terms with a higher similarity will be matched. |
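+
+As an informal illustration (not part of the original examples), raising the similarity value narrows the matches;
+the value below is arbitrary:
+
+
+{`// A stricter similarity such as 0.9 may no longer match "Ernst Handel"
+// for this misspelled term, while the looser 0.5 used above does:
+List<Company> companies = session.Advanced
+    .DocumentQuery<Company>()
+    .WhereEquals(x => x.Name, "Ernts Hnadel")
+    .Fuzzy(0.9m)
+    .ToList();
+`}
+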
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-java.mdx
new file mode 100644
index 0000000000..3bad3781c6
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-java.mdx
@@ -0,0 +1,48 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+* A **fuzzy search** retrieves documents containing terms that closely match a given term instead of exact matches, assisting in finding relevant results when the search term is misspelled or has minor variations.
+
+* Fuzzy search is available only via [documentQuery](../../../../client-api/session/querying/document-query/what-is-document-query.mdx) or RQL.
+
+* Use the `fuzzy` method when querying with `whereEquals`.
+
+* Note: this is a Lucene feature, available only for Lucene-based indexes.
+
+## Syntax
+
+
+
+{`IDocumentQuery<T> fuzzy(double fuzzy);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **fuzzy** | `double` | Value between 0.0 and 1.0, where a value closer to 1.0 requires a closer match. |
+
+## Example
+
+
+
+
+{`session.advanced().documentQuery(Company.class)
+    .whereEquals("Name", "Ernts Hnadel")
+    .fuzzy(0.5)
+    .toList();
+`}
+
+
+
+
+{`from Companies
+where fuzzy(Name = 'Ernts Hnadel', 0.5)
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-nodejs.mdx
new file mode 100644
index 0000000000..ac2f43ced5
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-nodejs.mdx
@@ -0,0 +1,67 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A **fuzzy search** retrieves documents containing terms that closely match a given term instead of exact matches, assisting in finding relevant results when the search term is misspelled or has minor variations.
+
+* Fuzzy search is available only via [DocumentQuery](../../../../client-api/session/querying/document-query/what-is-document-query.mdx) or RQL.
+
+* Use the `fuzzy` method when querying with `whereEquals`.
+
+* Note: this is a Lucene feature, available only for Lucene-based indexes.
+
+
+* In this page:
+  * [Fuzzy search example](../../../../client-api/session/querying/text-search/fuzzy-search.mdx#fuzzy-search-example)
+  * [Syntax](../../../../client-api/session/querying/text-search/fuzzy-search.mdx#syntax)
+
+
+## Fuzzy search example
+
+
+
+
+{`const companies = await session
+    .query({ collection: "Companies" })
+    // Query with a term that is misspelled
+    .whereEquals("Name", "Ernts Hnadel")
+    // Call 'fuzzy'
+    // Pass the required similarity, a number between 0.0 and 1.0
+    .fuzzy(0.5)
+    .all();
+
+// Running the above query on the Northwind sample data returns document: companies/20-A
+// which contains "Ernst Handel" in its Name field.
+`} + + + + +{`from "Companies" +where fuzzy(Name = "Ernts Hnadel", 0.5) +`} + + + + + + +## Syntax + + + +{`fuzzy(fuzzy); +`} + + + +| Parameter | Type | Description | +|-------------|----------|-------------------------------------------------------------------------------------------------------------------| +| **fuzzy** | `number` | A value between `0.0` and `1.0`.
With a value closer to `1.0`, terms with a higher similarity will be matched. | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-php.mdx new file mode 100644 index 0000000000..5903bbb52f --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-php.mdx @@ -0,0 +1,67 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A **fuzzy search** retrieves documents containing terms that closely match a given term instead of exact matches, assisting in finding relevant results when the search term is misspelled or has minor variations. + +* Note: this is a Lucene feature, available only for Lucene-based indexes. + +* In this page: + * [Fuzzy search example](../../../../client-api/session/querying/text-search/fuzzy-search.mdx#fuzzy-search-example) + * [Syntax](../../../../client-api/session/querying/text-search/fuzzy-search.mdx#syntax) + + +## Fuzzy search example + + + + +{`/** @var array $companies */ +$companies = $session->advanced() + ->documentQuery(Company::class) + // Query with a term that is misspelled + ->whereEquals("Name", "Ernts Hnadel") + // Call 'Fuzzy' + // Pass the required similarity, a decimal param between 0.0 and 1.0 + ->fuzzy(0.5) + ->toList(); + +// Running the above query on the Northwind sample data returns document: companies/20-A +// which contains "Ernst Handel" in its Name field. +`} + + + + +{`from "Companies" +where fuzzy(Name = "Ernts Hnadel", 0.5) +`} + + + + + + +## Syntax + + + +{`public function fuzzy(float $fuzzy): DocumentQueryInterface; +`} + + + +| Parameter | Type | Description | +|-------------|-----------|-------------| +| **$fuzzy** | `float` | A value between `0.0` and `1.0`.
With a value closer to `1.0`, terms with a higher similarity are matched. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `DocumentQueryInterface` | Query results |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-python.mdx
new file mode 100644
index 0000000000..d308cd3563
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_fuzzy-search-python.mdx
@@ -0,0 +1,67 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A **fuzzy search** retrieves documents containing terms that closely match a given term instead of exact matches, assisting in finding relevant results when the search term is misspelled or has minor variations.
+
+* Use the `fuzzy` method when querying with `where_equals`.
+
+* Note: this is a Lucene feature, available only for Lucene-based indexes.
+
+* In this page:
+  * [Fuzzy search example](../../../../client-api/session/querying/text-search/fuzzy-search.mdx#fuzzy-search-example)
+  * [Syntax](../../../../client-api/session/querying/text-search/fuzzy-search.mdx#syntax)
+
+
+## Fuzzy search example
+
+
+
+
+{`companies = list(
+    session.advanced.document_query(object_type=Company)
+    # Query with a term that is misspelled
+    .where_equals("Name", "Ernts Hnadel")
+    # Call 'fuzzy'
+    # Pass the required similarity, a decimal param between 0.0 and 1.0
+    .fuzzy(0.5)
+)
+# Running the above query on the Northwind sample data returns document: companies/20-A
+# which contains "Ernst Handel" in its Name field.
+`}
+
+
+
+
+{`from "Companies"
+where fuzzy(Name = "Ernts Hnadel", 0.5)
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`def fuzzy(self, fuzzy: float) -> DocumentQuery[_T]: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|-------------|-----------|------------------|
+| **fuzzy** | `float` | A value between `0.0` and `1.0`.<br/>With a value closer to `1.0`, terms with a higher similarity are matched. |
+
+| Return Type | Description |
+| ----------- | ----------- |
+| `DocumentQuery[_T]` | The same object used for the query |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-csharp.mdx
new file mode 100644
index 0000000000..f310662934
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-csharp.mdx
@@ -0,0 +1,462 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When making a [Full-Text Search query](../../../../client-api/session/querying/text-search/full-text-search.mdx),
+  in addition to retrieving documents that contain the searched terms in the results,
+  you can also request to get a **list of text fragments that highlight the searched terms**.
+
+* The highlighted terms can enhance user experience when searching for documents with specific content.
+
+* This article shows highlighting search results when making a **dynamic-query**.
+  For highlighting search results when querying a **static-index**, see [highlight index search results](../../../../indexes/querying/highlighting.mdx).
+
+* In this page:
+  * [Highlight - basic example](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---basic-example)
+  * [Highlight tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-tags)
+  * [Highlight results in Studio](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-results-in-studio)
+  * [Highlight - customize tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags)
+  * [Highlight - projected results](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---projected-results)
+  * [Syntax](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#syntax)
+
+
+## Highlight - basic example
+
+
+
+
+{`// Make a full-text search dynamic query:
+// ======================================
+List<Employee> employeesResults = session
+    // Make a dynamic query on 'Employees' collection
+    .Query<Employee>()
+    // Search for documents containing the term 'sales' in their 'Notes' field
+    .Search(x => x.Notes, "sales")
+    // Request to highlight the searched term by calling 'Highlight'
+    .Highlight(
+        x => x.Notes,    // The document-field name in which we search
+        35,              // Max length of each text fragment
+        4,               // Max number of fragments to return per document
+        out Highlightings salesHighlights)  // An out param for getting the highlighted text fragments
+    // Execute the query
+    .ToList();
+`}
+
+
+
+
+{`// Make a full-text search dynamic query:
+// ======================================
+List<Employee> employeesResults = await asyncSession
+    // Make a dynamic query on 'Employees' collection
+    .Query<Employee>()
+    // Search for documents containing the term 'sales' in their 'Notes' field
+    .Search(x => x.Notes, "sales")
+    // Request to highlight the searched term by calling 'Highlight'
+    .Highlight(
+        x => x.Notes,    // The document-field name in which we search
+        35,              // Max length of each text fragment
+        4,               // Max number of fragments to return per document
+        out Highlightings salesHighlights)  // An out param for getting the highlighted text fragments
+    // Execute the query
+    .ToListAsync();
+`}
+
+
+
+
+{`// Make a full-text search dynamic DocumentQuery:
+// ==============================================
+List<Employee> employeesResults = session.Advanced
+    // Make a dynamic documentQuery on 'Employees' collection
+    .DocumentQuery<Employee>()
+    // Search for documents containing the term 'sales' in their 'Notes' field
+    .Search(x => x.Notes, "sales")
+    // Request to highlight the searched term by calling 'Highlight'
+    .Highlight(
+        x => x.Notes,    // The document-field name in which we search
+        35,              // Max length of each text fragment
+        4,               // Max number of fragments to return per document
+        out Highlightings salesHighlights)  // An out param for getting the highlighted text fragments
+    // Execute the documentQuery
+    .ToList();
+`}
+
+
+
+
+{`from "Employees"
+where search(Notes, "sales")
+include highlight(Notes, 35, 4)
+`}
+
+
+
+
+
+
+{`// Process results:
+// ================
+
+// 'employeesResults' contains all Employee DOCUMENTS that have 'sales' in their 'Notes' field.
+// 'salesHighlights' contains the text FRAGMENTS that highlight the 'sales' term.
+
+StringBuilder builder = new StringBuilder().AppendLine("<ul>");
    "); + +foreach (var employee in employeesResults) +\{ + // Call 'GetFragments' to get all fragments for the specified employee Id + string[] fragments = salesHighlights.GetFragments(employee.Id); + foreach (var fragment in fragments) + \{ + builder.AppendLine( + $"
  • Doc: \{employee.Id\} Fragment: \{fragment\}
  • "); + \} +\} + +string fragmentsHtml = builder.AppendLine("
").ToString(); + +// The resulting fragmentsHtml: +// ============================ + +//
    +//
  • Doc: employees/2-A Fragment: company as a sales
  • +//
  • Doc: employees/2-A Fragment: promoted to sales manager in
  • +//
  • Doc: employees/2-A Fragment: president of sales in March 1993
  • +//
  • Doc: employees/2-A Fragment: member of the Sales Management
  • +//
  • Doc: employees/3-A Fragment: hired as a sales associate in
  • +//
  • Doc: employees/3-A Fragment: promoted to sales representativ
  • +//
  • Doc: employees/5-A Fragment: company as a sales representativ
  • +//
  • Doc: employees/5-A Fragment: promoted to sales manager in
  • +//
  • Doc: employees/5-A Fragment: Sales Management."
  • +//
  • Doc: employees/6-A Fragment: for the Sales Professional.
  • +//
+`} +
+
+
+
+
+
+#### Highlight tags
+* By default, the highlighted term is wrapped with the following html:
+  `<b style="background:yellow">term</b>`
+
+* When requesting to highlight multiple terms,
+  the background color returned for each different term will be in the following order:
+
+  - <span style="border-left: 10px solid yellow"> </span>yellow,
+  - <span style="border-left: 10px solid lawngreen"> </span>lawngreen,
+  - <span style="border-left: 10px solid aquamarine"> </span>aquamarine,
+  - <span style="border-left: 10px solid magenta"> </span>magenta,
+  - <span style="border-left: 10px solid palegreen"> </span>palegreen,
+  - <span style="border-left: 10px solid coral"> </span>coral,
+  - <span style="border-left: 10px solid wheat"> </span>wheat,
+  - <span style="border-left: 10px solid khaki"> </span>khaki,
+  - <span style="border-left: 10px solid lime"> </span>lime,
+  - <span style="border-left: 10px solid deepskyblue"> </span>deepskyblue,
+  - <span style="border-left: 10px solid deeppink"> </span>deeppink,
+  - <span style="border-left: 10px solid salmon"> </span>salmon,
+  - <span style="border-left: 10px solid peachpuff"> </span>peachpuff,
+  - <span style="border-left: 10px solid violet"> </span>violet,
+  - <span style="border-left: 10px solid mediumpurple"> </span>mediumpurple,
+  - <span style="border-left: 10px solid palegoldenrod"> </span>palegoldenrod,
+  - <span style="border-left: 10px solid darkkhaki"> </span>darkkhaki,
+  - <span style="border-left: 10px solid springgreen"> </span>springgreen,
+  - <span style="border-left: 10px solid turquoise"> </span>turquoise,
+  - <span style="border-left: 10px solid powderblue"> </span>powderblue
+
+* The html tags that wrap the highlighted terms can be **customized** to any other tags.
+  See [customize tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags) below.
+
+
+
+
+
+#### Highlight results in Studio
+![Figure 1. Fragments results](./assets/fragmentsResults.png)
+
+1. **Auto-Index**
+   This is the auto-index that was created by the server to serve the dynamic-query.
+
+2. **Results tab**
+   The results tab contains the resulting **documents** that match the provided RQL query.
+
+3. **Highlight tab**
+   The highlight tab shows the resulting **fragments** that were included in the query result.
+
+
+
+
+
+## Highlight - customize tags
+
+* The html tags that wrap the highlighted terms can be **customized** to any other tags.




{`// Define customized tags to use for highlighting the searched terms
// =================================================================
HighlightingOptions tagsToUse = new HighlightingOptions
{
    // Provide strings of your choice to 'PreTags' & 'PostTags', e.g.:
    // the first term searched for will be wrapped with '+++'
    // the second term searched for will be wrapped with '<<<' & '>>>'
    PreTags = new[] { "+++", "<<<" },
    PostTags = new[] { "+++", ">>>" }
};

// Make a full-text search dynamic query:
// ======================================
List<Employee> employeesResults = session
    .Query<Employee>()
    // Search for:
    //   * documents containing the term 'sales' in their 'Notes' field
    //   * OR for documents containing the term 'manager' in their 'Title' field
    .Search(x => x.Notes, "sales")
    .Search(x => x.Title, "manager")
    // Call 'Highlight' for each field searched
    // Pass 'tagsToUse' to OVERRIDE the default tags used
    .Highlight(x => x.Notes, 35, 1, tagsToUse, out Highlightings salesHighlights)
    .Highlight(x => x.Title, 35, 1, tagsToUse, out Highlightings managerHighlights)
    .ToList();
`}



{`// Define customized tags to use for highlighting the searched terms
// =================================================================
HighlightingOptions tagsToUse = new HighlightingOptions
{
    // Provide strings of your choice to 'PreTags' & 'PostTags', e.g.:
    // the first term searched for will be wrapped with '+++'
    // the second term searched for will be wrapped with '<<<' & '>>>'
    PreTags = new[] { "+++", "<<<" },
    PostTags = new[] { "+++", ">>>" }
};

// Make a full-text search dynamic query:
// ======================================
List<Employee> employeesResults = await asyncSession
    .Query<Employee>()
    // Search for:
    //   * documents containing the term 'sales' in their 'Notes' field
    //   * OR for documents containing the term 'manager' in their 'Title' field
    .Search(x => x.Notes, "sales")
    .Search(x => x.Title, "manager")
    // Call 'Highlight' for each field searched
    // Pass 'tagsToUse' to OVERRIDE the default tags used
    .Highlight(x => x.Notes, 35, 1, tagsToUse, out Highlightings salesHighlights)
    .Highlight(x => x.Title, 35, 1, tagsToUse, out Highlightings managerHighlights)
    .ToListAsync();
`}



{`from "Employees"
where (search(Notes, "sales") or search(Title, "manager"))
include highlight(Notes, 35, 1, $p0), highlight(Title, 35, 1, $p1)
{
"p0":{"PreTags":["+++","<<<"],"PostTags":["+++",">>>"]},
"p1":{"PreTags":["+++","<<<"],"PostTags":["+++",">>>"]}
}
`}




{`// The resulting salesHighlights fragments:
// ========================================

// "for the +++Sales+++ Professional."
// "hired as a +++sales+++ associate in"
// "company as a +++sales+++"
// "company as a +++sales+++ representativ"

// The resulting managerHighlights fragments:
// ==========================================

// "Sales <<<Manager>>>"
`}




## Highlight - projected results

* Highlighting can also be used when [projecting query results](../../../../client-api/session/querying/how-to-project-query-results.mdx).




{`// Make a full-text search dynamic query & project results:
// ========================================================
var employeesProjectedResults = session
    .Query<Employee>()
    // Search for documents containing 'manager' or 'german' in their 'Notes' field
    .Search(x => x.Notes, "manager german")
    // Request to highlight the searched terms from the 'Notes' field
    .Highlight(x => x.Notes, 35, 2, out Highlightings termsHighlights)
    // Define the projection
    .Select(x => new
    {
        // These fields will be returned instead of the whole document
        // Note: it is Not mandatory to return the field in which we search for the highlights
        Name = $"{x.FirstName} {x.LastName}",
        x.Title
    })
    .ToList();
`}



{`// Make a full-text search dynamic query & project results:
// ========================================================
var employeesProjectedResults = await asyncSession
    .Query<Employee>()
    // Search for documents containing 'manager' or 'german' in their 'Notes' field
    .Search(x => x.Notes, "manager german")
    // Request to highlight the searched terms from the 'Notes' field
    .Highlight(x => x.Notes, 35, 2, out Highlightings termsHighlights)
    // Define the projection
    .Select(x => new
    {
        // These fields will be returned instead of the whole document
        // Note: it is Not mandatory to return the field in which we search for the highlights
        Name = $"{x.FirstName} {x.LastName}",
        x.Title
    })
    .ToListAsync();
`}



{`from "Employees" as x
where search(x.Notes, "manager german")
select { Name : "{0} {1}".format(x.FirstName, x.LastName), Title : x.Title }
include highlight(Notes, 35, 2)
`}




{`// The resulting fragments from termsHighlights:
// =============================================

// "to sales manager in March"
// "and reads German. He joined"
// "to sales manager in January"
// "in French and German."

// NOTE: each search term is wrapped with a different color
// 'manager' is wrapped with yellow
// 'german' is wrapped with lawngreen
`}




## Syntax



{`IRavenQueryable<T> Highlight(
    string fieldName,
    int fragmentLength,
    int fragmentCount,
    out Highlightings highlightings);

IRavenQueryable<T> Highlight(
    string fieldName,
    int fragmentLength,
    int fragmentCount,
    HighlightingOptions options,
    out Highlightings highlightings);

IRavenQueryable<T> Highlight(
    Expression<Func<T, object>> path,
    int fragmentLength,
    int fragmentCount,
    out Highlightings highlightings);

IRavenQueryable<T> Highlight(
    Expression<Func<T, object>> path,
    int fragmentLength,
    int fragmentCount,
    HighlightingOptions options,
    out Highlightings highlightings);
`}



| Parameter | Type | Description |
|--------------------|-------------------------------|---------------------------------------------------------------------------------------------|
| **fieldName** | string | Name of the field that contains the searched terms to highlight. |
| **path** | `Expression<Func<T, object>>` | Path to the field that contains the searched terms to highlight. |
| **fragmentLength** | int | Maximum length of a text fragment. Must be `>= 18`. |
| **fragmentCount** | int | Maximum number of text fragments that will be returned. |
| **options** | `HighlightingOptions` | Customizing options. |
| **highlightings** | `Highlightings` | An 'out' param that will contain the highlighted text fragments for each returned result. |


**Highlighting options**:



{`public string GroupKey \{ get; set; \}
public string[] PreTags \{ get; set; \}
public string[] PostTags \{ get; set; \}
`}



| Option | Type | Description |
|--------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **GroupKey** | string | Grouping key for the results.<br/>Used when highlighting query results from a [Map-Reduce index](../../../../indexes/querying/highlighting.mdx#highlight-results---map-reduce-index).<br/>If `null`, results are grouped by document ID (default).<br/>Note: Highlighting is Not available for dynamic aggregation queries. |
| **PreTags** | string[] | Array of PRE tags used to wrap the highlighted search terms in the text fragments. |
| **PostTags** | string[] | Array of POST tags used to wrap the highlighted search terms in the text fragments. |
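
For illustration only, a minimal sketch of setting `GroupKey` — the `Products_ByCategory` index and its `Category` group-by field below are hypothetical (not from this article), and map-reduce highlighting itself is covered in the linked article:

{`// A sketch only - 'Products_ByCategory' is a hypothetical map-reduce index
// whose group-by field is 'Category'.
HighlightingOptions options = new HighlightingOptions
\{
    // Group the returned fragments by the map-reduce key instead of by document ID
    GroupKey = "Category"
\};

List<Products_ByCategory.Result> results = session
    .Query<Products_ByCategory.Result, Products_ByCategory>()
    .Search(x => x.Category, "beverages")
    .Highlight(x => x.Category, 35, 2, options, out Highlightings categoryHighlights)
    .ToList();
`}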


**Highlightings object**:



{`public string FieldName \{ get; \}
public IEnumerable<string> ResultIndents;
`}



| Property | Type | Description |
|-------------------|------------------------|-------------------------------------------------------------------|
| **FieldName** | string | Name of the field that contains the searched terms to highlight. |
| **ResultIndents** | `IEnumerable<string>` | The resulting keys (document IDs, or the map-reduce keys). |



{`public string[] GetFragments(string key);
`}



| Method | Description |
|------------------|---------------------------------------------------------------------------------------------------------|
| **GetFragments** | Returns the list of the highlighted text fragments for the passed document ID, or the map-reduce key. |




diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-java.mdx
new file mode 100644
index 0000000000..f4b26eb7d8
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-java.mdx
@@ -0,0 +1,118 @@
import Admonition from '@theme/Admonition';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import CodeBlock from '@theme/CodeBlock';

**Highlighting** can greatly improve the search UX. To take advantage of it, use the `highlight` method.

## Syntax



{`IDocumentQuery<T> highlight(String fieldName,
    int fragmentLength,
    int fragmentCount,
    Reference<Highlightings> highlightings);

IDocumentQuery<T> highlight(String fieldName,
    int fragmentLength,
    int fragmentCount,
    HighlightingOptions options,
    Reference<Highlightings> highlightings);
`}



| Parameters | | |
| ------------- | ------------- | ----- |
| **fieldName** | string | Name of the field to highlight. |
| **fragmentLength** | int | Maximum length of the text fragments that will be returned. |
| **fragmentCount** | int | Maximum number of fragments that will be returned. |
| **options** | `HighlightingOptions` | Options that can be used for customization. |
| **highlightings** | `Highlightings` | A `Highlightings` instance that contains the highlight fragments for each returned result. |

### Options



{`private String groupKey;
private String[] preTags;
private String[] postTags;

// getters and setters
`}



| Options | | |
| ------------- | ------------- | ----- |
| **groupKey** | string | Grouping key for the results. If `null`, results are grouped by document ID (default). |
| **preTags** | `string[]` | Array of pre tags used when highlighting. |
| **postTags** | `string[]` | Array of post tags used when highlighting. |

## Example




{`Reference<Highlightings> highlightingsRef = new Reference<>();
List<SearchItem> results = session
    .query(SearchItem.class, ContentSearchIndex.class)
    .highlight("text", 128, 1, highlightingsRef)
    .search("text", "raven")
    .toList();

StringBuilder builder = new StringBuilder();
builder.append("<ul>");

for (SearchItem result : results) {
    String[] fragments = highlightingsRef.value.getFragments(result.getId());
    builder.append("<li>")
        .append(fragments[0])
        .append("</li>");
}

builder.append("</ul>");
String ul = builder.toString();
`}
+
+ + +{`from index 'ContentSearchIndex' +where search(text, 'raven') +include highlight(text, 128, 1) +`} + + +


## Remarks


Default highlight tags are coloured, and the colours are returned in the following order:

- <span style="border-left: 10px solid yellow"> </span>yellow,
- <span style="border-left: 10px solid lawngreen"> </span>lawngreen,
- <span style="border-left: 10px solid aquamarine"> </span>aquamarine,
- <span style="border-left: 10px solid magenta"> </span>magenta,
- <span style="border-left: 10px solid palegreen"> </span>palegreen,
- <span style="border-left: 10px solid coral"> </span>coral,
- <span style="border-left: 10px solid wheat"> </span>wheat,
- <span style="border-left: 10px solid khaki"> </span>khaki,
- <span style="border-left: 10px solid lime"> </span>lime,
- <span style="border-left: 10px solid deepskyblue"> </span>deepskyblue,
- <span style="border-left: 10px solid deeppink"> </span>deeppink,
- <span style="border-left: 10px solid salmon"> </span>salmon,
- <span style="border-left: 10px solid peachpuff"> </span>peachpuff,
- <span style="border-left: 10px solid violet"> </span>violet,
- <span style="border-left: 10px solid mediumpurple"> </span>mediumpurple,
- <span style="border-left: 10px solid palegoldenrod"> </span>palegoldenrod,
- <span style="border-left: 10px solid darkkhaki"> </span>darkkhaki,
- <span style="border-left: 10px solid springgreen"> </span>springgreen,
- <span style="border-left: 10px solid turquoise"> </span>turquoise,
- <span style="border-left: 10px solid powderblue"> </span>powderblue



diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-nodejs.mdx
new file mode 100644
index 0000000000..2941391736
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-nodejs.mdx
@@ -0,0 +1,339 @@
import Admonition from '@theme/Admonition';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import CodeBlock from '@theme/CodeBlock';



* When making a [Full-Text Search query](../../../../client-api/session/querying/text-search/full-text-search.mdx),
  in addition to retrieving documents that contain the searched terms in the results,
  you can also request to get a __list of text fragments that highlight the searched terms__.

* The highlighted terms can enhance user experience when searching for documents with specific content.

* This article shows highlighting search results when making a __dynamic-query__.
  For highlighting search results when querying a __static-index__ see [highlight index search results](../../../../indexes/querying/highlighting.mdx).
+* In this page: + * [Highlight - basic example](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---basic-example) + * [Highlight tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-tags) + * [Highlight results in Studio](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-results-in-studio) + * [Highlight - customize tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags) + * [Highlight - projected results](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---projected-results) + * [Syntax](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#syntax) + + +## Highlight - basic example + + + + +{`// Make a full-text search dynamic query: +// ====================================== + +// Define a param that will get the highlighted text fragments +let salesHighlights; + +const employeesResults = await session + // Make a dynamic query on 'Employees' collection + .query({ collection: "Employees" }) + // Search for documents containing the term 'sales' in their 'Notes' field + .search("Notes", "sales") + // Request to highlight the searched term by calling 'highlight' + .highlight({ + fieldName: 'Notes', // The document-field name in which we search + fragmentLength: 35, // Max length of each text fragment + fragmentCount: 4 }, // Max number of fragments to return per document + x => { salesHighlights = x; }) // Output param 'salesHighlights' will be filled + // with the highlighted text fragments when query returns. + // Execute the query + .all(); +`} + + + + +{`from "Employees" +where search(Notes, "sales") +include highlight(Notes, 35, 4) +`} + + + + + + +{`// Process results: +// ================ + +// 'employeesResults' contains all Employee DOCUMENTS that have 'sales' in their 'Notes' field. +// 'salesHighlights' contains the text FRAGMENTS that highlight the 'sales' term. + +let fragmentsHtml = "
    "; + +employeesResults.forEach((employee) => \{ + // Call 'getFragments' to get all fragments for the specified employee id + let fragments = salesHighlights.getFragments(employee.id); + + fragments.forEach((fragment) => \{ + fragmentsHtml += \`
  • Doc: $\{employee.id\} Fragment: $\{fragment\}
  • \`; + \}); +\}); + +fragmentsHtml += "
"; + +// The resulting fragmentsHtml: +// ============================ + +//
    +//
  • Doc: employees/2-A Fragment: company as a sales
  • +//
  • Doc: employees/2-A Fragment: promoted to sales manager in
  • +//
  • Doc: employees/2-A Fragment: president of sales in March 1993
  • +//
  • Doc: employees/2-A Fragment: member of the Sales Management
  • +//
  • Doc: employees/3-A Fragment: hired as a sales associate in
  • +//
  • Doc: employees/3-A Fragment: promoted to sales representativ
  • +//
  • Doc: employees/5-A Fragment: company as a sales representativ
  • +//
  • Doc: employees/5-A Fragment: promoted to sales manager in
  • +//
  • Doc: employees/5-A Fragment: Sales Management."
  • +//
  • Doc: employees/6-A Fragment: for the Sales Professional.
  • +//
+`} +
+




#### Highlight tags
* By default, the highlighted term is wrapped with the following html:
  `<span style="background: yellow">term</span>`

* When requesting to highlight multiple terms,
  the background color returned for each different term will be in the following order:

  - <span style="border-left: 10px solid yellow"> </span>yellow,
  - <span style="border-left: 10px solid lawngreen"> </span>lawngreen,
  - <span style="border-left: 10px solid aquamarine"> </span>aquamarine,
  - <span style="border-left: 10px solid magenta"> </span>magenta,
  - <span style="border-left: 10px solid palegreen"> </span>palegreen,
  - <span style="border-left: 10px solid coral"> </span>coral,
  - <span style="border-left: 10px solid wheat"> </span>wheat,
  - <span style="border-left: 10px solid khaki"> </span>khaki,
  - <span style="border-left: 10px solid lime"> </span>lime,
  - <span style="border-left: 10px solid deepskyblue"> </span>deepskyblue,
  - <span style="border-left: 10px solid deeppink"> </span>deeppink,
  - <span style="border-left: 10px solid salmon"> </span>salmon,
  - <span style="border-left: 10px solid peachpuff"> </span>peachpuff,
  - <span style="border-left: 10px solid violet"> </span>violet,
  - <span style="border-left: 10px solid mediumpurple"> </span>mediumpurple,
  - <span style="border-left: 10px solid palegoldenrod"> </span>palegoldenrod,
  - <span style="border-left: 10px solid darkkhaki"> </span>darkkhaki,
  - <span style="border-left: 10px solid springgreen"> </span>springgreen,
  - <span style="border-left: 10px solid turquoise"> </span>turquoise,
  - <span style="border-left: 10px solid powderblue"> </span>powderblue

* The html tags that wrap the highlighted terms can be __customized__ to any other tags.
  See [customize tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags) below.




#### Highlight results in Studio
![Figure 1. Fragments results](./assets/fragmentsResults.png)

1. __Auto-Index__
   This is the auto-index that was created by the server to serve the dynamic-query.

2. __Results tab__
   The results tab contains the resulting __documents__ that match the provided RQL query.

3. __Highlight tab__
   The highlight tab shows the resulting __fragments__ that were included in the query result.




## Highlight - customize tags

* The html tags that wrap the highlighted terms can be __customized__ to any other tags.




{`// Define customized tags to use for highlighting the searched terms
// =================================================================
const tagsToUse = {
    preTags: ["+++", "<<<"],
    postTags: ["+++", ">>>"]
};

// Make a full-text search dynamic query:
// ======================================
let salesHighlights;
let managerHighlights;

const employeesResults = await session
    .query({ collection: "Employees" })
    // Search for:
    //   * documents containing the term 'sales' in their 'Notes' field
    //   * OR for documents containing the term 'manager' in their 'Title' field
    .search("Notes", "sales")
    .search("Title", "manager")
    // Call 'highlight' for each field searched
    // Pass 'tagsToUse' to OVERRIDE the default tags used
    .highlight({ fieldName: 'Notes', fragmentLength: 35, fragmentCount: 1, ...tagsToUse },
        x => { salesHighlights = x; })
    .highlight({ fieldName: 'Title', fragmentLength: 35, fragmentCount: 1, ...tagsToUse },
        x => { managerHighlights = x; })
    .all();
`}



{`from "Employees"
where (search(Notes, "sales") or search(Title, "manager"))
include highlight(Notes, 35, 1, $p0), highlight(Title, 35, 1, $p1)
{
"p0":{"PreTags":["+++","<<<"],"PostTags":["+++",">>>"]},
"p1":{"PreTags":["+++","<<<"],"PostTags":["+++",">>>"]}
}
`}




{`// The resulting salesHighlights fragments:
// ========================================

// "for the +++Sales+++ Professional."
// "hired as a +++sales+++ associate in"
// "company as a +++sales+++"
// "company as a +++sales+++ representativ"

// The resulting managerHighlights fragments:
// ==========================================

// "Sales <<<Manager>>>"
`}




## Highlight - projected results

* Highlighting can also be used when [projecting query results](../../../../client-api/session/querying/how-to-project-query-results.mdx).




{`// Make a full-text search dynamic query & project results:
// ========================================================

// Define a param that will get the highlighted text fragments
let termsHighlights;

// Define the class for the projected results
class Result {
    constructor () {
        this.Name = null;
        this.Title = null;
    }
}

// Make a dynamic query on 'Employees' collection
const employeesResults = await session
    .query({ collection: "Employees" })
    // Search for documents containing 'manager' or 'german' in their 'Notes' field
    .search("Notes", "manager german")
    // Request to highlight the searched terms from the 'Notes' field
    .highlight({ fieldName: "Notes", fragmentLength: 35, fragmentCount: 2 },
        x => { termsHighlights = x; })
    // Define the projection
    .selectFields(QueryData.customFunction("o", "{ Name: o.FirstName + ' ' + o.LastName, Title: o.Title }" ),
        Result)
    .all();
`}



{`from "Employees" as x
where search(x.Notes, "manager german")
select { Name : "{0} {1}".format(x.FirstName, x.LastName), Title : x.Title }
include highlight(Notes, 35, 2)
`}




{`// The resulting fragments from termsHighlights:
// =============================================

// "to sales manager in March"
// "and reads German. He joined"
// "to sales manager in January"
// "in French and German."


// NOTE: each search term is wrapped with a different color
// 'manager' is wrapped with yellow
// 'german' is wrapped with lawngreen
`}




## Syntax



{`highlight(parameters, highlightingsCallback);
`}



| Parameter | Type | Description |
|----------------------------|-------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|
| __parameters__ | `HighlightingParameters` | Parameters for the highlight method. |
| __highlightingsCallback__ | `(highlightResults) => void` | A callback function with an output parameter.<br/>The parameter passed to the callback will be filled with the `Highlightings` object when the query returns. |


__The Highlighting parameters:__

| Parameter | Type | Description |
|--------------------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| __fieldName__ | string | Name of the field that contains the searched terms to highlight. |
| __fragmentLength__ | number | Maximum length of a text fragment. Must be `>= 18`. |
| __fragmentCount__ | number | Maximum number of text fragments that will be returned. |
| __groupKey__ | string | Grouping key for the results.<br/>Used when highlighting query results from a [Map-Reduce index](../../../../indexes/querying/highlighting.mdx#highlight-results---map-reduce-index).<br/>If `null`, results are grouped by document ID (default).<br/>Note: Highlighting is Not available for dynamic aggregation queries. |
| __preTags__ | string[] | Array of PRE tags used to wrap the highlighted search terms in the text fragments. |
| __postTags__ | string[] | Array of POST tags used to wrap the highlighted search terms in the text fragments. |
+ +__The Highlightings object__: + + + +{`class Highlightings \{ + // Name of the field that contains the searched terms to highlight. + get fieldName(); + // The resulting keys (document IDs, or the map-reduce keys) + get resultIndents(); + // Returns the list of the highlighted text fragments for the passed document ID, or the map-reduce key + getFragments(key); +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-php.mdx new file mode 100644 index 0000000000..b8325ec0cf --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-php.mdx @@ -0,0 +1,359 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a [Full-Text Search query](../../../../client-api/session/querying/text-search/full-text-search.mdx), + in addition to retrieving documents that contain the searched terms in the results, + you can also request to get a **list of text fragments that highlight the searched terms**. + +* The highlighted terms can enhance user experience when searching for documents with specific content. + +* This article shows highlighting search results when making a **dynamic-query**. + For highlighting search results when querying a **static-index** see [highlight index search results](../../../../indexes/querying/highlighting.mdx). +* In this page: + * [Highlight - basic example](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---basic-example) + * [Highlight tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-tags) + * [Highlight results in Studio](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-results-in-studio) + * [Highlight - customize tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags) + * [Highlight - projected results](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---projected-results) + * [Syntax](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#syntax) + + +## Highlight - basic example + + + + +{`// Make a full-text search dynamic query: +// ====================================== + +$highlightings = new Highlightings(); + +/** @var array $employeesResults */ +$employeesResults = $session + // Make a dynamic query on 'Employees' collection + ->query(Employee::class) + // Search for documents containing the term 'sales' in their 'Notes' field + ->search("Notes", "sales") + // Request to highlight the searched term by calling 'highlight()' + ->highlight( + "Notes", // The document-field name in which we search + 35, // Max length of each text fragment + 4, // Max number of fragments to return per document + null, // Put null to use default options + $highlightings) // An out param for getting the highlighted text fragments + + // Execute the query + ->toList(); +`} + + + + +{`from "Employees" +where search(Notes, "sales") +include highlight(Notes, 35, 4) +`} + + + + + + +{`// Process results: +// ================ + +// 'employeesResults' contains all Employee DOCUMENTS that have 'sales' in their 'Notes' field. 
// '$highlightings' contains the text FRAGMENTS that highlight the 'sales' term.

$builder = '<ul>';

/** @var Employee $employee */
foreach ($employeesResults as $employee) \{
    // Call 'getFragments' to get all fragments for the specified employee Id
    $fragments = $highlightings->getFragments($employee->getId());
    foreach ($fragments as $fragment) \{
        $builder .= '<li>Doc: ' . $employee->getId() . ' Fragment: ' . $fragment . '</li>';
    \}
\}

$builder .= '</ul>';
$fragmentsHtml = $builder;

// The resulting fragmentsHtml:
// ============================

// <ul>
//   <li>Doc: employees/2-A Fragment: company as a sales</li>
//   <li>Doc: employees/2-A Fragment: promoted to sales manager in</li>
//   <li>Doc: employees/2-A Fragment: president of sales in March 1993</li>
//   <li>Doc: employees/2-A Fragment: member of the Sales Management</li>
//   <li>Doc: employees/3-A Fragment: hired as a sales associate in</li>
//   <li>Doc: employees/3-A Fragment: promoted to sales representativ</li>
//   <li>Doc: employees/5-A Fragment: company as a sales representativ</li>
//   <li>Doc: employees/5-A Fragment: promoted to sales manager in</li>
//   <li>Doc: employees/5-A Fragment: Sales Management."</li>
//   <li>Doc: employees/6-A Fragment: for the Sales Professional.</li>
// </ul>
`}
+




#### Highlight tags

* By default, the highlighted term is wrapped with the following html:
  `<span style="background: yellow">term</span>`

* When requesting to highlight multiple terms,
  the background color returned for each different term will be in the following order:

  - <span style="border-left: 10px solid yellow"> </span>yellow,
  - <span style="border-left: 10px solid lawngreen"> </span>lawngreen,
  - <span style="border-left: 10px solid aquamarine"> </span>aquamarine,
  - <span style="border-left: 10px solid magenta"> </span>magenta,
  - <span style="border-left: 10px solid palegreen"> </span>palegreen,
  - <span style="border-left: 10px solid coral"> </span>coral,
  - <span style="border-left: 10px solid wheat"> </span>wheat,
  - <span style="border-left: 10px solid khaki"> </span>khaki,
  - <span style="border-left: 10px solid lime"> </span>lime,
  - <span style="border-left: 10px solid deepskyblue"> </span>deepskyblue,
  - <span style="border-left: 10px solid deeppink"> </span>deeppink,
  - <span style="border-left: 10px solid salmon"> </span>salmon,
  - <span style="border-left: 10px solid peachpuff"> </span>peachpuff,
  - <span style="border-left: 10px solid violet"> </span>violet,
  - <span style="border-left: 10px solid mediumpurple"> </span>mediumpurple,
  - <span style="border-left: 10px solid palegoldenrod"> </span>palegoldenrod,
  - <span style="border-left: 10px solid darkkhaki"> </span>darkkhaki,
  - <span style="border-left: 10px solid springgreen"> </span>springgreen,
  - <span style="border-left: 10px solid turquoise"> </span>turquoise,
  - <span style="border-left: 10px solid powderblue"> </span>powderblue

* The html tags that wrap the highlighted terms can be **customized** to any other tags.
  See [customize tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags) below.




#### Highlight results in Studio

![Figure 1. Fragments results](./assets/fragmentsResults.png)

1. **Auto-Index**
   This is the auto-index that was created by the server to serve the dynamic-query.

2. **Results tab**
   The results tab contains the resulting **documents** that match the provided RQL query.

3. **Highlight tab**
   The highlight tab shows the resulting **fragments** that were included in the query result.




## Highlight - customize tags

* The html tags that wrap the highlighted terms can be **customized** to any other tags.




{`// Define customized tags to use for highlighting the searched terms
// =================================================================

$salesHighlights = new Highlightings();
$managerHighlights = new Highlightings();

$tagsToUse = new HighlightingOptions();
// Provide strings of your choice to 'PreTags' & 'PostTags', e.g.:
// the first term searched for will be wrapped with '+++'
// the second term searched for will be wrapped with '<<<' & '>>>'
$tagsToUse->setPreTags(["+++", "<<<"]);
$tagsToUse->setPostTags(["+++", ">>>"]);

// Make a full-text search dynamic query:
// ======================================
$employeesResults = $session
    ->query(Employee::class)
    // Search for:
    //   * documents containing the term 'sales' in their 'Notes' field
    //   * OR for documents containing the term 'manager' in their 'Title' field
    ->search("Notes", "sales")
    ->search("Title", "manager")
    // Call 'highlight' for each field searched
    // Pass '$tagsToUse' to OVERRIDE the default tags used
    ->highlight("Notes", 35, 1, $tagsToUse, $salesHighlights)
    ->highlight("Title", 35, 1, $tagsToUse, $managerHighlights)
    ->toList();
`}



{`from "Employees"
where (search(Notes, "sales") or search(Title, "manager"))
include highlight(Notes, 35, 1, $p0), highlight(Title, 35, 1, $p1)
{
"p0":{"PreTags":["+++","<<<"],"PostTags":["+++",">>>"]},
"p1":{"PreTags":["+++","<<<"],"PostTags":["+++",">>>"]}
}
`}




{`// The resulting salesHighlights fragments:
// ========================================

// "for the +++Sales+++ Professional."
// "hired as a +++sales+++ associate in"
// "company as a +++sales+++"
// "company as a +++sales+++ representativ"

// The resulting managerHighlights fragments:
// ==========================================

// "Sales <<<Manager>>>"
`}




## Highlight - projected results

* Highlighting can also be used when [projecting query results](../../../../client-api/session/querying/how-to-project-query-results.mdx).




{`// Make a full-text search dynamic query & project results:
// ========================================================
$termsHighlights = new Highlightings();

/** @var array $employeesProjectedResults */
$employeesProjectedResults = $session
    ->query(Employee::class)
    // Search for documents containing 'manager' or 'german' in their 'Notes' field
    ->search("Notes", "manager german")
    // Request to highlight the searched terms from the 'Notes' field
    ->highlight("Notes", 35, 2, null, $termsHighlights)
    // Define the projection
    ->selectFields(EmployeeDetails::class, QueryData::customFunction("o", "{ name: o.FirstName + ' ' + o.LastName, title: o.Title }"))
    ->toList();
`}



{`from "Employees" as x
where search(x.Notes, "manager german")
select { Name : "{0} {1}".format(x.FirstName, x.LastName), Title : x.Title }
include highlight(Notes, 35, 2)
`}




{`// The resulting fragments from termsHighlights:
// =============================================

// "to sales manager in March"
// "and reads German. He joined"
// "to sales manager in January"
// "in French and German."


// NOTE: each search term is wrapped with a different color
// 'manager' is wrapped with yellow
// 'german' is wrapped with lawngreen
`}




## Syntax



{`function highlight(
    ?string $fieldName,
    int $fragmentLength,
    int $fragmentCount,
    ?HighlightingOptions $options,
    Highlightings &$highlightings
): DocumentQueryInterface;
`}



| Parameter | Type | Description |
|----------------------|-------------------------|--------------------------------------------------------------------------------------------------------------|
| **$fieldName** | `?string` | Name of the field that contains the searched terms to highlight |
| **$fragmentLength** | `int` | Maximum length of a text fragment.<br/>Must be `>= 18` |
| **$fragmentCount** | `int` | Maximum number of text fragments that will be returned |
| **$options** | `?HighlightingOptions` | Customizing options |
| **&$highlightings** | `Highlightings` | An out param (passed by reference) that will contain the highlighted text fragments for each returned result |



**Highlighting options**:



{`private ?string $groupKey;
private ?StringArray $preTags = null;
private ?StringArray $postTags = null;

// getters and setters
`}



| Option | Type | Description |
|---------------|----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **$groupKey** | `?string` | Grouping key for the results.<br/>Used when highlighting query results from a [Map-Reduce index](../../../../indexes/querying/highlighting.mdx#highlight-results---map-reduce-index).<br/>If `null`, results are grouped by document ID (default).<br/>Note: Highlighting is Not available for dynamic aggregation queries. |
| **$preTags** | `?StringArray` | Array of PRE tags used to wrap the highlighted search terms in the text fragments. |
| **$postTags** | `?StringArray` | Array of POST tags used to wrap the highlighted search terms in the text fragments. |



**Highlightings object**:



{`private ?string $fieldName = null;
public function getResultIndents(): array;
`}



| Property | Type | Description |
|------------------------|-------------------------------|-------------|
| **$fieldName** | `?string` | Name of the field that contains the searched terms to highlight |
| **getResultIndents()** | function returning an `array` | The resulting keys (document IDs, or the map-reduce keys) |



{`public function getFragments(?string $key): array;
`}



| Method | Return Type | Description |
|--------------------------------|-------------------------------|-------------|
| **getFragments(?string $key)** | function returning an `array` | Returns the list of the highlighted text fragments for the passed document ID, or the map-reduce key |




diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-python.mdx
new file mode 100644
index 0000000000..4d6da16a2d
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_highlight-query-results-python.mdx
@@ -0,0 +1,373 @@
import Admonition from '@theme/Admonition';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import CodeBlock from '@theme/CodeBlock';



* When making a [Full-Text Search query](../../../../client-api/session/querying/text-search/full-text-search.mdx),
  in addition to retrieving documents that contain the searched terms in the results,
  you can also request to get a **list of text fragments that highlight the searched terms**.

* The highlighted terms can enhance user experience when searching for documents with specific content.

* This article shows highlighting search results when making a **dynamic-query**.
  For highlighting search results when querying a **static-index** see [highlight index search results](../../../../indexes/querying/highlighting.mdx).
+* In this page: + * [Highlight - basic example](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---basic-example) + * [Highlight tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-tags) + * [Highlight results in Studio](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-results-in-studio) + * [Highlight - customize tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags) + * [Highlight - projected results](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---projected-results) + * [Syntax](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#syntax) + + +## Highlight - basic example + + + + +{`# Make a full-text search dynamic query: +# ====================================== + +# Define a callback that takes highlightings as an argument +sales_highlightings: Optional[Highlightings] = None + +def _sales_highlights(highlightings: Highlightings): + # You may use the results (highlightings) here in any way desired + sales_highlightings = highlightings + +employees_result = list( # Execute the query inside the parenthesis + session + # Make a query on 'Employees' collection + .query(object_type=Employee) + # Search for documents containing the term 'sales' in their 'Notes' field + .search("Notes", "sales") + # Request to highlight the searched term by calling 'Highlight' + .highlight( + "Notes", # The document-field name in which we search + 35, # Max length of each text fragment + 4, # Max number of fragments to return per document + _sales_highlights, # An out param for getting the highlighted text fragments + ) +) +`} + + + + +{`from "Employees" +where search(Notes, "sales") +include highlight(Notes, 35, 4) +`} + + + + + + +{`# Process results: +# ================ + +# 'employees_results' contains all Employee DOCUMENTS that have 'sales' in their 'Notes' field. +# 'sales_highlights' contains the text FRAGMENTS that highlight the 'sales' term. + +builder = ["
    ", \{os.linesep\}] +for employee in employees_result: + # Call 'get_fragments' to get all fragments for the specified employee Id + fragments = sales_highlightings.get_fragments(employee.Id) + for fragment in fragments: + builder.append(f"\{os.linesep\}
  • Doc: \{employee.Id\} Fragment: \{fragment\}
  • ") + +fragments_html = builder.append(f"\{os.linesep\}
") + +# The resulting fragments_html: +# ============================ + +#
    +#
  • Doc: employees/2-A Fragment: company as a sales
  • +#
  • Doc: employees/2-A Fragment: promoted to sales manager in
  • +#
  • Doc: employees/2-A Fragment: president of sales in March 1993
  • +#
  • Doc: employees/2-A Fragment: member of the Sales Management
  • +#
  • Doc: employees/3-A Fragment: hired as a sales associate in
  • +#
  • Doc: employees/3-A Fragment: promoted to sales representativ
  • +#
  • Doc: employees/5-A Fragment: company as a sales representativ
  • +#
  • Doc: employees/5-A Fragment: promoted to sales manager in
  • +#
  • Doc: employees/5-A Fragment: Sales Management."
  • +#
  • Doc: employees/6-A Fragment: for the Sales Professional.
  • +#
+`} +
+




#### Highlight tags

* By default, the highlighted term is wrapped with the following html:
  `<span style="background: yellow">term</span>`

* When requesting to highlight multiple terms,
  the background color returned for each different term will be in the following order:

  - <span style="border-left: 10px solid yellow"> </span>yellow,
  - <span style="border-left: 10px solid lawngreen"> </span>lawngreen,
  - <span style="border-left: 10px solid aquamarine"> </span>aquamarine,
  - <span style="border-left: 10px solid magenta"> </span>magenta,
  - <span style="border-left: 10px solid palegreen"> </span>palegreen,
  - <span style="border-left: 10px solid coral"> </span>coral,
  - <span style="border-left: 10px solid wheat"> </span>wheat,
  - <span style="border-left: 10px solid khaki"> </span>khaki,
  - <span style="border-left: 10px solid lime"> </span>lime,
  - <span style="border-left: 10px solid deepskyblue"> </span>deepskyblue,
  - <span style="border-left: 10px solid deeppink"> </span>deeppink,
  - <span style="border-left: 10px solid salmon"> </span>salmon,
  - <span style="border-left: 10px solid peachpuff"> </span>peachpuff,
  - <span style="border-left: 10px solid violet"> </span>violet,
  - <span style="border-left: 10px solid mediumpurple"> </span>mediumpurple,
  - <span style="border-left: 10px solid palegoldenrod"> </span>palegoldenrod,
  - <span style="border-left: 10px solid darkkhaki"> </span>darkkhaki,
  - <span style="border-left: 10px solid springgreen"> </span>springgreen,
  - <span style="border-left: 10px solid turquoise"> </span>turquoise,
  - <span style="border-left: 10px solid powderblue"> </span>powderblue

* The html tags that wrap the highlighted terms can be **customized** to any other tags.
  See [customize tags](../../../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags) below.




#### Highlight results in Studio

![Figure 1. Fragments results](./assets/fragmentsResults.png)

1. **Auto-Index**
   This is the auto-index that was created by the server to serve the dynamic-query.

2. **Results tab**
   The results tab contains the resulting **documents** that match the provided RQL query.

3. **Highlight tab**
   The highlight tab shows the resulting **fragments** that were included in the query result.




## Highlight - customize tags

* The html tags that wrap the highlighted terms can be **customized** to any other tags.




{`# Define customized tags to use for highlighting the searched terms
# =================================================================
tags_to_use = HighlightingOptions(
    # Provide strings of your choice to 'pre_tags' & 'post_tags', e.g.:
    # the first term searched for will be wrapped with '+++'
    # the second term searched for will be wrapped with '<<<' & '>>>'
    pre_tags=["+++", "<<<"],
    post_tags=["+++", ">>>"],
)

# Define callbacks that take highlightings as an argument
sales_highlightings: Optional[Highlightings] = None
manager_highlightings: Optional[Highlightings] = None

def _sales_highlights(highlightings: Highlightings):
    # You may use the results (highlightings) here in any way desired
    sales_highlightings = highlightings

def _manager_highlights(highlightings: Highlightings):
    # You may use the results (highlightings) here in any way desired
    manager_highlightings = highlightings

# Make a full-text search dynamic query:
# ======================================
employees_result = list(
    session.query(object_type=Employee)
    # Search for:
    #   * documents containing the term 'sales' in their 'Notes' field
    #   * OR for documents containing the term 'manager' in their 'Title' field
    .search("Notes", "sales")
    .search("Title", "manager")
    # Call 'highlight' for each field searched
    # Pass 'tags_to_use' to OVERRIDE the default tags used
    # (per the syntax below, the callback precedes the optional options argument)
    .highlight("Notes", 35, 1, _sales_highlights, tags_to_use)
    .highlight("Title", 35, 1, _manager_highlights, tags_to_use)
)
`}



{`from "Employees"
where (search(Notes, "sales") or search(Title, "manager"))
include highlight(Notes, 35, 1, $p0), highlight(Title, 35, 1, $p1)
{
"p0":{"PreTags":["+++","<<<"],"PostTags":["+++",">>>"]},
"p1":{"PreTags":["+++","<<<"],"PostTags":["+++",">>>"]}
}
`}




{`# The resulting salesHighlights fragments:
# ========================================
#
# "for the +++Sales+++ Professional."
# "hired as a +++sales+++ associate in"
# "company as a +++sales+++"
# "company as a +++sales+++ representative"
#
# The resulting managerHighlights fragments:
# ==========================================
#
# "Sales <<<Manager>>>"
`}




## Highlight - projected results

* Highlighting can also be used when [projecting query results](../../../../client-api/session/querying/how-to-project-query-results.mdx).




{`# Make a full-text search dynamic query & project results:
# ========================================================

# Define a callback that takes highlightings as an argument
terms_highlightings: Optional[Highlightings] = None

def _terms_highlights(highlightings: Highlightings):
    # You may use the results (highlightings) here in any way desired
    terms_highlightings = highlightings

employees_projected = list(
    session.query(object_type=Employee)
    # Search for documents containing 'manager' or 'german' in their 'Notes' field
    .search("Notes", "manager german")
    # Request to highlight the searched terms from the 'Notes' field
    .highlight("Notes", 35, 2, _terms_highlights)
    # Define the projection
    .select_fields_query_data(
        QueryData.custom_function("o", "{ Name: o.FirstName + ' ' + o.LastName, Title: o.Title }"),
    )
)
`}



{`from "Employees" as x
where search(x.Notes, "manager german")
select { Name : "{0} {1}".format(x.FirstName, x.LastName), Title : x.Title }
include highlight(Notes, 35, 2)
`}




{`# The resulting fragments from termsHighlights:
# =============================================
#
# "to sales manager in March"
# "and reads German. He joined"
# "to sales manager in January"
# "in French and German."

#
# NOTE: each search term is wrapped with a different color
# 'manager' is wrapped with yellow
# 'german' is wrapped with lawngreen
`}




## Syntax



{`def highlight(
    self,
    field_name: str,
    fragment_length: int,
    fragment_count: int,
    highlightings_callback: Callable[[Highlightings], None],
    options: Optional[HighlightingOptions] = None,
) -> DocumentQuery[_T]: ...
`}



| Parameter | Type | Description |
|----------------------------|-----------------------------------|-------------------------------------------------------------------------------------------|
| **field_name** | `str` | Name of the field that contains the searched terms to highlight |
| **fragment_length** | `int` | Maximum length of a text fragment.<br/>Must be `>= 18` |
| **fragment_count** | `int` | Maximum number of text fragments that will be returned |
| **highlightings_callback** | `Callable[[Highlightings], None]` | A callback function to retrieve the highlighted text fragments for each returned result |
| **options** (Optional) | `HighlightingOptions` | Customizing options |



**Highlighting options**:



{`def __init__(self, group_key: str = None, pre_tags: List[str] = None, post_tags: List[str] = None):
    self.group_key = group_key
    self.pre_tags = pre_tags
    self.post_tags = post_tags
`}



| Option | Type | Description |
|---------------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **group_key** | `str` | Grouping key for the results.<br/>Used when highlighting query results from a [Map-Reduce index](../../../../indexes/querying/highlighting.mdx#highlight-results---map-reduce-index).<br/>If `None`, results are grouped by document ID (default).<br/>Note: Highlighting is Not available for dynamic aggregation queries. |
| **pre_tags** | `List[str]` | Array of PRE tags used to wrap the highlighted search terms in the text fragments. |
| **post_tags** | `List[str]` | Array of POST tags used to wrap the highlighted search terms in the text fragments. |



**Highlightings object**:



{`def __init__(self, field_name: str):
    self.field_name = field_name
    ...

@property
def result_indents(self) -> Set[str]: ...
`}



| Property | Type | Description |
|--------------------|------------|-----------------------------------------------------------------|
| **field_name** | `str` | Name of the field that contains the searched terms to highlight |
| **result_indents** | `Set[str]` | The resulting keys (document IDs, or the map-reduce keys) |



{`def get_fragments(self, key: str) -> List[str]: ...
`}



| Method | Return Type | Description |
|-------------------|-------------|------------------------------------------------------------------------------------------------------|
| **get_fragments** | `List[str]` | Returns the list of the highlighted text fragments for the passed document ID, or the map-reduce key |




diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-csharp.mdx
new file mode 100644
index 0000000000..6a0b3e66e1
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-csharp.mdx
@@ -0,0 +1,199 @@
import Admonition from '@theme/Admonition';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import CodeBlock from '@theme/CodeBlock';



* A **proximity search** retrieves documents containing search terms that are located within a specified distance from each other.
  The distance is measured by the number of intermediate terms.

* Proximity search is available only via [DocumentQuery](../../../../client-api/session/querying/document-query/what-is-document-query.mdx) or RQL.

* Use the `Proximity` method when running a **full-text search** with the [Search](../../../../client-api/session/querying/text-search/full-text-search.mdx) method.

* In this page:
  * [Why use proximity search](../../../../client-api/session/querying/text-search/proximity-search.mdx#why-use-proximity-search)
  * [How proximity works](../../../../client-api/session/querying/text-search/proximity-search.mdx#how-proximity-works)
  * [Proximity search examples](../../../../client-api/session/querying/text-search/proximity-search.mdx#proximity-search-examples)
  * [Syntax](../../../../client-api/session/querying/text-search/proximity-search.mdx#syntax)


## Why use proximity search

* A basic linguistic assumption is that the proximity of words implies a relationship between them.

* Proximity search helps match phrases while avoiding scattered or spread-out terms in the text.

* By limiting the search to only include matches where the terms are within the specified maximum proximity,
  the search results can be more relevant than those with scattered terms.



## How proximity works

* When searching with some specified distance between search terms `term1` and `term2`:

  * Retrieved documents will contain text in which `term1` and `term2` are separated by
    the maximum number of terms specified or less.

  * The search terms can be separated by fewer terms, but not more than the specified distance.

  * Only the terms generated by the [search analyzer](../../../../indexes/using-analyzers.mdx#ravendb)
    are considered towards the count of the maximum distance.
    Words or tokens that are Not part of the generated terms are Not included in the proximity calculation.

* Note:

  * Search criteria should contain at least 2 search terms.

  * Search terms must be simple string terms without wildcards.



## Proximity search examples

## Proximity search (0 distance)




{`List<Employee> employees = session.Advanced
    .DocumentQuery<Employee>()
    // Make a full-text search with search terms
    .Search(x => x.Notes, "fluent french")
    // Call 'Proximity' with 0 distance
    .Proximity(0)
    .ToList();

// Running the above query on the Northwind sample data returns the following Employee documents:
// * employees/2-A
// * employees/5-A
// * employees/9-A

// Each resulting document has the text 'fluent in French' in its 'Notes' field.
//
// The word "in" is not taken into account as it is Not part of the terms list generated
// by the analyzer. (Search is case-insensitive in this case).
//
// Note:
// A document containing text with the search terms appearing with no words in between them
// (e.g. "fluent french") would have also been returned.
`}



{`List<Employee> employees = await asyncSession.Advanced
    .AsyncDocumentQuery<Employee>()
    // Make a full-text search with search terms
    .Search(x => x.Notes, "fluent french")
    // Call 'Proximity' with 0 distance
    .Proximity(0)
    .ToListAsync();

// Running the above query on the Northwind sample data returns the following Employee documents:
// * employees/2-A
// * employees/5-A
// * employees/9-A

// Each resulting document has the text 'fluent in French' in its 'Notes' field.
//
// The word "in" is not taken into account as it is Not part of the terms list generated
// by the analyzer. (Search is case-insensitive in this case).
//
// Note:
// A document containing text with the search terms appearing with no words in between them
// (e.g. "fluent french") would have also been returned.
`}



{`from "Employees"
where proximity(search(Notes, "fluent french"), 0)
`}



## Proximity search (distance > 0)




{`List<Employee> employees = session.Advanced
    .DocumentQuery<Employee>()
    // Make a full-text search with search terms
    .Search(x => x.Notes, "fluent french")
    // Call 'Proximity' with distance 5
    .Proximity(5)
    .ToList();

// Running the above query on the Northwind sample data returns the following Employee documents:
// * employees/2-A
// * employees/5-A
// * employees/6-A
// * employees/9-A

// This time document 'employees/6-A' was added to the previous results since it contains the phrase:
// "fluent in Japanese and can read and write French"
// where the search terms are separated by a count of 4 terms.
//
// "in" & "and" are not taken into account as they are not part of the terms list generated
// by the analyzer. (Search is case-insensitive in this case).
`}



{`List<Employee> employees = await asyncSession.Advanced
    .AsyncDocumentQuery<Employee>()
    // Make a full-text search with search terms
    .Search(x => x.Notes, "fluent french")
    // Call 'Proximity' with distance 5
    .Proximity(5)
    .ToListAsync();

// Running the above query on the Northwind sample data returns the following Employee documents:
// * employees/2-A
// * employees/5-A
// * employees/6-A
// * employees/9-A

// This time document 'employees/6-A' was added to the previous results since it contains the phrase:
// "fluent in Japanese and can read and write French"
// where the search terms are separated by a count of 4 terms.
//
// "in" & "and" are not taken into account as they are not part of the terms list generated
// by the analyzer. (Search is case-insensitive in this case).
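//
// An illustrative aside (an assumption based on the analyzed Northwind text, not part
// of the original example): for 'employees/6-A' the analyzed terms between 'fluent'
// and 'french' are [japanese, can, read, write] - 4 intermediate terms, which is
// within the specified distance of 5, so the document matches.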
+`} + + + + +{`from "Employees" +where proximity(search(Notes, "fluent french"), 5) +`} + + + + + + +## Syntax + + + +{`IDocumentQuery Proximity(int proximity); +`} + + + +| Parameter | Type | Description | +|---------------|-------|------------------------------------------------------------------------------------------| +| **proximity** | `int` | The maximum number of terms between the search terms.
Can be greater or equal to `0`. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-java.mdx
new file mode 100644
index 0000000000..9a6089d5ce
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-java.mdx
@@ -0,0 +1,45 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To find words that appear within a specific distance of each other, use the `proximity` method.
+This method is available only at the [DocumentQuery](../../../../client-api/session/querying/document-query/what-is-document-query.mdx) level and can only be used right after the `search` method.
+
+## Syntax
+
+
+
+{`IDocumentQuery proximity(int proximity);
+`}
+
+
+
+| Parameter | Type | Description |
+| ------------- | ------------- | ----- |
+| **proximity** | `int` | The maximum number of terms allowed between the search terms. |
+
+## Example
+
+
+
+
+{`session
+    .advanced()
+    .documentQuery(Fox.class)
+    .search("name", "quick fox")
+    .proximity(2)
+    .toList();
+`}
+
+
+
+{`from Foxes
+where proximity(search(Name, 'quick fox'), 2)
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-nodejs.mdx
new file mode 100644
index 0000000000..76951e8a94
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-nodejs.mdx
@@ -0,0 +1,154 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A __proximity search__ retrieves documents containing search terms that are located within a specified distance from each other.
+  The distance is measured by the number of intermediate terms.
+
+* Use the `proximity` method when running a __full-text search__ by the [search](../../../../client-api/session/querying/text-search/full-text-search.mdx) method.
+
+* In this page:
+  * [Why use proximity search](../../../../client-api/session/querying/text-search/proximity-search.mdx#why-use-proximity-search)
+  * [How proximity works](../../../../client-api/session/querying/text-search/proximity-search.mdx#how-proximity-works)
+  * [Proximity search examples](../../../../client-api/session/querying/text-search/proximity-search.mdx#proximity-search-examples)
+  * [Syntax](../../../../client-api/session/querying/text-search/proximity-search.mdx#syntax)
+
+
+## Why use proximity search
+
+* A basic linguistic assumption is that the proximity of words implies a relationship between them.
+
+* Proximity search helps match phrases while avoiding scattered or spread-out terms in the text.
+
+* By limiting the search to only include matches where the terms are within the specified maximum proximity,
+  the search results can be more relevant than those with scattered terms.
+
+
+
+## How proximity works
+
+* When searching with some specified distance between search terms `term1` and `term2`:
+
+  * Retrieved documents will contain text in which `term1` and `term2` are separated by
+    the maximum number of terms specified or less.
+
+  * The search terms can be separated by fewer terms, but not more than the specified distance.
+
+  * Only the terms generated by the [search analyzer](../../../../indexes/using-analyzers.mdx#ravendb)
+    are considered towards the count of the maximum distance.
+    Words or tokens that are Not part of the generated terms are Not included in the proximity calculation.
+
+* Note:
+
+  * Search criteria should contain at least 2 search terms.
+
+  * Search terms must be simple string terms without wildcards.
+
+
+
+## Proximity search examples
+
+
+
+__Proximity search (0 distance)__
+
+
+
+
+{`const employees = await session
+    .query({ collection: "Employees" })
+    // Make a full-text search with search terms
+    .search("Notes", "fluent french")
+    // Call 'proximity' with 0 distance
+    .proximity(0)
+    .all();
+
+// Running the above query on the Northwind sample data returns the following Employee documents:
+// * employees/2-A
+// * employees/5-A
+// * employees/9-A
+
+// Each resulting document has the text 'fluent in French' in its 'Notes' field.
+//
+// The word "in" is not taken into account as it is Not part of the terms list generated
+// by the analyzer. (Search is case-insensitive in this case).
+//
+// Note:
+// A document containing text with the search terms appearing with no words in between them
+// (e.g. "fluent french") would have also been returned.
+`}
+
+
+
+{`from "Employees"
+where proximity(search(Notes, "fluent french"), 0)
+`}
+
+
+
+
+
+
+__Proximity search (distance > 0)__
+
+
+
+
+{`const employees = await session
+    .query({ collection: "Employees" })
+    // Make a full-text search with search terms
+    .search("Notes", "fluent french")
+    // Call 'proximity' with distance 5
+    .proximity(5)
+    .all();
+
+// Running the above query on the Northwind sample data returns the following Employee documents:
+// * employees/2-A
+// * employees/5-A
+// * employees/6-A
+// * employees/9-A
+
+// This time document 'employees/6-A' was added to the previous results since it contains the phrase:
+// "fluent in Japanese and can read and write French"
+// where the search terms are separated by a count of 4 terms.
+//
+// "in" & "and" are not taken into account as they are not part of the terms list generated
+// by the analyzer. (Search is case-insensitive in this case).
+`}
+
+
+
+{`from "Employees"
+where proximity(search(Notes, "fluent french"), 5)
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`proximity(proximity);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|----------|--------------------------------------------------------------------------------------------|
+| __proximity__ | `number` | The maximum number of terms between the search terms.
Can be greater or equal to `0`. | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-php.mdx new file mode 100644 index 0000000000..61371297e7 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-php.mdx @@ -0,0 +1,149 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A **proximity search** retrieves documents containing search terms that are located within a specified distance from each other. + The distance is measured by the number of intermediate terms. + +* Use the `proximity` method when running a **full-text search** by the [search](../../../../client-api/session/querying/text-search/full-text-search.mdx) method. + +* In this page: + * [Why use proximity search](../../../../client-api/session/querying/text-search/proximity-search.mdx#why-use-proximity-search) + * [How proximity works](../../../../client-api/session/querying/text-search/proximity-search.mdx#how-proximity-works) + * [Proximity search examples](../../../../client-api/session/querying/text-search/proximity-search.mdx#proximity-search-examples) + * [Syntax](../../../../client-api/session/querying/text-search/proximity-search.mdx#syntax) + + +## Why use proximity search + +* A basic linguistic assumption is that the proximity of words implies a relationship between them. + +* Proximity search helps match phrases while avoiding scattered or spread-out terms in the text. + +* By limiting the search to only include matches where the terms are within the specified maximum proximity, + the search results can be more relevant than those with scattered terms. + + + +## How proximity works + +* When searching with some specified distance between search terms `term1` and `term2`: + + * Retrieved documents will contain text in which `term1` and `term2` are separated by + the maximum number of terms specified or less. + + * The search terms can be separated by fewer terms, but not more than the specified distance. + + * Only the terms generated by the [search analyzer](../../../../indexes/using-analyzers.mdx#ravendb) + are considered towards the count of the maximum distance. + Words or tokens that are Not part of the generated terms are Not included in the proximity calculation. + +* Note: + + * Search criteria should contain at least 2 search terms. + + * Search terms must be simple string terms without wildcards. + + + +## Proximity search examples + +## Proximity search (0 distance) + + + + +{`/** @var array $employees */ +$employees = $session->advanced() + ->documentQuery(Employee::class) + // Make a full-text search with search terms + ->search("Notes", "fluent french") + // Call 'Proximity' with 0 distance + ->proximity(0) + ->toList(); +`} + + + + +{`from "Employees" +where proximity(search(Notes, "fluent french"), 0) +`} + + + + +Running the above query on the Northwind sample data returns the following Employee documents: +`employees/2-A` +`employees/5-A` +`employees/9-A` + +Each resulting document has the text 'fluent in French' in its 'Notes' field. + +The word "in" is not taken into account as it is Not part of the terms list generated +by the analyzer. (Search is case-insensitive in this case.) + + +Documents containing text with the search terms appearing with no words between them +(e.g. 
"fluent french") will also be returned. + + +## Proximity search (distance > 0) + + + + +{`/** @var array $employees */ +$employees = $session->advanced() + ->documentQuery(Employee::class) + // Make a full-text search with search terms + ->search("Notes", "fluent french") + // Call 'Proximity' with distance 5 + ->proximity(4) + ->toList(); +`} + + + + +{`from "Employees" +where proximity(search(Notes, "fluent french"), 5) +`} + + + + +Running the above query on the Northwind sample data returns the following Employee documents: +`employees/2-A` +`employees/5-A` +`employees/6-A` +`employees/9-A` + +This time document 'employees/6-A' was added to the previous results since it contains the phrase: +"fluent in Japanese and can read and write French" +where the search terms are separated by a count of 4 terms. + +"in" & "and" are not taken into account as they are not part of the terms list generated +by the analyzer.(Search is case-insensitive in this case). + + + +## Syntax + + + +{`public function proximity(int $proximity): DocumentQueryInterface; +`} + + + +| Parameter | Type | Description | +|---------------|-------|-------------------------------------------------------------------------------| +| **$proximity** | `int` | The maximum number of terms between the search terms.
Can be `0` or more. | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-python.mdx new file mode 100644 index 0000000000..92d24b1059 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_proximity-search-python.mdx @@ -0,0 +1,145 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A **proximity search** retrieves documents containing search terms that are located within a specified distance from each other. + The distance is measured by the number of intermediate terms. + +* Use the `proximity` method when running a **full-text search** by the [search](../../../../client-api/session/querying/text-search/full-text-search.mdx) method. + +* In this page: + * [Why use proximity search](../../../../client-api/session/querying/text-search/proximity-search.mdx#why-use-proximity-search) + * [How proximity works](../../../../client-api/session/querying/text-search/proximity-search.mdx#how-proximity-works) + * [Proximity search examples](../../../../client-api/session/querying/text-search/proximity-search.mdx#proximity-search-examples) + * [Syntax](../../../../client-api/session/querying/text-search/proximity-search.mdx#syntax) + + +## Why use proximity search + +* A basic linguistic assumption is that the proximity of words implies a relationship between them. + +* Proximity search helps match phrases while avoiding scattered or spread-out terms in the text. + +* By limiting the search to only include matches where the terms are within the specified maximum proximity, + the search results can be more relevant than those with scattered terms. + + + +## How proximity works + +* When searching with some specified distance between search terms `term1` and `term2`: + + * Retrieved documents will contain text in which `term1` and `term2` are separated by + the maximum number of terms specified or less. + + * The search terms can be separated by fewer terms, but not more than the specified distance. + + * Only the terms generated by the [search analyzer](../../../../indexes/using-analyzers.mdx#ravendb) + are considered towards the count of the maximum distance. + Words or tokens that are Not part of the generated terms are Not included in the proximity calculation. + +* Note: + + * Search criteria should contain at least 2 search terms. + + * Search terms must be simple string terms without wildcards. + + + +## Proximity search examples + +## Proximity search (0 distance) + + + + +{`employees = list( + session.advanced.document_query(object_type=Employee) + # Make a full-text search with search terms + .search("Notes", "fluent french") + # Call 'proximity' with 0 distance + .proximity(0) +) +# Running the above query on the Northwind sample data returns the following Employee documents: +# * employees/2-A +# * employees/5-A +# * employees/9-A +# +# Each resulting document has the text 'fluent in French' in its 'Notes' field. +# +# The word "in" is not taken into account as it is Not part of the terms list generated +# by the analyzer. (Search is case-insensitive in this case). +# +# Note: +# A document containing text with the search terms appearing with no words in between them +# (e.g. "fluent french") would have also been returned. 
+`}
+
+
+
+{`from "Employees"
+where proximity(search(Notes, "fluent french"), 0)
+`}
+
+
+
+
+## Proximity search (distance > 0)
+
+
+
+
+{`employees = list(
+    session.advanced.document_query(object_type=Employee)
+    # Make a full-text search with search terms
+    .search("Notes", "fluent french")
+    # Call 'proximity' with distance 5
+    .proximity(5)
+)
+# Running the above query on the Northwind sample data returns the following Employee documents:
+# * employees/2-A
+# * employees/5-A
+# * employees/6-A
+# * employees/9-A
+#
+# This time document 'employees/6-A' was added to the previous results since it contains the phrase:
+# "fluent in Japanese and can read and write French"
+# where the search terms are separated by a count of 4 terms.
+#
+# "in" & "and" are not taken into account as they are not part of the terms list generated
+# by the analyzer. (Search is case-insensitive in this case).
+`}
+
+
+
+{`from "Employees"
+where proximity(search(Notes, "fluent french"), 5)
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`def proximity(self, proximity: int) -> DocumentQuery[_T]: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|-------|------------------------------------------------------------------------------------------|
+| **proximity** | `int` | The maximum number of terms between the search terms.
Can be greater or equal to `0`. | + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-csharp.mdx new file mode 100644 index 0000000000..6673fd1290 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-csharp.mdx @@ -0,0 +1,186 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can query for documents having a field that starts with some specified string. + +* Unless explicitly specified, the string comparisons are case-insensitive by default. + +* In this page: + * [StartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx#startswith) + * [StartsWith (case-sensitive)](../../../../client-api/session/querying/text-search/starts-with-query.mdx#startswith-(case-sensitive)) + * [Negate StartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx#negate-startswith) + + +## StartsWith + + + + +{`List products = session + .Query() + // Call 'StartsWith' on the field + // Pass the prefix to search by + .Where(x => x.Name.StartsWith("Ch")) + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that starts with any case variation of 'ch' +`} + + + + +{`List products = await asyncSession + .Query() + // Call 'StartsWith' on the field + // Pass the prefix to search by + .Where(x => x.Name.StartsWith("Ch")) + .ToListAsync(); + +// Results will contain only Product documents having a 'Name' field +// that starts with any case variation of 'ch' +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Call 'WhereStartsWith' + // Pass the document field and the prefix to search by + .WhereStartsWith(x => x.Name, "Ch") + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that starts with any case variation of 'ch' +`} + + + + +{`from "Products" +where startsWith(Name, "Ch") +`} + + + + + + +## StartsWith (case-sensitive) + + + + +{`List products = session + .Query() + // Pass 'exact: true' to search for an EXACT prefix match + .Where(x => x.Name.StartsWith("Ch"), exact: true) + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that starts with 'Ch' +`} + + + + +{`List products = await asyncSession + .Query() + // Pass 'exact: true' to search for an EXACT prefix match + .Where(x => x.Name.StartsWith("Ch"), exact: true) + .ToListAsync(); + +// Results will contain only Product documents having a 'Name' field +// that starts with 'Ch' +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + // Call 'WhereStartsWith' + // Pass 'exact: true' to search for an EXACT prefix match + .WhereStartsWith(x => x.Name, "Ch", exact: true) + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that starts with 'Ch' +`} + + + + +{`from "Products" +where exact(startsWith(Name, "Ch")) +`} + + + + + + +## Negate StartsWith + + + + +{`List products = session + .Query() + // Call 'StartsWith' on the field + // Pass the prefix to search by + .Where(x => x.Name.StartsWith("Ch") == false) + .ToList(); + +// Results will contain only Product documents having a 'Name' field +// that does NOT start with 'ch' or any other case variations of it +`} + + + + +{`List products = await asyncSession + 
.Query()
+    // Call 'StartsWith' on the field
+    // Pass the prefix to search by
+    .Where(x => x.Name.StartsWith("Ch") == false)
+    .ToListAsync();
+
+// Results will contain only Product documents having a 'Name' field
+// that does NOT start with 'ch' or any other case variations of it
+`}
+
+
+
+{`List products = session.Advanced
+    .DocumentQuery()
+    // Call 'Not' to negate the next predicate
+    .Not
+    // Call 'WhereStartsWith'
+    // Pass the document field and the prefix to search by
+    .WhereStartsWith(x => x.Name, "Ch")
+    .ToList();
+
+// Results will contain only Product documents having a 'Name' field
+// that does NOT start with 'ch' or any other case variations of it
+`}
+
+
+
+{`from "Products"
+where (true and not startsWith(Name, "Ch"))
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-nodejs.mdx
new file mode 100644
index 0000000000..a422c64ec5
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-nodejs.mdx
@@ -0,0 +1,125 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `whereStartsWith` to query for documents having a field that starts with some specified string.
+
+* Unless explicitly specified, the string comparisons are case-insensitive by default.
+
+* In this page:
+  * [whereStartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx#wherestartswith)
+  * [whereStartsWith (case-sensitive)](../../../../client-api/session/querying/text-search/starts-with-query.mdx#wherestartswith-(case-sensitive))
+  * [Negate whereStartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx#negate-wherestartswith)
+  * [Syntax](../../../../client-api/session/querying/text-search/starts-with-query.mdx#syntax)
+
+
+## whereStartsWith
+
+
+
+
+{`const products = await session
+    .query({ collection: "Products" })
+    // Call 'whereStartsWith'
+    // Pass the document field and the prefix to search by
+    .whereStartsWith("Name", "Ch")
+    .all();
+
+// Results will contain only Product documents having a 'Name' field
+// that starts with any case variation of 'ch'
+`}
+
+
+
+{`from "Products"
+where startsWith(Name, "Ch")
+`}
+
+
+
+
+
+
+## whereStartsWith (case-sensitive)
+
+
+
+
+{`const products = await session
+    .query({ collection: "Products" })
+    // Call 'whereStartsWith'
+    // Pass 'true' as the 3rd parameter to search for an EXACT prefix match
+    .whereStartsWith("Name", "Ch", true)
+    .all();
+
+// Results will contain only Product documents having a 'Name' field
+// that starts with 'Ch'
+`}
+
+
+
+{`from "Products"
+where exact(startsWith(Name, "Ch"))
+`}
+
+
+
+
+
+
+## Negate whereStartsWith
+
+
+
+
+{`const products = await session
+    .query({ collection: "Products" })
+    // Call 'not' to negate the next predicate
+    .not()
+    // Call 'whereStartsWith'
+    // Pass the document field and the prefix to search by
+    .whereStartsWith("Name", "Ch")
+    .all();
+
+// Results will contain only Product documents having a 'Name' field
+// that does NOT start with 'ch' or any other case variations of it
+`}
+
+
+
+{`from "Products"
+where exists(Name) and not startsWith(Name, "Ch")
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`// Available overloads:
+whereStartsWith(fieldName, value);
+whereStartsWith(fieldName, value,
exact); +`} + + + +| Parameter | Type | Description | +|---------------|---------|---------------------------------------------------------------------------| +| __fieldName__ | string | The field name in which to search | +| __value__ | string | The __prefix__ string to search by | +| __exact__ | boolean | `false` - search is case-insensitive
`true` - search is case-sensitive |
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-php.mdx
new file mode 100644
index 0000000000..b9454a61da
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-php.mdx
@@ -0,0 +1,144 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can query for documents having a field that starts with some specified string.
+
+* Unless explicitly specified, the string comparisons are case-insensitive by default.
+
+* In this page:
+  * [StartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx#startswith)
+  * [StartsWith (case-sensitive)](../../../../client-api/session/querying/text-search/starts-with-query.mdx#startswith-(case-sensitive))
+  * [Negate StartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx#negate-startswith)
+
+
+## StartsWith
+
+The results will contain only Product documents having a 'Name' field
+that starts with any case variation of 'ch'.
+
+
+
+
+{`/** @var array $products */
+$products = $session
+    ->query(Product::class)
+    // Call 'StartsWith' on the field
+    // Pass the prefix to search by
+    ->whereStartsWith("Name", "Ch")
+    ->toList();
+`}
+
+
+
+{`/** @var array $products */
+$products = $session->advanced()
+    ->documentQuery(Product::class)
+    // Call 'WhereStartsWith'
+    // Pass the document field and the prefix to search by
+    ->whereStartsWith("Name", "Ch")
+    ->toList();
+`}
+
+
+
+{`from "Products"
+where startsWith(Name, "Ch")
+`}
+
+
+
+
+
+
+## StartsWith (case-sensitive)
+
+The results will contain only Product documents having a 'Name' field
+that starts with 'Ch'.
+
+
+
+
+{`/** @var array $products */
+$products = $session
+    ->query(Product::class)
+    // Pass 'true' as the 3rd argument to search for an EXACT prefix match
+    ->whereStartsWith("Name", "Ch", true)
+    ->toList();
+`}
+
+
+
+{`/** @var array $products */
+$products = $session->advanced()
+    ->documentQuery(Product::class)
+    // Call 'WhereStartsWith'
+    // Pass 'true' as the 3rd argument to search for an EXACT prefix match
+    ->whereStartsWith("Name", "Ch", true)
+    ->toList();
+`}
+
+
+
+{`from "Products"
+where exact(startsWith(Name, "Ch"))
+`}
+
+
+
+
+
+
+## Negate StartsWith
+
+The results will contain only Product documents having a 'Name' field
+that does NOT start with 'ch' or any other case variations of it.
+ + + + +{`/** @var array $products */ +$products = $session + ->query(Product::class) + # Negate next statement + ->not() + // Call 'StartsWith' on the field + // Pass the prefix to search by + ->whereStartsWith("Name", "Ch") + ->toList(); +`} + + + + +{`/** @var array $products */ +$products = $session->advanced() + ->documentQuery(Product::class) + // Call 'Not' to negate the next predicate + ->not() + // Call 'WhereStartsWith' + // Pass the document field and the prefix to search by + ->whereStartsWith("Name", "Ch") + ->toList(); +`} + + + + +{`from "Products" +where (true and not startsWith(Name, "Ch")) +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-python.mdx new file mode 100644 index 0000000000..5a05f62bbc --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_starts-with-query-python.mdx @@ -0,0 +1,102 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can query for documents having a field that starts with some specified string. + +* Unless explicitly specified, the string comparisons are case-insensitive by default. + +* In this page: + * [StartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx#startswith) + * [StartsWith (case-sensitive)](../../../../client-api/session/querying/text-search/starts-with-query.mdx#startswith-(case-sensitive)) + * [Negate StartsWith](../../../../client-api/session/querying/text-search/starts-with-query.mdx#negate-startswith) + + +## StartsWith + + + + +{`products = list( + session.query(object_type=Product) + # Call 'where_starts_with' on the field + # Pass the prefix to search by + .where_starts_with("Name", "Ch") +) + +# Results will contain only Product documents having a 'Name' field +# that starts with any case variation of 'ch' +`} + + + + +{`from "Products" +where startsWith(Name, "Ch") +`} + + + + + + +## StartsWith (case-sensitive) + + + + +{`products = list( + session.query(object_type=Product) + # Pass 'exact=True' to search for an EXACT prefix match + .where_starts_with("Name", "Ch", exact=True) +) + +# Results will contain only Product documents having a 'Name' field +# that starts with 'Ch' +`} + + + + +{`from "Products" +where exact(startsWith(Name, "Ch")) +`} + + + + + + +## Negate StartsWith + + + + +{`products = list( + session.query(object_type=Product) + # Negate next statement + .not_() + # Call 'where_starts_with' on the field + # Pass the prefix to search by + .where_starts_with("Name", "Ch") +) +# Results will contain only Product documents having a 'Name' field +# that does NOT start with 'ch' or any other case variations of it +`} + + + + +{`from "Products" +where (true and not startsWith(Name, "Ch")) +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-csharp.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-csharp.mdx new file mode 100644 index 0000000000..5730266ca6 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-csharp.mdx @@ -0,0 +1,45 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To return only documents 
that match a regular expression,
+use the `Regex` method, which enables RavenDB to perform server-side pattern matching queries.
+
+The supplied regular expression must be [.NET compatible](https://docs.microsoft.com/en-us/dotnet/api/system.text.regularexpressions.regex?view=netframework-4.7.1).
+
+## Example
+
+
+
+
+{`// Load all products whose name
+// starts with 'N' or 'A'
+List products = session
+    .Query()
+    .Where(x => Regex.IsMatch(x.Name, "^[NA]"))
+    .ToList();
+`}
+
+
+
+{`// Load all products whose name
+// starts with 'N' or 'A'
+List products = await asyncSession
+    .Query()
+    .Where(x => Regex.IsMatch(x.Name, "^[NA]"))
+    .ToListAsync();
+`}
+
+
+
+{`from Products
+where regex(Name, '^[NA]')
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-java.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-java.mdx
new file mode 100644
index 0000000000..4b3a20715b
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-java.mdx
@@ -0,0 +1,33 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To return only documents that match a regular expression,
+use the `regex` method, which enables RavenDB to perform server-side pattern matching queries.
+
+The supplied regular expression must be [.NET compatible](https://docs.microsoft.com/en-us/dotnet/api/system.text.regularexpressions.regex?view=netframework-4.7.1).
+
+## Example
+
+
+
+
+{`// Load all products whose name
+// starts with 'N' or 'A'
+List products = session.query(Product.class)
+    .whereRegex("Name", "^[NA]")
+    .toList();
+`}
+
+
+
+{`from Products
+where regex(Name, '^[NA]')
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-nodejs.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-nodejs.mdx
new file mode 100644
index 0000000000..efba13ec58
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-nodejs.mdx
@@ -0,0 +1,33 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To return only documents that match a given regular expression,
+use the `regex()` method, which enables RavenDB to perform server-side pattern matching queries.
+
+The supplied regular expression must be [.NET compatible](https://docs.microsoft.com/en-us/dotnet/api/system.text.regularexpressions.regex?view=netframework-4.7.1).
+
+## Example
+
+
+
+
+{`// Load all products whose name
+// starts with 'N' or 'A'
+const products = await session.query({ collection: "Products" })
+    .whereRegex("name", "^[NA]")
+    .all();
+`}
+
+
+
+{`from Products
+where regex(name, '^[NA]')
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-php.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-php.mdx
new file mode 100644
index 0000000000..58c7a063a4
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-php.mdx
@@ -0,0 +1,49 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To return only documents that match a regular expression ("regex"),
+use the `whereRegex` method, which enables RavenDB to perform server-side pattern matching queries.
+
+The supplied regular expression must be [.NET compatible](https://docs.microsoft.com/en-us/dotnet/api/system.text.regularexpressions.regex?view=netframework-4.7.1).
+
+## Example
+
+Load all products whose name starts with 'N' or 'A'.
+
+
+
+
+{`/** @var array $products */
+$products = $session
+    ->query(Product::class)
+    ->whereRegex("Name", "^[NA]")
+    ->toList();
+`}
+
+
+
+{`from Products
+where regex(Name, '^[NA]')
+`}
+
+
+
+## Syntax
+
+
+
+{`function whereRegex(?string $fieldName, ?string $pattern): FilterDocumentQueryBaseInterface;
+`}
+
+
+
+| Parameter | Type | Description |
+|----------------|-----------|-----------------------------|
+| **$fieldName** | `?string` | Name of the field to query |
+| **$pattern** | `?string` | Pattern to query for |
+
+
diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-python.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-python.mdx
new file mode 100644
index 0000000000..27f8d3e996
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/_using-regex-python.mdx
@@ -0,0 +1,31 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To return only documents that match a regular expression,
+use the `where_regex` method, which enables RavenDB to perform server-side pattern matching queries.
+
+The supplied regular expression must be [.NET compatible](https://docs.microsoft.com/en-us/dotnet/api/system.text.regularexpressions.regex?view=netframework-4.7.1).
+ +## Example + + + + +{`# loads all products, which name +# starts with 'N' or 'A' +products = list(session.query(object_type=Product).where_regex("Name", "^[NA]")) +`} + + + + +{`from Products +where regex(Name, '^[NA]') +`} + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/assets/fragmentsResults.png b/versioned_docs/version-7.1/client-api/session/querying/text-search/assets/fragmentsResults.png new file mode 100644 index 0000000000..8a6323ed5f Binary files /dev/null and b/versioned_docs/version-7.1/client-api/session/querying/text-search/assets/fragmentsResults.png differ diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/boost-search-results.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/boost-search-results.mdx new file mode 100644 index 0000000000..bc028265fe --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/boost-search-results.mdx @@ -0,0 +1,46 @@ +--- +title: "Boost Search Results" +hide_table_of_contents: true +sidebar_label: Boost Search Results +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import BoostSearchResultsCsharp from './_boost-search-results-csharp.mdx'; +import BoostSearchResultsPython from './_boost-search-results-python.mdx'; +import BoostSearchResultsPhp from './_boost-search-results-php.mdx'; +import BoostSearchResultsNodejs from './_boost-search-results-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/ends-with-query.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/ends-with-query.mdx new file mode 100644 index 0000000000..22b8a2449a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/ends-with-query.mdx @@ -0,0 +1,47 @@ +--- +title: "Ends-With Query" +hide_table_of_contents: true +sidebar_label: Ends-With Query +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import EndsWithQueryCsharp from './_ends-with-query-csharp.mdx'; +import EndsWithQueryPython from './_ends-with-query-python.mdx'; +import EndsWithQueryPhp from './_ends-with-query-php.mdx'; +import EndsWithQueryNodejs from './_ends-with-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/exact-match-query.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/exact-match-query.mdx new file mode 100644 index 0000000000..889838f923 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/exact-match-query.mdx @@ -0,0 +1,50 @@ +--- +title: "Exact Match Query" +hide_table_of_contents: true +sidebar_label: Exact Match Query +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ExactMatchQueryCsharp from './_exact-match-query-csharp.mdx'; +import ExactMatchQueryJava from './_exact-match-query-java.mdx'; +import ExactMatchQueryPython from 
'./_exact-match-query-python.mdx'; +import ExactMatchQueryPhp from './_exact-match-query-php.mdx'; +import ExactMatchQueryNodejs from './_exact-match-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/full-text-search.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/full-text-search.mdx new file mode 100644 index 0000000000..178e9bc114 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/full-text-search.mdx @@ -0,0 +1,48 @@ +--- +title: "Full-Text Search" +hide_table_of_contents: true +sidebar_label: Full-Text Search +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import FullTextSearchCsharp from './_full-text-search-csharp.mdx'; +import FullTextSearchPython from './_full-text-search-python.mdx'; +import FullTextSearchPhp from './_full-text-search-php.mdx'; +import FullTextSearchNodejs from './_full-text-search-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/fuzzy-search.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/fuzzy-search.mdx new file mode 100644 index 0000000000..5d47ccc6f9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/fuzzy-search.mdx @@ -0,0 +1,49 @@ +--- +title: "Fuzzy Search" +hide_table_of_contents: true +sidebar_label: Fuzzy Search +sidebar_position: 7 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import FuzzySearchCsharp from './_fuzzy-search-csharp.mdx'; +import FuzzySearchJava from './_fuzzy-search-java.mdx'; +import FuzzySearchPython from './_fuzzy-search-python.mdx'; +import FuzzySearchPhp from './_fuzzy-search-php.mdx'; +import FuzzySearchNodejs from './_fuzzy-search-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/highlight-query-results.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/highlight-query-results.mdx new file mode 100644 index 0000000000..f75c43c819 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/highlight-query-results.mdx @@ -0,0 +1,51 @@ +--- +title: "Highlight Search Results" +hide_table_of_contents: true +sidebar_label: Highlight Search Results +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HighlightQueryResultsJava from './_highlight-query-results-java.mdx'; +import HighlightQueryResultsCsharp from './_highlight-query-results-csharp.mdx'; +import HighlightQueryResultsPython from './_highlight-query-results-python.mdx'; +import HighlightQueryResultsPhp from './_highlight-query-results-php.mdx'; +import HighlightQueryResultsNodejs from './_highlight-query-results-nodejs.mdx'; + +export const supportedLanguages = 
["java", "csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/proximity-search.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/proximity-search.mdx new file mode 100644 index 0000000000..8489e86a7c --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/proximity-search.mdx @@ -0,0 +1,50 @@ +--- +title: "Proximity Search" +hide_table_of_contents: true +sidebar_label: Proximity Search +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ProximitySearchCsharp from './_proximity-search-csharp.mdx'; +import ProximitySearchJava from './_proximity-search-java.mdx'; +import ProximitySearchPython from './_proximity-search-python.mdx'; +import ProximitySearchPhp from './_proximity-search-php.mdx'; +import ProximitySearchNodejs from './_proximity-search-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/starts-with-query.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/starts-with-query.mdx new file mode 100644 index 0000000000..9101b0b5cc --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/starts-with-query.mdx @@ -0,0 +1,44 @@ +--- +title: "Starts-With Query" +hide_table_of_contents: true +sidebar_label: Starts-With Query +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import StartsWithQueryCsharp from './_starts-with-query-csharp.mdx'; +import StartsWithQueryPython from './_starts-with-query-python.mdx'; +import StartsWithQueryPhp from './_starts-with-query-php.mdx'; +import StartsWithQueryNodejs from './_starts-with-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/text-search/using-regex.mdx b/versioned_docs/version-7.1/client-api/session/querying/text-search/using-regex.mdx new file mode 100644 index 0000000000..d51a3437c4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/text-search/using-regex.mdx @@ -0,0 +1,50 @@ +--- +title: "Using Regex" +hide_table_of_contents: true +sidebar_label: Using Regex +sidebar_position: 8 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import UsingRegexCsharp from './_using-regex-csharp.mdx'; +import UsingRegexJava from './_using-regex-java.mdx'; +import UsingRegexPython from './_using-regex-python.mdx'; +import UsingRegexPhp from './_using-regex-php.mdx'; +import UsingRegexNodejs from './_using-regex-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/vector-search.mdx b/versioned_docs/version-7.1/client-api/session/querying/vector-search.mdx new file mode 100644 index 
0000000000..ab72b7ce20 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/vector-search.mdx @@ -0,0 +1,24 @@ +--- +title: "Vector Search" +hide_table_of_contents: true +sidebar_label: Vector Search Query +sidebar_position: 18 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import VectorSearchCsharp from './_vector-search-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/querying/what-is-rql.mdx b/versioned_docs/version-7.1/client-api/session/querying/what-is-rql.mdx new file mode 100644 index 0000000000..8dfc4ca963 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/querying/what-is-rql.mdx @@ -0,0 +1,575 @@ +--- +title: "RQL - Raven Query Language" +hide_table_of_contents: true +sidebar_label: What is RQL +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# RQL - Raven Query Language + + +* Queries in RavenDB use a SQL-like language called **RQL** (Raven Query Language). + +* RQL exposes the RavenDB query pipeline in a straightforward and accessible manner + that is easy to use and interact with. + +* Any query written using high-level Session methods (`Query`, `DocumentQuery`) + is translated by the client to RQL before being sent to the server for execution. + +* A query can be written with RQL directly by either: + * Using the session's `RawQuery` method + * Making a query from the [Query view](../../../studio/database/queries/query-view.mdx) in Studio + +* Learn more about querying from the session in this [Query Overview](../../../client-api/session/querying/how-to-query.mdx). + +* In this page: + + * [The query pipeline](../../../client-api/session/querying/what-is-rql.mdx#the-query-pipeline) + + * [RQL keywords and methods](../../../client-api/session/querying/what-is-rql.mdx#rql-keywords-and-methods) + + * [`declare`](../../../client-api/session/querying/what-is-rql.mdx#declare) + * [`from`](../../../client-api/session/querying/what-is-rql.mdx#from) + * [`where`](../../../client-api/session/querying/what-is-rql.mdx#where) + * [`group by`](../../../client-api/session/querying/what-is-rql.mdx#group-by) + * [`include`](../../../client-api/session/querying/what-is-rql.mdx#include) + * [`order by`](../../../client-api/session/querying/what-is-rql.mdx#order-by) + * [`select`](../../../client-api/session/querying/what-is-rql.mdx#select) + * [`load`](../../../client-api/session/querying/what-is-rql.mdx#load) + * [`limit`](../../../client-api/session/querying/what-is-rql.mdx#limit) + * [`update`](../../../client-api/session/querying/what-is-rql.mdx#update) + + * [RQL comments](../../../client-api/session/querying/what-is-rql.mdx#rql-comments) + + +## The query pipeline + +The query pipeline in RavenDB includes the following main stages: + +1. __Detect query source__ ([`from`](../../../client-api/session/querying/what-is-rql.mdx#from)) + + * Based on your query, RavenDB will determine the appropriate data source from which to retrieve results. + + * Note: all queries in RavenDB use an index to provide results, even when you don't specify one. 
+
+  * The following options are available:
+
+    * `from index` - Explicitly specify which index to use.
+
+    * `from collection` - Specify the collection to query.
+      RavenDB will decide which index will be used depending on the query criteria.
+
+  * Learn more about these __query scenarios__ in this [Query Overview](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index).
+
+2. __Filter the data__ ([`where`](../../../client-api/session/querying/what-is-rql.mdx#where))
+   * The index is scanned for records that match the query predicate.
+
+3. __Include related documents__ ([`include`](../../../client-api/session/querying/what-is-rql.mdx#include))
+   * [Related documents](../../../client-api/how-to/handle-document-relationships.mdx#includes) that are included in the query will be retrieved and returned to the client
+     along with the resulting matching documents, reducing the need to do another network round trip
+     to the database when accessing the included documents.
+
+4. __Sort results__ ([`order by`](../../../client-api/session/querying/what-is-rql.mdx#order-by))
+   * Query results can be sorted.
+     For example, you can order by a field value, by the resulting documents' score, by random ordering, etc.
+
+5. __Limit results__ ([`limit`](../../../client-api/session/querying/what-is-rql.mdx#limit))
+   * You can specify the number of results you want to get back from the query
+     and the number of results you want to skip.
+
+6. __Project results__ ([`select`](../../../client-api/session/querying/what-is-rql.mdx#select))
+   * [Projections](../../../indexes/querying/projections.mdx) are specified when you need to retrieve only specific document fields, instead of the full document.
+     This reduces the amount of data sent over the network and is useful when only partial data is needed.
+     When projections are Not defined on the query, the full document content is retrieved from the document storage.
+
+   * Projections are applied as the last stage, after the query has been processed, filtered, sorted, and paged.
+     This means that the projection doesn't apply to all the documents in the database,
+     only to the results that are actually returned.
+
+   * Data can be loaded ([`load`](../../../client-api/session/querying/what-is-rql.mdx#load)) from related documents to be used in the projected fields.
+
+   * For each record, the server extracts the requested fields:
+     If a field is stored in the index - the server will fetch it from the index.
+     If a field is Not stored in the index - the server will fetch it from the document storage.
+
+7. __Return results__ to the client.
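+
+For illustration, here is a minimal sketch of a single RQL query that touches these pipeline stages.
+It assumes the Northwind sample data used throughout this documentation, where `Orders` documents have `Freight`, `ShippedAt`, and `Company` fields;
+the numbered comments map each clause to the pipeline stage it drives:
+
+
+
+{`from "Orders"                     // 1. Query source: a dynamic collection query
+where Freight > 500                // 2. Filter the data
+order by Freight as double desc    // 4. Sort numerically by the 'Freight' field
+select ShippedAt, Freight          // 6. Project only these two fields
+include Company                    // 3. Include the related Company document
+limit 0, 10                        // 5. Skip 0 results, take 10
+`}
+
+
+
+Note that the textual order of the RQL clauses does not have to mirror the pipeline's execution order - e.g., `select` is written before `limit`, yet the projection is applied only after the results have been filtered, sorted, and paged, as described above.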
+ + + +## RQL keywords and methods + +The following keywords and methods are available in RQL: + +- [DECLARE](../../../client-api/session/querying/what-is-rql.mdx#declare) +- [FROM](../../../client-api/session/querying/what-is-rql.mdx#from) + - index +- [GROUP BY](../../../client-api/session/querying/what-is-rql.mdx#group-by) + - [array()](../../../client-api/session/querying/how-to-perform-group-by-query.mdx#by-array-content) +- [WHERE](../../../client-api/session/querying/what-is-rql.mdx#where) + - id() + - [search()](../../../client-api/session/querying/text-search/full-text-search.mdx) + - cmpxchg() + - [boost()](../../../client-api/session/querying/text-search/boost-search-results.mdx) + - [regex()](../../../client-api/session/querying/text-search/using-regex.mdx) + - [startsWith()](../../../client-api/session/querying/text-search/starts-with-query.mdx) + - [endsWith()](../../../client-api/session/querying/text-search/ends-with-query.mdx) + - [lucene()](../../../client-api/session/querying/document-query/how-to-use-lucene.mdx) + - [exists()](../../../client-api/session/querying/how-to-filter-by-field.mdx) + - [exact()](../../../client-api/session/querying/text-search/exact-match-query.mdx) + - [intersect()](../../../indexes/querying/intersection.mdx) + - [spatial.within()](../../../indexes/querying/spatial.mdx) + - [spatial.contains()](../../../indexes/querying/spatial.mdx) + - [spatial.disjoint()](../../../indexes/querying/spatial.mdx) + - [spatial.intersects()](../../../indexes/querying/spatial.mdx) + - [moreLikeThis()](../../../client-api/session/querying/how-to-use-morelikethis.mdx) + - [vector.search()](../../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx) +- [ORDER BY](../../../client-api/session/querying/what-is-rql.mdx#order-by) + - [ASC | ASCENDING](../../../indexes/querying/sorting.mdx#basics) + - [DESC | DESCENDING](../../../indexes/querying/sorting.mdx#basics) + - [AS](../../../indexes/querying/sorting.mdx#basics) + - [string](../../../indexes/querying/sorting.mdx#basics) + - [long](../../../indexes/querying/sorting.mdx#basics) + - [double](../../../indexes/querying/sorting.mdx#basics) + - [alphaNumeric](../../../indexes/querying/sorting.mdx#alphanumeric-ordering) + - [random()](../../../indexes/querying/sorting.mdx#random-ordering) + - [score()](../../../indexes/querying/sorting.mdx#ordering-by-score) + - [spatial.distance()](../../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting) +- [LOAD](../../../client-api/session/querying/what-is-rql.mdx#load) +- [SELECT](../../../client-api/session/querying/what-is-rql.mdx#select) + - DISTINCT + - key() + - sum() + - count() + - [facet()](../../../indexes/querying/faceted-search.mdx) + - [timeseries()](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#syntax) + - [counter()](../../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) +- [LIMIT](../../../client-api/session/querying/what-is-rql.mdx#limit) +- [UPDATE](../../../client-api/session/querying/what-is-rql.mdx#update) +- [INCLUDE](../../../client-api/session/querying/what-is-rql.mdx#include) + +With the following operators: + +- >= +- <= +- <> or != +- < +- > +- = or == +- BETWEEN +- IN +- ALL IN +- OR +- AND +- NOT +- ( +- ) + +And the following values: + +- true +- false +- null +- string e.g. 'John' or "John" +- number (long and double) e.g. 17 +- parameter e.g. 
$param1
+
+
+
+## `declare`
+
+You can use the `declare` keyword to create a JavaScript function that can then be called from a `select` clause when using a projection.
+JavaScript functions add flexibility to your queries as they can be used to manipulate and format the retrieved results.
+
+
+
+{`// Declare a JavaScript function
+declare function output(employee) \{
+    // Format the value that will be returned in the projected field 'FullName'
+    var formatName = function(x)\{ return x.FirstName + " " + x.LastName; \};
+    return \{ FullName : formatName(employee) \};
+\}
+
+// Query with projection calling the 'output' JavaScript function
+from Employees as employee select output(employee)
+`}
+
+
+
+Values are returned from a declared JavaScript function as a set of values, rather than in a nested array, to ease the projection of the retrieved values.
+See an example for this usage [here](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#combine-time-series-and-javascript-functions).
+
+
+
+## `from`
+
+The keyword `from` is used to determine the source data that will be used when the query is executed.
+The following options are available:
+
+
+* __Query a specific collection__:    `from <collection-name>`
+
+
+
+{`// Full collection query
+// Data source: The raw collection documents (Auto-index is Not created)
+from "Employees"
+`}
+
+
+
+
+{`// Collection query - by ID
+// Data source: The raw collection documents (Auto-index is Not created)
+from "Employees" where id() = "employees/1-A"
+`}
+
+
+
+
+{`// Dynamic query - with filtering
+// Data source: Auto-index (server uses an existing auto-index or creates a new one)
+from "Employees" where FirstName = "Laura"
+`}
+
+
+
+
+
+
+* __Query all documents__:    `from @all_docs`
+
+
+
+{`// All collections query
+// Data source: All raw collections (Auto-index is Not created)
+from @all_docs
+`}
+
+
+
+
+{`// Dynamic query - with filtering
+// Data source: Auto-index (server uses an existing auto-index or creates a new one)
+from @all_docs where FirstName = "Laura"
+`}
+
+
+
+
+
+
+* __Query an index__:    `from index <index-name>`
+
+
+
+{`// Index query
+// Data source: The specified index
+from index "Employees/ByFirstName"
+`}
+
+
+
+
+{`// Index query - with filtering
+// Data source: The specified index
+from index "Employees/ByFirstName" where FirstName = "Laura"
+`}
+
+
+
+
+
+
+## `where`
+
+Use the `where` keyword with various operators to filter chosen documents from the final result-set.
+
+
+#### Operator:    `>=`, `<=`, `<>`, `!=`, `<`, `>`, `=`, `==`
+
+These basic operators can be used with all value types, including 'numbers' and 'strings'.
+For example, you can return every document from the [Companies collection](../../../client-api/faq/what-is-a-collection.mdx)
+whose _field value_ **=** _a given input_.
+
+
+
+{`from "Companies"
+where Name == "The Big Cheese" // Can use either '=' or '=='
+`}
+
+
+
+Filtering on **nested properties** is also supported.
+So, to return all companies from 'Albuquerque', we need to execute the following query:
+
+
+
+{`from "Companies"
+where Address.City = "Albuquerque"
+`}
+
+
+
+
+
+
+#### Operator:    `between`
+
+The operator `between` returns results inclusively, and the type of the border values used must match.
+It works on both 'numbers' and 'strings' and can be substituted with the `>=` and `<=` operators.
+
+
+
+#### Operator:    `between`
+
+The operator `between` returns results inclusively, and the type of border values used must match.
+It works on both 'numbers' and 'strings' and can be substituted with the `>=` and `<=` operators.
+
+
+
+{`from "Products"
+where PricePerUnit between 10.5 and 13.0 // Using between
+`}
+
+
+
+
+
+{`from "Products"
+where PricePerUnit >= 10.5 and PricePerUnit <= 13.0 // Using >= and <=
+`}
+
+
+
+
+
+#### Operator:    `in`
+
+The operator `in` checks whether a given field contains the passed values.
+It returns results if the given field matches **any** of the passed values.
+
+
+
+{`from "Companies"
+where Name in ("The Big Cheese", "Unknown company name")
+`}
+
+
+
+
+
+{`from "Orders"
+where Lines[].ProductName in ("Chang", "Spegesild", "Unknown product name")
+`}
+
+
+
+
+
+#### Operator:    `all in`
+
+This operator checks whether **all** passed values match a given field.
+Due to its mechanics, it is only useful when used on array fields.
+
+In contrast to the `in` operator, the following query will yield no results:
+
+
+
+{`from "Orders"
+where Lines[].ProductName all in ("Chang", "Spegesild", "Unknown product name")
+`}
+
+
+
+Removing 'Unknown product name' will return only orders that contain products with both
+'Chang' and 'Spegesild' names.
+
+
+
+{`from "Orders"
+where Lines[].ProductName all in ("Chang", "Spegesild")
+`}
+
+
+
+
+
+#### Binary Operators:    `AND`, `OR`, `NOT`
+
+Binary operators can be used to build more complex statements.
+The `NOT` operator cannot stand alone: it is combined with one of the other binary operators, forming `OR NOT` or `AND NOT`.
+
+
+
+{`from "Companies"
+where Name = "The Big Cheese" OR Name = "Richter Supermarkt"
+`}
+
+
+
+
+
+{`from "Orders"
+where Freight > 500 AND ShippedAt > '1998-01-01'
+`}
+
+
+
+
+
+{`from "Orders"
+where Freight > 500 AND ShippedAt > '1998-01-01' AND NOT Freight = 830.75
+`}
+
+
+
+
+
+#### Subclauses:    `(`, `)`
+
+Subclauses can be used along with binary operators to build even more complex logical statements.
+
+
+
+
+
+## `group by`
+
+The keyword `group by` is used to create an aggregation query.
+Learn more in [dynamic group by queries](../../../client-api/session/querying/how-to-perform-group-by-query.mdx).
+
+
+
+## `include`
+
+The keyword `include` has been introduced to support:
+
+- [including related documents](../../../client-api/how-to/handle-document-relationships.mdx#includes) in the query response
+- [including counters](../../../document-extensions/counters/counters-and-other-features.mdx#including-counters),
+  [time series](../../../document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx),
+  or [revisions](../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-making-a-raw-query) in the query response
+- [including compare-exchange items](../../../client-api/operations/compare-exchange/include-compare-exchange.mdx#include-cmpxchg-items-when-querying) in the query response
+- [highlighting](../../../client-api/session/querying/text-search/highlight-query-results.mdx) results
+- [getting query timings](../../../client-api/session/querying/debugging/query-timings.mdx)
+- [getting explanations](../../../client-api/session/querying/debugging/include-explanations.mdx)
+
+
+
+## `order by`
+
+Use `order by` to perform sorting.
+Learn more in this [sorting](../../../indexes/querying/sorting.mdx) article.
+
+
+
+## `select`
+
+Use `select` to have the query return a projection instead of the full document.
+Learn more in this [projection](../../../indexes/querying/projections.mdx) article.
+
+
+
+## `load`
+
+Use `load` when you need to use data from a related document in a projection.
+See an example in this [projection](../../../indexes/querying/projections.mdx#example-viii---projection-using-a-loaded-document) article. + + + +## `limit` + +Use `limit` to limit the number of results returned by the query. +Specify the number of items to __skip__ from the beginning of the result set and the number of items to __take__ (return). +This is useful when [paging](../../../indexes/querying/paging.mdx) results. + + + +{`// Available syntax options: +// ========================= + +from "Products" limit 5, 10 // skip 5, take 10 + +from "Products" limit 10 offset 5 // skip 5, take 10 + +from "Products" offset 5 // skip 5, take all the rest +`} + + + + + +## `update` + +To patch documents on the server-side, use `update` with the desired JavaScript that will be applied to any document matching the query criteria. +For more information, please refer to this [patching](../../../client-api/operations/patching/set-based.mdx) article. + + + +## RQL comments + + + +__Single-line comments__: + +* Single-line comments start with `//` and end at the end of that line. + + + +{`// This is a single-line comment. +from "Companies" +where Name = "The Big Cheese" OR Name = "Richter Supermarkt" +`} + + + + + +{`from "Companies" +where Name = "The Big Cheese" // OR Name = "Richter Supermarkt" +`} + + + + + + + +__Multiline comments__: + +* Multiline comments start with `/*` and end with `*/`. + + + +{`/* +This is a multiline comment. +Any text here will be ignored. +*/ +from "Companies" +where Name = "The Big Cheese" OR Name = "Richter Supermarkt" +`} + + + + + +{`from "Companies" +where Name = "The Big Cheese" /* this part is a comment */ OR Name = "Richter Supermarkt" +`} + + + + + + + diff --git a/versioned_docs/version-7.1/client-api/session/saving-changes.mdx b/versioned_docs/version-7.1/client-api/session/saving-changes.mdx new file mode 100644 index 0000000000..a234668892 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/saving-changes.mdx @@ -0,0 +1,61 @@ +--- +title: "Session: Saving changes" +hide_table_of_contents: true +sidebar_label: Saving changes +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SavingChangesCsharp from './_saving-changes-csharp.mdx'; +import SavingChangesJava from './_saving-changes-java.mdx'; +import SavingChangesPython from './_saving-changes-python.mdx'; +import SavingChangesPhp from './_saving-changes-php.mdx'; +import SavingChangesNodejs from './_saving-changes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/storing-entities.mdx b/versioned_docs/version-7.1/client-api/session/storing-entities.mdx new file mode 100644 index 0000000000..ca239ffe1d --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/storing-entities.mdx @@ -0,0 +1,57 @@ +--- +title: "Session: Storing Entities" +hide_table_of_contents: true +sidebar_label: Storing Entities +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import StoringEntitiesCsharp from './_storing-entities-csharp.mdx'; +import StoringEntitiesJava from './_storing-entities-java.mdx'; +import StoringEntitiesPython from './_storing-entities-python.mdx'; +import 
StoringEntitiesPhp from './_storing-entities-php.mdx'; +import StoringEntitiesNodejs from './_storing-entities-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/updating-entities.mdx b/versioned_docs/version-7.1/client-api/session/updating-entities.mdx new file mode 100644 index 0000000000..6237036cd4 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/updating-entities.mdx @@ -0,0 +1,52 @@ +--- +title: "Update Entities" +hide_table_of_contents: true +sidebar_label: Update Entities +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import UpdatingEntitiesCsharp from './_updating-entities-csharp.mdx'; +import UpdatingEntitiesPython from './_updating-entities-python.mdx'; +import UpdatingEntitiesPhp from './_updating-entities-php.mdx'; +import UpdatingEntitiesNodejs from './_updating-entities-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/session/what-is-a-session-and-how-does-it-work.mdx b/versioned_docs/version-7.1/client-api/session/what-is-a-session-and-how-does-it-work.mdx new file mode 100644 index 0000000000..b2724f7fe9 --- /dev/null +++ b/versioned_docs/version-7.1/client-api/session/what-is-a-session-and-how-does-it-work.mdx @@ -0,0 +1,55 @@ +--- +title: "What is a Session and How Does it Work" +hide_table_of_contents: true +sidebar_label: What is a Session and How Does it Work +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import WhatIsASessionAndHowDoesItWorkCsharp from './_what-is-a-session-and-how-does-it-work-csharp.mdx'; +import WhatIsASessionAndHowDoesItWorkJava from './_what-is-a-session-and-how-does-it-work-java.mdx'; +import WhatIsASessionAndHowDoesItWorkPython from './_what-is-a-session-and-how-does-it-work-python.mdx'; +import WhatIsASessionAndHowDoesItWorkPhp from './_what-is-a-session-and-how-does-it-work-php.mdx'; +import WhatIsASessionAndHowDoesItWorkNodejs from './_what-is-a-session-and-how-does-it-work-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/client-api/setting-up-authentication-and-authorization.mdx b/versioned_docs/version-7.1/client-api/setting-up-authentication-and-authorization.mdx new file mode 100644 index 0000000000..b0508ded4a --- /dev/null +++ b/versioned_docs/version-7.1/client-api/setting-up-authentication-and-authorization.mdx @@ -0,0 +1,57 @@ +--- +title: "Client API: Setting up Authentication and Authorization" +hide_table_of_contents: true +sidebar_label: Setting up Authentication and Authorization +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SettingUpAuthenticationAndAuthorizationCsharp from './_setting-up-authentication-and-authorization-csharp.mdx'; +import SettingUpAuthenticationAndAuthorizationJava from 
'./_setting-up-authentication-and-authorization-java.mdx';
+import SettingUpAuthenticationAndAuthorizationNodejs from './_setting-up-authentication-and-authorization-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/setting-up-default-database.mdx b/versioned_docs/version-7.1/client-api/setting-up-default-database.mdx
new file mode 100644
index 0000000000..8b84bd5461
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/setting-up-default-database.mdx
@@ -0,0 +1,47 @@
+---
+title: "Client API: Setting up a Default Database"
+hide_table_of_contents: true
+sidebar_label: Setting up Default Database
+sidebar_position: 3
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import SettingUpDefaultDatabaseCsharp from './_setting-up-default-database-csharp.mdx';
+import SettingUpDefaultDatabaseJava from './_setting-up-default-database-java.mdx';
+import SettingUpDefaultDatabaseNodejs from './_setting-up-default-database-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/smuggler/_category_.json b/versioned_docs/version-7.1/client-api/smuggler/_category_.json
new file mode 100644
index 0000000000..1e666bd5cd
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/smuggler/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 15,
+    "label": "Smuggler"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/smuggler/_what-is-smuggler-csharp.mdx b/versioned_docs/version-7.1/client-api/smuggler/_what-is-smuggler-csharp.mdx
new file mode 100644
index 0000000000..ef8313cea7
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/smuggler/_what-is-smuggler-csharp.mdx
@@ -0,0 +1,179 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Smuggler gives you the ability to export data from, or import data to, a database using the JSON format.
+It is exposed via the `DocumentStore.Smuggler` property.
+
+## ForDatabase
+
+By default, `DocumentStore.Smuggler` works on the document store's default database, as set by the `DocumentStore.Database` property.
+
+In order to switch it to a different database, use the `.ForDatabase` method.
+
+
+
+{`var northwindSmuggler = store
+    .Smuggler
+    .ForDatabase("Northwind");
+`}
+
+
+
+
+
+## Export
+
+### Syntax
+
+
+
+{`Task<Operation> ExportAsync(
+    DatabaseSmugglerExportOptions options,
+    DatabaseSmuggler toDatabase,
+    CancellationToken token = default(CancellationToken));
+
+Task<Operation> ExportAsync(
+    DatabaseSmugglerExportOptions options,
+    string toFile,
+    CancellationToken token = default(CancellationToken));
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **options** | `DatabaseSmugglerExportOptions` | Options that will be used during the export. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerexportoptions).
|
+| **toDatabase** | `DatabaseSmuggler` | `DatabaseSmuggler` instance used as a destination |
+| **toFile** | `string` | Path to a file where exported data will be written |
+| **token** | `CancellationToken` | Token used to cancel the operation |
+
+| Return Value | |
+| ------------- | ----- |
+| `Operation` | An instance of the `Operation` class, which gives you the ability to wait for the operation to complete and to subscribe to operation progress events |
+
+### DatabaseSmugglerExportOptions
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **Collections** | `List<string>` | List of specific collections to export. If empty, then all collections will be exported.
Default: `empty` | +| **OperateOnTypes** | `DatabaseItemType` | Indicates what should be exported.
Default: `Indexes`, `Documents`, `RevisionDocuments`, `Conflicts`, `DatabaseRecord`, `ReplicationHubCertificates`, `Identities`, `CompareExchange`, `Attachments`, `CounterGroups`, `Subscriptions`, `TimeSeries` | +| **OperateOnDatabaseRecordTypes** | `DatabaseRecordItemType` | Indicates what should be exported from database record.
Default: `Client`, `ConflictSolverConfig`, `Expiration`, `ExternalReplications`, `PeriodicBackups`, `RavenConnectionStrings`, `RavenEtls`, `Revisions`, `Settings`, `SqlConnectionStrings`, `Sorters`, `SqlEtls`, `HubPullReplications`, `SinkPullReplications`, `TimeSeries`, `DocumentsCompression`, `Analyzers`, `LockMode`, `OlapConnectionStrings`, `OlapEtls`, `ElasticSearchConnectionStrings`, `ElasticSearchEtls`, `PostgreSQLIntegration`, `QueueConnectionStrings`, `QueueEtls`, `IndexesHistory`, `Refresh`, `DataArchival` | +| **IncludeExpired** | `bool` | Should expired documents be exported.
Default: `true` | +| **IncludeArtificial** | `bool` | Should artificial documents be exported.
Default: `false` | +| **IncludeArchived** | `bool` | Should archived documents be exported.
Default: `true` | +| **RemoveAnalyzers** | `bool` | Should analyzers be removed from Indexes.
Default: `false` | +| **TransformScript** | `string` | JavaScript-based script applied to every exported document. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#transformscript). | +| **MaxStepsForTransformScript** | `int` | Maximum number of steps that the transform script can process before failing.
Default: **10000** |
+
+### Example
+
+
+
+{`// export only Indexes and Documents to a given file
+var exportOperation = await store
+    .Smuggler
+    .ExportAsync(
+        new DatabaseSmugglerExportOptions
+        \{
+            OperateOnTypes = DatabaseItemType.Indexes
+                             | DatabaseItemType.Documents
+        \},
+        @"C:\\ravendb-exports\\Northwind.ravendbdump",
+        token);
+
+await exportOperation.WaitForCompletionAsync();
+`}
+
+
+
+
+
+## Import
+
+### Syntax
+
+
+
+{`Task<Operation> ImportAsync(
+    DatabaseSmugglerImportOptions options,
+    Stream stream,
+    CancellationToken token = default(CancellationToken));
+
+Task<Operation> ImportAsync(
+    DatabaseSmugglerImportOptions options,
+    string fromFile,
+    CancellationToken token = default(CancellationToken));
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **options** | `DatabaseSmugglerImportOptions` | Options that will be used during the import. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerimportoptions). |
+| **stream** | `Stream` | Stream with data to import |
+| **fromFile** | `string` | Path to a file from which data will be imported |
+| **token** | `CancellationToken` | Token used to cancel the operation |
+
+| Return Value | |
+| ------------- | ----- |
+| `Operation` | An instance of the `Operation` class, which gives you the ability to wait for the operation to complete and to subscribe to operation progress events |
+
+### DatabaseSmugglerImportOptions
+
+| Parameters | | |
+| - | - | - |
+| **Collections** | `List<string>` | List of specific collections to import. If empty, then all collections will be imported.
Default: `empty` | +| **OperateOnTypes** | `DatabaseItemType` | Indicates what should be imported.
Default: `Indexes`, `Documents`, `RevisionDocuments`, `Conflicts`, `DatabaseRecord`, `ReplicationHubCertificates`, `Identities`, `CompareExchange`, `Attachments`, `CounterGroups`, `Subscriptions`, `TimeSeries` | +| **OperateOnDatabaseRecordTypes** | `DatabaseRecordItemType` | Indicates what should be imported from database record.
Default: `Client`, `ConflictSolverConfig`, `Expiration`, `ExternalReplications`, `PeriodicBackups`, `RavenConnectionStrings`, `RavenEtls`, `Revisions`, `Settings`, `SqlConnectionStrings`, `Sorters`, `SqlEtls`, `HubPullReplications`, `SinkPullReplications`, `TimeSeries`, `DocumentsCompression`, `Analyzers`, `LockMode`, `OlapConnectionStrings`, `OlapEtls`, `ElasticSearchConnectionStrings`, `ElasticSearchEtls`, `PostgreSQLIntegration`, `QueueConnectionStrings`, `QueueEtls`, `IndexesHistory`, `Refresh`, `DataArchival` | +| **IncludeExpired** | `bool` | Should expired documents be imported.
Default: `true` | +| **IncludeArtificial** | `bool` | Should artificial documents be imported.
Default: `false` | +| **IncludeArchived** | `bool` | Should archived documents be imported.
Default: `true` | +| **RemoveAnalyzers** | `bool` | Should analyzers be removed from Indexes.
Default: `false` | +| **TransformScript** | `string` | JavaScript-based script applied to every imported document. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#transformscript). | +| **MaxStepsForTransformScript** | `int` | Maximum number of steps that the transform script can process before failing.
Default: **10000** |
+
+### Example
+
+
+
+{`// import only Documents from a given file
+var importOperation = await store
+    .Smuggler
+    .ImportAsync(
+        new DatabaseSmugglerImportOptions
+        \{
+            OperateOnTypes = DatabaseItemType.Documents
+        \},
+        // import the .ravendbdump file that you exported (i.e. in the export example above)
+        @"C:\\ravendb-exports\\Northwind.ravendbdump",
+        token);
+
+await importOperation.WaitForCompletionAsync();
+`}
+
+
+
+
+
+## TransformScript
+
+`TransformScript` exposes the ability to modify, or even filter out, documents during the import and export process using the provided JavaScript.
+
+Underneath, the JavaScript engine is exactly the same as the one used for [patching operations](../../client-api/operations/patching/single-document.mdx), giving you identical syntax and capabilities, with the additional **ability to filter out documents by throwing a 'skip' exception**.
+
+
+
+{`var id = this['@metadata']['@id'];
+if (id === 'orders/999-A')
+    throw 'skip'; // filter-out
+
+this.Freight = 15.3;
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/smuggler/_what-is-smuggler-java.mdx b/versioned_docs/version-7.1/client-api/smuggler/_what-is-smuggler-java.mdx
new file mode 100644
index 0000000000..313e5fdbc5
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/smuggler/_what-is-smuggler-java.mdx
@@ -0,0 +1,137 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Smuggler gives you the ability to export data from, or import data to, a database using the JSON format.
+It is exposed via the `DocumentStore.smuggler()` method.
+
+## ForDatabase
+
+By default, `IDocumentStore.smuggler` works on the document store's default database, as set by the `IDocumentStore.database` property.
+
+In order to switch it to a different database, use the `.forDatabase` method.
+
+
+
+{`DatabaseSmuggler northwindSmuggler = store.smuggler().forDatabase("Northwind");
+`}
+
+
+
+
+
+## Export
+
+### Syntax
+
+
+
+{`//export
+public Operation exportAsync(DatabaseSmugglerExportOptions options, String toFile);
+
+public Operation exportAsync(DatabaseSmugglerExportOptions options, DatabaseSmuggler toDatabase);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **options** | `DatabaseSmugglerExportOptions` | Options that will be used during the export. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerexportoptions). |
+| **toDatabase** | `DatabaseSmuggler` | `DatabaseSmuggler` instance used as a destination |
+| **toFile** | `String` | Path to a file where exported data will be written |
+
+| Return Value | |
+| ------------- | ----- |
+| `Operation` | An instance of the `Operation` class, which gives you the ability to wait for the operation to complete and to subscribe to operation progress events |
+
+### DatabaseSmugglerExportOptions
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **collections** | `List<String>` | List of specific collections to export. If empty, then all collections will be exported. Default: `empty` |
+| **operateOnTypes** | `DatabaseItemType` | Indicates what should be exported. Default: `Indexes`, `Documents`, `RevisionDocuments`, `Conflicts`, `DatabaseRecord`, `Identities`, `CompareExchange`, `Subscriptions` |
+| **operateOnDatabaseRecordType** | `DatabaseRecordItemType` | Indicates what should be exported from database record. Default: `Client`, `ConflictSolverConfig`, `Expiration`, `ExternalReplications`, `PeriodicBackups`, `RavenConnectionStrings`, `RavenEtls`, `Revisions`, `SqlConnectionStrings`, `Sorters`, `SqlEtls`, `HubPullReplications`, `SinkPullReplications` |
+| **includeExpired** | `boolean` | Should expired documents be included in the export. Default: `true` |
+| **removeAnalyzers** | `boolean` | Should analyzers be removed from Indexes. Default: `false` |
+| **transformScript** | `String` | JavaScript-based script applied to every exported document. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#transformscript). |
+| **maxStepsForTransformScript** | `int` | Maximum number of steps that the transform script can process before failing. Default: 10000 |
+
+### Example
+
+
+
+{`// export only Indexes and Documents to a given file
+Operation exportOperation = store.smuggler().exportAsync(exportOptions, "C:\\\\ravendb-exports\\\\Northwind.ravendbdump");
+`}
+
+
+
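+
+The `exportOptions` instance used in the example above is not shown being constructed.
+A minimal sketch of building it (an assumption based on the JVM client's setter conventions; the chosen item types are arbitrary):
+
+
+
+{`DatabaseSmugglerExportOptions exportOptions = new DatabaseSmugglerExportOptions();
+// Export only indexes and documents (assumes operateOnTypes is an EnumSet):
+exportOptions.setOperateOnTypes(EnumSet.of(
+    DatabaseItemType.INDEXES,
+    DatabaseItemType.DOCUMENTS));
+`}
+
+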
+
+
+## Import
+
+### Syntax
+
+
+
+{`public Operation importAsync(DatabaseSmugglerImportOptions options, String fromFile);
+public Operation importAsync(DatabaseSmugglerImportOptions options, InputStream stream);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **options** | `DatabaseSmugglerImportOptions` | Options that will be used during the import. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerimportoptions). |
+| **stream** | `InputStream` | Stream with data to import |
+| **fromFile** | `String` | Path to a file from which data will be imported |
+
+| Return Value | |
+| ------------- | ----- |
+| `Operation` | An instance of the `Operation` class, which gives you the ability to wait for the operation to complete and to subscribe to operation progress events |
+
+### DatabaseSmugglerImportOptions
+
+| Parameters | | |
+| - | - | - |
+| **collections** | `List<String>` | List of specific collections to import. If empty, then all collections will be imported. Default: `empty` |
+| **operateOnTypes** | `DatabaseItemType` | Indicates what should be imported. Default: `INDEXES`, `DOCUMENTS`, `REVISION_DOCUMENTS`, `CONFLICTS`, `DATABASE_RECORD`, `IDENTITIES`, `COMPARE_EXCHANGE`, `SUBSCRIPTIONS` |
+| **operateOnDatabaseRecordType** | `DatabaseRecordItemType` | Indicates what should be imported. Default: `CLIENT`, `CONFLICT_SOLVER_CONFIG`, `EXPIRATION`, `EXTERNAL_REPLICATIONS`, `PERIODIC_BACKUPS`, `RAVEN_CONNECTION_STRINGS`, `RAVEN_ETLS`, `REVISIONS`, `SQL_CONNECTION_STRINGS`, `SORTERS`, `SQL_ETLS`, `HUB_PULL_REPLICATIONS`, `SINK_PULL_REPLICATIONS` |
+| **includeExpired** | `boolean` | Should expired documents be included in the import. Default: `true` |
+| **removeAnalyzers** | `boolean` | Should analyzers be removed from Indexes. Default: `false` |
+| **transformScript** | `String` | JavaScript-based script applied to every imported document. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#transformscript). |
+| **maxStepsForTransformScript** | `int` | Maximum number of steps that the transform script can process before failing. Default: 10000 |
+
+
+### Example
+
+
+
+{`Operation importOperation = store.smuggler().importAsync(importOptions, "C:\\\\ravendb-exports\\\\Northwind.ravendbdump");
+`}
+
+
+
+
+
+## TransformScript
+
+`TransformScript` exposes the ability to modify, or even filter out, documents during the import and export process using the provided JavaScript.
+
+Underneath, the JavaScript engine is exactly the same as the one used for [patching operations](../../client-api/operations/patching/single-document.mdx), giving you identical syntax and capabilities, with the additional **ability to filter out documents by throwing a 'skip' exception**.
+
+
+
+{`var id = this['@metadata']['@id'];
+if (id === 'orders/999-A')
+    throw 'skip'; // filter-out
+
+this.Freight = 15.3;
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/smuggler/_what-is-smuggler-nodejs.mdx b/versioned_docs/version-7.1/client-api/smuggler/_what-is-smuggler-nodejs.mdx
new file mode 100644
index 0000000000..13c11a8b6f
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/smuggler/_what-is-smuggler-nodejs.mdx
@@ -0,0 +1,150 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Smuggler gives you the ability to export data from, or import data to, a database using the JSON format.
+It is exposed via the `DocumentStore.smuggler` property.
+
+## ForDatabase
+
+By default, `DocumentStore.smuggler` works on the document store's default database, as set by the `DocumentStore.database` property.
+
+In order to switch it to a different database, use the `.forDatabase` method.
+
+
+
+{`const northwindSmuggler = store
+    .smuggler
+    .forDatabase("Northwind");
+`}
+
+
+
+
+
+## Export
+
+### Usage
+
+
+
+{`const operation = await store.smuggler.export(options, toDatabase);
+
+const operation = await store.smuggler.export(options, toFile);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **options** | `DatabaseSmugglerExportOptions` | Options that will be used during the export. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerexportoptions). |
+| **toDatabase** | `DatabaseSmuggler` | `DatabaseSmuggler` instance used as a destination |
+| **toFile** | `string` | Path to a file where exported data will be written |
+
+| Return Value | |
+| ------------- | ----- |
+| `Operation` | An instance of the `Operation` class, which gives you the ability to wait for the operation to complete and to subscribe to operation progress events |
+
+### DatabaseSmugglerExportOptions
+
+| Parameters | | |
+| - | - | - |
+| **collections** | `string[]` | List of specific collections to export. If empty, then all collections will be exported. Default: `empty` |
+| **operateOnTypes** | `DatabaseItemType[]` | Indicates what should be exported. Default: `Indexes`, `Documents`, `RevisionDocuments`, `Conflicts`, `DatabaseRecord`, `Identities`, `CompareExchange`, `Subscriptions` |
+| **operateOnDatabaseRecordTypes** | `DatabaseRecordItemType[]` | Indicates what should be exported from database record. Default: `Client`, `ConflictSolverConfig`, `Expiration`, `ExternalReplications`, `PeriodicBackups`, `RavenConnectionStrings`, `RavenEtls`, `Revisions`, `SqlConnectionStrings`, `Sorters`, `SqlEtls`, `HubPullReplications`, `SinkPullReplications` |
+| **includeExpired** | `boolean` | Should expired documents be included in the export. Default: `true` |
+| **includeArtificial** | `boolean` | ? |
+| **removeAnalyzers** | `boolean` | Should analyzers be removed from Indexes. Default: `false` |
+| **transformScript** | `string` | JavaScript-based script applied to every exported document. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#transformscript). |
+| **maxStepsForTransformScript** | `number` | Maximum number of steps that the transform script can process before failing.
Default: 10000 |
+| **skipRevisionCreation** | `boolean` | Skip revision creation |
+| **encryptionKey** | `string` | Encryption key used for restore |
+
+### Example
+
+
+
+{`const options = new DatabaseSmugglerExportOptions();
+options.operateOnTypes = ["Documents"];
+const operation = await store
+    .smuggler
+    .export(options, "C:\\\\ravendb-exports\\\\Northwind.ravendbdump");
+await operation.waitForCompletion();
+`}
+
+
+
+
+
+## Import
+
+### Usage
+
+
+
+{`const operation = await store.smuggler.import(options, fromFile);
+
+const operation = await store.smuggler.import(options, stream);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **options** | `DatabaseSmugglerImportOptions` | Options that will be used during the import. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerimportoptions). |
+| **stream** | `Stream` | Stream with data to import |
+| **fromFile** | `string` | Path to a file from which data will be imported |
+
+| Return Value | |
+| ------------- | ----- |
+| `Operation` | An instance of the `Operation` class, which gives you the ability to wait for the operation to complete and to subscribe to operation progress events |
+
+### DatabaseSmugglerImportOptions
+
+| Parameters | | |
+| - | - | - |
+| **operateOnTypes** | `DatabaseItemType[]` | Indicates what should be imported. Default: `Indexes`, `Documents`, `RevisionDocuments`, `Conflicts`, `DatabaseRecord`, `Identities`, `CompareExchange`, `Subscriptions` |
+| **operateOnDatabaseRecordTypes** | `DatabaseRecordItemType[]` | Indicates what should be imported. Default: `Client`, `ConflictSolverConfig`, `Expiration`, `ExternalReplications`, `PeriodicBackups`, `RavenConnectionStrings`, `RavenEtls`, `Revisions`, `SqlConnectionStrings`, `Sorters`, `SqlEtls`, `HubPullReplications`, `SinkPullReplications` |
+| **includeExpired** | `boolean` | Should expired documents be included in the import. Default: `true` |
+| **includeArtificial** | `boolean` | ? |
+| **removeAnalyzers** | `boolean` | Should analyzers be removed from Indexes. Default: `false` |
+| **transformScript** | `string` | JavaScript-based script applied to every imported document. Read more [here](../../client-api/smuggler/what-is-smuggler.mdx#transformscript). |
+| **maxStepsForTransformScript** | `number` | Maximum number of steps that the transform script can process before failing. Default: 10000 |
+| **skipRevisionCreation** | `boolean` | Skip revision creation |
+| **encryptionKey** | `string` | Encryption key used for restore |
+
+### Example
+
+
+
+{`const options = new DatabaseSmugglerImportOptions();
+options.operateOnTypes = ["Documents"];
+const operation = await store.smuggler.import(options, "C:\\\\ravendb-exports\\\\Northwind.ravendbdump");
+await operation.waitForCompletion();
+`}
+
+
+
+
+
+## TransformScript
+
+`TransformScript` exposes the ability to modify, or even filter out, documents during the import and export process using the provided JavaScript.
+
+Underneath, the JavaScript engine is exactly the same as the one used for [patching operations](../../client-api/operations/patching/single-document.mdx), giving you identical syntax and capabilities, with the additional **ability to filter out documents by throwing a 'skip' exception**.
+
+
+
+{`var id = this['@metadata']['@id'];
+if (id === 'orders/999-A')
+    throw 'skip'; // filter-out
+
+this.Freight = 15.3;
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/client-api/smuggler/what-is-smuggler.mdx b/versioned_docs/version-7.1/client-api/smuggler/what-is-smuggler.mdx
new file mode 100644
index 0000000000..57978f9272
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/smuggler/what-is-smuggler.mdx
@@ -0,0 +1,37 @@
+---
+title: "What is Smuggler"
+hide_table_of_contents: true
+sidebar_label: What is Smuggler
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import WhatIsSmugglerCsharp from './_what-is-smuggler-csharp.mdx';
+import WhatIsSmugglerJava from './_what-is-smuggler-java.mdx';
+import WhatIsSmugglerNodejs from './_what-is-smuggler-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/what-is-a-document-store.mdx b/versioned_docs/version-7.1/client-api/what-is-a-document-store.mdx
new file mode 100644
index 0000000000..ac8f619f38
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/what-is-a-document-store.mdx
@@ -0,0 +1,48 @@
+---
+title: "Client API: What is a Document Store"
+hide_table_of_contents: true
+sidebar_label: What is a Document Store
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import WhatIsADocumentStoreCsharp from './_what-is-a-document-store-csharp.mdx';
+import WhatIsADocumentStoreJava from './_what-is-a-document-store-java.mdx';
+import WhatIsADocumentStoreNodejs from './_what-is-a-document-store-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/client-api/what-is-a-public-api.mdx b/versioned_docs/version-7.1/client-api/what-is-a-public-api.mdx
new file mode 100644
index 0000000000..8557817aa8
--- /dev/null
+++ b/versioned_docs/version-7.1/client-api/what-is-a-public-api.mdx
@@ -0,0 +1,43 @@
+---
+title: "Client API: What is a public API?"
+hide_table_of_contents: true
+sidebar_label: What is a Public API
+sidebar_position: 4
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Client API: What is a public API?
+
+In RavenDB we do our best not to introduce any breaking changes in the public API between minor versions of our Client API. This means that an upgrade between version 4.0.A and 4.0.B, or even 4.C.D, should be smooth.
+
+## What is considered a public API?
+
+A very common question we get is what we consider a public API. The answer is not straightforward, because our .NET Client contains two DLLs:
+
+- `Raven.Client.dll`
+- `Sparrow.dll`
+
+A lot of types in those DLLs are shared between Client, Tools, and Server, so naturally some changes between versions might occur. But does this mean that we are changing the public API? In our opinion no, because there is a set of interfaces/methods/types that we consider unchangeable.
+
+Those interfaces/methods/types are related to the most common actions of the client, covering 99.9% of the usage cases. What are those, you might ask? They are related to the **session actions**, including **advanced session operations**, **operations for manipulating documents and attachments**, and **related types**. Any changes here (excluding new features) should be considered a bug. This also does not mean that changes will not occur at all - they can, but they will be backward-compatible changes, e.g. we might add an optional parameter to method X that will not break current behavior but will extend the functionality.
+
+## Binary-level compatibility
+
+We guarantee binary-level compatibility **within minor versions** (e.g. 4.0.X and 4.0.Y) of our client library. **Between minor versions** there are no binary-level compatibility guarantees, but we do guarantee source-level compatibility. What does this mean? It means that when you are upgrading from version 4.0.X to 4.0.Y you do not have to recompile your application - a simple DLL swap should work. For updates between minor versions, such as 4.0.X to 4.5.Y, we do not support that, so your application needs to be recompiled.
+
+### NuGet dependency
+
+Given that no binary compatibility is guaranteed between minors, any NuGet package taking a dependency on Raven packages should be locked down to the patch range. For example (the version range shown is illustrative):
+
+```
+<PackageReference Include="RavenDB.Client" Version="[4.0.0,4.1.0)" />
+```
+
+As such, that NuGet package will need to be re-compiled and re-deployed (with a new dependency range) on every minor release of the RavenDB NuGet package.
+
diff --git a/versioned_docs/version-7.1/compare-exchange/_category_.json b/versioned_docs/version-7.1/compare-exchange/_category_.json
new file mode 100644
index 0000000000..3c5a076b2f
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 5,
+    "label": "Compare-Exchange"
+}
diff --git a/versioned_docs/version-7.1/compare-exchange/api-studio-quick-links/_category_.json b/versioned_docs/version-7.1/compare-exchange/api-studio-quick-links/_category_.json
new file mode 100644
index 0000000000..be90a8f2ba
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/api-studio-quick-links/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 13,
+    "label": "API/Studio Quick Links"
+}
diff --git a/versioned_docs/version-7.1/compare-exchange/api-studio-quick-links/client-api-references.mdx b/versioned_docs/version-7.1/compare-exchange/api-studio-quick-links/client-api-references.mdx
new file mode 100644
index 0000000000..b4bffb2b85
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/api-studio-quick-links/client-api-references.mdx
@@ -0,0 +1,60 @@
+---
+title: "Client API References"
+hide_table_of_contents: true
+sidebar_label: "Client API References"
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+
+
+
+Refer to the following links for managing compare-exchange items via the Client API:
+
+* **Overview examples**:
+  [Example I - Email address reservation](../../compare-exchange/overview#example-i---email-address-reservation)
+  [Example II - Reserve a shared resource](../../compare-exchange/overview#example-ii---reserve-a-shared-resource)
+  [Example III - Ensuring unique values without using compare exchange](../../compare-exchange/overview#example-iii---ensuring-unique-values-without-using-compare-exchange)
+
+* **Create compare-exchange items**:
+  [Create item using a cluster-wide session](../../compare-exchange/create-cmpxchg-items#create-item-using-a-cluster-wide-session)
+  [Create 
item using a store operation](../../compare-exchange/create-cmpxchg-items#create-item-using-a-store-operation) + +* **Get compare-exchange item**: + [Get item using a cluster-wide session](../../compare-exchange/get-cmpxchg-item#get-item-using-a-cluster-wide-session) + [Get item using a store operation](../../compare-exchange/get-cmpxchg-item#get-item-using-a-store-operation) + +* **Get compare-exchange items**: + [Get compare-exchange items by list of keys](../../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-by-list-of-keys) + [Get compare-exchange items by prefix](../../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-by-prefix) + [Get compare-exchange items count](../../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-count) + +* **Delete compare-exchange items**: + [Delete compare-exchange item using a cluster-wide session](../../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-cluster-wide-session) + [Delete compare-exchange item using a store operation](../../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-store-operation) + +* **Update compare-exchange item**: + [Update compare-exchange item using a cluster-wide session](../../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-a-cluster-wide-session) + [Update compare-exchange item using a store operation](../../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-a-store-operation) + +* **Include compare-exchange items**: + [Include compare-exchange items when loading](../../compare-exchange/include-cmpxchg-items#include-compare-exchange-items-when-loading) + [Include compare-exchange items when querying](../../compare-exchange/include-cmpxchg-items#include-compare-exchange-items-when-querying) + +* **Indexing compare-exchange values**: + [Index compare-exchange values](../../compare-exchange/indexing-cmpxchg-values#index-compare-exchange-values) + [Query the index](../../compare-exchange/indexing-cmpxchg-values#query-the-index) + [Query the index and project compare-exchange values](../../compare-exchange/indexing-cmpxchg-values#query-the-index-and-project-compare-exchange-values) + +* **Compare-exchange in dynamic queries**: + [Projecting compare-exchange values in query results](../../compare-exchange/cmpxchg-in-dynamic-queries#projecting-compare-exchange-values-in-query-results) + [Filtering by compare-exchange value](../../compare-exchange/cmpxchg-in-dynamic-queries#filtering-by-compare-exchange-value) + +* **Compare-exchange expiration**: + [Add expiration date using the Client API](../../compare-exchange/cmpxchg-expiration#add-expiration-date-using-the-client-api) + +* **Atomic guards**: + [Atomic guard usage example](../../compare-exchange/atomic-guards#atomic-guard-usage-example) + [Best practice when storing a document in a cluster-wide transaction](../../compare-exchange/atomic-guards#best-practice-when-storing-a-document-in-a-cluster-wide-transaction) + + diff --git a/versioned_docs/version-7.1/compare-exchange/api-studio-quick-links/studio-references.mdx b/versioned_docs/version-7.1/compare-exchange/api-studio-quick-links/studio-references.mdx new file mode 100644 index 0000000000..bea5636bda --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/api-studio-quick-links/studio-references.mdx @@ -0,0 +1,29 @@ +--- +title: "Studio References" +hide_table_of_contents: true +sidebar_label: "Studio References" +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; + 
+ + +Refer to the following links for managing compare-exchange items via the Studio: + +* **Overview**: + [Ways to create and manage compare-exchange items](../../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items) + +* **Create compare-exchange items**: + [Create item using the Studio](../../compare-exchange/create-cmpxchg-items#create-item-using-the-studio) + +* **Delete compare-exchange items**: + [Delete compare-exchange items using the Studio](../../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-items-using-the-studio) + +* **Update compare-exchange item**: + [Update compare-exchange item using the Studio](../../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-the-studio) + +* **Compare-exchange expiration**: + [Add expiration date using the Studio](../../compare-exchange/cmpxchg-expiration#add-expiration-date-using-the-studio) + + diff --git a/versioned_docs/version-7.1/compare-exchange/assets/atomic-guard.png b/versioned_docs/version-7.1/compare-exchange/assets/atomic-guard.png new file mode 100644 index 0000000000..4e2a12abe7 Binary files /dev/null and b/versioned_docs/version-7.1/compare-exchange/assets/atomic-guard.png differ diff --git a/versioned_docs/version-7.1/compare-exchange/assets/create-new-cmpxchg-1.png b/versioned_docs/version-7.1/compare-exchange/assets/create-new-cmpxchg-1.png new file mode 100644 index 0000000000..e0a43f84c9 Binary files /dev/null and b/versioned_docs/version-7.1/compare-exchange/assets/create-new-cmpxchg-1.png differ diff --git a/versioned_docs/version-7.1/compare-exchange/assets/create-new-cmpxchg-2.png b/versioned_docs/version-7.1/compare-exchange/assets/create-new-cmpxchg-2.png new file mode 100644 index 0000000000..b29f4abdcd Binary files /dev/null and b/versioned_docs/version-7.1/compare-exchange/assets/create-new-cmpxchg-2.png differ diff --git a/versioned_docs/version-7.1/compare-exchange/assets/delete-cmpxchg.png b/versioned_docs/version-7.1/compare-exchange/assets/delete-cmpxchg.png new file mode 100644 index 0000000000..ca732e14be Binary files /dev/null and b/versioned_docs/version-7.1/compare-exchange/assets/delete-cmpxchg.png differ diff --git a/versioned_docs/version-7.1/compare-exchange/assets/set-expiration.png b/versioned_docs/version-7.1/compare-exchange/assets/set-expiration.png new file mode 100644 index 0000000000..aefe36fbfd Binary files /dev/null and b/versioned_docs/version-7.1/compare-exchange/assets/set-expiration.png differ diff --git a/versioned_docs/version-7.1/compare-exchange/assets/the-cmpxchg-view.png b/versioned_docs/version-7.1/compare-exchange/assets/the-cmpxchg-view.png new file mode 100644 index 0000000000..a1faf41437 Binary files /dev/null and b/versioned_docs/version-7.1/compare-exchange/assets/the-cmpxchg-view.png differ diff --git a/versioned_docs/version-7.1/compare-exchange/assets/update-cmpxchg-1.png b/versioned_docs/version-7.1/compare-exchange/assets/update-cmpxchg-1.png new file mode 100644 index 0000000000..85587c6c1e Binary files /dev/null and b/versioned_docs/version-7.1/compare-exchange/assets/update-cmpxchg-1.png differ diff --git a/versioned_docs/version-7.1/compare-exchange/assets/update-cmpxchg-2.png b/versioned_docs/version-7.1/compare-exchange/assets/update-cmpxchg-2.png new file mode 100644 index 0000000000..f848006979 Binary files /dev/null and b/versioned_docs/version-7.1/compare-exchange/assets/update-cmpxchg-2.png differ diff --git a/versioned_docs/version-7.1/compare-exchange/atomic-guards.mdx 
b/versioned_docs/version-7.1/compare-exchange/atomic-guards.mdx new file mode 100644 index 0000000000..1f49046005 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/atomic-guards.mdx @@ -0,0 +1,47 @@ +--- +title: "Atomic Guards" +hide_table_of_contents: true +sidebar_label: Atomic Guards +sidebar_position: 11 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import AtomicGuardsCsharp from './content/_atomic-guards-csharp.mdx'; +import AtomicGuardsPython from './content/_atomic-guards-python.mdx'; +import AtomicGuardsPhp from './content/_atomic-guards-php.mdx'; +import AtomicGuardsNodejs from './content/_atomic-guards-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/cmpxchg-expiration.mdx b/versioned_docs/version-7.1/compare-exchange/cmpxchg-expiration.mdx new file mode 100644 index 0000000000..e7858d3dab --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/cmpxchg-expiration.mdx @@ -0,0 +1,34 @@ +--- +title: "Compare-Exchange Expiration" +hide_table_of_contents: true +sidebar_label: "Compare-Exchange Expiration" +sidebar_position: 10 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CmpXchgItemExpirationCsharp from './content/_cmpxchg-item-expiration-csharp.mdx'; +import CmpXchgItemExpirationNodejs from './content/_cmpxchg-item-expiration-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/cmpxchg-in-dynamic-queries.mdx b/versioned_docs/version-7.1/compare-exchange/cmpxchg-in-dynamic-queries.mdx new file mode 100644 index 0000000000..9446d6e720 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/cmpxchg-in-dynamic-queries.mdx @@ -0,0 +1,37 @@ +--- +title: "Compare-Exchange in Dynamic Queries" +hide_table_of_contents: true +sidebar_label: "Compare-Exchange in Dynamic Queries" +sidebar_position: 9 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CmpXchgInDynamicQueriesCsharp from './content/_cmpxchg-in-dynamic-queries-csharp.mdx'; +import CmpXchgInDynamicQueriesNodejs from './content/_cmpxchg-in-dynamic-queries-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/configuration.mdx b/versioned_docs/version-7.1/compare-exchange/configuration.mdx new file mode 100644 index 0000000000..2b9393e08a --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/configuration.mdx @@ -0,0 +1,98 @@ +--- +title: "Compare-Exchange Configuration" +hide_table_of_contents: true +sidebar_label: "Configuration" +sidebar_position: 12 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + + + +* For an overview of what configuration options are and how they can be applied, + see the [Configuration Overview](../server/configuration/configuration-options) article. 
+
+* The following configuration options are available for compare-exchange:
+  * [Cluster.CompareExchangeExpiredDeleteFrequencyInSec](../compare-exchange/configuration#clustercompareexchangeexpireddeletefrequencyinsec)
+  * [Cluster.CompareExchangeTombstonesCleanupIntervalInMin](../compare-exchange/configuration#clustercompareexchangetombstonescleanupintervalinmin)
+  * [Cluster.MaxClusterTransactionCompareExchangeTombstoneCheckIntervalInMin](../compare-exchange/configuration#clustermaxclustertransactioncompareexchangetombstonecheckintervalinmin)
+  * [Cluster.DisableAtomicDocumentWrites](../compare-exchange/configuration#clusterdisableatomicdocumentwrites)
+
+
+
+---
+
+
+
+## Cluster.CompareExchangeExpiredDeleteFrequencyInSec
+
+Time (in seconds) between cleanup of **expired** compare-exchange items.
+
+- **Type**: `int`
+- **Default**: `60`
+- **Scope**: Server-wide only
+
+
+
+
+
+## Cluster.CompareExchangeTombstonesCleanupIntervalInMin
+
+* Compare-exchange tombstones are created when compare-exchange items are [deleted](../compare-exchange/delete-cmpxchg-items).
+  These tombstones are not removed immediately - RavenDB uses an internal cleaner task to periodically remove tombstones that are eligible for deletion.
+
+* This configuration option sets the interval, in minutes, between each cleanup run.
+
+* At each interval, the cleaner will only remove tombstones that are ready for removal - meaning their deletion has already been processed by all relevant subscribers.
+  Subscribers are internal RavenDB processes that need to observe or react to compare-exchange deletions,
+  such as indexes, ETL tasks, subscriptions, etc.
+  Tombstones become eligible for removal only after all such processes have handled the deletion.
+
+---
+
+- **Type**: `int`
+- **Default**: `10`
+- **Scope**: Server-wide only
+
+
+
+
+
+## Cluster.MaxClusterTransactionCompareExchangeTombstoneCheckIntervalInMin
+
+EXPERT ONLY:
+
+* This configuration sets the interval (in minutes) between checks for compare-exchange tombstones that can be marked for deletion by the cluster-wide transaction mechanism on the node.
+  This is separate from checks performed by other subscribers (such as indexes, subscriptions, or ETL tasks).
+
+* Normally, whenever a cluster-wide transaction command is processed, the cluster-wide transaction mechanism checks for compare-exchange tombstones that can be marked as eligible for deletion.
+  If no cluster-wide transaction command occurs within the specified interval, the mechanism will automatically perform this check after the configured time has elapsed.
+
+* Any tombstones that have been fully handled by the cluster-wide transaction mechanism will be marked for deletion, making them eligible for cleanup by the tombstone cleaner task.
+
+---
+
+- **Type**: `int`
+- **Default**: `5`
+- **Scope**: Server-wide only
+
+
+
+
+
+## Cluster.DisableAtomicDocumentWrites
+
+EXPERT ONLY:
+Disables automatic atomic writes for cluster-wide write transactions.
+If set to _true_, only explicitly added compare-exchange values will be considered when validating cluster-wide transactions.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide or per database
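+
+Like the other options above, this option can be set, for example, in the server's `settings.json`
+(see the [Configuration Overview](../server/configuration/configuration-options) article linked above).
+A minimal sketch; the key/value shown is illustrative:
+
+```json
+{
+    "Cluster.DisableAtomicDocumentWrites": true
+}
+```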
+
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-csharp.mdx
new file mode 100644
index 0000000000..c98fe8c8b1
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-csharp.mdx
@@ -0,0 +1,357 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Atomic Guards** are [compare-exchange key/value items](../compare-exchange/overview)
+  that RavenDB creates and manages **automatically** to guarantee [ACID](../server/clustering/cluster-transactions#cluster-transactions-properties) behavior in cluster-wide sessions.
+
+* When a document is created in a cluster-wide session, RavenDB associates it with a unique atomic guard item.
+  Atomic guards coordinate concurrent writes by different sessions to the same document.
+
+* In this article:
+  * [Atomic guard creation and update](../compare-exchange/atomic-guards#atomic-guard-creation-and-update)
+  * [Atomic guard usage example](../compare-exchange/atomic-guards#atomic-guard-usage-example)
+  * [Atomic guard database scope](../compare-exchange/atomic-guards#atomic-guard-database-scope)
+  * [Disabling atomic guards](../compare-exchange/atomic-guards#disabling-atomic-guards)
+  * [When are atomic guards removed](../compare-exchange/atomic-guards#when-are-atomic-guards-removed)
+  * [Best practice when storing a document in a cluster-wide transaction](../compare-exchange/atomic-guards#best-practice-when-storing-a-document-in-a-cluster-wide-transaction)
+
+
+
+---
+
+## Atomic guard creation and update
+
+
+Atomic guards are created and managed **only when the session's transaction mode is set to [ClusterWide](../client-api/session/cluster-transaction/overview#open-a-cluster-transaction)**.
+
+
+* **When creating a new document**:
+  A new atomic guard is created when a new document is successfully saved.
+
+* **When modifying an existing document that already has an atomic guard**:
+  * The atomic guard's Raft index is incremented when the document is successfully saved after being modified.
+    This allows RavenDB to detect that the document has changed.
+  * If another session had loaded the document before the document's version changed, it will not be able to save its changes
+    unless it first reloads the updated version. Otherwise, a `ConcurrencyException` is thrown.
+
+* **When modifying an existing document that doesn't have an atomic guard**:
+  * A new atomic guard is created when modifying an existing document that does not yet have one.
+  * The atomic guard may be absent because the document was created in a single-node session,
+    or because the atomic guard was manually removed (which is not recommended).
+
+* **When saving a document fails**:
+  * If a session's `SaveChanges()` fails, the entire session is rolled back and the atomic guard is Not created.
+  * Ensure your business logic is designed to re-execute the session in case saving changes fails for any reason.
+
+---
+
+## Atomic guard usage example
+
+In the code sample below, an atomic guard is automatically created when a new document is saved.
+It is then used to detect and prevent conflicting writes: when two sessions load and modify the same document,
+only the first save succeeds, and the second fails with a _ConcurrencyException_.
+
+
+
+```csharp
+using (var session = store.OpenSession(new SessionOptions
+{
+    // Open a cluster-wide session:
+    TransactionMode = TransactionMode.ClusterWide
+}))
+{
+    session.Store(new User(), "users/johndoe");
+    session.SaveChanges();
+    // An atomic guard is now automatically created for the new document "users/johndoe".
+}
+
+// Open two concurrent cluster-wide sessions:
+using (var session1 = store.OpenSession(
+    new SessionOptions
+    {TransactionMode = TransactionMode.ClusterWide}))
+using (var session2 = store.OpenSession(
+    new SessionOptions
+    {TransactionMode = TransactionMode.ClusterWide}))
+{
+    // Both sessions load the same document:
+    var loadedUser1 = session1.Load<User>("users/johndoe");
+    loadedUser1.Name = "jindoe";
+
+    var loadedUser2 = session2.Load<User>("users/johndoe");
+    loadedUser2.Name = "jandoe";
+
+    // session1 saves its changes first —
+    // this increments the Raft index of the associated atomic guard.
+    session1.SaveChanges();
+
+    // session2 tries to save using an outdated atomic guard version
+    // and fails with a ConcurrencyException.
+    session2.SaveChanges();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession(new SessionOptions
+{
+    // Open a cluster-wide session:
+    TransactionMode = TransactionMode.ClusterWide
+}))
+{
+    await asyncSession.StoreAsync(new User(), "users/johndoe");
+    await asyncSession.SaveChangesAsync();
+    // An atomic guard is now automatically created for the new document "users/johndoe".
+}
+
+// Open two concurrent cluster-wide sessions:
+using (var asyncSession1 = store.OpenAsyncSession(
+    new SessionOptions
+    {TransactionMode = TransactionMode.ClusterWide}))
+using (var asyncSession2 = store.OpenAsyncSession(
+    new SessionOptions
+    {TransactionMode = TransactionMode.ClusterWide}))
+{
+    // Both sessions load the same document:
+    var loadedUser1 = await asyncSession1.LoadAsync<User>("users/johndoe");
+    loadedUser1.Name = "jindoe";
+
+    var loadedUser2 = await asyncSession2.LoadAsync<User>("users/johndoe");
+    loadedUser2.Name = "jandoe";
+
+    // asyncSession1 saves its changes first —
+    // this increments the Raft index of the associated atomic guard.
+    await asyncSession1.SaveChangesAsync();
+
+    // asyncSession2 tries to save using an outdated atomic guard version
+    // and fails with a ConcurrencyException.
+    await asyncSession2.SaveChangesAsync();
+}
+```
+
+
+
+After running the above example, you can view the automatically created atomic guard in the **Compare-Exchange view**
+in the Studio:
+
+![Atomic Guard](../assets/atomic-guard.png)
+
+1. These are **custom compare-exchange items**, created by the user for any purpose,
+   as described in [Create compare-exchange items](../compare-exchange/create-cmpxchg-items).
+   They are NOT the automatically created atomic guards.
+
+2. This is the **atomic guard** that was generated by running the example above.
+   The generated atomic guard **key** is: `rvn-atomic/users/johndoe`. It is composed of:
+   * The prefix `rvn-atomic/`.
+   * The ID of the associated document (`users/johndoe`).
+
+
+  * Although this Studio view allows editing compare-exchange items, **do NOT delete or modify atomic guard entries**.
+  * Doing so will interfere with RavenDB's ability to track document versioning through atomic guards.
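+
+The atomic guard itself can also be inspected programmatically, like any other compare-exchange item.
+The following is a minimal, read-only sketch (not part of the example above): it fetches the guard created for
+`users/johndoe` with `GetCompareExchangeValueOperation` and prints its Raft index.
+Read it if you wish, but do not modify or delete it.
+
+```csharp
+// A minimal, read-only sketch: fetch the atomic guard item by its well-known key.
+// Assumes the "users/johndoe" document from the example above was created
+// in a cluster-wide session, so the guard "rvn-atomic/users/johndoe" exists.
+var guard = store.Operations.Send(
+    new GetCompareExchangeValueOperation<object>("rvn-atomic/users/johndoe"));
+
+if (guard != null)
+{
+    // 'guard.Index' is the Raft index that cluster-wide sessions compare
+    // against when validating SaveChanges().
+    Console.WriteLine($"Key: {guard.Key}, Raft index: {guard.Index}");
+}
+```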
+
+
+---
+
+## Atomic guard database scope
+
+* Atomic guards are local to the database on which they were defined.
+
+* Since atomic guards are implemented as compare-exchange items,
+  they are Not externally replicated to other databases by any ongoing replication task.
+  Learn more in [why compare-exchange items are not replicated](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases).
+
+---
+
+## Disabling atomic guards
+
+* Before atomic guards were introduced (in RavenDB 5.2), client code had to explicitly manage compare-exchange entries
+  to ensure concurrency control and maintain ACID guarantees in cluster-wide transactions.
+
+* You can still take this manual approach by disabling the automatic use of atomic guards in a cluster-wide session,
+  and managing the required [compare-exchange key/value pairs](../compare-exchange/overview) yourself,
+  as shown in this [example](../compare-exchange/overview#example-i---email-address-reservation).
+
+* To disable the automatic creation and use of atomic guards in a cluster-wide session,
+  set the session's `DisableAtomicDocumentWritesInClusterWideTransaction` configuration option to `true`.
+
+
+
+```csharp
+using (var session = store.OpenSession(new SessionOptions
+{
+    TransactionMode = TransactionMode.ClusterWide,
+    // Disable atomic-guards
+    DisableAtomicDocumentWritesInClusterWideTransaction = true
+}))
+{
+    session.Store(new User(), "users/johndoe");
+
+    // No atomic-guard will be created upon SaveChanges
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession(new SessionOptions
+{
+    TransactionMode = TransactionMode.ClusterWide,
+    // Disable atomic-guards
+    DisableAtomicDocumentWritesInClusterWideTransaction = true
+}))
+{
+    await asyncSession.StoreAsync(new User(), "users/johndoe");
+
+    // No atomic-guard will be created upon SaveChanges
+    await asyncSession.SaveChangesAsync();
+}
+```
+
+
+
+---
+
+## When are atomic guards removed
+
+Atomic guards are removed **automatically** in the following scenarios:
+(you don't need to clean them up manually)
+
+* **Document deleted via a cluster-wide session**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Delete the document using a cluster-wide session - its atomic guard will be removed automatically.
+
+* **Document expires via the expiration feature**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Add the `@expires` metadata property to the document, as described in [Document expiration](../studio/database/settings/document-expiration).
+  * When the expiration time is reached, the document and its atomic guard will both be removed automatically.
+  * Since different cleanup tasks handle the removal of **expired** documents and the removal of their associated atomic guards,
+    the atomic guards of removed documents may linger in the compare-exchange entries list for a short while before they are removed.
+    You do Not need to remove such atomic guards yourself, they will be removed by the cleanup task.
+
+
+
+* **Do not delete or modify atomic guards manually**.
+  If a session attempts to save a document whose atomic guard was removed or modified, it will fail with an error.
+
+* If you accidentally remove an atomic guard that is associated with an existing document,
+  you can restore it by re-saving the document in a cluster-wide session;
+  this will re-create the atomic guard automatically.
+
+
+
+---
+
+## Best practice when storing a document in a cluster-wide transaction
+
+* When working with a cluster-wide session,
+  we recommend that you always **`Load` the document into the session before storing it** -
+  even if the document is expected to be new.
+
+* This is especially important if a document (originally created in a cluster-wide transaction) was deleted **outside** of a cluster-wide session -
+  as when using a [single-node session](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction)
+  or the [DeleteByQueryOperation](../client-api/operations/common/delete-by-query).
+  In these cases, the document is deleted, but the atomic guard remains (it is not automatically removed).
+  If you attempt to re-create such a document without loading it first,
+  RavenDB will fail to save it because the session is unaware of the existing atomic guard’s latest Raft index.
+
+* In this example, the document is loaded into the session BEFORE creating or modifying it:
+
+
+
+```csharp
+using (var session = store.OpenSession(new SessionOptions
+{
+    // Open a cluster-wide session
+    TransactionMode = TransactionMode.ClusterWide
+}))
+{
+    // Load the user document BEFORE creating a new one or modifying it if it already exists
+    var user = session.Load<User>("users/johndoe");
+
+    if (user == null)
+    {
+        // Document doesn't exist => create a new document:
+        var newUser = new User
+        {
+            Name = "John Doe",
+            // ... initialize other properties
+        };
+
+        // Store the new user document in the session
+        session.Store(newUser, "users/johndoe");
+    }
+    else
+    {
+        // Document exists => apply your modifications:
+        user.Name = "New name";
+        // ... make any other updates
+
+        // No need to call Store() again
+        // RavenDB tracks changes on loaded entities
+    }
+
+    // Commit your changes
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession(new SessionOptions
+{
+    // Open a cluster-wide session
+    TransactionMode = TransactionMode.ClusterWide
+}))
+{
+    // Load the user document BEFORE creating or updating
+    var user = await asyncSession.LoadAsync<User>("users/johndoe");
+
+    if (user == null)
+    {
+        // Document doesn't exist => create a new document:
+        var newUser = new User
+        {
+            Name = "John Doe",
+            // ... initialize other properties
+        };
+
+        // Store the new user document in the session
+        await asyncSession.StoreAsync(newUser, "users/johndoe");
+    }
+    else
+    {
+        // Document exists => apply your modifications:
+        user.Name = "New name";
+        // ... make any other updates
+
+        // No need to call Store() again
+        // RavenDB tracks changes on loaded entities
+    }
+
+    // Commit your changes
+    await asyncSession.SaveChangesAsync();
+}
+```
+
+
+
+
+
+When _loading_ a document in a cluster-wide session, RavenDB attempts to retrieve the document from the document store:
+
+* **If the document is found**, it is loaded into the session,
+  and modifications will be saved successfully as long as no other session has modified the document in the meantime.
+  * RavenDB checks whether the Raft index of the atomic guard associated with the document matches the version tracked by the current session.
+ If another session has already updated the document (and incremented the atomic guard’s Raft index), the save will fail with a _ConcurrencyException_. + +* **If no document is found**, RavenDB will check whether a matching atomic guard exists + (as in the case when the document was deleted outside of a cluster-wide session): + * **If an atomic guard exists**, + the client constructs a change vector for the document using the atomic guard’s Raft index, and the document will be saved with this change vector. + * **If no atomic guard exists**, + the document is treated as a brand new document and will be saved as usual. + + diff --git a/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-nodejs.mdx new file mode 100644 index 0000000000..03d044c3b7 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-nodejs.mdx @@ -0,0 +1,259 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Atomic Guards** are [compare-exchange key/value items](../compare-exchange/overview) + that RavenDB creates and manages **automatically** to guarantee [ACID](../server/clustering/cluster-transactions#cluster-transactions-properties) behavior in cluster-wide sessions. + +* When a document is created in a cluster-wide session, RavenDB associates it with a unique atomic guard item. + Atomic guards coordinate concurrent writes by different sessions to the same document. + +* In this article: + * [Atomic guard creation and update](../compare-exchange/atomic-guards#atomic-guard-creation-and-update) + * [Atomic guard usage example](../compare-exchange/atomic-guards#atomic-guard-usage-example) + * [Atomic guard database scope](../compare-exchange/atomic-guards#atomic-guard-database-scope) + * [Disabling atomic guards](../compare-exchange/atomic-guards#disabling-atomic-guards) + * [When are atomic guards removed](../compare-exchange/atomic-guards#when-are-atomic-guards-removed) + * [Best practice when storing a document in a cluster-wide transaction](../compare-exchange/atomic-guards#best-practice-when-storing-a-document-in-a-cluster-wide-transaction) + + + +--- + +## Atomic guard creation and update + + +Atomic guards are created and managed **only when the session's transaction mode is set to [ClusterWide](../client-api/session/cluster-transaction/overview#open-a-cluster-transaction)**. + + +* **When creating a new document**: + A new atomic guard is created when a new document is successfully saved. + +* **When modifying an existing document that already has an atomic guard**: + * The atomic guard’s Raft index is incremented when the document is successfully saved after being modified. + This allows RavenDB to detect that the document has changed. + * If another session had loaded the document before the document's version changed, it will not be able to save its changes + unless it first reloads the updated version. Otherwise, a `ConcurrencyException` is thrown. + +* **When modifying an existing document that doesn't have an atomic guard**: + * A new atomic guard is created when modifying an existing document that does not yet have one. + * The absence of the atomic guard may be because the document was created in a single-node session, + or because its atomic guard was manually removed (which is not recommended). 
+ +* **When saving a document fails**: + * If a session's `saveChanges()` fails, the entire session is rolled back and the atomic guard is Not created. + * Ensure your business logic is designed to re-execute the session in case saving changes fails for any reason. + +--- + +## Atomic guard usage example + +In the code sample below, an atomic guard is automatically created when a new document is saved. +It is then used to detect and prevent conflicting writes: when two sessions load and modify the same document, +only the first save succeeds, and the second fails with a _ConcurrencyException_. + + + +{`const user = \{ + firstName: "John", + lastName: "Doe" +\}; + +// Open a cluster-wide session: +const session = documentStore.openSession(\{ + transactionMode: "ClusterWide" +\}); + +await session.store(user, "users/johndoe"); +await session.saveChanges(); +// An atomic-guard is now automatically created for the new document "users/johndoe". + +// Open two concurrent cluster-wide sessions: +const session1 = documentStore.openSession(\{ + transactionMode: "ClusterWide" +\}); +const session2 = documentStore.openSession(\{ + transactionMode: "ClusterWide" +\}); + +// Both sessions load the same document: +const loadedUser1 = await session1.load("users/johndoe"); +loadedUser1.name = "jindoe"; + +const loadedUser2 = await session2.load("users/johndoe"); +loadedUser2.name = "jandoe"; + +// session1 saves its changes first — +// this increments the Raft index of the associated atomic guard. +await session1.saveChanges(); + +// session2 tries to save using an outdated atomic guard version +// and fails with a ConcurrencyException. +await session2.saveChanges(); +`} + + + +After running the above example, you can view the automatically created atomic guard in the **Compare-Exchange view** +in the Studio: + +![Atomic Guard](../assets/atomic-guard.png) + +1. These are **custom compare-exchange items**, created by the user for any purpose, + as described in [Create compare-exchange items](../compare-exchange/create-cmpxchg-items). + They are NOT the automatically created atomic guards. + +2. This is the **atomic guard** that was generated by running the example above. + The generated atomic guard **key** is: `rvn-atomic/users/johndoe`. It is composed of: + * The prefix `rvn-atomic/`. + * The ID of the associated document (`users/johndoe`). + + + * Although this Studio view allows editing compare-exchange items, **do NOT delete or modify atomic guard entries**. + * Doing so will interfere with RavenDB's ability to track document versioning through atomic guards. + + +--- + +## Atomic guard database scope + +* Atomic guards are local to the database on which they were defined. + +* Since atomic guards are implemented as compare-exchange items, + they are Not externally replicated to other databases by any ongoing replication task. + Learn more in [why compare-exchange items are not replicated](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases). + +--- + +## Disabling atomic guards + +* Before atomic guards were introduced (in RavenDB 5.2), client code had to explicitly manage compare-exchange entries + to ensure concurrency control and maintain ACID guarantees in cluster-wide transactions. 
+
+* You can still take this manual approach by disabling the automatic use of atomic guards in a cluster-wide session,
+  and managing the required [compare-exchange key/value pairs](../compare-exchange/overview) yourself,
+  as shown in this [example](../compare-exchange/overview#example-i---email-address-reservation).
+
+* To disable the automatic creation and use of atomic guards in a cluster-wide session,
+  set the session's `disableAtomicDocumentWritesInClusterWideTransaction` configuration option to `true`.
+
+
+
+{`// Open a cluster-wide session
+const session = documentStore.openSession(\{
+    transactionMode: "ClusterWide",
+    // Disable atomic-guards
+    disableAtomicDocumentWritesInClusterWideTransaction: true
+\});
+
+await session.store(user, "users/johndoe");
+
+// No atomic-guard will be created upon saveChanges
+await session.saveChanges();
+`}
+
+
+
+---
+
+## When are atomic guards removed
+
+Atomic guards are removed **automatically** in the following scenarios:
+(you don't need to clean them up manually)
+
+* **Document deleted via a cluster-wide session**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Delete the document using a cluster-wide session - its atomic guard will be removed automatically.
+
+* **Document expires via the expiration feature**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Add the `@expires` metadata property to the document, as described in [Document expiration](../studio/database/settings/document-expiration).
+  * When the expiration time is reached, the document and its atomic guard will both be removed automatically.
+  * Since different cleanup tasks handle the removal of **expired** documents and the removal of their associated atomic guards,
+    the atomic guards of removed documents may linger in the compare-exchange entries list for a short while before they are removed.
+    You do Not need to remove such atomic guards yourself, they will be removed by the cleanup task.
+
+
+
+* **Do not delete or modify atomic guards manually**.
+  If a session attempts to save a document whose atomic guard was removed or modified, it will fail with an error.
+
+* If you accidentally remove an atomic guard that is associated with an existing document,
+  you can restore it by re-saving the document in a cluster-wide session;
+  this will re-create the atomic guard automatically.
+
+
+
+---
+
+## Best practice when storing a document in a cluster-wide transaction
+
+* When working with a cluster-wide session,
+  we recommend that you always **`load` the document into the session before storing it** -
+  even if the document is expected to be a new document.
+
+* This is especially important if a document (originally created in a cluster-wide transaction) was deleted **outside** of a cluster-wide session -
+  as when using a [single-node session](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction)
+  or the [DeleteByQueryOperation](../client-api/operations/common/delete-by-query).
+  In these cases, the document is deleted, but the atomic guard remains (it is not automatically removed).
+  If you attempt to re-create such a document without loading it first,
+  RavenDB will fail to save it because the session is unaware of the existing atomic guard’s latest Raft index.
+ +* In this example, the document is loaded into the session BEFORE creating or modifying it: + + + +{`const session = documentStore.openSession(\{ + // Open a cluster-wide session + transactionMode: "ClusterWide" +\}); + +// Load the user document BEFORE creating or updating +const user = await session.load("users/johndoe"); + +if (!user) \{ + // Document doesn't exist => create a new document + const newUser = \{ + name: "John Doe", + // ... initialize other properties + \}; + + // Store the new user document in the session + await session.store(newUser, "users/johndoe"); + +\} else \{ + // Document exists => apply your modifications + user.name = "New name"; + // ... make any other updates + + // No need to call store() again + // RavenDB tracks changes on loaded entities +\} + +// Commit your changes +await session.saveChanges(); +`} + + + + + +When _loading_ a document in a cluster-wide session, RavenDB attempts to retrieve the document from the document store: + +* **If the document is found**, it is loaded into the session, + and modifications will be saved successfully as long as no other session has modified the document in the meantime. + * RavenDB checks whether the Raft index of the atomic guard associated with the document matches the version tracked by the current session. + If another session has already updated the document (and incremented the atomic guard’s Raft index), the save will fail with a _ConcurrencyException_. + +* **If no document is found**, RavenDB will check whether a matching atomic guard exists + (as in the case when the document was deleted outside of a cluster-wide session): + * **If an atomic guard exists**, + the client constructs a change vector for the document using the atomic guard’s Raft index, and the document will be saved with this change vector. + * **If no atomic guard exists**, + the document is treated as a brand new document and will be saved as usual. + + diff --git a/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-php.mdx b/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-php.mdx new file mode 100644 index 0000000000..04ed102fb7 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-php.mdx @@ -0,0 +1,273 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Atomic Guards** are [compare-exchange key/value items](../compare-exchange/overview) + that RavenDB creates and manages **automatically** to guarantee [ACID](../server/clustering/cluster-transactions#cluster-transactions-properties) behavior in cluster-wide sessions. + +* When a document is created in a cluster-wide session, RavenDB associates it with a unique atomic guard item. + Atomic guards coordinate concurrent writes by different sessions to the same document. 
+ +* In this article: + * [Atomic guard creation and update](../compare-exchange/atomic-guards#atomic-guard-creation-and-update) + * [Atomic guard usage example](../compare-exchange/atomic-guards#atomic-guard-usage-example) + * [Atomic guard database scope](../compare-exchange/atomic-guards#atomic-guard-database-scope) + * [Disabling atomic guards](../compare-exchange/atomic-guards#disabling-atomic-guards) + * [When are atomic guards removed](../compare-exchange/atomic-guards#when-are-atomic-guards-removed) + * [Best practice when storing a document in a cluster-wide transaction](../compare-exchange/atomic-guards#best-practice-when-storing-a-document-in-a-cluster-wide-transaction) + + + +--- + +## Atomic guard creation and update + + +Atomic guards are created and managed **only when the session's transaction mode is set to [ClusterWide](../client-api/session/cluster-transaction/overview#open-a-cluster-transaction)**. + + +* **When creating a new document**: + A new atomic guard is created when a new document is successfully saved. + +* **When modifying an existing document that already has an atomic guard**: + * The atomic guard’s Raft index is incremented when the document is successfully saved after being modified. + This allows RavenDB to detect that the document has changed. + * If another session had loaded the document before the document's version changed, it will not be able to save its changes + unless it first reloads the updated version. Otherwise, a `ConcurrencyException` is thrown. + +* **When modifying an existing document that doesn't have an atomic guard**: + * A new atomic guard is created when modifying an existing document that does not yet have one. + * The absence of the atomic guard may be because the document was created in a single-node session, + or because its atomic guard was manually removed (which is not recommended). + +* **When saving a document fails**: + * If a session's `saveChanges()` fails, the entire session is rolled back and the atomic guard is Not created. + * Ensure your business logic is designed to re-execute the session in case saving changes fails for any reason. + +--- + +## Atomic guard usage example + +In the code sample below, an atomic guard is automatically created when a new document is saved. +It is then used to detect and prevent conflicting writes: when two sessions load and modify the same document, +only the first save succeeds, and the second fails with a _ConcurrencyException_. + + + +{`// Open a cluster-wide session: +$sessionOptions = new SessionOptions(); +$sessionOptions->setTransactionMode(TransactionMode::clusterWide()); + +$session = $store->openSession($sessionOptions); + +try \{ + $session->store(new User(), "users/johndoe"); + // An atomic-guard is now automatically created for the new document "users/johndoe". 
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+
+// Open two concurrent cluster-wide sessions:
+
+$sessionOptions1 = new SessionOptions();
+$sessionOptions1->setTransactionMode(TransactionMode::clusterWide());
+
+$session1 = $store->openSession($sessionOptions1);
+try \{
+    $sessionOptions2 = new SessionOptions();
+    $sessionOptions2->setTransactionMode(TransactionMode::clusterWide());
+    $session2 = $store->openSession($sessionOptions2);
+
+    try \{
+        // Both sessions load the same document:
+        $loadedUser1 = $session1->load(User::class, "users/johndoe");
+        $loadedUser1->setName("jindoe");
+
+        $loadedUser2 = $session2->load(User::class, "users/johndoe");
+        $loadedUser2->setName("jandoe");
+
+        // session1 saves its changes first —
+        // this increments the Raft index of the associated atomic guard.
+        $session1->saveChanges();
+
+        // session2 tries to save using an outdated atomic guard version
+        // and fails with a ConcurrencyException.
+        $session2->saveChanges();
+    \} finally \{
+        $session2->close();
+    \}
+
+\} finally \{
+    $session1->close();
+\}
+`}
+
+
+
+After running the above example, you can view the automatically created atomic guard in the **Compare-Exchange view**
+in the Studio:
+
+![Atomic Guard](../assets/atomic-guard.png)
+
+1. These are **custom compare-exchange items**, created by the user for any purpose,
+   as described in [Create compare-exchange items](../compare-exchange/create-cmpxchg-items).
+   They are NOT the automatically created atomic guards.
+
+2. This is the **atomic guard** that was generated by running the example above.
+   The generated atomic guard **key** is: `rvn-atomic/users/johndoe`. It is composed of:
+   * The prefix `rvn-atomic/`.
+   * The ID of the associated document (`users/johndoe`).
+
+
+  * Although this Studio view allows editing compare-exchange items, **do NOT delete or modify atomic guard entries**.
+  * Doing so will interfere with RavenDB's ability to track document versioning through atomic guards.
+
+
+---
+
+## Atomic guard database scope
+
+* Atomic guards are local to the database on which they were defined.
+
+* Since atomic guards are implemented as compare-exchange items,
+  they are Not externally replicated to other databases by any ongoing replication task.
+  Learn more in [why compare-exchange items are not replicated](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases).
+
+---
+
+## Disabling atomic guards
+
+* Before atomic guards were introduced (in RavenDB 5.2), client code had to explicitly manage compare-exchange entries
+  to ensure concurrency control and maintain ACID guarantees in cluster-wide transactions.
+
+* You can still take this manual approach by disabling the automatic use of atomic guards in a cluster-wide session,
+  and managing the required [compare-exchange key/value pairs](../compare-exchange/overview) yourself,
+  as shown in this [example](../compare-exchange/overview#example-i---email-address-reservation).
+
+* To disable the automatic creation and use of atomic guards in a cluster-wide session,
+  set the session's `DisableAtomicDocumentWritesInClusterWideTransaction` configuration option to `true`.
+
+
+
+{`$sessionOptions = new SessionOptions();
+$sessionOptions->setTransactionMode(TransactionMode::clusterWide());
+$sessionOptions->setDisableAtomicDocumentWritesInClusterWideTransaction(true);
+
+$session = $store->openSession($sessionOptions);
+
+try \{
+    $session->store(new User(), "users/johndoe");
+    // No atomic-guard will be created upon saveChanges
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+---
+
+## When are atomic guards removed
+
+Atomic guards are removed **automatically** in the following scenarios:
+(you don't need to clean them up manually)
+
+* **Document deleted via a cluster-wide session**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Delete the document using a cluster-wide session - its atomic guard will be removed automatically.
+
+* **Document expires via the expiration feature**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Add the `@expires` metadata property to the document, as described in [Document expiration](../studio/database/settings/document-expiration).
+  * When the expiration time is reached, the document and its atomic guard will both be removed automatically.
+  * Since different cleanup tasks handle the removal of **expired** documents and the removal of their associated atomic guards,
+    the atomic guards of removed documents may linger in the compare-exchange entries list for a short while before they are removed.
+    You do Not need to remove such atomic guards yourself, they will be removed by the cleanup task.
+
+
+
+* **Do not delete or modify atomic guards manually**.
+  If a session attempts to save a document whose atomic guard was removed or modified, it will fail with an error.
+
+* If you accidentally remove an atomic guard that is associated with an existing document,
+  you can restore it by re-saving the document in a cluster-wide session;
+  this will re-create the atomic guard automatically.
+
+
+
+---
+
+## Best practice when storing a document in a cluster-wide transaction
+
+* When working with a cluster-wide session,
+  we recommend that you always **`load` the document into the session before storing it** -
+  even if the document is expected to be a new document.
+
+* This is especially important if a document (originally created in a cluster-wide transaction) was deleted **outside** of a cluster-wide session -
+  as when using a [single-node session](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction)
+  or the [DeleteByQueryOperation](../client-api/operations/common/delete-by-query).
+  In these cases, the document is deleted, but the atomic guard remains (it is not automatically removed).
+  If you attempt to re-create such a document without loading it first,
+  RavenDB will fail to save it because the session is unaware of the existing atomic guard’s latest Raft index.
+
+* In this example, the document is loaded into the session BEFORE creating or modifying it:
+
+
+
+{`// Open a cluster-wide session
+$sessionOptions = new SessionOptions();
+$sessionOptions->setTransactionMode(TransactionMode::clusterWide());
+
+$session = $store->openSession($sessionOptions);
+try \{
+    // Load the user document BEFORE creating or updating
+    $user = $session->load(User::class, "users/johndoe");
+
+    if ($user === null) \{
+        // Document doesn't exist => create a new document:
+        $newUser = new User();
+        $newUser->setName("John Doe");
+        // ... initialize other properties
+
+        // Store the new user document in the session
+        $session->store($newUser, "users/johndoe");
+    \} else \{
+        // Document exists => apply your modifications:
+        $user->setName("New name");
+        // ... make any other updates
+
+        // No need to call store() again
+        // RavenDB tracks changes on loaded entities
+    \}
+
+    // Commit your changes
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+When _loading_ a document in a cluster-wide session, RavenDB attempts to retrieve the document from the document store:
+
+* **If the document is found**, it is loaded into the session,
+  and modifications will be saved successfully as long as no other session has modified the document in the meantime.
+  * RavenDB checks whether the Raft index of the atomic guard associated with the document matches the version tracked by the current session.
+    If another session has already updated the document (and incremented the atomic guard’s Raft index), the save will fail with a _ConcurrencyException_.
+
+* **If no document is found**, RavenDB will check whether a matching atomic guard exists
+  (as in the case when the document was deleted outside of a cluster-wide session):
+  * **If an atomic guard exists**,
+    the client constructs a change vector for the document using the atomic guard’s Raft index, and the document will be saved with this change vector.
+  * **If no atomic guard exists**,
+    the document is treated as a brand new document and will be saved as usual.
+
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-python.mdx b/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-python.mdx
new file mode 100644
index 0000000000..9380170379
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_atomic-guards-python.mdx
@@ -0,0 +1,248 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Atomic Guards** are [compare-exchange key/value items](../compare-exchange/overview)
+  that RavenDB creates and manages **automatically** to guarantee [ACID](../server/clustering/cluster-transactions#cluster-transactions-properties) behavior in cluster-wide sessions.
+
+* When a document is created in a cluster-wide session, RavenDB associates it with a unique atomic guard item.
+  Atomic guards coordinate concurrent writes by different sessions to the same document.
+
+* In this article:
+  * [Atomic guard creation and update](../compare-exchange/atomic-guards#atomic-guard-creation-and-update)
+  * [Atomic guard usage example](../compare-exchange/atomic-guards#atomic-guard-usage-example)
+  * [Atomic guard database scope](../compare-exchange/atomic-guards#atomic-guard-database-scope)
+  * [Disabling atomic guards](../compare-exchange/atomic-guards#disabling-atomic-guards)
+  * [When are atomic guards removed](../compare-exchange/atomic-guards#when-are-atomic-guards-removed)
+  * [Best practice when storing a document in a cluster-wide transaction](../compare-exchange/atomic-guards#best-practice-when-storing-a-document-in-a-cluster-wide-transaction)
+
+
+
+---
+
+## Atomic guard creation and update
+
+
+Atomic guards are created and managed **only when the session's transaction mode is set to [CLUSTER_WIDE](../client-api/session/cluster-transaction/overview#open-a-cluster-transaction)**.
+
+
+* **When creating a new document**:
+  A new atomic guard is created when a new document is successfully saved.
+ +* **When modifying an existing document that already has an atomic guard**: + * The atomic guard’s Raft index is incremented when the document is successfully saved after being modified. + This allows RavenDB to detect that the document has changed. + * If another session had loaded the document before the document's version changed, it will not be able to save its changes + unless it first reloads the updated version. Otherwise, a `ConcurrencyException` is thrown. + +* **When modifying an existing document that doesn't have an atomic guard**: + * A new atomic guard is created when modifying an existing document that does not yet have one. + * The absence of the atomic guard may be because the document was created in a single-node session, + or because its atomic guard was manually removed (which is not recommended). + +* **When saving a document fails**: + * If a session's `save_changes()` fails, the entire session is rolled back and the atomic guard is Not created. + * Ensure your business logic is designed to re-execute the session in case saving changes fails for any reason. + +--- + +## Atomic guard usage example + +In the code sample below, an atomic guard is automatically created when a new document is saved. +It is then used to detect and prevent conflicting writes: when two sessions load and modify the same document, +only the first save succeeds, and the second fails with a _ConcurrencyException_. + + + +{`with store.open_session( + # Open a cluster-wide session: + session_options=SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE) +) as session: + session.store(User(), "users/johndoe") + session.save_changes() + # An atomic-guard is now automatically created for the new document "users/johndoe" + +# Open two concurrent cluster-wide sessions: +with store.open_session( + session_options=SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE) +) as session1: + with store.open_session( + session_options=SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE) + ) as session2: + # Both sessions load the same document: + loaded_user_1 = session1.load("users/johndoe", User) + loaded_user_1.name = "jindoe" + loaded_user_2 = session2.load("users/johndoe", User) + loaded_user_2.name = "jandoe" + + # session1 saves its changes first — + # this increments the Raft index of the associated atomic guard. + session1.save_changes() + + # session2 tries to save using an outdated atomic guard version + # and fails with a ConcurrencyException. + session2.save_changes() +`} + + + +After running the above example, you can view the automatically created atomic guard in the **Compare-Exchange view** +in the Studio: + +![Atomic Guard](../assets/atomic-guard.png) + +1. These are **custom compare-exchange items**, created by the user for any purpose, + as described in [Create compare-exchange items](../compare-exchange/create-cmpxchg-items). + They are NOT the automatically created atomic guards. + +2. This is the **atomic guard** that was generated by running the example above. + The generated atomic guard **key** is: `rvn-atomic/users/johndoe`. It is composed of: + * The prefix `rvn-atomic/`. + * The ID of the associated document (`users/johndoe`). + + + * Although this Studio view allows editing compare-exchange items, **do NOT delete or modify atomic guard entries**. + * Doing so will interfere with RavenDB's ability to track document versioning through atomic guards. 
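+
+The atomic guard itself can also be read programmatically, like any other compare-exchange item.
+The following is a minimal, read-only sketch (not part of the example above); it assumes the Python client's
+`GetCompareExchangeValueOperation` and the `store.operations.send(...)` pattern used elsewhere in these docs,
+and that the `users/johndoe` document from the example exists. Inspect the guard if you wish, but do not modify or delete it.
+
+{`# A minimal, read-only sketch - not part of the original example.
+# Assumes "users/johndoe" was created in a cluster-wide session above,
+# so the guard "rvn-atomic/users/johndoe" exists.
+guard = store.operations.send(
+    GetCompareExchangeValueOperation("rvn-atomic/users/johndoe", dict)
+)
+
+if guard is not None:
+    # guard.index is the Raft index that cluster-wide sessions compare
+    # against when validating save_changes()
+    print("Key:", guard.key, "Raft index:", guard.index)
+`}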
+
+
+---
+
+## Atomic guard database scope
+
+* Atomic guards are local to the database on which they were defined.
+
+* Since atomic guards are implemented as compare-exchange items,
+  they are Not externally replicated to other databases by any ongoing replication task.
+  Learn more in [why compare-exchange items are not replicated](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases).
+
+---
+
+## Disabling atomic guards
+
+* Before atomic guards were introduced (in RavenDB 5.2), client code had to explicitly manage compare-exchange entries
+  to ensure concurrency control and maintain ACID guarantees in cluster-wide transactions.
+
+* You can still take this manual approach by disabling the automatic use of atomic guards in a cluster-wide session,
+  and managing the required [compare-exchange key/value pairs](../compare-exchange/overview) yourself,
+  as shown in this [example](../compare-exchange/overview#example-i---email-address-reservation).
+
+* To disable the automatic creation and use of atomic guards in a cluster-wide session,
+  set the session's `disable_atomic_document_writes_in_cluster_wide_transaction` configuration option to `True`.
+
+
+
+{`with store.open_session(
+    # Open a cluster-wide session
+    session_options=SessionOptions(
+        transaction_mode=TransactionMode.CLUSTER_WIDE,
+        disable_atomic_document_writes_in_cluster_wide_transaction=True,
+    )
+) as session:
+    session.store(User(), "users/johndoe")
+
+    # No atomic-guard will be created upon save_changes
+    session.save_changes()
+`}
+
+
+
+---
+
+## When are atomic guards removed
+
+Atomic guards are removed **automatically** in the following scenarios:
+(you don't need to clean them up manually)
+
+* **Document deleted via a cluster-wide session**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Delete the document using a cluster-wide session - its atomic guard will be removed automatically.
+
+* **Document expires via the expiration feature**:
+  * Create a document using a cluster-wide session (an associated atomic guard is created).
+  * Add the `@expires` metadata property to the document, as described in [Document expiration](../studio/database/settings/document-expiration).
+  * When the expiration time is reached, the document and its atomic guard will both be removed automatically.
+  * Since different cleanup tasks handle the removal of **expired** documents and the removal of their associated atomic guards,
+    the atomic guards of removed documents may linger in the compare-exchange entries list for a short while before they are removed.
+    You do Not need to remove such atomic guards yourself, they will be removed by the cleanup task.
+
+
+
+* **Do not delete or modify atomic guards manually**.
+  If a session attempts to save a document whose atomic guard was removed or modified, it will fail with an error.
+
+* If you accidentally remove an atomic guard that is associated with an existing document,
+  you can restore it by re-saving the document in a cluster-wide session;
+  this will re-create the atomic guard automatically.
+
+
+
+---
+
+## Best practice when storing a document in a cluster-wide transaction
+
+* When working with a cluster-wide session,
+  we recommend that you always **`load` the document into the session before storing it** -
+  even if the document is expected to be a new document.
+ +* This is especially important if a document (originally created in a cluster-wide transaction) was deleted **outside** of a cluster-wide session - + as when using a [single-node session](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction) + or the [DeleteByQueryOperation](../client-api/operations/common/delete-by-query). + In these cases, the document is deleted, but the atomic guard remains (it is not automatically removed). + If you attempt to re-create such a document without loading it first, + RavenDB will fail to save it because the session is unaware of the existing atomic guard’s latest Raft index. + +* In this example, the document is loaded into the session BEFORE creating or modifying it: + + + +{`with store.open_session( + session_options=SessionOptions( + # Open a cluster-wide session + transaction_mode=TransactionMode.CLUSTER_WIDE + ) +) as session: + # Load the user document BEFORE creating or updating + user = session.load("users/johndoe", User) + + if user is None: + # Document doesn't exist => create a new document + new_user = User() + new_user.name = "John Doe" + # ... initialize other properties + + # Store the new user document in the session + session.store(new_user, "users/johndoe") + else: + # Document exists => apply your modifications + user.name = "New name" + # ... make any other updates + + # No need to call store() again + # RavenDB tracks changes on loaded entities + + # Commit your changes + session.save_changes() +`} + + + + + +When _loading_ a document in a cluster-wide session, RavenDB attempts to retrieve the document from the document store: + +* **If the document is found**, it is loaded into the session, + and modifications will be saved successfully as long as no other session has modified the document in the meantime. + * RavenDB checks whether the Raft index of the atomic guard associated with the document matches the version tracked by the current session. + If another session has already updated the document (and incremented the atomic guard’s Raft index), the save will fail with a _ConcurrencyException_. + +* **If no document is found**, RavenDB will check whether a matching atomic guard exists + (as in the case when the document was deleted outside of a cluster-wide session): + * **If an atomic guard exists**, + the client constructs a change vector for the document using the atomic guard’s Raft index, and the document will be saved with this change vector. + * **If no atomic guard exists**, + the document is treated as a brand new document and will be saved as usual. + + diff --git a/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-in-dynamic-queries-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-in-dynamic-queries-csharp.mdx new file mode 100644 index 0000000000..c0f29f37e9 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-in-dynamic-queries-csharp.mdx @@ -0,0 +1,494 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can use compare-exchange values in **dynamic queries** in two ways: + * **Project** compare-exchange values into the query results. + * **Filter** documents based on compare-exchange values. + +* Dynamic queries are queries that do not rely on a predefined static index. 
+  When you run a dynamic query with a filtering condition, RavenDB automatically creates an auto-index to serve it.
+  Learn more about dynamic queries in the [Query overview](../client-api/session/querying/how-to-query).
+
+* In this article:
+  * [Projecting compare-exchange values in query results](../compare-exchange/cmpxchg-in-dynamic-queries#projecting-compare-exchange-values-in-query-results)
+  * [Filtering by compare-exchange value](../compare-exchange/cmpxchg-in-dynamic-queries#filtering-by-compare-exchange-value)
+     * [Example 1 - Filtering when the compare-exchange value is a string](../compare-exchange/cmpxchg-in-dynamic-queries#example-1---filtering-when-the-compare-exchange-value-is-a-string)
+     * [Example 2 - Filtering when the compare-exchange value is an object](../compare-exchange/cmpxchg-in-dynamic-queries#example-2---filtering-when-the-compare-exchange-value-is-an-object)
+  * [Syntax](../compare-exchange/cmpxchg-in-dynamic-queries#syntax)
+
+
+
+---
+
+## Projecting compare-exchange values in query results
+
+* You can project values from compare-exchange items alongside fields from the queried documents.
+
+* The following example is based on the sample data described in: [Create sample compare-exchange items](../compare-exchange/indexing-cmpxchg-values#create-sample-compare-exchange-items).
+  In this example, we want to retrieve the current number of guests in each room.
+  We query all _HotelRoom_ documents and project:
+  * the number of guests (from the compare-exchange item's value), and
+  * the room number (from the document field).
+
+* No auto-index is created in this case, since the query doesn’t apply any filters.
+
+
+
+```csharp
+// The session does not need to be opened in cluster-wide mode
+using (var session = store.OpenSession())
+{
+    List<ProjectedNumberOfGuests> numberOfGuestsPerRoom = session
+        .Query<HotelRoom>()
+        .Select(x => new ProjectedNumberOfGuests
+        {
+            // Project content from the compare-exchange item:
+            // Call 'RavenQuery.CmpXchg' to load the compare-exchange value by its key.
+            CurrentNumberOfGuests =
+                RavenQuery.CmpXchg(x.RoomNumber).CurrentNumberOfGuests,
+
+            // Project content from the document:
+            RoomNumber = x.RoomNumber
+        })
+        .ToList();
+}
+```
+
+
+```csharp
+// The session does not need to be opened in cluster-wide mode
+using (var asyncSession = store.OpenAsyncSession())
+{
+    List<ProjectedNumberOfGuests> numberOfGuestsPerRoom = await asyncSession
+        .Query<HotelRoom>()
+        .Select(x => new ProjectedNumberOfGuests
+        {
+            // Project content from the compare-exchange item:
+            // Call 'RavenQuery.CmpXchg' to load the compare-exchange value by its key.
+            CurrentNumberOfGuests =
+                RavenQuery.CmpXchg(x.RoomNumber).CurrentNumberOfGuests,
+
+            // Project content from the document:
+            RoomNumber = x.RoomNumber
+        })
+        .ToListAsync();
+}
+```
+
+
+```csharp
+// The session does not need to be opened in cluster-wide mode
+using (var session = store.OpenSession())
+{
+    List<ProjectedNumberOfGuests> numberOfGuestsPerRoom = session.Advanced
+        .DocumentQuery<HotelRoom>()
+        .SelectFields<ProjectedNumberOfGuests>(QueryData.CustomFunction(
+            alias: "room",
+            func: @"
+            {
+                // Project content from the compare-exchange item:
+                // Call 'cmpxchg' to load the compare-exchange value by its key.
+                CurrentNumberOfGuests : cmpxchg(room.RoomNumber).CurrentNumberOfGuests,
+
+                // Project content from the document:
+                RoomNumber : room.RoomNumber
+            }"))
+        .ToList();
+}
+```
+
+
+```csharp
+// The session does not need to be opened in cluster-wide mode
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var numberOfGuestsPerRoom = await asyncSession.Advanced
+        .AsyncDocumentQuery<HotelRoom>()
+        .SelectFields<ProjectedNumberOfGuests>(QueryData.CustomFunction(
+            alias: "room",
+            func: @"
+            {
+                // Project content from the compare-exchange item:
+                // Call 'cmpxchg' to load the compare-exchange value by its key.
+                CurrentNumberOfGuests : cmpxchg(room.RoomNumber).CurrentNumberOfGuests,
+
+                // Project content from the document:
+                RoomNumber : room.RoomNumber
+            }"))
+        .ToListAsync();
}
+```
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    var numberOfGuestsPerRoom = session.Advanced
+        .RawQuery<ProjectedNumberOfGuests>(@"
+        from 'HotelRooms' as x
+        select {
+            CurrentNumberOfGuests : cmpxchg(x.RoomNumber).CurrentNumberOfGuests,
+            RoomNumber : x.RoomNumber
+        }")
+        .ToList();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var numberOfGuestsPerRoom = await asyncSession.Advanced
+        .AsyncRawQuery<ProjectedNumberOfGuests>(@"
+        from 'HotelRooms' as x
+        select {
+            CurrentNumberOfGuests : cmpxchg(x.RoomNumber).CurrentNumberOfGuests,
+            RoomNumber : x.RoomNumber
+        }")
+        .ToListAsync();
+}
+```
+
+
+```sql
+from "HotelRooms" as x
+select {
+    CurrentNumberOfGuests : cmpxchg(x.RoomNumber).CurrentNumberOfGuests,
+    RoomNumber : x.RoomNumber
+}
+```
+
+
+
+---
+
+## Filtering by compare-exchange value
+
+#### Example 1 - Filtering when the compare-exchange value is a string
+
+* You can filter documents based on a compare-exchange value.
+  For example, to find a user with a blocked email address,
+  you can filter by the compare-exchange value that stores the blocked email.
+
+* **Limitation**:
+  When filtering with `RavenQuery.CmpXchg` inside the `Where` predicate (as shown in this example),
+  the compare-exchange value must be a _string_.
+  If your compare-exchange value is of another type, you can still filter the results -
+  see the [next example](../compare-exchange/cmpxchg-in-dynamic-queries#example-2---filtering-when-the-compare-exchange-value-is-an-object).
+
+* Since this query uses filtering, RavenDB will automatically create an auto-index to serve it.
+  In this case, the auto-index `Auto/Users/ByEmail` will be created.
+
+
+
+```csharp
+// First, let's create a compare-exchange item with a blocked email address for this example:
+var putResult = store.Operations.Send(new PutCompareExchangeValueOperation<string>(
+    "blocked-address", "someUser@Company.com", 0));
+
+// Query the 'Users' collection
+// Find a user with an email address that matches the blocked address:
+// (The session does not need to be opened in cluster-wide mode)
+using (var session = store.OpenSession())
+{
+    var blockedUser = session.Query<User>()
+        // Call 'RavenQuery.CmpXchg' to load the compare-exchange value by its key
+        // Filter the users collection by the value:
+        .Where(x => x.Email == RavenQuery.CmpXchg<string>("blocked-address"))
+        .FirstOrDefault();
+}
+
+// The results will include the user document whose email address is "someUser@company.com"
+// (assuming such a user exists in your data).
+```
+
+
+```csharp
+// First, let's create a compare-exchange item with a blocked email address for this example:
+var putResult = await store.Operations.SendAsync(new PutCompareExchangeValueOperation<string>(
+    "blocked-address", "someUser@Company.com", 0));
+
+// Query the 'Users' collection
+// Find a user with an email address that matches the blocked address:
+// (The session does not need to be opened in cluster-wide mode)
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var blockedUser = await asyncSession.Query<User>()
+        // Call 'RavenQuery.CmpXchg' to load the compare-exchange value by its key
+        // Filter the users collection by the value:
+        .Where(x => x.Email == RavenQuery.CmpXchg<string>("blocked-address"))
+        .FirstOrDefaultAsync();
+}
+
+// The results will include the user document whose email address is "someUser@company.com"
+// (assuming such a user exists in your data).
+```
+
+
+```csharp
+var putResult = store.Operations.Send(new PutCompareExchangeValueOperation<string>(
+    "blocked-address", "someUser@Company.com", 0));
+
+using (var session = store.OpenSession())
+{
+    var blockedUser = session.Advanced
+        .RawQuery<User>(@"
+        from 'Users'
+        where Email == cmpxchg('blocked-address')
+        ")
+        .FirstOrDefault();
+}
+```
+
+
+```csharp
+var putResult = await store.Operations.SendAsync(new PutCompareExchangeValueOperation<string>(
+    "blocked-address", "someUser@Company.com", 0));
+
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var blockedUser = await asyncSession.Advanced
+        .AsyncRawQuery<User>(@"
+        from 'Users'
+        where Email == cmpxchg('blocked-address')
+        ")
+        .FirstOrDefaultAsync();
+}
+```
+
+
+```sql
+from "Users"
+where Email == cmpxchg("blocked-address")
+limit 0, 1
+```
+
+
+```csharp
+public class User
+{
+    public string Id { get; set; }
+    public string Name { get; set; }
+    public string Email { get; set; }
+}
+```
+
+
+
+#### Example 2 - Filtering when the compare-exchange value is an object
+
+* The following example shows how to filter documents based on a compare-exchange value that is an _object_.
+  We query for users whose email ends with one of the domains stored in the object held by the compare-exchange item.
+
+* Retrieve the compare-exchange value outside the query, then use its content to build the filter logic.
+
+* Since this query uses filtering, RavenDB will automatically create an auto-index to serve it.
+  In this case, the auto-index `Auto/Users/ByEmail` will be created.
+
+
+
+```csharp
+// First, let's create a compare-exchange item with a value that is an object.
+// The object contains multiple blocked email domains for this example.
+var putResult = store.Operations.Send(new PutCompareExchangeValueOperation<BlockedDomains>(
+    "blocked-domains",
+    new BlockedDomains() { Domains = ["suspicious-company-1.com", "suspicious-company-2.org"]},
+    0));
+
+// Retrieve the compare-exchange value before running the query
+BlockedDomains blockedDomains = store.Operations.Send(
+    new GetCompareExchangeValueOperation<BlockedDomains>("blocked-domains")).Value;
+
+// Query the 'Users' collection
+// Find users whose email address ends with one of the blocked domains.
+// (The session does not need to be opened in cluster-wide mode)
+using (var session = store.OpenSession())
+{
+    var blockedUsers = session.Query<User>()
+        .Where(user =>
+            user.Email.EndsWith(blockedDomains.Domains[0]) ||
+            user.Email.EndsWith(blockedDomains.Domains[1]))
+        .ToList();
+}
+
+// The results will include users whose email address domain ends with
+// one of the values stored in the compare-exchange item,
+// assuming such users exist in your data.
+```
+
+
+```csharp
+// First, let's create a compare-exchange item with a value that is an object.
+// The object contains multiple blocked email domains for this example.
+var putResult = await store.Operations.SendAsync(new PutCompareExchangeValueOperation<BlockedDomains>(
+    "blocked-domains",
+    new BlockedDomains() { Domains = ["suspicious-company-1.com", "suspicious-company-2.org"]},
+    0));
+
+// Retrieve the compare-exchange value before running the query
+BlockedDomains blockedDomains = (await store.Operations.SendAsync(
+    new GetCompareExchangeValueOperation<BlockedDomains>("blocked-domains"))).Value;
+
+// Query the 'Users' collection
+// Find users whose email address ends with one of the blocked domains.
+// (The session does not need to be opened in cluster-wide mode)
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var blockedUsers = await asyncSession.Query<User>()
+        .Where(user =>
+            user.Email.EndsWith(blockedDomains.Domains[0]) ||
+            user.Email.EndsWith(blockedDomains.Domains[1]))
+        .ToListAsync();
+}
+
+// The results will include users whose email address domain ends with
+// one of the values stored in the compare-exchange item,
+// assuming such users exist in your data.
+```
+
+
+```csharp
+// First, let's create a compare-exchange item with a value that is an object.
+// The object contains multiple blocked email domains for this example.
+var putResult = store.Operations.Send(new PutCompareExchangeValueOperation<BlockedDomains>(
+    "blocked-domains",
+    new BlockedDomains() { Domains = ["suspicious-company-1.com", "suspicious-company-2.org"]},
+    0));
+
+// Retrieve the compare-exchange value before running the query
+BlockedDomains blockedDomains = store.Operations.Send(
+    new GetCompareExchangeValueOperation<BlockedDomains>("blocked-domains")).Value;
+
+// Query the 'Users' collection
+// Find users whose email address ends with one of the blocked domains.
+// (The session does not need to be opened in cluster-wide mode)
+using (var session = store.OpenSession())
+{
+    var blockedUsers = session.Advanced
+        .DocumentQuery<User>()
+        .WhereEndsWith(x => x.Email, blockedDomains.Domains[0])
+        .OrElse()
+        .WhereEndsWith(x => x.Email, blockedDomains.Domains[1])
+        .ToList();
+}
+
+// The results will include users whose email address domain ends with
+// one of the values stored in the compare-exchange item,
+// assuming such users exist in your data.
+```
+
+
+```csharp
+// First, let's create a compare-exchange item with a value that is an object.
+// The object contains multiple blocked email domains for this example.
+var putResult = await store.Operations.SendAsync(new PutCompareExchangeValueOperation<BlockedDomains>(
+    "blocked-domains",
+    new BlockedDomains() { Domains = ["suspicious-company-1.com", "suspicious-company-2.org"]},
+    0));
+
+// Retrieve the compare-exchange value before running the query
+BlockedDomains blockedDomains = (await store.Operations.SendAsync(
+    new GetCompareExchangeValueOperation<BlockedDomains>("blocked-domains"))).Value;
+
+// Query the 'Users' collection
+// Find users whose email address ends with one of the blocked domains.
+// (The session does not need to be opened in cluster-wide mode)
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var blockedUsers = await asyncSession.Advanced
+        .AsyncDocumentQuery<User>()
+        .WhereEndsWith(x => x.Email, blockedDomains.Domains[0])
+        .OrElse()
+        .WhereEndsWith(x => x.Email, blockedDomains.Domains[1])
+        .ToListAsync();
+}
+
+// The results will include users whose email address domain ends with
+// one of the values stored in the compare-exchange item,
+// assuming such users exist in your data.
+```
+
+
+```csharp
+var putResult = store.Operations.Send(new PutCompareExchangeValueOperation<BlockedDomains>(
+    "blocked-domains",
+    new BlockedDomains() { Domains = ["suspicious-company-1.com", "suspicious-company-2.org"] },
+    0));
+
+BlockedDomains blockedDomains = store.Operations.Send(
+    new GetCompareExchangeValueOperation<BlockedDomains>("blocked-domains")).Value;
+
+using (var session = store.OpenSession())
+{
+    var blockedUsers = session.Advanced
+        .RawQuery<User>(@"
+            from 'Users'
+            where endsWith(Email, 'suspicious-company-1.com')
+            or
+            endsWith(Email, 'suspicious-company-2.org')
+        ")
+        .ToList();
+}
+```
+
+
+```csharp
+var putResult = await store.Operations.SendAsync(new PutCompareExchangeValueOperation<BlockedDomains>(
+    "blocked-domains",
+    new BlockedDomains() { Domains = ["suspicious-company-1.com", "suspicious-company-2.org"] },
+    0));
+
+BlockedDomains blockedDomains = (await store.Operations.SendAsync(
+    new GetCompareExchangeValueOperation<BlockedDomains>("blocked-domains"))).Value;
+
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var blockedUsers = await asyncSession.Advanced
+        .AsyncRawQuery<User>(@"
+            from 'Users'
+            where endsWith(Email, 'suspicious-company-1.com')
+            or
+            endsWith(Email, 'suspicious-company-2.org')
+        ")
+        .ToListAsync();
+}
+```
+
+
+```sql
+from "Users"
+where endsWith(Email, "suspicious-company-1.com")
+or
+endsWith(Email, "suspicious-company-2.org")
+```
+
+
+```csharp
+public class User
+{
+    public string Id { get; set; }
+    public string Name { get; set; }
+    public string Email { get; set; }
+}
+
+// The class stored in the compare-exchange item's value in Example 2:
+public class BlockedDomains
+{
+    public string[] Domains { get; set; }
+}
+```
+
+
+
+---
+
+## Syntax
+
+### `RavenQuery.CmpXchg()`
+This method can be used to filter a LINQ query
+or to project fields from a compare-exchange value into the query results.
+
+
+```csharp
+// Get a compare-exchange value by key.
+public static T CmpXchg<T>(string key)
+```
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-in-dynamic-queries-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-in-dynamic-queries-nodejs.mdx
new file mode 100644
index 0000000000..2209ef9e56
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-in-dynamic-queries-nodejs.mdx
@@ -0,0 +1,256 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can use compare-exchange values in **dynamic queries** in two ways:
+    * **Project** compare-exchange values into the query results.
+    * **Filter** documents based on compare-exchange values.
+
+* Dynamic queries are queries that do not rely on a predefined static index.
+  When you run a dynamic query with a filtering condition, RavenDB automatically creates an auto-index to serve it.
+  Learn more about dynamic queries in the [Query overview](../client-api/session/querying/how-to-query).
+ +* In this article: + * [Projecting compare-exchange values in query results](../compare-exchange/cmpxchg-in-dynamic-queries#projecting-compare-exchange-values-in-query-results) + * [Filtering by compare-exchange value](../compare-exchange/cmpxchg-in-dynamic-queries#filtering-by-compare-exchange-value) + * [Example 1 - Filtering when the compare-exchange value is a string](../compare-exchange/cmpxchg-in-dynamic-queries#example-1---filtering-when-the-compare-exchange-value-is-a-string) + * [Example 2 - Filtering when the compare-exchange value is an object](../compare-exchange/cmpxchg-in-dynamic-queries#example-2---filtering-when-the-compare-exchange-value-is-an-object) + * [Syntax](../compare-exchange/cmpxchg-in-dynamic-queries#syntax) + + + +--- + +## Projecting compare-exchange values in query results + +* You can project values from compare-exchange items alongside fields from the queried documents. + +* The following example is based on the sample data described in: [Create sample compare-exchange items](../compare-exchange/indexing-cmpxchg-values#create-sample-compare-exchange-items). + In this example, we want to retrieve the current number of guests in each room. + We query all _HotelRoom_ documents and project: + * the number of guests (from the compare-exchange item's value), and + * the room number (from the document field). + +* No auto-index is created in this case, since the query doesn’t apply any filters. + + + +```js +// The session does not need to be opened in cluster-wide mode +const session = documentStore.openSession(); + +// Define the projection using QueryData: +const queryData = QueryData.customFunction("room", `{ + // Project from compare-exchange: call cmpxchg with RoomNumber + CurrentNumberOfGuests: cmpxchg(room.RoomNumber).CurrentNumberOfGuests, + + // Project from document field + RoomNumber: room.RoomNumber +}`); + +const numberOfGuestsPerRoom = await session + .query({ collection: "HotelRooms" }) + // Project content: + .selectFields(queryData) + .all(); +``` + + +```js +const session = documentStore.openSession(); + +const numberOfGuestsPerRoom = await session.advanced + .rawQuery(` + from 'HotelRooms' as x + select { + CurrentNumberOfGuests : cmpxchg(x.RoomNumber).CurrentNumberOfGuests, + RoomNumber : x.RoomNumber + } + `) + .all(); +``` + + +```sql +from "HotelRooms" as x +select { + CurrentNumberOfGuests : cmpxchg(x.RoomNumber).CurrentNumberOfGuests, + RoomNumber : x.RoomNumber +} +``` + + + +--- + +## Filtering by compare-exchange value + +#### Example 1 - Filtering when the compare-exchange value is a string + +* You can filter documents based on a compare-exchange value. + For example, to find a user with a blocked email address, + you can filter by the compare-exchange value that stores the blocked email. + +* **Limitation**: + When filtering with `cmpxchg` inside the `where` predicate (as shown in this example), + the compare-exchange value must be a _string_. + If your compare-exchange value is of another type, you can still filter the results - + see the [next example](../compare-exchange/cmpxchg-in-dynamic-queries#example-2---filtering-when-the-compare-exchange-value-is-an-object). + +* Since this query uses filtering, RavenDB will automatically create an auto-index to serve it. + In this case, the auto-index `Auto/Users/ByEmail` will be created. 
+ + + +```js +// First, let's create a compare-exchange item with a blocked email address for this example: +const putResult = await documentStore.operations.send( + new PutCompareExchangeValueOperation("blocked-address", "someUser@Company.com", 0) +); + +// Query the 'Users' collection +// Find a user with an email address that matches the blocked address: +// (The session does not need to be opened in cluster-wide mode) +const session = documentStore.openSession(); + +const blockedUser = await session.advanced + .rawQuery(` + from 'Users' + where Email == cmpxchg('blocked-address') + `) + .firstOrNull(); + +// The results will include the user document whose email address is "someUser@company.com" +// (assuming such a user exists in your data). +``` + + +```sql +from "Users" +where Email == cmpxchg("blocked-address") +limit 0, 1 +``` + + +```js +class User { + constructor(id, name, email) { + this.Id = id; + this.Name = name; + this.Email = email; + } +} +``` + + + +#### Example 2 - Filtering when the compare-exchange value is an object + +* The following example shows how to filter documents based on a compare-exchange value that is an _object_. + We query for users whose email ends with one of the domains stored in the object held by the compare-exchange item. + +* Retrieve the compare-exchange value outside the query, then use its content to build the filter logic. + +* Since this query uses filtering, RavenDB will automatically create an auto-index to serve it. + In this case, the auto-index `Auto/Users/ByEmail` will be created. + + + +```js +// First, let's create a compare-exchange item with a value that is an object. +// The object contains multiple blocked email domains for this example. +const blockedDomainsValue = { + Domains: ["suspicious-company-1.com", "suspicious-company-2.org"] +}; +const putResult = await documentStore.operations.send( + new PutCompareExchangeValueOperation("blocked-domains", blockedDomainsValue, 0) +); + +// Retrieve the compare-exchange value before running the query +const blockedDomainsResult = await documentStore.operations.send( + new GetCompareExchangeValueOperation("blocked-domains") +); +const blockedDomains = blockedDomainsResult.value; + +// Query the 'Users' collection +// Find users whose email address ends with one of the blocked domains. +// (The session does not need to be opened in cluster-wide mode) +const session = documentStore.openSession(); + +const blockedUsers = await session + .query({ collection: "Users" }) + .whereEndsWith("Email", blockedDomains.Domains[0]) + .orElse() + .whereEndsWith("Email", blockedDomains.Domains[1]) + .all(); + +// The results will include users whose email address domain ends with +// one of the values stored in the compare-exchange item, +// assuming such users exist in your data. 
+```
+
+
+```js
+const blockedDomainsValue = {
+    Domains: ["suspicious-company-1.com", "suspicious-company-2.org"]
+};
+
+const putResult = await documentStore.operations.send(
+    new PutCompareExchangeValueOperation("blocked-domains", blockedDomainsValue, 0)
+);
+
+const blockedDomainsResult = await documentStore.operations.send(
+    new GetCompareExchangeValueOperation("blocked-domains")
+);
+const blockedDomains = blockedDomainsResult.value;
+
+const session = documentStore.openSession();
+
+const blockedUsers = await session.advanced
+    .rawQuery(`
+        from 'Users'
+        where endsWith(Email, 'suspicious-company-1.com')
+        or endsWith(Email, 'suspicious-company-2.org')
+    `)
+    .all();
+```
+
+
+```sql
+from "Users"
+where endsWith(Email, "suspicious-company-1.com")
+or
+endsWith(Email, "suspicious-company-2.org")
+```
+
+
+```js
+class User {
+    constructor(id, name, email) {
+        this.Id = id;
+        this.Name = name;
+        this.Email = email;
+    }
+}
+```
+
+
+
+---
+
+## Syntax
+
+### `cmpxchg()`
+This function can be used to filter a query
+or to project fields from a compare-exchange value into the query results.
+
+
+```js
+// Get a compare-exchange value by key.
+// Used inside 'where', 'selectFields', or rawQuery.
+cmpxchg(key)
+```
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-item-expiration-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-item-expiration-csharp.mdx
new file mode 100644
index 0000000000..8236c3b6ab
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-item-expiration-csharp.mdx
@@ -0,0 +1,140 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Compare-exchange items can be set to be deleted automatically at a future time.
+
+* **To schedule expiration for a compare-exchange item**, set the `@expires` field in the item's **metadata**.
+  This can be done when creating a new item or updating an existing one.
+
+* RavenDB scans the database periodically to remove expired items.
+  Any compare-exchange item whose `@expires` timestamp has passed at the time of the scan will be automatically removed.
+
+* The **scan frequency** is configurable -
+  see the [Cluster.CompareExchangeExpiredDeleteFrequencyInSec](../compare-exchange/configuration#clustercompareexchangeexpireddeletefrequencyinsec) configuration key.
+  The default is 60 seconds (see the configuration sketch below).
+
+* To manually remove a compare-exchange item, see [Delete compare-exchange items](../compare-exchange/delete-cmpxchg-items).
+
+* Note: The compare-exchange expiration feature is not related to [document expiration](../server/extensions/expiration).
+  You do NOT need to enable document expiration in order to use compare-exchange expiration.
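+
+For illustration - a minimal sketch of how such a configuration key is typically set in the server's `settings.json` file (the `120` value here is an arbitrary example, not the default):
+
+```json
+{
+    "Cluster.CompareExchangeExpiredDeleteFrequencyInSec": "120"
+}
+```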
+
+---
+
+* In this article:
+    * [Add expiration date using the **Client API**](../compare-exchange/cmpxchg-expiration#add-expiration-date-using-the-client-api)
+    * [Add expiration date using the **Studio**](../compare-exchange/cmpxchg-expiration#add-expiration-date-using-the-studio)
+    * [Syntax](../compare-exchange/cmpxchg-expiration#syntax)
+
+
+
+---
+
+## Add expiration date using the Client API
+
+
+
+```csharp
+// The session must be opened in cluster-wide mode
+using (var session = store.OpenSession(
+    new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+{
+    // Call 'CreateCompareExchangeValue', specify the item's KEY and VALUE
+    CompareExchangeValue<string> item =
+        session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+            key: "user1-name@example.com",
+            value: "users/1"
+        );
+
+    // Add METADATA fields to the item
+    // Set a future UTC DateTime in the `@expires` field to schedule expiration
+    // "Constants.Documents.Metadata.Expires" = "@expires"
+    item.Metadata[Constants.Documents.Metadata.Expires] = DateTime.UtcNow.AddDays(7);
+
+    // The item will be created on the server once 'SaveChanges' is called
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+// The session must be opened in cluster-wide mode
+using (var asyncSession = store.OpenAsyncSession(
+    new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+{
+    // Call 'CreateCompareExchangeValue', specify the item's KEY and VALUE
+    CompareExchangeValue<string> item =
+        asyncSession.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+            key: "user1-name@example.com",
+            value: "users/1"
+        );
+
+    // Add METADATA fields to the item
+    // Set a future UTC DateTime in the `@expires` field to schedule expiration
+    // "Constants.Documents.Metadata.Expires" = "@expires"
+    item.Metadata[Constants.Documents.Metadata.Expires] = DateTime.UtcNow.AddDays(7);
+
+    // The item will be created on the server once 'SaveChangesAsync' is called
+    await asyncSession.SaveChangesAsync();
+}
+```
+
+
+```csharp
+// Define the metadata with the future expiration date
+var metadata = new MetadataAsDictionary
+{
+    // Constants.Documents.Metadata.Expires = "@expires"
+    // Specify the expiration time (UTC) in ISO 8601 format
+    { Constants.Documents.Metadata.Expires, DateTime.UtcNow.AddDays(7).ToString("o") }
+};
+
+// Pass the metadata when creating the item
+var putCmpXchgOp = new PutCompareExchangeValueOperation<string>(
+    "user1-name@example.com", "users/1", 0, metadata);
+
+// Execute the operation
+CompareExchangeResult<string> putResult = store.Operations.Send(putCmpXchgOp);
+```
+
+
+```csharp
+// Define the metadata with the future expiration date
+var metadata = new MetadataAsDictionary
+{
+    // Constants.Documents.Metadata.Expires = "@expires"
+    // Specify the expiration time (UTC) in ISO 8601 format
+    { Constants.Documents.Metadata.Expires, DateTime.UtcNow.AddDays(7).ToString("o") }
+};
+
+// Pass the metadata when creating the item
+var putCmpXchgOp = new PutCompareExchangeValueOperation<string>(
+    "user1-name@example.com", "users/1", 0, metadata);
+
+// Execute the operation
+CompareExchangeResult<string> putResult = await store.Operations.SendAsync(putCmpXchgOp);
+```
+
+
+
+---
+
+## Add expiration date using the Studio
+
+* You can set or update the expiration date of a compare-exchange item directly from the Studio.
+
+* Go to **Documents > Compare Exchange**.
+  Edit an existing item or create a new one.
+  In the item's metadata, set the `@expires` field to a future UTC date/time (ISO 8601 format).
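+
+  For illustration, the item's metadata would then contain an entry such as the following (the date shown is an arbitrary example):
+
+  ```json
+  {
+      "@expires": "2030-01-01T00:00:00.0000000Z"
+  }
+  ```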
+
+![The compare-exchange view](../assets/set-expiration.png)
+
+---
+
+## Syntax
+
+* The syntax for **creating** a compare-exchange item is available in [Create compare-exchange item - Syntax](../compare-exchange/create-cmpxchg-item#syntax)
+* The syntax for **updating** a compare-exchange item is available in [Update compare-exchange item - Syntax](../compare-exchange/update-cmpxchg-item#syntax)
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-item-expiration-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-item-expiration-nodejs.mdx
new file mode 100644
index 0000000000..1265d951ae
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_cmpxchg-item-expiration-nodejs.mdx
@@ -0,0 +1,98 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Compare-exchange items can be set to be deleted automatically at a future time.
+
+* **To schedule expiration for a compare-exchange item**, set the `@expires` field in the item's **metadata**.
+  This can be done when creating a new item or updating an existing one.
+
+* RavenDB scans the database periodically to remove expired items.
+  Any compare-exchange item whose `@expires` timestamp has passed at the time of the scan will be automatically removed.
+
+* The **scan frequency** is configurable -
+  see the [Cluster.CompareExchangeExpiredDeleteFrequencyInSec](../compare-exchange/configuration#clustercompareexchangeexpireddeletefrequencyinsec) configuration key.
+  The default is 60 seconds.
+
+* To manually remove a compare-exchange item, see [Delete compare-exchange items](../compare-exchange/delete-cmpxchg-items).
+
+* Note: The compare-exchange expiration feature is not related to [document expiration](../server/extensions/expiration).
+  You do NOT need to enable document expiration in order to use compare-exchange expiration.
+
+---
+
+* In this article:
+    * [Add expiration date using the **Client API**](../compare-exchange/cmpxchg-expiration#add-expiration-date-using-the-client-api)
+    * [Add expiration date using the **Studio**](../compare-exchange/cmpxchg-expiration#add-expiration-date-using-the-studio)
+    * [Syntax](../compare-exchange/cmpxchg-expiration#syntax)
+
+
+
+---
+
+## Add expiration date using the Client API
+
+
+
+```js
+// The session must be opened in cluster-wide mode
+const session = documentStore.openSession({
+    transactionMode: "ClusterWide"
+});
+
+// Call 'createCompareExchangeValue', specify the item's KEY and VALUE
+const item = session.advanced.clusterTransaction.createCompareExchangeValue(
+    "user1-name@example.com", "users/1" // key, value
+);
+
+// Add METADATA fields to the item
+// Set a future UTC DateTime in the `@expires` field to schedule expiration
+// "CONSTANTS.Documents.Metadata.EXPIRES" = "@expires"
+const expireAt = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString(); // expire in 7 days
+item.metadata[CONSTANTS.Documents.Metadata.EXPIRES] = expireAt;
+
+// The item will be created on the server once 'saveChanges' is called
+await session.saveChanges();
+```
+
+
+```js
+// Define the metadata with the future expiration date
+const expireAt = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString(); // expire in 7 days
+
+const metadata = {
+    // CONSTANTS.Documents.Metadata.EXPIRES = "@expires"
+    [CONSTANTS.Documents.Metadata.EXPIRES]: expireAt
+};
+
+// Pass the metadata when creating the item
+const putCmpXchgOp = new PutCompareExchangeValueOperation(
+    "user1-name@example.com", "users/1", 0, metadata);
+
+// Execute the operation
+const result = await documentStore.operations.send(putCmpXchgOp);
+```
+
+
+
+---
+
+## Add expiration date using the Studio
+
+* You can set or update the expiration date of a compare-exchange item directly from the Studio.
+
+* Go to **Documents > Compare Exchange**.
+  Edit an existing item or create a new one.
+  In the item's metadata, set the `@expires` field to a future UTC date/time (ISO 8601 format).
+
+![The compare-exchange view](../assets/set-expiration.png)
+
+---
+
+## Syntax
+
+* The syntax for **creating** a compare-exchange item is available in [Create compare-exchange item - Syntax](../compare-exchange/create-cmpxchg-item#syntax)
+* The syntax for **updating** a compare-exchange item is available in [Update compare-exchange item - Syntax](../compare-exchange/update-cmpxchg-item#syntax)
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-csharp.mdx
new file mode 100644
index 0000000000..f54ee96665
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-csharp.mdx
@@ -0,0 +1,571 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A new compare-exchange item can be created in the following ways:
+    * Using a cluster-wide session
+    * Using a store operation
+    * Using the Studio
+
+* To create a new compare-exchange item, you must provide:
+    * **A unique key** (string, up to 512 bytes)
+    * **An associated value** (number, string, array, or any valid JSON object)
+    * You can optionally add **metadata** (a valid JSON object) to store extra information with the item.
+      A common use case is to set an expiration time for the compare-exchange item in its metadata.
+      Learn more in [Set expiration for compare-exchange items](../compare-exchange/cmpxchg-expiration).
+
+* To modify an existing compare-exchange item, see: [Update compare-exchange item](../compare-exchange/update-cmpxchg-item).
+
+* In this article:
+    * [Create item using a **cluster-wide session**](../compare-exchange/create-cmpxchg-items#create-item-using-a-cluster-wide-session)
+        * [Ex.1 - Create compare-exchange item - with string value](../compare-exchange/create-cmpxchg-items#ex1---create-compare-exchange-item---with-string-value)
+        * [Ex.2 - Create compare-exchange item - with custom object value](../compare-exchange/create-cmpxchg-items#ex2---create-compare-exchange-item---with-custom-object-value)
+        * [Ex.3 - Create compare-exchange item - with metadata](../compare-exchange/create-cmpxchg-items#ex3---create-compare-exchange-item---with-metadata)
+        * [Ex.4 - Create multiple items](../compare-exchange/create-cmpxchg-items#ex4---create-multiple-items)
+    * [Create item using a **store operation**](../compare-exchange/create-cmpxchg-items#create-item-using-a-store-operation)
+        * [Ex.5 - Create compare-exchange item - with string value](../compare-exchange/create-cmpxchg-items#ex5---create-compare-exchange-item---with-string-value)
+        * [Ex.6 - Create compare-exchange item - with custom object value](../compare-exchange/create-cmpxchg-items#ex6---create-compare-exchange-item---with-custom-object-value)
+        * [Ex.7 - Create compare-exchange item - with metadata](../compare-exchange/create-cmpxchg-items#ex7---create-compare-exchange-item---with-metadata)
+    * [Create item using the **Studio**](../compare-exchange/create-cmpxchg-items#create-item-using-the-studio)
+    * [Syntax](../compare-exchange/create-cmpxchg-items#syntax)
+
+
+
+
+---
+
+## Create item using a cluster-wide session
+
+* Create compare-exchange items using a cluster-wide session when you want the creation to be part of a transaction committed via `SaveChanges()`.
+  This is suitable if you want to include compare-exchange item creation and document changes in the same transaction.
+  Learn more about cluster-wide sessions in [Cluster transactions - overview](../client-api/session/cluster-transaction/overview).
+
+* Use `CreateCompareExchangeValue()` to register the creation of a new compare-exchange item in the session.
+  The item will be created as part of the cluster-wide transaction when _SaveChanges()_ is called.
+
+* Exceptions:
+  An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode.
+  If the key already exists, _SaveChanges()_ will throw a `ClusterTransactionConcurrencyException`.
+
+* Examples:
+
+  #### Ex.1 - Create compare-exchange item - with string value
+
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Call 'CreateCompareExchangeValue' to register the creation of a new compare-exchange item
+      // as part of the cluster-wide transaction. Specify the item's KEY and VALUE.
+      // The item will be created only when 'SaveChanges' is called.
+      CompareExchangeValue<string> item =
+          session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: "users/1"
+          );
+
+      // Commit the cluster-wide transaction.
+      // This will create the compare-exchange item,
+      // or throw a 'ClusterTransactionConcurrencyException' if the key already exists.
+      session.SaveChanges();
+  }
+  ```
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Call 'CreateCompareExchangeValue' to register the creation of a new compare-exchange item
+      // as part of the cluster-wide transaction. Specify the item's KEY and VALUE.
+      // The item will be created only when 'SaveChanges' is called.
+      CompareExchangeValue<string> item =
+          asyncSession.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: "users/1"
+          );
+
+      // Commit the cluster-wide transaction.
+      // This will create the compare-exchange item,
+      // or throw a 'ClusterTransactionConcurrencyException' if the key already exists.
+      await asyncSession.SaveChangesAsync();
+  }
+  ```
+
+
+  #### Ex.2 - Create compare-exchange item - with custom object value
+
+
+
+  ```csharp
+  // Define the object to be stored as the value
+  var user1Info = new UserInfo()
+  {
+      UserDocumentId = "users/1",
+      AdditionalInfo = "someInfo.."
+  };
+
+  // The session must be opened in cluster-wide mode
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Call 'CreateCompareExchangeValue' to register the creation of a new compare-exchange item
+      // as part of the cluster-wide transaction. Specify the item's KEY and VALUE.
+      // The item will be created only when 'SaveChanges' is called.
+      CompareExchangeValue<UserInfo> item =
+          session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: user1Info // Pass the object instance
+          );
+
+      // Commit the cluster-wide transaction.
+      // This will create the compare-exchange item,
+      // or throw a 'ClusterTransactionConcurrencyException' if the key already exists.
+      session.SaveChanges();
+  }
+  ```
+
+
+  ```csharp
+  // Define the object to be stored as the value
+  var user1Info = new UserInfo()
+  {
+      UserDocumentId = "users/1",
+      AdditionalInfo = "someInfo.."
+  };
+
+  // The session must be opened in cluster-wide mode
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Call 'CreateCompareExchangeValue' to register the creation of a new compare-exchange item
+      // as part of the cluster-wide transaction. Specify the item's KEY and VALUE.
+      // The item will be created only when 'SaveChanges' is called.
+      CompareExchangeValue<UserInfo> item =
+          asyncSession.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: user1Info // Pass the object instance
+          );
+
+      // Commit the cluster-wide transaction.
+      // This will create the compare-exchange item,
+      // or throw a 'ClusterTransactionConcurrencyException' if the key already exists.
+      await asyncSession.SaveChangesAsync();
+  }
+  ```
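+
+  The `UserInfo` class itself is not shown in the snippets above; a minimal sketch of such a class, inferred from the properties used in this example:
+
+  ```csharp
+  public class UserInfo
+  {
+      public string UserDocumentId { get; set; }
+      public string AdditionalInfo { get; set; }
+  }
+  ```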
+
+
+  #### Ex.3 - Create compare-exchange item - with metadata
+
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Call 'CreateCompareExchangeValue', specify the item's KEY and VALUE:
+      CompareExchangeValue<string> item =
+          session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: "users/1"
+          );
+
+      // Add METADATA fields to the item:
+      item.Metadata["field-name"] = "some value";
+      item.Metadata["email-type"] = "work email"; // e.g. describe the email type
+
+      // Commit the cluster-wide transaction.
+      // This will create the compare-exchange item,
+      // or throw a 'ClusterTransactionConcurrencyException' if the key already exists.
+      session.SaveChanges();
+  }
+  ```
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Call 'CreateCompareExchangeValue', specify the item's KEY and VALUE:
+      CompareExchangeValue<string> item =
+          asyncSession.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: "users/1"
+          );
+
+      // Add METADATA fields to the item:
+      item.Metadata["field-name"] = "some value";
+      item.Metadata["email-type"] = "work email"; // e.g. describe the email type
+
+      // Commit the cluster-wide transaction.
+      // This will create the compare-exchange item,
+      // or throw a 'ClusterTransactionConcurrencyException' if the key already exists.
+      await asyncSession.SaveChangesAsync();
+  }
+  ```
+
+
+  #### Ex.4 - Create multiple items
+
+  You can create multiple compare-exchange items in the same cluster-wide transaction.
+
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // You can create multiple compare-exchange items before calling 'SaveChanges'.
+      // Call 'CreateCompareExchangeValue' for each item you want to create in the transaction.
+      session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+          key: "user7-name@example.com", value: "users/7"
+      );
+
+      session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+          key: "user8-name@example.com", value: "users/8"
+      );
+
+      session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+          key: "user9-name@example.com", value: "users/9"
+      );
+
+      // All three items will be created atomically as part of the same transaction.
+      // If any creation fails (e.g., due to an existing key), the entire transaction is rolled back
+      // and none of the new items will be created.
+      session.SaveChanges();
+  }
+  ```
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // You can create multiple compare-exchange items before calling 'SaveChangesAsync'.
+      // Call 'CreateCompareExchangeValue' for each item you want to create in the transaction.
+      asyncSession.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+          key: "user7-name@example.com", value: "users/7"
+      );
+
+      asyncSession.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+          key: "user8-name@example.com", value: "users/8"
+      );
+
+      asyncSession.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+          key: "user9-name@example.com", value: "users/9"
+      );
+
+      // All three items will be created atomically as part of the same transaction.
+      // If any creation fails (e.g., due to an existing key), the entire transaction is rolled back
+      // and none of the new items will be created.
+      await asyncSession.SaveChangesAsync();
+  }
+  ```
+
+
+
+---
+
+## Create item using a store operation
+
+* Use the `PutCompareExchangeValueOperation` [store operation](../client-api/operations/what-are-operations) to create a compare-exchange item independently,
+  without opening a session.
+  This is ideal for stand-alone tasks that don't require batching multiple commands into a single transactional session.
+
+* Note:
+  _PutCompareExchangeValueOperation_ is used for both **creating** and [modifying](../compare-exchange/update-cmpxchg-item) compare-exchange items.
+  The intent of the operation is determined by the index you pass:
+    * An index of `0` indicates a **create** operation.
+    * A non-zero index indicates a **modify** operation.
+
+* Creating a new compare-exchange item will succeed only if:
+    * The passed index is 0, and
+    * The specified key does not already exist in the database.
+
+* The operation will return a failed result (no exception is thrown) in the following cases:
+    * A new key is provided, but the index is not 0.
+    * The key already exists, even if the passed index is 0.
+
+* Examples:
+
+  #### Ex.5 - Create compare-exchange item - with string value
+
+
+
+  ```csharp
+  // Create a new compare-exchange item:
+  // ===================================
+
+  // Define the put compare-exchange operation. Pass:
+  // * KEY: a new unique string identifier (e.g. a user's email)
+  // * VALUE: an associated value (e.g. the user's document ID)
+  // * INDEX: pass '0' to indicate that this is a new compare-exchange item
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<string>(
+      "user1-name@example.com", "users/1", 0);
+
+  // Execute the operation by passing it to Operations.Send
+  CompareExchangeResult<string> putResult = store.Operations.Send(putCmpXchgOp);
+
+  // Check results
+  bool successful = putResult.Successful; // Has operation succeeded
+  long indexForItem = putResult.Index;    // The version number assigned to the new item
+
+  // If 'successful' is true, then a new compare-exchange item has been created
+  // with the unique email key and the associated value.
+  ```
+
+
+  ```csharp
+  // Create a new compare-exchange item:
+  // ===================================
+
+  // Define the put compare-exchange operation. Pass:
+  // * KEY: a new unique string identifier (e.g. a user's email)
+  // * VALUE: an associated value (e.g. the user's document ID)
+  // * INDEX: pass '0' to indicate that this is a new compare-exchange item
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<string>(
+      "user1-name@example.com", "users/1", 0);
+
+  // Execute the operation by passing it to Operations.SendAsync
+  CompareExchangeResult<string> putResult = await store.Operations.SendAsync(putCmpXchgOp);
+
+  // Check results
+  bool successful = putResult.Successful; // Has operation succeeded
+  long indexForItem = putResult.Index;    // The version number assigned to the new item
+
+  // If 'successful' is true, then a new compare-exchange item has been created
+  // with the unique email key and the associated value.
+  ```
+
+
+  #### Ex.6 - Create compare-exchange item - with custom object value
+
+
+
+  ```csharp
+  // Create a new compare-exchange item:
+  // ===================================
+
+  // Define the object to be stored as the value
+  var user1Info = new UserInfo()
+  {
+      UserDocumentId = "users/1",
+      AdditionalInfo = "someInfo.."
+  };
+
+  // Define the put compare-exchange operation.
+  // Pass the key, the object instance, and '0' to indicate that this is a new item.
+  // Specify the object type in the generic parameter.
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<UserInfo>(
+      "user1-name@example.com", user1Info, 0);
+
+  // Execute the operation by passing it to Operations.Send
+  CompareExchangeResult<UserInfo> putResult = store.Operations.Send(putCmpXchgOp);
+
+  // Check results
+  bool successful = putResult.Successful; // Has operation succeeded
+  long indexForItem = putResult.Index;    // The version number assigned to the new item
+
+  // If 'successful' is true, then a new compare-exchange item has been created
+  // with the unique email key and the associated value.
+  ```
+
+
+  ```csharp
+  // Create a new compare-exchange item:
+  // ===================================
+
+  // Define the object to be stored as the value
+  var user1Info = new UserInfo()
+  {
+      UserDocumentId = "users/1",
+      AdditionalInfo = "someInfo.."
+  };
+
+  // Define the put compare-exchange operation.
+  // Pass the key, the object instance, and '0' to indicate that this is a new item.
+  // Specify the object type in the generic parameter.
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<UserInfo>(
+      "user1-name@example.com", user1Info, 0);
+
+  // Execute the operation by passing it to Operations.SendAsync
+  CompareExchangeResult<UserInfo> putResult = await store.Operations.SendAsync(putCmpXchgOp);
+
+  // Check results
+  bool successful = putResult.Successful; // Has operation succeeded
+  long indexForItem = putResult.Index;    // The version number assigned to the new item
+
+  // If 'successful' is true, then a new compare-exchange item has been created
+  // with the unique email key and the associated value.
+  ```
+
+
+  #### Ex.7 - Create compare-exchange item - with metadata
+
+
+
+  ```csharp
+  // Create a new compare-exchange item with metadata:
+  // =================================================
+
+  // Define the metadata - must be a valid JSON object
+  var metadata = new MetadataAsDictionary
+  {
+      { "email-type", "work email" }
+  };
+
+  // Define the put compare-exchange operation.
+  // Pass a 4th parameter with the metadata object.
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<string>(
+      "user1-name@example.com", "users/1", 0, metadata);
+
+  // Execute the operation by passing it to Operations.Send
+  CompareExchangeResult<string> putResult = store.Operations.Send(putCmpXchgOp);
+
+  // Check results
+  bool successful = putResult.Successful; // Has operation succeeded
+  long indexForItem = putResult.Index;    // The version number assigned to the new item
+
+  // If successful is true then a new compare-exchange item has been created
+  // with the unique key, value, and metadata.
+  ```
+
+
+  ```csharp
+  // Create a new compare-exchange item with metadata:
+  // =================================================
+
+  // Define the metadata - must be a valid JSON object
+  var metadata = new MetadataAsDictionary
+  {
+      { "email-type", "work email" }
+  };
+
+  // Define the put compare-exchange operation.
+  // Pass a 4th parameter with the metadata object.
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<string>(
+      "user1-name@example.com", "users/1", 0, metadata);
+
+  // Execute the operation by passing it to Operations.SendAsync
+  CompareExchangeResult<string> putResult = await store.Operations.SendAsync(putCmpXchgOp);
+
+  // Check results
+  bool successful = putResult.Successful; // Has operation succeeded
+  long indexForItem = putResult.Index;    // The version number assigned to the new item
+
+  // If successful is true then a new compare-exchange item has been created
+  // with the unique key, value, and metadata.
+  ```
+
+
+
+---
+
+## Create item using the Studio
+
+To create compare-exchange items using the Studio, go to **Documents > Compare Exchange**.
+
+![The compare-exchange view](../assets/create-new-cmpxchg-1.png)
+
+![The compare-exchange view](../assets/create-new-cmpxchg-2.png)
+
+1. **Key**
+   Enter a unique identifier for the compare-exchange item (up to 512 bytes).
+   This key must be unique across the entire database.
+2. **Value**
+   Enter the value to associate with the key.
+   Can be a number, string, array, or any valid JSON object.
+3. **Metadata** (optional)
+   Add any additional data you want to store with the item.
+   Must be a valid JSON object.
+   Can be used to [set expiration time](../compare-exchange/cmpxchg-expiration) for the compare-exchange item.
+4. **Save**
+   Click to create the compare-exchange item.
+   If the key already exists, an error message will be shown.
+
+---
+
+## Syntax
+
+---
+
+### `PutCompareExchangeValueOperation`
+Create compare-exchange item using a store operation:
+
+
+```csharp
+public PutCompareExchangeValueOperation(
+    string key, T value, long index, IMetadataDictionary metadata = null)
+```
+
+
+| Parameter | Type | Description |
+|--------------|-----------------------|-------------|
+| **key** | `string` |<br/>
  • A unique identifier in the database scope.
  • Can be up to 512 bytes.
| +| **value** | `T` |
  • A value to be saved for the specified _key_.
  • Can be any value (number, string, array, or any valid JSON object).
| +| **index** | `long` |
  • Pass `0` to create a new key.
  • When updating an existing key, pass the current number for concurrency control.
| +| **metadata** | `IMetadataDictionary` |
  • Optional metadata to be saved for the specified _key_.
  • Must be a valid JSON object.
|
+
+
+**Returned object**:
+
+
+```csharp
+public class CompareExchangeResult<T>
+{
+    public bool Successful;
+    public T Value;
+    public long Index;
+}
+```
+
+
+| Return Value | Type | Description |
+|---------------|--------|-------------|
+| **Successful**| `bool` |<br/>
  • `true` if the put operation has completed successfully.
  • `false` if the put operation has failed.
| +| **Value** | `T` |
  • Upon success - the value of the compare-exchange item that was saved.
  • Upon failure - the existing value on the server.
| +| **Index** | `long` |
  • The compare-exchange item's version.
  • This number increases with each successful modification of the `value` or `metadata`.
  • Upon success - the updated version of the compare-exchange item that was saved.
  • Upon failure - the existing version number on the server.
|
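+
+For illustration - a minimal sketch (not part of the original examples) showing how the returned `Index` is typically used: pass `0` when creating, then pass the returned version when later modifying the same key, so the modification succeeds only if the item was not changed in the meantime:
+
+```csharp
+// Create a new item; pass index 0 because the key does not exist yet.
+CompareExchangeResult<string> createResult = store.Operations.Send(
+    new PutCompareExchangeValueOperation<string>("user1-name@example.com", "users/1", 0));
+
+if (createResult.Successful)
+{
+    // Modify the item; pass the version returned from the create
+    // so the update is applied only if no one else modified the item since.
+    CompareExchangeResult<string> updateResult = store.Operations.Send(
+        new PutCompareExchangeValueOperation<string>(
+            "user1-name@example.com", "users/2", createResult.Index));
+}
+```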
+
+---
+
+### `CreateCompareExchangeValue`
+Create compare-exchange item using cluster-wide session:
+
+
+```csharp
+session.Advanced.ClusterTransaction.CreateCompareExchangeValue<T>(key, value);
+```
+
+
+| Parameter | Type | Description |
+|------------|----------|--------------------------------------------------------------------|
+| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. |
+| **value** | `T` | The associated value to store for the key.<br/>Can be a number, string, array, or any valid JSON object. |
+
+| `CreateCompareExchangeValue` returns: | Description |
+|---------------------------|---------------------------------------------------------------------------------------------------------------------|
+| `CompareExchangeValue<T>` | The compare-exchange item that is added to the transaction.<br/>It will be created when `SaveChanges()` is called. |
+
+The returned `CompareExchangeValue<T>` contains:
+
+| Property | Type | Description |
+|------------|----------|--------------------------------------------------------------------|
+| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. |
+| **value** | `T` | The value associated with the key. |
+| **index** | `long` | The index used for concurrency control.<br/>Will be `0` when calling `CreateCompareExchangeValue`. |
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-java.mdx b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-java.mdx
new file mode 100644
index 0000000000..a75225bf1c
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-java.mdx
@@ -0,0 +1,153 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A new compare-exchange item can be created in the following ways:
+    * Using a cluster-wide session
+    * Using a store operation
+    * Using the Studio
+
+* To create a new compare-exchange item, you must provide:
+    * **A unique key** (string, up to 512 bytes)
+    * **An associated value** (number, string, array, or any valid JSON object)
+    * You can optionally add **metadata** (a valid JSON object) to store extra information with the item.
+      A common use case is to set an expiration time for the compare-exchange item in its metadata.
+      Learn more in [Set expiration for compare-exchange items](../compare-exchange/cmpxchg-expiration).
+
+* To modify an existing compare-exchange item, see: [Update compare-exchange item](../compare-exchange/update-cmpxchg-item).
+
+* In this article:
+    * [Create item using a **store operation**](../compare-exchange/create-cmpxchg-items#create-item-using-a-store-operation)
+    * [Create item using the **Studio**](../compare-exchange/create-cmpxchg-items#create-item-using-the-studio)
+    * [Syntax](../compare-exchange/create-cmpxchg-items#syntax)
+
+
+
+---
+
+## Create item using a store operation
+
+* Use the `PutCompareExchangeValueOperation` [store operation](../client-api/operations/what-are-operations) to create a compare-exchange item independently,
+  without opening a session.
+  This is ideal for stand-alone tasks that don't require batching multiple commands into a single transactional session.
+
+* Note:
+  _PutCompareExchangeValueOperation_ is used for both **creating** and [modifying](../compare-exchange/update-cmpxchg-item) compare-exchange items.
+  The intent of the operation is determined by the index you pass:
+    * An index of `0` indicates a **create** operation.
+    * A non-zero index indicates a **modify** operation.
+
+* Creating a new compare-exchange item will succeed only if:
+    * The passed index is 0, and
+    * The specified key does not already exist in the database.
+
+* The operation will return a failed result (no exception is thrown) in the following cases:
+    * A new key is provided, but the index is not 0.
+    * The key already exists, even if the passed index is 0.
+
+---
+
+#### Example
+
+
+
+{`CompareExchangeResult<String> compareExchangeResult = store.operations().send(
+    new PutCompareExchangeValueOperation<>("user1-name@example.com", "users/1", 0));
+
+boolean successful = compareExchangeResult.isSuccessful();
+// If successful is true: then Key 'user1-name@example.com' now has the value of "users/1"
+`}
+
+
+
+---
+
+## Create item using the Studio
+
+To create compare-exchange items using the Studio, go to **Documents > Compare Exchange**.
+
+![The compare-exchange view](../assets/create-new-cmpxchg-1.png)
+
+![The compare-exchange view](../assets/create-new-cmpxchg-2.png)
+
+1. **Key**
+   Enter a unique identifier for the compare-exchange item (up to 512 bytes).
+   This key must be unique across the entire database.
+2. **Value**
+   Enter the value to associate with the key.
+   Can be a number, string, array, or any valid JSON object.
+3. **Metadata** (optional)
+   Add any additional data you want to store with the item.
+   Must be a valid JSON object.
+   Can be used to [set expiration time](../compare-exchange/cmpxchg-expiration) for the compare-exchange item.
+4. **Save**
+   Click to create the compare-exchange item.
+   If the key already exists, an error message will be shown.
+
+---
+
+## Syntax
+
+---
+
+### `PutCompareExchangeValueOperation`
+Create compare-exchange item using a store operation:
+
+
+
+{`public PutCompareExchangeValueOperation(String key, T value, long index)
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------|--------|-------------|
+| **key** | String | Object identifier under which _value_ is saved, unique in the database scope across the cluster. This string can be up to 512 bytes. |
+| **value** | `T` | The value to be saved for the specified _key_. |
+| **index** | long | * `0` if creating a new key<br/>* The current version of _value_ when updating a value for an existing key. |
+
+**Returned object**:
+
+
+
+{`public class CompareExchangeResult<T> \{
+    private T value;
+    private long index;
+    private boolean successful;
+
+    public T getValue() \{
+        return value;
+    \}
+
+    public void setValue(T value) \{
+        this.value = value;
+    \}
+
+    public long getIndex() \{
+        return index;
+    \}
+
+    public void setIndex(long index) \{
+        this.index = index;
+    \}
+
+    public boolean isSuccessful() \{
+        return successful;
+    \}
+
+    public void setSuccessful(boolean successful) \{
+        this.successful = successful;
+    \}
+\}
+`}
+
+
+
+| Return Value | Type | Description |
+|----------------|---------|-----------------------------------------------------------------------------|
+| **Successful** | boolean | * `true` if the save operation has completed successfully<br/>
* `false` if the save operation failed | +| **Value** | `T` | * The value that was saved if the operation was successful
* The currently existing value in the server upon failure | +| **Index** | long | * The version number of the value that was saved upon success
* The currently existing version number in the server upon failure | diff --git a/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-nodejs.mdx new file mode 100644 index 0000000000..52010ece5f --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-nodejs.mdx @@ -0,0 +1,315 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A new compare-exchange item can be created in the following ways: + * Using a cluster-wide session + * Using a store operation + * Using the Studio + +* To create a new compare-exchange item, you must provide: + * **A unique key** (string, up to 512 bytes) + * **An associated value** (number, string, array, or any valid JSON object) + * You can optionally add **metadata** (a valid JSON object) to store extra information with the item. + A common use case is to set an expiration time for the compare-exchange item in its metadata. + Learn more in [Set expiration for compare-exchange items](../compare-exchange/cmpxchg-expiration). + +* To modify an existing compare-exchange item, see: [Update compare-exchange items](../compare-exchange/update-cmpxchg-item). + +* In this article: + * [Create item using a **cluster-wide session**](../compare-exchange/create-cmpxchg-items#create-item-using-a-cluster-wide-session) + * [Ex.1 - Create new compare-exchange item](../compare-exchange/create-cmpxchg-items#ex1---create-new-compare-exchange-item) + * [Ex.2 - Create new compare-exchange item with metadata](../compare-exchange/create-cmpxchg-items#ex2---create-new-compare-exchange-item-with-metadata) + * [Ex.3 - Create multiple items](../compare-exchange/create-cmpxchg-items#ex3---create-multiple-items) + * [Create item using a **store operation**](../compare-exchange/create-cmpxchg-items#create-item-using-a-store-operation) + * [Ex.4 - Create new compare-exchange item](../compare-exchange/create-cmpxchg-items#ex4---create-new-compare-exchange-item) + * [Ex.5 - Create new compare-exchange item with metadata](../compare-exchange/create-cmpxchg-items#ex5---create-new-compare-exchange-item-with-metadata) + * [Create item using the **Studio**](../compare-exchange/create-cmpxchg-items#create-item-using-the-studio) + * [Syntax](../compare-exchange/create-cmpxchg-items#syntax) + + + +--- + +## Create item using a cluster-wide session + +* Create compare-exchange items using a cluster-wide session when you want the creation to be part of a transaction committed via `saveChanges()`. + This is suitable if you want to include compare-exchange item creation and document changes in the same transaction. + Learn more about cluster-wide sessions in [Cluster transactions - overview](../client-api/session/cluster-transaction/overview). + +* Use `createCompareExchangeValue()` to register the creation of a new compare-exchange item in the session. + The item will be created as part of the cluster-wide transaction when _saveChanges()_ is called. + +* Exceptions: + An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. + If the key already exists, _saveChanges()_ will throw a `ClusterTransactionConcurrencyException`. 
+ +* Examples: + + #### Ex.1 - Create new compare-exchange item + + + + ```js + // The session must be opened in cluster-wide mode + const session = documentStore.openSession({ + transactionMode: "ClusterWide" + }); + + // Call 'createCompareExchangeValue' to register the creation of a new compare-exchange item + // as part of the cluster-wide transaction. Specify the item's KEY and VALUE. + // The item will be created only when 'saveChanges' is called. + const item = session.advanced.clusterTransaction.createCompareExchangeValue( + "user1-name@example.com", "users/1" // key, value + ); + + // Commit the cluster-wide transaction. + // This will create the compare-exchange item, + // or throw a 'ClusterTransactionConcurrencyException' if the key already exists. + await session.saveChanges(); + ``` + + + + #### Ex.2 - Create new compare-exchange item with metadata + + + + ```js + // The session must be opened in cluster-wide mode + const session = documentStore.openSession({ + transactionMode: "ClusterWide" + }); + + // Call 'createCompareExchangeValue' to register the creation of a new compare-exchange item + // as part of the cluster-wide transaction. Specify the item's KEY and VALUE. + // The item will be created only when 'saveChanges' is called. + const item = session.advanced.clusterTransaction.createCompareExchangeValue( + "user1-name@example.com", "users/1" // key, value + ); + + // Add METADATA fields to the item: + item.metadata["field-name"] = "some value"; + item.metadata["email-type"] = "work email"; // e.g. describe the email type + + // Commit the cluster-wide transaction. + // This will create the compare-exchange item, + // or throw a 'ClusterTransactionConcurrencyException' if the key already exists. + await session.saveChanges(); + ``` + + + + #### Ex.3 - Create multiple items + + You can create multiple compare-exchange items in the same cluster-wide transaction. + + + + ```js + // The session must be opened in cluster-wide mode + const session = documentStore.openSession({ + transactionMode: "ClusterWide" + }); + + // You can create multiple compare-exchange items before calling 'saveChanges'. + // Call 'createCompareExchangeValue' for each item you want to create in the transaction. + session.advanced.clusterTransaction.createCompareExchangeValue( + "user7-name@example.com", "users/7" + ); + + session.advanced.clusterTransaction.createCompareExchangeValue( + "user8-name@example.com", "users/8" + ); + + session.advanced.clusterTransaction.createCompareExchangeValue( + "user9-name@example.com", "users/9" + ); + + // All three items will be created atomically as part of the same transaction. + // If any creation fails (e.g., due to an existing key), the entire transaction is rolled back + // and none of the new items will be created. + await session.saveChanges(); + ``` + + + +--- + +## Create item using a store operation + +* Use the `PutCompareExchangeValueOperation` [store operation](../client-api/operations/what-are-operations) to create a compare-exchange item independently, + without opening a session. + This is ideal for stand-alone tasks that don't require batching multiple commands into a single transactional session. + +* Note: + _PutCompareExchangeValueOperation_ is used for both **creating** and [modifying](../compare-exchange/update-cmpxchg-item) compare-exchange items. + The intent of the operation is determined by the index you pass: + * An index of `0` indicates a **create** operation. + * A non-zero index indicates a **modify** operation. 
+
+* Creating a new compare-exchange item will succeed only if:
+    * The passed index is 0, and
+    * The specified key does not already exist in the database.
+
+* The operation will return a failed result (no exception is thrown) in the following cases:
+    * A new key is provided, but the index is not 0.
+    * The key already exists, even if the passed index is 0.
+
+* Examples:
+
+  #### Ex.4 - Create new compare-exchange item
+
+
+
+  ```js
+  // Create a new compare-exchange item:
+  // ===================================
+
+  // Define the put compare-exchange operation. Pass:
+  // * KEY: a new unique identifier (e.g. a user's email)
+  // * VALUE: an associated value (e.g. the user's document ID)
+  // * INDEX: pass '0' to indicate that this is a new key
+  const putCmpXchgOp = new PutCompareExchangeValueOperation("user1-name@example.com", "users/1", 0);
+
+  // Execute the operation by passing it to operations.send
+  const result = await documentStore.operations.send(putCmpXchgOp);
+
+  // Check results
+  const successful = result.successful; // Has operation succeeded
+  const indexForItem = result.index;    // The version number assigned to the new item
+
+  // If successful is true then a new compare-exchange item has been created
+  // with the unique email key and the associated value.
+  ```
+
+
+  #### Ex.5 - Create new compare-exchange item with metadata
+
+
+
+  ```js
+  // Create a new compare-exchange item with metadata:
+  // =================================================
+
+  // Define the put compare-exchange operation.
+  // Pass a 4th parameter with the metadata object.
+  const putCmpXchgOp = new PutCompareExchangeValueOperation("user1-name@example.com", "users/1", 0,
+      {
+          "email-type": "work email"
+      });
+
+  // Execute the operation by passing it to operations.send
+  const result = await documentStore.operations.send(putCmpXchgOp);
+
+  // Check results
+  const successful = result.successful; // Has operation succeeded
+  const indexForItem = result.index;    // The version number assigned to the new item
+
+  // If successful is true then a new compare-exchange item has been created
+  // with the unique email key, value, and metadata.
+  ```
+
+
+
+---
+
+## Create item using the Studio
+
+To create compare-exchange items using the Studio, go to **Documents > Compare Exchange**.
+
+![The compare-exchange view](../assets/create-new-cmpxchg-1.png)
+
+![The compare-exchange view](../assets/create-new-cmpxchg-2.png)
+
+1. **Key**
+   Enter a unique identifier for the compare-exchange item (up to 512 bytes).
+   This key must be unique across the entire database.
+2. **Value**
+   Enter the value to associate with the key.
+   Can be a number, string, array, or any valid JSON object.
+3. **Metadata** (optional)
+   Add any additional data you want to store with the item.
+   Must be a valid JSON object.
+   Can be used to [set expiration time](../compare-exchange/cmpxchg-expiration) for the compare-exchange item.
+4. **Save**
+   Click to create the compare-exchange item.
+   If the key already exists, an error message will be shown.
+
+---
+
+## Syntax
+
+---
+
+### `PutCompareExchangeValueOperation`
+Create compare-exchange item using a store operation:
+
+
+```js
+// Available overloads:
+// ====================
+const putCmpXchgOp = new PutCompareExchangeValueOperation(key, value, index);
+const putCmpXchgOp = new PutCompareExchangeValueOperation(key, value, index, metadata);
+```
+
+
+| Parameter | Type | Description |
+|--------------|----------|-------------|
+| **key** | `string` |<br/>
  • A unique identifier in the database scope.
  • Can be up to 512 bytes.
| +| **value** | `object` |
  • A value to be saved for the specified _key_.
  • Can be any value (number, string, array, or any valid JSON object).
| +| **index** | `number` |
  • Pass `0` to create a new key.
  • When updating an existing key, pass the item's current index for concurrency control.<br/>
| +| **metadata** | `object` |
  • Optional metadata to be saved for the specified _key_.
  • Must be a valid JSON object.
| + + +**Returned object**: + + +```js +// Return value of store.operations.send(putCmpXchgOp) +// =================================================== +class CompareExchangeResult { + successful; + value; + index; +} +``` + + +| Return Value | Type | Description | +|----------------|-----------|-------------| +| **successful** | `boolean` |
  • `true` if the put operation has completed successfully.
  • `false` if the put operation has failed.
| +| **value** | `object` |
  • Upon success - the value of the compare-exchange item that was saved.
  • Upon failure - the existing value on the server.
| +| **index** | `number` |
  • The compare-exchange item's version.
  • This number increases with each successful modification of the `value` or `metadata`.
  • Upon success - the updated version of the compare-exchange item that was saved.
  • Upon failure - the existing version number on the server.
| + +--- + +### `createCompareExchangeValue` +Create compare-exchange item using cluster-wide session: + + +```js +session.advanced.clusterTransaction.createCompareExchangeValue(key, value); +``` + + +| Parameter | Type | Description | +|------------|----------|--------------------------------------------------------------------| +| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. | +| **value** | `object` | The associated value to store for the key.<br/>
Can be a number, string, array, or any valid JSON object. | + +| `createCompareExchangeValue` returns: | Description | +|---------------------------|----------------------------------------------------------------------------------------------------| +| `object` | The compare-exchange item that is added to the transaction.
It will be created when `saveChanges()` is called. | + +The returned object contains: + +| Property | Type | Description | +|------------|----------|--------------------------------------------------------------------| +| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. | +| **value** | `object` | The value associated with the key. | +| **index** | `number` | The index used for concurrency control.
Will be `0` when calling `createCompareExchangeValue`. | diff --git a/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-php.mdx b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-php.mdx new file mode 100644 index 0000000000..d32ece6a51 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-php.mdx @@ -0,0 +1,112 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A new compare-exchange item can be created in the following ways: + * Using a cluster-wide session + * Using a store operation + * Using the Studio + +* To create a new compare-exchange item, you must provide: + * **A unique key** (string, up to 512 bytes) + * **An associated value** (number, string, array, or any valid JSON object) + * You can optionally add **metadata** (a valid JSON object) to store extra information with the item. + A common use case is to set an expiration time for the compare-exchange item in its metadata. + Learn more in [Set expiration for compare-exchange items](../compare-exchange/cmpxchg-expiration). + +* To modify an existing compare-exchange item, see: [Update compare-exchange item](../compare-exchange/update-cmpxchg-item). + +* In this article: + * [Create item using a **cluster-wide session**](../compare-exchange/create-cmpxchg-items#create-item-using-a-cluster-wide-session) + * [Create item using the **Studio**](../compare-exchange/create-cmpxchg-items#create-item-using-the-studio) + * [Syntax](../compare-exchange/create-cmpxchg-items#syntax) + + + +--- + +## Create item using a cluster-wide session + +* Create compare-exchange items using a cluster-wide session when you want the creation to be part of a transaction committed via `saveChanges()`. + This is suitable if you want to include compare-exchange item creation and document changes in the same transaction. + Learn more about cluster-wide sessions in [Cluster transactions - overview](../client-api/session/cluster-transaction/overview). + +* Use `createCompareExchangeValue()` to register the creation of a new compare-exchange item in the session. + The item will be created as part of the cluster-wide transaction when _saveChanges()_ is called. + +* Exceptions: + An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. + If the key already exists, _saveChanges()_ will throw a `ConcurrencyException`. + +#### Example + + +```php +// The session must be first opened with cluster-wide mode +$session->advanced()->clusterTransaction()->createCompareExchangeValue( + key: "Best NoSQL Transactional Database", + value: "RavenDB" +); + +$session->saveChanges(); +``` + + +--- + +## Create item using the Studio + +To create compare-exchange items using the Studio, go to **Documents > Compare Exchange**. + +![The compare-exchange view](../assets/create-new-cmpxchg-1.png) + +![The compare-exchange view](../assets/create-new-cmpxchg-2.png) + +1. **Key** + Enter a unique identifier for the compare-exchange item (up to 512 bytes). + This key must be unique across the entire database. +2. **Value** + Enter the value to associate with the key. + Can be a number, string, array, or any valid JSON object. +3. **Metadata** (optional) + Add any additional data you want to store with the item. + Must be a valid JSON object. + Can be used to [set expiration time](../todo..) for the compare-exchange item. +4. 
**Save** + Click to create the compare-exchange item. + If the key already exists, an error message will be shown. + +--- + +## Syntax + +--- + +### `createCompareExchangeValue` +Create compare-exchange item using cluster-wide session: + + +```php +$session->advanced()->clusterTransaction()->createCompareExchangeValue($key, $value); +``` + + +| Parameter | Type | Description | +|------------|----------|--------------------------------------------------------------------| +| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. | +| **value** | `T` | The associated value to store for the key.
Can be a number, string, array, or any valid JSON object. | + +| `createCompareExchangeValue` returns: | Description | +|---------------------------|----------------------------------------------------------------------------------------------------------------------| +| `CompareExchangeValue[T]` | The compare-exchange item that is added to the transaction.<br/>
It will be created when `saveChanges()` is called. | + +The `CompareExchangeValue`: + +| Property | Type | Description | +|------------|----------|--------------------------------------------------------------------| +| **key** | `string` | The compare-exchange item key. This string can be up to 512 bytes. | +| **value** | `T` | The value associated with the key. | +| **index** | `int` | The index used for concurrency control.<br/>
Will be `0` when calling `createCompareExchangeValue`. | diff --git a/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-python.mdx b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-python.mdx new file mode 100644 index 0000000000..6bbf563583 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_create-cmpxchg-items-python.mdx @@ -0,0 +1,113 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A new compare-exchange item can be created in the following ways: + * Using a cluster-wide session + * Using a store operation + * Using the Studio + +* To create a new compare-exchange item, you must provide: + * **A unique key** (string, up to 512 bytes) + * **An associated value** (number, string, array, or any valid JSON object) + * You can optionally add **metadata** (a valid JSON object) to store extra information with the item. + A common use case is to set an expiration time for the compare-exchange item in its metadata. + Learn more in [Set expiration for compare-exchange items](../compare-exchange/cmpxchg-expiration). + +* To modify an existing compare-exchange item, see: [Update compare-exchange item](../compare-exchange/update-cmpxchg-item). + +* In this article: + * [Create item using a **cluster-wide session**](../compare-exchange/create-cmpxchg-items#create-item-using-a-cluster-wide-session) + * [Create item using the **Studio**](../compare-exchange/create-cmpxchg-items#create-item-using-the-studio) + * [Syntax](../compare-exchange/create-cmpxchg-items#syntax) + + + +--- + +## Create item using a cluster-wide session + +* Create compare-exchange items using a cluster-wide session when you want the creation to be part of a transaction committed via `save_changes()`. + This is suitable if you want to include compare-exchange item creation and document changes in the same transaction. + Learn more about cluster-wide sessions in [Cluster transactions - overview](../client-api/session/cluster-transaction/overview). + +* Use `create_compare_exchange_value()` to register the creation of a new compare-exchange item in the session. + The item will be created as part of the cluster-wide transaction when _save_changes()_ is called. + +* Exceptions: + A `RuntimeError` is thrown when the session is not opened in cluster-wide mode. + If the key already exists, _save_changes()_ will throw a `ConcurrencyException`. + +#### Example + + +```python +# The session must first be opened in cluster-wide mode + +session.advanced.cluster_transaction.create_compare_exchange_value( + key="Best NoSQL Transactional Database", + item="RavenDB", +) + +session.save_changes() +``` + + +--- + +## Create item using the Studio + +To create compare-exchange items using the Studio, go to **Documents > Compare Exchange**. + +![The compare-exchange view](../assets/create-new-cmpxchg-1.png) + +![The compare-exchange view](../assets/create-new-cmpxchg-2.png) + +1. **Key** + Enter a unique identifier for the compare-exchange item (up to 512 bytes). + This key must be unique across the entire database. +2. **Value** + Enter the value to associate with the key. + Can be a number, string, array, or any valid JSON object. +3. **Metadata** (optional) + Add any additional data you want to store with the item. + Must be a valid JSON object. + Can be used to [set expiration time](../todo..) for the compare-exchange item. +4. <br/>
**Save** + Click to create the compare-exchange item. + If the key already exists, an error message will be shown. + +--- + +## Syntax + +--- + +### `create_compare_exchange_value` +Create compare-exchange item using cluster-wide session: + + +```python +session.advanced.cluster_transaction.create_compare_exchange_value(key, value) +``` + + +| Parameter | Type | Description | +|------------|-------|--------------------------------------------------------------------| +| **key** | `str` | The compare-exchange item key. This string can be up to 512 bytes. | +| **value** | `T` | The associated value to store for the key.
Can be a number, string, array, or any valid JSON object. | + +| `create_compare_exchange_value` returns: | Description | +|---------------------------|----------------------------------------------------------------------------------------------------------------------| +| `CompareExchangeValue[T]` | The compare-exchange item that is added to the transaction.
It will be created when `save_changes()` is called. | + +The `CompareExchangeValue`: + +| Property | Type | Description | +|------------|--------|--------------------------------------------------------------------| +| **key** | `str` | The compare-exchange item key. This string can be up to 512 bytes. | +| **value** | `T` | The value associated with the key. | +| **index** | `int` | The index used for concurrency control.
Will be `0` when calling `create_compare_exchange_value`. | diff --git a/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-csharp.mdx new file mode 100644 index 0000000000..eee16ab16b --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-csharp.mdx @@ -0,0 +1,376 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Custom compare-exchange items can be deleted**: + You can delete your own custom compare-exchange items. + An item is deleted only if the index you provide in the request matches the current index stored on the server for the specified key. + +* **Delete items by expiration**: + Compare-exchange items can also be deleted by adding an expiration date to them. + Learn more in [Compare-exchange expiration](../compare-exchange/cmpxchg-expiration). + +* **Compare-exchange tombstones**: + Whenever a compare-exchange item is deleted, a compare-exchange tombstone is created for it. + These tombstones are used to indicate to other RavenDB processes that the compare-exchange item was deleted, + so they can react accordingly. + For example, indexes referencing the deleted item will update themselves to remove those references. + Compare-exchange tombstones that are eligible for deletion are removed periodically by an internal cleanup task. + See: [Cluster.CompareExchangeTombstonesCleanupIntervalInMin](../compare-exchange/configuration#clustercompareexchangetombstonescleanupintervalinmin). + +* + Do NOT attempt to delete [atomic guards](../compare-exchange/atomic-guards), which RavenDB uses internally to ensure ACID guarantees in cluster-wide transactions. + These compare-exchange items are created automatically and must not be modified or removed. + + If your custom compare-exchange item was set up to protect the consistency of a transaction, deleting it will break the ACID guarantees. + Only delete or modify such items if you truly know what you're doing. + + +--- + +* In this article: + * [Delete compare-exchange item using a **cluster-wide session**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-cluster-wide-session) + * [Delete by item](../compare-exchange/delete-cmpxchg-items#delete-by-item) + * [Delete by key and index](../compare-exchange/delete-cmpxchg-items#delete-by-key-and-index) + * [Delete multiple items](../compare-exchange/delete-cmpxchg-items#delete-multiple-items) + * [Delete compare-exchange item using a **store operation**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-store-operation) + * [Delete compare-exchange items using the **Studio**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-items-using-the-studio) + * [Syntax](../compare-exchange/delete-cmpxchg-items#syntax) + + + +--- + +## Delete compare-exchange item using a cluster-wide session + +* Delete compare-exchange items using a cluster-wide session when you want the deletion to be part of a transaction committed via `SaveChanges()`. + This is suitable if you want to include compare-exchange deletions alongside other operations, such as putting or deleting documents and compare-exchange items, in a single transaction. + Learn more about cluster-wide sessions in [Cluster transactions - overview](../client-api/session/cluster-transaction/overview). 
+ +* Use `DeleteCompareExchangeValue()` to register the deletion of an existing compare-exchange item in the session. + The item will be deleted as part of the cluster-wide transaction when _SaveChanges()_ is called. + +* If the item's index (its version) on the server is different from the index you provide, _SaveChanges()_ will throw a `ClusterTransactionConcurrencyException`. + This means the item was modified by another operation after it was loaded into the session, and the entire transaction will be rejected. + +* Examples: + + #### Delete by item + + + + ```csharp + // The session must be opened in cluster-wide mode. + // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. + using (var session = store.OpenSession( + new SessionOptions { TransactionMode = TransactionMode.ClusterWide })) + { + // Get the latest version of the existing compare-exchange item to be deleted. + CompareExchangeValue<string> itemToDelete = session.Advanced.ClusterTransaction + .GetCompareExchangeValue<string>("user1-name@example.com"); + + if (itemToDelete != null) + { + // Call 'DeleteCompareExchangeValue' to register the deletion as part of the cluster-wide + // transaction. Pass the item to delete. + session.Advanced.ClusterTransaction.DeleteCompareExchangeValue(itemToDelete); + + // Commit the cluster-wide transaction. This will delete the compare-exchange item, + // or throw a 'ClusterTransactionConcurrencyException' if the item's index (its version) + // on the server is different than the one provided in the delete request. + session.SaveChanges(); + } + } + ``` + + + ```csharp + // The session must be opened in cluster-wide mode. + // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. + using (var asyncSession = store.OpenAsyncSession( + new SessionOptions { TransactionMode = TransactionMode.ClusterWide })) + { + // Get the latest version of the existing compare-exchange item to be deleted. + CompareExchangeValue<string> itemToDelete = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValueAsync<string>("user1-name@example.com"); + + if (itemToDelete != null) + { + // Call 'DeleteCompareExchangeValue' to register the deletion as part of the cluster-wide + // transaction. Pass the item to delete. + asyncSession.Advanced.ClusterTransaction.DeleteCompareExchangeValue(itemToDelete); + + // Commit the cluster-wide transaction. This will delete the compare-exchange item, + // or throw a 'ClusterTransactionConcurrencyException' if the item's index (its version) + // on the server is different than the one provided in the delete request. + await asyncSession.SaveChangesAsync(); + } + } + ``` + + + + #### Delete by key and index + + + + ```csharp + // The session must be opened in cluster-wide mode. + // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. + using (var session = store.OpenSession( + new SessionOptions { TransactionMode = TransactionMode.ClusterWide })) + { + // Get the latest version of the existing compare-exchange item to be deleted. + CompareExchangeValue<string> itemToDelete = session.Advanced.ClusterTransaction + .GetCompareExchangeValue<string>("user1-name@example.com"); + + if (itemToDelete != null) + { + // Call 'DeleteCompareExchangeValue' to register the deletion as part of the cluster-wide + // transaction. Specify the item's KEY and current INDEX (its version).
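+ // (This overload is equivalent to passing the CompareExchangeValue instance itself.)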
+ session.Advanced.ClusterTransaction.DeleteCompareExchangeValue( + itemToDelete.Key, itemToDelete.Index); + + // Commit the cluster-wide transaction. This will delete the compare-exchange item, + // or throw a 'ClusterTransactionConcurrencyException' if the item's index (its version) + // on the server is different than the one provided in the delete request. + session.SaveChanges(); + } + } + ``` + + + ```csharp + // The session must be opened in cluster-wide mode. + // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. + using (var asyncSession = store.OpenAsyncSession( + new SessionOptions { TransactionMode = TransactionMode.ClusterWide })) + { + // Get the latest version of the existing compare-exchange item to be deleted. + CompareExchangeValue<string> itemToDelete = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValueAsync<string>("user1-name@example.com"); + + if (itemToDelete != null) + { + // Call 'DeleteCompareExchangeValue' to register the deletion as part of the cluster-wide + // transaction. Specify the item's KEY and current INDEX (its version). + asyncSession.Advanced.ClusterTransaction.DeleteCompareExchangeValue( + itemToDelete.Key, itemToDelete.Index); + + // Commit the cluster-wide transaction. This will delete the compare-exchange item, + // or throw a 'ClusterTransactionConcurrencyException' if the item's index (its version) + // on the server is different than the one provided in the delete request. + await asyncSession.SaveChangesAsync(); + } + } + ``` + + + + #### Delete multiple items + + + + ```csharp + // The session must be opened in cluster-wide mode + using (var session = store.OpenSession( + new SessionOptions { TransactionMode = TransactionMode.ClusterWide })) + { + // Get the latest version of the items to be deleted. + CompareExchangeValue<string> itemToDelete1 = session.Advanced.ClusterTransaction + .GetCompareExchangeValue<string>("user1-name@example.com"); + CompareExchangeValue<string> itemToDelete2 = session.Advanced.ClusterTransaction + .GetCompareExchangeValue<string>("user2-name@example.com"); + CompareExchangeValue<string> itemToDelete3 = session.Advanced.ClusterTransaction + .GetCompareExchangeValue<string>("user3-name@example.com"); + + // You can delete multiple compare-exchange items before calling 'SaveChanges'. + // Call 'DeleteCompareExchangeValue' for each item you want to delete in the transaction. + session.Advanced.ClusterTransaction.DeleteCompareExchangeValue(itemToDelete1); + session.Advanced.ClusterTransaction.DeleteCompareExchangeValue(itemToDelete2); + session.Advanced.ClusterTransaction.DeleteCompareExchangeValue(itemToDelete3); + + // All items will be deleted atomically as part of the same transaction. + // If any deletion fails, the entire transaction is rolled back + // and none of the items will be deleted. + session.SaveChanges(); + } + ``` + + + ```csharp + // The session must be opened in cluster-wide mode + using (var asyncSession = store.OpenAsyncSession( + new SessionOptions { TransactionMode = TransactionMode.ClusterWide })) + { + // Get the latest version of the items to be deleted.
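+ // (Each Get call also returns the item's current index, which the server validates when the delete is committed.)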
+ CompareExchangeValue<string> itemToDelete1 = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValueAsync<string>("user1-name@example.com"); + CompareExchangeValue<string> itemToDelete2 = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValueAsync<string>("user2-name@example.com"); + CompareExchangeValue<string> itemToDelete3 = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValueAsync<string>("user3-name@example.com"); + + // You can delete multiple compare-exchange items before calling 'SaveChanges'. + // Call 'DeleteCompareExchangeValue' for each item you want to delete in the transaction. + asyncSession.Advanced.ClusterTransaction.DeleteCompareExchangeValue(itemToDelete1); + asyncSession.Advanced.ClusterTransaction.DeleteCompareExchangeValue(itemToDelete2); + asyncSession.Advanced.ClusterTransaction.DeleteCompareExchangeValue(itemToDelete3); + + // All items will be deleted atomically as part of the same transaction. + // If any deletion fails, the entire transaction is rolled back + // and none of the items will be deleted. + await asyncSession.SaveChangesAsync(); + } + ``` + + + +--- + +## Delete compare-exchange item using a store operation + +* Use the `DeleteCompareExchangeValueOperation` [store operation](../client-api/operations/what-are-operations) to delete a compare-exchange item by its key and index, without opening a session. + This is ideal for stand-alone tasks that don't require batching multiple commands into a single transactional session. + +* The delete operation will only succeed if the item's current index on the server is the same as the one you provide. + If the indexes do not match, the item is not deleted and no exception is thrown. + +* Examples: + + + + ```csharp + // Get the latest version of the existing compare-exchange item to be deleted + var getCmpXchgOp = new GetCompareExchangeValueOperation<string>("user1-name@example.com"); + CompareExchangeValue<string> itemToDelete = store.Operations.Send(getCmpXchgOp); + + if (itemToDelete != null) + { + // Define the delete compare-exchange operation + // Pass the item's KEY and INDEX (its version) + var deleteCmpXchgOp = new DeleteCompareExchangeValueOperation<string>( + itemToDelete.Key, itemToDelete.Index); + + // Execute the delete operation by passing it to Operations.Send + CompareExchangeResult<string> resultOfDelete = store.Operations.Send(deleteCmpXchgOp); + + // Check results + bool successful = resultOfDelete.Successful; // Has operation succeeded + long indexOfItem = resultOfDelete.Index; // The version of the deleted item + + // If 'successful' is true - the compare-exchange item was deleted. + + // If 'successful' is false - + // * The item was not deleted because the index didn't match (it was modified by someone else). + // In this case, 'resultOfDelete.Value' will contain the current value stored on the server. + // * Or the item no longer existed at the time of deletion (it was already deleted). + // In this case, 'resultOfDelete.Value' will be null.
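+ // (A common follow-up to a failed delete is to re-fetch the item and retry with the index returned in 'resultOfDelete.Index'.)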
+ } + ``` + + ```csharp + // Get the latest version of the existing compare-exchange item to be deleted + var getCmpXchgOp = new GetCompareExchangeValueOperation<string>("user1-name@example.com"); + CompareExchangeValue<string> itemToDelete = await store.Operations.SendAsync(getCmpXchgOp); + + if (itemToDelete != null) + { + // Define the delete compare-exchange operation + // Pass the item's KEY and INDEX (its version) + var deleteCmpXchgOp = new DeleteCompareExchangeValueOperation<string>( + itemToDelete.Key, itemToDelete.Index); + + // Execute the delete operation by passing it to Operations.SendAsync + CompareExchangeResult<string> resultOfDelete = await + store.Operations.SendAsync(deleteCmpXchgOp); + + // Check results + bool successful = resultOfDelete.Successful; // Has operation succeeded + long indexOfItem = resultOfDelete.Index; // The version of the deleted item + + // If 'successful' is true - the compare-exchange item was deleted. + + // If 'successful' is false - + // * The item was not deleted because the index didn't match (it was modified by someone else). + // In this case, 'resultOfDelete.Value' will contain the current value stored on the server. + // * Or the item no longer existed at the time of deletion (it was already deleted). + // In this case, 'resultOfDelete.Value' will be null. + } + ``` + + + +--- + +## Delete compare-exchange items using the Studio + +You can delete one or multiple compare-exchange items from the Studio. + +![The compare-exchange view](../assets/delete-cmpxchg.png) + +1. Go to **Documents > Compare Exchange**. +2. Select the compare-exchange items you want to delete. +3. Click **Delete**. + +--- + +## Syntax + +--- + +### `DeleteCompareExchangeValueOperation` +Delete compare-exchange item using a store operation: + + +```csharp +public DeleteCompareExchangeValueOperation<T>(string key, long index) +``` + + +| Parameter | Type | Description | +|-----------|----------|-------------| +| **key** | `string` | The unique key of the compare-exchange item. | +| **index** | `long` | The current version of the item.<br/>
Deletion will only succeed if this matches the version stored on the server. | + +**Returned object**: + + +```csharp +public class CompareExchangeResult<T> +{ + public bool Successful; + public T Value; + public long Index; +} +``` + + +| Return Value | Type | Description | +|---------------|--------|-------------| +| **Successful**| `bool` |<br/>
  • `true` if the delete operation completed successfully.
  • `true` if _key_ doesn't exist.<br/>
  • `false` if the delete operation has failed, e.g. when the index version doesn't match.
| +| **Value** | `T` |
  • The value that was deleted upon a successful delete.
  • `null` if _key_ doesn't exist.<br/>
  • The currently existing value on the server if the delete operation has failed.
| +| **Index** | `long` |
  • The next available version number upon success.
  • The next available version number if _key_ doesn't exist.
  • The currently existing index on the server if the delete operation has failed.
| + +--- + +### `DeleteCompareExchangeValue` +Delete compare-exchange item using cluster-wide session: + + +```csharp +// Available overloads: +void DeleteCompareExchangeValue<T>(CompareExchangeValue<T> item); +void DeleteCompareExchangeValue(string key, long index); +``` + + +| Parameter | Type | Description | +|-----------|---------------------------|-------------| +| **item** | `CompareExchangeValue<T>` | The compare-exchange item to delete. | +| **key** | `string` | The unique key of the compare-exchange item. | +| **index** | `long` | The current version of the item.<br/>
Deletion will only succeed if this matches the version stored on the server. | diff --git a/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-java.mdx b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-java.mdx new file mode 100644 index 0000000000..6fe2d159e7 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-java.mdx @@ -0,0 +1,142 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Custom compare-exchange items can be deleted**: + You can delete your own custom compare-exchange items. + An item is deleted only if the index you provide in the request matches the current index stored on the server for the specified key. + +* **Delete items by expiration**: + Compare-exchange items can also be deleted by adding an expiration date to them. + Learn more in [Compare-exchange expiration](../compare-exchange/cmpxchg-expiration). + +* **Compare-exchange tombstones**: + Whenever a compare-exchange item is deleted, a compare-exchange tombstone is created for it. + These tombstones are used to indicate to other RavenDB processes that the compare-exchange item was deleted, + so they can react accordingly. + For example, indexes referencing the deleted item will update themselves to remove those references. + Compare-exchange tombstones that are eligible for deletion are removed periodically by an internal cleanup task. + See: [Cluster.CompareExchangeTombstonesCleanupIntervalInMin](../compare-exchange/configuration#clustercompareexchangetombstonescleanupintervalinmin). + +* + Do not attempt to delete [atomic guards](../compare-exchange/atomic-guards), which RavenDB uses internally to ensure ACID guarantees in cluster-wide transactions. + These items are created automatically and must not be modified or removed. + + If your custom compare-exchange item was set up to protect the consistency of a transaction, deleting it will break the ACID guarantees. + Only delete or modify such items if you truly know what you're doing. + + +--- + +* In this article: + * [Delete compare-exchange item using a **store operation**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-store-operation) + * [Delete compare-exchange items using the **Studio**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-items-using-the-studio) + * [Syntax](../compare-exchange/delete-cmpxchg-items#syntax) + + + +--- + +## Delete compare-exchange item using a store operation + +* Use the `DeleteCompareExchangeValueOperation` [store operation](../client-api/operations/what-are-operations) to delete a compare-exchange item by its key and index, without opening a session. + This is ideal for stand-alone tasks that don't require batching multiple commands into a single transactional session. + +* The delete operation will only succeed if the item's current index on the server is the same as the one you provide. + If the indexes do not match, the item is not deleted and no exception is thrown. 
+ +* Example: + + + + ```java + // Get the latest version of the existing compare-exchange item to be deleted + CompareExchangeValue<User> itemToDelete = store.operations().send( + new GetCompareExchangeValueOperation<>(User.class, "AdminUser")); + + // Execute the delete operation + CompareExchangeResult<User> deleteResult = store.operations().send( + new DeleteCompareExchangeValueOperation<>(User.class, "AdminUser", itemToDelete.getIndex())); + + // Check results + boolean deleteResultSuccessful = deleteResult.isSuccessful(); + ``` + + + +--- + +## Delete compare-exchange items using the Studio + +You can delete one or multiple compare-exchange items from the Studio. + +![The compare-exchange view](../assets/delete-cmpxchg.png) + +1. Go to **Documents > Compare Exchange**. +2. Select the compare-exchange items you want to delete. +3. Click **Delete**. + +--- + +## Syntax + +--- + +### `DeleteCompareExchangeValueOperation` +Delete compare-exchange item using a store operation: + + +```java +public DeleteCompareExchangeValueOperation(Class<T> clazz, String key, long index) +``` + + +| Parameter | Type | Description | +|-----------|----------|-------------| +| **key** | `String` | The unique key of the compare-exchange item. | +| **index** | `long` | The current version of the item.<br/>
Deletion will only succeed if this matches the version stored on the server. | + +**Returned object**: + + +```java +public class CompareExchangeResult<T> { + private T value; + private long index; + private boolean successful; + + public T getValue() { + return value; + } + + public void setValue(T value) { + this.value = value; + } + + public long getIndex() { + return index; + } + + public void setIndex(long index) { + this.index = index; + } + + public boolean isSuccessful() { + return successful; + } + + public void setSuccessful(boolean successful) { + this.successful = successful; + } +} +``` + + +| Return Value | Type | Description | +|---------------|-----------|-------------| +| **Successful**| `boolean` |<br/>
  • `true` if the delete operation completed successfully.
  • `true` if _key_ doesn't exist.<br/>
  • `false` if the delete operation has failed, e.g. when the index version doesn't match.
| +| **Value** | `T` |
  • The value that was deleted upon a successful delete.
  • `null` if _key_ doesn't exist.<br/>
  • The currently existing value on the server if the delete operation has failed.
| +| **Index** | `long` |
  • The next available version number upon success.
  • The next available version number if _key_ doesn't exist.
  • The currently existing index on the server if the delete operation has failed.
| diff --git a/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-nodejs.mdx new file mode 100644 index 0000000000..39bb9c6ff1 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-nodejs.mdx @@ -0,0 +1,267 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Custom compare-exchange items can be deleted**: + You can delete your own custom compare-exchange items. + An item is deleted only if the index you provide in the request matches the current index stored on the server for the specified key. + +* **Delete items by expiration**: + Compare-exchange items can also be deleted by adding an expiration date to them. + Learn more in [Compare-exchange expiration](../compare-exchange/cmpxchg-expiration). + +* **Compare-exchange tombstones**: + Whenever a compare-exchange item is deleted, a compare-exchange tombstone is created for it. + These tombstones are used to indicate to other RavenDB processes that the compare-exchange item was deleted, + so they can react accordingly. + For example, indexes referencing the deleted item will update themselves to remove those references. + Compare-exchange tombstones that are eligible for deletion are removed periodically by an internal cleanup task. + See: [Cluster.CompareExchangeTombstonesCleanupIntervalInMin](../compare-exchange/configuration#clustercompareexchangetombstonescleanupintervalinmin). + +* + Do not attempt to delete [atomic guards](../compare-exchange/atomic-guards), which RavenDB uses internally to ensure ACID guarantees in cluster-wide transactions. + These items are created automatically and must not be modified or removed. + + If your custom compare-exchange item was set up to protect the consistency of a transaction, deleting it will break the ACID guarantees. + Only delete or modify such items if you truly know what you're doing. + + +--- + +* In this article: + * [Delete compare-exchange item using a **cluster-wide session**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-cluster-wide-session) + * [Delete by item](../compare-exchange/delete-cmpxchg-items#delete-by-item) + * [Delete by key and index](../compare-exchange/delete-cmpxchg-items#delete-by-key-and-index) + * [Delete multiple items](../compare-exchange/delete-cmpxchg-items#delete-multiple-items) + * [Delete compare-exchange item using a **store operation**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-store-operation) + * [Delete compare-exchange items using the **Studio**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-items-using-the-studio) + * [Syntax](../compare-exchange/delete-cmpxchg-items#syntax) + + + +--- + +## Delete compare-exchange item using a cluster-wide session + +* Delete compare-exchange items using a cluster-wide session when you want the deletion to be part of a transaction committed via `saveChanges()`. + This is suitable if you want to include compare-exchange deletions alongside other operations, such as putting or deleting documents and compare-exchange items, in a single transaction. + Learn more about cluster-wide sessions in [Cluster transactions - overview](../client-api/session/cluster-transaction/overview). 
+ +* Use `deleteCompareExchangeValue()` to register the deletion of an existing compare-exchange item in the session. + The item will be deleted as part of the cluster-wide transaction when _saveChanges()_ is called. + +* If the item's index (its version) on the server is different from the index you provide, _saveChanges()_ will throw a `ClusterTransactionConcurrencyException`. + This means the item was modified by another operation after it was loaded into the session, and the entire transaction will be rejected. + +* Examples: + + #### Delete by item + + + + ```js + // The session must be opened in cluster-wide mode. + // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. + const session = documentStore.openSession({ + transactionMode: "ClusterWide" + }); + + // Get the latest version of the existing compare-exchange item to be deleted. + const itemToDelete = await session.advanced.clusterTransaction + .getCompareExchangeValue("user1-name@example.com"); + + if (itemToDelete) { + // Call 'deleteCompareExchangeValue' to register the deletion as part of the cluster-wide + // transaction. Pass the item to delete. + session.advanced.clusterTransaction.deleteCompareExchangeValue(itemToDelete); + + // Commit the cluster-wide transaction. This will delete the compare-exchange item, + // or throw a 'ClusterTransactionConcurrencyException' if the item's index (its version) + // on the server is different than the one provided in the delete request. + await session.saveChanges(); + } + ``` + + + + #### Delete by key and index + + + + ```js + // The session must be opened in cluster-wide mode. + // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. + const session = documentStore.openSession({ + transactionMode: "ClusterWide" + }); + + // Get the latest version of the existing compare-exchange item to be deleted. + const itemToDelete = await session.advanced.clusterTransaction + .getCompareExchangeValue("user1-name@example.com"); + + if (itemToDelete) { + // Call 'deleteCompareExchangeValue' to register the deletion as part of the cluster-wide + // transaction. Specify the item's KEY and current INDEX (its version). + session.advanced.clusterTransaction.deleteCompareExchangeValue(itemToDelete.key, itemToDelete.index); + + // Commit the cluster-wide transaction. This will delete the compare-exchange item, + // or throw a 'ClusterTransactionConcurrencyException' if the item's index (its version) + // on the server is different than the one provided in the delete request. + await session.saveChanges(); + } + ``` + + + + #### Delete multiple items + + + + ```js + // The session must be opened in cluster-wide mode + const session = documentStore.openSession({ + transactionMode: "ClusterWide" + }); + + // Get the latest version of the items to be deleted. + const itemToDelete1 = await session.advanced.clusterTransaction + .getCompareExchangeValue("user1-name@example.com"); + const itemToDelete2 = await session.advanced.clusterTransaction + .getCompareExchangeValue("user2-name@example.com"); + const itemToDelete3 = await session.advanced.clusterTransaction + .getCompareExchangeValue("user3-name@example.com"); + + // You can delete multiple compare-exchange items before calling 'saveChanges'. + // Call 'deleteCompareExchangeValue' for each item you want to delete in the transaction. 
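+ // (Each call accepts either the item object or its key and index - see the Syntax section below.)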
+ session.advanced.clusterTransaction.deleteCompareExchangeValue(itemToDelete1); + session.advanced.clusterTransaction.deleteCompareExchangeValue(itemToDelete2); + session.advanced.clusterTransaction.deleteCompareExchangeValue(itemToDelete3); + + // All items will be deleted atomically as part of the same transaction. + // If any deletion fails, the entire transaction is rolled back + // and none of the items will be deleted. + await session.saveChanges(); + ``` + + + +--- + +## Delete compare-exchange item using a store operation + +* Use the `DeleteCompareExchangeValueOperation` [store operation](../client-api/operations/what-are-operations) to delete a compare-exchange item by its key and index, without opening a session. + This is ideal for stand-alone tasks that don't require batching multiple commands into a single transactional session. + +* The delete operation will only succeed if the item's current index on the server is the same as the one you provide. + If the indexes do not match, the item is not deleted and no exception is thrown. + +* Examples: + + + + ```js + // Get the latest version of the existing compare-exchange item to be deleted + const getCmpXchgOp = new GetCompareExchangeValueOperation("user1-name@example.com"); + const itemToDelete = await documentStore.operations.send(getCmpXchgOp); + + if (itemToDelete) + { + // Define the delete compare-exchange operation + // Pass the item's KEY and INDEX (its version) + const deleteCmpXchgOp = new DeleteCompareExchangeValueOperation(itemToDelete.key, itemToDelete.index); + + // Execute the delete operation by passing it to operations.send + const resultOfDelete = await documentStore.operations.send(deleteCmpXchgOp); + + // Check results + const successful = resultOfDelete.successful; // Has operation succeeded + const indexOfItem = resultOfDelete.index; // The version of the deleted item + + // If 'successful' is true - the compare-exchange item was deleted. + + // If 'successful' is false - + // * The item was not deleted because the index didn't match (it was modified by someone else). + // In this case, 'resultOfDelete.value' will contain the current value stored on the server. + // * Or the item no longer existed at the time of deletion (it was already deleted). + // In this case, 'resultOfDelete.value' will be null. + } + ``` + + + +--- + +## Delete compare-exchange items using the Studio + +You can delete one or multiple compare-exchange items from the Studio. + +![The compare-exchange view](../assets/delete-cmpxchg.png) + +1. Go to **Documents > Compare Exchange**. +2. Select the compare-exchange items you want to delete. +3. Click **Delete**. + +--- + +## Syntax + +--- + +### `DeleteCompareExchangeValueOperation` +Delete compare-exchange item using a store operation: + + +```js +const deleteCmpXchgOp = new DeleteCompareExchangeValueOperation(key, index, clazz?); +``` + + +| Parameter | Type | Description | +|-----------|----------|-------------| +| **key** | `string` | The unique key of the compare-exchange item to be deleted. | +| **index** | `number` | The current version of the item to be deleted.<br/>
Deletion will only succeed if this matches the version stored on the server. | +| **clazz** | `object` | When the item's value is a class, you can specify its type in this parameter. | + +**Returned object**: + + +```js +// Return value of store.operations.send(deleteCmpXchgOp) +// ====================================================== + +class CompareExchangeResult +{ + successful; + value; + index; +} +``` + + +| Return Value | Type | Description | +|---------------|-----------|-------------| +| **successful**| `boolean` |<br/>
  • `true` if the delete operation completed successfully.
  • `true` if _key_ doesn't exist.<br/>
  • `false` if the delete operation has failed, e.g. when the index version doesn't match.
| +| **value** | `object` |<br/>
  • The value that was deleted upon a successful delete.
  • `null` if _key_ doesn't exist.<br/>
  • The currently existing value on the server if the delete operation has failed.
| +| **index** | `number` |<br/>
  • The next available version number upon success.
  • The next available version number if _key_ doesn't exist.
  • The currently existing index on the server if the delete operation has failed.
| + +--- + +### `deleteCompareExchangeValue` +Delete compare-exchange item using cluster-wide session: + + +```js +// Available overloads: +deleteCompareExchangeValue(key, index); +deleteCompareExchangeValue(item); +``` + + +| Parameter | Type | Description | +|-----------|-------------------------|-------------| +| **item** | `CompareExchangeValue` | The compare-exchange item to delete. | +| **key** | `string` | The unique key of the compare-exchange item. | +| **index** | `number` | The current version of the item.
Deletion will only succeed if this matches the version stored on the server. | diff --git a/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-php.mdx b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-php.mdx new file mode 100644 index 0000000000..0685a70894 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-php.mdx @@ -0,0 +1,90 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Custom compare-exchange items can be deleted**: + You can delete your own custom compare-exchange items. + An item is deleted only if the index you provide in the request matches the current index stored on the server for the specified key. + +* **Delete items by expiration**: + Compare-exchange items can also be deleted by adding an expiration date to them. + Learn more in [Compare-exchange expiration](../compare-exchange/cmpxchg-expiration). + +* **Compare-exchange tombstones**: + Whenever a compare-exchange item is deleted, a compare-exchange tombstone is created for it. + These tombstones are used to indicate to other RavenDB processes that the compare-exchange item was deleted, + so they can react accordingly. + For example, indexes referencing the deleted item will update themselves to remove those references. + Compare-exchange tombstones that are eligible for deletion are removed periodically by an internal cleanup task. + See: [Cluster.CompareExchangeTombstonesCleanupIntervalInMin](../compare-exchange/configuration#clustercompareexchangetombstonescleanupintervalinmin). + +* + Do not attempt to delete [atomic guards](../compare-exchange/atomic-guards), which RavenDB uses internally to ensure ACID guarantees in cluster-wide transactions. + These items are created automatically and must not be modified or removed. + + If your custom compare-exchange item was set up to protect the consistency of a transaction, deleting it will break the ACID guarantees. + Only delete or modify such items if you truly know what you're doing. + + +--- + +* In this article: + * [Delete compare-exchange item using a **cluster-wide session**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-cluster-wide-session) + * [Delete by item](../compare-exchange/delete-cmpxchg-items#delete-by-item) + * [Delete by key and index](../compare-exchange/delete-cmpxchg-items#delete-by-key-and-index) + * [Delete compare-exchange items using the **Studio**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-items-using-the-studio) + * [Syntax](../compare-exchange/delete-cmpxchg-items#syntax) + + + +--- + +## Delete compare-exchange item using a cluster-wide session + +* Delete compare-exchange items using a cluster-wide session when you want the deletion to be part of a transaction committed via `saveChanges()`. + This is suitable if you want to include compare-exchange deletions alongside other operations, such as putting or deleting documents and compare-exchange items, in a single transaction. + Learn more about cluster-wide sessions in [Cluster transactions - overview](../client-api/session/cluster-transaction/overview). + +* Use `deleteCompareExchangeValue()` to register the deletion of an existing compare-exchange item in the session. + The item will be deleted as part of the cluster-wide transaction when _saveChanges()_ is called. 
+ +* If the item's index (its version) on the server is different from the index you provide, _saveChanges()_ will throw a `ClusterTransactionConcurrencyException`. + This means the item was modified by another operation after it was loaded into the session, and the entire transaction will be rejected. + +--- + +## Delete compare-exchange items using the Studio + +You can delete one or multiple compare-exchange items from the Studio. + +![The compare-exchange view](../assets/delete-cmpxchg.png) + +1. Go to **Documents > Compare Exchange**. +2. Select the compare-exchange items you want to delete. +3. Click **Delete**. + +--- + +## Syntax + +--- + +### `deleteCompareExchangeValue` +Delete compare-exchange item using cluster-wide session: + + +```php +// Available overloads: +$session->advanced()->clusterTransaction()->deleteCompareExchangeValue($item); +$session->advanced()->clusterTransaction()->deleteCompareExchangeValue($key, $index); +``` + + +| Parameter | Type | Description | +|-----------|---------------------------|-------------| +| **item** | `CompareExchangeValue` | The compare-exchange item to delete. | +| **key** | `string` | The unique key of the compare-exchange item. | +| **index** | `int` | The current version of the item.<br/>
Deletion will only succeed if this matches the version stored on the server. | diff --git a/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-python.mdx b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-python.mdx new file mode 100644 index 0000000000..331dc75a23 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_delete-cmpxchg-items-python.mdx @@ -0,0 +1,90 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Custom compare-exchange items can be deleted**: + You can delete your own custom compare-exchange items. + An item is deleted only if the index you provide in the request matches the current index stored on the server for the specified key. + +* **Delete items by expiration**: + Compare-exchange items can also be deleted by adding an expiration date to them. + Learn more in [Compare-exchange expiration](../compare-exchange/cmpxchg-expiration). + +* **Compare-exchange tombstones**: + Whenever a compare-exchange item is deleted, a compare-exchange tombstone is created for it. + These tombstones are used to indicate to other RavenDB processes that the compare-exchange item was deleted, + so they can react accordingly. + For example, indexes referencing the deleted item will update themselves to remove those references. + Compare-exchange tombstones that are eligible for deletion are removed periodically by an internal cleanup task. + See: [Cluster.CompareExchangeTombstonesCleanupIntervalInMin](../compare-exchange/configuration#clustercompareexchangetombstonescleanupintervalinmin). + +* + Do not attempt to delete [atomic guards](../compare-exchange/atomic-guards), which RavenDB uses internally to ensure ACID guarantees in cluster-wide transactions. + These items are created automatically and must not be modified or removed. + + If your custom compare-exchange item was set up to protect the consistency of a transaction, deleting it will break the ACID guarantees. + Only delete or modify such items if you truly know what you're doing. + + +--- + +* In this article: + * [Delete compare-exchange item using a **cluster-wide session**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-item-using-a-cluster-wide-session) + * [Delete by item](../compare-exchange/delete-cmpxchg-items#delete-by-item) + * [Delete by key and index](../compare-exchange/delete-cmpxchg-items#delete-by-key-and-index) + * [Delete compare-exchange items using the **Studio**](../compare-exchange/delete-cmpxchg-items#delete-compare-exchange-items-using-the-studio) + * [Syntax](../compare-exchange/delete-cmpxchg-items#syntax) + + + +--- + +## Delete compare-exchange item using a cluster-wide session + +* Delete compare-exchange items using a cluster-wide session when you want the deletion to be part of a transaction committed via `save_changes()`. + This is suitable if you want to include compare-exchange deletions alongside other operations, such as putting or deleting documents and compare-exchange items, in a single transaction. + Learn more about cluster-wide sessions in [Cluster transactions - overview](../client-api/session/cluster-transaction/overview). + +* Use `delete_compare_exchange_value()` to register the deletion of an existing compare-exchange item in the session. + The item will be deleted as part of the cluster-wide transaction when _save_changes()_ is called. 
+ +* If the item's index (its version) on the server is different from the index you provide, _save_changes()_ will throw a `ClusterTransactionConcurrencyException`. + This means the item was modified by another operation after it was loaded into the session, and the entire transaction will be rejected. + +--- + +## Delete compare-exchange items using the Studio + +You can delete one or multiple compare-exchange items from the Studio. + +![The compare-exchange view](../assets/delete-cmpxchg.png) + +1. Go to **Documents > Compare Exchange**. +2. Select the compare-exchange items you want to delete. +3. Click **Delete**. + +--- + +## Syntax + +--- + +### `delete_compare_exchange_value` +Delete compare-exchange item using cluster-wide session: + + +```python +# Available overloads: +session.advanced.cluster_transaction.delete_compare_exchange_value(item) +session.advanced.cluster_transaction.delete_compare_exchange_value(key, index) +``` + + +| Parameter | Type | Description | +|-----------|---------------------------|-------------| +| **item** | `CompareExchangeValue[T]` | The compare-exchange item to delete. | +| **key** | `str` | The unique key of the compare-exchange item. | +| **index** | `int` | The current version of the item.<br/>
Deletion will only succeed if this matches the version stored on the server. | diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-csharp.mdx new file mode 100644 index 0000000000..ee35aaf78e --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-csharp.mdx @@ -0,0 +1,530 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To retrieve an existing compare-exchange item using the **Client API**, + use either a store operation or a cluster-wide session - as described below. + A cluster-wide session also supports lazy retrieval. + +* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_, + as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items). + +* This article shows how to get a **single** compare-exchange item by its unique _key_. + To get **multiple** items at once, see [Get multiple compare-exchange items](../compare-exchange/get-cmpxchg-items). + +* In this article: + * [Get item using a **cluster-wide session**](../compare-exchange/get-cmpxchg-item#get-item-using-a-cluster-wide-session) + * [Get compare-exchange item](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item) + * [Get compare-exchange item lazily](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item-lazily) + * [Retrieved compare-exchange items are tracked by the session](../compare-exchange/get-cmpxchg-item#retrieved-compare-exchange-items-are-tracked-by-the-session) + * [Get item using a **store operation**](../compare-exchange/get-cmpxchg-item#get-item-using-a-store-operation) + * [Get compare-exchange item that has a number value and metadata](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item-that-has-a-number-value-and-metadata) + * [Get compare-exchange item that has a custom object value](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item-that-has-a-custom-object-value) + * [Syntax](../compare-exchange/get-cmpxchg-item#syntax) + + + +--- + +## Get item using a cluster-wide session + +* You can retrieve compare-exchange items using a [cluster-wide session](../client-api/session/cluster-transaction/overview#open-a-cluster-transaction). + The session must be opened in cluster-wide mode. + +* Use the `GetCompareExchangeValue` advanced session method to get a compare-exchange item by its _key_. + If the specified key does not exist, the method returns `null`. No exception is thrown. + +* Once a compare-exchange item is retrieved using a cluster-wide session, the item is **tracked** by the session. + Repeating the same `GetCompareExchangeValue` call with the same key does not send another request to the server; + the value is returned from the session's internal state. + To force a re-fetch from the server, call `session.Advanced.Clear()` first. + +* Examples: + + #### Get compare-exchange item + + + + ```csharp + // First, let's create a compare-exchange item for the example, + // e.g. store a user's email as the key and the user document id as the value.
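+ // (The item is created here only so the Get examples below have something to retrieve.)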
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      CompareExchangeValue<string> itemToCreate =
+          session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: "users/1"
+          );
+
+      // Optionally, add some metadata:
+      itemToCreate.Metadata["email-type"] = "work email";
+
+      session.SaveChanges();
+  }
+
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Get the compare-exchange item:
+      // ==============================
+
+      CompareExchangeValue<string> retrievedItem = session.Advanced.ClusterTransaction
+          .GetCompareExchangeValue<string>("user1-name@example.com");
+
+      if (retrievedItem != null)
+      {
+          // Access the VALUE of the retrieved item
+          var userDocumentId = retrievedItem.Value; // "users/1"
+
+          // Access the METADATA of the retrieved item
+          var emailType = retrievedItem.Metadata["email-type"]; // "work email"
+
+          // Access the VERSION number of the retrieved item
+          var version = retrievedItem.Index;
+      }
+      else
+      {
+          Console.WriteLine("Compare-exchange item not found");
+      }
+  }
+  ```
+
+
+  ```csharp
+  // First, let's create a compare-exchange item for the example,
+  // e.g. store a user's email as the key and the user document id as the value.
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      CompareExchangeValue<string> itemToCreate =
+          asyncSession.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: "users/1"
+          );
+
+      // Optionally, add some metadata:
+      itemToCreate.Metadata["email-type"] = "work email";
+
+      await asyncSession.SaveChangesAsync();
+  }
+
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Get the compare-exchange item:
+      // ==============================
+
+      CompareExchangeValue<string> retrievedItem = await asyncSession.Advanced.ClusterTransaction
+          .GetCompareExchangeValueAsync<string>("user1-name@example.com");
+
+      if (retrievedItem != null)
+      {
+          // Access the VALUE of the retrieved item
+          var userDocumentId = retrievedItem.Value; // "users/1"
+
+          // Access the METADATA of the retrieved item
+          var emailType = retrievedItem.Metadata["email-type"]; // "work email"
+
+          // Access the VERSION number of the retrieved item
+          var version = retrievedItem.Index;
+      }
+      else
+      {
+          Console.WriteLine("Compare-exchange item not found");
+      }
+  }
+  ```
+
+
+
+  #### Get compare-exchange item lazily
+
+
+
+  ```csharp
+  // Create a compare-exchange item for the example:
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      CompareExchangeValue<string> itemToCreate =
+          session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: "users/1"
+          );
+      session.SaveChanges();
+  }
+
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Get the compare-exchange item lazily:
+      // =====================================
+
+      var lazyItem = session.Advanced.ClusterTransaction.Lazily
+          .GetCompareExchangeValue<string>("user1-name@example.com");
+
+      // Access the item:
+      CompareExchangeValue<string> retrievedItem = lazyItem.Value;
+
+      if (retrievedItem != null)
+      {
+          // Access the VALUE of the retrieved item
+          var userDocumentId = retrievedItem.Value; // "users/1"
+
+          // Access the VERSION number of the retrieved item
+          var version = retrievedItem.Index;
+      }
+      else
+      {
+          Console.WriteLine("Compare-exchange item not found");
+      }
+  }
+  ```
+
+
+  ```csharp
+  // Create a compare-exchange item for the example:
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      CompareExchangeValue<string> itemToCreate =
+          asyncSession.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+              key: "user1-name@example.com",
+              value: "users/1"
+          );
+
+      await asyncSession.SaveChangesAsync();
+  }
+
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Get the compare-exchange item lazily:
+      // =====================================
+
+      var lazyItem = asyncSession.Advanced.ClusterTransaction.Lazily
+          .GetCompareExchangeValueAsync<string>("user1-name@example.com");
+
+      // Access the item:
+      CompareExchangeValue<string> retrievedItem = await lazyItem.Value;
+
+      if (retrievedItem != null)
+      {
+          // Access the VALUE of the retrieved item
+          var userDocumentId = retrievedItem.Value; // "users/1"
+
+          // Access the VERSION number of the retrieved item
+          var version = retrievedItem.Index;
+      }
+      else
+      {
+          Console.WriteLine("Compare-exchange item not found");
+      }
+  }
+  ```
+
+
+
+  #### Retrieved compare-exchange items are tracked by the session
+
+
+
+  ```csharp
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // First retrieval - server call will happen
+      var item1 = session.Advanced.ClusterTransaction
+          .GetCompareExchangeValue<string>("user1-name@example.com");
+
+      // No server call - the item is returned from session
+      var item2 = session.Advanced.ClusterTransaction
+          .GetCompareExchangeValue<string>("user1-name@example.com");
+
+      // Clear tracked entities and compare-exchange items
+      session.Advanced.Clear();
+
+      // Server call will happen again
+      var item3 = session.Advanced.ClusterTransaction
+          .GetCompareExchangeValue<string>("user1-name@example.com");
+  }
+  ```
+
+
+  ```csharp
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // First retrieval - server call will happen
+      var item1 = await asyncSession.Advanced.ClusterTransaction
+          .GetCompareExchangeValueAsync<string>("user1-name@example.com");
+
+      // No server call - the item is returned from session
+      var item2 = await asyncSession.Advanced.ClusterTransaction
+          .GetCompareExchangeValueAsync<string>("user1-name@example.com");
+
+      // Clear tracked entities and compare-exchange items
+      asyncSession.Advanced.Clear();
+
+      // Server call will happen again
+      var item3 = await asyncSession.Advanced.ClusterTransaction
+          .GetCompareExchangeValueAsync<string>("user1-name@example.com");
+  }
+  ```
+
+
+
+---
+
+## Get item using a store operation
+
+* You can retrieve compare-exchange items using a [store operation](../client-api/operations/what-are-operations).
+  Use the `GetCompareExchangeValueOperation` operation to get a compare-exchange item by its _key_.
+
+* If the specified key does not exist, the operation returns `null`. No exception is thrown.
+
+* Examples:
+
+  #### Get compare-exchange item that has a number value and metadata
+
+
+
+  ```csharp
+  // First, let's create a new compare-exchange item for the example,
+  // e.g. store the number of sales made by an employee as the value + some metadata info:
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<long>("employees/1-A", 12345, 0,
+      new MetadataAsDictionary
+      {
+          { "Department", "Sales" },
+          { "Role", "Salesperson" }
+      });
+
+  CompareExchangeResult<long> putResult = store.Operations.Send(putCmpXchgOp);
+
+  // Get the compare-exchange item:
+  // ==============================
+
+  // Define the get compare-exchange operation, pass the unique item key
+  var getCmpXchgOp = new GetCompareExchangeValueOperation<long>("employees/1-A");
+
+  // Execute the operation by passing it to Operations.Send
+  CompareExchangeValue<long> retrievedItem = store.Operations.Send(getCmpXchgOp);
+
+  if (retrievedItem != null)
+  {
+      // Access the VALUE of the retrieved item
+      long numberOfSales = retrievedItem.Value; // 12345
+
+      // Access the METADATA of the retrieved item
+      var employeeRole = retrievedItem.Metadata["Role"]; // "Salesperson"
+
+      // Access the VERSION number of the retrieved item
+      long version = retrievedItem.Index;
+  }
+  else
+  {
+      Console.WriteLine("Compare-exchange item not found");
+  }
+  ```
+
+
+  ```csharp
+  // First, let's create a new compare-exchange item for the example,
+  // e.g. store the number of sales made by an employee as the value + some metadata info:
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<long>("employees/1-A", 12345, 0,
+      new MetadataAsDictionary
+      {
+          { "Department", "Sales" },
+          { "Role", "Salesperson" }
+      });
+
+  CompareExchangeResult<long> putResult = await store.Operations.SendAsync(putCmpXchgOp);
+
+  // Get the compare-exchange item:
+  // ==============================
+
+  // Define the get compare-exchange operation, pass the unique item key
+  var getCmpXchgOp = new GetCompareExchangeValueOperation<long>("employees/1-A");
+
+  // Execute the operation by passing it to Operations.SendAsync
+  CompareExchangeValue<long> retrievedItem = await store.Operations.SendAsync(getCmpXchgOp);
+
+  if (retrievedItem != null)
+  {
+      // Access the VALUE of the retrieved item
+      long numberOfSales = retrievedItem.Value; // 12345
+
+      // Access the METADATA of the retrieved item
+      var employeeRole = retrievedItem.Metadata["Role"]; // "Salesperson"
+
+      // Access the VERSION number of the retrieved item
+      long version = retrievedItem.Index;
+  }
+  else
+  {
+      Console.WriteLine("Compare-exchange item not found");
+  }
+  ```
+
+
+
+  #### Get compare-exchange item that has a custom object value
+
+
+
+  ```csharp
+  // Create a new compare-exchange item for the example:
+  // Put a new compare-exchange item with an object as the value
+  var employeeRole = new EmployeeRole
+  {
+      Role = "Salesperson",
+      Department = "Sales",
+      NumberOfSales = 12345
+  };
+
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<EmployeeRole>(
+      "employees/1-A", employeeRole, 0);
+
+  CompareExchangeResult<EmployeeRole> putResult = store.Operations.Send(putCmpXchgOp);
+
+  // Get the compare-exchange item:
+  // ==============================
+
+  // Define the get compare-exchange operation, pass the unique item key
+  var getCmpXchgOp = new GetCompareExchangeValueOperation<EmployeeRole>("employees/1-A");
+
+  // Execute the operation by passing it to Operations.Send
+  CompareExchangeValue<EmployeeRole> retrievedItem = store.Operations.Send(getCmpXchgOp);
+
+  if (retrievedItem != null)
+  {
+      // Access the VALUE of the retrieved item
+      var employeeDetails = retrievedItem.Value;
+      var objectType = employeeDetails.GetType();     // typeof(EmployeeRole)
+      var role = employeeDetails.Role;                // "Salesperson"
+      var department = employeeDetails.Department;    // "Sales"
+      var sales = employeeDetails.NumberOfSales;      // 12345
+
+      // Access the VERSION number of the retrieved item
+      long version = retrievedItem.Index;
+  }
+  else
+  {
+      Console.WriteLine("Compare-exchange item not found");
+  }
+  ```
+
+
+  ```csharp
+  // Create a new compare-exchange item for the example:
+  // Put a new compare-exchange item with an object as the value
+  var employeeRole = new EmployeeRole
+  {
+      Role = "Salesperson",
+      Department = "Sales",
+      NumberOfSales = 12345
+  };
+
+  var putCmpXchgOp = new PutCompareExchangeValueOperation<EmployeeRole>(
+      "employees/1-A", employeeRole, 0);
+
+  CompareExchangeResult<EmployeeRole> putResult = await store.Operations.SendAsync(putCmpXchgOp);
+
+  // Get the compare-exchange item:
+  // ==============================
+
+  // Define the get compare-exchange operation, pass the unique item key
+  var getCmpXchgOp = new GetCompareExchangeValueOperation<EmployeeRole>("employees/1-A");
+
+  // Execute the operation by passing it to Operations.SendAsync
+  CompareExchangeValue<EmployeeRole> retrievedItem = await store.Operations.SendAsync(getCmpXchgOp);
+
+  if (retrievedItem != null)
+  {
+      // Access the VALUE of the retrieved item
+      var employeeDetails = retrievedItem.Value;
+      var objectType = employeeDetails.GetType();     // typeof(EmployeeRole)
+      var role = employeeDetails.Role;                // "Salesperson"
+      var department = employeeDetails.Department;    // "Sales"
+      var sales = employeeDetails.NumberOfSales;      // 12345
+
+      // Access the VERSION number of the retrieved item
+      long version = retrievedItem.Index;
+  }
+  else
+  {
+      Console.WriteLine("Compare-exchange item not found");
+  }
+  ```
+
+
+  ```csharp
+  public class EmployeeRole
+  {
+      public string Id { get; set; }
+      public string Department { get; set; } = "";
+      public string Role { get; set; } = "";
+      public int NumberOfSales { get; set; }
+  }
+  ```
+
+
+
+---
+
+## Syntax
+
+---
+
+### `GetCompareExchangeValueOperation`
+Get a compare-exchange item using a store operation:
+
+
+```csharp
+public GetCompareExchangeValueOperation<T>(string key);
+```
+
+
+### `GetCompareExchangeValue`
+Get a compare-exchange item using a cluster-wide session:
+
+
+```csharp
+CompareExchangeValue<T> GetCompareExchangeValue<T>(string key);
+Task<CompareExchangeValue<T>> GetCompareExchangeValueAsync<T>(string key,
+    CancellationToken token = default);
+
+Lazy<CompareExchangeValue<T>> GetCompareExchangeValue<T>(string key);
+Lazy<Task<CompareExchangeValue<T>>> GetCompareExchangeValueAsync<T>(string key,
+    CancellationToken token = default);
+```
+
+
+| Input parameter | Type | Description |
+|-----------------|----------|-----------------------------------------------------------------|
+| **key** | `string` | The unique identifier of the compare-exchange item to retrieve. |
+
+| The returned object: | Description |
+|---------------------------|----------------------------------------------------------------|
+| `CompareExchangeValue<T>` | The compare-exchange item is returned.<br/>Returns `null` if key doesn't exist. |
+
+
+```csharp
+public class CompareExchangeValue<T>
+{
+    // The unique identifier of the compare-exchange item.
+    public string Key { get; }
+
+    // The existing `value` of the returned compare-exchange item.
+    public T Value { get; set; }
+
+    // The compare-exchange item's version.
+    public long Index { get; internal set; }
+
+    // The existing `metadata` of the returned compare-exchange item.
+    public IMetadataDictionary Metadata;
+}
+```
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-java.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-java.mdx
new file mode 100644
index 0000000000..3e8a562730
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-java.mdx
@@ -0,0 +1,121 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To retrieve an existing compare-exchange item using the **Client API**,
+  use either a store operation or a cluster-wide session - as described below.
+  A cluster-wide session also supports lazy retrieval.
+
+* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_,
+  as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items).
+
+* This article shows how to get a **single** compare-exchange item by its unique _key_.
+  To get **multiple** items at once, see [Get multiple compare-exchange items](../compare-exchange/get-cmpxchg-items).
+
+* In this article:
+  * [Get item using a **store operation**](../compare-exchange/get-cmpxchg-item#get-item-using-a-store-operation)
+    * [Get compare-exchange item that has a number value](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item-that-has-a-number-value)
+    * [Get compare-exchange item that has a custom object value](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item-that-has-a-custom-object-value)
+  * [Syntax](../compare-exchange/get-cmpxchg-item#syntax)
+
+
+
+---
+
+## Get item using a store operation
+
+* You can retrieve compare-exchange items using a [store operation](../client-api/operations/what-are-operations).
+  Use the `GetCompareExchangeValueOperation` operation to get a compare-exchange item by its _key_.
+
+* If the specified key does not exist, the operation returns `null`. No exception is thrown.
+
+* Examples:
+
+  #### Get compare-exchange item that has a number value
+
+
+  ```java
+  CompareExchangeValue<Long> readResult =
+      store.operations().send(new GetCompareExchangeValueOperation<>(Long.class, "nextClientId"));
+
+  Long value = readResult.getValue();
+  ```
+
+
+  #### Get compare-exchange item that has a custom object value
+
+
+  ```java
+  CompareExchangeValue<User> readResult = store.operations().send(
+      new GetCompareExchangeValueOperation<>(User.class, "AdminUser"));
+
+  User admin = readResult.getValue();
+  ```
+
+
+---
+
+## Syntax
+
+---
+
+### `GetCompareExchangeValueOperation`
+Get a compare-exchange item using a store operation:
+
+
+```java
+GetCompareExchangeValueOperation(Class<T> clazz, String key);
+```
+
+
+
+| Input parameter | Type | Description |
+|-----------------|----------|-----------------------------------------------------------------|
+| **key** | `String` | The unique identifier of the compare-exchange item to retrieve. |
+
+| The returned object: | Description |
+|---------------------------|----------------------------------------------------------------|
+| `CompareExchangeValue<T>` | The compare-exchange item is returned.<br/>Returns `null` if key doesn't exist. |
+
+
+```java
+public class CompareExchangeValue<T> {
+    private String key;  // The unique identifier of the compare-exchange item.
+    private long index;  // The compare-exchange item's version.
+    private T value;     // The existing `value` of the returned compare-exchange item.
+
+    public CompareExchangeValue(String key, long index, T value) {
+        this.key = key;
+        this.index = index;
+        this.value = value;
+    }
+
+    public String getKey() {
+        return key;
+    }
+
+    public void setKey(String key) {
+        this.key = key;
+    }
+
+    public long getIndex() {
+        return index;
+    }
+
+    public void setIndex(long index) {
+        this.index = index;
+    }
+
+    public T getValue() {
+        return value;
+    }
+
+    public void setValue(T value) {
+        this.value = value;
+    }
+}
+```
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-nodejs.mdx
new file mode 100644
index 0000000000..2ff0323223
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-nodejs.mdx
@@ -0,0 +1,346 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To retrieve an existing compare-exchange item using the **Client API**,
+  use either a store operation or a cluster-wide session - as described below.
+  A cluster-wide session also supports lazy retrieval.
+
+* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_,
+  as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items).
+
+* This article shows how to get a **single** compare-exchange item by its unique _key_.
+  To get **multiple** items at once, see [Get multiple compare-exchange items](../compare-exchange/get-cmpxchg-items).
+
+* In this article:
+  * [Get item using a **cluster-wide session**](../compare-exchange/get-cmpxchg-item#get-item-using-a-cluster-wide-session)
+    * [Get compare-exchange item](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item)
+    * [Get compare-exchange item lazily](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item-lazily)
+    * [Retrieved compare-exchange items are tracked by the session](../compare-exchange/get-cmpxchg-item#retrieved-compare-exchange-items-are-tracked-by-the-session)
+  * [Get item using a **store operation**](../compare-exchange/get-cmpxchg-item#get-item-using-a-store-operation)
+    * [Get compare-exchange item that has a number value and metadata](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item-that-has-a-number-value-and-metadata)
+    * [Get compare-exchange item that has a custom object value](../compare-exchange/get-cmpxchg-item#get-compare-exchange-item-that-has-a-custom-object-value)
+  * [Syntax](../compare-exchange/get-cmpxchg-item#syntax)
+
+
+
+---
+
+## Get item using a cluster-wide session
+
+* You can retrieve compare-exchange items using a [cluster-wide session](../client-api/session/cluster-transaction/overview#open-a-cluster-transaction).
+  The session must be opened in cluster-wide mode.
+
+* Use the `getCompareExchangeValue` advanced session method to get a compare-exchange item by its _key_.
+  If the specified key does not exist, the method returns `null`. No exception is thrown.
+
+* Once a compare-exchange item is retrieved using a cluster-wide session, the item is **tracked** by the session.
+  Repeating the same `getCompareExchangeValue` call with the same key does not send another request to the server;
+  the value is returned from the session's internal state.
+  To force a re-fetch from the server, call `session.advanced.clear()` first.
+
+* Examples:
+
+  #### Get compare-exchange item
+
+
+  ```js
+  // First, let's create a compare-exchange item for the example,
+  // e.g. store a user's email as the key and the user document id as the value.
+
+  // The session must be opened in cluster-wide mode.
+  const session = documentStore.openSession({
+      transactionMode: "ClusterWide"
+  });
+
+  const itemToCreate = session.advanced.clusterTransaction.createCompareExchangeValue(
+      "user1-name@example.com", "users/1" // key, value
+  );
+
+  // Optionally, add some metadata:
+  itemToCreate.metadata["email-type"] = "work email";
+
+  await session.saveChanges();
+
+  // Get the compare-exchange item:
+  // ==============================
+
+  const item = await session.advanced.clusterTransaction.getCompareExchangeValue("user1-name@example.com");
+
+  if (item) {
+      // Access the VALUE of the retrieved item
+      const userDocumentId = item.value; // "users/1"
+
+      // Access the METADATA of the retrieved item
+      const emailType = item.metadata["email-type"]; // "work email"
+
+      // Access the VERSION number of the retrieved item
+      const version = item.index;
+  } else {
+      console.log("Compare-exchange item not found");
+  }
+  ```
+
+
+
+  #### Get compare-exchange item lazily
+
+
+
+  ```js
+  // Create a compare-exchange item for the example:
+  const session = documentStore.openSession({
+      transactionMode: "ClusterWide"
+  });
+  session.advanced.clusterTransaction.createCompareExchangeValue(
+      "user1-name@example.com", { userDocumentId: "users/1" }
+  );
+  await session.saveChanges();
+
+  // Get the compare-exchange item lazily:
+  // =====================================
+
+  const lazyItem = session.advanced.clusterTransaction.lazily
+      .getCompareExchangeValue("user1-name@example.com");
+
+  // Access the item:
+  const item = await lazyItem.getValue();
+
+  if (item) {
+      // Access the VALUE of the retrieved item
+      const userDocumentId = item.value; // { "userDocumentId": "users/1" }
+
+      // Access the VERSION number of the retrieved item
+      const version = item.index;
+  } else {
+      console.log("Compare-exchange item not found");
+  }
+  ```
+
+
+
+  #### Retrieved compare-exchange items are tracked by the session
+
+
+
+  ```js
+  const session = documentStore.openSession({
+      transactionMode: "ClusterWide"
+  });
+
+  // First retrieval - server call will happen
+  const item1 = await session.advanced.clusterTransaction.getCompareExchangeValue(
+      "user1-name@example.com");
+
+  // No server call - the item is returned from session tracking
+  const item2 = await session.advanced.clusterTransaction.getCompareExchangeValue(
+      "user1-name@example.com");
+
+  // Clear tracked entities and compare-exchange items
+  session.advanced.clear();
+
+  // Server call will happen again
+  const item3 = await session.advanced.clusterTransaction.getCompareExchangeValue(
+      "user1-name@example.com");
+  ```
+
+
+
+---
+
+## Get item using a store operation
+
+* You can retrieve compare-exchange items using a [store operation](../client-api/operations/what-are-operations).
+  Use the `GetCompareExchangeValueOperation` operation to get a compare-exchange item by its _key_.
+
+* If the specified key does not exist, the operation returns `null`. No exception is thrown.
+
+* Examples:
+
+  #### Get compare-exchange item that has a number value and metadata
+
+
+
+  ```js
+  // First, let's create a new compare-exchange item for the example,
+  // e.g. store the number of sales made by an employee as the value + some metadata info:
+  const putCmpXchgOp = new PutCompareExchangeValueOperation("employees/1-A", 12345, 0, {
+      "Department": "Sales",
+      "Role": "Salesperson",
+  });
+  const putResult = await documentStore.operations.send(putCmpXchgOp);
+
+  // Get the compare-exchange item:
+  // ==============================
+
+  // Define the get compare-exchange operation, pass the unique item key
+  const getCmpXchgOp = new GetCompareExchangeValueOperation("employees/1-A");
+
+  // Execute the operation by passing it to operations.send
+  const item = await documentStore.operations.send(getCmpXchgOp);
+
+  if (item) {
+      // Access the VALUE of the retrieved item
+      const numberOfSales = item.value; // 12345
+
+      // Access the METADATA of the retrieved item
+      const employeeRole = item.metadata["Role"]; // "Salesperson"
+
+      // Access the VERSION number of the retrieved item
+      const version = item.index;
+  } else {
+      console.log("Compare-exchange item not found");
+  }
+  ```
+
+
+
+  #### Get compare-exchange item that has a custom object value
+
+
+
+  ```js
+  // Put a new compare-exchange item with an object as the value
+  const employee = new EmployeeRole();
+  employee.role = "Salesperson";
+  employee.department = "Sales";
+  employee.numberOfSales = 12345;
+
+  const putCmpXchgOp = new PutCompareExchangeValueOperation("employees/1-A", employee, 0);
+  const putResult = await documentStore.operations.send(putCmpXchgOp);
+
+  // Get the compare-exchange item:
+  // ==============================
+
+  // Define the get compare-exchange operation, pass the unique item key & the class type
+  const getCmpXchgOp = new GetCompareExchangeValueOperation("employees/1-A", EmployeeRole);
+
+  // Execute the operation by passing it to operations.send
+  const item = await documentStore.operations.send(getCmpXchgOp);
+
+  if (item) {
+      // Access the VALUE of the retrieved item
+      const employeeResult = item.value;
+      const employeeClass = employeeResult.constructor; // EmployeeRole
+
+      const employeeRole = employeeResult.role;           // "Salesperson"
+      const employeeDep = employeeResult.department;      // "Sales"
+      const employeeSales = employeeResult.numberOfSales; // 12345
+
+      // Access the VERSION number of the retrieved item
+      const version = item.index;
+  } else {
+      console.log("Compare-exchange item not found");
+  }
+  ```
+
+
+  ```js
+  class EmployeeRole {
+      constructor(
+          id = null,
+          department = "",
+          role = "",
+          numberOfSales = 0
+      ) {
+          Object.assign(this, {
+              id,
+              department,
+              role,
+              numberOfSales
+          });
+      }
+  }
+  ```
+
+
+
+---
+
+## Syntax
+
+---
+
+### `GetCompareExchangeValueOperation`
+Get a compare-exchange item using a store operation:
+
+
+```js
+const getCmpXchgOp = new GetCompareExchangeValueOperation(key, clazz, materializeMetadata);
+```
+
+
+| Parameter | Type | Description |
+|-------------------------|-----------|-----------------------------------------------------------------------------------------------------------------|
+| **key** | `string` | The unique identifier of the compare-exchange item to retrieve. |
+| **clazz** | `object` | The class type of the item's value. |
+| **materializeMetadata** | `boolean` | Metadata is retrieved and available regardless of this parameter's value; it is used for internal purposes.
|
+
+| Returned object | Description |
+|------------------------|---------------------------------------------------------------------------------|
+| `CompareExchangeValue` | The compare-exchange item is returned.<br/>Returns `null` if key doesn't exist. |
+
+---
+
+### `getCompareExchangeValue`
+Get a compare-exchange item using a cluster-wide session:
+
+
+```js
+await session.advanced.clusterTransaction.getCompareExchangeValue(key);
+```
+
+
+| Parameter | Type | Description |
+|------------|----------|--------------------------------------------------|
+| **key** | `string` | The key of the compare-exchange item to retrieve |
+
+| `getCompareExchangeValue` returns: | Description |
+|---------------------------|---------------------------------------------------------------------------------|
+| `CompareExchangeValue` | The compare-exchange item is returned.<br/>Returns `null` if key doesn't exist. |
+
+---
+
+### `lazily.getCompareExchangeValue`
+Get a compare-exchange item lazily, using a cluster-wide session:
+
+
+```js
+const lazyItem = session.advanced.clusterTransaction.lazily.getCompareExchangeValue(key);
+const item = await lazyItem.getValue();
+```
+
+
+| Parameter | Type | Description |
+|------------|----------|--------------------------------------------------|
+| **key** | `string` | The key of the compare-exchange item to retrieve |
+
+| Return value - after calling `getValue()` | Description |
+|---------------------------|---------------------------------------------------------------------------------|
+| `CompareExchangeValue` | The compare-exchange item is returned.<br/>Returns `null` if key doesn't exist. |
+
+---
+
+
+```js
+// The CompareExchangeValue object:
+// ================================
+
+class CompareExchangeValue {
+    // The unique identifier of the compare-exchange item.
+    key; // string
+
+    // The existing `value` of the returned compare-exchange item.
+    value; // object
+
+    // The existing `metadata` of the returned compare-exchange item.
+    metadata; // object
+
+    // The compare-exchange item's version.
+    index; // number
+}
+```
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-php.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-php.mdx
new file mode 100644
index 0000000000..9615ea7e60
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-php.mdx
@@ -0,0 +1,73 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To retrieve an existing compare-exchange item using the **Client API**,
+  use either a store operation or a cluster-wide session - as described below.
+  A cluster-wide session also supports lazy retrieval.
+
+* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_,
+  as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items).
+
+* This article shows how to get a **single** compare-exchange item by its unique _key_.
+  To get **multiple** items at once, see [Get multiple compare-exchange items](../compare-exchange/get-cmpxchg-items).
+
+* In this article:
+  * [Get item using a **cluster-wide session**](../compare-exchange/get-cmpxchg-item#get-item-using-a-cluster-wide-session)
+  * [Syntax](../compare-exchange/get-cmpxchg-item#syntax)
+
+
+
+---
+
+## Get item using a cluster-wide session
+
+* You can retrieve compare-exchange items using a [cluster-wide session](../client-api/session/cluster-transaction/overview#open-a-cluster-transaction).
+  The session must be opened in cluster-wide mode.
+
+* Use the `getCompareExchangeValue` advanced session method to get a compare-exchange item by its _key_,
+  as shown in the sketch below.
+  If the specified key does not exist, the method returns `null`. No exception is thrown.
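+
+For illustration, here is a minimal, hedged sketch of retrieving an item in a cluster-wide session.
+The bootstrap code (`$store`, `SessionOptions`, `TransactionMode`) and the getter names are assumptions
+based on the PHP client's general conventions and may differ slightly in your client version:
+
+```php
+<?php
+// A sketch only - assumes an initialized DocumentStore in $store.
+$options = new SessionOptions();
+$options->setTransactionMode(TransactionMode::clusterWide());
+
+$session = $store->openSession($options);
+try {
+    // Get the compare-exchange item by its key
+    // (the first argument is the expected value class; null for a plain string value)
+    $item = $session->advanced()->clusterTransaction()
+        ->getCompareExchangeValue(null, "user1-name@example.com");
+
+    if ($item !== null) {
+        $value = $item->getValue(); // e.g. "users/1"
+        $index = $item->getIndex(); // the item's version
+    } else {
+        echo "Compare-exchange item not found";
+    }
+} finally {
+    $session->close();
+}
+```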
+
+---
+
+## Syntax
+
+---
+
+### `getCompareExchangeValue`
+Get a compare-exchange item using a cluster-wide session:
+
+
+```php
+$session->advanced()->clusterTransaction()->getCompareExchangeValue(null, $key);
+```
+
+
+| Input parameter | Type | Description |
+|-----------------|----------|-----------------------------------------------------------------|
+| **key** | `string` | The unique identifier of the compare-exchange item to retrieve. |
+
+| The returned object: | Description |
+|---------------------------|----------------------------------------------------------------|
+| `CompareExchangeValue<T>` | The compare-exchange item is returned.<br/>Returns `null` if key doesn't exist. |
+
+
+### `lazily()->getCompareExchangeValue`
+Get a compare-exchange item lazily, using a cluster-wide session:
+
+
+```php
+$session->advanced()->clusterTransaction()->lazily()->getCompareExchangeValue(null, $key);
+```
+
+
+| Input parameter | Type | Description |
+|-----------------|----------|-----------------------------------------------------------------|
+| **key** | `string` | The unique identifier of the compare-exchange item to retrieve. |
+
+| The returned object: | Description |
+|--------------------------------|----------------------------------------------------------------|
+| `Lazy<CompareExchangeValue<T>>` | The compare-exchange item is returned.<br/>Returns `null` if key doesn't exist. |
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-python.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-python.mdx
new file mode 100644
index 0000000000..bfeccdb9d6
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-item-python.mdx
@@ -0,0 +1,73 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To retrieve an existing compare-exchange item using the **Client API**,
+  use either a store operation or a cluster-wide session - as described below.
+  A cluster-wide session also supports lazy retrieval.
+
+* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_,
+  as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items).
+
+* This article shows how to get a **single** compare-exchange item by its unique _key_.
+  To get **multiple** items at once, see [Get multiple compare-exchange items](../compare-exchange/get-cmpxchg-items).
+
+* In this article:
+  * [Get item using a **cluster-wide session**](../compare-exchange/get-cmpxchg-item#get-item-using-a-cluster-wide-session)
+  * [Syntax](../compare-exchange/get-cmpxchg-item#syntax)
+
+
+
+---
+
+## Get item using a cluster-wide session
+
+* You can retrieve compare-exchange items using a [cluster-wide session](../client-api/session/cluster-transaction/overview#open-a-cluster-transaction).
+  The session must be opened in cluster-wide mode.
+
+* Use the `get_compare_exchange_value` advanced session method to get a compare-exchange item by its _key_,
+  as shown in the sketch below.
+  If the specified key does not exist, the method returns `None`. No exception is thrown.
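+
+For illustration, here is a minimal, hedged sketch of retrieving an item in a cluster-wide session,
+both directly and lazily. It assumes an initialized `DocumentStore` named `store` and the
+`ravendb` client's import path for `SessionOptions`/`TransactionMode` (adjust if your client version differs):
+
+```python
+from ravendb.documents.session.misc import SessionOptions, TransactionMode
+
+# A sketch only - the session bootstrap and the key are assumptions for this example
+session_options = SessionOptions(transaction_mode=TransactionMode.CLUSTER_WIDE)
+
+with store.open_session(session_options=session_options) as session:
+    # Get the compare-exchange item by its key
+    item = session.advanced.cluster_transaction.get_compare_exchange_value(
+        "user1-name@example.com")
+
+    if item is not None:
+        value = item.value  # e.g. "users/1"
+        index = item.index  # the item's version
+    else:
+        print("Compare-exchange item not found")
+
+    # The same item can also be retrieved lazily:
+    lazy_item = session.advanced.cluster_transaction.lazily.get_compare_exchange_value(
+        "user1-name@example.com")
+    item = lazy_item.value  # the server call happens here
+```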
+
+---
+
+## Syntax
+
+---
+
+### `get_compare_exchange_value`
+Get a compare-exchange item using a cluster-wide session:
+
+
+```python
+session.advanced.cluster_transaction.get_compare_exchange_value(key)
+```
+
+
+| Input parameter | Type | Description |
+|-----------------|-------|-----------------------------------------------------------------|
+| **key** | `str` | The unique identifier of the compare-exchange item to retrieve. |
+
+| The returned object: | Description |
+|----------------------------|----------------------------------------------------------------|
+| `CompareExchangeValue[T]` | The compare-exchange item is returned.<br/>Returns `None` if key doesn't exist. |
+
+
+### `lazily.get_compare_exchange_value`
+Get a compare-exchange item lazily, using a cluster-wide session:
+
+
+```python
+session.advanced.cluster_transaction.lazily.get_compare_exchange_value(key)
+```
+
+
+| Input parameter | Type | Description |
+|-----------------|-------|-----------------------------------------------------------------|
+| **key** | `str` | The unique identifier of the compare-exchange item to retrieve. |
+
+| The returned object: | Description |
+|---------------------------------|----------------------------------------------------------------|
+| `Lazy[CompareExchangeValue[T]]` | The compare-exchange item is returned.<br/>Returns `None` if key doesn't exist. |
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-csharp.mdx
new file mode 100644
index 0000000000..c6845fc4b3
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-csharp.mdx
@@ -0,0 +1,456 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can retrieve multiple existing compare-exchange items at once using the **Client API** by either:
+  * specifying a list of unique keys, or
+  * using a common key prefix
+
+  Retrieval can be done using either a store operation or a cluster-wide session - as described below.
+  A cluster-wide session also supports lazy retrieval.
+
+* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_,
+  as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items).
+
+* This article shows how to get **multiple** compare-exchange items.
+  To get a **single** item, see [Get compare-exchange item](../compare-exchange/get-cmpxchg-item).
+
+* In this article:
+  * [Create sample compare-exchange items](../compare-exchange/get-cmpxchg-items#create-sample-compare-exchange-items)
+  * [Get compare-exchange items by list of keys](../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-by-list-of-keys)
+  * [Get compare-exchange items by prefix](../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-by-prefix)
+  * [Get compare-exchange items count](../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-count)
+  * [Syntax](../compare-exchange/get-cmpxchg-items#syntax)
+
+
+
+---
+
+## Create sample compare-exchange items
+
+Let’s create some sample compare-exchange items to use in the examples below.
+To learn about ALL the available methods for creating a compare-exchange item, see [Create compare-exchange item](../compare-exchange/create-cmpxchg-items).
+
+
+```csharp
+store.Operations.Send(
+    new PutCompareExchangeValueOperation<string>("employees/1", "someValue1", 0));
+store.Operations.Send(
+    new PutCompareExchangeValueOperation<string>("employees/2", "someValue2", 0));
+store.Operations.Send(
+    new PutCompareExchangeValueOperation<string>("employees/3", "someValue3", 0));
+store.Operations.Send(
+    new PutCompareExchangeValueOperation<string>("customers/1", "someValue4", 0));
+store.Operations.Send(
+    new PutCompareExchangeValueOperation<string>("customers/2", "someValue5", 0));
+```
+
+
+---
+
+## Get compare-exchange items by list of keys
+
+* To retrieve multiple compare-exchange items by specifying a list of unique keys, use either:
+
+  * **Cluster-wide session**:
+    Use the `GetCompareExchangeValues` session method – which also supports lazy retrieval.
+    If one of the specified keys does not exist, its corresponding entry in the returned dictionary will be `null`.
+    No exception is thrown.
+
+  * **Store operation**:
+    Use the `GetCompareExchangeValuesOperation` store operation.
+    If one of the specified keys does not exist, the `.Value` property of the corresponding entry in the returned dictionary is `null`.
+    No exception is thrown.
+
+* Examples:
+
+  #### Get compare-exchange items by list of keys
+
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode.
+  // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode.
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Define the list of keys of the compare-exchange items to retrieve
+      var keys = new[] { "employees/1", "employees/2", "customers/2", "non-existing-key" };
+
+      // Call 'GetCompareExchangeValues', pass the list of keys
+      Dictionary<string, CompareExchangeValue<string>> items =
+          session.Advanced.ClusterTransaction.GetCompareExchangeValues<string>(keys);
+
+      // Check results
+      Console.WriteLine($"Number of compare-exchange items retrieved: {items.Count}"); // Expecting 4
+
+      // Access a retrieved item - an existing key
+      if (items.TryGetValue("employees/1", out var item))
+      {
+          string value = item.Value; // "someValue1"
+          long version = item.Index;
+      }
+
+      // The entry of a non-existing key will be null
+      if (items.TryGetValue("non-existing-key", out var nonExistingItem))
+      {
+          Console.WriteLine(nonExistingItem == null); // true
+      }
+  }
+  ```
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode.
+  // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode.
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Define the list of keys of the compare-exchange items to retrieve
+      var keys = new[] { "employees/1", "employees/2", "customers/2", "non-existing-key" };
+
+      // Call 'GetCompareExchangeValuesAsync', pass the list of keys
+      Dictionary<string, CompareExchangeValue<string>> items =
+          await asyncSession.Advanced.ClusterTransaction.GetCompareExchangeValuesAsync<string>(keys);
+
+      // Check results
+      Console.WriteLine($"Number of compare-exchange items retrieved: {items.Count}"); // Expecting 4
+
+      // Access a retrieved item - an existing key
+      if (items.TryGetValue("employees/1", out var item))
+      {
+          string value = item.Value; // "someValue1"
+          long version = item.Index;
+      }
+
+      // The entry of a non-existing key will be null
+      if (items.TryGetValue("non-existing-key", out var nonExistingItem))
+      {
+          Console.WriteLine(nonExistingItem == null); // true
+      }
+  }
+  ```
+
+
+  ```csharp
+  // Define the list of keys of the compare-exchange items to retrieve
+  var keys = new[] { "employees/1", "employees/2", "customers/2", "non-existing-key" };
+
+  // Define the get compare-exchange items operation, pass the list of keys
+  var getCmpXchgItemsOp = new GetCompareExchangeValuesOperation<string>(keys);
+
+  // Execute the operation by passing it to Operations.Send
+  Dictionary<string, CompareExchangeValue<string>> items = store.Operations.Send(getCmpXchgItemsOp);
+
+  // Check results
+  Console.WriteLine($"Number of compare-exchange items retrieved: {items.Count}"); // Expecting 4
+
+  // Access a retrieved item - an existing key
+  if (items.TryGetValue("employees/1", out var item))
+  {
+      string value = item.Value; // "someValue1"
+      long version = item.Index; // e.g. 321
+  }
+
+  // The 'Value' of the non-existing key will be null
+  if (items.TryGetValue("non-existing-key", out var nonExistingItem))
+  {
+      string value = nonExistingItem.Value; // null
+      long version = nonExistingItem.Index; // -1
+  }
+  ```
+
+
+  ```csharp
+  // Define the list of keys of the compare-exchange items to retrieve
+  var keys = new[] { "employees/1", "employees/2", "customers/2", "non-existing-key" };
+
+  // Define the get compare-exchange items operation, pass the list of keys
+  var getCmpXchgItemsOp = new GetCompareExchangeValuesOperation<string>(keys);
+
+  // Execute the operation by passing it to Operations.SendAsync
+  Dictionary<string, CompareExchangeValue<string>> items = await
+      store.Operations.SendAsync(getCmpXchgItemsOp);
+
+  // Check results
+  Console.WriteLine($"Number of compare-exchange items retrieved: {items.Count}"); // Expecting 4
+
+  // Access a retrieved item - an existing key
+  if (items.TryGetValue("employees/1", out var item))
+  {
+      string value = item.Value; // "someValue1"
+      long version = item.Index; // e.g. 321
+  }
+
+  // The 'Value' of the non-existing key will be null
+  if (items.TryGetValue("non-existing-key", out var nonExistingItem))
+  {
+      string value = nonExistingItem.Value; // null
+      long version = nonExistingItem.Index; // -1
+  }
+  ```
+
+
+
+  #### Get compare-exchange items by list of keys - lazily
+
+  A list of compare-exchange items can be retrieved lazily when working within a cluster-wide session.
+
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode.
+  // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode.
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Define the list of keys of the compare-exchange items to retrieve
+      var keys = new[] { "employees/1", "employees/2", "customers/2", "non-existing-key" };
+
+      // Call 'Lazily.GetCompareExchangeValues', pass the list of keys
+      var lazyItems =
+          session.Advanced.ClusterTransaction.Lazily.GetCompareExchangeValues<string>(keys);
+
+      Dictionary<string, CompareExchangeValue<string>> items = lazyItems.Value;
+
+      // Check results
+      Console.WriteLine($"Number of compare-exchange items retrieved: {items.Count}"); // Expecting 4
+
+      // Access a retrieved item - an existing key
+      if (items.TryGetValue("employees/1", out var item))
+      {
+          string value = item.Value; // "someValue1"
+          long version = item.Index; // e.g. 321
+      }
+
+      // The entry of a non-existing key will be null
+      if (items.TryGetValue("non-existing-key", out var nonExistingItem))
+      {
+          Console.WriteLine(nonExistingItem == null); // true
+      }
+  }
+  ```
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode.
+  // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode.
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Define the list of keys of the compare-exchange items to retrieve
+      var keys = new[] { "employees/1", "employees/2", "customers/2", "non-existing-key" };
+
+      // Call 'Lazily.GetCompareExchangeValuesAsync', pass the list of keys
+      var lazyItems = asyncSession.Advanced.ClusterTransaction
+          .Lazily.GetCompareExchangeValuesAsync<string>(keys);
+
+      Dictionary<string, CompareExchangeValue<string>> items = await lazyItems.Value;
+
+      // Check results
+      Console.WriteLine($"Number of compare-exchange items retrieved: {items.Count}"); // Expecting 4
+
+      // Access a retrieved item - an existing key
+      if (items.TryGetValue("employees/1", out var item))
+      {
+          string value = item.Value; // "someValue1"
+          long version = item.Index;
+      }
+
+      // The entry of a non-existing key will be null
+      if (items.TryGetValue("non-existing-key", out var nonExistingItem))
+      {
+          Console.WriteLine(nonExistingItem == null); // true
+      }
+  }
+  ```
+
+
+
+---
+
+## Get compare-exchange items by prefix
+
+* You can retrieve compare-exchange items whose keys start with a specific **prefix**.
+  You can also control the **maximum number of items** to return and the **starting position** for paging.
+
+* Examples:
+
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode.
+  // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode.
+  using (var session = store.OpenSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Call 'GetCompareExchangeValues', pass:
+      // * startsWith: The common key prefix
+      // * start:      The start position (optional, default is 0)
+      // * pageSize:   Max items to get (optional, default is 25)
+      Dictionary<string, CompareExchangeValue<string>> items =
+          session.Advanced.ClusterTransaction.GetCompareExchangeValues<string>(
+              startsWith: "employees", start: 0, pageSize: 10);
+
+      // Results will include only compare-exchange items with keys that start with "employees"
+      Console.WriteLine($"Number of compare-exchange items with prefix 'employees': {items.Count}");
+      // Should be 3
+  }
+  ```
+
+
+  ```csharp
+  // The session must be opened in cluster-wide mode.
+  // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode.
+  using (var asyncSession = store.OpenAsyncSession(
+      new SessionOptions { TransactionMode = TransactionMode.ClusterWide }))
+  {
+      // Call 'GetCompareExchangeValuesAsync', pass:
+      // * startsWith: The common key prefix
+      // * start:      The start position (optional, default is 0)
+      // * pageSize:   Max items to get (optional, default is 25)
+      Dictionary<string, CompareExchangeValue<string>> items = await
+          asyncSession.Advanced.ClusterTransaction.GetCompareExchangeValuesAsync<string>(
+              startsWith: "employees", start: 0, pageSize: 10);
+
+      // Results will include only compare-exchange items with keys that start with "employees"
+      Console.WriteLine($"Number of compare-exchange items with prefix 'employees': {items.Count}");
+      // Should be 3
+  }
+  ```
+
+
+  ```csharp
+  // Define the get compare-exchange items operation, pass:
+  // * startWith: The common key prefix
+  // * start:     The start position (optional, default is 0)
+  // * pageSize:  Max items to get (optional, default is int.MaxValue)
+  var getCmpXchgItemsOp = new GetCompareExchangeValuesOperation<string>(
+      startWith: "employees", start: 0, pageSize: 10);
+
+  // Execute the operation by passing it to Operations.Send
+  Dictionary<string, CompareExchangeValue<string>> items = store.Operations.Send(getCmpXchgItemsOp);
+
+  // Results will include only compare-exchange items with keys that start with "employees"
+  Debug.Assert(items.Count == 3); // There are 3 keys with the "employees" prefix
+  ```
+
+
+  ```csharp
+  // Define the get compare-exchange items operation, pass:
+  // * startWith: The common key prefix
+  // * start:     The start position (optional, default is 0)
+  // * pageSize:  Max items to get (optional, default is int.MaxValue)
+  var getCmpXchgItemsOp = new GetCompareExchangeValuesOperation<string>(
+      startWith: "employees", start: 0, pageSize: 10);
+
+  // Execute the operation by passing it to Operations.SendAsync
+  Dictionary<string, CompareExchangeValue<string>> items = await
+      store.Operations.SendAsync(getCmpXchgItemsOp);
+
+  // Results will include only compare-exchange items with keys that start with "employees"
+  Debug.Assert(items.Count == 3); // There are 3 keys with the "employees" prefix
+  ```
+
+
+
+---
+
+## Get compare-exchange items count
+
+Use `GetDetailedStatisticsOperation` to get the total number of existing compare-exchange items.
+This operation does not retrieve any actual items, only the total count.
+
+
+
+```csharp
+var stats = store.Maintenance.Send(new GetDetailedStatisticsOperation());
+var itemsCount = stats.CountOfCompareExchange;
+```
+
+
+```csharp
+var stats = await store.Maintenance.SendAsync(new GetDetailedStatisticsOperation());
+var itemsCount = stats.CountOfCompareExchange;
+```
+
+
+
+---
+
+## Syntax
+
+---
+
+### `GetCompareExchangeValuesOperation`
+Get multiple compare-exchange items using a store operation:
+
+
+```csharp
+// Available overloads:
+GetCompareExchangeValuesOperation<T>(string[] keys);
+GetCompareExchangeValuesOperation<T>(string startWith, int? start = null, int? pageSize = null);
+```
+
+
+| Parameter | Type | Description |
+|---------------|------------|-------------------------------------------------------|
+| **keys** | `string[]` | Keys of the compare-exchange items to retrieve. |
+| **startWith** | `string` | The common key prefix of the items to retrieve. |
+| **start** | `int?` | The number of items that should be skipped. Default is `0`. |
+| **pageSize** | `int?` | The maximum number of values that will be retrieved.<br/>Default is `int.MaxValue`. |
+
+| Returned object | Description |
+|------------------------------------------------|---------------------------------------------------|
+| `Dictionary<string, CompareExchangeValue<T>>` | A Dictionary with a compare-exchange item per key |
+
+---
+
+### `GetCompareExchangeValues`
+Get multiple compare-exchange items using a cluster-wide session:
+
+
+```csharp
+// Available overloads:
+Dictionary<string, CompareExchangeValue<T>> GetCompareExchangeValues<T>(string[] keys);
+Dictionary<string, CompareExchangeValue<T>> GetCompareExchangeValues<T>(
+    string startsWith, int start = 0, int pageSize = 25);
+```
+
+
+| Parameter | Type | Description |
+|----------------|------------|-------------------------------------------------------|
+| **keys** | `string[]` | Keys of the compare-exchange items to retrieve. |
+| **startsWith** | `string` | The common key prefix of the items to retrieve. |
+| **start** | `int` | The number of items that should be skipped. Default is `0`. |
+| **pageSize** | `int` | The maximum number of values that will be retrieved. Default is `25`. |
+
+
+| Returned object | Description |
+|------------------------------------------------|---------------------------------------------------|
+| `Dictionary<string, CompareExchangeValue<T>>` | A Dictionary with a compare-exchange item per key |
+
+---
+
+
+```csharp
+// The CompareExchangeValue object:
+// ================================
+
+public class CompareExchangeValue<T>
+{
+    // The unique identifier of the compare-exchange item.
+    public string Key { get; }
+
+    // The existing `value` of the returned compare-exchange item.
+    public T Value { get; set; }
+
+    // The compare-exchange item's version.
+    public long Index { get; internal set; }
+
+    // The existing `metadata` of the returned compare-exchange item.
+    public IMetadataDictionary Metadata;
+}
+```
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-java.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-java.mdx
new file mode 100644
index 0000000000..a7db2144a4
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-java.mdx
@@ -0,0 +1,98 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can retrieve multiple existing compare-exchange items at once using the **Client API** by either:
+  * specifying a list of unique keys, or
+  * using a common key prefix
+
+  Retrieval can be done using either a store operation or a cluster-wide session - as described below.
+  A cluster-wide session also supports lazy retrieval.
+
+* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_,
+  as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items).
+
+* This article shows how to get **multiple** compare-exchange items.
+  To get a **single** item, see [Get compare-exchange item](../compare-exchange/get-cmpxchg-item).
+
+* In this article:
+  * [Get compare-exchange items by list of keys](../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-by-list-of-keys)
+  * [Get compare-exchange items by prefix](../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-by-prefix)
+  * [Syntax](../compare-exchange/get-cmpxchg-items#syntax)
+
+
+
+---
+
+## Get compare-exchange items by list of keys
+
+* Example:
+
+
+  ```java
+  Map<String, CompareExchangeValue<String>> compareExchangeValues
+      = store.operations().send(
+          new GetCompareExchangeValuesOperation<>(String.class, new String[] { "Key-1", "Key-2" }));
+  ```
+
+
+---
+
+## Get compare-exchange items by prefix
+
+* You can retrieve compare-exchange items whose keys start with a specific **prefix**.
+  You can also control the **maximum number of items** to return and the **starting position** for paging.
+
+* Example:
+
+
+  ```java
+  // Get values for keys that have the common prefix 'users'
+  // Retrieve maximum 20 entries
+  Map<String, CompareExchangeValue<User>> compareExchangeValues
+      = store.operations().send(new GetCompareExchangeValuesOperation<>(User.class, "users", 0, 20));
+  ```
+
+
+---
+
+## Syntax
+
+---
+
+### `GetCompareExchangeValuesOperation`
+Get multiple compare-exchange items using a store operation:
+
+
+```java
+GetCompareExchangeValuesOperation(Class<T> clazz, String[] keys, boolean materializeMetadata)
+GetCompareExchangeValuesOperation(Class<T> clazz, String startWith, Integer start, Integer pageSize)
+```
+
+
+| Parameter | Type | Description |
+|---------------|------------|-------------------------------------------------------|
+| **keys** | `String[]` | Keys of the compare-exchange items to retrieve. |
+| **startWith** | `String` | The common key prefix of the items to retrieve. |
+| **start** | `Integer` | The number of items that should be skipped. Default is `0`. |
+| **pageSize** | `Integer` | The maximum number of values that will be retrieved. Default is `Integer.MAX_VALUE`. |
+
+| Returned object | Description |
+|-----------------------------------------|---------------------------------------------------|
+| `Map<String, CompareExchangeValue<T>>` | A Map with a compare-exchange item per key |
+
+---
+
+
+```java
+public class CompareExchangeValue<T> {
+    private String key;  // The unique identifier of the compare-exchange item.
+    private long index;  // The compare-exchange item's version.
+    private T value;     // The existing `value` of the returned compare-exchange item.
+
+    // Getters and setters omitted here for brevity -
+    // see the full class in the "Get compare-exchange item" article.
+}
+```
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-nodejs.mdx
new file mode 100644
index 0000000000..1d202a7b9c
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-nodejs.mdx
@@ -0,0 +1,302 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can retrieve multiple existing compare-exchange items at once using the **Client API** by either:
+  * specifying a list of unique keys, or
+  * using a common key prefix
+
+  Retrieval can be done using either a store operation or a cluster-wide session - as described below.
+  A cluster-wide session also supports lazy retrieval.
+
+* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_,
+  as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items).
+
+* This article shows how to get **multiple** compare-exchange items.
+  To get a **single** item, see [Get compare-exchange item](../compare-exchange/get-cmpxchg-item).
+ +* In this article: + * [Create sample compare-exchange items](../compare-exchange/get-cmpxchg-items#create-sample-compare-exchange-items) + * [Get compare-exchange items by list of keys](../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-by-list-of-keys) + * [Get compare-exchange items by prefix](../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-by-prefix) + * [Get compare-exchange items count](../compare-exchange/get-cmpxchg-items#get-compare-exchange-items-count) + * [Syntax](../compare-exchange/get-cmpxchg-items#syntax) + + + +--- + +## Create sample compare-exchange items + +Let’s create some sample compare-exchange items to use in the examples below. +To learn about ALL the available methods for creating a compare-exchange item, see [Create compare-exchange item](../compare-exchange/create-cmpxchg-items). + + +```js +await documentStore.operations.send( + new PutCompareExchangeValueOperation("employees/1", "someValue1", 0)); +await documentStore.operations.send( + new PutCompareExchangeValueOperation("employees/2", "someValue2", 0)); +await documentStore.operations.send( + new PutCompareExchangeValueOperation("employees/3", "someValue3", 0)); +await documentStore.operations.send( + new PutCompareExchangeValueOperation("customers/1", "someValue4", 0)); +await documentStore.operations.send( + new PutCompareExchangeValueOperation("customers/2", "someValue5", 0)); +``` + + +--- + +## Get compare-exchange items by list of keys + +* To retrieve multiple compare-exchange items by specifying a list of unique keys, use either: + * `GetCompareExchangeValuesOperation` store operation, or + * `getCompareExchangeValues` method of a cluster-wide session - which also supports lazy retrieval. + +* If one of the specified keys does not exist, its corresponding entry in the retrieved items will be `null`. + No exception is thrown. + +* Examples: + + #### Get compare-exchange items by list of keys + + + + ```js + // The session must be opened in cluster-wide mode. + // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. 
+ const session = documentStore.openSession({ + transactionMode: "ClusterWide" + }); + + // Define the list of keys of the compare-exchange items to retrieve + const keys = ["employees/1", "employees/2", "customers/2", "non-existing-key"]; + + // Call 'getCompareExchangeValues', pass the list of keys + const items = await session.advanced.clusterTransaction.getCompareExchangeValues(keys); + + // Check results + console.log( + `Number of compare-exchange items retrieved: ${Object.keys(items).length}` + ); // Expecting 4 + + // Access the retrieved items + const value = items["employees/1"].value; + const version = items["employees/1"].index; + + // A non-existing item returns null + const nonExistingItem = items["non-existing-key"]; // null + ``` + + + ```js + // Define the list of keys of the compare-exchange items to retrieve + const keys = ["employees/1", "employees/2", "customers/2", "non-existing-key"]; + + // Define the get operation, pass the list of keys + const getCmpXchgOp = new GetCompareExchangeValuesOperation({keys}); + + // Execute the operation by passing it to operations.send + const items = await documentStore.operations.send(getCmpXchgOp); + + // Check results + console.log( + `Number of compare-exchange items retrieved: ${Object.keys(items).length}` + ); // Expecting 4 + + // Access the retrieved items + const value = items["employees/1"].value; + const version = items["employees/1"].index; + + // The value of a non-existing item returns null + const nonExistingItem = items["non-existing-key"].value; // null + ``` + + + + #### Get compare-exchange items by list of keys - lazily + + A list of compare-exchange items can be retrieved lazily when working within a cluster-wide session. + + + + ```js + // The session must be opened in cluster-wide mode. + // An `InvalidOperationException` is thrown if the session is not opened in cluster-wide mode. + const session = documentStore.openSession({ + transactionMode: "ClusterWide" + }); + + // Define the list of keys of the compare-exchange items to retrieve + const keys = ["employees/1", "employees/2", "customers/2", "non-existing-key"]; + + // Call 'lazily.getCompareExchangeValues', pass the list of keys + const lazyItems = await session.advanced.clusterTransaction.lazily.getCompareExchangeValues(keys); + + // Execute the lazy operation to get the actual items + const items = await lazyItems.getValue(); + + // Check results + console.log( + `Number of compare-exchange items retrieved: ${Object.keys(items).length}` + ); // Expecting 4 + + // Access the retrieved items + const value = items["employees/1"].value; + const version = items["employees/1"].index; + + // A non-existing item returns null + const nonExistingItem = items["non-existing-key"]; // null + ``` + + + +--- + +## Get compare-exchange items by prefix + +* You can retrieve compare-exchange items whose keys start with a specific **prefix**. + You can also control the **maximum number of items** to return and the **starting position** for paging. 
+
+* Example:
+
+
+```js
+// Define the get compare-exchange operation, specify:
+// * startWith: The common key prefix
+// * start: The start position (this is optional, default is 0)
+// * pageSize: Max items to get (this is optional, default is int.MaxValue)
+const getCmpXchgOp = new GetCompareExchangeValuesOperation({
+    startWith: "employees",
+    start: 0,
+    pageSize: 10
+});
+
+// Execute the operation by passing it to operations.send
+const items = await documentStore.operations.send(getCmpXchgOp);
+
+// Results will include only compare-exchange items with keys that start with "employees"
+console.log(
+    `Number of compare-exchange items with prefix 'employees': ${Object.keys(items).length}`
+); // Should be 3
+```
+
+
+---
+
+## Get compare-exchange items count
+
+Use `GetDetailedStatisticsOperation` to get the total number of existing compare-exchange items.
+This operation does not retrieve any actual items, only the total count.
+
+
+```js
+const stats = await documentStore.maintenance.send(new GetDetailedStatisticsOperation());
+const itemsCount = stats.countOfCompareExchange;
+```
+
+
+---
+
+## Syntax
+
+---
+
+### `GetCompareExchangeValuesOperation`
+Get multiple compare-exchange items using a store operation:
+
+
+```js
+const getCmpXchgOp = new GetCompareExchangeValuesOperation(parameters);
+```
+
+
+```js
+// The parameters object:
+{
+    // Keys of the items to retrieve
+    keys?; // string[]
+
+    // The common key prefix of the items to retrieve
+    startWith?; // string
+
+    // The number of items that should be skipped
+    start?; // number
+
+    // The maximum number of values that will be retrieved
+    pageSize?; // number
+
+    // When the item's value is a class, you can specify its type in this parameter
+    clazz?; // object
+
+    // The metadata will be retrieved and available regardless of the value of this param.
+    // Used for internal purposes.
+    materializeMetadata?; // boolean
+}
+```
+
+
+| Returned object | Description |
+|-----------------------------------------|----------------------------------------------------|
+| `Record<string, CompareExchangeValue>` | A Dictionary with a compare-exchange item per key |
+
+---
+
+### `getCompareExchangeValues`
+Get multiple compare-exchange items using a cluster-wide session:
+
+
+```js
+await session.advanced.clusterTransaction.getCompareExchangeValues(keys);
+```
+
+### `lazily.getCompareExchangeValues`
+Get multiple compare-exchange items using a cluster-wide session - lazily:
+
+
+```js
+await session.advanced.clusterTransaction.lazily.getCompareExchangeValues(keys);
+```
+
+
+
+
+| Parameter | Type | Description |
+|-----------|------------|--------------------------------------------------|
+| **keys** | `string[]` | Keys of the compare-exchange items to retrieve |
+
+| Returned object | Description |
+|-----------------------------------------|----------------------------------------------------|
+| `Record<string, CompareExchangeValue>` | A Dictionary with a compare-exchange item per key |
+
+---
+
+
+```js
+// The CompareExchangeValue object:
+// ================================
+
+class CompareExchangeValue {
+    // The unique identifier of the compare-exchange item.
+    key; // string
+
+    // The existing `value` of the returned compare-exchange item.
+    value; // object
+
+    // The existing `metadata` of the returned compare-exchange item.
+    metadata; // object
+
+    // The compare-exchange item's version.
+    index; // number
+}
+```
+
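+For reference, a minimal sketch of reading the fields of a retrieved item
+(assuming `items` was retrieved as in the examples above):
+
+```js
+// A non-existing key maps to null; existing entries expose value, index, and metadata
+const item = items["employees/1"];
+
+const value = item.value;       // the item's value
+const version = item.index;     // the item's version
+const metadata = item.metadata; // the item's metadata
+```
+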
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-php.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-php.mdx
new file mode 100644
index 0000000000..9500a8f0df
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-php.mdx
@@ -0,0 +1,55 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can retrieve multiple existing compare-exchange items at once using the **Client API** by either:
+  * specifying a list of unique keys, or
+  * using a common key prefix
+
+  Retrieval can be done using either a store operation or a cluster-wide session - as described below.
+  A cluster-wide session also supports lazy retrieval.
+
+* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_,
+  as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items).
+
+* This article shows how to get **multiple** compare-exchange items.
+  To get a **single** item, see [Get compare-exchange item](../compare-exchange/get-cmpxchg-item).
+
+* In this article:
+  * [Syntax](../compare-exchange/get-cmpxchg-items#syntax)
+
+
+
+---
+
+## Syntax
+
+---
+
+### `getCompareExchangeValues`
+
+
+```php
+// Get compare-exchange items using a cluster-wide session:
+$session->advanced()->clusterTransaction()->getCompareExchangeValues(null, $keys);
+```
+
+
+```php
+// Get compare-exchange items using a cluster-wide session - lazily:
+$session->advanced()->clusterTransaction()->lazily()->getCompareExchangeValues(null, $keys);
+```
+
+
+| Parameter | Type | Description |
+|-----------|------------|--------------------------------------------------|
+| **keys** | `string[]` | Keys of the compare-exchange items to retrieve. |
+
+| Returned object | Description |
+|---------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|
+| `Dictionary<string, CompareExchangeValue>` | A Dictionary with a compare-exchange item per key.<br/>If a key doesn't exist, the associated value will be `null`. |
+| `Lazy<Dictionary<string, CompareExchangeValue>>` | A Dictionary with a compare-exchange item per key.<br/>If a key doesn't exist, the associated value will be `null`. |
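+
+For reference, a minimal usage sketch (assuming a session opened in cluster-wide mode;
+the `getValue()` accessor is an assumption based on the `CompareExchangeValue` shape shown for the other clients):
+
+```php
+// A minimal sketch - assumes a cluster-wide session and a getValue() accessor
+$keys = [ "employees/1", "employees/2" ];
+
+// Each entry in the returned array maps a key to its compare-exchange item
+$items = $session->advanced()->clusterTransaction()->getCompareExchangeValues(null, $keys);
+
+// A non-existing key is associated with null
+$value = $items["employees/1"]?->getValue();
+```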
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-python.mdx b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-python.mdx
new file mode 100644
index 0000000000..63cb6a2e02
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_get-cmpxchg-items-python.mdx
@@ -0,0 +1,55 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can retrieve multiple existing compare-exchange items at once using the **Client API** by either:
+  * specifying a list of unique keys, or
+  * using a common key prefix
+
+  Retrieval can be done using either a store operation or a cluster-wide session - as described below.
+  A cluster-wide session also supports lazy retrieval.
+
+* To view existing compare-exchange items in the **Studio**, go to _Documents > Compare Exchange_,
+  as described in [Ways to manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items).
+
+* This article shows how to get **multiple** compare-exchange items.
+  To get a **single** item, see [Get compare-exchange item](../compare-exchange/get-cmpxchg-item).
+
+* In this article:
+  * [Syntax](../compare-exchange/get-cmpxchg-items#syntax)
+
+
+
+---
+
+## Syntax
+
+---
+
+### `get_compare_exchange_values`
+
+
+```python
+# Get compare-exchange items using a cluster-wide session:
+session.advanced.cluster_transaction.get_compare_exchange_values(keys)
+```
+
+
+```python
+# Get compare-exchange items using a cluster-wide session - lazily:
+session.advanced.cluster_transaction.lazily.get_compare_exchange_values(keys)
+```
+
+
+| Parameter | Type | Description |
+|-----------|-------------|--------------------------------------------------|
+| **keys** | `List[str]` | Keys of the compare-exchange items to retrieve. |
+
+| Returned object | Description |
+|--------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| `Dict[str, CompareExchangeValue[T]]` | A Dictionary with a compare-exchange item per key.<br/>If a key doesn't exist, the value associated with it will be `None`. |
+| `Lazy[Dict[str, CompareExchangeValue[T]]]` | A Dictionary with a compare-exchange item per key.<br/>If a key doesn't exist, the value associated with it will be `None`. |
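+
+For reference, a minimal usage sketch (assuming a session opened in cluster-wide mode;
+the `value` attribute is an assumption based on the `CompareExchangeValue` shape shown for the other clients):
+
+```python
+# A minimal sketch - assumes a cluster-wide session and a 'value' attribute
+keys = ["employees/1", "employees/2"]
+
+# Each entry in the returned dict maps a key to its compare-exchange item
+items = session.advanced.cluster_transaction.get_compare_exchange_values(keys)
+
+# A non-existing key is associated with None
+item = items.get("employees/1")
+value = item.value if item is not None else None
+```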
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_include-compare-exchange-items-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_include-compare-exchange-items-csharp.mdx
new file mode 100644
index 0000000000..d9dc01bd6f
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_include-compare-exchange-items-csharp.mdx
@@ -0,0 +1,681 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Compare-exchange items can be included when [loading entities](../client-api/session/loading-entities)
+  or when making [queries](../client-api/session/querying/how-to-query).
+
+* The Session [tracks](../client-api/session/what-is-a-session-and-how-does-it-work) the included compare-exchange items,
+  which means their values can be accessed later in the same session without making additional requests to the server.
+
+* In this page:
+  * [Sample data](../compare-exchange/include-cmpxchg-items#sample-data)
+  * [Include compare-exchange items when loading](../compare-exchange/include-cmpxchg-items#include-compare-exchange-items-when-loading)
+  * [Include compare-exchange items when querying](../compare-exchange/include-cmpxchg-items#include-compare-exchange-items-when-querying)
+  * [Syntax](../compare-exchange/include-cmpxchg-items#syntax)
+
+
+
+---
+
+## Sample data
+
+The examples in this article are based on the following **sample data**.
+To learn about ALL the available methods for creating a compare-exchange item, see [Create compare-exchange item](../compare-exchange/create-cmpxchg-items).
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Create some company documents:
+    // ==============================
+
+    var company1 = new Company
+    {
+        Id = "companies/1",
+        Name = "Apple",
+        Supplier = "suppliers/1",
+        Workers = new[] { "employees/1", "employees/2" }
+    };
+
+    var company2 = new Company
+    {
+        Id = "companies/2",
+        Name = "Google",
+        Supplier = "suppliers/2",
+        Workers = new[] { "employees/3", "employees/4" }
+    };
+
+    var company3 = new Company
+    {
+        Id = "companies/3",
+        Name = "Microsoft",
+        Supplier = "suppliers/3",
+        Workers = new[] { "employees/6", "employees/5" }
+    };
+
+    session.Store(company1);
+    session.Store(company2);
+    session.Store(company3);
+
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+using (var session = store.OpenSession(new SessionOptions
+{
+    TransactionMode = TransactionMode.ClusterWide
+}))
+{
+    // Create some compare-exchange items:
+    // ===================================
+
+    session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+        "employees/1", "content for employee 1 ..");
+    session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+        "employees/2", "content for employee 2 ..");
+    session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+        "employees/3", "content for employee 3 ..");
+
+    session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+        "suppliers/1", "content for supplier 1 ..");
+    session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+        "suppliers/2", "content for supplier 2 ..");
+    session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+        "suppliers/3", "content for supplier 3 ..");
+
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+public class Company
+{
+    public string Id { get; set; }
+    public string Name { get; set; }
+    public string Supplier { get; set; }
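+
+    // 'Supplier' holds a single compare-exchange key and 'Workers' holds a list of keys -
+    // the include examples below resolve these keys to their compare-exchange items.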
+ public string[] Workers { get; set; } + + public Company() { } + + public Company(string id, string name, string supplier, string[] workers) + { + Id = id; + Name = name; + Supplier = supplier; + Workers = workers; + } +} +``` + + + +--- + +## Include compare-exchange items when loading + + + +**Include single item**: + + + +```csharp +// Open a session with cluster-wide mode to enable calling 'IncludeCompareExchangeValue' +using (var session = store.OpenSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Load a company document + include a CmpXchg item: + // ================================================= + + var company1 = session.Load("companies/1", includes => + // Call 'IncludeCompareExchangeValue' + // "Supplier" is the document property that holds the CmpXchg key to include + includes.IncludeCompareExchangeValue(c => c.Supplier)); + + // Calling 'Load' has triggered a server call + var numberOfRequests = session.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + // Access the included CmpXchg item: + // ================================= + + // Call 'GetCompareExchangeValue' to access the content of the included CmpXchg item. + // Pass the CmpXchg item KEY. This will NOT trigger another server call. + var item = session.Advanced.ClusterTransaction + .GetCompareExchangeValue(company1.Supplier); + + // You can check that no further server calls were made + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true + + // The CmpXchg item value is available + var value = item.Value; +} +``` + + +```csharp +// Open a session with cluster-wide mode to enable calling 'IncludeCompareExchangeValue' +using (var asyncSession = store.OpenAsyncSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Load a company document + include a CmpXchg item: + // ================================================= + + var company1 = await asyncSession.LoadAsync("companies/1", includes => + // Call 'IncludeCompareExchangeValue' + // "Supplier" is the document property that holds the CmpXchg key to include + includes.IncludeCompareExchangeValue(c => c.Supplier)); + + // Calling 'LoadAsync' has triggered a server call + var numberOfRequests = asyncSession.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + // Access the included CmpXchg item: + // ================================= + + // Call 'GetCompareExchangeValue' to access the content of the included CmpXchg item, + // pass the CmpXchg item KEY. This will NOT trigger another server call. 
+ var item = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValueAsync(company1.Supplier); + + // You can check that no further server calls were made + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true + + // The CmpXchg item value is available + var value = item.Value; +} +``` + + + + + + + +**Include multiple items**: + + + +```csharp +// Open a session with cluster-wide mode +using (var session = store.OpenSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Load a company document + include multiple CmpXchg items: + // ========================================================= + + var company1 = session.Load("companies/1", includes => + // Call 'IncludeCompareExchangeValue' + // "Workers" is the document property that holds the list of keys to include + includes.IncludeCompareExchangeValue(c => c.Workers)); + + var numberOfRequests = session.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + // Access the included CmpXchg items: + // ================================== + + // Call 'GetCompareExchangeValues' to access the content of the included CmpXchg items. + // Pass the list of KEYS. This will NOT trigger another server call. + var items = session.Advanced.ClusterTransaction + .GetCompareExchangeValues(company1.Workers); + + // You can check that no further server calls were made + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true + + // The value of each item is available + var value1 = items["employees/1"].Value; + var value2 = items["employees/2"].Value; +} +``` + + +```csharp +// Open a session with cluster-wide mode +using (var asyncSession = store.OpenAsyncSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Load a company document + include multiple CmpXchg items: + // ========================================================= + + var company1 = session.Load("companies/1", includes => + // Call 'IncludeCompareExchangeValue' + // "Workers" is the document property that holds the list of keys to include + includes.IncludeCompareExchangeValue(c => c.Workers)); + + var numberOfRequests = asyncSession.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + // Access the included CmpXchg items: + // ================================== + + // Call 'GetCompareExchangeValues' to access the content of the included CmpXchg items. + // Pass the list of KEYS. This will NOT trigger another server call. 
+ var items = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValuesAsync(company1.Workers); + + // You can check that no further server calls were made + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true + + // The value of each item is available + var value1 = items["employees/1"].Value; + var value2 = items["employees/2"].Value; +} +``` + + + + + +--- + +## Include compare-exchange items when querying + + + +**Dynamic query**: + + + +```csharp +// Open a session with cluster-wide mode to enable calling 'IncludeCompareExchangeValue' +using (var session = store.OpenSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Make a dynamic query + include CmpXchg items: + // ============================================= + + var companies = session.Query() + // Call 'Include' with 'IncludeCompareExchangeValue' + // pass the PATH of the document property that contains the key of the CmpXchg item to include + .Include(x => x.IncludeCompareExchangeValue(c => c.Supplier)) + .ToList(); + + // Making the query has triggered a server call + var numberOfRequests = session.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + // Access the included CmpXchg items: + // ================================== + + var cmpXchgItems = new List>(); + + foreach (var company in companies) + { + // Call 'GetCompareExchangeValue' to access the included CmpXchg item. + // Pass the KEY. This will NOT trigger another server call. + var item = session.Advanced.ClusterTransaction + .GetCompareExchangeValue(company.Supplier); + + cmpXchgItems.Add(item); + } + + // You can check that no further server calls were made + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true +} +``` + + +```csharp +// Open a session with cluster-wide mode to enable calling 'IncludeCompareExchangeValue' +using (var asyncSession = store.OpenAsyncSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Make a dynamic query + include CmpXchg items: + // ============================================= + + var companies = await asyncSession.Query() + // Call 'Include' with 'IncludeCompareExchangeValue' + // pass the PATH of the document property that contains the key of the CmpXchg item to include + .Include(x => x.IncludeCompareExchangeValue(c => c.Supplier)) + .ToListAsync(); + + // Making the query has triggered a server call + var numberOfRequests = asyncSession.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + var cmpXchgItems = new List>(); + + foreach (var company in companies) + { + // Call 'GetCompareExchangeValue' to access the included CmpXchg item. + // Pass the KEY. This will NOT trigger another server call. 
+ var item = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValueAsync(company.Supplier); + + cmpXchgItems.Add(item); + } + + // You can check that no further server calls were made + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true +} +``` + + +```csharp +// Open a session with cluster-wide mode to enable calling 'include cmpxchg' +using (var session = store.OpenSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Make a raw query + include CmpXchg items: + // ========================================= + + // In the provided RQL: + // * Call 'include' with 'cmpxchg' + // * Pass the PATH of the document property that contains the key of the CmpXchg item to include + var companies = session.Advanced + .RawQuery(@" + from 'Companies' + include cmpxchg('Supplier')") + .ToList(); + + var numberOfRequests = session.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + // Access the included CmpXchg items: + // ================================== + + var cmpXchgItems = new List>(); + + foreach (var company in companies) + { + // Call 'getCompareExchangeValues' to access the content of the included CmpXchg items, + // pass the KEY, this will NOT trigger another server call. + var item = session.Advanced.ClusterTransaction + .GetCompareExchangeValue(company.Supplier); + + cmpXchgItems.Add(item); + } + + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true +} +``` + + +```csharp +// Open a session with cluster-wide mode to enable calling 'includes.cmpxchg' +using (var session = store.OpenSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Make a raw query + include CmpXchg items using JavaScript method: + // ================================================================= + + // In the provided RQL: + // * Call 'includes.cmpxchg' + // * Pass the PATH of the document property that contains the key of the CmpXchg item to include + var companies = session.Advanced + .RawQuery(@" + declare function includeCmpXchg(company) { + includes.cmpxchg(company.Supplier); + return company; + } + + from companies as c + select includeCmpXchg(c)") + .ToList(); + + var numberOfRequests = session.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + // Access the included CmpXchg items: + // ================================== + + var cmpXchgItems = new List>(); + + foreach (var company in companies) + { + // Call 'getCompareExchangeValues' to access the content of the included CmpXchg items, + // pass the KEY. This will NOT trigger another server call. 
+ var item = session.Advanced.ClusterTransaction + .GetCompareExchangeValue(company.Supplier); + + cmpXchgItems.Add(item); + } + + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true +} +``` + + +```sql +// RQL that can be used with a Raw Query: +// ====================================== + +from "Companies" +include cmpxchg("Supplier") + +// or: + +from companies as c +select c +include cmpxchg(c.Supplier) + +// Using JS method: +// ================ + +declare function includeCmpXchg(company) { + includes.cmpxchg(company.Supplier); + return company; +} + +from companies as c +select includeCmpXchg(c) +``` + + + + + + + +**Index query**: + + + +```csharp +// Open a session with cluster-wide mode +using (var session = store.OpenSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Make an index query + include CmpXchg items: + // ============================================ + + var companies = session.Query() + // Call 'Include' with 'IncludeCompareExchangeValue' + // pass the PATH of the property that contains the key of the CmpXchg item to include + .Include(x => x.IncludeCompareExchangeValue(c => c.Supplier)) + .OfType() + .ToList(); + + var numberOfRequests = session.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + // Access the included CmpXchg items: + // ================================== + + var cmpXchgItems = new List>(); + + foreach (var company in companies) + { + var item = session.Advanced.ClusterTransaction + .GetCompareExchangeValue(company.Supplier); + + cmpXchgItems.Add(item); + } + + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true +} +``` + + +```csharp +// Open a session with cluster-wide mode +using (var asyncSession = store.OpenAsyncSession(new SessionOptions +{ + TransactionMode = TransactionMode.ClusterWide +})) +{ + // Make an index query + include CmpXchg items: + // ============================================ + + var companies = await asyncSession.Query() + // Call 'Include' with 'IncludeCompareExchangeValue' + // pass the PATH of the property that contains the key of the CmpXchg item to include + .Include(x => x.IncludeCompareExchangeValue(c => c.Supplier)) + .OfType() + .ToListAsync(); + + var numberOfRequests = asyncSession.Advanced.NumberOfRequests; + Console.WriteLine($"Number of requests made: {numberOfRequests}"); // Should be 1 + + // Access the included CmpXchg items: + // ================================== + + var cmpXchgItems = new List>(); + + foreach (var company in companies) + { + var item = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValueAsync(company.Supplier); + + cmpXchgItems.Add(item); + } + + Console.WriteLine(session.Advanced.NumberOfRequests == numberOfRequests); // Should be true +} +``` + + +```sql +// RQL that can be used with a Raw Query: +// ====================================== + +from index "Companies/ByNameAndSupplier" +include cmpxchg("Supplier") + +// Using JS method: +// ================ + +declare function includeCmpXchg(company) { + includes.cmpxchg(company.Supplier); + return company; +} + +from index "Companies/ByNameAndSupplier" as c +select includeCmpXchg(c) +``` + + +```csharp +public class Companies_ByNameAndSupplier : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string Name { get; set;} + public string Supplier { get; set; } + } + + public Companies_ByNameAndSupplier() + { + Map = companies 
=> from company in companies + select new IndexEntry() + { + Name = company.Name, + Supplier = company.Supplier + }; + } +} +``` + + + +* Note: + Similar to the above dynamic query example, you can query the index with a [raw query](../indexes/querying/query-index#query-an-index-by-rawquery) using the provided RQL. + + + +--- + +## Syntax + + + +**When loading entities or making queries**: + +* Use method `IncludeCompareExchangeValue()` to include compare-exchange items with `session.Load()` or with queries. + + +```csharp +// Available overloads: +IncludeCompareExchangeValue(string path); +IncludeCompareExchangeValue(Expression> path); +IncludeCompareExchangeValue(Expression>> path); + +``` + + +| Parameter | Type | Description | +|-----------|--------------------------------|--------------------------------------------------------------------------------------------------------------| +| **path** | `string` | The key of the compare-exchange item to include. | +| **path** | `Expression>` | An expression indicating the property path that resolves to the key of the compare-exchange item to include. | +| **path** | `Expression>>` | An expression indicating the property path that resolves to an array of keys of the items to include. | + + + + + +**When querying with RQL**: + +* Use the [include](../client-api/session/querying/what-is-rql.mdx#include) keyword followed by `cmpxchg()` to include a compare-exchange item. + + +```sql +include cmpxchg(key) +``` + + + + + + +**When using JavaScript functions within RQL**: + +* Use `includes.cmpxchg()` In [JavaScript functions](../client-api/session/querying/what-is-rql.mdx#declare) within RQL queries. + + +```csharp +includes.cmpxchg(key); +``` + + +| Parameter | Type | Description | +|-----------|-----------|----------------------------------------------------------------------------------| +| **key** | `string` | The key of the compare exchange value you want to include, or a path to the key. | + + diff --git a/versioned_docs/version-7.1/compare-exchange/content/_include-compare-exchange-items-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_include-compare-exchange-items-nodejs.mdx new file mode 100644 index 0000000000..b17a4b1a9d --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_include-compare-exchange-items-nodejs.mdx @@ -0,0 +1,472 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Compare-exchange items can be included when [loading entities](../client-api/session/loading-entities) + or when making [queries](../client-api/session/querying/how-to-query). + +* The Session [tracks](../client-api/session/what-is-a-session-and-how-does-it-work) the included compare-exchange items, + which means their values can be accessed later in the same session without making additional requests to the server. 
+ +* In this page: + * [Sample data](../compare-exchange/include-cmpxchg-items#sample-data) + * [Include compare-exchange items when loading](../compare-exchange/include-cmpxchg-items#include-compare-exchange-items-when-loading) + * [Include compare-exchange items when querying](../compare-exchange/include-cmpxchg-items#include-compare-exchange-items-when-querying) + * [Syntax](../compare-exchange/include-cmpxchg-items#syntax) + + + +--- + +## Sample data + +The examples in this article are based on the following **sample data**: +To learn about ALL the available methods for creating a compare-exchange item, see [Create compare-exchange item](../compare-exchange/create-cmpxchg-items). + + + +```js +{ + // Create some company documents: + // ============================== + + const session = documentStore.openSession(); + + const company1 = new Company(); + company1.id = "companies/1"; + company1.name = "Apple"; + company1.supplier = "suppliers/1"; + company1.workers = ["employees/1", "employees/2"]; + + const company2 = new Company( + "companies/2", "Google", "suppliers/2", ["employees/3", "employees/4"]); + + const company3 = new Company( + "companies/3", "Microsoft", "suppliers/3", ["employees/6", "employees/5"]); + + await session.store(company1); + await session.store(company2); + await session.store(company3); + + await session.saveChanges(); +} +``` + + +```js +{ + // Create some compare-exchange items: + // =================================== + + // Open a session with cluster-wide mode so that we can call 'createCompareExchangeValue' + const session = documentStore.openSession({ + transactionMode: "ClusterWide" + }); + + session.advanced.clusterTransaction.createCompareExchangeValue( + "employees/1", "content for employee 1 .."); + session.advanced.clusterTransaction.createCompareExchangeValue( + "employees/2", "content for employee 2 .."); + session.advanced.clusterTransaction.createCompareExchangeValue( + "employees/3", "content for employee 3 .."); + + session.advanced.clusterTransaction.createCompareExchangeValue( + "suppliers/1", "content for supplier 1 .."); + session.advanced.clusterTransaction.createCompareExchangeValue( + "suppliers/2", "content for supplier 2 .."); + session.advanced.clusterTransaction.createCompareExchangeValue( + "suppliers/3", "content for supplier 3 .."); + + await session.saveChanges(); +} +``` + + +```js +class Company { + constructor( + id = null, + name = "", + supplier = "", + workers = [] + + ) { + Object.assign(this, { + id, + name, + supplier, + workers + }); + } +} +``` + + + +--- + +## Include compare-exchange items when loading + + + +**Include single item**: + + +```js +// Open a session with cluster-wide mode to enable calling 'includeCompareExchangeValue' +const session = documentStore.openSession({ + transactionMode: "ClusterWide" +}); + +// Load a company document + include a CmpXchg item: +// ================================================= + +// Call 'includeCompareExchangeValue' within the 'load' options, +// pass the PATH of the document property that contains the key of the CmpXchg item to include +const company1 = await session.load("companies/1", { + documentType: Company, + // "supplier" is the document property that holds the CmpXchg key + includes: i => i.includeCompareExchangeValue("supplier") +}); + +// Calling 'load' has triggered a server call +const numberOfRequests = session.advanced.numberOfRequests; +console.log(`Number of requests made: ${numberOfRequests}`); // Should be 1 + +// Access the included CmpXchg item: 
+// ================================= + +// Call 'getCompareExchangeValue' to access the content of the included CmpXchg item, +// pass the CmpXchg item KEY. This will NOT trigger another server call. +const item = await session.advanced.clusterTransaction + .getCompareExchangeValue(company1.supplier); + +// You can check that no further server calls were made +console.log(session.advanced.numberOfRequests === numberOfRequests); // Should be true + +// The CmpXchg item value is available +const value = item.value; +``` + + + + + + +**Include multiple items**: + + +```js +// Open a session with cluster-wide mode +const session = documentStore.openSession({ + transactionMode: "ClusterWide" +}); + +// Load a company document + include multiple CmpXchg items: +// ========================================================= + +// Call 'includeCompareExchangeValue' within the 'load' options, +// pass the PATH of the document property that contains the list of the CmpXchg items to include +const company1 = await session.load("companies/1", { + documentType: Company, + // "workers" is the document property that holds the list of keys + includes: i => i.includeCompareExchangeValue("workers") +}); + +const numberOfRequests = session.advanced.numberOfRequests; +console.log(`Number of requests made: ${numberOfRequests}`); // Should be 1 + +// Access the included CmpXchg items: +// ================================== + +// Call 'getCompareExchangeValues' to access the content of the included CmpXchg items, +// pass the list of KEYS. This will NOT trigger another server call. +const items = await session.advanced.clusterTransaction + .getCompareExchangeValues(company1.workers); + +// You can check that no further server calls were made +console.log(session.advanced.numberOfRequests === numberOfRequests); // Should be true + +// The value of each item is available +const value1 = items["employees/1"].value; +const value2 = items["employees/2"].value; +``` + + + + +--- + +## Include compare-exchange items when querying + + + +**Dynamic query**: + + + +```js +// Open a session with cluster-wide mode to enable calling 'includeCompareExchangeValue' +const session = documentStore.openSession({ + transactionMode: "ClusterWide" +}); + +// Make a dynamic query + include CmpXchg items: +// ============================================= + +const companies = await session.query({ collection: "companies" }) + // Call 'include' with 'includeCompareExchangeValue' + // pass the PATH of the document property that contains the key of the CmpXchg item to include + .include(x => x.includeCompareExchangeValue("supplier")) + .all(); + +// Making the query has triggered a server call +const numberOfRequests = session.advanced.numberOfRequests; +console.log(`Number of requests made: ${numberOfRequests}`); // Should be 1 + +// Access the included CmpXchg items: +// ================================== + +const cmpXchgItems = []; + +for (let i = 0; i < companies.length; i++) { + // Call 'getCompareExchangeValues' to access the content of the included CmpXchg items, + // pass the KEY. This will NOT trigger another server call. 
+ const item = await session.advanced.clusterTransaction + .getCompareExchangeValue(companies[i].supplier); + + cmpXchgItems.push(item); +} + +// You can check that no further server calls were made +console.log(session.advanced.numberOfRequests === numberOfRequests); // Should be true +``` + + +```js +// Open a session with cluster-wide mode to enable calling 'include cmpxchg' +const session = documentStore.openSession({ + transactionMode: "ClusterWide" +}); + +// Make a raw query + include CmpXchg items: +// ========================================= + +// In the provided RQL: +// * Call 'include' with 'cmpxchg' +// * Pass the PATH of the document property that contains the key of the CmpXchg item to include +const companies = await session.advanced + .rawQuery(`from companies as c + select c + include cmpxchg(c.supplier)`) + .all(); + +const numberOfRequests = session.advanced.numberOfRequests; +console.log(`Number of requests made: ${numberOfRequests}`); // Should be 1 + +// Access the included CmpXchg items: +// ================================== + +const cmpXchgItems = []; + +for (let i = 0; i < companies.length; i++) { + // Call 'getCompareExchangeValues' to access the content of the included CmpXchg items, + // pass the KEY, this will NOT trigger another server call. + const item = await session.advanced.clusterTransaction + .getCompareExchangeValue(companies[i].supplier); + + cmpXchgItems.push(item); +} + +// You can check that no further server calls were made +console.log(session.advanced.numberOfRequests === numberOfRequests); // Should be true +``` + + +```js +// Open a session with cluster-wide mode to enable calling 'includes.cmpxchg' +const session = documentStore.openSession({ + transactionMode: "ClusterWide" +}); + +// Make a raw query + include CmpXchg items using Javascript method: +// ================================================================= + +// In the provided RQL: +// * Call 'includes.cmpxchg' +// * Pass the PATH of the document property that contains the key of the CmpXchg item to include +const companies = await session.advanced + .rawQuery(`declare function includeCmpXchg(company) { + includes.cmpxchg(company.supplier); + return company; + } + + from companies as c + select includeCmpXchg(c)`) + .all(); + +const numberOfRequests = session.advanced.numberOfRequests; +console.log(`Number of requests made: ${numberOfRequests}`); // Should be 1 + +// Access the included CmpXchg items: +// ================================== + +const cmpXchgItems = []; + +for (let i = 0; i < companies.length; i++) { + // Call 'getCompareExchangeValues' to access the content of the included CmpXchg items, + // pass the KEY. This will NOT trigger another server call. 
+ const item = await session.advanced.clusterTransaction + .getCompareExchangeValue(companies[i].supplier); + + cmpXchgItems.push(item); +} + +// You can check that no further server calls were made +console.log(session.advanced.numberOfRequests === numberOfRequests); // Should be true +``` + + + + + + + +**Index query**: + + + +```js +// Open a session with cluster-wide mode +const session = documentStore.openSession({ + transactionMode: "ClusterWide" +}); + +// Make an index query + include CmpXchg items: +// ============================================ + +// Call 'include' with 'includeCompareExchangeValue' +// pass the PATH of the document property that contains the key of the CmpXchg item to include +const companies = await session.query({ indexName: "Companies/ByName" }) + .include(x => x.includeCompareExchangeValue("supplier")) + .all(); + +const numberOfRequests = session.advanced.numberOfRequests; +console.log(`Number of requests made: ${numberOfRequests}`); // Should be 1 + +// Access the included CmpXchg items: +// ================================== + +const cmpXchgItems = []; + +for (let i = 0; i < companies.length; i++) { + // Call 'getCompareExchangeValues' to access the content of the included CmpXchg items, + // pass the KEY. This will NOT trigger another server call. + const item = await session.advanced.clusterTransaction + .getCompareExchangeValue(companies[i].supplier); + + cmpXchgItems.push(item); +} + +// You can check that no further server calls were made +console.log(session.advanced.numberOfRequests === numberOfRequests); // Should be true +``` + + +```sql +// RQL that can be used with a Raw Query: +// ====================================== + +from index "Companies/ByName" +include cmpxchg("supplier") + +// Using JS method: +// ================ + +declare function includeCmpXchg(company) { + includes.cmpxchg(company.supplier); + return company; +} + +from index "Companies/ByName" as c +select includeCmpXchg(c) +``` + + +```js +class Companies_ByName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map('companies', company => { + return { + name: company.name + }; + }); + } +} +``` + + + +* Note: + Similar to the above dynamic query example, you can query the index with a [raw query](../indexes/querying/query-index#query-an-index-by-rawquery) using the provided RQL. + + + +--- + +## Syntax + + + +**When loading entities or making queries**: + +* Use method `includeCompareExchangeValue()` to include compare-exchange items with `session.load()` or with queries. + + +```js +includeCompareExchangeValue(path); +``` + + +| Parameter | Type | Description | +|-----------|-----------|------------------------------------------------------------------------------------------------------------| +| **path** | `string` | The path of the document property that contains the key (or list of keys) of the CmpXchg items to include. | + + + + + +**When querying with RQL**: + +* Use the [include](../client-api/session/querying/what-is-rql.mdx#include) keyword followed by `cmpxchg()` to include a compare-exchange item. + + +```sql +include cmpxchg(key) +``` + + + + + + +**When using JavaScript functions within RQL**: + +* Use `includes.cmpxchg()` In [JavaScript functions](../client-api/session/querying/what-is-rql.mdx#declare) within RQL queries. 
+ + +```js +includes.cmpxchg(key); +``` + + +| Parameter | Type | Description | +|-----------|-----------|----------------------------------------------------------------------------------| +| **key** | `string` | The key of the compare exchange value you want to include, or a path to the key. | + + diff --git a/versioned_docs/version-7.1/compare-exchange/content/_indexing-compare-exchange-values-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_indexing-compare-exchange-values-csharp.mdx new file mode 100644 index 0000000000..c72007f252 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_indexing-compare-exchange-values-csharp.mdx @@ -0,0 +1,556 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can index compare-exchange values in a static-index. + This lets you query documents based on values stored in compare-exchange items. + +* Modifications to the indexed compare-exchange values will trigger index updates. + The index will be updated whenever an indexed compare-exchange value changes, + or when documents in the indexed collection(s) are modified. + +* In this article: + * [Create sample compare-exchange items](../compare-exchange/indexing-cmpxchg-values#create-sample-compare-exchange-items) + * [Index compare-exchange values](../compare-exchange/indexing-cmpxchg-values#index-compare-exchange-values) + * [Query the index](../compare-exchange/indexing-cmpxchg-values#query-the-index) + * [Query the index and project compare-exchange values](../compare-exchange/indexing-cmpxchg-values#query-the-index-and-project-compare-exchange-values) + * [Syntax](../compare-exchange/indexing-cmpxchg-values#syntax) + + + +--- + +## Create sample compare-exchange items + +Let’s create some sample documents and compare-exchange items to use in the examples below. +To learn about ALL the available methods for creating a compare-exchange item, see [Create compare-exchange item](../compare-exchange/create-cmpxchg-items). 
+
+
+```csharp
+// Create some hotel room DOCUMENTS with general info:
+// ===================================================
+
+using (var session = store.OpenSession())
+{
+    for (int i = 215; i <= 217; i++)
+    {
+        var room = new HotelRoom
+        {
+            RoomNumber = $"R{i}",
+            Description = $"Description of room number R{i}"
+        };
+
+        session.Store(room, $"hotelRooms/R{i}");
+    }
+
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+// Create some COMPARE-EXCHANGE ITEMS to track current details of each room:
+// =========================================================================
+
+// Value for room R215
+var hotelRoomDetails = new HotelRoomCurrentDetails()
+{
+    CurrentNumberOfGuests = 2,
+    ReservedBy = "customers/2",
+    ReservedUntil = DateTime.Now.AddDays(2),
+    FullyPaid = true,
+    CustomerEmail = "customer2@gmail.com",
+    CustomerPhone = "123-123-1234"
+};
+
+CompareExchangeResult<HotelRoomCurrentDetails> putResult = store.Operations.Send(
+    new PutCompareExchangeValueOperation<HotelRoomCurrentDetails>(
+        "R215", hotelRoomDetails, 0));
+
+// Value for room R216
+hotelRoomDetails = new HotelRoomCurrentDetails()
+{
+    CurrentNumberOfGuests = 3,
+    ReservedBy = "customers/3",
+    ReservedUntil = DateTime.Now.AddDays(5),
+    FullyPaid = false,
+    CustomerEmail = "customer3@gmail.com",
+    CustomerPhone = "456-456-6789"
+};
+
+putResult = store.Operations.Send(
+    new PutCompareExchangeValueOperation<HotelRoomCurrentDetails>(
+        "R216", hotelRoomDetails, 0));
+
+// Value for room R217
+// This room is currently not occupied...
+hotelRoomDetails = new HotelRoomCurrentDetails()
+{
+    CurrentNumberOfGuests = 0
+};
+
+putResult = store.Operations.Send(
+    new PutCompareExchangeValueOperation<HotelRoomCurrentDetails>(
+        "R217", hotelRoomDetails, 0));
+```
+
+
+```csharp
+// Sample classes used:
+// ====================
+
+// The document
+public class HotelRoom
+{
+    public string RoomNumber { get; set; }
+    public string Description { get; set; }
+}
+
+// The compare-exchange value
+public class HotelRoomCurrentDetails
+{
+    public int CurrentNumberOfGuests { get; set; }
+    public string ReservedBy { get; set; }
+    public DateTime ReservedUntil { get; set; }
+    public bool FullyPaid { get; set; }
+    public string CustomerEmail { get; set; }
+    public string CustomerPhone { get; set; }
+}
+
+// Projected content
+public class ProjectedCustomerDetails
+{
+    public string CustomerEmail { get; set; }
+    public string CustomerPhone { get; set; }
+    public string RoomNumber { get; set; }
+}
+
+// Projected content
+public class ProjectedNumberOfGuests
+{
+    public int CurrentNumberOfGuests { get; set; }
+    public string RoomNumber { get; set; }
+}
+```
+
+
+---
+
+## Index compare-exchange values
+
+
+* This index maps the rooms in a hotel, as well as compare-exchange values representing the guests in those rooms.
+
+* Use method `LoadCompareExchangeValue` to load the current details of each room from the associated compare-exchange value.
+
+
+
+```csharp
+public class Rooms_ByGuestsAndPaymentStatus :
+    AbstractIndexCreationTask<HotelRoom, Rooms_ByGuestsAndPaymentStatus.IndexEntry>
+{
+    // The index-fields
+    public class IndexEntry
+    {
+        public string RoomNumber { get; set; }
+        public int? NumberOfGuests { get; set; }
+        public bool? FullyPaid { get; set; }
+    }
+
+    public Rooms_ByGuestsAndPaymentStatus()
+    {
+        Map = hotelRooms => from room in hotelRooms
+            // Call method 'LoadCompareExchangeValue'
+            // to load the compare-exchange value by its key (room number)
+            let cmpXchgValue = LoadCompareExchangeValue<HotelRoomCurrentDetails>(room.RoomNumber)
+
+            // Define the index-fields
+            select new IndexEntry()
+            {
+                // Index content from the document:
+                RoomNumber = room.RoomNumber,
+
+                // Index content from the compare-exchange value:
+                NumberOfGuests = cmpXchgValue != null ? cmpXchgValue.CurrentNumberOfGuests : (int?)null,
+                FullyPaid = cmpXchgValue != null ? cmpXchgValue.FullyPaid : (bool?)null
+            };
+    }
+}
+```
+
+
+```csharp
+public class Rooms_ByGuestsAndPaymentStatus_JS : AbstractJavaScriptIndexCreationTask
+{
+    public Rooms_ByGuestsAndPaymentStatus_JS()
+    {
+        Maps = new HashSet<string>
+        {
+            @"
+            map('HotelRooms', function(room) {
+                // Call method 'cmpxchg'
+                // to load the compare-exchange value by its key (room number)
+                var cmpXchgValue = cmpxchg(room.RoomNumber);
+
+                // Define the index-fields
+                return {
+                    // Index content from the document:
+                    RoomNumber: room.RoomNumber,
+
+                    // Index content from the compare-exchange value:
+                    NumberOfGuests: cmpXchgValue ? cmpXchgValue.CurrentNumberOfGuests : null,
+                    FullyPaid: cmpXchgValue ? cmpXchgValue.FullyPaid : null
+                };
+            });
+            "
+        };
+    }
+}
+```
+
+
+```csharp
+var indexDefinition = new IndexDefinition
+{
+    Name = "Rooms/ByGuestsAndPaymentStatus",
+
+    Maps = new HashSet<string>
+    {
+        @"from room in docs.HotelRooms
+
+          // Call method 'LoadCompareExchangeValue'
+          // to load the compare-exchange value by its key (room number)
+          let cmpXchgValue = LoadCompareExchangeValue(room.RoomNumber)
+          where cmpXchgValue != null
+
+          select new
+          {
+              // Index content from the document:
+              RoomNumber = room.RoomNumber,
+
+              // Index content from the compare-exchange value:
+              NumberOfGuests = cmpXchgValue.CurrentNumberOfGuests,
+              FullyPaid = cmpXchgValue.FullyPaid
+          }"
+    }
+};
+
+store.Maintenance.Send(new PutIndexesOperation(indexDefinition));
+```
+
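+Before the strongly-typed index above can be queried, it must be deployed to the server.
+A minimal deployment sketch (a one-time operation):
+
+```csharp
+// Deploy the index defined above to the server:
+new Rooms_ByGuestsAndPaymentStatus().Execute(store);
+
+// Alternatively, deploy all index classes found in an assembly:
+// IndexCreation.CreateIndexes(typeof(Rooms_ByGuestsAndPaymentStatus).Assembly, store);
+```
+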
+---
+
+## Query the index
+
+* Using the index above, you can query for all rooms (room documents) that are occupied by a specific number of guests.
+  The _NumberOfGuests_ index-field, which is used in the query, contains the number of guests taken from the related compare-exchange value.
+
+* For example, you can find all vacant rooms (0 guests) or rooms occupied by any specific number of guests.
+
+
+
+```csharp
+// When querying the index,
+// the session does not need to be opened in cluster-wide mode.
+using (var session = store.OpenSession())
+{
+    // Query for all vacant rooms (0 guests)
+    List<HotelRoom> vacantRooms = session
+        .Query<Rooms_ByGuestsAndPaymentStatus.IndexEntry, Rooms_ByGuestsAndPaymentStatus>()
+        // Index-field 'NumberOfGuests' contains the guest count for each room,
+        // taken from the compare-exchange item.
+        .Where(x => x.NumberOfGuests == 0)
+        .OfType<HotelRoom>()
+        .ToList();
+
+    // Using the sample data created above, Room R217 will be returned, since it has no guests.
+}
+```
+
+
+```csharp
+// When querying the index,
+// the session does not need to be opened in cluster-wide mode.
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Query for all vacant rooms (0 guests)
+    List<HotelRoom> vacantRooms = await asyncSession
+        .Query<Rooms_ByGuestsAndPaymentStatus.IndexEntry, Rooms_ByGuestsAndPaymentStatus>()
+        // Index-field 'NumberOfGuests' contains the guest count for each room,
+        // taken from the compare-exchange item.
+        .Where(x => x.NumberOfGuests == 0)
+        .OfType<HotelRoom>()
+        .ToListAsync();
+
+    // Using the sample data created above, Room R217 will be returned, since it has no guests.
+} +``` + + +```csharp +using (var session = store.OpenSession()) +{ + List vacantRooms = session.Advanced + .DocumentQuery() + .WhereEquals(x => x.NumberOfGuests, 0) + .OfType() + .ToList(); +} +``` + + +```csharp +using (var asyncSession = store.OpenAsyncSession()) +{ + List vacantRooms = await asyncSession.Advanced + .AsyncDocumentQuery() + .WhereEquals(x => x.NumberOfGuests, 0) + .OfType() + .ToListAsync(); +} +``` + + +```csharp +using (var session = store.OpenSession()) +{ + var vacantRooms = session.Advanced + .RawQuery(@" + from index 'Rooms/ByGuestsAndPaymentStatus' + where NumberOfGuests == 0") + .ToList(); +} +``` + + +```csharp +using (var asyncSession = store.OpenAsyncSession()) +{ + var vacantRooms = await asyncSession.Advanced + .AsyncRawQuery(@" + from index 'Rooms/ByGuestsAndPaymentStatus' + where NumberOfGuests = 0") + .ToListAsync(); +} +``` + + +```sql +from index "Rooms/ByGuestsAndPaymentStatus" +where NumberOfGuests == 0 +``` + + + +--- + +## Query the index and project compare-exchange values + +* In addition to querying index-fields that already contain information from the related compare-exchange value, + you can also project fields from the compare-exchange value into the query results. + +* In the following query example, we retrieve all customers who haven't fully paid yet, + and project their phone number from the compare-exchange value using `RavenQuery.CmpXchg`. + + + +```csharp +// The session does not need to be opened in cluster-wide mode +using (var session = store.OpenSession()) +{ + List phonesOfCustomersThatNeedToPay = session + .Query() + // Index-field 'FullyPaid' contains info from the compare-exchange item + .Where(x => x.FullyPaid == false && x.NumberOfGuests > 0) + // Project query results: + .Select(x => new ProjectedCustomerDetails + { + // Project content from the compare-exchange item: + // Call 'RavenQuery.CmpXchg' to load the compare-exchange value by its key. + CustomerPhone = RavenQuery.CmpXchg(x.RoomNumber).CustomerPhone, + // Project content from the index-field: + RoomNumber = x.RoomNumber + }) + .ToList(); + + // Using the sample data created above, customer from room R216 will be returned + // in the projected data, since they haven't fully paid yet. +} +``` + + +```csharp +// The session does not need to be opened in cluster-wide mode +using (var asyncSession = store.OpenAsyncSession()) +{ + List phonesOfCustomersThatNeedToPay = await asyncSession + .Query() + // Index-field 'FullyPaid' contains info from the compare-exchange item + .Where(x => x.FullyPaid == false && x.NumberOfGuests > 0) + // Project query results: + .Select(x => new ProjectedCustomerDetails + { + // Project content from the compare-exchange item: + // Call 'RavenQuery.CmpXchg' to load the compare-exchange value by its key. + CustomerPhone = RavenQuery.CmpXchg(x.RoomNumber).CustomerPhone, + // Project content from the index-field: + RoomNumber = x.RoomNumber + }) + .ToListAsync(); + + // Using the sample data created above, customer from room R216 will be returned + // in the projected data, since they haven't fully paid yet. 
+}
+```
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    List<ProjectedCustomerDetails> phonesOfCustomersThatNeedToPay = session.Advanced
+        .DocumentQuery<Rooms_ByGuestsAndPaymentStatus.IndexEntry, Rooms_ByGuestsAndPaymentStatus>()
+        .WhereEquals(x => x.FullyPaid, false)
+        .AndAlso()
+        .WhereGreaterThan(x => x.NumberOfGuests, 0)
+        // Define the projection using a custom function:
+        .SelectFields<ProjectedCustomerDetails>(QueryData.CustomFunction(
+            alias: "room",
+            func: @"{
+                // Project content from the compare-exchange item:
+                // Call 'cmpxchg' to load the compare-exchange value by its key.
+                CustomerPhone : cmpxchg(room.RoomNumber).CustomerPhone,
+
+                // Project content from the index-field:
+                RoomNumber : room.RoomNumber
+            }")
+        )
+        .ToList();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    List<ProjectedCustomerDetails> phonesOfCustomersThatNeedToPay = await asyncSession.Advanced
+        .AsyncDocumentQuery<Rooms_ByGuestsAndPaymentStatus.IndexEntry, Rooms_ByGuestsAndPaymentStatus>()
+        .WhereEquals(x => x.FullyPaid, false)
+        .AndAlso()
+        .WhereGreaterThan(x => x.NumberOfGuests, 0)
+        // Define the projection using a custom function:
+        .SelectFields<ProjectedCustomerDetails>(QueryData.CustomFunction(
+            alias: "room",
+            func: @"{
+                // Project content from the compare-exchange item:
+                // Call 'cmpxchg' to load the compare-exchange value by its key.
+                CustomerPhone : cmpxchg(room.RoomNumber).CustomerPhone,
+
+                // Project content from the index-field:
+                RoomNumber : room.RoomNumber
+            }")
+        )
+        .ToListAsync();
+}
+```
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    var phonesOfCustomersThatNeedToPay = session.Advanced
+        .RawQuery<ProjectedCustomerDetails>(@"
+            from index 'Rooms/ByGuestsAndPaymentStatus' as x
+            where x.FullyPaid = false and (x.NumberOfGuests > 0 and x.NumberOfGuests != null)
+            select {
+                CustomerPhone : cmpxchg(x.RoomNumber).CustomerPhone,
+                RoomNumber : x.RoomNumber
+            }
+        ")
+        .ToList();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var phonesOfCustomersThatNeedToPay = await asyncSession.Advanced
+        .AsyncRawQuery<ProjectedCustomerDetails>(@"
+            from index 'Rooms/ByGuestsAndPaymentStatus' as x
+            where x.FullyPaid = false and (x.NumberOfGuests > 0 and x.NumberOfGuests != null)
+            select {
+                CustomerPhone : cmpxchg(x.RoomNumber).CustomerPhone,
+                RoomNumber : x.RoomNumber
+            }
+        ")
+        .ToListAsync();
+}
+```
+
+
+```sql
+from index "Rooms/ByGuestsAndPaymentStatus" as x
+where x.FullyPaid = false and (x.NumberOfGuests > 0 and x.NumberOfGuests != null)
+select {
+    CustomerPhone : cmpxchg(x.RoomNumber).CustomerPhone,
+    RoomNumber : x.RoomNumber
+}
+```
+
+
+
+---
+
+## Syntax
+
+---
+
+### `LoadCompareExchangeValue()`
+Load a compare-exchange value in the LINQ index-definition by its key.
+
+
+```csharp
+// Load one compare-exchange value
+T LoadCompareExchangeValue<T>(string key);
+
+// Load multiple compare-exchange values
+T[] LoadCompareExchangeValue<T>(IEnumerable<string> keys);
+```
+
+
+### `cmpxchg()`
+Load a compare-exchange value in the JavaScript index-definition by its key.
+
+
+```js
+// Load one compare-exchange value
+cmpxchg(key);
+```
+
+
+| Parameter | Type                  | Description                                      |
+|-----------|-----------------------|--------------------------------------------------|
+| **T**     | `object`              | The type of the compare-exchange item's value    |
+| **key**   | `string`              | The key of a particular compare-exchange value.  |
+| **keys**  | `IEnumerable<string>` | The keys of multiple compare-exchange values.    |
+
+### `RavenQuery.CmpXchg()`
+This method is used when **querying the index** with a LINQ query
+and projecting fields from the compare-exchange value into the query results.
+
+
+```csharp
+// Get a compare-exchange value by key.
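+// 'T' is the type of the compare-exchange item's value,
+// e.g. 'HotelRoomCurrentDetails' in the examples above.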
+public static T CmpXchg<T>(string key)
+```
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_indexing-compare-exchange-values-java.mdx b/versioned_docs/version-7.1/compare-exchange/content/_indexing-compare-exchange-values-java.mdx
new file mode 100644
index 0000000000..4210ecca70
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_indexing-compare-exchange-values-java.mdx
@@ -0,0 +1,66 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can index compare-exchange values in a static-index.
+  This lets you query documents based on values stored in compare-exchange items.
+
+* Modifications to the indexed compare-exchange values will trigger index updates.
+  The index will be updated whenever an indexed compare-exchange value changes,
+  or when documents in the indexed collection(s) are modified.
+
+* In this page:
+  * [How to use](../indexes/indexing-compare-exchange-values.mdx#how-to-use)
+  * [Examples](../indexes/indexing-compare-exchange-values.mdx#examples)
+  * [Querying the Index](../indexes/indexing-compare-exchange-values.mdx#querying-the-index)
+
+
+
+## How to use
+
+When creating an index using `AbstractJavaScriptIndexCreationTask`, use JavaScript
+to load a compare-exchange value by its key.
+
+### Examples
+
+These indexes map the rooms in a hotel, as well as compare-exchange values
+representing the guests in those rooms.
+
+
+
+{`private class Compare_Exchange_JS_Index extends AbstractJavaScriptIndexCreationTask {
+    public Compare_Exchange_JS_Index()
+    {
+        setMaps(Collections.singleton(
+            "map('HotelRooms', function (room) {\\n"+
+            "    var guests = cmpxchg(room.RoomID);\\n"+
+            "    return {\\n"+
+            "        RoomID: room.RoomID,\\n"+
+            "        Guests: guests\\n"+
+            "    }\\n"+
+            "});"
+            )
+        );
+    }
+}
+`}
+
+
+
+## Querying the Index
+
+
+
+{`IRawDocumentQuery<HotelRoom> vipRooms = session.advanced().rawQuery(HotelRoom.class,
+    "from HotelRooms as room\\n" +
+    "where room.Guests == cmpxchg('VIP')");
+`}
+
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_indexing-compare-exchange-values-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_indexing-compare-exchange-values-nodejs.mdx
new file mode 100644
index 0000000000..65b08dd941
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_indexing-compare-exchange-values-nodejs.mdx
@@ -0,0 +1,323 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* You can index compare-exchange values in a static-index.
+  This lets you query documents based on values stored in compare-exchange items.
+
+* Modifications to the indexed compare-exchange values will trigger index updates.
+  The index will be updated whenever an indexed compare-exchange value changes,
+  or when documents in the indexed collection(s) are modified.
+ +* In this article: + * [Create sample compare-exchange items](../compare-exchange/indexing-cmpxchg-values#create-sample-compare-exchange-items) + * [Index compare-exchange values](../compare-exchange/indexing-cmpxchg-values#index-compare-exchange-values) + * [Query the index](../compare-exchange/indexing-cmpxchg-values#query-the-index) + * [Query the index and project compare-exchange values](../compare-exchange/indexing-cmpxchg-values#query-the-index-and-project-compare-exchange-values) + * [Syntax](../compare-exchange/indexing-cmpxchg-values#syntax) + + + +--- + +## Create sample compare-exchange items + +Let’s create some sample documents and compare-exchange items to use in the examples below. +To learn about ALL the available methods for creating a compare-exchange item, see [Create compare-exchange item](../compare-exchange/create-cmpxchg-items). + + + +```js +// Create some hotel room DOCUMENTS with general info: +// =================================================== + +const session = documentStore.openSession(); + +for (let i = 215; i <= 217; i++) { + const room = new HotelRoom(`R${i}`, `Description of room number R${i}`); + await session.store(room, `hotelRooms/R${i}`); +} + +await session.saveChanges(); +``` + + +```js +// Create some COMPARE-EXCHANGE ITEMS to track current details of each room: +// ========================================================================== + +// Value for room R215 +let hotelRoomDetails = new HotelRoomCurrentDetails({ + CurrentNumberOfGuests: 2, + ReservedBy: "customers/2", + ReservedUntil: new Date(Date.now() + 2 * 24 * 60 * 60 * 1000), + FullyPaid: true, + CustomerEmail: "customer2@gmail.com", + CustomerPhone: "123-123-1234" +}); + +let putResult = await documentStore.operations.send( + new PutCompareExchangeValueOperation("R215", hotelRoomDetails, 0) +); + +// Value for room R216 +hotelRoomDetails = new HotelRoomCurrentDetails({ + CurrentNumberOfGuests: 3, + ReservedBy: "customers/3", + ReservedUntil: new Date(Date.now() + 5 * 24 * 60 * 60 * 1000), + FullyPaid: false, + CustomerEmail: "customer3@gmail.com", + CustomerPhone: "456-456-6789" +}); + +putResult = await documentStore.operations.send( + new PutCompareExchangeValueOperation("R216", hotelRoomDetails, 0) +); + +// Value for room R217 +// This room is currently not occupied... 
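+// (As with the previous rooms, passing 0 as the index to 'PutCompareExchangeValueOperation'
+//  creates the item only if the key does not already exist.)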
+hotelRoomDetails = new HotelRoomCurrentDetails({ + CurrentNumberOfGuests: 0 +}); + +putResult = await documentStore.operations.send( + new PutCompareExchangeValueOperation("R217", hotelRoomDetails, 0) +); +``` + + +```js +// Sample classes used: +// ==================== + +// The document +class HotelRoom { + constructor(roomNumber = '', description = '') { + Object.assign(this, { + RoomNumber: roomNumber, + Description: description + }); + } +} + +// The compare-exchange value +class HotelRoomCurrentDetails { + constructor({ + CurrentNumberOfGuests = 0, + ReservedBy = null, + ReservedUntil = null, + FullyPaid = false, + CustomerEmail = null, + CustomerPhone = null + } = {}) { + Object.assign(this, { + CurrentNumberOfGuests, + ReservedBy, + ReservedUntil, + FullyPaid, + CustomerEmail, + CustomerPhone + }); + } +} + +// Projected content +class ProjectedCustomerDetails { + constructor({ + CustomerEmail = null, + CustomerPhone = null, + RoomNumber = null + } = {}) { + Object.assign(this, { + CustomerEmail, + CustomerPhone, + RoomNumber + }); + } +} + +// Projected content +class ProjectedNumberOfGuests { + constructor({ + CurrentNumberOfGuests = 0, + RoomNumber = null + } = {}) { + Object.assign(this, { + CurrentNumberOfGuests, + RoomNumber + }); + } +} +``` + + + +--- + +## Index compare-exchange values + +* This index maps the rooms in a hotel, as well as compare-exchange values representing the guests in those rooms. + +* Use method `cmpxchg` to load the current details of each room from the associated compare-exchange value. + + + +```js +class Rooms_ByGuestsAndPaymentStatus extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("HotelRooms", room => { + // Call method 'cmpxchg' + // to load the compare-exchange value by its key (room number) + const cmpXchgValue = cmpxchg(room.RoomNumber); + + return { + // Index content from the document: + RoomNumber: room.RoomNumber, + + // Index content from the compare-exchange value: + NumberOfGuests: cmpXchgValue ? cmpXchgValue.CurrentNumberOfGuests : null, + FullyPaid: cmpXchgValue ? cmpXchgValue.FullyPaid : null + }; + }); + } +} +``` + + + +--- + +## Query the index + +* Using the index above, you can query for all rooms (room documents) that are occupied by a specific number of guests. + The _NumberOfGuests_ index-field, which is used in the query, contains the number of guests taken from the related compare-exchange value. + +* For example, you can find all vacant rooms (0 guests) or rooms occupied by any specific number of guests. + + + +```js +// When querying the index, +// the session does not need to be opened in cluster-wide mode. +const session = documentStore.openSession(); + +// Query for all vacant rooms (0 guests) +const vacantRooms = await session + .query({ + indexName: "Rooms/ByGuestsAndPaymentStatus" + }) + // Index-field 'NumberOfGuests' contains guest count from the compare-exchange item + .whereEquals("NumberOfGuests", 0) + .all(); + +// Using the sample data created above, Room R217 will be returned, since it has no guests. 
+``` + + +```js +const session = documentStore.openSession(); + +const vacantRooms = await session.advanced + .rawQuery("from index 'Rooms/ByGuestsAndPaymentStatus' where NumberOfGuests == 0") + .all(); +``` + + +```sql +from index "Rooms/ByGuestsAndPaymentStatus" +where NumberOfGuests == 0 +``` + + + +--- + +## Query the index and project compare-exchange values + +* In addition to querying index-fields that already contain information from the related compare-exchange value, + you can also project fields from the compare-exchange value into the query results. + +* In the following query example, we retrieve all customers who haven't fully paid yet, + and project their phone number from the compare-exchange value using `cmpxchg()`. + + + +```js +// The session does not need to be opened in cluster-wide mode +const session = documentStore.openSession(); + +// Define the projection +const queryData = QueryData.customFunction("room", `{ + // Use method 'cmpxchg' to retrieve data from the compare-exchange value + customerPhone: cmpxchg(room.RoomNumber).CustomerPhone, + roomNumber: room.RoomNumber +}`); + +const phonesOfCustomersThatNeedToPay = await session + .query({ indexName: "Rooms/ByGuestsAndPaymentStatus" }) + // Index-fields 'FullyPaid' & 'NumberOfGuests' contain info from the compare-exchange item + .whereGreaterThanOrEqual("NumberOfGuests", 1) + .andAlso() + .whereEquals("FullyPaid", false) + // Project query results + .selectFields(queryData) + .all(); + +// Using the sample data created above, +// customer from room R216 will be returned in the projected data, since they haven't fully paid yet. +``` + + +```js +const session = documentStore.openSession(); + +const phonesOfCustomersThatNeedToPay = await session.advanced + .rawQuery(` + from index "Rooms/ByGuestsAndPaymentStatus" as x + where x.FullyPaid = false + and (x.NumberOfGuests > 0 and x.NumberOfGuests != null) + select { + CustomerPhone : cmpxchg(x.RoomNumber).CustomerPhone, + RoomNumber : x.RoomNumber + } + `) + .all(); +``` + + +```sql +from index "Rooms/ByGuestsAndPaymentStatus" as x +where x.FullyPaid = false and (x.NumberOfGuests > 0 and x.NumberOfGuests != null) +select { + CustomerPhone : cmpxchg(x.RoomNumber).CustomerPhone, + RoomNumber : x.RoomNumber +} +``` + + + +--- + +## Syntax + +--- + +### `cmpxchg()` +Load a compare exchange value in the index-definition by its key. + + +```js +// Load one compare exchange value +cmpxchg(key); + +``` + + +| Parameter | Type | Description | +|-----------|----------------------|---------------------------------------------------| +| **key** | `string` | The key of a particular compare exchange value. | diff --git a/versioned_docs/version-7.1/compare-exchange/content/_overview-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_overview-csharp.mdx new file mode 100644 index 0000000000..a289b21b09 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_overview-csharp.mdx @@ -0,0 +1,311 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Compare-exchange items are **key/value pairs** where the key is a globally unique identifier in the database. + Items are versioned and managed at the cluster level. + +* Compare-exchange provides a built-in consensus mechanism for safe coordination across sessions and nodes. 
+ It ensures global consistency in the database and prevents conflicts when multiple clients try to modify or reserve + the same resource, allowing you to: + * Enforce global uniqueness (e.g., prevent duplicate usernames or emails). + * Assign work to a single client or reserve a resource once. + * Handle concurrency safely, without external services or custom locking logic. + +* Compare-exchange items are also suitable for storing shared or global values that aren't tied to a specific document - + such as configuration flags, feature toggles, or reusable identifiers stored under a unique key. + However, [unlike regular documents](../compare-exchange/overview#why-not-use-regular-documents-to-enforce-uniqueness), + compare-exchange provides atomic updates, version-based conflict prevention, and Raft-based consistency for distributed safety. + +* Compare-exchange items are [not replicated externally](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases) to other databases. + +* In this article: + * [What compare-exchange items are](../compare-exchange/overview#what-compare-exchange-items-are) + * [Ways to create and manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items) + * [Why compare-exchange items are not replicated to external databases](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases) + * [Why not use regular documents to enforce uniqueness](../compare-exchange/overview#why-not-use-regular-documents-to-enforce-uniqueness) + * [Example I - Email address reservation](../compare-exchange/overview#example-i---email-address-reservation) + * [Example II - Reserve a shared resource](../compare-exchange/overview#example-ii---reserve-a-shared-resource) + + + +--- + +## What compare-exchange items are + +Compare-exchange items are key/value pairs where the key serves as a unique value across your database. + +* Each compare-exchange item contains: + * **A key** - A unique string identifier in the database scope. + * **A value** - Can be any value (a number, string, array, or any valid JSON object). + * **Metadata** - Optional data that is associated with the compare-exchange item. Must be a valid JSON object. + For example, the metadata can be used to set expiration time for the compare-exchange item. + Learn more in [compare-exchange expiration](../compare-exchange/cmpxchg-expiration). + * **Raft index** - The compare-exchange item's version. + Any change to the value or metadata will increase this number. + +* Creating and modifying a compare-exchange item follows the same principle as the [compare-and-swap](https://en.wikipedia.org/wiki/Compare-and-swap) operation in multi-threaded systems, + but in RavenDB, this concept is applied to a distributed environment across multiple nodes instead of within a single multi-threaded process. + These operations require cluster consensus to ensure consistency. + Once consensus is reached, the compare-exchange items are distributed through the Raft algorithm to all nodes in the database group. + +--- + +## Ways to create and manage compare-exchange items + +Compare exchange items can be created and managed using any of the following approaches: + +* **Document Store Operations** + You can create and manage compare-exchange items using _document store_ operations. + For example, see [Create items using a store operation](../compare-exchange/create-cmpxchg-items#create-items-using-a-store-operation). 
+
+* **Cluster-Wide Sessions**
+  You can create and manage compare-exchange items from within a [Cluster-Wide session](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction).
+  For example, see [Create items using a cluster-wide session](../compare-exchange/create-cmpxchg-items#create-items-using-a-cluster-wide-session).
+  When using a cluster-wide session, the compare-exchange item is created as part of the cluster-wide transaction.
+  Like any transaction, all operations either succeed as a group or are rolled back.
+  If the transaction is not committed, the new compare-exchange item will not be stored on any node in the database group.
+
+* **Atomic Guards**
+  When creating _documents_ using a cluster-wide session, RavenDB automatically creates [Atomic Guards](../compare-exchange/atomic-guards),
+  which are special compare-exchange items that guarantee ACID transactions.
+  See [Cluster-wide transaction vs. Single-node transaction](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction) for a session comparison overview.
+
+* **Studio**
+  All compare-exchange items can also be managed from the **Compare-Exchange view** in the [Studio](../todo..):
+
+  ![The compare-exchange view](../assets/the-cmpxchg-view.png)
+
+  1. Open the **Documents** section in the Studio sidebar.
+  2. Click on the **Compare-Exchange** tab.
+  3. This is a compare-exchange item.
+     In this view you can create, edit, and delete compare-exchange items.
+
+---
+
+## Why compare-exchange items are not replicated to external databases
+
+* Each cluster defines its own policies and configurations, and should ideally have sole responsibility for managing its own documents.
+  Read [Consistency in a Globally Distributed System](https://ayende.com/blog/196769-B/data-ownership-in-a-distributed-system)
+  to learn more about why global database modeling is more efficient this way.
+
+* When creating a compare-exchange item, a Raft consensus is required from the nodes in the database group.
+  Externally replicating such data is problematic because the target database may reside within a cluster that is in an
+  unstable state where Raft decisions cannot be made. In such a state, the compare-exchange item will not be persisted in the target database.
+
+* Conflicts between documents that occur between two databases are resolved using the documents' change-vector.
+  Compare-exchange conflicts cannot be resolved in the same way, as they lack a similar conflict resolution mechanism.
+
+* To ensure unique values between two databases without using compare-exchange items, see [Example III](../compare-exchange/overview#example-iii---ensuring-unique-values-without-using-compare-exchange).
+
+* Learn more about Replication in RavenDB in [Replication overview](../server/clustering/replication/replication-overview).
+  For details about what is and what isn't replicated, see [What is Replicated](../server/ongoing-tasks/external-replication#what-is-replicated).
+
+---
+
+## Why not use regular documents to enforce uniqueness
+
+* You might consider storing a document with a predictable ID (for example, _phones/123456_) as a way to enforce uniqueness,
+  and then checking for its existence before allowing another document to use the same value.
+
+* While this might work in a single-node setup or with external replication,
+  it does not reliably enforce uniqueness in a clustered environment.
+
+* If a node was not part of the cluster when the document was created, it might not be aware of its existence when it comes back online.
+  In such cases, attempting to load the document on this node may return _null_, leading to duplicate values being inserted.
+
+* To reliably enforce uniqueness across all cluster nodes, you must use compare-exchange items,
+  which are designed for this purpose and ensure global consistency.
+
+---
+
+## Example I - Email address reservation
+
+The following example shows how to use compare-exchange to create documents with unique values.
+The scope is within the database group on a single cluster.
+
+
+```csharp
+string email = "user@example.com";
+
+User user = new User
+{
+    Email = email
+};
+
+using (IDocumentSession session = store.OpenSession())
+{
+    session.Store(user);
+    // At this point, the user object has a document ID assigned by the session.
+
+    // Try to reserve the user email using a compare-exchange item.
+    // Note: This 'put compare-exchange operation' is not part of the session transaction;
+    // it is a separate, cluster-wide reservation.
+    CompareExchangeResult<string> cmpXchgResult
+        = store.Operations.Send(
+            // parameters passed to the operation:
+            // email   - the unique key of the compare-exchange item
+            // user.Id - the value associated with the key
+            // 0       - pass `0` to ensure the item is created only if it doesn't already exist.
+            //           If a compare-exchange item with the given key already exists, the operation will fail.
+            new PutCompareExchangeValueOperation<string>(email, user.Id, 0));
+
+    if (cmpXchgResult.Successful == false)
+        throw new Exception("Email is already in use");
+
+    // At this point, the email has been successfully reserved/saved.
+    // We can now save the user document to the database.
+    session.SaveChanges();
+}
+```
+
+
+**Implications**:
+
+* This compare-exchange item was [created as an operation](../compare-exchange/create-cmpxchg-items#create-items-using-a-store-operation)
+  rather than with a [cluster-wide session](../compare-exchange/create-cmpxchg-items#create-items-using-a-cluster-wide-session).
+  Thus, if `session.SaveChanges` fails, then the email reservation is Not rolled back automatically.
+  It is your responsibility to do so.
+
+* The compare-exchange value that was saved can be accessed in a query using the `CmpXchg` method:
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Retrieve the user document that has the specified email:
+    var user = session.Query<User>()
+        // Access the compare-exchange value using the CmpXchg method:
+        .Where(x => x.Id == RavenQuery.CmpXchg<string>("ayende@ayende.com"))
+        .FirstOrDefault();
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var user = await asyncSession.Query<User>()
+        .Where(x => x.Id == RavenQuery.CmpXchg<string>("ayende@ayende.com"))
+        .FirstOrDefaultAsync();
+}
+```
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    var user = session.Advanced
+        .DocumentQuery<User>()
+        .WhereEquals("Id", CmpXchg.Value("ayende@ayende.com"))
+        .FirstOrDefault();
+}
+```
+
+
+```sql
+from "Users"
+where id() == cmpxchg("ayende@ayende.com")
+limit 0, 1 // take the first result
+```
+
+
+
+---
+
+## Example II - Reserve a shared resource
+
+In the following example, we use compare-exchange to reserve a shared resource.
+The scope is within the database group on a single cluster.
+
+The code also handles clients that never release a resource (e.g., due to failure) by using a timeout.
+
+
+```csharp
+private class SharedResource
+{
+    public DateTime? ReservedUntil { get; set; }
+}
+
+public void PrintWork()
+{
+    // Try to get hold of the printer resource
+    long reservationIndex = LockResource(store, "Printer/First-Floor", TimeSpan.FromMinutes(20));
+
+    try
+    {
+        // Do some work for the duration that was set (TimeSpan.FromMinutes(20)).
+        //
+        // In a distributed system (unlike a multi-threaded app), a process may crash or exit unexpectedly
+        // without releasing the resource it reserved (i.e. never reaching the 'finally' block).
+        // This can leave the resource locked indefinitely.
+        //
+        // To prevent that, each reservation includes a timeout (TimeSpan.FromMinutes(20)).
+        // If the process fails or exits, the resource becomes available again once the timeout expires.
+        //
+        // Important: Ensure the work completes within the timeout period.
+        // If it runs longer, another client may acquire the same resource at the same time.
+    }
+    finally
+    {
+        ReleaseResource(store, "Printer/First-Floor", reservationIndex);
+    }
+}
+
+public long LockResource(IDocumentStore store, string resourceName, TimeSpan duration)
+{
+    while (true)
+    {
+        DateTime now = DateTime.UtcNow;
+
+        SharedResource resource = new SharedResource
+        {
+            ReservedUntil = now.Add(duration)
+        };
+
+        CompareExchangeResult<SharedResource> putResult = store.Operations.Send(
+            new PutCompareExchangeValueOperation<SharedResource>(resourceName, resource, 0));
+
+        if (putResult.Successful)
+        {
+            // resourceName wasn't present - we managed to reserve
+            return putResult.Index;
+        }
+
+        // At this point, another process owns the resource.
+        // But if that process crashed and never released the resource, the reservation may have expired,
+        // so we can try to take the lock by overwriting the value using the current index.
+        if (putResult.Value.ReservedUntil < now)
+        {
+            // Time expired - Update the existing key with the new value
+            CompareExchangeResult<SharedResource> takeLockWithTimeoutResult = store.Operations.Send(
+                new PutCompareExchangeValueOperation<SharedResource>(
+                    resourceName, resource, putResult.Index
+                ));
+
+            if (takeLockWithTimeoutResult.Successful)
+            {
+                return takeLockWithTimeoutResult.Index;
+            }
+        }
+
+        // Wait a little bit and retry
+        Thread.Sleep(20);
+    }
+}
+
+public void ReleaseResource(IDocumentStore store, string resourceName, long index)
+{
+    CompareExchangeResult<SharedResource> deleteResult = store.Operations.Send(
+        new DeleteCompareExchangeValueOperation<SharedResource>(resourceName, index));
+
+    // We have 2 options here:
+    // deleteResult.Successful is true  - we managed to release the resource
+    // deleteResult.Successful is false - someone else took the lock due to timeout
+}
+```
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_overview-java.mdx b/versioned_docs/version-7.1/compare-exchange/content/_overview-java.mdx
new file mode 100644
index 0000000000..ffb07e4506
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_overview-java.mdx
@@ -0,0 +1,288 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Compare-exchange items are **key/value pairs** where the key is a globally unique identifier in the database.
+  Items are versioned and managed at the cluster level.
+
+* Compare-exchange provides a built-in consensus mechanism for safe coordination across sessions and nodes.
+ It ensures global consistency in the database and prevents conflicts when multiple clients try to modify or reserve + the same resource, allowing you to: + * Enforce global uniqueness (e.g., prevent duplicate usernames or emails). + * Assign work to a single client or reserve a resource once. + * Handle concurrency safely, without external services or custom locking logic. + +* Compare-exchange items are also suitable for storing shared or global values that aren't tied to a specific document - + such as configuration flags, feature toggles, or reusable identifiers stored under a unique key. + However, [unlike regular documents](../compare-exchange/overview#why-not-use-regular-documents-to-enforce-uniqueness), + compare-exchange provides atomic updates, version-based conflict prevention, and Raft-based consistency for distributed safety. + +* Compare-exchange items are [not replicated externally](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases) to other databases. + +* In this article: + * [What compare-exchange items are](../compare-exchange/overview#what-compare-exchange-items-are) + * [Ways to create and manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items) + * [Why compare-exchange items are not replicated to external databases](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases) + * [Why not use regular documents to enforce uniqueness](../compare-exchange/overview#why-not-use-regular-documents-to-enforce-uniqueness) + * [Example I - Email address reservation](../compare-exchange/overview#example-i---email-address-reservation) + * [Example II - Reserve a shared resource](../compare-exchange/overview#example-ii---reserve-a-shared-resource) + + + +--- + +## What compare-exchange items are + +Compare-exchange items are key/value pairs where the key serves as a unique value across your database. + +* Each compare-exchange item contains: + * **A key** - A unique string identifier in the database scope. + * **A value** - Can be any value (a number, string, array, or any valid JSON object). + * **Metadata** - Optional data that is associated with the compare-exchange item. Must be a valid JSON object. + For example, the metadata can be used to set expiration time for the compare-exchange item. + Learn more in [compare-exchange expiration](../compare-exchange/cmpxchg-expiration). + * **Raft index** - The compare-exchange item's version. + Any change to the value or metadata will increase this number. + +* Creating and modifying a compare-exchange item follows the same principle as the [compare-and-swap](https://en.wikipedia.org/wiki/Compare-and-swap) operation in multi-threaded systems, + but in RavenDB, this concept is applied to a distributed environment across multiple nodes instead of within a single multi-threaded process. + These operations require cluster consensus to ensure consistency. + Once consensus is reached, the compare-exchange items are distributed through the Raft algorithm to all nodes in the database group. + +--- + +## Ways to create and manage compare-exchange items + +Compare exchange items can be created and managed using any of the following approaches: + +* **Document Store Operations** + You can create and manage compare-exchange items using _document store_ operations. + For example, see [Create items using a store operation](../compare-exchange/create-cmpxchg-items#create-items-using-a-store-operation). 
+
+* **Cluster-Wide Sessions**
+  You can create and manage compare-exchange items from within a [Cluster-Wide session](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction).
+  For example, see [Create items using a cluster-wide session](../compare-exchange/create-cmpxchg-items#create-items-using-a-cluster-wide-session).
+  When using a cluster-wide session, the compare-exchange item is created as part of the cluster-wide transaction.
+  If the session fails, the item creation also fails, and none of the nodes in the database group will store the new compare-exchange item.
+
+* **Atomic Guards**
+  When creating _documents_ using a cluster-wide session, RavenDB automatically creates [Atomic Guards](../compare-exchange/atomic-guards),
+  which are special compare-exchange items that guarantee ACID transactions.
+  See [Cluster-wide transaction vs. Single-node transaction](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction) for a session comparison overview.
+
+* **Studio**
+  All compare-exchange items can also be managed from the **Compare-Exchange view** in the [Studio](../todo..):
+
+  ![The compare-exchange view](../assets/the-cmpxchg-view.png)
+
+  1. Open the **Documents** section in the Studio sidebar.
+  2. Click on the **Compare-Exchange** tab.
+  3. This is a compare-exchange item.
+     In this view you can create, edit, and delete compare-exchange items.
+
+---
+
+## Why compare-exchange items are not replicated to external databases
+
+* Each cluster defines its own policies and configurations, and should ideally have sole responsibility for managing its own documents.
+  Read [Consistency in a Globally Distributed System](https://ayende.com/blog/196769-B/data-ownership-in-a-distributed-system)
+  to learn more about why global database modeling is more efficient this way.
+
+* When creating a compare-exchange item, a Raft consensus is required from the nodes in the database group.
+  Externally replicating such data is problematic because the target database may reside within a cluster that is in an
+  unstable state where Raft decisions cannot be made. In such a state, the compare-exchange item will not be persisted in the target database.
+
+* Conflicts between documents that occur between two databases are resolved using the documents' change-vector.
+  Compare-exchange conflicts cannot be resolved in the same way, as they lack a similar conflict resolution mechanism.
+
+* To ensure unique values between two databases without using compare-exchange items, see [Example III](../compare-exchange/overview#example-iii---ensuring-unique-values-without-using-compare-exchange).
+
+* Learn more about Replication in RavenDB in [Replication overview](../server/clustering/replication/replication-overview).
+  For details about what is and what isn't replicated, see [What is Replicated](../server/ongoing-tasks/external-replication#what-is-replicated).
+
+---
+
+## Why not use regular documents to enforce uniqueness
+
+* You might consider storing a document with a predictable ID (for example, _phones/123456_) as a way to enforce uniqueness,
+  and then checking for its existence before allowing another document to use the same value.
+
+* While this might work in a single-node setup or with external replication,
+  it does not reliably enforce uniqueness in a clustered environment.
+
+* If a node was not part of the cluster when the document was created, it might not be aware of its existence when it comes back online.
+  In such cases, attempting to load the document on this node may return _null_, leading to duplicate values being inserted.
+
+* To reliably enforce uniqueness across all cluster nodes, you must use compare-exchange items,
+  which are designed for this purpose and ensure global consistency.
+
+---
+
+## Example I - Email address reservation
+
+The following example shows how to use compare-exchange to create documents with unique values.
+The scope is within the database group on a single cluster.
+
+
+```java
+String email = "user@example.com";
+
+User user = new User();
+user.setEmail(email);
+
+try (IDocumentSession session = store.openSession()) {
+    session.store(user);
+
+    // At this point, the user object has a document ID assigned by the session.
+
+    // Try to reserve the user email using a compare-exchange item.
+    // Note: This 'put compare-exchange operation' is not part of the session transaction;
+    // it is a separate, cluster-wide reservation.
+    CompareExchangeResult<String> cmpXchgResult = store
+        .operations().send(
+            // parameters passed to the operation:
+            // email        - the unique key of the compare-exchange item
+            // user.getId() - the value associated with the key
+            // 0            - pass `0` to ensure the item is created only if it doesn't already exist.
+            //                If a compare-exchange item with the given key already exists, the operation will fail.
+            new PutCompareExchangeValueOperation<>(email, user.getId(), 0));
+
+    if (!cmpXchgResult.isSuccessful()) {
+        throw new RuntimeException("Email is already in use");
+    }
+
+    // At this point, the email has been successfully reserved/saved.
+    // We can now save the user document to the database.
+    session.saveChanges();
+}
+```
+
+
+**Implications**:
+
+* This compare-exchange item was [created as an operation](../compare-exchange/create-cmpxchg-items#create-items-using-a-store-operation)
+  rather than with a [cluster-wide session](../compare-exchange/create-cmpxchg-items#create-items-using-a-cluster-wide-session).
+  Thus, if `session.saveChanges` fails, then the email reservation is Not rolled back automatically.
+  It is your responsibility to do so.
+
+* The compare-exchange value that was saved can be accessed in a query using the `CmpXchg.value` method:
+
+
+
+
+
+{`try (IDocumentSession session = store.openSession()) {
+    List<User> query = session.advanced().rawQuery(User.class,
+        "from Users as s where id() == cmpxchg(\\"ayende@ayende.com\\")")
+        .toList();
+
+    IDocumentQuery<User> q = session.advanced()
+        .documentQuery(User.class)
+        .whereEquals("id", CmpXchg.value("ayende@ayende.com"));
+}
+`}
+
+
+
+```sql
+from "Users"
+where id() == cmpxchg("ayende@ayende.com")
+limit 0, 1 // take the first result
+```
+
+
+
+---
+
+## Example II - Reserve a shared resource
+
+In the following example, we use compare-exchange to reserve a shared resource.
+The scope is within the database group on a single cluster.
+
+The code also handles clients that never release a resource (e.g., due to failure) by using a timeout.
+
+
+
+{`private class SharedResource \{
+    private LocalDateTime reservedUntil;
+
+    public LocalDateTime getReservedUntil() \{
+        return reservedUntil;
+    \}
+
+    public void setReservedUntil(LocalDateTime reservedUntil) \{
+        this.reservedUntil = reservedUntil;
+    \}
+\}
+
+public void printWork() throws InterruptedException \{
+    // Try to get hold of the printer resource
+    long reservationIndex = lockResource(store, "Printer/First-Floor", Duration.ofMinutes(20));
+
+    try \{
+        // Do some work for the duration that was set (Duration.ofMinutes(20)).
+        //
+        // In a distributed system (unlike a multi-threaded app), a process may crash or exit unexpectedly
+        // without releasing the resource it reserved (i.e. never reaching the 'finally' block).
+        // This can leave the resource locked indefinitely.
+        //
+        // To prevent that, each reservation includes a timeout (Duration.ofMinutes(20)).
+        // If the process fails or exits, the resource becomes available again once the timeout expires.
+        //
+        // Important: Ensure the work completes within the timeout period.
+        // If it runs longer, another client may acquire the same resource at the same time.
+    \} finally \{
+        releaseResource(store, "Printer/First-Floor", reservationIndex);
+    \}
+\}
+
+public long lockResource(IDocumentStore store, String resourceName, Duration duration) throws InterruptedException \{
+    while (true) \{
+        LocalDateTime now = LocalDateTime.now();
+
+        SharedResource resource = new SharedResource();
+        resource.setReservedUntil(now.plus(duration));
+
+        CompareExchangeResult<SharedResource> saveResult =
+            store.operations().send(
+                new PutCompareExchangeValueOperation<>(resourceName, resource, 0));
+
+        if (saveResult.isSuccessful()) \{
+            // resourceName wasn't present - we managed to reserve
+            return saveResult.getIndex();
+        \}
+
+        // At this point, another process owns the resource.
+        // But if that process crashed and never released the resource, the reservation may have expired,
+        // so we can try to take the lock by overwriting the value using the current index.
+        if (saveResult.getValue().getReservedUntil().isBefore(now)) \{
+            // Time expired - Update the existing key with the new value
+            CompareExchangeResult<SharedResource> takeLockWithTimeoutResult =
+                store.operations().send(
+                    new PutCompareExchangeValueOperation<>(resourceName, resource, saveResult.getIndex()));
+
+            if (takeLockWithTimeoutResult.isSuccessful()) \{
+                return takeLockWithTimeoutResult.getIndex();
+            \}
+        \}
+
+        // Wait a little bit and retry
+        Thread.sleep(20);
+    \}
+\}
+
+public void releaseResource(IDocumentStore store, String resourceName, long index) \{
+    CompareExchangeResult<SharedResource> deleteResult = store
+        .operations().send(
+            new DeleteCompareExchangeValueOperation<>(SharedResource.class, resourceName, index));
+
+    // We have 2 options here:
+    // deleteResult.isSuccessful() is true  - we managed to release the resource
+    // deleteResult.isSuccessful() is false - someone else took the lock due to timeout
+\}
+`}
+
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_overview-php.mdx b/versioned_docs/version-7.1/compare-exchange/content/_overview-php.mdx
new file mode 100644
index 0000000000..03f0abab9f
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_overview-php.mdx
@@ -0,0 +1,305 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Compare-exchange items are **key/value pairs** where the key is a globally unique identifier in the database.
+ Items are versioned and managed at the cluster level. + +* Compare-exchange provides a built-in consensus mechanism for safe coordination across sessions and nodes. + It ensures global consistency in the database and prevents conflicts when multiple clients try to modify or reserve + the same resource, allowing you to: + * Enforce global uniqueness (e.g., prevent duplicate usernames or emails). + * Assign work to a single client or reserve a resource once. + * Handle concurrency safely, without external services or custom locking logic. + +* Compare-exchange items are also suitable for storing shared or global values that aren't tied to a specific document - + such as configuration flags, feature toggles, or reusable identifiers stored under a unique key. + However, [unlike regular documents](../compare-exchange/overview#why-not-use-regular-documents-to-enforce-uniqueness), + compare-exchange provides atomic updates, version-based conflict prevention, and Raft-based consistency for distributed safety. + +* Compare-exchange items are [not replicated externally](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases) to other databases. + +* In this article: + * [What compare-exchange items are](../compare-exchange/overview#what-compare-exchange-items-are) + * [Ways to create and manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items) + * [Why compare-exchange items are not replicated to external databases](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases) + * [Why not use regular documents to enforce uniqueness](../compare-exchange/overview#why-not-use-regular-documents-to-enforce-uniqueness) + * [Example I - Email address reservation](../compare-exchange/overview#example-i---email-address-reservation) + * [Example II - Reserve a shared resource](../compare-exchange/overview#example-ii---reserve-a-shared-resource) + + + +--- + +## What compare-exchange items are + +Compare-exchange items are key/value pairs where the key serves as a unique value across your database. + +* Each compare-exchange item contains: + * **A key** - A unique string identifier in the database scope. + * **A value** - Can be any value (a number, string, array, or any valid JSON object). + * **Metadata** - Optional data that is associated with the compare-exchange item. Must be a valid JSON object. + For example, the metadata can be used to set expiration time for the compare-exchange item. + Learn more in [compare-exchange expiration](../compare-exchange/cmpxchg-expiration). + * **Raft index** - The compare-exchange item's version. + Any change to the value or metadata will increase this number. + +* Creating and modifying a compare-exchange item follows the same principle as the [compare-and-swap](https://en.wikipedia.org/wiki/Compare-and-swap) operation in multi-threaded systems, + but in RavenDB, this concept is applied to a distributed environment across multiple nodes instead of within a single multi-threaded process. + These operations require cluster consensus to ensure consistency. + Once consensus is reached, the compare-exchange items are distributed through the Raft algorithm to all nodes in the database group. 
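+
+As a concrete illustration of the compare-and-swap flow, the sketch below reads an item together with
+its current Raft index, modifies the value, and writes it back while passing that index - so the write
+succeeds only if no one else changed the item in the meantime. This is a minimal sketch; it assumes a
+`GetCompareExchangeValueOperation` that mirrors the `PutCompareExchangeValueOperation` used in the
+examples below, and a hypothetical `config/featureFlag` item holding a boolean.
+
+```php
+// Read the current value and remember its Raft index (assumed API shape)
+$item = $store->operations()->send(
+    new GetCompareExchangeValueOperation("config/featureFlag"));
+
+$newValue = !$item->getValue(); // toggle the flag
+
+// Write back, passing the index that was read.
+// If another client modified the item in the meantime, the indexes no longer
+// match and the operation fails instead of silently overwriting their change.
+$result = $store->operations()->send(
+    new PutCompareExchangeValueOperation("config/featureFlag", $newValue, $item->getIndex()));
+
+if (!$result->isSuccessful()) {
+    // The item changed concurrently - reload and retry, or report the conflict.
+}
+```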
+
+---
+
+## Ways to create and manage compare-exchange items
+
+Compare exchange items can be created and managed using any of the following approaches:
+
+* **Document Store Operations**
+  You can create and manage compare-exchange items using _document store_ operations.
+  For example, see [Create items using a store operation](../compare-exchange/create-cmpxchg-items#create-items-using-a-store-operation).
+
+* **Cluster-Wide Sessions**
+  You can create and manage compare-exchange items from within a [Cluster-Wide session](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction).
+  For example, see [Create items using a cluster-wide session](../compare-exchange/create-cmpxchg-items#create-items-using-a-cluster-wide-session).
+  When using a cluster-wide session, the compare-exchange item is created as part of the cluster-wide transaction.
+  If the session fails, the item creation also fails, and none of the nodes in the database group will store the new compare-exchange item.
+
+* **Atomic Guards**
+  When creating _documents_ using a cluster-wide session, RavenDB automatically creates [Atomic Guards](../compare-exchange/atomic-guards),
+  which are special compare-exchange items that guarantee ACID transactions.
+  See [Cluster-wide transaction vs. Single-node transaction](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction) for a session comparison overview.
+
+* **Studio**
+  All compare-exchange items can also be managed from the **Compare-Exchange view** in the [Studio](../todo..):
+
+  ![The compare-exchange view](../assets/the-cmpxchg-view.png)
+
+  1. Open the **Documents** section in the Studio sidebar.
+  2. Click on the **Compare-Exchange** tab.
+  3. This is a compare-exchange item.
+     In this view you can create, edit, and delete compare-exchange items.
+
+---
+
+## Why compare-exchange items are not replicated to external databases
+
+* Each cluster defines its own policies and configurations, and should ideally have sole responsibility for managing its own documents.
+  Read [Consistency in a Globally Distributed System](https://ayende.com/blog/196769-B/data-ownership-in-a-distributed-system)
+  to learn more about why global database modeling is more efficient this way.
+
+* When creating a compare-exchange item, a Raft consensus is required from the nodes in the database group.
+  Externally replicating such data is problematic because the target database may reside within a cluster that is in an
+  unstable state where Raft decisions cannot be made. In such a state, the compare-exchange item will not be persisted in the target database.
+
+* Conflicts between documents that occur between two databases are resolved using the documents' change-vector.
+  Compare-exchange conflicts cannot be resolved in the same way, as they lack a similar conflict resolution mechanism.
+
+* To ensure unique values between two databases without using compare-exchange items, see [Example III](../compare-exchange/overview#example-iii---ensuring-unique-values-without-using-compare-exchange).
+
+* Learn more about Replication in RavenDB in [Replication overview](../server/clustering/replication/replication-overview).
+  For details about what is and what isn't replicated, see [What is Replicated](../server/ongoing-tasks/external-replication#what-is-replicated).
+
+---
+
+## Why not use regular documents to enforce uniqueness
+
+* You might consider storing a document with a predictable ID (for example, _phones/123456_) as a way to enforce uniqueness,
+  and then checking for its existence before allowing another document to use the same value.
+
+* While this might work in a single-node setup or with external replication,
+  it does not reliably enforce uniqueness in a clustered environment.
+
+* If a node was not part of the cluster when the document was created, it might not be aware of its existence when it comes back online.
+  In such cases, attempting to load the document on this node may return _null_, leading to duplicate values being inserted.
+
+* To reliably enforce uniqueness across all cluster nodes, you must use compare-exchange items,
+  which are designed for this purpose and ensure global consistency.
+
+---
+
+## Example I - Email address reservation
+
+The following example shows how to use compare-exchange to create documents with unique values.
+The scope is within the database group on a single cluster.
+
+
+
+{`$email = "user@example.com";
+
+$user = new User();
+$user->setEmail($email);
+
+$session = $store->openSession();
+try \{
+    $session->store($user);
+
+    // At this point, the user object has a document ID assigned by the session.
+
+    // Try to reserve the user email using a compare-exchange item.
+    // Note: This 'put compare-exchange operation' is not part of the session transaction;
+    // it is a separate, cluster-wide reservation.
+
+    /** @var CompareExchangeResult $cmpXchgResult */
+    $cmpXchgResult = $store->operations()->send(
+        // Parameters passed to the operation:
+        // $email         - the unique key of the compare-exchange item
+        // $user->getId() - the value associated with the key
+        // 0              - ensures the item is created only if it doesn't already exist
+        //                  If a compare-exchange item with the given key already exists, the operation will fail.
+        new PutCompareExchangeValueOperation($email, $user->getId(), 0));
+
+    if (!$cmpXchgResult->isSuccessful()) \{
+        throw new RuntimeException("Email is already in use");
+    \}
+
+    // At this point, the email has been successfully reserved/saved.
+    // We can now save the user document to the database.
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+**Implications**:
+
+* This compare-exchange item was [created as an operation](../compare-exchange/create-cmpxchg-items#create-items-using-a-store-operation)
+  rather than with a [cluster-wide session](../compare-exchange/create-cmpxchg-items#create-items-using-a-cluster-wide-session).
+  Thus, if `$session->saveChanges()` fails, then the email reservation is Not rolled back automatically.
+  It is your responsibility to do so.
+
+* The compare-exchange value that was saved can be accessed in a query using the `CmpXchg::value` method:
+
+
+
+
+{`$query = $session->advanced()->rawQuery(User::class,
+    "from Users as s where id() == cmpxchg(\\"emails/ayende@ayende.com\\")")
+    ->toList();
+`}
+
+
+
+
+{`$q = $session->advanced()
+    ->documentQuery(User::class)
+    ->whereEquals("id", CmpXchg::value("emails/ayende@ayende.com"));
+`}
+
+
+
+
+{`from Users as s where id() == cmpxchg("emails/ayende@ayende.com")
+`}
+
+
+
+
+---
+
+## Example II - Reserve a shared resource
+
+In the following example, we use compare-exchange to reserve a shared resource.
+The scope is within the database group on a single cluster.
+
+The code also handles clients that never release a resource (e.g., due to failure) by using a timeout.
+
+
+
+{`class SharedResource
+\{
+    private ?DateTime $reservedUntil = null;
+
+    public function getReservedUntil(): ?DateTime
+    \{
+        return $this->reservedUntil;
+    \}
+
+    public function setReservedUntil(?DateTime $reservedUntil): void
+    \{
+        $this->reservedUntil = $reservedUntil;
+    \}
+\}
+
+class CompareExchangeSharedResource
+\{
+    private ?DocumentStore $store = null;
+
+    public function printWork(): void
+    \{
+        // Try to get hold of the printer resource
+        $reservationIndex = $this->lockResource($this->store, "Printer/First-Floor", Duration::ofMinutes(20));
+
+        try \{
+            // Do some work for the duration that was set (Duration::ofMinutes(20)).
+            //
+            // In a distributed system (unlike a multi-threaded app), a process may crash or exit unexpectedly
+            // without releasing the resource it reserved (i.e. never reaching the 'finally' block).
+            // This can leave the resource locked indefinitely.
+            //
+            // To prevent that, each reservation includes a timeout (Duration::ofMinutes(20)).
+            // If the process fails or exits, the resource becomes available again once the timeout expires.
+            //
+            // Important: Ensure the work completes within the timeout period.
+            // If it runs longer, another client may acquire the same resource at the same time.
+        \} finally \{
+            $this->releaseResource($this->store, "Printer/First-Floor", $reservationIndex);
+        \}
+    \}
+
+    public function lockResource(DocumentStoreInterface $store, ?string $resourceName, Duration $duration): int
+    \{
+        while (true) \{
+            $now = new DateTime();
+
+            $resource = new SharedResource();
+            // Clone $now so that adding the duration does not mutate the time we compare against below
+            $resource->setReservedUntil((clone $now)->add($duration->toDateInterval()));
+
+            /** @var CompareExchangeResult $saveResult */
+            $saveResult = $store->operations()->send(
+                new PutCompareExchangeValueOperation($resourceName, $resource, 0));
+
+            if ($saveResult->isSuccessful()) \{
+                // resourceName wasn't present - we managed to reserve
+                return $saveResult->getIndex();
+            \}
+
+            // At this point, another process owns the resource.
+            // But if that process crashed and never released the resource, the reservation may have expired,
+            // so we can try to take the lock by overwriting the value using the current index.
+            if ($saveResult->getValue()->getReservedUntil() < $now) \{
+                // Time expired - Update the existing key with the new value
+                /** @var CompareExchangeResult $takeLockWithTimeoutResult */
+                $takeLockWithTimeoutResult = $store->operations()->send(
+                    new PutCompareExchangeValueOperation($resourceName, $resource, $saveResult->getIndex()));
+
+                if ($takeLockWithTimeoutResult->isSuccessful()) \{
+                    return $takeLockWithTimeoutResult->getIndex();
+                \}
+            \}
+
+            // Wait a little bit and retry
+            usleep(20000);
+        \}
+    \}
+
+    public function releaseResource(DocumentStoreInterface $store, ?string $resourceName, int $index): void
+    \{
+        $deleteResult = $store->operations()->send(
+            new DeleteCompareExchangeValueOperation(SharedResource::class, $resourceName, $index)
+        );
+
+        // We have 2 options here:
+        // $deleteResult->isSuccessful() is true  - we managed to release the resource
+        // $deleteResult->isSuccessful() is false - someone else took the lock due to timeout
+    \}
+\}
+`}
+
+
diff --git a/versioned_docs/version-7.1/compare-exchange/content/_overview-python.mdx b/versioned_docs/version-7.1/compare-exchange/content/_overview-python.mdx
new file mode 100644
index 0000000000..86c6644d25
--- /dev/null
+++ b/versioned_docs/version-7.1/compare-exchange/content/_overview-python.mdx
@@ -0,0 +1,256 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Compare-exchange items are **key/value pairs** where the key is a globally unique identifier in the database.
+  Items are versioned and managed at the cluster level.
+
+* Compare-exchange provides a built-in consensus mechanism for safe coordination across sessions and nodes.
+  It ensures global consistency in the database and prevents conflicts when multiple clients try to modify or reserve
+  the same resource, allowing you to:
+  * Enforce global uniqueness (e.g., prevent duplicate usernames or emails).
+  * Assign work to a single client or reserve a resource once.
+  * Handle concurrency safely, without external services or custom locking logic.
+
+* Compare-exchange items are also suitable for storing shared or global values that aren't tied to a specific document -
+  such as configuration flags, feature toggles, or reusable identifiers stored under a unique key.
+  However, [unlike regular documents](../compare-exchange/overview#why-not-use-regular-documents-to-enforce-uniqueness),
+  compare-exchange provides atomic updates, version-based conflict prevention, and Raft-based consistency for distributed safety.
+
+* Compare-exchange items are [not replicated externally](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases) to other databases.
+ +* In this article: + * [What compare-exchange items are](../compare-exchange/overview#what-compare-exchange-items-are) + * [Ways to create and manage compare-exchange items](../compare-exchange/overview#ways-to-create-and-manage-compare-exchange-items) + * [Why compare-exchange items are not replicated to external databases](../compare-exchange/overview#why-compare-exchange-items-are-not-replicated-to-external-databases) + * [Why not use regular documents to enforce uniqueness](../compare-exchange/overview#why-not-use-regular-documents-to-enforce-uniqueness) + * [Example I - Email address reservation](../compare-exchange/overview#example-i---email-address-reservation) + * [Example II - Reserve a shared resource](../compare-exchange/overview#example-ii---reserve-a-shared-resource) + + + +--- + +## What compare-exchange items are + +Compare-exchange items are key/value pairs where the key serves as a unique value across your database. + +* Each compare-exchange item contains: + * **A key** - A unique string identifier in the database scope. + * **A value** - Can be any value (a number, string, array, or any valid JSON object). + * **Metadata** - Optional data that is associated with the compare-exchange item. Must be a valid JSON object. + For example, the metadata can be used to set an expiration time for the compare-exchange item. + Learn more in [compare-exchange expiration](../compare-exchange/cmpxchg-expiration). + * **Raft index** - The compare-exchange item's version. + Any change to the value or metadata will increase this number. + +* Creating and modifying a compare-exchange item follows the same principle as the [compare-and-swap](https://en.wikipedia.org/wiki/Compare-and-swap) operation in multi-threaded systems, + but in RavenDB, this concept is applied to a distributed environment across multiple nodes instead of within a single multi-threaded process. + These operations require cluster consensus to ensure consistency. + Once consensus is reached, the compare-exchange items are distributed through the Raft algorithm to all nodes in the database group. + +--- + +## Ways to create and manage compare-exchange items + +Compare-exchange items can be created and managed using any of the following approaches: + +* **Document Store Operations** + You can create and manage compare-exchange items using _document store_ operations. + For example, see [Create items using a store operation](../compare-exchange/create-cmpxchg-items#create-items-using-a-store-operation). + +* **Cluster-Wide Sessions** + You can create and manage compare-exchange items from within a [Cluster-Wide session](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction). + For example, see [Create items using a cluster-wide session](../compare-exchange/create-cmpxchg-items#create-items-using-a-cluster-wide-session). + When using a cluster-wide session, the compare-exchange item is created as part of the cluster-wide transaction. + If the session fails, the item creation also fails, and none of the nodes in the database group will store the new compare-exchange item. + +* **Atomic Guards** + When creating _documents_ using a cluster-wide session, RavenDB automatically creates [Atomic Guards](../compare-exchange/atomic-guards), + which are special compare-exchange items that guarantee ACID transactions. + See [Cluster-wide transaction vs.
Single-node transaction](../client-api/session/cluster-transaction/overview#cluster-wide-transaction-vs-single-node-transaction) for a session comparison overview. + +* **Studio** + All compare-exchange items can also be managed from the **Compare-Exchange view** in the [Studio](../todo..): + + ![The compare-exchange view](../assets/the-cmpxchg-view.png) + + 1. Open the **Documents** section in the Studio sidebar. + 2. Click on the **Compare-Exchange** tab. + 3. This is a compare-exchange item. + In this view you can create, edit, and delete compare-exchange items. + +--- + +## Why compare-exchange items are not replicated to external databases + +* Each cluster defines its own policies and configurations, and should ideally have sole responsibility for managing its own documents. + Read [Consistency in a Globally Distributed System](https://ayende.com/blog/196769-B/data-ownership-in-a-distributed-system) + to learn more about why global database modeling is more efficient this way. + +* When creating a compare-exchange item, a Raft consensus is required from the nodes in the database group. + Externally replicating such data is problematic because the target database may reside within a cluster that is in an + unstable state where Raft decisions cannot be made. In such a state, the compare-exchange item will not be persisted in the target database. + +* Conflicts between documents that occur between two databases are resolved using the documents' change-vector. + Compare-exchange conflicts cannot be resolved in the same way, as they lack a similar conflict resolution mechanism. + +* To ensure unique values between two databases without using compare-exchange items, see [Example III](../compare-exchange/overview#example-iii---ensuring-unique-values-without-using-compare-exchange). + +* Learn more about Replication in RavenDB in [Replication overview](../server/clustering/replication/replication-overview). + For details about what is and isn't replicated, see [What is Replicated](../server/ongoing-tasks/external-replication#what-is-replicated). + +--- + +## Why not use regular documents to enforce uniqueness + +* You might consider storing a document with a predictable ID (for example, _phones/123456_) as a way to enforce uniqueness, + and then checking for its existence before allowing another document to use the same value. + +* While this might work in a single-node setup or with external replication, + it does not reliably enforce uniqueness in a clustered environment. + +* If a node was not part of the cluster when the document was created, it might not be aware of its existence when it comes back online. + In such cases, attempting to load the document on this node may return _null_, leading to duplicate values being inserted. + +* To reliably enforce uniqueness across all cluster nodes, you must use compare-exchange items, + which are designed for this purpose and ensure global consistency. + +--- + +## Example I - Email address reservation + +The following example shows how to use compare-exchange to create documents with unique values. +The scope is within the database group on a single cluster. + + + +{`email = "user@example.com" + +user = User(email=email) + +with store.open_session() as session: + session.store(user) + # At this point, the user object has a document ID assigned by the session. + + # Try to reserve the user email using a compare-exchange item.
+ # Note: This 'put compare-exchange operation' is not part of the session transaction; + # it is a separate, cluster-wide reservation. + cmp_xchg_result = store.operations.send( + # Parameters passed to the operation: + # email - the unique key of the compare-exchange item + # user.Id - the value associated with the key + # 0 - ensures the item is created only if it doesn't already exist + # If a compare-exchange item with the given key already exists, the operation will fail. + PutCompareExchangeValueOperation(email, user.Id, 0) + ) + + if cmp_xchg_result.successful is False: + raise RuntimeError("Email is already in use") + + # At this point, the email has been successfully reserved/saved. + # We can now save the user document to the database. + session.save_changes() +`} + + + +**Implications**: + +* This compare-exchange item was [created as an operation](../compare-exchange/create-cmpxchg-items#create-items-using-a-store-operation) + rather than with a [cluster-wide session](../compare-exchange/create-cmpxchg-items#create-items-using-a-cluster-wide-session). + Thus, if `session.save_changes` fails, then the email reservation is Not rolled back automatically. + It is your responsibility to do so. + +* The compare-exchange value that was saved can be accessed in a query using the `CmpXchg` method: + + + + +{`query = session.query(object_type=User).where_equals("Id", CmpXchg.value("emails/ayende@ayende.com")) +`} + + + + +{`from Users as s where id() == cmpxchg("emails/ayende@ayende.com") +`} + + + + +--- + +## Example II - Reserve a shared resource + +In the following example, we use compare-exchange to reserve a shared resource. +The scope is within the database group on a single cluster. + +The code also handles clients that never release resources (e.g., due to failure) by using a timeout. + + + +{`class SharedResource: + def __init__(self, reserved_until: datetime = None): + self.reserved_until = reserved_until + +def print_work() -> None: + # Try to get hold of the printer resource + reservation_index = lock_resource(store, "Printer/First-Floor", timedelta(minutes=20)) + + try: + ... + # Do some work for the duration that was set (timedelta(minutes=20)). + # + # In a distributed system (unlike a multi-threaded app), a process may crash or exit unexpectedly + # without releasing the resource it reserved (i.e. never reaching the 'finally' block). + # This can leave the resource locked indefinitely. + # + # To prevent that, each reservation includes a timeout (timedelta(minutes=20)). + # If the process fails or exits, the resource becomes available again once the timeout expires. + # + # Important: Ensure the work completes within the timeout period. + # If it runs longer, another client may acquire the same resource at the same time. + finally: + release_resource(store, "Printer/First-Floor", reservation_index) + +def lock_resource(document_store: DocumentStore, resource_name: str, duration: timedelta): + while True: + now = datetime.utcnow() + + resource = SharedResource(reserved_until=now + duration) + save_result = document_store.operations.send( + PutCompareExchangeValueOperation(resource_name, resource, 0) + ) + + if save_result.successful: + # resource_name wasn't present - we managed to reserve + return save_result.index + + # At this point, another process owns the resource. + # But if that process crashed and never released the resource, the reservation may have expired, + # so we can try to take the lock by overwriting the value using the current index.
+ if save_result.value.reserved_until < now: + # Time expired - Update the existing key with the new value + take_lock_with_timeout_result = document_store.operations.send( + PutCompareExchangeValueOperation(resource_name, resource, save_result.index) + ) + + if take_lock_with_timeout_result.successful: + return take_lock_with_timeout_result.index + + # Wait a little bit and retry + time.sleep(0.02) + +def release_resource(store: DocumentStore, resource_name: str, index: int) -> None: + delete_result = store.operations.send(DeleteCompareExchangeValueOperation(resource_name, index)) + + # We have 2 options here: + # delete_result.successful is True - we managed to release the resource + # delete_result.successful is False - someone else took the lock due to timeout +`} + + diff --git a/versioned_docs/version-7.1/compare-exchange/content/_update-cmpxchg-item-csharp.mdx b/versioned_docs/version-7.1/compare-exchange/content/_update-cmpxchg-item-csharp.mdx new file mode 100644 index 0000000000..41d46fc4f3 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_update-cmpxchg-item-csharp.mdx @@ -0,0 +1,236 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* An existing compare-exchange item can be updated in the following ways: + * Using a cluster-wide session + * Using a store operation + * Using the Studio + +* In this article: + * [Update compare-exchange item using a **cluster-wide session**](../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-a-cluster-wide-session) + * [Update compare-exchange item using a **store operation**](../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-a-store-operation) + * [Update compare-exchange item using the **Studio**](../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-the-studio) + * [Syntax](../compare-exchange/update-cmpxchg-item#syntax) + + + +--- + +## Update compare-exchange item using a cluster-wide session + + + +```csharp +// The session must be opened in cluster-wide mode. +using (var session = store.OpenSession( + new SessionOptions { TransactionMode = TransactionMode.ClusterWide })) +{ + // Get the existing item from the server + // ===================================== + + CompareExchangeValue item = session.Advanced.ClusterTransaction + .GetCompareExchangeValue("user1-name@example.com"); + + // The item is now tracked in the session's internal state + // Modify the value / metadata as needed + // ===================================== + + item.Value = "users/99"; + item.Metadata["email-type"] = "work email"; + item.Metadata["updated-at"] = DateTime.UtcNow.ToString("o"); + + // Save changes for the update to take effect + // ========================================== + + session.SaveChanges(); + + // A 'ClusterTransactionConcurrencyException' is thrown if the compare-exchange item + // no longer exists on the server at the time of calling SaveChanges(). + // This can happen if another client deletes or modifies the item before your update is saved. +} +``` + + +```csharp +// The session must be opened in cluster-wide mode.
+using (var asyncSession = store.OpenAsyncSession( + new SessionOptions { TransactionMode = TransactionMode.ClusterWide })) +{ + // Get the existing item from the server + // ===================================== + + CompareExchangeValue item = await asyncSession.Advanced.ClusterTransaction + .GetCompareExchangeValueAsync("user1-name@example.com"); + + // The item is now tracked in the session's internal state + // Modify the value / metadata as needed + // ===================================== + + item.Value = "users/99"; + item.Metadata["email-type"] = "work email"; + item.Metadata["updated-at"] = DateTime.UtcNow.ToString("o"); + + // Save changes for the update to take effect + // ========================================== + + await asyncSession.SaveChangesAsync(); +} +``` + + + +--- + +## Update compare-exchange item using a store operation + +* Use `PutCompareExchangeValueOperation` to **update the _value_ and/or _metadata_** of an existing compare-exchange item. + This operation is also used to create new compare-exchange items, see [Create compare-exchange item](../compare-exchange/create-cmpxchg-items). + +* To perform an update, provide: + * The existing key + * A new value and/or metadata + * The expected index (version) of the item, which must match the current version stored on the server. + +* The update will succeed only if the index you provide matches the current index stored on the server for that key. + This ensures that the item hasn’t been modified by another client since you last read it. + +* If the index does not match, or if the specified key does not exist: + * The item is not updated. + * No exception is thrown. + * The operation result has `Successful = false`. + +* If the update is successful: + * The value and/or metadata are updated. + * The server increments the index number of the item. + * The operation result has `Successful = true` and will contain the new value and new index. + + + +```csharp +// Get the existing item from the server +// ===================================== + +CompareExchangeValue item = store.Operations.Send( + new GetCompareExchangeValueOperation("user1-name@example.com")); + +// Modify the value / metadata as needed +// ===================================== + +var newValue = "users/99"; // Modify the value to be associated with the unique email key +var newMetadata = new MetadataAsDictionary +{ + { "email-type", "work email" }, + { "updated-at", DateTime.UtcNow.ToString("o") } // Add entries / modify the metadata +}; + +// Update the item +// =============== + +// The put operation will succeed only if the 'index' of the compare-exchange item +// has not changed between the read and write operations. 
+CompareExchangeResult putResult = store.Operations.Send( + new PutCompareExchangeValueOperation(item.Key, newValue, item.Index, newMetadata)); + +// Check results +// ============= + +bool putResultSuccessful = putResult.Successful; // Has operation succeeded +long putResultIndex = putResult.Index; // The new version number assigned if update succeeded +``` + + +```csharp +// Get the existing item from the server +// ===================================== + +CompareExchangeValue item = await store.Operations.SendAsync( + new GetCompareExchangeValueOperation("user1-name@example.com")); + +// Modify the value / metadata as needed +// ===================================== + +var newValue = "users/99"; // Modify the value associated with the unique email key +var newMetadata = new MetadataAsDictionary +{ + { "email-type", "work email" }, + { "updated-at", DateTime.UtcNow.ToString("o") } // Add entries / modify the metadata +}; + +// Update the item +// =============== + +// The put operation will succeed only if the 'index' of the compare-exchange item +// has not changed between the read and write operations. +CompareExchangeResult putResult = await store.Operations.SendAsync( + new PutCompareExchangeValueOperation(item.Key, newValue, item.Index, newMetadata)); + +// Check results +// ============= + +bool putResultSuccessful = putResult.Successful; // Has operation succeeded +long putResultIndex = putResult.Index; // The new version number assigned if update succeeded +``` + + + +--- + +## Update compare-exchange item using the Studio + +You can update any existing compare-exchange item from the Studio. + +![The compare-exchange view](../assets/update-cmpxchg-1.png) + +1. Go to **Documents > Compare Exchange**. +2. Click to edit a compare-exchange item. + +--- + +![The compare-exchange view](../assets/update-cmpxchg-2.png) + +1. Enter the value. +2. Enter the metadata (optional). +3. Click _Save_ to update the item. + +--- + +## Syntax + +**Method**: + + +```csharp +public PutCompareExchangeValueOperation( + string key, T value, long index, IMetadataDictionary metadata = null) +``` + + +| Parameter | Type | Description | +|--------------|----------|-------------------------------------------------------------------------------------------------------------------------| +| **key** | `string` | The unique identifier in the database scope. | +| **value** | `T` | The value to be saved for the specified _key_.
Can be any object (number, string, array, or any valid JSON object). | +| **index** | `long` | The current version of the item when updating an existing item.
Pass `0` to [create a new key](../compare-exchange/create-cmpxchg-items). | +| **metadata** | `IMetadataDictionary` | Metadata to be saved for the specified key.
Must be a valid JSON object. | + +**Returned object**: + + +```csharp +public class CompareExchangeResult +{ + public bool Successful; + public T Value; + public long Index; +} +``` + + +| Return Value | Type | Description | +|---------------|--------|---------------------------------------------------------------------------------------------------------------------------------------------------| +| **Successful**| `bool` |
  • _true_ if the put operation has completed successfully.
  • _false_ if the put operation failed.
| +| **Value** | `T` |
  • Upon success - the value of the compare-exchange item that was saved.
  • Upon failure - the existing value on the server.
| +| **Index** | `long` |
  • Upon success - the updated version (the incremented index of the modified item).
  • Upon failure (if indexes do not match) - the existing version from the server.
| diff --git a/versioned_docs/version-7.1/compare-exchange/content/_update-cmpxchg-item-java.mdx b/versioned_docs/version-7.1/compare-exchange/content/_update-cmpxchg-item-java.mdx new file mode 100644 index 0000000000..7647be2255 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_update-cmpxchg-item-java.mdx @@ -0,0 +1,140 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `PutCompareExchangeValueOperation` to **update the _value_ and/or _metadata_** of an existing compare-exchange item. + This operation is also used to create new compare-exchange items, see [Create compare-exchange item](../compare-exchange/create-cmpxchg-items). + +* To perform an update, provide: + * The existing key + * A new value and/or metadata + * The expected index (version) of the item, which must match the current version stored on the server. + +* The update will succeed only if the index you provide matches the current index stored on the server for that key. + This ensures that the item hasn’t been modified by another client since you last read it. + +* If the index does not match, or if the specified key does not exist: + * The item is not updated. + * No exception is thrown. + * The operation result has `Successful = false`. + +* If the update is successful: + * The value and/or metadata are updated. + * The server increments the index number of the item. + * The operation result has `Successful = true` and will contain the new value and new index. + +--- + +* In this article: + * [Update compare-exchange item using a **store operation**](../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-a-store-operation) + * [Update compare-exchange item using the **Studio**](../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-the-studio) + * [Syntax](../compare-exchange/update-cmpxchg-item#syntax) + + + +--- + +## Update compare-exchange item using a Store operation + + + +```java +// Get existing value +CompareExchangeValue readResult + = store.operations().send( + new GetCompareExchangeValueOperation<>(User.class, "AdminUser")); + +readResult.getValue().setAge(readResult.getValue().getAge() + 1); + +// Update value +CompareExchangeResult saveResult + = store.operations().send( + new PutCompareExchangeValueOperation<>("AdminUser", readResult.getValue(), readResult.getIndex())); + +// The save result is successful only if 'index' wasn't changed between the read and write operations +boolean saveResultSuccessful = saveResult.isSuccessful(); +``` + + + +--- + +## Update compare-exchange item using the Studio + +You can update any existing compare-exchange item from the Studio. + +![The compare-exchange view](../assets/update-cmpxchg-1.png) + +1. Go to **Documents > Compare Exchange**. +2. Click to edit a compare-exchange item. + +--- + +![The compare-exchange view](../assets/update-cmpxchg-2.png) + +1. Enter the value. +2. Enter the metadata (optional). +3. Click _Save_ to update the item. + +--- + +## Syntax + +**Method**: + + +```java +public PutCompareExchangeValueOperation(String key, T value, long index) +``` + + +| Parameter | Type | Description | +|--------------|----------|------------------------------------------------| +| **key** | `String` | The unique identifier in the database scope. | +| **value** | `T` | The value to be saved for the specified _key_. 
| +| **index** | `long` | The current version of the item when updating an existing item. | + +**Returned object**: + + +```java +public class CompareExchangeResult { + private T value; + private long index; + private boolean successful; + + public T getValue() { + return value; + } + + public void setValue(T value) { + this.value = value; + } + + public long getIndex() { + return index; + } + + public void setIndex(long index) { + this.index = index; + } + + public boolean isSuccessful() { + return successful; + } + + public void setSuccessful(boolean successful) { + this.successful = successful; + } +} +``` + + +| Return Value | Type | Description | +|---------------|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------| +| **Successful**| `boolean` |
  • _true_ if the put operation has completed successfully.
  • _false_ if the put operation failed.
| +| **Value** | `T` |
  • Upon success - the value of the compare-exchange item that was saved.
  • Upon failure - the existing value on the server.
| +| **Index** | `long` |
  • Upon success - the updated version (the incremented index of the modified item).
  • Upon failure (if indexes do not match) - the existing version from the server.
| diff --git a/versioned_docs/version-7.1/compare-exchange/content/_update-cmpxchg-item-nodejs.mdx b/versioned_docs/version-7.1/compare-exchange/content/_update-cmpxchg-item-nodejs.mdx new file mode 100644 index 0000000000..ec114a40cb --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/content/_update-cmpxchg-item-nodejs.mdx @@ -0,0 +1,178 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* An existing compare-exchange item can be updated in the following ways: + * Using a cluster-wide session + * Using a store operation + * Using the Studio + +* In this article: + * [Update compare-exchange item using a **cluster-wide session**](../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-a-cluster-wide-session) + * [Update compare-exchange item using a **store operation**](../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-a-store-operation) + * [Update compare-exchange item using the **Studio**](../compare-exchange/update-cmpxchg-item#update-compare-exchange-item-using-the-studio) + * [Syntax](../compare-exchange/update-cmpxchg-item#syntax) + + + +--- + +## Update compare-exchange item using a cluster-wide session + + + +```js +// The session must be opened in cluster-wide mode. +const session = documentStore.openSession({ + transactionMode: "ClusterWide" +}); + +// Get the existing item from the server +// ===================================== + +const item = await session.advanced.clusterTransaction + .getCompareExchangeValue("user1-name@example.com"); + +// The item is now tracked in the session's internal state +// Modify the value / metadata as needed +// ===================================== + +item.value = "users/99"; +item.metadata["email-type"] = "work email"; +item.metadata["updated-at"] = new Date().toISOString(); + +// Save changes for the update to take effect +// ========================================== + +await session.saveChanges(); + +// A 'ClusterTransactionConcurrencyException' is thrown if the compare-exchange item +// no longer exists on the server at the time of calling saveChanges(). +// This can happen if another client deletes or modifies the item before your update is saved. +``` + + + +--- + +## Update compare-exchange item using a store operation + +* Use `PutCompareExchangeValueOperation` to **update the _value_ and/or _metadata_** of an existing compare-exchange item. + This operation is also used to create new compare-exchange items, see [Create compare-exchange item](../compare-exchange/create-cmpxchg-items). + +* To perform an update, provide: + * The existing key + * A new value and/or metadata + * The expected index (version) of the item, which must match the current version stored on the server. + +* The update will succeed only if the index you provide matches the current index stored on the server for that key. + This ensures that the item hasn’t been modified by another client since you last read it. + +* If the index does not match, or if the specified key does not exist: + * The item is not updated. + * No exception is thrown. + * The operation result has `successful = false`. + +* If the update is successful: + * The value and/or metadata are updated. + * The server increments the index number of the item. + * The operation result has `successful = true` and will contain the new value and new index.
+ + + +```js +// Get the existing item from the server +// ===================================== + +const getCmpXchgOp = new GetCompareExchangeValueOperation("user1-name@example.com"); +const item = await documentStore.operations.send(getCmpXchgOp); + +// Modify the value / metadata as needed +// ===================================== + +const newValue = "users/99"; // Modify the value associated with the unique email key +const newMetadata = { + "email-type": "work email", + "updated-at": new Date().toISOString() // Add entries / modify the metadata +}; + +// Update the item +// =============== + +// The put operation will succeed only if the 'index' of the compare-exchange item +// has not changed between the read and write operations. +const putCmpXchgOp = new PutCompareExchangeValueOperation(item.key, newValue, item.index, newMetadata); +const putResult = await documentStore.operations.send(putCmpXchgOp); + +// Check results +// ============= + +const successful = putResult.successful; // Has operation succeeded +const indexForItem = putResult.index; // The new version number assigned if update succeeded +``` + + + +--- + +## Update compare-exchange item using the Studio + +You can update any existing compare-exchange item from the Studio. + +![The compare-exchange view](../assets/update-cmpxchg-1.png) + +1. Go to **Documents > Compare Exchange**. +2. Click to edit a compare-exchange item. + +--- + +![The compare-exchange view](../assets/update-cmpxchg-2.png) + +1. Enter the value. +2. Enter the metadata (optional). +3. Click _Save_ to update the item. + +--- + +## Syntax + +**Method**: + + +```js +// Available overloads: +// ==================== +const putCmpXchgOp = new PutCompareExchangeValueOperation(key, value, index); +const putCmpXchgOp = new PutCompareExchangeValueOperation(key, value, index, metadata); +``` + + +| Parameter | Type | Description | +|--------------|----------|-------------------------------------------------------------------------------------------------------------------------| +| **key** | `string` | The unique identifier in the database scope. | +| **value** | `object` | The value to be saved for the specified _key_.
Can be any object (number, string, array, or any valid JSON object). | +| **index** | `number` | The current version of the item when updating an existing item.
Pass `0` to [create a new key](../compare-exchange/create-cmpxchg-items). | +| **metadata** | `object` | Metadata to be saved for the specified key.
  Must be a valid JSON object. | + +**Returned object**: + + +```js +// Return value of store.operations.send(putCmpXchgOp) +// =================================================== +class CompareExchangeResult { + successful; + value; + index; +} +``` + + +| Return Value | Type | Description | +|---------------|--------|---------------------------------------------------------------------------------------------------------------------------------------------------| +| **Successful**| `boolean` |<br />
  • _true_ if the put operation has completed successfully.
  • _false_ if the put operation failed.
| +| **Value** | `object` |<br />
  • Upon success - the value of the compare-exchange item that was saved.
  • Upon failure - the existing value on the server.
| +| **Index** | `number` |<br />
  • Upon success - the updated version (the incremented index of the modified item).
  • Upon failure (if indexes do not match) - the existing version from the server.
| diff --git a/versioned_docs/version-7.1/compare-exchange/create-cmpxchg-items.mdx b/versioned_docs/version-7.1/compare-exchange/create-cmpxchg-items.mdx new file mode 100644 index 0000000000..a9b6a98bf0 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/create-cmpxchg-items.mdx @@ -0,0 +1,48 @@ +--- +title: "Create Compare-Exchange Items" +hide_table_of_contents: true +sidebar_label: "Create Compare-Exchange Items" +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CreateCmpXchgItemsCsharp from './content/_create-cmpxchg-items-csharp.mdx'; +import CreateCmpXchgItemsJava from './content/_create-cmpxchg-items-java.mdx'; +import CreateCmpXchgItemsNodejs from './content/_create-cmpxchg-items-nodejs.mdx'; +import CreateCmpXchgItemsPython from './content/_create-cmpxchg-items-python.mdx'; +import CreateCmpXchgItemsPhp from './content/_create-cmpxchg-items-php.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/delete-cmpxchg-items.mdx b/versioned_docs/version-7.1/compare-exchange/delete-cmpxchg-items.mdx new file mode 100644 index 0000000000..8d36c64cc6 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/delete-cmpxchg-items.mdx @@ -0,0 +1,46 @@ +--- +title: "Delete Compare-Exchange Items" +hide_table_of_contents: true +sidebar_label: "Delete Compare-Exchange Items" +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeleteCmpXchgItemsCsharp from './content/_delete-cmpxchg-items-csharp.mdx'; +import DeleteCmpXchgItemsJava from './content/_delete-cmpxchg-items-java.mdx'; +import DeleteCmpXchgItemsNodejs from './content/_delete-cmpxchg-items-nodejs.mdx'; +import DeleteCmpXchgItemsPython from './content/_delete-cmpxchg-items-python.mdx'; +import DeleteCmpXchgItemsPhp from './content/_delete-cmpxchg-items-php.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/get-cmpxchg-item.mdx b/versioned_docs/version-7.1/compare-exchange/get-cmpxchg-item.mdx new file mode 100644 index 0000000000..7d3204c85d --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/get-cmpxchg-item.mdx @@ -0,0 +1,46 @@ +--- +title: "Get Compare-Exchange Item" +hide_table_of_contents: true +sidebar_label: "Get Compare-Exchange Item" +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetCmpXchgItemCsharp from './content/_get-cmpxchg-item-csharp.mdx'; +import GetCmpXchgItemJava from './content/_get-cmpxchg-item-java.mdx'; +import GetCmpXchgItemNodejs from './content/_get-cmpxchg-item-nodejs.mdx'; +import GetCmpXchgItemPython from './content/_get-cmpxchg-item-python.mdx'; +import GetCmpXchgItemPhp from './content/_get-cmpxchg-item-php.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/get-cmpxchg-items.mdx b/versioned_docs/version-7.1/compare-exchange/get-cmpxchg-items.mdx new file mode 100644 index 
0000000000..21adaf744d --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/get-cmpxchg-items.mdx @@ -0,0 +1,46 @@ +--- +title: "Get Multiple Compare-Exchange Items" +hide_table_of_contents: true +sidebar_label: "Get Compare-Exchange Items" +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetCmpXchgItemsCsharp from './content/_get-cmpxchg-items-csharp.mdx'; +import GetCmpXchgItemsJava from './content/_get-cmpxchg-items-java.mdx'; +import GetCmpXchgItemsNodejs from './content/_get-cmpxchg-items-nodejs.mdx'; +import GetCmpXchgItemsPython from './content/_get-cmpxchg-items-python.mdx'; +import GetCmpXchgItemsPhp from './content/_get-cmpxchg-items-php.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/include-cmpxchg-items.mdx b/versioned_docs/version-7.1/compare-exchange/include-cmpxchg-items.mdx new file mode 100644 index 0000000000..fd60e9878b --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/include-cmpxchg-items.mdx @@ -0,0 +1,33 @@ +--- +title: "Include Compare-Exchange Items" +hide_table_of_contents: true +sidebar_label: "Include Compare-Exchange Items" +sidebar_position: 7 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IncludeCmpXchgItemsCsharp from './content/_include-compare-exchange-items-csharp.mdx'; +import IncludeCmpXchgItemsNodejs from './content/_include-compare-exchange-items-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/indexing-cmpxchg-values.mdx b/versioned_docs/version-7.1/compare-exchange/indexing-cmpxchg-values.mdx new file mode 100644 index 0000000000..33b7ee1ac3 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/indexing-cmpxchg-values.mdx @@ -0,0 +1,42 @@ +--- +title: "Indexing Compare-Exchange Values" +hide_table_of_contents: true +sidebar_label: "Indexing Compare-Exchange Values" +sidebar_position: 8 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingCmpXchgValuesCsharp from './content/_indexing-compare-exchange-values-csharp.mdx'; +import IndexingCmpXchgValuesJava from './content/_indexing-compare-exchange-values-java.mdx'; +import IndexingCmpXchgValuesNodejs from './content/_indexing-compare-exchange-values-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/overview.mdx b/versioned_docs/version-7.1/compare-exchange/overview.mdx new file mode 100644 index 0000000000..510d9e2c59 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/overview.mdx @@ -0,0 +1,45 @@ +--- +title: "Compare Exchange Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OverviewCsharp from './content/_overview-csharp.mdx'; +import OverviewJava from './content/_overview-java.mdx'; +import OverviewPython from './content/_overview-python.mdx'; +import 
OverviewPhp from './content/_overview-php.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php"]; + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/compare-exchange/start.mdx b/versioned_docs/version-7.1/compare-exchange/start.mdx new file mode 100644 index 0000000000..3a0c2d1c4e --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/start.mdx @@ -0,0 +1,292 @@ +--- +title: "Compare-Exchange: Start" +hide_table_of_contents: true +sidebar_label: "Start" +sidebar_position: 0 +--- + +import Card from "@site/src/components/Common/Card"; +import CardWithImage from "@site/src/components/Common/CardWithImage"; +import CardWithImageHorizontal from "@site/src/components/Common/CardWithImageHorizontal"; +import ColGrid from "@site/src/components/ColGrid"; +import CardWithIcon from "@site/src/components/Common/CardWithIcon"; +import Admonition from '@theme/Admonition'; + + + +* Compare-exchange is a RavenDB feature for storing atomic, cluster-wide **key-value pairs** where each key is a globally unique identifier in the database. + Items are versioned and managed at the cluster level. + +* Compare-exchange provides a built-in consensus mechanism ideal for safe coordination and global consistency + in distributed environments, allowing you to: + * Enforce global uniqueness (e.g., prevent duplicate usernames or emails). + * Assign work to a single client or reserve a resource once. + * Handle concurrency safely, without external services or custom locking logic. + +* Key Characteristics of a compare-exchange item: + * Cluster-wide - Visible and consistent across all nodes in the [database group](../studio/database/settings/manage-database-group). + * Atomic - Only one client can successfully modify an item at a time (all-or-nothing updates). + * Versioned - Each update increments the version, enabling conflict detection. + * Flexible - Values can be strings, numbers, arrays, or JSON objects. + * Internal - Not replicated outside the database. 
+ +* On this start page, you'll find: + * [Technical documentation links](../compare-exchange/start#technical-documentation-links-) + * [RavenDB Demo links](../compare-exchange/start#ravendb-demo-links) + * [Related blog posts](../compare-exchange/start#related-blog-posts) + * [Related in-depth articles](../compare-exchange/start#related-in-depth-articles) + * [Sample use cases](../compare-exchange/start#sample-use-cases) + + + +--- + +## Technical documentation links ✨ + +* [Overview](../compare-exchange/overview) +* [Create Compare-Exchange Items](../compare-exchange/create-cmpxchg-items) +* [Get Compare-Exchange Item](../compare-exchange/get-cmpxchg-item) +* [Get Compare-Exchange Items](../compare-exchange/get-cmpxchg-items) +* [Delete Compare-Exchange Items](../compare-exchange/delete-cmpxchg-items) +* [Update Compare-Exchange Item](../compare-exchange/update-cmpxchg-item) +* [Include Compare-Exchange Items](../compare-exchange/include-cmpxchg-items) +* [Indexing Compare-Exchange Values](../compare-exchange/indexing-cmpxchg-values) +* [Compare-Exchange in Dynamic Queries](../compare-exchange/cmpxchg-in-dynamic-queries) +* [Compare-Exchange Expiration](../compare-exchange/cmpxchg-expiration) +* [Atomic Guards](../compare-exchange/atomic-guards) + +--- + +## RavenDB Demo links + + + + + + +## Related blog posts + + + + + + + +## Related in-depth articles + + + + + + +--- + +## Sample use cases + + + +### Enforce unique usernames or emails + +* Use compare-exchange to enforce global uniqueness in your database even under concurrent operations. + For example, ensure that no two users can register with the same username or email, even if they do so simultaneously on different servers. + Compare-exchange guarantees that a specific value can only be claimed once across the cluster reliably and without race conditions. + +* ✅ Why compare-exchange? + It provides a guaranteed, cluster-wide check for uniqueness. + +* How it works: + * When a user registers, the app attempts to create a compare-exchange item like + (**key**: `"emails/john@example.com"`, **value**: `"users/1-A"`). + * Only the first attempt to claim this key succeeds. + * Any concurrent or repeated attempts to claim the same key fail automatically. + +* This makes it easy to enforce rules like: + * No two users can register with the same email address. + * No two orders can use the same external reference ID. + + + + + +### Claim a job or task once + +* Use compare-exchange to safely assign client-side jobs or tasks in a distributed system, + ensuring that each task is claimed only once. + +* ✅ Why compare-exchange? + It provides a reliable, cluster-wide locking mechanism for coordination within your database scope. + +* How it works: + * Each worker attempts to create a compare-exchange item like (**key**: `"locks/job/1234"`, **value**: `"worker-A"`). + * The first worker to succeed gets the job. + * Other workers trying to claim the same job will fail - they can back off or retry later. + +* This ensures: + * No two workers process the same job. + * Each job runs exactly once, even with multiple competing workers or nodes. + +* Also useful for: + * Implementing mutex-style locks between clients. + * Ensuring that scheduled tasks or batch jobs run only once across the cluster. + + + + + +### Reserve a resource + +* Need to reserve a table in a restaurant app or a seat at an event? + Use compare-exchange to lock the reservation and prevent double booking, even under concurrent access. + +* ✅ Why compare-exchange? 
+ It gives you a reliable, cluster-wide way to reserve something exactly once - no race conditions, no conflicts. + +* How it works: + * Try to create a Compare-Exchange item for the resource + (e.g., **key**: `"reservations/seat/17"`, **value**: `"user/123"`). + * If the item doesn't exist, the reservation is successful. + * If it already exists, someone else claimed it - you can show an error or let the user pick another. + +* This pattern is useful for: + * Reserving seats, tables, or event slots. + * Assigning support engineers to incoming tickets. + * Allocating limited resources like promotion codes or serial numbers. + +* Only one client can claim the item, so your reservation logic stays safe and simple, even under high load. + + + + + +### Prevent double processing + +* Use compare-exchange to make sure an operation runs only once even in a distributed setup. + This is useful for avoiding things like sending the same email twice, processing the same order multiple times, + or executing duplicate actions after retries. + +* ✅ Why compare-exchange? + It acts as a once-only flag - a lightweight, atomic check to prevent duplicate processing. + +* How it works: + * Before running the operation, try to create a compare-exchange key like `processed/orders/9876`. + * If the key creation succeeds - run the operation. + * If the key already exists - skip processing. It's already been handled. + +* This approach is especially useful in retry scenarios, background jobs, or any flow where idempotency matters. + + + + + +### Run business logic only if data hasn't changed + +* Use compare-exchange as a version guard to ensure the data wasn't modified while you were working on it. + This is useful when applying business logic that depends on the current state of the data - like approving a request, processing a payment, or updating a workflow step. + +* ✅ Why compare-exchange? + It helps detect changes and prevents acting on stale or outdated data. + +* How it works: + * Load the compare-exchange item that tracks the current version or state of the resource. + * After performing your checks and logic, attempt to update the item - but only if the version is still current. + * If the item was modified in the meantime, the update fails and you can abort or retry your business logic. + +* This pattern helps you maintain correctness and consistency in flows that involve multiple steps, + long-running tasks, or user input. + + + + + +### Lock a document for editing + +* In collaborative systems, it's common to allow only one user to edit a document at a time. + Use compare-exchange to create a lightweight, distributed lock on the document. + +* ✅ Why compare-exchange? + It ensures that only one client can acquire the lock - preventing conflicting edits across users or servers. + +* How it works: + * When a user starts editing a document (e.g., `task/72`), try to create a compare-exchange item: + (**key**: `"editing/task/72"`, **value**: `"user/123"`). + * If the item is created successfully, the user holds the lock. + * Other users attempting the same key will fail and can be blocked, shown a message, or put into read-only mode. + * When editing is done, delete the compare-exchange item to release the lock. + +* This is useful for: + * Locking tasks, issues, or shared forms during editing. + * Preventing data loss or conflicts from simultaneous updates. + * Letting users know who’s currently editing a shared resource. + +* Simple to implement and works seamlessly across the cluster - see the sketch below.
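+ +A minimal sketch of this editing-lock pattern, using the C# client (the key `editing/task/72`, the value `user/123`, and the initialized `store` are example placeholders): + +```csharp +// Try to create the lock item. +// Passing index 0 means: create the item only if the key does not already exist. +var acquire = store.Operations.Send( + new PutCompareExchangeValueOperation<string>("editing/task/72", "user/123", 0)); + +if (acquire.Successful) +{ + try + { + // user/123 holds the lock - let them edit the document + } + finally + { + // Release the lock, passing the index that was returned when the lock was acquired + store.Operations.Send( + new DeleteCompareExchangeValueOperation<string>("editing/task/72", acquire.Index)); + } +} +else +{ + // Someone else is currently editing - block, show a message, or open read-only +} +```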
+ + + + + +### Add safety to cluster-wide transactions + +* When using cluster-wide sessions to handle documents, RavenDB automatically creates internal compare-exchange items, + called [atomic guards](../compare-exchange/atomic-guards), to enforce atomic document modifications. + These items coordinate access and prevent conflicting writes across nodes. + +* ✅ Why compare-exchange? + It provides a Raft-based coordination mechanism that ensures consistency and safety during multi-node transactions. + +* How it works: + * When you store or update a document in a cluster-wide session, + RavenDB creates an atomic guard to track the document’s version across the cluster. + * If another session modifies the document in the meantime, + your transaction fails with a `ConcurrencyException`, ensuring data consistency. + +* This protects you from: + * Writing over documents that were modified by other sessions. + * Acting on stale data in a distributed environment. + * Violating ACID guarantees in multi-node clusters. + +* You don’t need to manage these guards manually - + RavenDB handles everything automatically when you use a session in cluster-wide mode. + + diff --git a/versioned_docs/version-7.1/compare-exchange/update-cmpxchg-item.mdx b/versioned_docs/version-7.1/compare-exchange/update-cmpxchg-item.mdx new file mode 100644 index 0000000000..e871f63245 --- /dev/null +++ b/versioned_docs/version-7.1/compare-exchange/update-cmpxchg-item.mdx @@ -0,0 +1,37 @@ +--- +title: "Update Compare-Exchange Item" +hide_table_of_contents: true +sidebar_label: "Update Compare-Exchange Item" +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import UpdateCmpXchgItemCsharp from './content/_update-cmpxchg-item-csharp.mdx'; +import UpdateCmpXchgItemJava from './content/_update-cmpxchg-item-java.mdx'; +import UpdateCmpXchgItemNodejs from './content/_update-cmpxchg-item-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/data-archival/_archived-documents-and-other-features-csharp.mdx b/versioned_docs/version-7.1/data-archival/_archived-documents-and-other-features-csharp.mdx new file mode 100644 index 0000000000..b11a7bc194 --- /dev/null +++ b/versioned_docs/version-7.1/data-archival/_archived-documents-and-other-features-csharp.mdx @@ -0,0 +1,556 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Once you have archived documents in your database (see how to [enable](../data-archival/enable-data-archiving.mdx) and [schedule](../data-archival/schedule-document-archiving.mdx) document archiving), + RavenDB features can detect these documents and handle them in different ways. + +* Some features, like indexes and data subscriptions, provide native support for configuring whether to: + * **Exclude** archived documents from processing, reducing index size and improving query relevance. + * **Include** only archived documents, for tasks that target archived data specifically. + * **Process both** archived and non-archived documents when needed. + +* Other features can manage archived documents differently based on their purpose. For example: + * ETL tasks can skip or selectively process archived documents. 
+ * Archived documents can be included or excluded when exporting or importing data. + +* Limiting processing to either archived or non-archived documents may improve performance by reducing workload and transfer volume. + +* Learn more below about how various RavenDB features interact with archived documents. +* In this article: + * [Archived documents and indexing](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-indexing) + * [Archived documents and querying](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-querying) + * [Archived documents and data subscriptions](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-subscriptions) + * [Archived documents and document extensions](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-document-extensions) + * [Archived documents and smuggler (export/import)](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-smuggler-(export/import)) + * [Archived documents and expiration](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-expiration) + * [Archived documents and ETL](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-etl) + * [Archived documents and backup](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-backup) + * [Archived documents and replication](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-replication) + * [Archived documents and patching](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-patching) + + +## Archived documents and indexing + +* Indexing performance may decline as the database grows, since a larger number of documents increases indexing load, expands index size, and can eventually reduce query speed. +* Archiving documents and excluding them from indexing can be an effective way to maintain performance. + By removing low-priority or infrequently accessed documents from the indexing process, RavenDB can create smaller, faster indexes focused on current or high-value data. + This also improves the relevance and responsiveness of queries, as they execute over a smaller and more meaningful dataset. +* **Configuring indexing behavior - Static indexes**: + * **At the database level or server-wide**: + To control whether static indexes process archived documents from the source collection, + set the [Indexing.Static.ArchivedDataProcessingBehavior](../server/configuration/indexing-configuration.mdx#indexingstaticarchiveddataprocessingbehavior) + configuration key at either the database level or server-wide (default: `ExcludeArchived`). + * Note that this setting applies only to static-indexes that use _Documents_ as their data source. + This global configuration does Not apply to static-indexes based on _Time Series_ or _Counters_, which default to `IncludeArchived`. + * **Per index**: + You can override this global behavior per-index directly in the index definition, using the Client API or from the Studio + (see the examples below).
+ +* **Configuring indexing behavior - Auto indexes:** + * **At the database level or server-wide**: + To control whether auto-indexes process archived documents at the database level or server-wide, + set the [Indexing.Auto.ArchivedDataProcessingBehavior](../server/configuration/indexing-configuration.mdx#indexingautoarchiveddataprocessingbehavior) configuration key (default: `ExcludeArchived`). + * **Per index**: + Unlike static indexes, you cannot configure this behavior per auto-index, + because dynamic queries (which trigger auto-index creation) do not provide a way to control this setting. +* The available configuration options are: + * `ExcludeArchived`: only non-archived documents are processed by the index. + * `IncludeArchived`: both archived and non-archived documents are processed by the index. + * `ArchivedOnly`: only archived documents are processed by the index. +##### Configuring archived document processing for a static index - from the Client API + +You can configure how a static index handles archived documents when creating the index using the Client API. +This setting will **override** the global configuration defined by the [Indexing.Static.ArchivedDataProcessingBehavior](../server/configuration/indexing-configuration.mdx#indexingstaticarchiveddataprocessingbehavior) configuration key. + + + +Example: + + + + +{`public class Orders_ByOrderDate : + AbstractIndexCreationTask +{ + public class IndexEntry + { + public DateTime OrderDate { get; set; } + } + + public Orders_ByOrderDate() + { + Map = orders => from order in orders + select new IndexEntry + { + OrderDate = order.OrderedAt + }; + + // Configure whether the index should process data from archived documents: + // ======================================================================== + ArchivedDataProcessingBehavior = + // You can set to 'ExcludeArchived', 'IncludeArchived', or 'ArchivedOnly' + Raven.Client.Documents.DataArchival.ArchivedDataProcessingBehavior.IncludeArchived; + } +} +`} + + + + +{`public class Orders_ByOrderDate_JS : AbstractJavaScriptIndexCreationTask +{ + public Orders_ByOrderDate_JS() + { + Maps = new HashSet() + { + @"map('Orders', function (order) { + return { + OrderDate: order.OrderedAt + }; + })" + }; + + // Configure whether the index should process data from archived documents: + // ======================================================================== + ArchivedDataProcessingBehavior = + // You can set to 'ExcludeArchived', 'IncludeArchived', or 'ArchivedOnly' + Raven.Client.Documents.DataArchival.ArchivedDataProcessingBehavior.IncludeArchived; + } +} +`} + + + + +{`var indexDefinition = new IndexDefinitionBuilder() +{ + Map = orders => from order in orders + select new { order.OrderedAt } +} + .ToIndexDefinition(new DocumentConventions()); + +indexDefinition.Name = "Orders/ByOrderDate"; + +// Configure whether the index should process data from archived documents: +// ======================================================================== +indexDefinition.ArchivedDataProcessingBehavior = + // You can set to 'ExcludeArchived', 'IncludeArchived', or 'ArchivedOnly' + ArchivedDataProcessingBehavior.IncludeArchived; + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +`} + + + + + + + +When a static-index is configured to include **both** archived and non-archived documents in its processing, +you can also apply custom logic based on the presence of the `@archived` metadata property.
+
+For example:
+
+
+
+
+{`public class Orders_ByArchivedStatus :
+    AbstractIndexCreationTask<Order, Orders_ByArchivedStatus.IndexEntry>
+{
+    public class IndexEntry
+    {
+        public bool? IsArchived { get; set; }
+        public DateTime? OrderDate { get; set; }
+        public string ShipToCountry { get; set; }
+    }
+
+    public Orders_ByArchivedStatus()
+    {
+        Map = orders => from order in orders
+                        let metadata = MetadataFor(order)
+
+                        // Retrieve the '@archived' metadata property from the document:
+                        let archivedProperty =
+                            metadata.Value<bool?>(Raven.Client.Constants.Documents.Metadata.Archived)
+                        // Alternative syntax:
+                        // let archivedProperty =
+                        //     (bool?)metadata[Raven.Client.Constants.Documents.Metadata.Archived]
+
+                        select new IndexEntry
+                        {
+                            // Index whether the document is archived:
+                            IsArchived = archivedProperty == true,
+
+                            // Index the order date only if the document is archived:
+                            OrderDate = archivedProperty == true ? order.OrderedAt : null,
+
+                            // Index the destination country only if the document is not archived:
+                            ShipToCountry = archivedProperty == null ? order.ShipTo.Country : null
+                        };
+
+        ArchivedDataProcessingBehavior =
+            Raven.Client.Documents.DataArchival.ArchivedDataProcessingBehavior.IncludeArchived;
+    }
+}
+`}
+
+
+
+
+{`public class Orders_ByArchivedStatus_JS : AbstractJavaScriptIndexCreationTask
+{
+    public Orders_ByArchivedStatus_JS()
+    {
+        Maps = new HashSet<string>()
+        {
+            @"map('Orders', function (order) {
+                var metadata = metadataFor(order);
+                var archivedProperty = metadata['@archived'];
+
+                var isArchived = (archivedProperty === true);
+
+                var orderDate = isArchived ? order.OrderedAt : null;
+                var shipToCountry = !isArchived ? order.ShipTo.Country : null;
+
+                return {
+                    IsArchived: isArchived,
+                    OrderDate: orderDate,
+                    ShipToCountry: shipToCountry
+                };
+            })"
+        };
+
+        ArchivedDataProcessingBehavior =
+            Raven.Client.Documents.DataArchival.ArchivedDataProcessingBehavior.IncludeArchived;
+    }
+}
+`}
+
+
+
+
+{`var indexDefinition = new IndexDefinition
+{
+    Name = "Orders/ByArchivedStatus",
+
+    Maps = new HashSet<string>
+    {
+        @"from order in docs.Orders
+          let metadata = MetadataFor(order)
+          let archivedProperty = (bool?)metadata[""@archived""]
+
+          select new
+          {
+              IsArchived = archivedProperty == true,
+              OrderDate = archivedProperty == true ? order.OrderedAt : null,
+              ShipToCountry = archivedProperty == null ? order.ShipTo.Country : null
+          }"
+    },
+
+    ArchivedDataProcessingBehavior = ArchivedDataProcessingBehavior.IncludeArchived
+};
+
+store.Maintenance.Send(new PutIndexesOperation(indexDefinition));
+`}
+
+
+
+
+
+##### Configuring archived document processing for a static index - from the Studio
+
+You can configure how a static index handles archived documents directly from the Studio.
+This setting will **override** the global configuration defined by the [Indexing.Static.ArchivedDataProcessingBehavior](../server/configuration/indexing-configuration.mdx#indexingstaticarchiveddataprocessingbehavior) configuration key.
+
+![Configure index](./assets/configure-static-index.png)
+
+1. Open the [Indexes list view](../studio/database/indexes/indexes-list-view.mdx) and select the index you want to configure,
+   or create a new index.
+2. Scroll down and open the **Archived Data** tab.
+3. Click to select how this index should process archived documents:
+   * **Default**: The index will use the behavior set by the global configuration.
+   * **Exclude Archived**: Index only non-archived documents.
+   * **Include Archived**: Index both archived and non-archived documents.
+   * **Archived Only**: Index only archived documents.
+
+![Processing options](./assets/processing-options.png)
+
+
+
+## Archived documents and querying
+
+* **Full collection queries**:
+  * Queries that scan an entire collection without any filtering condition (e.g. `from Orders`) will include archived documents.
+  * These queries are not influenced by indexing configuration related to archived documents because they do not use indexes.
+  * Learn more about full collection queries in [Full collection query](../client-api/session/querying/how-to-query.mdx#collectionQuery).
+
+* **Dynamic queries (auto-indexes)**:
+  * When making a dynamic query, RavenDB creates an auto-index to serve it.
+    Whether that index processes archived documents depends on the value of the [Indexing.Auto.ArchivedDataProcessingBehavior](../server/configuration/indexing-configuration.mdx#indexingautoarchiveddataprocessingbehavior) configuration key at the time the query is made.
+  * Once created, the auto-index retains that behavior.
+    Query results will continue to reflect the configuration that was in effect when the index was first built - even if the setting is changed later.
+  * Learn more about dynamic queries in [Query a collection - with filtering](../client-api/session/querying/how-to-query.mdx#dynamicQuery).
+
+* **Querying static-indexes**:
+  * When querying a static-index, the results will include, exclude, or consist solely of archived documents depending on how the static-index was configured.
+    The index behavior is determined by:
+    * the value of the [Indexing.Static.ArchivedDataProcessingBehavior](../server/configuration/indexing-configuration.mdx#indexingstaticarchiveddataprocessingbehavior) configuration key at the time the static-index was created, or -
+    * the explicit setting in the index definition, which overrides the global configuration key.
+  * The index's archived data processing behavior can be modified after its creation using the Studio or the Client API.
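+
+To illustrate the distinction between these query types, here is a short session sketch
+(the entity and property names are illustrative):
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Full collection query - no filter, not served by an index;
+    // archived documents ARE included in the results:
+    var allOrders = session.Query<Order>().ToList();
+
+    // Dynamic query with a filter - served by an auto-index;
+    // whether archived documents are returned depends on the value of
+    // 'Indexing.Auto.ArchivedDataProcessingBehavior' when the auto-index was first created:
+    var usaOrders = session.Query<Order>()
+        .Where(x => x.ShipTo.Country == "USA")
+        .ToList();
+}
+`}
+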
+
+
+
+## Archived documents and subscriptions
+
+* Processing large volumes of documents in data subscriptions increases the workload on both the server and subscription workers.
+* You can reduce this load by defining the subscription query to exclude archived documents, include only archived documents, or process both archived and non-archived data.
+  This gives you control over which documents are sent to workers - helping you focus on the most relevant data and reduce unnecessary processing.
+* **Configuring the subscription task behavior**:
+  * **At the database level or server-wide**:
+    To control whether queries in data subscription tasks process archived documents,
+    set the [Subscriptions.ArchivedDataProcessingBehavior](../server/configuration/subscription-configuration.mdx#subscriptionsarchiveddataprocessingbehavior) configuration key at either the database level or server-wide
+    (default: `ExcludeArchived`).
+  * **Per task**:
+    You can override this global behavior per data subscription task directly in the task definition,
+    using the Client API or from the Studio (see the examples below).
+* The available configuration options are:
+  * `ExcludeArchived`: only non-archived documents are processed by the subscription query.
+  * `IncludeArchived`: both archived and non-archived documents are processed by the subscription query.
+  * `ArchivedOnly`: only archived documents are processed by the subscription query.
+
+##### Configuring archived document processing for a data subscription task - from the Client API
+
+You can configure how a subscription task handles archived documents when creating the subscription using the Client API.
+This setting will **override** the global configuration defined by the [Subscriptions.ArchivedDataProcessingBehavior](../server/configuration/subscription-configuration.mdx#subscriptionsarchiveddataprocessingbehavior) configuration key.
+
+
+
+Example:
+
+
+
+
+{`var subscriptionName = store.Subscriptions
+    .Create(new SubscriptionCreationOptions<Order>()
+{
+    Name = "ArchivedOrdersSubscription",
+    // Workers that will subscribe to this subscription task
+    // will receive only archived documents from the 'Orders' collection.
+    ArchivedDataProcessingBehavior = ArchivedDataProcessingBehavior.ArchivedOnly
+
+    // You can set the behavior to 'ExcludeArchived', 'IncludeArchived', or 'ArchivedOnly'
+});
+`}
+
+
+
+
+{`var subscriptionName = store.Subscriptions
+    .Create(new SubscriptionCreationOptions()
+{
+    Name = "ArchivedOrdersSubscription",
+    Query = "from Orders",
+    // Workers that will subscribe to this subscription task
+    // will receive only archived documents from the 'Orders' collection.
+    ArchivedDataProcessingBehavior = ArchivedDataProcessingBehavior.ArchivedOnly
+
+    // You can set the behavior to 'ExcludeArchived', 'IncludeArchived', or 'ArchivedOnly'
+});
+`}
+
+
+
+
+
+##### Configuring archived document processing for a data subscription task - from the Studio
+
+You can configure how a subscription task handles archived documents directly from the Studio.
+This setting will **override** the global configuration defined by the [Subscriptions.ArchivedDataProcessingBehavior](../server/configuration/subscription-configuration.mdx#subscriptionsarchiveddataprocessingbehavior) configuration key.
+
+![Configure subscription](./assets/configure-subscription.png)
+
+1. Open the [Ongoing tasks list view](../studio/database/tasks/ongoing-tasks/general-info.mdx) and select the subscription task you want to configure,
+   or create a new subscription.
+2. Click to select how the subscription query should process archived documents:
+   * **Default**: The subscription will use the behavior set by the global configuration.
+   * **Exclude Archived**: Process only non-archived documents.
+   * **Include Archived**: Process both archived and non-archived documents.
+   * **Archived Only**: Process only archived documents.
+
+
+
+## Archived documents and document extensions
+
+* **Attachments**:
+  * Attachments are Not archived (compressed), even if the document they belong to is archived.
+
+* **Counters**:
+  * Counters are Not archived (compressed), even if the document they belong to is archived.
+  * Unlike indexes whose source data is _Documents_ - which default to `ExcludeArchived` -
+    indexes whose source data is _Counters_ do process archived documents by default (`IncludeArchived`).
+    This behavior can be modified in the index definition.
+
+* **Time series**:
+  * Time series are Not archived (compressed), even if the document they belong to is archived.
+  * Unlike indexes whose source data is _Documents_ - which default to `ExcludeArchived` -
+    indexes whose source data is _Time series_ do process archived documents by default (`IncludeArchived`).
+    This behavior can be modified in the index definition.
+
+* **Revisions**:
+  * No revision is created at the time the server archives a document, even if the Revisions feature is enabled.
+  * However, if you modify an archived document (when Revisions are enabled), a revision is created for that document - and that revision is archived as well.
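+
+Client code can detect a document's archived status through the `@archived` metadata property.
+The following is a minimal sketch (the document ID is illustrative); note that, as described above,
+saving changes to an archived document leaves it archived:
+
+
+
+{`using (var session = store.OpenSession())
+{
+    var order = session.Load<Order>("orders/830-A");
+    var metadata = session.Advanced.GetMetadataFor(order);
+
+    // The '@archived: true' property is present only on archived documents:
+    var isArchived = metadata.ContainsKey(Raven.Client.Constants.Documents.Metadata.Archived);
+
+    // Modifying the document does not affect its archival status -
+    // it remains archived after SaveChanges:
+    order.Freight = 10;
+    session.SaveChanges();
+}
+`}
+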
+
+
+
+## Archived documents and smuggler (export/import)
+
+You can control whether archived documents are included when exporting or importing a database.
+
+##### Export/Import archived documents - from the Client API
+
+[Smuggler](../client-api/smuggler/what-is-smuggler.mdx), RavenDB’s tool for database export and import, can be configured to include or exclude archived documents.
+By default, archived documents are **included** in the operation.
+
+
+
+In this example, exported data **excludes** archived documents:
+
+
+
+{`var exportOperation = store.Smuggler.ExportAsync(
+    new DatabaseSmugglerExportOptions()
+    \{
+        // Export only non-archived documents:
+        IncludeArchived = false
+    \}, "DestinationFilePath");
+`}
+
+
+
+
+
+
+
+In this example, imported data **includes** archived documents:
+
+
+
+{`var importOperation = store.Smuggler.ImportAsync(
+    new DatabaseSmugglerImportOptions()
+    \{
+        // Include archived documents in the import:
+        IncludeArchived = true
+    \}, "SourceFilePath");
+`}
+
+
+
+
+##### Export archived documents - from the Studio
+
+![Export archived documents](./assets/export-archived-documents.png)
+
+1. Go to **Tasks > Export Database**.
+2. Toggle the **Include archived documents** option to control whether archived documents are included in the database export.
+
+##### Import archived documents - from the Studio
+
+![Import archived documents](./assets/import-archived-documents.png)
+
+1. Go to **Tasks > Import Data**.
+2. Toggle the **Include archived documents** option to control whether archived documents are included in the import.
+
+
+
+## Archived documents and expiration
+
+* Archiving can be used alongside other features, such as [Document expiration](../server/extensions/expiration.mdx).
+
+* For example, a document can be scheduled to be archived after six months and expired after one year.
+  This allows you to keep recent documents active and quickly accessible, move older documents to archival storage where slower access is acceptable,
+  and eventually remove documents that are no longer needed.
+
+* In the following example, both the `@archive-at` and the `@expires` metadata properties have been added to document `companies/90-A`
+  to schedule it for archiving and expiration, respectively:
+
+
+
+{`\{
+    "Name": "Wilman Kala",
+    "Phone": "90-224 8858",
+    ...
+    "@metadata": \{
+        "@archive-at": "2026-01-06T22:45:30.018Z",
+        "@expires": "2026-07-06T22:45:30.018Z",
+        "@collection": "Companies",
+        ...
+    \}
+\}
+`}
+
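+
+A session sketch that schedules both operations for an existing document
+(the document ID and time spans are illustrative; scheduling via `@archive-at` is covered in
+[Schedule document archiving](../data-archival/schedule-document-archiving.mdx)):
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    var company = session.Load<Company>("companies/90-A");
+    var metadata = session.Advanced.GetMetadataFor(company);
+
+    // Schedule archiving after six months and expiration after one year (UTC):
+    metadata["@archive-at"] = DateTime.UtcNow.AddMonths(6);
+    metadata["@expires"] = DateTime.UtcNow.AddMonths(12);
+
+    session.SaveChanges();
+\}
+`}
+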
+
+
+
+## Archived documents and ETL
+
+* An ETL transformation script can examine each source document’s [metadata](../server/ongoing-tasks/etl/raven.mdx#accessing-metadata)
+  for the existence of the `@archived: true` property, which indicates that the document is archived.
+  Based on this check, the script can decide how to handle the document - for example, skip it entirely or send only selected fields.
+
+* With [RavenDB ETL](../server/ongoing-tasks/etl/raven.mdx), documents that are archived in the source database and sent to the target
+  are not archived in the destination database.
+
+* In the following example, the ETL script checks whether the document is archived, and skips it if it is:
+
+
+
+{`var isArchived = this['@metadata']['@archived'];
+
+if (isArchived === true) \{
+    return; // Do not process archived documents
+\}
+
+// Transfer only non-archived documents to the target
+loadToOrders(this);
+`}
+
+
+
+
+
+## Archived documents and backup
+
+* Archived documents are included in database backups (both _logical backups_ and _snapshots_);
+  no special configuration is required.
+
+* When restoring a database from a backup, archived documents are restored as well,
+  and their archived status is preserved.
+
+
+
+## Archived documents and replication
+
+Archived documents are included in [Internal](../server/clustering/replication/replication-overview.mdx#internal-replication) replication,
+[External](../server/clustering/replication/replication-overview.mdx#external-replication) replication, and [Hub/Sink](../server/clustering/replication/replication-overview.mdx#hubsink-replication) replication -
+no special configuration is required.
+
+
+
+## Archived documents and patching
+
+* Patching can be used to **schedule** multiple documents for archiving. See the dedicated sections:
+  [Schedule multiple documents for archiving - from the Studio](../data-archival/schedule-document-archiving.mdx#schedule-multiple-documents-for-archiving---from-the-studio).
+  [Schedule multiple documents for archiving - from the Client API](../data-archival/schedule-document-archiving.mdx#schedule-multiple-documents-for-archiving---from-the-client-api).
+
+* Patching is also used to **unarchive** documents.
+  See the dedicated article [Unarchiving documents](../data-archival/unarchiving-documents.mdx).
+
+* When **cloning** an archived document using the `put` method within a patching script
+  (see this [clone document example](../client-api/operations/patching/single-document.mdx#clone-document)), the cloned document will Not be archived,
+  and the `@archived: true` property will be removed from the cloned document.
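+
+For instance, here is a sketch of cloning a single archived document with a patch script
+(the document ID is illustrative, and the `put` method follows the clone example linked above);
+the resulting clone is a regular, non-archived document:
+
+
+
+{`var cloneArchivedDocOp = new PatchByQueryOperation(@"
+    from Orders
+    where id() == 'orders/830-A'
+    update
+    \{
+        // The clone is NOT archived, and '@archived' is removed from its metadata:
+        put('orders/', this);
+    \}");
+
+store.Operations.Send(cloneArchivedDocOp);
+`}
+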
+
+
+
+
diff --git a/versioned_docs/version-7.1/data-archival/_category_.json b/versioned_docs/version-7.1/data-archival/_category_.json
new file mode 100644
index 0000000000..f2d5bb4b23
--- /dev/null
+++ b/versioned_docs/version-7.1/data-archival/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 4,
+  "label": "Data Archival"
+}
diff --git a/versioned_docs/version-7.1/data-archival/_enable-data-archiving-csharp.mdx b/versioned_docs/version-7.1/data-archival/_enable-data-archiving-csharp.mdx
new file mode 100644
index 0000000000..4b2741bca0
--- /dev/null
+++ b/versioned_docs/version-7.1/data-archival/_enable-data-archiving-csharp.mdx
@@ -0,0 +1,120 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* By default, data archiving is disabled.
+  To use the archiving feature, you must first **enable** it.
+
+* When configuring the feature,
+  you can also set the **frequency** at which RavenDB scans the database for documents scheduled for archiving.
+
+* Once enabled, the archiving task runs periodically at the configured frequency,
+  scanning the database for documents that have been scheduled for archival.
+  Learn how to schedule documents for archival in [Schedule document archiving](../data-archival/schedule-document-archiving.mdx).
+
+* In this article:
+  * [Enable archiving - from the Client API](../data-archival/enable-data-archiving.mdx#enable-archiving---from-the-client-api)
+  * [Enable archiving - from the Studio](../data-archival/enable-data-archiving.mdx#enable-archiving---from-the-studio)
+
+
+## Enable archiving - from the Client API
+
+Use `ConfigureDataArchivalOperation` to enable archiving for the database and configure the archiving task.
+
+**Example**:
+
+
+
+
+{`// Define the archival configuration object
+var configuration = new DataArchivalConfiguration
+{
+    // Enable archiving
+    Disabled = false,
+
+    // Optional: override the default archiving frequency
+    // Scan for documents scheduled for archiving every 180 seconds
+    ArchiveFrequencyInSec = 180,
+
+    // Optional: limit the number of documents processed in each archival run
+    MaxItemsToProcess = 100
+};
+
+// Define the archival operation, pass the configuration
+var configureArchivalOp = new ConfigureDataArchivalOperation(configuration);
+
+// Execute the operation by passing it to Maintenance.Send
+store.Maintenance.Send(configureArchivalOp);
+`}
+
+
+
+
+{`// Define the archival configuration object
+var configuration = new DataArchivalConfiguration
+{
+    // Enable archiving
+    Disabled = false,
+
+    // Optional: override the default archiving frequency
+    // Scan for documents scheduled for archiving every 180 seconds
+    ArchiveFrequencyInSec = 180,
+
+    // Optional: limit the number of documents processed in each archival run
+    MaxItemsToProcess = 100
+};
+
+// Define the archival operation, pass the configuration
+var configureArchivalOp = new ConfigureDataArchivalOperation(configuration);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+await store.Maintenance.SendAsync(configureArchivalOp);
+`}
+
+
+
+**Syntax**:
+
+
+
+{`public ConfigureDataArchivalOperation(DataArchivalConfiguration configuration)
+`}
+
+
+
+
+
+{`public class DataArchivalConfiguration
+\{
+    public bool Disabled \{ get; set; \}
+    public long? ArchiveFrequencyInSec \{ get; set; \}
+    public long? MaxItemsToProcess \{ get; set; \}
+\}
+`}
+
+
+
+| Parameter                 | Type    | Description |
+|---------------------------|---------|-------------|
+| **Disabled**              | `bool`  | `true` - archiving is disabled for the entire database (default). <br/> `false` - archiving is enabled for the database. |
+| **ArchiveFrequencyInSec** | `long?` | Frequency (in seconds) at which the server scans for documents scheduled for archiving. Default: `60` |
+| **MaxItemsToProcess**     | `long?` | The maximum number of documents the archiving task will process in a single run (i.e., each time it is triggered by the configured frequency). Default: `int.MaxValue` |
+
+
+
+## Enable archiving - from the Studio
+
+![Enable archiving](./assets/enable-archiving.png)
+
+1. Go to **Settings > Data Archival**.
+2. Toggle on to enable data archival.
+3. Toggle on to customize the frequency at which the server scans for documents scheduled for archiving.
+   Default is 60 seconds.
+4. Toggle on to customize the maximum number of documents the archiving task will process in a single run.
+5. Click Save to apply your settings.
+
+
+
+
diff --git a/versioned_docs/version-7.1/data-archival/_schedule-document-archiving-csharp.mdx b/versioned_docs/version-7.1/data-archival/_schedule-document-archiving-csharp.mdx
new file mode 100644
index 0000000000..cd81710280
--- /dev/null
+++ b/versioned_docs/version-7.1/data-archival/_schedule-document-archiving-csharp.mdx
@@ -0,0 +1,274 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Documents cannot be archived directly - they must be scheduled for archival.
+  To **schedule a document** for archival, add the `@archive-at` property to the document's metadata and set its value to the desired archival time (in UTC).
+  This can be done in several ways, as described in this article.
+
+* **Note**:
+  Just scheduling a document for archival does Not archive it at the specified time.
+  Actual archiving is performed only by a background task that runs when the archival feature is [enabled](../data-archival/enable-data-archiving.mdx).
+  This task periodically scans the database for documents scheduled for archival.
+  The scan frequency is configurable when [enabling](../data-archival/enable-data-archiving.mdx) the archival feature (default: 60 seconds).
+
+* The archiving task will archive any document whose `@archive-at` time has passed at the time of the scan.
+  The `@archive-at` metadata property will then be replaced with `@archived: true`.
+
+* You can schedule documents for archival even if the archiving feature is not yet enabled.
+  These documents will be archived once the feature is enabled and the task runs - provided the scheduled time has already passed.
+* In this article:
+  * [Schedule a SINGLE document for archiving - from the Client API](../data-archival/schedule-document-archiving.mdx#schedule-a-single-document-for-archiving---from-the-client-api)
+  * [Schedule a SINGLE document for archiving - from the Studio](../data-archival/schedule-document-archiving.mdx#schedule-a-single-document-for-archiving---from-the-studio)
+  * [Schedule MULTIPLE documents for archiving - from the Client API](../data-archival/schedule-document-archiving.mdx#schedule-multiple-documents-for-archiving---from-the-client-api)
+  * [Schedule MULTIPLE documents for archiving - from the Studio](../data-archival/schedule-document-archiving.mdx#schedule-multiple-documents-for-archiving---from-the-studio)
+
+
+## Schedule a SINGLE document for archiving - from the Client API
+
+To schedule a single document for archival from the Client API,
+add the `@archive-at` property directly to the document metadata as follows:
+
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Load the document to schedule for archiving
+    var company = session.Load<Company>("companies/91-A");
+
+    // Access the document's metadata
+    var metadata = session.Advanced.GetMetadataFor(company);
+
+    // Set the future archival date (in UTC)
+    var archiveDate = SystemTime.UtcNow.AddDays(1);
+    metadata["@archive-at"] = archiveDate;
+
+    // Save the changes
+    session.SaveChanges();
+}
+`}
+
+
+
+
+{`using (var asyncSession = store.OpenAsyncSession())
+{
+    // Load the document to schedule for archiving
+    var company = await asyncSession.LoadAsync<Company>("companies/91-A");
+
+    // Access the document's metadata
+    var metadata = asyncSession.Advanced.GetMetadataFor(company);
+
+    // Set the future archival date (in UTC)
+    var archiveDate = SystemTime.UtcNow.AddDays(1);
+    metadata["@archive-at"] = archiveDate;
+
+    // Save the changes
+    await asyncSession.SaveChangesAsync();
+}
+`}
+
+
+
+
+Learn more about modifying the metadata of a document in [Modifying Document Metadata](../client-api/session/how-to/get-and-modify-entity-metadata.mdx).
+
+
+
+## Schedule a SINGLE document for archiving - from the Studio
+
+* To schedule a single document for archival from the Studio:
+  * Open the Document view.
+  * Add the `@archive-at` property to the document's metadata.
+  * Set its value to the desired archive time in UTC format.
+  * Save the document.
+
+![Schedule a document for archiving](./assets/schedule-document-for-archiving.png)
+
+1. This is the `@archive-at` property that was added to the document's metadata.
+   E.g.: `"@archive-at": "2025-06-25T14:00:00.0000000Z"`
+2. After saving the document, the Studio displays the time remaining until the document is archived.
+
+
+
+## Schedule MULTIPLE documents for archiving - from the Client API
+
+* Use the `PatchByQueryOperation` to schedule multiple documents for archiving.
+
+* In the **patch query**, you can apply any filtering condition to select only the documents you want to archive.
+  In the **patch script**, call the `archived.archiveAt` method to set the desired archival time (in UTC).
+
+* When the patch operation is executed,
+  the server will add the `@archive-at` property to the metadata of all documents that match the query filter.
+
+**Example:**
+
+The following example schedules all orders that were made at least a year ago for archival.
+The **patch query** filters for these older orders.
+Any document matching the query is then scheduled for archival by the **patch script**.
+
+
+
+
+{`var archiveDate = SystemTime.UtcNow.AddDays(1);
+string archiveDateString = archiveDate.ToString("o");
+
+var oldDate = SystemTime.UtcNow.AddYears(-1);
+string oldDateString = oldDate.ToString("o");
+
+// Define the patch query string
+// Request to archive all Orders older than one year
+string patchByQuery = $@"
+    // The patch query:
+    // ================
+    from Orders
+    where OrderedAt < '{oldDateString}'
+    update {{
+        // The patch script - schedule for archival:
+        // =========================================
+        archived.archiveAt(this, '{archiveDateString}')
+    }}";

+// Define the patch operation, pass the patch query string
+var patchByQueryOp = new PatchByQueryOperation(patchByQuery);
+
+// Execute the operation by passing it to Operations.Send
+store.Operations.Send(patchByQueryOp);
+`}
+
+
+
+
+{`var archiveDate = SystemTime.UtcNow.AddDays(1);
+string archiveDateString = archiveDate.ToString("o");
+
+var oldDate = SystemTime.UtcNow.AddYears(-1);
+string oldDateString = oldDate.ToString("o");
+
+// Define the patch query string
+// Request to archive all Orders older than one year
+string patchByQuery = $@"
+    from Orders
+    where OrderedAt < '{oldDateString}'
+    update {{
+        archived.archiveAt(this, '{archiveDateString}')
+    }}";
+
+// Define the patch operation, pass the patch query string
+var patchByQueryOp = new PatchByQueryOperation(patchByQuery);
+
+// Execute the operation by passing it to Operations.SendAsync
+await store.Operations.SendAsync(patchByQueryOp);
+`}
+
+
+
+
+{`var archiveDate = SystemTime.UtcNow.AddDays(1);
+string archiveDateString = archiveDate.ToString("o");
+
+var oldDate = SystemTime.UtcNow.AddYears(-1);
+string oldDateString = oldDate.ToString("o");
+
+// Define the patch string
+// Request to archive all Orders older than one year
+string patchByQuery = $@"
+    from Orders
+    where OrderedAt < $p0
+    update {{
+        archived.archiveAt(this, $p1)
+    }}";
+
+// Define the patch operation, pass the patch query
+var patchByQueryOp = new PatchByQueryOperation(new IndexQuery()
+{
+    Query = patchByQuery,
+    QueryParameters = new Parameters()
+    {
+        { "p0", oldDateString },
+        { "p1", archiveDateString }
+    }
+});
+
+// Execute the operation by passing it to Operations.Send
+store.Operations.Send(patchByQueryOp);
+`}
+
+
+
+
+{`var archiveDate = SystemTime.UtcNow.AddDays(1);
+string archiveDateString = archiveDate.ToString("o");
+
+var oldDate = SystemTime.UtcNow.AddYears(-1);
+string oldDateString = oldDate.ToString("o");
+
+// Define the patch string
+// Request to archive all Orders older than one year
+string patchByQuery = $@"
+    from Orders
+    where OrderedAt < $p0
+    update {{
+        archived.archiveAt(this, $p1)
+    }}";
+
+// Define the patch operation, pass the patch query
+var patchByQueryOp = new PatchByQueryOperation(new IndexQuery()
+{
+    Query = patchByQuery,
+    QueryParameters = new Parameters()
+    {
+        { "p0", oldDateString },
+        { "p1", archiveDateString }
+    }
+});
+
+// Execute the operation by passing it to Operations.SendAsync
+await store.Operations.SendAsync(patchByQueryOp);
+`}
+
+
+
+**Syntax:**
+
+
+
+{`archived.archiveAt(doc, utcDateTimeString)
+`}
+
+
+
+| Parameter             | Type     | Description                                                                           |
+|-----------------------|----------|---------------------------------------------------------------------------------------|
+| **doc**               | `object` | The document to schedule for archiving.                                               |
+| **utcDateTimeString** | `string` | The UTC timestamp (as a string) that specifies when the document should be archived. |
+
+To learn more about the `PatchByQueryOperation`, see [Set-based patch operations](../client-api/operations/patching/set-based.mdx).
+
+
+
+## Schedule MULTIPLE documents for archiving - from the Studio
+
+* To schedule multiple documents for archiving from the Studio:
+  * Open the Patch view.
+  * Enter a patch script that uses the `archived.archiveAt` method, providing the desired archive date in UTC.
+  * Execute the patch.
+
+**Example**:
+
+The following patch script, used directly in the Studio,
+performs the same operation as the [Client API example](../data-archival/schedule-document-archiving.mdx#schedule-multiple-documents-for-archiving---from-the-client-api) shown above.
+
+![Schedule multiple documents for archiving](./assets/schedule-multiple-documents-for-archiving.png)
+
+1. Open the Patch view.
+2. Enter the patch script.
+3. Click to execute the patch.
+4. You can test the patch on a sample document before executing the whole operation.
+   Learn more in [Test patch](../studio/database/documents/patch-view.mdx#test-patch).
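+
+For reference, the script in the screenshot follows this pattern
+(the dates below are placeholder values - substitute your own cutoff and archival times):
+
+
+
+{`from Orders
+where OrderedAt < '2024-06-01T00:00:00.0000000Z'
+update \{
+    archived.archiveAt(this, '2025-06-01T00:00:00.0000000Z')
+\}
+`}
+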
+
+
+
+
diff --git a/versioned_docs/version-7.1/data-archival/_unarchiving-documents-csharp.mdx b/versioned_docs/version-7.1/data-archival/_unarchiving-documents-csharp.mdx
new file mode 100644
index 0000000000..1f12e49710
--- /dev/null
+++ b/versioned_docs/version-7.1/data-archival/_unarchiving-documents-csharp.mdx
@@ -0,0 +1,230 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Archived documents can be unarchived at any time.
+
+* The archiving feature does Not need to be enabled to unarchive documents.
+  Any previously archived document can be unarchived, regardless of the feature's current state.
+
+* Do **not** attempt to unarchive a document by manually removing the `@archived: true` metadata property from the document.
+  This will not clear the internal archived status of the document.
+  To properly unarchive a document, use the `archived.unarchive()` method as described below.
+
+* In this article:
+  * [Unarchive documents - from the Client API](../data-archival/unarchiving-documents.mdx#unarchive-documents---from-the-client-api)
+  * [Unarchive documents - from the Studio](../data-archival/unarchiving-documents.mdx#unarchive-documents---from-the-studio)
+  * [Unarchiving documents with index-based patch queries](../data-archival/unarchiving-documents.mdx#unarchiving-documents-with-index-based-patch-queries)
+
+
+## Unarchive documents - from the Client API
+
+* To unarchive documents from the Client API, use the `PatchByQueryOperation` operation,
+  which targets one or more documents based on the patch query.
+
+* You can apply any filtering condition within the query to target only the documents you want to unarchive.
+
+* Within the **patch script**, call the `archived.unarchive()` method to unarchive all documents
+  that match the **patch query**.
+
+**Example:**
+
+The following operation will unarchive ALL archived documents in the _Orders_ collection.
+
+
+
+
+{`// Define the patch query string
+string patchByQuery = @"
+    // The patch query:
+    // ================
+    from Orders
+    update
+    {
+        // The patch script:
+        // =================
+        archived.unarchive(this)
+    }";
+
+// Define the patch operation, pass the patch string
+var patchByQueryOp = new PatchByQueryOperation(patchByQuery);
+
+// Execute the operation by passing it to Operations.Send
+store.Operations.Send(patchByQueryOp);
+`}
+
+
+
+
+{`// Define the patch query string
+string patchByQuery = @"
+    from Orders
+    update
+    {
+        archived.unarchive(this)
+    }";
+
+// Define the patch operation, pass the patch string
+var patchByQueryOp = new PatchByQueryOperation(patchByQuery);
+
+// Execute the operation by passing it to Operations.SendAsync
+await store.Operations.SendAsync(patchByQueryOp);
+`}
+
+
+
+
+{`// Define the patch query string
+string patchByQuery = @"
+    from Orders
+    update
+    {
+        archived.unarchive(this)
+    }";
+
+// Define the patch operation, pass the patch string
+var patchByQueryOp = new PatchByQueryOperation(new IndexQuery()
+{
+    Query = patchByQuery
+});
+
+// Execute the operation by passing it to Operations.Send
+store.Operations.Send(patchByQueryOp);
+`}
+
+
+
+
+{`// Define the patch query string
+string patchByQuery = @"
+    from Orders
+    update
+    {
+        archived.unarchive(this)
+    }";
+
+// Define the patch operation, pass the patch string
+var patchByQueryOp = new PatchByQueryOperation(new IndexQuery()
+{
+    Query = patchByQuery
+});
+
+// Execute the operation by passing it to Operations.SendAsync
+await store.Operations.SendAsync(patchByQueryOp);
+`}
+
+
+
+**Syntax:**
+
+
+
+{`archived.unarchive(doc)
+`}
+
+
+
+| Parameter  | Type     | Description                |
+|------------|----------|----------------------------|
+| **doc**    | `object` | The document to unarchive. |
+
+
+
+## Unarchive documents - from the Studio
+
+* To unarchive documents from the Studio:
+  * Open the Patch view.
+  * Enter a patch script that uses the `archived.unarchive()` method.
+  * Execute the patch.
+
+**Example**:
+
+The following patch script, used directly in the Studio,
+performs the same operation as the [Client API example](../data-archival/unarchiving-documents.mdx#unarchive-documents---from-the-client-api) shown above.
+It will unarchive all archived documents in the _Orders_ collection.
+
+![Unarchive documents](./assets/unarchive-documents.png)
+
+1. Open the Patch view.
+2. Enter the patch script.
+3. Click to execute the patch.
+4. You can test the patch on a sample document before executing the whole operation.
+   Learn more in [Test patch](../studio/database/documents/patch-view.mdx#test-patch).
+
+
+
+## Unarchiving documents with index-based patch queries
+
+* When running a patch query to unarchive documents over an index,
+  you need to consider the index configuration regarding archived documents.
+
+* If the index is configured to exclude archived documents, the query portion of the patch operation will not match any archived documents -
+  because those documents are not included in the index.
+  As a result, no documents will be unarchived by the patch operation.
+
+* For example, the following patch query uses a dynamic query that creates an auto-index.
+ If the [Indexing.Auto.ArchivedDataProcessingBehavior](../server/configuration/indexing-configuration.mdx#indexingautoarchiveddataprocessingbehavior) configuration key is set to its default `ExcludeArchived` value, + then even if archived documents exist in the _Orders_ collection with `ShipTo.Country == 'USA'`, they will not be matched - because the auto-index does not include them - + and the patch operation will not unarchive any documents. + + + +{`string patchByQuery = @" + // This filtering query creates an auto-index: + // =========================================== + from Orders + where ShipTo.Country == 'USA' + update + \{ + archived.unarchive(this) + \}"; + +var patchByQueryOp = new PatchByQueryOperation(patchByQuery); +store.Operations.Send(patchByQueryOp); +`} + + +Two possible workarounds are: + +1. **Configure the index to include archived documents**: + This ensures archived documents are included in the index and can be matched by the patch query. + Use this option only if including archived documents in the index aligns with your indexing strategy. + + **For auto-indexes**: + Set the [Indexing.Auto.ArchivedDataProcessingBehavior](../server/configuration/indexing-configuration.mdx#indexingautoarchiveddataprocessingbehavior) configuration key to `IncludeArchived`. + **For static-indexes**: + Set the [Indexing.Static.ArchivedDataProcessingBehavior](../server/configuration/indexing-configuration.mdx#indexingstaticarchiveddataprocessingbehavior) configuration key to `IncludeArchived`, + or - configure the definition of the specific static-index to include archived documents. + See [Archived documents and indexing](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-indexing). + +2. **Use a collection query instead of an index query**: + Run a simple collection-based query that does not rely on an index. + Apply your filtering logic inside the patch script to unarchive only the relevant documents. 
+ + For example: + + +{`string patchByQuery = @" + // Perform a collection query: + // =========================== + from Orders as order + update + \{ + // Filter documents inside the script: + // =================================== + if (order.ShipTo.City == 'New York') + \{ + archived.unarchive(this) + \} + \}"; + +var patchByQueryOp = new PatchByQueryOperation(patchByQuery); +store.Operations.Send(patchByQueryOp); +`} + + + + + + diff --git a/versioned_docs/version-7.1/data-archival/archived-documents-and-other-features.mdx b/versioned_docs/version-7.1/data-archival/archived-documents-and-other-features.mdx new file mode 100644 index 0000000000..a27ada3cd6 --- /dev/null +++ b/versioned_docs/version-7.1/data-archival/archived-documents-and-other-features.mdx @@ -0,0 +1,45 @@ +--- +title: "Archived Documents and Other Features" +hide_table_of_contents: true +sidebar_label: Archived Documents and Other Features +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ArchivedDocumentsAndOtherFeaturesCsharp from './_archived-documents-and-other-features-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/data-archival/assets/an-archived-document.png b/versioned_docs/version-7.1/data-archival/assets/an-archived-document.png new file mode 100644 index 0000000000..f5f9972251 Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/an-archived-document.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/archived-document-in-list-view.png b/versioned_docs/version-7.1/data-archival/assets/archived-document-in-list-view.png new file mode 100644 index 0000000000..295c8ce9f9 Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/archived-document-in-list-view.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/configure-static-index.png b/versioned_docs/version-7.1/data-archival/assets/configure-static-index.png new file mode 100644 index 0000000000..8fb74b5f8b Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/configure-static-index.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/configure-subscription.png b/versioned_docs/version-7.1/data-archival/assets/configure-subscription.png new file mode 100644 index 0000000000..634e994855 Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/configure-subscription.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/enable-archiving.png b/versioned_docs/version-7.1/data-archival/assets/enable-archiving.png new file mode 100644 index 0000000000..2a74f754d0 Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/enable-archiving.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/export-archived-documents.png b/versioned_docs/version-7.1/data-archival/assets/export-archived-documents.png new file mode 100644 index 0000000000..02df88c74d Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/export-archived-documents.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/import-archived-documents.png b/versioned_docs/version-7.1/data-archival/assets/import-archived-documents.png new file mode 100644 index 0000000000..c038e3302a Binary files /dev/null and 
b/versioned_docs/version-7.1/data-archival/assets/import-archived-documents.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/processing-options.png b/versioned_docs/version-7.1/data-archival/assets/processing-options.png new file mode 100644 index 0000000000..8092219ff4 Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/processing-options.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/schedule-document-for-archiving.png b/versioned_docs/version-7.1/data-archival/assets/schedule-document-for-archiving.png new file mode 100644 index 0000000000..75cd0879ae Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/schedule-document-for-archiving.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/schedule-multiple-documents-for-archiving.png b/versioned_docs/version-7.1/data-archival/assets/schedule-multiple-documents-for-archiving.png new file mode 100644 index 0000000000..3a00911f82 Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/schedule-multiple-documents-for-archiving.png differ diff --git a/versioned_docs/version-7.1/data-archival/assets/unarchive-documents.png b/versioned_docs/version-7.1/data-archival/assets/unarchive-documents.png new file mode 100644 index 0000000000..7ab3118786 Binary files /dev/null and b/versioned_docs/version-7.1/data-archival/assets/unarchive-documents.png differ diff --git a/versioned_docs/version-7.1/data-archival/enable-data-archiving.mdx b/versioned_docs/version-7.1/data-archival/enable-data-archiving.mdx new file mode 100644 index 0000000000..6853488eef --- /dev/null +++ b/versioned_docs/version-7.1/data-archival/enable-data-archiving.mdx @@ -0,0 +1,40 @@ +--- +title: "Enable Data Archiving" +hide_table_of_contents: true +sidebar_label: Enable Data Archiving +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import EnableDataArchivingCsharp from './_enable-data-archiving-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/data-archival/overview.mdx b/versioned_docs/version-7.1/data-archival/overview.mdx new file mode 100644 index 0000000000..767a5ae4db --- /dev/null +++ b/versioned_docs/version-7.1/data-archival/overview.mdx @@ -0,0 +1,110 @@ +--- +title: "Data Archival Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Data Archival Overview + + +* As a database grows, basic functions like indexing can slow down. + To address this, RavenDB offers the ability to **archive selected documents**. + Documents that are rarely accessed or no longer relevant for active use, but still need to be kept for compliance or historical purposes, can be archived. + +* RavenDB features can detect archived documents and handle them in different ways. + For example, indexing and data subscriptions can be configured to exclude, include, or handle only archived documents during processing. + Limiting processing to either archived or non-archived documents may **improve performance**. 
+ Learn more in [Archived documents and other features](../data-archival/archived-documents-and-other-features.mdx). + +* Archived documents are stored in **compressed** form, which helps **reduce database size**. + Note that while compression saves disk space, retrieving archived documents may be slower and consume more CPU/memory than accessing regular documents. + +* To take advantage of the archiving feature, you must first **enable** it. + Once enabled, you can **schedule** documents for archiving. + Learn more in the [Overview](../data-archival/overview.mdx#overview) section below. +* In this article: + * [Overview](../data-archival/overview.mdx#overview) + * [The archived document](../data-archival/overview.mdx#the-archived-document) + * [Licensing](../data-archival/overview.mdx#licensing) + + +## Overview + +* **Enable the archiving feature** + * To archive documents, you must first enable the archiving feature in RavenDB. + Learn how in the dedicated article: [Enable data archiving](../data-archival/enable-data-archiving.mdx). + +* **Schedule documents for archival** + * Once the feature is enabled, you can schedule any document to be archived at a specific future time. + This is done by adding the `@archive-at` metadata property to the document, which specifies the time at which it should be archived. + Learn more in the dedicated article: [Schedule document archiving](../data-archival/schedule-document-archiving.mdx). + * The server scans the database periodically (at an interval specified when the task is enabled), + identifies documents scheduled for archiving, and archives them at the scheduled time. + * In a cluster, the archiving task runs on the [preferred node](../client-api/configuration/load-balance/overview.mdx#the-preferred-node) (the first node in the cluster topology). + Archived documents are then propagated to the other nodes through [internal replication](../server/clustering/replication/replication-overview.mdx#internal-replication). + +* **What is archived** + * The JSON document itself is archived. + Time series, counters, and attachments associated with the document are Not archived. + A revision that is created from an archived document is archived as well. + +* **Modifying archived documents** + * Archived documents and their extensions (time series, counters, attachments) remain accessible and can be updated + (except for revisions, which are immutable). + * Modifying an archived document or any of its extensions does not affect its archival status - + the document remains archived. + +* **Unarchiving documents** + * An archived document can be unarchived. + Learn more in the dedicated article: [Unarchiving documents](../data-archival/unarchiving-documents.mdx). + + + +## The archived document + +A document that has been archived is compressed and marked with both a metadata property and an internal metadata flag: + +* **Metadata property**: + * When a document is archived, + the archiving task replaces the `@archive-at` metadata property with `"@archived": true`. + * This property is informational only - you **cannot** archive a document by manually adding `"@archived": true` to its metadata. + To archive a document, you must schedule it for archival. Learn more in [Schedule document archiving](../data-archival/schedule-document-archiving.mdx). + * This property allows RavenDB features, clients, and users to recognize the document’s archived status and handle it accordingly. 
+    For example, a user-defined ETL task can use the presence of this property to skip or handle archived documents differently.
+    Learn more in [Archived documents and other features](../data-archival/archived-documents-and-other-features.mdx).
+
+* **Internal flag**:
+  * When a document is archived, the internal flag `"@flags": "Archived"` is added to its metadata.
+  * Features like indexing and data subscriptions use this flag to detect archived documents and handle them based on the configured behavior.
+    Learn more in [Archived documents and indexing](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-indexing) &
+    [Archived documents and data subscriptions](../data-archival/archived-documents-and-other-features.mdx#archived-documents-and-subscriptions).
+
+**An archived document**:
+
+![An archived document](./assets/an-archived-document.png)
+
+1. This label in the Studio indicates that this document is archived.
+2. The `@flags` metadata property contains the `Archived` flag.
+3. The document’s metadata contains the `@archived: true` property.
+
+**An archived document in the list view**:
+
+![Archived document in list view](./assets/archived-document-in-list-view.png)
+
+1. This icon indicates the document is archived.
+
+
+
+## Licensing
+
+The archival feature is available with the **Enterprise** license.
+Learn more about licensing in [Licensing overview](../start/licensing/licensing-overview.mdx).
+
+
+
diff --git a/versioned_docs/version-7.1/data-archival/schedule-document-archiving.mdx b/versioned_docs/version-7.1/data-archival/schedule-document-archiving.mdx
new file mode 100644
index 0000000000..aab72079c2
--- /dev/null
+++ b/versioned_docs/version-7.1/data-archival/schedule-document-archiving.mdx
@@ -0,0 +1,40 @@
+---
+title: "Schedule Document Archiving"
+hide_table_of_contents: true
+sidebar_label: Schedule Document Archiving
+sidebar_position: 2
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import ScheduleDocumentArchivingCsharp from './_schedule-document-archiving-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/data-archival/unarchiving-documents.mdx b/versioned_docs/version-7.1/data-archival/unarchiving-documents.mdx
new file mode 100644
index 0000000000..e71c531b85
--- /dev/null
+++ b/versioned_docs/version-7.1/data-archival/unarchiving-documents.mdx
@@ -0,0 +1,40 @@
+---
+title: "Unarchiving Documents"
+hide_table_of_contents: true
+sidebar_label: Unarchiving Documents
+sidebar_position: 4
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import UnarchivingDocumentsCsharp from './_unarchiving-documents-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/_category_.json b/versioned_docs/version-7.1/document-extensions/_category_.json
new file mode 100644
index 0000000000..49bb9c4b28
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 3,
+  "label": "Document Extensions"
+}
diff --git a/versioned_docs/version-7.1/document-extensions/assets/extensions-collections-view.png b/versioned_docs/version-7.1/document-extensions/assets/extensions-collections-view.png
new file
mode 100644 index 0000000000..acce5fe300 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/assets/extensions-collections-view.png differ diff --git a/versioned_docs/version-7.1/document-extensions/assets/extensions-logos.png b/versioned_docs/version-7.1/document-extensions/assets/extensions-logos.png new file mode 100644 index 0000000000..006c698ef5 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/assets/extensions-logos.png differ diff --git a/versioned_docs/version-7.1/document-extensions/assets/extensions-managing-single-doc.png b/versioned_docs/version-7.1/document-extensions/assets/extensions-managing-single-doc.png new file mode 100644 index 0000000000..0245af1738 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/assets/extensions-managing-single-doc.png differ diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_bulk-insert-csharp.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_bulk-insert-csharp.mdx new file mode 100644 index 0000000000..9ca3ffbcbe --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_bulk-insert-csharp.mdx @@ -0,0 +1,119 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* [BulkInsert](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx) is RavenDB's high-performance data insertion operation. + Use its `AttachmentsFor` interface to add attachments to documents with great speed. + +* In this page: + * [Usage flow](../../document-extensions/attachments/bulk-insert.mdx#usage-flow) + * [Usage example](../../document-extensions/attachments/bulk-insert.mdx#usage-example) + * [Syntax](../../document-extensions/attachments/bulk-insert.mdx#syntax) + + + +## Usage flow + +* Create a `BulkInsert` instance. + +* Pass the Document ID to the instance's `AttachmentsFor` method. + +* To add an attachment, call `Store`. + Pass it the attachment's name, stream, and type (optional). + The `Store` function can be called repeatedly as necessary. + +* Note: + If an attachment with the specified name already exists on the document, + the bulk insert operation will overwrite it. + + + +## Usage example + +In this example, we attach a file to all User documents that match a query. 
+
+
+
+
+{`List<User> users;
+
+// Choose user profiles for which to attach a file
+using (var session = store.OpenSession())
+{
+    users = session.Query<User>()
+        .Where(u => u.Age < 30)
+        .ToList();
+}
+
+// Prepare content to attach
+byte[] byteArray = Encoding.UTF8.GetBytes("some contents here");
+var stream = new MemoryStream(byteArray);
+
+// Create a BulkInsert instance
+using (var bulkInsert = store.BulkInsert())
+{
+    for (var i = 0; i < users.Count; i++)
+    {
+        string userId = users[i].Id;
+
+        // Call 'AttachmentsFor', pass the document ID for which to attach the file
+        var attachmentsBulkInsert = bulkInsert.AttachmentsFor(userId);
+
+        // Call 'Store' to add the file to the BulkInsert instance
+        // The data stored in bulkInsert will be streamed to the server in batches
+        attachmentsBulkInsert.Store("AttachmentName", stream);
+    }
+}
+`}
+
+
+
+
+{`public class User
+{
+    public string Id { get; set; }
+    public string Name { get; set; }
+    public string LastName { get; set; }
+    public string AddressId { get; set; }
+    public int Count { get; set; }
+    public int Age { get; set; }
+}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public AttachmentsBulkInsert AttachmentsFor(string id)
+`}
+
+
+
+| Parameter  | Type     | Description                                               |
+|------------|----------|-----------------------------------------------------------|
+| `id`       | `string` | The document ID to which the attachment should be added.  |
+
+
+
+{`public void Store(string name, Stream stream, string contentType = null)
+`}
+
+
+
+| Parameter     | Type     | Description                        |
+|---------------|----------|------------------------------------|
+| `name`        | `string` | Name of attachment                 |
+| `stream`      | `Stream` | The attachment's stream            |
+| `contentType` | `string` | Type of attachment (default: null) |
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_bulk-insert-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_bulk-insert-nodejs.mdx
new file mode 100644
index 0000000000..8061e57034
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_bulk-insert-nodejs.mdx
@@ -0,0 +1,124 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* [BulkInsert](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx) is RavenDB's high-performance data insertion operation.
+  Use its `attachmentsFor` interface to add attachments to documents with great speed.
+
+* In this page:
+  * [Usage flow](../../document-extensions/attachments/bulk-insert.mdx#usage-flow)
+  * [Usage example](../../document-extensions/attachments/bulk-insert.mdx#usage-example)
+  * [Syntax](../../document-extensions/attachments/bulk-insert.mdx#syntax)
+
+
+
+## Usage flow
+
+* Create a `bulkInsert` instance.
+
+* Pass the Document ID to the instance's `attachmentsFor` method.
+
+* To add an attachment, call `store`.
+  Pass it the attachment's name, content, and type (optional).
+  The `store` function can be called repeatedly as necessary.
+
+* Note:
+  If an attachment with the specified name already exists on the document,
+  the bulk insert operation will overwrite it.
+
+
+
+## Usage example
+
+In this example, we attach a file to all User documents that match a query.
+
+
+
+
+{`// Open a session
+const session = documentStore.openSession();
+
+// Choose user profiles for which to attach a file
+const users = await session.query({ collection: "users" })
+    .whereLessThan("age", 30)
+    .all();
+
+// Prepare content that will be attached
+const text = "Some contents here";
+const byteArray = Buffer.from(text);
+
+// Create a bulkInsert instance
+const bulkInsert = documentStore.bulkInsert();
+
+try {
+    for (let i = 0; i < users.length; i++) {
+
+        // Call \`attachmentsFor\`, pass the document ID for which to attach the file
+        const attachmentsBulkInsert = bulkInsert.attachmentsFor(users[i].id);
+
+        // Call 'store' to attach the byte array to the bulkInsert instance
+        // The data stored in bulkInsert will be streamed to the server in batches
+        await attachmentsBulkInsert.store("attachmentName", byteArray);
+    }
+} finally {
+    // Call finish to send all remaining data to the server
+    await bulkInsert.finish();
+}
+`}
+
+
+
+
+{`class User {
+    constructor(
+        id = null,
+        age = 0,
+        name = ''
+    ) {
+        Object.assign(this, {
+            id,
+            age,
+            name
+        });
+    }
+}
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`attachmentsFor(id);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|----------|----------------------------------------------------------|
+| `id` | `string` | The document ID to which the attachment should be added. |
+
+
+
+{`store(name, bytes);
+store(name, bytes, contentType);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|----------|------------------------------------|
+| `name` | `string` | Name of attachment |
+| `bytes` | `Buffer` | The attachment's content |
+| `contentType` | `string` | Type of attachment (default: null) |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_bulk-insert-python.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_bulk-insert-python.mdx
new file mode 100644
index 0000000000..f79c683dbe
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_bulk-insert-python.mdx
@@ -0,0 +1,65 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* [bulk_insert](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx) is RavenDB's
+  high-performance data insertion operation.
+  Use its `attachments_for` interface to add attachments to multiple documents with great speed.
+* Use its `store` method to add each attachment.
+
+* In this page:
+  * [Usage flow](../../document-extensions/attachments/bulk-insert.mdx#usage-flow)
+  * [Usage example](../../document-extensions/attachments/bulk-insert.mdx#usage-example)
+
+
+
+## Usage flow
+
+* Create a `bulk_insert` instance.
+
+* Pass the Document ID to the instance's `attachments_for` method.
+
+* To add an attachment, call the `store` method.
+  Pass it the attachment's name, stream, and type (optional).
+  `store` can be called repeatedly, as many times as needed.
+
+* Note:
+  If an attachment with the specified name already exists on the document,
+  the bulk insert operation will overwrite it.
+
+
+
+## Usage example
+
+In this example, we attach a file to all User documents that match a query.
+
+
+{`# Choose user profiles for which to attach a file
+with store.open_session() as session:
+    user_ids = [
+        session.advanced.get_document_id(user)
+        for user in list(session.query(object_type=User).where_less_than("Age", 30))
+    ]
+
+# Prepare content to attach
+bytes_to_attach = b"some contents here"
+
+# Create a BulkInsert instance
+with store.bulk_insert() as bulk_insert:
+    for user_id in user_ids:
+        # Call 'attachments_for', pass the document ID for which to attach the file
+        attachments_bulk_insert = bulk_insert.attachments_for(user_id)
+
+        # Call 'store' to add the file to the BulkInsert instance
+        # The data stored in bulk_insert will be streamed to the server in batches
+        attachments_bulk_insert.store("AttachmentName", bytes_to_attach)
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_category_.json b/versioned_docs/version-7.1/document-extensions/attachments/_category_.json
new file mode 100644
index 0000000000..fc6c0b1e6e
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 3,
+    "label": "Attachments"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-csharp.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-csharp.mdx
new file mode 100644
index 0000000000..1537f1a14a
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-csharp.mdx
@@ -0,0 +1,148 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Attachments can be copied, moved, or renamed using built-in session methods.
+All of those actions are executed when `SaveChanges` is called and take place on the server-side,
+removing the need to transfer the entire attachment binary data over the network in order to perform the action.
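+
+Because all of these operations are deferred until `SaveChanges`, several of them can be queued on one session and executed server-side in a single round trip. The following is a minimal sketch of that pattern (the document IDs and attachment names are illustrative only, and it assumes the queued commands are applied in the order they were registered):
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Queue a copy and a rename - nothing is sent to the server yet
+    session.Advanced.Attachments.Copy(
+        "employees/1-A", "photo.jpg", "employees/2-A", "photo-backup.jpg");
+    session.Advanced.Attachments.Rename(
+        "employees/1-A", "photo.jpg", "portrait.jpg");
+
+    // Both operations are executed server-side by this single call
+    session.SaveChanges();
+}
+`}
+
+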
+ +## Copy attachment + +Attachment can be copied using one of the `session.Advanced.Attachments.Copy` methods: + +### Syntax + + + +{`void Copy( + object sourceEntity, + string sourceName, + object destinationEntity, + string destinationName); + +void Copy( + string sourceDocumentId, + string sourceName, + string destinationDocumentId, + string destinationName); +`} + + + +### Example + + + + +{`var employee1 = session.Load("employees/1-A"); +var employee2 = session.Load("employees/2-A"); + +session.Advanced.Attachments.Copy(employee1, "photo.jpg", employee2, "photo-copy.jpg"); + +session.SaveChanges(); +`} + + + + +{`var employee1 = await asyncSession.LoadAsync("employees/1-A"); +var employee2 = await asyncSession.LoadAsync("employees/2-A"); + +asyncSession.Advanced.Attachments.Copy(employee1, "photo.jpg", employee2, "photo-copy.jpg"); + +await asyncSession.SaveChangesAsync(); +`} + + + + + + +## Move attachment + +Attachment can be moved using one of the `session.Advanced.Attachments.Move` methods: + +### Syntax + + + +{`void Move(object sourceEntity, string sourceName, object destinationEntity, string destinationName); + +void Move(string sourceDocumentId, string sourceName, string destinationDocumentId, string destinationName); +`} + + + +### Example + + + + +{`var employee1 = session.Load("employees/1-A"); +var employee2 = session.Load("employees/2-A"); + +session.Advanced.Attachments.Move(employee1, "photo.jpg", employee2, "photo.jpg"); + +session.SaveChanges(); +`} + + + + +{`var employee1 = await asyncSession.LoadAsync("employees/1-A"); +var employee2 = await asyncSession.LoadAsync("employees/2-A"); + +asyncSession.Advanced.Attachments.Move(employee1, "photo.jpg", employee2, "photo.jpg"); + +await asyncSession.SaveChangesAsync(); +`} + + + + + + +## Rename attachment + +Attachment can be renamed using one of the `session.Advanced.Attachments.Rename` methods: + +### Syntax + + + +{`void Rename(object entity, string name, string newName); + +void Rename(string documentId, string name, string newName); +`} + + + +### Example + + + + +{`var employee = session.Load("employees/1-A"); + +session.Advanced.Attachments.Rename(employee, "photo.jpg", "photo-new.jpg"); + +session.SaveChanges(); +`} + + + + +{`var employee = await asyncSession.LoadAsync("employees/1-A"); + +asyncSession.Advanced.Attachments.Rename(employee, "photo.jpg", "photo-new.jpg"); + +await asyncSession.SaveChangesAsync(); +`} + + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-java.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-java.mdx new file mode 100644 index 0000000000..bb9db7b11d --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-java.mdx @@ -0,0 +1,112 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Attachments can be copied, moved, or renamed using built-in session methods. +All of those actions are executed when `saveChanges` is called and take place on the server-side, +removing the need to transfer the entire attachment binary data over the network in order to perform the action. 
+ +## Copy attachment + +Attachment can be copied using one of the `session.advanced().attachments().copy` methods: + +### Syntax + + + +{`void copy(Object sourceEntity, String sourceName, + Object destinationEntity, String destinationName); + +void copy(String sourceDocumentId, String sourceName, + String destinationDocumentId, String destinationName); +`} + + + +### Example + + + +{`Employee employee1 = session.load(Employee.class, "employees/1-A"); +Employee employee2 = session.load(Employee.class, "employees/2-A"); + +session.advanced() + .attachments() + .copy(employee1, "photo.jpg", employee2, "photo-copy.jpg"); + +session.saveChanges(); +`} + + + + + +## Move attachment + +Attachment can be moved using one of the `session.advanced().attachments().move` methods: + +### Syntax + + + +{`void move(Object sourceEntity, String sourceName, + Object destinationEntity, String destinationName); + +void move(String sourceDocumentId, String sourceName, + String destinationDocumentId, String destinationName); +`} + + + +### Example + + + +{`Employee employee1 = session.load(Employee.class, "employees/1-A"); +Employee employee2 = session.load(Employee.class, "employees/2-A"); + +session.advanced() + .attachments() + .copy(employee1, "photo.jpg", employee2, "photo.jpg"); + +session.saveChanges(); +`} + + + + + +## Rename attachment + +Attachment can be renamed using one of the `session.advanced().attachments().rename` methods: + +### Syntax + + + +{`void rename(Object entity, String name, String newName); + +void rename(String documentId, String name, String newName); +`} + + + +### Example + + + +{`Employee employee1 = session.load(Employee.class, "employees/1-A"); + +session.advanced() + .attachments() + .rename(employee1, "photo.jpg", "photo-new.jpg"); + +session.saveChanges(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-nodejs.mdx new file mode 100644 index 0000000000..b3b0605659 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-nodejs.mdx @@ -0,0 +1,175 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Attachments can be copied, moved, or renamed using built-in session methods. + +* All of those actions are executed when `saveChanges` is called and take place on the server-side, + removing the need to transfer the entire attachment binary data over the network in order to perform the action. + +* In this page: + * [Copy attachment](../../document-extensions/attachments/copying-moving-renaming.mdx#copy-attachment) + * [Move attachment](../../document-extensions/attachments/copying-moving-renaming.mdx#move-attachment) + * [Rename attachment](../../document-extensions/attachments/copying-moving-renaming.mdx#rename-attachment) + * [Syntax](../../document-extensions/attachments/copying-moving-renaming.mdx#syntax) + + +## Copy attachment + +Use `session.advanced.attachments.copy` to copy an attachment from one document to another. 
+ + + + +{`// Load entities +const employee1 = await session.load("employees/1-A"); +const employee2 = await session.load("employees/2-A"); + +// Call method 'copy' +// Copy attachment from employee1 to employee2 +session.advanced.attachments.copy(employee1, "photo.jpg", employee2, "photo-copy.jpg"); + +// Attachment will be copied on the server-side only when saveChanges is called +await session.saveChanges(); +`} + + + + +{`// Call method 'copy' +// Copy attachment from "employees/1-A" to "employees/2-A" +session.advanced.attachments.copy("employees/1-A", "photo.jpg", "employees/2-A", "photo-copy.jpg"); + +// Attachment will be copied on the server-side only when saveChanges is called +await session.saveChanges(); +`} + + + + + + +## Move attachment + +Use `session.advanced.attachments.move` to move an attachment from one document to another. + + + + +{`// Load entities +const employee1 = await session.load("employees/1-A"); +const employee2 = await session.load("employees/2-A"); + +// Call method 'move' +// Move attachment from employee1 to employee2 +session.advanced.attachments.move(employee1, "photo.jpg", employee2, "photo.jpg"); + +// Attachment will be moved on the server-side only when saveChanges is called +await session.saveChanges(); +`} + + + + +{`// Call method 'move' +// Move attachment from "employees/1-A" to "employees/2-A" +session.advanced.attachments.move("employees/1-A", "photo.jpg", "employees/2-A", "photo.jpg"); + +// Attachment will be moved on the server-side only when saveChanges is called +await session.saveChanges(); +`} + + + + + + +## Rename attachment + +Use `session.advanced.attachments.rename` to rename an attachment. + + + + +{`// Load entity +const employee = await session.load("employees/1-A"); + +// Call method 'rename' +// Rename "photo.jpg" +session.advanced.attachments.rename(employee, "photo.jpg", "photo-new.jpg"); + +// Attachment will be renamed on the server-side only when saveChanges is called +await session.saveChanges(); +`} + + + + +{`// Call method 'rename' +// Rename "photo.jpg" +session.advanced.attachments.rename("employees/1-A", "photo.jpg", "photo-new.jpg"); + +// Attachment will be renamed on the server-side only when saveChanges is called +await session.saveChanges(); +`} + + + + + + +## Syntax + + + +{`// Copy - available overloads: +// =========================== +copy(sourceEntity, sourceName, destinationEntity, destinationName); +copy(sourceDocumentId, sourceName, destinationDocumentId, destinationName); +`} + + + + + +{`// Move - a vailable overloads: +// ============================ +move(sourceEntity, sourceName, destinationEntity, destinationName); +move(sourceDocumentId, sourceName, destinationDocumentId, destinationName); +`} + + + +| Parameter | Type | Description | +|---------------------------|----------|-----------------------------| +| **sourceEntity** | `object` | Source entity | +| **destinationEntity** | `object` | Destination entity | +| **sourceDocumentId** | string | Source document Id | +| **destinationDocumentId** | string | Destination document Id | +| **sourceName** | string | Source attachment name | +| **destinationName** | string | Destination attachment name | + + + + +{`// Rename - available overloads: +// ============================= +rename(entity, name, newName); +rename(documentId, name, newName); +`} + + + +| Parameter | Type | Description | +|----------------|----------|---------------------------------| +| **entity** | `object` | The document entity | +| **documentId** | string | The document Id | 
+| **name** | string | Current name of attachment | +| **newName** | string | The new name for the attachment | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-php.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-php.mdx new file mode 100644 index 0000000000..5003ed3b6b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-php.mdx @@ -0,0 +1,96 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Attachments can be copied, moved, or renamed using built-in session methods. +All of those actions are executed when `saveChanges` is called and take place on the server-side, +removing the need to transfer the entire attachment binary data over the network in order to perform the action. + +## Copy attachment + +Attachment can be copied using one of the `session.advanced.attachments.copy` methods: + +### Syntax + + + +{`function copy(object|string $sourceIdOrEntity, ?string $sourceName, object|string $destinationIdOrEntity, ?string $destinationName): void; +`} + + + +### Example + + + +{`$employee1 = $session->load(Employee::class, "employees/1-A"); +$employee2 = $session->load(Employee::class, "employees/2-A"); + +$session->advanced()->attachments()->copy($employee1, "photo.jpg", $employee2, "photo-copy.jpg"); + +$session->saveChanges(); +`} + + + + + +## Move attachment + +Attachment can be moved using one of the `session.advanced.attachments.move` methods: + +### Syntax + + + +{`public function move(object|string $sourceIdOrEntity, ?string $sourceName, object|string $destinationIdOrEntity, ?string $destinationName): void; +`} + + + +### Example + + + +{`$employee1 = $session->load(Employee::class, "employees/1-A"); +$employee2 = $session->load(Employee::class, "employees/2-A"); + +$session->advanced()->attachments()->move($employee1, "photo.jpg", $employee2, "photo.jpg"); + +$session->saveChanges(); +`} + + + + + +## Rename attachment + +Attachment can be renamed using one of the `session.advanced.attachments.rename` methods: + +### Syntax + + + +{`function rename(string|object $idOrEntity, ?string $name, ?string $newName): void; +`} + + + +### Example + + + +{`$employee = $session->load(Employee::class, "employees/1-A"); + +$session->advanced()->attachments()->rename($employee, "photo.jpg", "photo-new.jpg"); + +$session->saveChanges(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-python.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-python.mdx new file mode 100644 index 0000000000..c73d0c43e4 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_copying-moving-renaming-python.mdx @@ -0,0 +1,108 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Attachments can be copied, moved, or renamed using built-in session methods. +All of those actions are executed when `save_changes` is called and take place on the server-side, +removing the need to transfer the entire attachment binary data over the network in order to perform the action. 
+ +## Copy attachment + +Attachment can be copied using one of the `session.advanced.attachments.copy` methods: + +### Syntax + + + +{`def copy( + self, + entity_or_document_id: Union[object, str], + source_name: str, + destination_entity_or_document_id: object, + destination_name: str, +) -> None: ... +`} + + + +### Example + + + +{`employee_1 = session.load("employees/1-A") +employee_2 = session.load("employees/2-A") + +session.advanced.attachments.copy(employee_1, "photo.jpg", employee_2, "photo-copy.jpg") + +session.save_changes() +`} + + + + + +## Move attachment + +Attachment can be moved using one of the `session.advanced.attachments.move` methods: + +### Syntax + + + +{`def move( + self, + source_entity_or_document_id: Union[str, object], + source_name: str, + destination_entity_or_document_id: Union[str, object], + destination_name: str, +) -> None: ... +`} + + + +### Example + + + +{`employee1 = session.load("employees/1-A") +employee2 = session.load("employees/2-A") + +session.advanced.attachments.move(employee1, "photo.jpg", employee2, "photo.jpg") + +session.save_changes() +`} + + + + + +## Rename attachment + +Attachment can be renamed using one of the `session.advanced.attachments.rename` methods: + +### Syntax + + + +{`def rename(self, entity_or_document_id: Union[str, object], name: str, new_name: str) -> None: ... +`} + + + +### Example + + + +{`employee = session.load("employees/1-A", Employee) + +session.advanced.attachments.rename(employee, "photo.jpg", "photo-new.jpg") + +session.save_changes() +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_deleting-csharp.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-csharp.mdx new file mode 100644 index 0000000000..de50ebeb51 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-csharp.mdx @@ -0,0 +1,49 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**Delete** from `session.Advanced.Attachments` is used to remove an attachment from a document. + +## Syntax + + + +{`void Delete(string documentId, string name); +void Delete(object entity, string name); +`} + + + +## Example + + + + +{`using (var session = store.OpenSession()) +{ + Album album = session.Load("albums/1"); + session.Advanced.Attachments.Delete(album, "001.jpg"); + session.Advanced.Attachments.Delete("albums/1", "002.jpg"); + + session.SaveChanges(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + Album album = await asyncSession.LoadAsync("albums/1"); + asyncSession.Advanced.Attachments.Delete(album, "001.jpg"); + asyncSession.Advanced.Attachments.Delete("albums/1", "002.jpg"); + + await asyncSession.SaveChangesAsync(); +} +`} + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_deleting-java.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-java.mdx new file mode 100644 index 0000000000..57b740bfda --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-java.mdx @@ -0,0 +1,34 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**Delete** from `session.advanced().attachments()` is used to remove an attachment from a document. 
+ +## Syntax + + + +{`void delete(String documentId, String name); + +void delete(Object entity, String name); +`} + + + +## Example + + + +{`try (IDocumentSession session = store.openSession()) \{ + Album album = session.load(Album.class, "albums/1"); + session.advanced().attachments().delete(album, "001.jpg"); + session.advanced().attachments().delete("albums/1", "002.jpg"); + + session.saveChanges(); +\} +`} + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_deleting-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-nodejs.mdx new file mode 100644 index 0000000000..9ab7f434d5 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-nodejs.mdx @@ -0,0 +1,34 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The method `session.advanced.attachments.delete()` is used to remove an attachment from a document. + +## Syntax + + + +{`session.advanced.attachments.delete(documentId, name); + +session.advanced.attachments.delete(entity, name); +`} + + + +## Example + + + +{`const session = store.openSession(); +const album = await session.load("albums/1"); +session.advanced.attachments.delete(album, "001.jpg"); + +session.advanced.attachments.delete("albums/1", "002.jpg"); + +await session.saveChanges(); +`} + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_deleting-php.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-php.mdx new file mode 100644 index 0000000000..a1d70478ac --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-php.mdx @@ -0,0 +1,39 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**Delete** from `session.advanced.attachments` is used to remove an attachment from a document. + +## Syntax + + + +{`/** + * Marks the specified document's attachment for deletion. + * The attachment will be deleted when saveChanges is called. + */ +public function delete(object|string $idOrEntity, ?string $name): void; +`} + + + +## Example + + + +{`$session = $store->openSession(); +try \{ + $album = $session->load(Album::class, "albums/1"); + $session->advanced()->attachments()->delete($album, "001.jpg"); + $session->advanced()->attachments()->delete("albums/1", "002.jpg"); + + $session->saveChanges(); +\} finally \{ + $session->close(); +\} +`} + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_deleting-python.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-python.mdx new file mode 100644 index 0000000000..545ce12b6c --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_deleting-python.mdx @@ -0,0 +1,31 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +**Delete** from `session.advanced.attachments` is used to remove an attachment from a document. + +## Syntax + + + +{`def delete(self, entity_or_document_id, name): ... 
+`} + + + +## Example + + + +{`with store.open_session() as session: + album = session.load("albums/1") + session.advanced.attachments.delete(album, "001.jpg") + session.advanced.attachments.delete("albums/1", "002.jpg") + + session.save_changes() +`} + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_indexing-csharp.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-csharp.mdx new file mode 100644 index 0000000000..3a37a59522 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-csharp.mdx @@ -0,0 +1,576 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Indexing attachments allows you to query for documents based on their attachments' details and content. + +* **Static indexes**: + Both attachments' details and content can be indexed within a static-index definition. + +* **Auto-indexes**: + Auto-indexing attachments via dynamic queries is not available at this time. + +* In this page: + * [Index attachments details](../../document-extensions/attachments/indexing.mdx#index-attachments-details) + * [Index details & content - by attachment name](../../document-extensions/attachments/indexing.mdx#index-details--content---by-attachment-name) + * [Index details & content - all attachments](../../document-extensions/attachments/indexing.mdx#index-details--content---all-attachments) + * [Leveraging indexed attachments](../../document-extensions/attachments/indexing.mdx#leveraging-indexed-attachments) + * [Syntax](../../document-extensions/attachments/indexing.mdx#syntax) + + +## Index attachments details + +**The index**: + +* To index **attachments' details**, call `AttachmentsFor()` within the index definition. + +* `AttachmentsFor()` provides access to the **name**, **size**, **hash**, and **content-type** of each attachment a document has. + These details can then be used when defining the index-fields. + Once the index is deployed, you can query the index to find Employee documents based on these attachment properties. + +* To index **attachments' content**, see the examples [below](../../document-extensions/attachments/indexing.mdx#index-details--content---by-attachment-name). 
+ + + + +{`public class Employees_ByAttachmentDetails : + AbstractIndexCreationTask +{ + public class IndexEntry + { + public string EmployeeName { get; set; } + + public string[] AttachmentNames { get; set; } + public string[] AttachmentContentTypes { get; set; } + public long[] AttachmentSizes { get; set; } + } + + public Employees_ByAttachmentDetails() + { + Map = employees => from employee in employees + + // Call 'AttachmentsFor' to get attachments details + let attachments = AttachmentsFor(employee) + + select new IndexEntry() + { + // Can index info from document properties: + EmployeeName = employee.FirstName + " " + employee.LastName, + + // Index DETAILS of attachments: + AttachmentNames = attachments.Select(x => x.Name).ToArray(), + AttachmentContentTypes = attachments.Select(x => x.ContentType).ToArray(), + AttachmentSizes = attachments.Select(x => x.Size).ToArray() + }; + } +} +`} + + + + +{`public class Employees_ByAttachmentDetails_JS : AbstractJavaScriptIndexCreationTask +{ + public Employees_ByAttachmentDetails_JS() + { + Maps = new HashSet + { + @"map('Employees', function (employee) { + var attachments = attachmentsFor(employee); + + return { + EmployeeName: employee.FirstName + ' ' + employee.LastName, + + AttachmentNames: attachments.map(function(attachment) { return attachment.Name; }), + AttachmentContentTypes: attachments.map(function(attachment) { return attachment.ContentType; }), + AttachmentSizes: attachments.map(function(attachment) { return attachment.Size; }) + }; + })" + }; + } +} +`} + + + +**Query the Index**: + +You can now query for Employee documents based on their attachments details. + + + + +{`List employees = session + // Query the index for matching employees + .Query() + // Filter employee results by their attachments details + .Where(x => x.AttachmentNames.Contains("photo.jpg")) + .Where(x => x.AttachmentSizes.Any(size => size > 20_000)) + // Return matching Employee docs + .OfType() + .ToList(); + +// Results: +// ======== +// Running this query on the Northwind sample data, +// results will include 'employees/4-A' and 'employees/5-A'. +// These 2 documents contain an attachment by name 'photo.jpg' with a matching size. +`} + + + + +{`List employees = await asyncSession + .Query() + .Where(x => x.AttachmentNames.Contains("photo.jpg")) + .Where(x => x.AttachmentSizes.Any(size => size > 20_000)) + .OfType() + .ToListAsync(); +`} + + + + +{`List employees = session.Advanced + .DocumentQuery() + .WhereEquals("AttachmentNames", "photo.jpg") + .WhereGreaterThan("AttachmentSizes", 20_000) + .OfType() + .ToList(); +`} + + + + +{`from index "Employees/ByAttachmentDetails" +where AttachmentNames == "photo.jpg" and AttachmentSizes > 20000 +`} + + + + + + +## Index details & content - by attachment name + + + +**Sample data**: + +* Each Employee document in RavenDB's sample data already includes a _photo.jpg_ attachment. + +* For all following examples, let's store a textual attachment (file _notes.txt_) on 3 documents in the 'Employees' collection. 
+ + + +{`// Create some sample attachments: +for (var i = 1; i <= 3; i++) +\{ + var id = $"employees/\{i\}-A"; + + // Load an employee document: + var employee = session.Load($"employees/\{i\}-A"); + if (employee?.Notes == null || employee.Notes.Count == 0) + continue; + + // Store the employee's notes as an attachment on the document: + byte[] bytes = System.Text.Encoding.UTF8.GetBytes(employee.Notes[0]); + using (var stream = new MemoryStream(bytes)) + \{ + session.Advanced.Attachments.Store( + $"employees/\{i\}-A", + "notes.txt", stream, + "text/plain"); + + session.SaveChanges(); + \} +\} +`} + + + + +**The index**: + +* To index the **details & content** for a specific attachment, call `LoadAttachment()` within the index definition. + +* In addition to accessing the attachment details, `LoadAttachment()` provides access to the attachment's content, + which can be used when defining the index-fields. + + + + +{`public class Employees_ByAttachment: + AbstractIndexCreationTask +{ + public class IndexEntry + { + public string AttachmentName { get; set; } + public string AttachmentContentType { get; set; } + public long AttachmentSize { get; set; } + + public string AttachmentContent { get; set; } + } + + public Employees_ByAttachment() + { + Map = employees => + from employee in employees + + // Call 'LoadAttachment' to get attachment's details and content + // pass the attachment name, e.g. "notes.txt" + let attachment = LoadAttachment(employee, "notes.txt") + + select new IndexEntry() + { + // Index DETAILS of attachment: + AttachmentName = attachment.Name, + AttachmentContentType = attachment.ContentType, + AttachmentSize = attachment.Size, + + // Index CONTENT of attachment: + // Call 'GetContentAsString' to access content + AttachmentContent = attachment.GetContentAsString() + }; + + // It can be useful configure Full-Text search on the attachment content index-field + Index(x => x.AttachmentContent, FieldIndexing.Search); + + // Documents with an attachment named 'notes.txt' will be indexed, + // allowing you to query them by either the attachment's details or its content. + } +} +`} + + + + +{`public class Employees_ByAttachment_JS : AbstractJavaScriptIndexCreationTask +{ + public Employees_ByAttachment_JS() + { + Maps = new HashSet + { + @"map('Employees', function (employee) { + var attachment = loadAttachment(employee, 'notes.txt'); + + return { + AttachmentName: attachment.Name, + AttachmentContentType: attachment.ContentType, + AttachmentSize: attachment.Size, + AttachmentContent: attachment.getContentAsString() + }; + })" + }; + + Fields = new Dictionary + { + { + "AttachmentContent", new IndexFieldOptions + { + Indexing = FieldIndexing.Search + } + } + }; + } +} +`} + + + +**Query the Index**: + +You can now query for Employee documents based on their attachment details and/or its content. + + + + +{`List employees = session + // Query the index for matching employees + .Query() + // Can make a full-text search + // Looking for employees with an attachment content that contains 'Colorado' OR 'Dallas' + .Search(x => x.AttachmentContent, "Colorado Dallas") + .OfType() + .ToList(); + +// Results: +// ======== +// Results will include 'employees/1-A' and 'employees/2-A'. +// Only these 2 documents have an attachment by name 'notes.txt' +// that contains either 'Colorado' or 'Dallas'. 
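+
+// (When multiple terms are passed to 'Search' in one string, as above,
+// they are combined with OR by default.)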
+`} + + + + +{`List employees = await asyncSession + // Query the index for matching employees + .Query() + // Can make a full-text search + // Looking for employees with an attachment content that contains 'Colorado' OR 'Dallas' + .Search(x => x.AttachmentContent, "Colorado Dallas") + .OfType() + .ToListAsync(); +`} + + + + +{`List employees = session.Advanced + .DocumentQuery() + .Search(x => x.AttachmentContent, "Colorado Dallas") + .OfType() + .ToList(); +`} + + + + +{`from index "Employees/ByAttachment" +where search(AttachmentContent, "Colorado Dallas") +`} + + + + + + +## Index details & content - all attachments + +**The index**: + +* Use `LoadAttachments()` to be able to index the **details & content** of ALL attachments. + +* Note how the index example below is employing the [Fanout index](../../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document) pattern. + + + + +{`public class Employees_ByAllAttachments : + AbstractIndexCreationTask +{ + public class IndexEntry + { + public string AttachmentName { get; set; } + public string AttachmentContentType { get; set; } + public long AttachmentSize { get; set; } + public string AttachmentContent { get; set; } + } + + public Employees_ByAllAttachments() + { + Map = employees => + + // Call 'LoadAttachments' to get details and content for ALL attachments + from employee in employees + from attachment in LoadAttachments(employee) + + // This will be a Fanout index - + // the index will generate an index-entry for each attachment per document + + select new IndexEntry + { + // Index DETAILS of attachment: + AttachmentName = attachment.Name, + AttachmentContentType = attachment.ContentType, + AttachmentSize = attachment.Size, + + // Index CONTENT of attachment: + // Call 'getContentAsString' to access content + AttachmentContent = attachment.GetContentAsString() + }; + + // It can be useful configure Full-Text search on the attachment content index-field + Index(x => x.AttachmentContent, FieldIndexing.Search); + } +} +`} + + + + +{`public class Employees_ByAllAttachments_JS : AbstractJavaScriptIndexCreationTask +{ + public Employees_ByAllAttachments_JS() + { + Maps = new HashSet + { + @"map('Employees', function (employee) { + const allAttachments = loadAttachments(employee); + + return allAttachments.map(function (attachment) { + return { + attachmentName: attachment.Name, + attachmentContentType: attachment.ContentType, + attachmentSize: attachment.Size, + attachmentContent: attachment.getContentAsString() + }; + }); + })" + }; + + Fields = new Dictionary + { + { + "attachmentContent", new IndexFieldOptions + { + Indexing = FieldIndexing.Search + } + } + }; + } +} +`} + + + +**Query the Index**: + + + + +{`// Query the index for matching employees +List employees = session + .Query() + // Filter employee results by their attachments details and content: + // Using 'SearchOptions.Or' combines the full-text search on 'AttachmentContent' + // with the following 'Where' condition using OR logic. 
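+    // Note: this is a fanout index, so each index-entry describes a single
+    // attachment, and both conditions below are evaluated per attachment.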
+ .Search(x => x.AttachmentContent, "Colorado Dallas", options: SearchOptions.Or) + .Where(x => x.AttachmentSize > 20_000) + .OfType() + .ToList(); + +// Results: +// ======== +// Results will include: +// 'employees/1-A' and 'employees/2-A' that match the content criteria +// 'employees/4-A' and 'employees/5-A' that match the size criteria +`} + + + + +{`List employees = await asyncSession + .Query() + .Search(x => x.AttachmentContent, "Colorado Dallas", options: SearchOptions.Or) + .Where(x => x.AttachmentSize > 20_000) + .OfType() + .ToListAsync(); +`} + + + + +{`List employees = session + .Advanced + .DocumentQuery() + .Search(x => x.AttachmentContent, "Colorado Dallas") + .OrElse() + .WhereGreaterThan(x => x.AttachmentSize, 20_000) + .OfType() + .ToList(); +`} + + + + +{`from index "Employees/ByAllAttachments" +where search(AttachmentContent, "Colorado Dallas") or AttachmentSize > 20000 +`} + + + + + + +## Leveraging indexed attachments + +* Access to the indexed attachment content opens the door to many different applications, + including many that can be integrated directly into RavenDB. + +* This [blog post](https://ayende.com/blog/192001-B/using-machine-learning-with-ravendb) demonstrates + how image recognition can be applied to indexed attachments using the [additional sources](../../indexes/extending-indexes.mdx) feature. + The resulting index allows filtering and querying based on image content. + + + +## Syntax + +#### `AttachmentsFor` + + + +{`// Returns a list of attachment details for the specified document. +IEnumerable AttachmentsFor(object document); +`} + + + +| Parameter | Type | Description | +|--------------|----------|-----------------------------------------------------------------| +| **document** | `object` | The document object whose attachments details you want to load. | + + + +{`// AttachmentsFor returns a list containing the following attachment details object: +public class AttachmentName +\{ + public string Name; + public string Hash; + public string ContentType; + public long Size; +\} +`} + + + +#### `LoadAttachment` + + + +{`// LoadAttachment returns attachment details and methods to access its content. +public IAttachmentObject LoadAttachment(object document, string attachmentName); +`} + + + +| Parameter | Type | Description | +|---------------------|-----------|-------------------------------------------------| +| **document** | `object` | The document whose attachment you want to load. | +| **attachmentName** | `string` | The name of the attachment to load. | + + + +{`public interface IAttachmentObject +\{ + public string Name \{ get; \} + public string Hash \{ get; \} + public string ContentType \{ get; \} + public long Size \{ get; \} + + public string GetContentAsString(); + public string GetContentAsString(Encoding encoding); + public Stream GetContentAsStream(); +\} +`} + + + +#### `LoadAttachments` + + + +{`// Returns a list of ALL attachments for the specified document. +public IEnumerable LoadAttachments(object document); +`} + + + +| Parameter | Type | Description | +|----------------|-----------|--------------------------------------------------| +| **document** | `object` | The document whose attachments you want to load. 
|
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_indexing-java.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-java.mdx
new file mode 100644
index 0000000000..3d3f1a7f93
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-java.mdx
@@ -0,0 +1,187 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Indexing attachments allows you to query for documents based on their attachments' details and content.
+
+* **Static indexes**:
+  Both attachments' details and content can be indexed within a static-index definition.
+
+* **Auto-indexes**:
+  Auto-indexing attachments via dynamic queries is not available at this time.
+
+* In this page:
+  * [Syntax](../../document-extensions/attachments/indexing.mdx#syntax)
+  * [Examples](../../document-extensions/attachments/indexing.mdx#examples)
+  * [Leveraging indexed attachments](../../document-extensions/attachments/indexing.mdx#leveraging-indexed-attachments)
+
+
+## Syntax
+
+### Using AttachmentsFor()
+
+The `AttachmentsFor` method returns information about each attachment that extends
+a specified document, including their names, sizes, and content type.
+
+
+
+{`IEnumerable<AttachmentName> AttachmentsFor(object doc);
+`}
+
+
+
+{`private String name;
+private String hash;
+private String contentType;
+private long size;
+`}
+
+
+
+The `AttachmentsFor` method is available in `AbstractIndexCreationTask`.
+
+### Using LoadAttachment()/LoadAttachments()
+
+`LoadAttachment()` loads an attachment to the index by document and attachment name.
+`LoadAttachments()` loads all the attachments of a given document.
+
+
+
+{`public IAttachmentObject LoadAttachment(object doc, string name);
+public IEnumerable<IAttachmentObject> LoadAttachments(object doc);
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **doc** | A server-side document, an entity | The document whose attachments you want to load |
+| **name** | `String` | The name of the attachment you want to load |
+
+#### GetContentAs Methods
+
+To access the attachment content itself, use `GetContentAsStream()`. To
+convert the content into a `string`, use `GetContentAsString()` with
+the desired character encoding.
+
+
+
+{`public Stream GetContentAsStream();
+
+public string GetContentAsString(Encoding encoding);
+
+public string GetContentAsString(); // Default: UTF-8
+`}
+
+
+
+
+
+## Examples
+
+#### Indexes with `AttachmentsFor()`
+
+
+
+
+{`public static class Employees_ByAttachmentNames extends AbstractIndexCreationTask {
+    public Employees_ByAttachmentNames() {
+        map = "from e in docs.Employees\\n" +
+            "let attachments = AttachmentsFor(e)\\n" +
+            "select new {\\n" +
+            "    attachmentNames = attachments.Select(x => x.Name).ToArray()\\n" +
+            "}";
+    }
+}
+`}
+
+
+
+
+#### Indexes with `LoadAttachment()`
+
+
+
+
+{`private class Companies_With_Attachments_JavaScript extends AbstractJavaScriptIndexCreationTask {
+    public Companies_With_Attachments_JavaScript() {
+        setMaps(Collections.singleton(
+            "map('Companies', function (company) {\\n" +
+            "    var attachment = LoadAttachment(company, company.ExternalId);\\n" +
+            "    return {\\n" +
+            "        CompanyName: company.Name,\\n" +
+            "        AttachmentName: attachment.Name,\\n" +
+            "        AttachmentContentType: attachment.ContentType,\\n" +
+            "        AttachmentHash: attachment.Hash,\\n" +
+            "        AttachmentSize: attachment.Size,\\n" +
+            "        AttachmentContent: attachment.getContentAsString('utf8')\\n" +
+            "    }\\n" +
+            "});"
+            )
+        );
+    }
+}
+`}
+
+
+
+
+#### Indexes with `LoadAttachments()`
+
+
+
+
+{`private class Companies_With_All_Attachments_JS extends AbstractJavaScriptIndexCreationTask {
+    public Companies_With_All_Attachments_JS() {
+        setMaps(Collections.singleton(
+            "map('Companies', function (company) {\\n" +
+            "    var attachments = LoadAttachments(company);\\n" +
+            "    return attachments.map(attachment => ({\\n" +
+            "        AttachmentName: attachment.Name,\\n" +
+            "        AttachmentContent: attachment.getContentAsString('utf8')\\n" +
+            "    }));\\n" +
+            "})"
+            )
+        );
+    }
+}
+`}
+
+
+
+
+#### Querying the Index
+
+
+
+
+{`// Return all employees that have an attachment called "cv.pdf"
+List<Employee> employees = session.query(Employee.class, Employees_ByAttachmentNames.class)
+    .containsAny("attachmentNames", Arrays.asList("cv.pdf"))
+    .toList();
+`}
+
+
+
+
+
+## Leveraging indexed attachments
+
+* Access to the indexed attachment content opens the door to many different applications,
+  including many that can be integrated directly into RavenDB.
+
+* In this [blog post](https://ayende.com/blog/192001-B/using-machine-learning-with-ravendb),
+  Oren Eini demonstrates how image recognition can be applied to indexed attachments using the [additional sources](../../indexes/extending-indexes.mdx) feature.
+  The resulting index allows filtering and querying based on image content.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_indexing-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-nodejs.mdx
new file mode 100644
index 0000000000..cbe219d55d
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-nodejs.mdx
@@ -0,0 +1,385 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Indexing attachments allows you to query for documents based on their attachments' details and content.
+
+* **Static indexes**:
+  Both attachments' details and content can be indexed within a static-index definition.
+
+* **Auto-indexes**:
+  Auto-indexing attachments via dynamic queries is not available at this time.
+ +* In this page: + * [Index attachments details](../../document-extensions/attachments/indexing.mdx#index-attachments-details) + * [Index details & content - by attachment name](../../document-extensions/attachments/indexing.mdx#index-details--content---by-attachment-name) + * [Index details & content - all attachments](../../document-extensions/attachments/indexing.mdx#index-details--content---all-attachments) + * [Leveraging indexed attachments](../../document-extensions/attachments/indexing.mdx#leveraging-indexed-attachments) + * [Syntax](../../document-extensions/attachments/indexing.mdx#syntax) + + +## Index attachments details + + + +**The index**: +* To index **attachments' details**, call `attachmentsFor()` within the index definition. + +* `attachmentsFor()` provides access to the **name**, **size**, **hash**, and **content-type** of each attachment a document has. + These details can then be used when defining the index-fields. + Once the index is deployed, you can query the index to find Employee documents based on these attachment properties. + +* To index **attachments' content**, see the examples [below](../../document-extensions/attachments/indexing.mdx#index-details--content---by-attachment-name). + + + +{`class Employees_ByAttachmentDetails extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + const \{ attachmentsFor \} = this.mapUtils(); + + this.map("employees", employee => \{ + // Call 'attachmentsFor' to get attachments details + const attachments = attachmentsFor(employee); + + return \{ + // Can index info from document properties: + employeeName: employee.FirstName + " " + employee.LastName, + + // Index DETAILS of attachments: + attachmentNames: attachments.map(x => x.Name), + attachmentContentTypes: attachments.map(x => x.ContentType), + attachmentSizes: attachments.map(x => x.Size) + \} + \}); + \} +\} +`} + + + + + + + +**Query the Index**: +You can now query for Employee documents based on their attachments details. + + + + +{`const employees = await session + // Query the index for matching employees + .query({ indexName: "Employees/ByAttachmentDetails" }) + // Filter employee results by their attachments details + .whereEquals("attachmentNames", "photo.jpg") + .whereGreaterThan("attachmentSizes", 20_000) + .all(); + +// Results: +// ======== +// Running this query on the Northwind sample data, +// results will include 'employees/4-A' and 'employees/5-A'. +// These 2 documents contain an attachment by name 'photo.jpg' with a matching size. +`} + + + + +{`from index "Employees/ByAttachmentDetails" +where attachmentNames == "photo.jpg" and attachmentSizes > 20000 +`} + + + + + + + +## Index details & content - by attachment name + + + +**Sample data**: +* Each Employee document in RavenDB's sample data already includes a _photo.jpg_ attachment. + +* For all following examples, let's store a textual attachment (file _notes.txt_) on 3 documents + in the 'Employees' collection. 
+ + + +{`const session = documentStore.openSession(); + +for (let i = 1; i <= 3; i++) \{ + // Load an employee document: + const employee = await session.load(\`employees/$\{i\}-A\`); + + // Store the employee's notes as an attachment on the document: + const stream = Buffer.from(employee.Notes[0]); + session.advanced.attachments.store(\`employees/$\{i\}-A\`, "notes.txt", stream, "text/plain"); +\} + +await session.saveChanges(); +`} + + + + + + + +**The index**: +* To index the **details & content** for a specific attachment, call `loadAttachment()` within the index definition. + +* In addition to accessing the attachment details, `loadAttachment()` provides access to the attachment's content, + which can be used when defining the index-fields. + + + +{`class Employees_ByAttachment extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + const \{ loadAttachment \} = this.mapUtils(); + + this.map("employees", employee => \{ + // Call 'loadAttachment' to get attachment's details and content + // pass the attachment name, e.g. "notes.txt" + const attachment = loadAttachment(employee, "notes.txt"); + + return \{ + // Index DETAILS of attachment: + attachmentName: attachment.Name, + attachmentContentType: attachment.ContentType, + attachmentSize: attachment.Size, + + // Index CONTENT of attachment: + // Call 'getContentAsString' to access content + attachmentContent: attachment.getContentAsString() + \} + \}); + + // It can be useful configure Full-Text search on the attachment content index-field + this.index("attachmentContent", "Search"); + + // Documents with an attachment named 'notes.txt' will be indexed, + // allowing you to query them by either the attachment's details or its content. + \} +\} +`} + + + + + + + +**Query the Index**: +You can now query for Employee documents based on their attachment details and/or its content. + + + + +{`const employees = await session + // Query the index for matching employees + .query({indexName: "Employees/ByAttachment"}) + // Can make a full-text search + // Looking for employees with an attachment content that contains 'Colorado' OR 'Dallas' + .search("attachmentContent", "Colorado Dallas") + .all(); + +// Results: +// ======== +// Results will include 'employees/1-A' and 'employees/2-A'. +// Only these 2 documents have an attachment by name 'notes.txt' +// that contains either 'Colorado' or 'Dallas'. +`} + + + + +{`from index "Employees/ByAttachment" +where search(attachmentContent, "Colorado Dallas") +`} + + + + + + + + +## Index details & content - all attachments + + + +**The index**: +* Use `loadAttachments()` to be able to index the **details & content** of ALL attachments. + +* Note how the index example below is employing the [Fanout index](../../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document) pattern. 
+ + + +{`class Employees_ByAllAttachments extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + const \{ loadAttachments \} = this.mapUtils(); + + this.map("employees", employee => \{ + // Call 'loadAttachments' to get details and content for ALL attachments + const allAttachments = loadAttachments(employee); + + // This will be a Fanout index - + // the index will generate an index-entry for each attachment per document + + return allAttachments.map(attachment => (\{ + + // Index DETAILS of attachment: + attachmentName: attachment.Name, + attachmentContentType: attachment.ContentType, + attachmentSize: attachment.Size, + + // Index CONTENT of attachment: + // Call 'getContentAsString' to access content + attachmentContent: attachment.getContentAsString() + \})); + \}); + + // It can be useful configure Full-Text search on the attachment content index-field + this.index("attachmentContent", "Search"); + \} +\} +`} + + + + + + + +**Query the Index**: + + + +{`const employees = await session + // Query the index for matching employees + .query({indexName: "Employees/ByAllAttachments"}) + // Filter employee results by their attachments details and content + .whereGreaterThan("attachmentSize", 20_000) + .orElse() + .search("attachmentContent", "Colorado Dallas") + .all(); + +// Results: +// ======== +// Results will include: +// 'employees/1-A' and 'employees/2-A' that match the content criteria +// 'employees/4-A' and 'employees/5-A' that match the size criteria +`} + + + + +{`from index "Employees/ByAllAttachments" +where attachmentSize > 20000 or search(attachmentContent, "Colorado Dallas") +`} + + + + + + + +## Leveraging indexed attachments + +* Access to the indexed attachment content opens the door to many different applications, + including many that can be integrated directly into RavenDB. + +* This [blog post](https://ayende.com/blog/192001-B/using-machine-learning-with-ravendb) demonstrates + how image recognition can be applied to indexed attachments using the [additional sources](../../indexes/extending-indexes.mdx) feature. + The resulting index allows filtering and querying based on image content. + + + +## Syntax + +#### `attachmentsFor` + + + +{`attachmentsFor(document); +`} + + + +| Parameter | Type | Description | +|--------------|----------|---------------------------------------------------------| +| **document** | `object` | The document whose attachments details you want to load | + + + +{`// Returns a list containing the following attachment details object: +\{ + name; // string + hash; // string + contentType; // string + size; // number +\} +`} + + + +#### `loadAttachment` + + + +{`loadAttachment(document, attachmentName); +`} + + + +| Parameter | Type | Description | +|---------------------|-----------|-------------------------------------------------| +| **document** | `object` | The document whose attachment you want to load. | +| **attachmentName** | `string` | The name of the attachment to load. | + + + +{`// Returns the following attachment object: +\{ + // Properties accessing DETAILS: + // ============================= + name; // string + hash; // string + contentType; // string + size; // number + + // Methods accessing CONTENT: + // ========================== + getContentAsStream(); + getContentAsString(encoding); + getContentAsString(); // Default encoding is "utf8" +\} +`} + + + +#### `loadAttachments` + + + +{`// Returns a list containing the above attachment object per attachment. 
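+// Each element exposes the same detail properties and the same
+// getContentAsStream() / getContentAsString() methods listed above.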
+loadAttachments(document);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------|-----------|--------------------------------------------------|
+| **document** | `object` | The document whose attachments you want to load. |
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_indexing-php.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-php.mdx
new file mode 100644
index 0000000000..5f6d6d47b0
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-php.mdx
@@ -0,0 +1,296 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Indexing attachments allows you to query for documents based on their attachments' details and content.
+
+* **Static indexes**:
+  Both attachments' details and content can be indexed within a static-index definition.
+
+* **Auto-indexes**:
+  Auto-indexing attachments via dynamic queries is not available at this time.
+
+
+* In this page:
+  * [Syntax](../../document-extensions/attachments/indexing.mdx#syntax)
+  * [Examples](../../document-extensions/attachments/indexing.mdx#examples)
+  * [Leveraging indexed attachments](../../document-extensions/attachments/indexing.mdx#leveraging-indexed-attachments)
+
+
+## Syntax
+
+### Using `AttachmentsFor`
+
+The `AttachmentsFor` method returns information about each attachment that extends
+a specified document, including their names, sizes, and content type.
+
+
+
+
+{`IEnumerable<AttachmentName> AttachmentsFor(object doc);
+`}
+
+
+
+
+{`public string Name;
+public string Hash;
+public string ContentType;
+public long Size;
+`}
+
+
+
+
+The `AttachmentsFor` method is available in `AbstractIndexCreationTask`.
+
+### Using `LoadAttachment`/`LoadAttachments`
+
+`LoadAttachment` loads an attachment to the index by document and attachment name.
+`LoadAttachments` loads all the attachments of a given document.
+
+
+
+{`public IAttachmentObject LoadAttachment(object doc, string name);
+public IEnumerable<IAttachmentObject> LoadAttachments(object doc);
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **doc** | A server-side document, an entity | The document whose attachments you want to load |
+| **name** | `string` | The name of the attachment you want to load |
+
+#### `GetContentAs` Methods
+
+To access the attachment content itself, use `GetContentAsStream`. To
+convert the content into a `string`, use `GetContentAsString` with
+the desired character encoding.
+
+
+
+{`public Stream GetContentAsStream();
+public string GetContentAsString(Encoding encoding);
+public string GetContentAsString(); // Default: UTF-8
+`}
+
+
+
+
+
+## Examples
+
+#### Indexes with `AttachmentsFor`:
+
+
+
+
+{`class Employees_ByAttachmentNames_Result
+{
+    public ?StringArray $attachmentNames = null;
+
+    public function getAttachmentNames(): ?StringArray
+    {
+        return $this->attachmentNames;
+    }
+
+    public function setAttachmentNames(?StringArray $attachmentNames): void
+    {
+        $this->attachmentNames = $attachmentNames;
+    }
+}
+class Employees_ByAttachmentNames extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map =
+            "map('Employees', function (e) {" .
+            "    var attachments = attachmentsFor(e);" .
+            "    return {" .
+            "        AttachmentNames: attachments.map(" .
+            "            function(attachment) {" .
+            "                return attachment.Name;" .
+            "            }" .
+            "        )" .
+            "    };" .
+ "})"; + } +} +`} + + + + +{`class Employees_ByAttachmentNames_JS_Result +{ + public ?StringArray $attachmentNames = null; + + public function getAttachmentNames(): ?StringArray + { + return $this->attachmentNames; + } + + public function setAttachmentNames(?StringArray $attachmentNames): void + { + $this->attachmentNames = $attachmentNames; + } +} +class Employees_ByAttachmentNames_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('Employees', function (e) { + var attachments = attachmentsFor(e); + return { + AttachmentNames: attachments.map( + function(attachment) { + return attachment.Name; + } + }; + })" + ]); + } +} +`} + + + +#### Indexes with `LoadAttachment`: + + + + +{`class Companies_With_Attachments extends AbstractIndexCreationTask +{ + + public function __construct() + { + parent::__construct(); + + $this->map = + "from company in companies" . + "let attachments = LoadAttachment(company, company.ExternalId)" . + "select new" . + "{" . + " CompanyName = company.Name," . + " AttachmentName = attachment.Name," . + " AttachmentContentType = attachment.ContentType," . + " AttachmentHash = attachment.Hash," . + " AttachmentContent = attachment.GetContentAsString(Encoding.UTF8)," . + "}"; + } +} +`} + + + + +{`class Companies_With_Attachments_JavaScript extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('Companies', function (company) {\\n" . + " var attachment = LoadAttachment(company, company.ExternalId);\\n" . + " return {\\n" . + " CompanyName: company.Name,\\n" . + " AttachmentName: attachment.Name,\\n" . + " AttachmentContentType: attachment.ContentType,\\n" . + " AttachmentHash: attachment.Hash,\\n" . + " AttachmentSize: attachment.Size,\\n" . + " AttachmentContent: attachment.getContentAsString('utf8')\\n" . + " }\\n". + "});" + ]); + } +} +`} + + + +#### Indexes with `LoadAttachments`: + + + + +{`class Companies_With_All_Attachments extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "from company in companies " . + "let attachments = LoadAttachments(company)" . + "from attachment in attachments" . + "select new" . + "{" . + " attachment_name = attachment.Name," . + " attachment_content = attachment.GetContentAsString(Encoding.UTF8)" . + "}"; + } +} +`} + + + + +{`class Companies_With_All_Attachments_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('Companies', function (company) {\\n" . + " var attachments = LoadAttachments(company);\\n" . + " return attachments.map(attachment => ({\\n" . + " AttachmentName: attachment.Name,\\n" . + " AttachmentContent: attachment.getContentAsString('utf8')\\n" . + " }));\\n" . + "})" + ]; + } +} +`} + + + +#### Querying the Index: + + + +{`//return all employees that have an attachment called "cv.pdf" +/** @var array $employees */ +$employees = $session + ->query(Employees_ByAttachmentNames_Result::class, Employees_ByAttachmentNames::class) + ->whereContainsAny("AttachmentNames", ["cv.pdf"]) + ->ofType(Employee::class) + ->toList(); +`} + + + + + +## Leveraging indexed attachments + +* Access to the indexed attachment content opens a door to many different applications, + including ones that can be integrated directly into RavenDB. 
+
+* In this [blog post](https://ayende.com/blog/192001-B/using-machine-learning-with-ravendb),
+  Oren Eini demonstrates how image recognition can be applied to indexed attachments using the
+  `additional sources` feature.
+  The resulting index allows filtering and querying based on image content.
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_indexing-python.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-python.mdx
new file mode 100644
index 0000000000..bc70ead547
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_indexing-python.mdx
@@ -0,0 +1,260 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Indexing attachments allows you to query for documents based on their attachments' details and content.
+
+* **Static indexes**:
+  Both attachments' details and content can be indexed within a static-index definition.
+
+* **Auto-indexes**:
+  Auto-indexing attachments via dynamic queries is not available at this time.
+
+
+* In this page:
+  * [Syntax](../../document-extensions/attachments/indexing.mdx#syntax)
+  * [Examples](../../document-extensions/attachments/indexing.mdx#examples)
+  * [Leveraging indexed attachments](../../document-extensions/attachments/indexing.mdx#leveraging-indexed-attachments)
+
+
+## Syntax
+
+### Using `AttachmentsFor`
+
+The `AttachmentsFor` method returns information about each attachment that extends
+a specified document, including their names, sizes, and content type.
+
+
+
+{`IEnumerable<AttachmentName> AttachmentsFor(object doc);
+`}
+
+
+
+{`public string Name;
+public string Hash;
+public string ContentType;
+public long Size;
+`}
+
+
+
+The `AttachmentsFor` method is available in `AbstractIndexCreationTask`.
+
+### Using `LoadAttachment`/`LoadAttachments`
+
+`LoadAttachment` loads an attachment to the index by document and attachment name.
+`LoadAttachments` loads all the attachments of a given document.
+
+
+
+{`public IAttachmentObject LoadAttachment(object doc, string name);
+public IEnumerable<IAttachmentObject> LoadAttachments(object doc);
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **doc** | A server-side document, an entity | The document whose attachments you want to load |
+| **name** | `string` | The name of the attachment you want to load |
+
+#### `GetContentAs` Methods
+
+To access the attachment content itself, use `GetContentAsStream`. To
+convert the content into a `string`, use `GetContentAsString` with
+the desired character encoding.
+
+
+
+{`public Stream GetContentAsStream();
+public string GetContentAsString(Encoding encoding);
+public string GetContentAsString(); // Default: UTF-8
+`}
+
+
+
+
+## Examples
+
+#### Indexes with `AttachmentsFor`
+
+
+
+{`class Employees_ByAttachmentNames(AbstractIndexCreationTask):
+    class Result:
+        def __init__(self, attachment_names: List[str] = None):
+            self.attachment_names = attachment_names
+
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from e in employees "
+            "let attachments = AttachmentsFor(e) "
+            "select new "
+            "{"
+            "    attachment_names = attachments.Select(x => x.Name).ToArray()"
+            "}"
+        )
+`}
+
+
+
+{`class Employees_ByAttachmentNames_JS(AbstractJavaScriptIndexCreationTask):
+    class Result:
+        def __init__(self, attachment_names: List[str] = None):
+            self.attachment_names = attachment_names
+
+    def __init__(self):
+        super().__init__()
+        self.maps = {
+            """
+            map('Employees', function (e) {
+                var attachments = attachmentsFor(e);
+                return {
+                    attachment_names: attachments.map(
+                        function(attachment) {
+                            return attachment.Name;
+                        })
+                };
+            })
+            """
+        }
+`}
+
+
+
+#### Indexes with `LoadAttachment`
+
+
+
+{`class Companies_With_Attachments(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from company in companies "
+            "let attachment = LoadAttachment(company, company.ExternalId) "
+            "select new "
+            "{"
+            "    company_name = company.Name, "
+            "    attachment_name = attachment.Name, "
+            "    attachment_content_type = attachment.ContentType, "
+            "    attachment_hash = attachment.Hash, "
+            "    attachment_size = attachment.Size, "
+            "    attachment_content = attachment.GetContentAsString(Encoding.UTF8) "
+            "}"
+        )
+`}
+
+
+
+{`class Companies_With_Attachments_JavaScript(AbstractJavaScriptIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.maps = {
+            """
+            map('Companies', function (company) {
+                var attachment = loadAttachment(company, company.ExternalId);
+                return {
+                    company_name: company.Name,
+                    attachment_name: attachment.Name,
+                    attachment_content_type: attachment.ContentType,
+                    attachment_hash: attachment.Hash,
+                    attachment_size: attachment.Size,
+                    attachment_content: attachment.getContentAsString('utf8')
+                };
+            })
+            """
+        }
+`}
+
+
+
+#### Indexes with `LoadAttachments`
+
+
+
+{`class Companies_With_All_Attachments(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from company in companies "
+            "let attachments = LoadAttachments(company) "
+            "from attachment in attachments "
+            "select new "
+            "{"
+            "    attachment_name = attachment.Name,"
+            "    attachment_content = attachment.GetContentAsString(Encoding.UTF8)"
+            "}"
+        )
+`}
+
+
+
+{`class Companies_With_All_Attachments_JS(AbstractJavaScriptIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.maps = {
+            """
+            map('Companies', function (company) {
+                var attachments = loadAttachments(company);
+                return attachments.map(attachment => ({
+                    attachment_name: attachment.Name,
+                    attachment_content: attachment.getContentAsString('utf8')
+                }));
+            })
+            """
+        }
+`}
+
+
+
+#### Querying the Index
+
+
+
+{`# return all employees that have an attachment called "cv.pdf"
+employees = list(
+    session.query_index_type(
+        Employees_ByAttachmentNames, Employees_ByAttachmentNames.Result
+    ).contains_any("attachment_names", ["cv.pdf"])
+)
+`}
+
+
+
+
+## Leveraging indexed attachments
+
+* Access to the indexed attachment content opens the door to many different applications,
+  including ones that can be integrated directly into RavenDB.
+
+* In this [blog post](https://ayende.com/blog/192001-B/using-machine-learning-with-ravendb),
+  Oren Eini demonstrates how image recognition can be applied to indexed attachments using the
+  [additional sources](../../indexes/extending-indexes.mdx) feature.
+  The resulting index allows filtering and querying based on image content.
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_loading-csharp.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_loading-csharp.mdx
new file mode 100644
index 0000000000..cf088ddae7
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_loading-csharp.mdx
@@ -0,0 +1,231 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+Learn in this page how to load a part of an attachment, an entire attachment,
+or multiple attachments.
+
+* In this page:
+  * [Load attachments](../../document-extensions/attachments/loading.mdx#load-attachments)
+  * [Load a part of an attachment](../../document-extensions/attachments/loading.mdx#load-a-part-of-an-attachment)
+
+
+## Load attachments
+
+* Use these methods to load attachments from the database:
+  * **session.Advanced.Attachments.Get**
+    Can be used to download an attachment or multiple attachments.
+  * **session.Advanced.Attachments.GetNames**
+    Can be used to download all attachment names that are attached to a document.
+  * **session.Advanced.Attachments.GetRevision**
+    Can be used to download an attachment of a revision document.
+
+* Use this method to verify that an attachment exists:
+  * **session.Advanced.Attachments.Exists**
+
+## Syntax
+
+
+
+{`AttachmentResult Get(string documentId, string name);
+AttachmentResult Get(object entity, string name);
+IEnumerator<AttachmentEnumeratorResult> Get(IEnumerable<AttachmentRequest> attachments);
+AttachmentName[] GetNames(object entity);
+AttachmentResult GetRevision(string documentId, string name, string changeVector);
+bool Exists(string documentId, string name);
+`}
+
+
+
+{`Task<AttachmentResult> GetAsync(string documentId, string name, CancellationToken token = default);
+Task<AttachmentResult> GetAsync(object entity, string name, CancellationToken token = default);
+Task<IEnumerator<AttachmentEnumeratorResult>> GetAsync(IEnumerable<AttachmentRequest> attachments, CancellationToken token = default);
+Task<AttachmentResult> GetRevisionAsync(string documentId, string name, string changeVector, CancellationToken token = default);
+Task<bool> ExistsAsync(string documentId, string name, CancellationToken token = default);
+`}
+
+
+
+## Example I
+
+
+
+{`using (var session = store.OpenSession())
+{
+    Album album = session.Load<Album>("albums/1");
+
+    using (AttachmentResult file1 = session.Advanced.Attachments.Get(album, "001.jpg"))
+    using (AttachmentResult file2 = session.Advanced.Attachments.Get("albums/1", "002.jpg"))
+    {
+        Stream stream = file1.Stream;
+
+        AttachmentDetails attachmentDetails = file1.Details;
+        string name = attachmentDetails.Name;
+        string contentType = attachmentDetails.ContentType;
+        string hash = attachmentDetails.Hash;
+        long size = attachmentDetails.Size;
+        string documentId = attachmentDetails.DocumentId;
+        string changeVector = attachmentDetails.ChangeVector;
+    }
+
+    AttachmentName[] attachmentNames = session.Advanced.Attachments.GetNames(album);
+    foreach (AttachmentName attachmentName in attachmentNames)
+    {
+        string name = attachmentName.Name;
+        string contentType = attachmentName.ContentType;
+        string hash = attachmentName.Hash;
+        long size = attachmentName.Size;
+    }
+
+    bool exists = session.Advanced.Attachments.Exists("albums/1", "003.jpg");
+}
+`}
+
+
+
+{`using (var asyncSession = store.OpenAsyncSession())
+{
+    Album album = await asyncSession.LoadAsync<Album>("albums/1");
+
+    using (AttachmentResult file1 = await asyncSession.Advanced.Attachments.GetAsync(album, "001.jpg"))
+    using (AttachmentResult file2 = await asyncSession.Advanced.Attachments.GetAsync("albums/1", "002.jpg"))
+    {
+        Stream stream = file1.Stream;
+
+        AttachmentDetails attachmentDetails = file1.Details;
+        string name = attachmentDetails.Name;
+        string contentType = attachmentDetails.ContentType;
+        string hash = attachmentDetails.Hash;
+        long size = attachmentDetails.Size;
+        string documentId = attachmentDetails.DocumentId;
+        string changeVector = attachmentDetails.ChangeVector;
+    }
+
+    AttachmentName[] attachmentNames = asyncSession.Advanced.Attachments.GetNames(album);
+    foreach (AttachmentName attachmentName in attachmentNames)
+    {
+        string name = attachmentName.Name;
+        string contentType = attachmentName.ContentType;
+        string hash = attachmentName.Hash;
+        long size = attachmentName.Size;
+    }
+
+    bool exists = await asyncSession.Advanced.Attachments.ExistsAsync("albums/1", "003.jpg");
+}
+`}
+
+
+
+## Example II
+Here, we load multiple string attachments we previously created for a document. We then
+go through them and decode each attachment to its original text.
+
+
+
+{`// Load a user profile
+var user = session.Load<User>(userId);
+
+// Get the names of files attached to this document
+IEnumerable<AttachmentRequest> attachmentNames = session.Advanced.Attachments.GetNames(user).Select(x => new AttachmentRequest(userId, x.Name));
+
+// Get the attached files
+IEnumerator<AttachmentEnumeratorResult> attachmentsEnumerator = session.Advanced.Attachments.Get(attachmentNames);
+
+// Go through the document's attachments
+while (attachmentsEnumerator.MoveNext())
+{
+    AttachmentEnumeratorResult res = attachmentsEnumerator.Current;
+
+    AttachmentDetails attachmentDetails = res.Details; // attachment details
+
+    Stream attachmentStream = res.Stream; // attachment contents
+
+    // In this case it is a string attachment, that can be decoded back to text
+    var ms = new MemoryStream();
+    attachmentStream.CopyTo(ms);
+    string decodedStream = Encoding.UTF8.GetString(ms.ToArray());
+}
+`}
+
+
+
+{`// Load a user profile
+var user = await session.LoadAsync<User>(userId);
+
+// Get the names of files attached to this document
+IEnumerable<AttachmentRequest> attachmentNames = session.Advanced.Attachments.GetNames(user).Select(x => new AttachmentRequest(userId, x.Name));
+
+// Get the attached files
+IEnumerator<AttachmentEnumeratorResult> attachmentsEnumerator = await session.Advanced.Attachments.GetAsync(attachmentNames);
+
+// Go through the document's attachments
+while (attachmentsEnumerator.MoveNext())
+{
+    AttachmentEnumeratorResult res = attachmentsEnumerator.Current;
+
+    AttachmentDetails attachmentDetails = res.Details; // attachment details
+
+    Stream attachmentStream = res.Stream; // attachment contents
+
+    // In this case it is a string attachment, that can be decoded back to text
+    var ms = new MemoryStream();
+    attachmentStream.CopyTo(ms);
+    string decodedStream = Encoding.UTF8.GetString(ms.ToArray());
+}
+`}
+
+
+
+
+## Load a part of an attachment
+
+Use `GetRange` to load a part of an attachment by document ID and the attachment name.
+
+## Syntax
+
+
+{`// Returns a range of the attachment by the document id and attachment name.
+AttachmentResult GetRange(string documentId, string name, long? from, long? to);
+
+// Returns a range of the attachment by the entity and attachment name.
+AttachmentResult GetRange(object entity, string name, long? from, long? to);
+`}
+
+
+
+## Sample
+
+
+
+{`Album album = session.Load<Album>("albums/1");
+
+AttachmentResult attachmentPart = session.Advanced.Attachments.GetRange(
+    album, "track1.mp3", 101, 200);
+`}
+
+
+
+{`Album album = await asyncSession.LoadAsync<Album>("albums/1");
+
+AttachmentResult file1 = await asyncSession.Advanced.Attachments.GetRangeAsync(
+    album, "track1.mp3", 101, 200);
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_loading-java.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_loading-java.mdx
new file mode 100644
index 0000000000..60560f3ea0
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_loading-java.mdx
@@ -0,0 +1,69 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+There are a few methods that allow you to download attachments from a database:
+
+**session.advanced().attachments().get** can be used to download an attachment.
+**session.advanced().attachments().getNames** can be used to download all attachment names that are attached to a document.
+**session.advanced().attachments().getRevision** can be used to download an attachment of a revision document.
+**session.advanced().attachments().exists** can be used to determine if an attachment exists on a document.
+
+## Syntax
+
+
+
+{`AttachmentName[] getNames(Object entity);
+
+boolean exists(String documentId, String name);
+
+CloseableAttachmentResult get(String documentId, String name);
+
+CloseableAttachmentResult get(Object entity, String name);
+
+CloseableAttachmentResult getRevision(String documentId, String name, String changeVector);
+`}
+
+
+
+## Example
+
+
+
+{`try (IDocumentSession session = store.openSession()) \{
+    Album album = session.load(Album.class, "albums/1");
+
+    try (CloseableAttachmentResult file1 = session
+            .advanced().attachments().get(album, "001.jpg");
+        CloseableAttachmentResult file2 = session
+            .advanced().attachments().get("albums/1", "002.jpg")) \{
+
+        InputStream inputStream = file1.getData();
+
+        AttachmentDetails attachmentDetails = file1.getDetails();
+        String name = attachmentDetails.getName();
+        String contentType = attachmentDetails.getContentType();
+        String hash = attachmentDetails.getHash();
+        long size = attachmentDetails.getSize();
+        String documentId = attachmentDetails.getDocumentId();
+        String changeVector = attachmentDetails.getChangeVector();
+    \}
+
+    AttachmentName[] attachmentNames = session.advanced().attachments().getNames(album);
+    for (AttachmentName attachmentName : attachmentNames) \{
+
+        String name = attachmentName.getName();
+        String contentType = attachmentName.getContentType();
+        String hash = attachmentName.getHash();
+        long size = attachmentName.getSize();
+    \}
+
+    boolean exists = session.advanced().attachments().exists("albums/1", "003.jpg");
+\}
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_loading-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_loading-nodejs.mdx
new file mode 100644
index 0000000000..8df5bc3ea3
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_loading-nodejs.mdx
@@ -0,0 +1,76 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+There are a few methods that allow you to download attachments from a database:
+
+**session.advanced.attachments.get()** can be used to download an attachment.
+**session.advanced.attachments.getNames()** can be used to download all attachment names that are attached to a document.
+**session.advanced.attachments.getRevision()** can be used to download an attachment of a revision document.
+**session.advanced.attachments.exists()** can be used to determine if an attachment exists on a document.
+
+## Syntax
+
+
+
+{`session.advanced.attachments.getNames(entity);
+
+session.advanced.attachments.exists(documentId, name);
+
+session.advanced.attachments.get(documentId, name);
+
+session.advanced.attachments.get(entity, name);
+
+session.advanced.attachments.getRevision(documentId, name, changeVector);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** or **documentId** | object or string | instance of the entity or the entity ID |
+| **name** | string | attachment name |
+| **changeVector** | string | change vector for revision identification |
+
+| Return Value | |
+| ------------- | ------------- |
+| `Promise` | Promise resolving to the attachment's content stream (`data`) and details |
+
+## Example
+
+
+
+{`const album = await session.load("albums/1");
+
+const file1 = await session.advanced.attachments.get(album, "001.jpg");
+const file2 = await session.advanced.attachments.get("albums/1", "002.jpg");
+
+const inputStream = file1.data;
+
+const attachmentDetails = file1.details;
+// \{
+//     name: '001.jpg',
+//     documentId: 'albums/1',
+//     contentType: 'image/jpeg',
+//     hash: 'MvUEcrFHSVDts5ZQv2bQ3r9RwtynqnyJzIbNYzu1ZXk=',
+//     changeVector: '"A:3-K5TR36dafUC98AItzIa6ow"',
+//     size: 25793
+// \}
+
+const attachmentNames = await session.advanced.attachments.getNames(album);
+for (const attachmentName of attachmentNames) \{
+    const name = attachmentName.name;
+    const contentType = attachmentName.contentType;
+    const hash = attachmentName.hash;
+    const size = attachmentName.size;
+\}
+
+const exists = await session.advanced.attachments.exists("albums/1", "003.jpg");
+// true
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_loading-php.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_loading-php.mdx
new file mode 100644
index 0000000000..f6464f9a08
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_loading-php.mdx
@@ -0,0 +1,64 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Several methods allow you to download attachments from a database:
+
+Use `session.advanced.attachments.get` to download an attachment or multiple attachments.
+Use `session.advanced.attachments.getNames` to get all the names of a document's attachments.
+Use `session.advanced.attachments.getRevision` to download an attachment of a document revision.
+Use `session.advanced.attachments.exists` to determine if an attachment exists on a document.
+ +## Syntax + + + +{`function get(object|string $idOrEntity, ?string $name): CloseableAttachmentResult; +function getNames(?object $entity): AttachmentNameArray; +function exists(?string $documentId, ?string $name): bool; +function getRevision(?string $documentId, ?string $name, ?string $changeVector): CloseableAttachmentResult; +`} + + + +## Example I + + + +{`$session = $store->openSession(); +try \{ + $album = $session->load(Album::class, "albums/1"); + + $file1 = $session->advanced()->attachments()->get($album, "001.jpg"); + $file2 = $session->advanced()->attachments()->get("albums/1", "002.jpg"); + + $data = $file1->getData(); + + $attachmentDetails = $file1->getDetails(); + $name = $attachmentDetails->getName(); + $contentType = $attachmentDetails->getContentType(); + $hash = $attachmentDetails->getHash(); + $size = $attachmentDetails->getSize(); + $documentId = $attachmentDetails->getDocumentId(); + $changeVector = $attachmentDetails->getChangeVector(); + + $attachmentNames = $session->advanced()->attachments()->getNames($album); + /** @var AttachmentName $attachmentName */ + foreach ($attachmentNames as $attachmentName) + \{ + $name = $attachmentName->getName(); + $contentType = $attachmentName->getContentType(); + $hash = $attachmentName->getHash(); + $size = $attachmentName->getSize(); + \} + + $exists = $session->advanced()->attachments()->exists("albums/1", "003.jpg"); +\} finally \{ + $session->close(); +\} +`} + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_loading-python.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_loading-python.mdx new file mode 100644 index 0000000000..c6f8ce6f8e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_loading-python.mdx @@ -0,0 +1,72 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Several methods allow you to download attachments from a database: + +Use `session.advanced.attachments.get` to download an attachment or multiple attachments. +Use `session.advanced.attachments.get_names` to get all the names of a document's attachments. +Use `session.advanced.attachments.get_revision` to download an attachment of a document revision. +Use `session.advanced.attachments.exists` to determine if an attachment exists on a document. + +## Syntax + + + +{`def get(self, entity_or_document_id: str = None, name: str = None) -> CloseableAttachmentResult: ... + +class CloseableAttachmentResult: + def __init__(self, response: requests.Response, details: AttachmentDetails): + self.__details = details + self.__response = response + +class AttachmentDetails(AttachmentName): + def __init__( + self, name: str, hash: str, content_type: str, size: int, change_vector: str = None, document_id: str = None + ): + super().__init__(...) + ... + +class AttachmentName: + def __init__(self, name: str, hash: str, content_type: str, size: int): ... + +def get_names(self, entity: object) -> List[AttachmentName]: ... + +def exists(self, document_id: str, name: str) -> bool: ... 
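+
+# get_revision is also mentioned above but its signature is not shown here.
+# By analogy with the other clients it is assumed to look like the following
+# (an assumption, not verified against the Python client):
+def get_revision(self, document_id: str, name: str, change_vector: str) -> CloseableAttachmentResult: ...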
+`} + + + +## Example I + + + +{`with store.open_session() as session: + album = session.load("albums/1", Album) + + with session.advanced.attachments.get(album, "001.jpg") as file1: + with session.advanced.attachments.get("albums/1", "002.jpg") as file2: + bytes_data = file1.data + + attachment_details = file1.details + name = attachment_details.name + content_type = attachment_details.content_type + hash_ = attachment_details.hash + size = attachment_details.size + document_id = attachment_details.document_id + change_vector = attachment_details.change_vector + + attachment_names = session.advanced.attachments.get_names(album) + for attachment_name in attachment_names: + name = attachment_name.name + content_type = attachment_name.content_type + hash_ = attachment_name.hash + size = attachment_name.size + + exists = session.advanced.attachments.exists("albums/1", "003.jpg") +`} + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_storing-csharp.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_storing-csharp.mdx new file mode 100644 index 0000000000..4562e5b827 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_storing-csharp.mdx @@ -0,0 +1,79 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +In order to store an attachment in RavenDB you need to create a document. Then you can attach an attachment to the document using the `session.Advanced.Attachments.Store` method. + +Attachments, just like documents, are a part of the session and will only be saved on the Server when `DocumentSession.SaveChanges` is executed (you can read more about saving changes in session [here](../../client-api/session/saving-changes.mdx)). 
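+
+For example, a minimal sketch of this flow (assuming a `users/1` document already exists; the attachment name and content below are illustrative):
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Register the attachment on the session - nothing is sent to the server yet
+    var content = new MemoryStream(Encoding.UTF8.GetBytes("some binary content"));
+    session.Advanced.Attachments.Store("users/1", "notes.txt", content, "text/plain");
+
+    // The attachment is persisted only when SaveChanges is executed
+    session.SaveChanges();
+}
+`}
+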
+ +## Syntax + +Attachments can be stored using one of the following `session.Advanced.Attachments.Store` methods: + + + +{`void Store(string documentId, string name, Stream stream, string contentType = null); +void Store(object entity, string name, Stream stream, string contentType = null); +`} + + + +## Example + + + + +{`using (var session = store.OpenSession()) +using (var file1 = File.Open("001.jpg", FileMode.Open)) +using (var file2 = File.Open("002.jpg", FileMode.Open)) +using (var file3 = File.Open("003.jpg", FileMode.Open)) +using (var file4 = File.Open("004.mp4", FileMode.Open)) +{ + var album = new Album + { + Name = "Holidays", + Description = "Holidays travel pictures of the all family", + Tags = new[] { "Holidays Travel", "All Family" }, + }; + session.Store(album, "albums/1"); + + session.Advanced.Attachments.Store("albums/1", "001.jpg", file1, "image/jpeg"); + session.Advanced.Attachments.Store("albums/1", "002.jpg", file2, "image/jpeg"); + session.Advanced.Attachments.Store("albums/1", "003.jpg", file3, "image/jpeg"); + session.Advanced.Attachments.Store("albums/1", "004.mp4", file4, "video/mp4"); + + session.SaveChanges(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +using (var file1 = File.Open("001.jpg", FileMode.Open)) +using (var file2 = File.Open("002.jpg", FileMode.Open)) +using (var file3 = File.Open("003.jpg", FileMode.Open)) +using (var file4 = File.Open("004.mp4", FileMode.Open)) +{ + var album = new Album + { + Name = "Holidays", + Description = "Holidays travel pictures of the all family", + Tags = new[] { "Holidays Travel", "All Family" }, + }; + await asyncSession.StoreAsync(album, "albums/1"); + + asyncSession.Advanced.Attachments.Store("albums/1", "001.jpg", file1, "image/jpeg"); + asyncSession.Advanced.Attachments.Store("albums/1", "002.jpg", file2, "image/jpeg"); + asyncSession.Advanced.Attachments.Store("albums/1", "003.jpg", file3, "image/jpeg"); + asyncSession.Advanced.Attachments.Store("albums/1", "004.mp4", file4, "video/mp4"); + + await asyncSession.SaveChangesAsync(); +} +`} + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_storing-java.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_storing-java.mdx new file mode 100644 index 0000000000..bfa024b483 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_storing-java.mdx @@ -0,0 +1,60 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +In order to store an attachment in RavenDB you need to create a document. Then you can attach an attachment to the document using the `session.advanced().attachments().store` method. + +Attachments, just like documents, are a part of the session and will only be saved on the Server when `DocumentSession.saveChanges` is executed (you can read more about saving changes in session [here](../../client-api/session/saving-changes.mdx)). 
+ +## Syntax + +Attachments can be stored using one of the following `session.advanced().attachments().store` methods: + + + +{`void store(String documentId, String name, InputStream stream); + +void store(String documentId, String name, InputStream stream, String contentType); + +void store(Object entity, String name, InputStream stream); + +void store(Object entity, String name, InputStream stream, String contentType); +`} + + + +## Example + + + +{`try (IDocumentSession session = store.openSession()) \{ + try ( + FileInputStream file1 = new FileInputStream("001.jpg"); + FileInputStream file2 = new FileInputStream("002.jpg"); + FileInputStream file3 = new FileInputStream("003.jpg"); + FileInputStream file4 = new FileInputStream("004.mp4") + ) \{ + Album album = new Album(); + album.setName("Holidays"); + album.setDescription("Holidays travel pictures of the all family"); + album.setTags(new String[] \{ "Holidays Travel", "All Family" \}); + session.store(album, "albums/1"); + + session.advanced().attachments() + .store("albums/1", "001.jpg", file1, "image/jpeg"); + session.advanced().attachments() + .store("albums/1", "002.jpg", file2, "image/jpeg"); + session.advanced().attachments() + .store("albums/1", "003.jpg", file3, "image/jpeg"); + session.advanced().attachments() + .store("albums/1", "004.mp4", file4, "video/mp4"); + + session.saveChanges(); + \} +\} +`} + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_storing-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_storing-nodejs.mdx new file mode 100644 index 0000000000..65910b461a --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/_storing-nodejs.mdx @@ -0,0 +1,61 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +In order to store an attachment in RavenDB you need to create a document. Then you can attach an attachment to the document using the `session.advanced.attachments.store()` method. + +Attachments, just like documents, are a part of the session and will only be saved on the Server when `DocumentSession.saveChanges()` is executed. 
+
+## Syntax
+
+Attachments can be stored using one of the following `session.advanced.attachments.store()` methods:
+
+
+
+{`session.advanced.attachments.store(documentId, name, stream, [contentType]);
+
+session.advanced.attachments.store(entity, name, stream, [contentType]);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **entity** or **documentId** | object or string | instance of the entity or the entity ID |
+| **name** | string | attachment name |
+| **stream** | `Readable` or `Buffer` | attachment content |
+| **contentType** | string | attachment content type |
+
+## Example
+
+
+
+{`const session = store.openSession();
+
+const file1 = fs.createReadStream("001.jpg");
+const file2 = fs.createReadStream("002.jpg");
+const file3 = fs.createReadStream("003.jpg");
+const file4 = fs.createReadStream("004.mp4");
+
+const album = new Album();
+album.name = "Holidays";
+album.description = "Holidays travel pictures of the all family";
+album.tags = [ "Holidays Travel", "All Family" ];
+await session.store(album, "albums/1");
+
+session.advanced.attachments
+    .store("albums/1", "001.jpg", file1, "image/jpeg");
+session.advanced.attachments
+    .store("albums/1", "002.jpg", file2, "image/jpeg");
+session.advanced.attachments
+    .store("albums/1", "003.jpg", file3, "image/jpeg");
+session.advanced.attachments
+    .store("albums/1", "004.mp4", file4, "video/mp4");
+
+await session.saveChanges();
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_storing-php.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_storing-php.mdx
new file mode 100644
index 0000000000..9d945fd0ee
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_storing-php.mdx
@@ -0,0 +1,58 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To store an attachment in RavenDB, you need to first create a document.
+Then, you can attach an attachment to the document using `session.advanced.attachments.store`
+or `session.advanced.attachments.storeFile`.
+
+Just like documents, attachments are a part of the session and will only be saved on the server
+when `saveChanges` is executed (read more about saving changes in session [here](../../client-api/session/saving-changes.mdx)).
+
+## Syntax
+
+Attachments can be stored using `session.advanced.attachments.store`
+or `session.advanced.attachments.storeFile`:
+
+
+
+{`public function store(object|string $idOrEntity, ?string $name, mixed $stream, ?string $contentType = null): void;
+
+public function storeFile(object|string $idOrEntity, ?string $name, string $filePath): void;
+`}
+
+
+
+## Example
+
+
+
+{`$session = $store->openSession();
+try \{
+    $file1 = file_get_contents("001.jpg");
+    $file2 = file_get_contents("002.jpg");
+    $file3 = file_get_contents("003.jpg");
+    $file4 = file_get_contents("004.mp4");
+
+    $album = new Album();
+    $album->setName("Holidays");
+    $album->setDescription("Holidays travel pictures of the all family");
+    $album->setTags(["Holidays Travel", "All Family"]);
+
+    $session->store($album, "albums/1");
+
+    $session->advanced()->attachments()->store("albums/1", "001.jpg", $file1, "image/jpeg");
+    $session->advanced()->attachments()->store("albums/1", "002.jpg", $file2, "image/jpeg");
+    $session->advanced()->attachments()->store("albums/1", "003.jpg", $file3, "image/jpeg");
+    $session->advanced()->attachments()->store("albums/1", "004.mp4", $file4, "video/mp4");
+
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_storing-python.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_storing-python.mdx
new file mode 100644
index 0000000000..123a096272
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_storing-python.mdx
@@ -0,0 +1,57 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To store an attachment in RavenDB, you need to first create a document.
+Then, you can attach an attachment to the document using the `session.advanced.attachments.store` method.
+
+Just like documents, attachments are a part of the session and will only be saved on the server
+when `save_changes` is executed (read more about saving changes in session [here](../../client-api/session/saving-changes.mdx)).
+
+## Syntax
+
+Attachments can be stored using `session.advanced.attachments.store`:
+
+
+
+{`def store(
+    self,
+    entity_or_document_id: Union[object, str],
+    name: str,
+    stream: bytes,
+    content_type: str = None,
+    change_vector: str = None,
+): ...
+`}
+
+
+
+## Example
+
+
+
+{`with store.open_session() as session:
+    with open("001.jpg", "rb") as file1:
+        file1_data = file1.read()
+    file2_data = b"file_2_content"  # Mock
+    file3_data = b"file_3_content"  # Mock
+    file4_data = b"file_4_content"  # Mock
+    album = Album(
+        name="Holidays",
+        description="Holidays travel pictures of the all family",
+        tags=["Holidays Travel", "All Family"],
+    )
+    session.store(album, "albums/1")
+
+    session.advanced.attachments.store("albums/1", "001.jpg", file1_data, "image/jpeg")
+    session.advanced.attachments.store("albums/1", "002.jpg", file2_data, "image/jpeg")
+    session.advanced.attachments.store("albums/1", "003.jpg", file3_data, "image/jpeg")
+    session.advanced.attachments.store("albums/1", "004.jpg", file4_data, "image/jpeg")
+
+    session.save_changes()
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_what-are-attachments-csharp.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_what-are-attachments-csharp.mdx
new file mode 100644
index 0000000000..3b5015f6a0
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_what-are-attachments-csharp.mdx
@@ -0,0 +1,88 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+In RavenDB, attachments are binary streams that can be bound to an existing document.
+Each attachment has a name, and you can specify its content type (`image/png` or `application/pdf`, for example).
+
+A document can have any number of attachments.
+
+Each attachment is bound to an existing document. In order to get an attachment, you'll need to specify the document ID and the attachment name.
+What's great about this approach is that you can specify the attachment's metadata in the document itself, and this document can be queried like any other document.
+
+## Example I
+
+In order to store an album of pictures in RavenDB, you can create the following "albums/1" document:
+
+
+
+{`\{
+    "UserId": "users/1",
+    "Name": "Holidays",
+    "Description": "Holidays travel pictures of the all family",
+    "Tags": ["Holidays Travel", "All Family"],
+    "@metadata": \{
+        "@collection": "Albums"
+    \}
+\}
+`}
+
+
+
+This document can have the following attachments:
+
+| Name | Content type |
+| - | - |
+| `001.jpg` | `image/jpeg` |
+| `002.jpg` | `image/jpeg` |
+| `003.jpg` | `image/jpeg` |
+| `004.mp4` | `video/mp4` |
+
+## Example II
+
+You can store a `users/1` document and attach a profile picture to it.
+When requesting the document from the server, the result would be:
+
+
+
+{`\{
+    "Name": "Hibernating Rhinos",
+    "@metadata": \{
+        "@attachments": [
+            \{
+                "ContentType": "image/png",
+                "Hash": "iFg0o6D38pUcWGVlP71ddDp8SCcoEal47kG3LtWx0+Y=",
+                "Name": "profile.png",
+                "Size": 33241
+            \}
+        ],
+        "@collection": "Users",
+        "@change-vector": "A:1061-D11EJRPTVEGKpMaH2BUl9Q",
+        "@flags": "HasAttachments",
+        "@id": "users/1",
+        "@last-modified": "2017-12-05T12:36:24.0504021Z"
+    \}
+\}
+`}
+
+
+
+Note that this document has a `HasAttachments` flag and an `@attachments` array with the attachments' info.
+
+You can see each attachment's name, content type, hash, and size.
+
+
+Attachment streams are stored by their hash, so if many attachments share the same hash, their stream is stored just once.
+
+
+## Transaction Support
+
+In RavenDB, attachments and documents are stored in a single ACID transaction: either all of them are saved to disk, or none are.
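+
+For instance, a minimal sketch of this atomicity (assuming an `Album` entity class like the one used in the storing examples; the names and content below are illustrative):
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    var album = new Album \{ Name = "Holidays" \};
+    session.Store(album, "albums/1");
+
+    var picture = new MemoryStream(new byte[] \{ 1, 2, 3 \});
+    session.Advanced.Attachments.Store(album, "001.jpg", picture, "image/jpeg");
+
+    // The document and its attachment are persisted in one transaction:
+    // either both reach the disk, or neither does
+    session.SaveChanges();
+\}
+`}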
+
+## Revisions and Attachments
+
+When the revisions feature is turned on in your database, adding an attachment to a document (or deleting one from it) will create a new revision of the document,
+since the change to the document's metadata, as shown in Example II, counts as a document change.
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_what-are-attachments-java.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_what-are-attachments-java.mdx
new file mode 100644
index 0000000000..3b5015f6a0
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_what-are-attachments-java.mdx
@@ -0,0 +1,88 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+In RavenDB, attachments are binary streams that can be bound to an existing document.
+Each attachment has a name, and you can specify its content type (`image/png` or `application/pdf`, for example).
+
+A document can have any number of attachments.
+
+Each attachment is bound to an existing document. In order to get an attachment, you'll need to specify the document ID and the attachment name.
+What's great about this approach is that you can specify the attachment's metadata in the document itself, and this document can be queried like any other document.
+
+## Example I
+
+In order to store an album of pictures in RavenDB, you can create the following "albums/1" document:
+
+
+
+{`\{
+    "UserId": "users/1",
+    "Name": "Holidays",
+    "Description": "Holidays travel pictures of the all family",
+    "Tags": ["Holidays Travel", "All Family"],
+    "@metadata": \{
+        "@collection": "Albums"
+    \}
+\}
+`}
+
+
+
+This document can have the following attachments:
+
+| Name | Content type |
+| - | - |
+| `001.jpg` | `image/jpeg` |
+| `002.jpg` | `image/jpeg` |
+| `003.jpg` | `image/jpeg` |
+| `004.mp4` | `video/mp4` |
+
+## Example II
+
+You can store a `users/1` document and attach a profile picture to it.
+When requesting the document from the server, the result would be:
+
+
+
+{`\{
+    "Name": "Hibernating Rhinos",
+    "@metadata": \{
+        "@attachments": [
+            \{
+                "ContentType": "image/png",
+                "Hash": "iFg0o6D38pUcWGVlP71ddDp8SCcoEal47kG3LtWx0+Y=",
+                "Name": "profile.png",
+                "Size": 33241
+            \}
+        ],
+        "@collection": "Users",
+        "@change-vector": "A:1061-D11EJRPTVEGKpMaH2BUl9Q",
+        "@flags": "HasAttachments",
+        "@id": "users/1",
+        "@last-modified": "2017-12-05T12:36:24.0504021Z"
+    \}
+\}
+`}
+
+
+
+Note that this document has a `HasAttachments` flag and an `@attachments` array with the attachments' info.
+
+You can see each attachment's name, content type, hash, and size.
+
+
+Attachment streams are stored by their hash, so if many attachments share the same hash, their stream is stored just once.
+
+
+## Transaction Support
+
+In RavenDB, attachments and documents are stored in a single ACID transaction: either all of them are saved to disk, or none are.
+
+## Revisions and Attachments
+
+When the revisions feature is turned on in your database, adding an attachment to a document (or deleting one from it) will create a new revision of the document,
+since the change to the document's metadata, as shown in Example II, counts as a document change.
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/attachments/_what-are-attachments-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/attachments/_what-are-attachments-nodejs.mdx
new file mode 100644
index 0000000000..9ec9d9e08c
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/attachments/_what-are-attachments-nodejs.mdx
@@ -0,0 +1,88 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+In RavenDB, attachments are binary streams that can be bound to an existing document.
+Each attachment has a name, and you can specify its content type (`image/png` or `application/pdf`, for example).
+
+A document can have any number of attachments.
+
+Each attachment is bound to an existing document. In order to get an attachment, you'll need to specify the document ID and the attachment name.
+What's great about this approach is that you can specify the attachment's metadata in the document itself, and this document can be queried like any other document.
+
+## Example I
+
+In order to store an album of pictures in RavenDB, you can create the following "albums/1" document:
+
+
+
+{`\{
+    "UserId": "users/1",
+    "Name": "Holidays",
+    "Description": "Holidays travel pictures of the all family",
+    "Tags": ["Holidays Travel", "All Family"],
+    "@metadata": \{
+        "@collection": "Albums"
+    \}
+\}
+`}
+
+
+
+This document can have the following attachments:
+
+| Name | Content type |
+| - | - |
+| `001.jpg` | `image/jpeg` |
+| `002.jpg` | `image/jpeg` |
+| `003.jpg` | `image/jpeg` |
+| `004.mp4` | `video/mp4` |
+
+## Example II
+
+You can store a `users/1` document and attach a profile picture to it.
+When requesting the document from the server, the result would be:
+
+
+
+{`\{
+    "Name": "Hibernating Rhinos",
+    "@metadata": \{
+        "@attachments": [
+            \{
+                "ContentType": "image/png",
+                "Hash": "iFg0o6D38pUcWGVlP71ddDp8SCcoEal47kG3LtWx0+Y=",
+                "Name": "profile.png",
+                "Size": 33241
+            \}
+        ],
+        "@collection": "Users",
+        "@change-vector": "A:1061-D11EJRPTVEGKpMaH2BUl9Q",
+        "@flags": "HasAttachments",
+        "@id": "users/1",
+        "@last-modified": "2017-12-05T12:36:24.0504021Z"
+    \}
+\}
+`}
+
+
+
+Note that this document has a *HasAttachments* flag and an *@attachments* array with the attachments' info.
+
+You can see each attachment's name, content type, hash, and size.
+
+
+Attachment streams are stored by their hash, so if many attachments share the same hash, their stream is stored just once.
+
+
+## Transaction Support
+
+In RavenDB, attachments and documents are stored in a single ACID transaction: either all of them are saved to disk, or none are.
+
+## Revisions and Attachments
+
+When the revisions feature is turned on in your database, adding an attachment to a document (or deleting one from it) will create a new revision of the document,
+since the change to the document's metadata, as shown in Example II, counts as a document change.
+ + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/bulk-insert.mdx b/versioned_docs/version-7.1/document-extensions/attachments/bulk-insert.mdx new file mode 100644 index 0000000000..32bbd97712 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/bulk-insert.mdx @@ -0,0 +1,39 @@ +--- +title: "Bulk Insert Attachments" +hide_table_of_contents: true +sidebar_label: Bulk Insert +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import BulkInsertCsharp from './_bulk-insert-csharp.mdx'; +import BulkInsertPython from './_bulk-insert-python.mdx'; +import BulkInsertNodejs from './_bulk-insert-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/attachments/copying-moving-renaming.mdx b/versioned_docs/version-7.1/document-extensions/attachments/copying-moving-renaming.mdx new file mode 100644 index 0000000000..87f3ca09da --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/copying-moving-renaming.mdx @@ -0,0 +1,50 @@ +--- +title: "Attachments: Copy, Move, Rename" +hide_table_of_contents: true +sidebar_label: Copying, Moving & Renaming +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CopyingMovingRenamingCsharp from './_copying-moving-renaming-csharp.mdx'; +import CopyingMovingRenamingJava from './_copying-moving-renaming-java.mdx'; +import CopyingMovingRenamingPython from './_copying-moving-renaming-python.mdx'; +import CopyingMovingRenamingPhp from './_copying-moving-renaming-php.mdx'; +import CopyingMovingRenamingNodejs from './_copying-moving-renaming-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/attachments/deleting.mdx b/versioned_docs/version-7.1/document-extensions/attachments/deleting.mdx new file mode 100644 index 0000000000..d735a67a9f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/deleting.mdx @@ -0,0 +1,49 @@ +--- +title: "Attachments: Deleting Attachments" +hide_table_of_contents: true +sidebar_label: Deleting +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeletingCsharp from './_deleting-csharp.mdx'; +import DeletingJava from './_deleting-java.mdx'; +import DeletingPython from './_deleting-python.mdx'; +import DeletingPhp from './_deleting-php.mdx'; +import DeletingNodejs from './_deleting-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/attachments/indexing.mdx b/versioned_docs/version-7.1/document-extensions/attachments/indexing.mdx new file mode 100644 index 0000000000..c915eddc11 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/indexing.mdx @@ -0,0 +1,50 @@ +--- +title: "Index Attachments" +hide_table_of_contents: true +sidebar_label: Indexing 
+sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingCsharp from './_indexing-csharp.mdx'; +import IndexingJava from './_indexing-java.mdx'; +import IndexingPython from './_indexing-python.mdx'; +import IndexingPhp from './_indexing-php.mdx'; +import IndexingNodejs from './_indexing-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/attachments/loading.mdx b/versioned_docs/version-7.1/document-extensions/attachments/loading.mdx new file mode 100644 index 0000000000..aceb01b7ef --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/loading.mdx @@ -0,0 +1,49 @@ +--- +title: "Attachments: Loading Attachments" +hide_table_of_contents: true +sidebar_label: Loading +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import LoadingCsharp from './_loading-csharp.mdx'; +import LoadingJava from './_loading-java.mdx'; +import LoadingPython from './_loading-python.mdx'; +import LoadingPhp from './_loading-php.mdx'; +import LoadingNodejs from './_loading-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/attachments/storing.mdx b/versioned_docs/version-7.1/document-extensions/attachments/storing.mdx new file mode 100644 index 0000000000..3a6e10f8cb --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/storing.mdx @@ -0,0 +1,49 @@ +--- +title: "Attachments: Storing Attachments" +hide_table_of_contents: true +sidebar_label: Storing +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import StoringCsharp from './_storing-csharp.mdx'; +import StoringJava from './_storing-java.mdx'; +import StoringPython from './_storing-python.mdx'; +import StoringPhp from './_storing-php.mdx'; +import StoringNodejs from './_storing-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/attachments/what-are-attachments.mdx b/versioned_docs/version-7.1/document-extensions/attachments/what-are-attachments.mdx new file mode 100644 index 0000000000..babf8b311f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/attachments/what-are-attachments.mdx @@ -0,0 +1,39 @@ +--- +title: "What are Attachments" +hide_table_of_contents: true +sidebar_label: What are Attachments +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import WhatAreAttachmentsCsharp from './_what-are-attachments-csharp.mdx'; +import WhatAreAttachmentsJava from './_what-are-attachments-java.mdx'; +import WhatAreAttachmentsNodejs from './_what-are-attachments-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/versioned_docs/version-7.1/document-extensions/counters/_category_.json b/versioned_docs/version-7.1/document-extensions/counters/_category_.json new file mode 100644 index 0000000000..310d42c728 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": Counters, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-csharp.mdx b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-csharp.mdx new file mode 100644 index 0000000000..7bfdf7dbba --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-csharp.mdx @@ -0,0 +1,297 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This section describes the relationships between Counters and other RavenDB features: + * How Counters are supported by the different features. + * How Counters trigger features' execution. + +* In this page: + * [Counters and Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) + * [Counters and Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) + * [Counters and Revisions](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) + * [Counters and Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler) + * [Counters and Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) + * [Counters and Ongoing Tasks](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) - `Backup`, `External replication`, `ETL`, `Data Subscription` + * [Counters and Other Features: summary](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-other-features-summary) + * [Counters Bulk-Insert](../../document-extensions/counters/counters-and-other-features.mdx#counters-bulk-insert) + + + +### Counters and Indexing + +Indexing Counters can speed-up finding them and the documents that contain them. + +* **Indexing Counter Values** + Dynamic indexes (aka auto-indexes) _cannot_ index counter values. To index counter values, + create a static index that inherits from `AbstractCountersIndexCreationTask` ([see here](../../document-extensions/counters/indexing.mdx)). + +* **Indexing Counter Names** + Re-indexing due to Counter-name modification is normally rare enough to pause no performance issues. + To index a document's Counters by name, use [CounterNamesFor](../../document-extensions/counters/indexing.mdx#section-1). + +### Counters and Queries + +Create queries **using code**, or send the server **raw queries** for execution. + +* Either way, you can query Counters **by name** but **not by value**. + This is because queries are generally [based on indexes](../../start/getting-started.mdx#example-iii---querying), and Counter values are [not indexed](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing). + +* Counter values **can** be [projected](../../indexes/querying/projections.mdx) from query results, as demonstrated in the following examples. + This way a client can get Counter values from a query without downloading whole documents. 
+ +* Use [Session.Query](../../client-api/session/querying/how-to-query.mdx#sessionquery) to code queries yourself. + * **Returned Counter Value**: **Accumulated** + A Counter's value is returned as a single sum, with no specification of the Counter's value on each node. + + + +{`using (var session = docStore.OpenSession()) +\{ + //Select documents from the "Products" collection, with a Counter named \`ProductLikes\`. + //Querying upon Counter values (e.g. using "Where") is not possible. + //In this example the documents that contain the Counter are NOT returned, only Counter values. + var query = session.Query<Product>() + .Select(product => RavenQuery.Counter(product, "ProductLikes")); + + var queryResults = query.ToList(); + + //Show ProductLikes's value, or NULL for documents with no such Counter. + foreach (var counterValue in queryResults) + \{ + Console.WriteLine("ProductLikes: " + counterValue); + \} +\} `} + + + +* Use [RawQuery](../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) to send the server raw RQL expressions for execution. + * You can use the `counter` method. + **Returned Counter Value**: **Accumulated** + + * You can use the `counterRaw` method. + **Returned Counter Value**: **Distributed** + A Counter's value is returned as a series of values, each maintained by one of the nodes. + * You are not expected to use this in your application. + Applications normally use the Counter's overall value, and very rarely refer to the value each node gives it. + +`counter` and `counterRaw` samples: + + + + +{`//Various RQL expressions sent to the server using counter() +//Returned Counter value is accumulated +var rawquery1 = session.Advanced + .RawQuery<CounterResult>("from products as p select counter(p, \\"ProductLikes\\")") + .ToList(); + +var rawquery2 = session.Advanced + .RawQuery<CounterResult>("from products select counter(\\"ProductLikes\\") as ProductLikesCount") + .ToList(); + +var rawquery3 = session.Advanced + .RawQuery<CounterResult>("from products where PricePerUnit > 50 select Name, counter(\\"ProductLikes\\")") + .ToList(); `} + + + + +{`//An RQL expression sent to the server using counterRaw() +//Returned Counter value is distributed +var query = session.Advanced + .RawQuery<CounterResultRaw>("from users as u select counterRaw(u, \\"downloads\\")") + .ToList(); `} + + + + +### Counters and Revisions + +A [document revision](../../document-extensions/revisions/overview.mdx) stores all the document Counters' +names and values when the revision was created. + +* **Stored Counter Values**: **Accumulated** + A revision stores a Counter's value as a single sum, with no specification of the Counter's value on each node. + +* Revision creation can be initiated by **Counter-name modification**. + * When the Revisions feature is enabled, the creation or deletion of a Counter initiates the creation of a new document revision. + * Counter **value** modifications do **not** cause the creation of new revisions. + +### Counters and Smuggler + +[Smuggler](../../client-api/smuggler/what-is-smuggler.mdx) is a DocumentStore property that can be used +to [export](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerexportoptions) chosen +database items to an external file or to [import](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerimportoptions) +database items from an existing file into the database. + +* **Transferred Counter Value**: **Distributed** + Smuggler transfers the entire series of values that the different nodes maintain for a Counter.
+* To make Smuggler handle Counters, include `DatabaseItemType.CounterGroups` in `OperateOnTypes`'s list of entities to import or export. + + + +{`OperateOnTypes = DatabaseItemType.Indexes + | DatabaseItemType.Documents + | DatabaseItemType.CounterGroups `} + + + +### Counters and Changes API + +[Changes API](../../client-api/changes/what-is-changes-api.mdx#changes-api) is a push service that can inform you of various changes on the Server, including [changes in Counter values](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#changes-api--how-to-subscribe-to-counter-changes). +You can target all Counters, or specify the ones you wish to follow. + +* **Pushed Counter Value**: **Accumulated** + `Changes API` methods return a Counter's value as a single sum, without specifying its value on each node. +* The service is initiated by **Counter Value Modification**. + A minimal subscription sketch is given below, after the Ongoing Tasks section. + +### Counters and Ongoing Tasks + +Each [ongoing task](../../studio/database/tasks/ongoing-tasks/general-info.mdx) relates to Counters in its own way. + +* **Counters** and the **Backup task** + There are two [backup](../../studio/database/tasks/backup-task.mdx) types: **logical-backup** and **snapshot**. + Both types store and restore **all** data, including Counters. + Both types operate as an ongoing backup routine with a pre-set time schedule. + * Logical Backup: + **Backed-up Counter values**: **Distributed** + A logical backup is a higher-level implementation of [Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler). + As with Smuggler, Counters are backed-up and restored including their values on all nodes. + * Snapshot: + A snapshot stores all data and settings as a single binary image. + All components, including Counters, are restored to the exact state they were in when the backup was made. + +* **Counters** and the **External Replication task** + The ongoing [external replication](../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx) task replicates all data, including Counters. + * **Replicated Counter Value**: **Distributed** + Counters are replicated along with their values on all nodes. + * Replication can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + +* **Counters** and the **ETL task** + [ETL](../../server/ongoing-tasks/etl/basics.mdx) is used to export data from RavenDB to an external (either Raven or SQL) database. + * [SQL ETL](../../server/ongoing-tasks/etl/sql.mdx) is **not supported**. + Counters cannot be exported to an SQL database over SQL ETL. + * [RavenDB ETL](../../server/ongoing-tasks/etl/raven.mdx) **is supported**. + Counters [are](../../server/ongoing-tasks/etl/raven.mdx#counters) exported over RavenDB ETL. + * Export can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + * **Exported Counter Value**: **Distributed** + Counters are exported along with their values on all nodes. + * Counters can be [exported using a script](../../server/ongoing-tasks/etl/raven.mdx#adding-counter-explicitly-in-a-script). + **Default behavior**: When an ETL script is not provided, Counters are exported. + +* **Counters** and the **[Data Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscriptions) task** + Data Subscriptions can be initiated by document changes, including those caused by **Counter Name updates**. + Documents will **not** be delivered in reaction to Counter Value modification.
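+ +A minimal sketch of such a subscription — assuming the C# client's `ForCountersOfDocument` method and the `System.Reactive` package for the lambda-style `Subscribe` (the document ID is hypothetical): + + + +{`// Subscribe to changes of all Counters of a specific document +IDisposable subscription = store.Changes() + .ForCountersOfDocument("products/1-A") + .Subscribe(change => + \{ + // change.Value holds the Counter's accumulated (total) value + Console.WriteLine(change.Name + " = " + change.Value); + \}); + +// Dispose of the subscription when it is no longer needed +subscription.Dispose(); `} + +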
+ + + +### Counters and Other Features: Summary + +Use this table to see whether and how various RavenDB features are triggered by Counters, +and how the various features handle Counter values. + +* In the **Triggered By** column: + * _Document Change_ - Feature is triggered by a Counter Name update. + * _Counter Value Modification_ - Feature is triggered by a Counter Value modification. + * _Time Schedule_ - Feature is invoked by a pre-set time routine. + * _No Trigger_ - Feature is executed manually, through the Studio or by a Client. + +* In the **Counter Value** column: + * _Accumulated_ - Counter Value is handled as a single accumulated sum. + * _Distributed_ - Counter Value is handled as a series of values maintained by cluster nodes. + +| **Feature** | **Triggered by** | **Counter Value** | +|-------------|:-------------|:-------------| +| [Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) | _Document Change_ | doesn't handle values | +| [LINQ Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) | _No trigger_ | _Accumulated_ | +| [Raw Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) | _No trigger_ | `counter()` - _Accumulated_
`counterRaw()` - _Distributed_ | +| [Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler) | _No trigger_ | _Distributed_ | +| [Backup Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Time Schedule_ | _Distributed_ | +| [RavenDB ETL Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Counter Value Modification_ | _Distributed_ | +| [External Replication Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Counter Value Modification_ | _Distributed_ | +| [Data Subscriptions Update Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_ | | +| [Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) | _Counter Value Modification_ | _Accumulated_ | +| [Revision creation](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) | _Document Change_ | _Accumulated_ | +
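+ +The _Distributed_ entries in the table refer to the per-node breakdown of a Counter's value. A minimal sketch of retrieving that breakdown, assuming the C# client's `GetCountersOperation` with its `returnFullResults` flag (the document ID and Counter name are hypothetical): + + + +{`// Retrieve a Counter together with its per-node values +CountersDetail detail = store.Operations.Send( + new GetCountersOperation("users/1-A", "likes", returnFullResults: true)); + +CounterDetail counter = detail.Counters[0]; +Console.WriteLine("Total: " + counter.TotalValue); + +foreach (var nodeValue in counter.CounterValues) +\{ + // Each entry maps a node's change-vector entry to that node's share of the total + Console.WriteLine(nodeValue.Key + ": " + nodeValue.Value); +\} `} + +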
+ +### Counters Bulk-Insert + +`store.BulkInsert` is RavenDB's high-performance data insertion operation. +Use its `CountersFor` interface's `Increment` method to add or update counters with great speed. + +Syntax: + +* `CountersFor` + + + +{`public CountersBulkInsert CountersFor(string id) `} + + + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `id` | `string` | Document ID | + +* `Increment` + + + +{`public void Increment(string name, long delta = 1L) `} + + + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `name` | `string` | Counter Name | + | `delta` | `long` | Default: 1L | + +* Usage Flow + + * Create a `store.BulkInsert` instance. + * Pass the document ID to the instance's `CountersFor` method. + * Call `Increment` as many times as you like, passing it + the Counter Name and the Value (delta) to be added. + +* Usage Sample + +In this sample, we attach a counter to each queried User document. + + + +{`// Choose user profiles to add counters to +List<User> result; +using (var session = store.OpenSession()) +\{ + IRavenQueryable<User> query = session.Query<User>() + .Where(u => u.Age < 30); + + result = query.ToList(); +\} + +using (var bulkInsert = store.BulkInsert()) +\{ + for (var user = 0; user < result.Count; user++) + \{ + string userId = result[user].Id; + + // Choose document + var countersFor = bulkInsert.CountersFor(userId); + + // Add or Increment a counter + countersFor.Increment("downloaded", 100); + \} +\} `} + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-java.mdx b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-java.mdx new file mode 100644 index 0000000000..866443c231 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-java.mdx @@ -0,0 +1,284 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This section describes the relationships between Counters and other RavenDB features: + * How Counters are supported by the different features. + * How Counters trigger features' execution. + +* In this page: + * [Counters and Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) + * [Counters and Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) + * [Counters and Revisions](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) + * [Counters and Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler) + * [Counters and Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) + * [Counters and Ongoing Tasks](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) - `Backup`, `External replication`, `ETL`, `Data Subscription` + * [Counters and Other Features: summary](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-other-features-summary) + * [Counters Bulk-Insert](../../document-extensions/counters/counters-and-other-features.mdx#counters-bulk-insert) + + + +### Counters and Indexing + +Indexing Counters can speed up finding them and the documents that contain them.
+ +* **Indexing Counter Values** + Dynamic indexes (aka auto-indexes) _cannot_ index counter values. To index counter values, + create a static index that inherits from `AbstractCountersIndexCreationTask`. + +* **Indexing Counter Names** + Re-indexing due to Counter-name modification is normally rare enough to pause no performance issues. + To index a document's Counters by name, use `counterNamesFor`. +### Counters and Queries + +Create queries **using code**, or send the server **raw queries** for execution. + +* Either way, you can query Counters **by name** but **not by value**. + This is because queries are generally [based on indexes](../../start/getting-started.mdx#example-iii---querying), and Counter values are [not indexed](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing). +* Counter values **can** be projected from query results, as demonstrated in the following examples. + This way a client can get Counter values from a query without downloading whole documents. + +* Use `Session.Query` to code queries yourself. + * **Returned Counter Value**: **Accumulated** + A Counter's value is returned as a single sum, with no specification of the Counter's value on each node. + + +{`using (var session = docStore.OpenSession()) +\{ + //Select documents from the "Products" collection, with a Counter named \`ProductLikes\`. + //Querying upon Counter values (e.g. using "Where") is not possible. + //In this example the documents that contain the Counter are NOT returned, only Counter values. + var query = session.Query() + .Select(product => RavenQuery.Counter(product, "ProductLikes")); + + var queryResults = query.ToList(); + + //Show ProductLikes's value, or NULL for documents with no such Counter. + foreach (var counterValue in queryResults) + \{ + Console.WriteLine("ProductLikes: " + counterValue); + \} +\} +`} + + + +* Use `rawQuery` to send the server raw RQL expressions for execution. + * You can use the `counter` method. + **Returned Counter Value**: **Accumulated** + + * You can use the `counterRaw` method. + **Returned Counter Value**: **Distributed** + A Counter's value is returned as a series of values, each maintained by one of the nodes. + * It is not expected of you to use this in your application. + Applications normally use the Counter's overall value, and very rarely refer to the value each node gives it. + + + `counter` and `counterRaw` samples: + + + +{`//Various RQL expressions sent to the server using counter() +//Returned Counter value is accumulated +List rawQuery1 = session + .advanced() + .rawQuery(CounterResult.class, "from products as p select counter(p, \\"productLikes\\")") + .toList(); + +List rawQuery2 = session.advanced().rawQuery(CounterResult.class, + "from products select counter(\\"productLikes\\") as productLikesCount") + .toList(); + +List rawQuery3 = session.advanced() + .rawQuery(CounterResult.class, + "from products where PricePerUnit > 50 select Name, counter(\\"productLikes\\")") + .toList(); +`} + + + + +{`//An RQL expression sent to the server using counterRaw() +//Returned Counter value is distributed +List query = session + .advanced().rawQuery(CounterResultRaw.class, + "from users as u select counterRaw(u, \\"downloads\\")") + .toList(); +`} + + + +### Counters and Revisions + +A [document revision](../../document-extensions/revisions/overview.mdx) stores all the document Counters' +names and values when the revision was created. 
+ +* **Stored Counter Values**: **Accumulated** + A revision stores a Counter's value as a single sum, with no specification of the Counter's value on each node. + +* Revisions-creation can be initiated by **Counter-name modification**. + * When the Revisions feature is enabled, the creation or deletion of a Counter initiates the creation of a new document revision. + * Counter **value** modifications do **not** cause the creation of new revisions. +### Counters and Smuggler + +`Smuggler` is a DocumentStore property, that can be used +to **export** chosen database items to an external file or to +**import** database items from an existing file into the database. + +* **Transferred Counter Value**: **Distributed** + Smuggler transfers the entire series of values that the different nodes maintain for a Counter. +* To make Smuggler handle Counters, include `DatabaseItemType.COUNTERS` in `OperateOnTypes`'s list of entities to import or export. + + +{`exportOptions.setOperateOnTypes(EnumSet.of( + DatabaseItemType.INDEXES, + DatabaseItemType.DOCUMENTS, + DatabaseItemType.COUNTERS)); +`} + + +### Counters and Changes API + +[Changes API](../../client-api/changes/what-is-changes-api.mdx#changes-api) is a Push service, that can inform you of various changes on the Server, including [changes in Counter values](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#changes-api--how-to-subscribe-to-counter-changes). +You can target all Counters, or specify the ones you wish to follow. + +* **Pushed Counter Value**: **Accumulated** + `Changes API` methods return a Counter's value as a single sum, without specifying its value on each node. +* The service is initiated by **Counter Value Modification**. +### Counters and Ongoing Tasks: + +Each [ongoing task](../../studio/database/tasks/ongoing-tasks/general-info.mdx) relates to Counters in its own way. + +* **Counters** and the **Backup task** + There are two backup types: **logical-backup** and **snapshot**. + Both types store and restore **all** data, including Counters. + Both types operate as an ongoing backup routine, with a pre-set time schedule. + * Logical Backup: + **Backed-up Counter values**: **Distributed** + A logical backup is a higher-level implementation of [Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler). + As with Smuggler, Counters are backed-up and restored including their values on all nodes. + * Snapshot: + A snapshot stores all data and settings as a single binary image. + All components, including Counters, are restored to the exact same state they've been at during backup. + +* **Counters** and the **External Replication task** + The ongoing [external replication](../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx) task replicates all data, including Counters. + * **Replicated Counter Value**: **Distributed** + Counters are replicated along with their values on all nodes. + * Replication can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + +* **Counters** and the **ETL task** + [ETL](../../server/ongoing-tasks/etl/basics.mdx) is used to export data from RavenDB to an external (either Raven or SQL) database. + * [SQL ETL](../../server/ongoing-tasks/etl/sql.mdx) is **not supported**. + Counters cannot be exported to an SQL database over SQL ETL. + * [RavenDB ETL](../../server/ongoing-tasks/etl/raven.mdx) **is supported**. + Counters [are](../../server/ongoing-tasks/etl/raven.mdx#counters) exported over RavenDB ETL. 
+ * Export can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + * **Exported Counter Value**: **Distributed** + Counters are exported along with their values on all nodes. + * Counters can be [exported using a script](../../server/ongoing-tasks/etl/raven.mdx#adding-counter-explicitly-in-a-script). + **Default behavior**: When an ETL script is not provided, Counters are exported. + +* **Counters** and the **[Data Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscriptions) task** + Data Subscriptions can be initiated by document changes, including those caused by **Counter Name updates**. + Documents will **not** be delivered in reaction to Counter Value modification. + + + +### Counters and Other Features: Summary + +Use this table to see whether and how various RavenDB features are triggered by Counters, +and how the various features handle Counter values. + +* In the **Triggered By** column: + * _Document Change_ - Feature is triggered by a Counter Name update. + * _Counter Value Modification_ - Feature is triggered by a Counter Value modification. + * _Time Schedule_ - Feature is invoked by a pre-set time routine. + * _No Trigger_ - Feature is executed manually, through the Studio or by a Client. +* In the **Counter Value** column: + * _Accumulated_ - Counter Value is handled as a single accumulated sum. + * _Distributed_ - Counter Value is handled as a series of values maintained by cluster nodes. + +| **Feature** | **Triggered by** | **Counter Value** | +|-------------|:-------------|:-------------| +| [Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) | _Document Change_ | doesn't handle values | +| [Raw Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) | _No trigger_ | `counter()` - _Accumulated_
`counterRaw()` - _Distributed_ | +| [Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler) | _No trigger_ | _Distributed_ | +| [Backup Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Time Schedule_ | _Distributed_ | +| [RavenDB ETL Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Counter Value Modification_ | _Distributed_ | +| [External Replication Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Counter Value Modification_ | _Distributed_ | +| [Data Subscriptions Update Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_ | | +| [Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) | _Counter Value Modification_ | _Accumulated_ | +| [Revision creation](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) | _Document Change_ | _Accumulated_ | +
+### Counters Bulk-Insert +`store.bulkInsert` is RavenDB's high-performance data insertion operation. +Use its `countersFor` interface's `increment` method to add or update counters with great speed. + +* Syntax + + * `countersFor` + + +{`public CountersBulkInsert countersFor(String id); +`} + + + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `id` | `String` | Document ID | + + * `increment` + + +{`void increment(String counterName); + +void increment(String id, String name, long delta); +`} + + + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `name` | `String` | Counter Name | + | `id` | `String` | Document ID | + | `delta` | `long` | Default: 1L | + +* Usage Flow + + * Create a `store.bulkInsert` instance. + * Pass the instance's `countersFor` interface, the document ID + * Call `increment` as many times as you like. Pass it - + The Counter Name and Value (delta to be added). + +* Usage Sample + In this sample, we attach a counter to all User documents. + + +{`try (IDocumentSession session = docStore.openSession()) \{ + IDocumentQuery query = session.query(User.class) + .whereLessThan("age", 30); + + result = query.toList(); +\} + +try (BulkInsertOperation bulkInsert = docStore.bulkInsert()) \{ + for (User user : result) \{ + String userId = user.getId(); + CountersBulkInsert countersFor = bulkInsert.countersFor(userId); + bulkInsert.countersFor(userId).increment("downloaded", 100); + \} +\} +`} + + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-nodejs.mdx new file mode 100644 index 0000000000..6397fa5214 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-nodejs.mdx @@ -0,0 +1,285 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This section describes the relationships between Counters and other RavenDB features: + * How Counters are supported by the different features. + * How Counters trigger features' execution. + +* In this page: + * [Counters and Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) + * [Counters and Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) + * [Counters and Revisions](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) + * [Counters and Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler) + * [Counters and Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) + * [Counters and Ongoing Tasks](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) - `Backup`, `External replication`, `ETL`, `Data Subscription` + * [Counters and Other Features: summary](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-other-features-summary) + * [Counters Bulk-Insert](../../document-extensions/counters/counters-and-other-features.mdx#counters-bulk-insert) + + + +### Counters and Indexing + +Indexing Counters can speed-up finding them and the documents that contain them. 
+ +* **Indexing Counter Values** + Dynamic indexes (aka auto-indexes) _cannot_ index counter values. To index counter values, + create a static index that inherits from `AbstractCountersIndexCreationTask` ([see here](../../document-extensions/counters/indexing.mdx)). + +* **Indexing Counter Names** + Re-indexing due to Counter-name modification is normally rare enough to pause no performance issues. + To index a document's Counters by name, use [CounterNamesFor](../../document-extensions/counters/indexing.mdx#section-1). +### Counters and Queries + +Create queries **using code**, or send the server **raw queries** for execution. + +* Either way, you can query Counters **by name** but **not by value**. + This is because queries are generally [based on indexes](../../start/getting-started.mdx#example-iii---querying), and Counter values are [not indexed](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing). +* Counter values **can** be [projected](../../indexes/querying/projections.mdx) from query results, as demonstrated in the following examples. + This way a client can get Counter values from a query without downloading whole documents. + +* Use [Session.Query](../../client-api/session/querying/how-to-query.mdx#sessionquery) to code queries yourself. + * **Returned Counter Value**: **Accumulated** + A Counter's value is returned as a single sum, with no specification of the Counter's value on each node. + + +{`const session = store.openSession(); +const query = session.query(\{collection: "Products"\}) + .selectFields("ProductLikes"); +const queryResults = query.all(); + +for (let counterValue in queryResults) \{ + console.log("ProductLikes: " + counterValue); +\} +`} + + + +* Use [RawQuery](../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) to send the server raw RQL expressions for execution. + * You can use the `counter` method. + **Returned Counter Value**: **Accumulated** + + * You can use the `counterRaw` method. + **Returned Counter Value**: **Distributed** + A Counter's value is returned as a series of values, each maintained by one of the nodes. + * It is not expected of you to use this in your application. + Applications normally use the Counter's overall value, and very rarely refer to the value each node gives it. + + + `counter` and `counterRaw` samples: + + + +{`//Various RQL expressions sent to the server using counter() +//Returned Counter value is accumulated +const rawquery1 = session.advanced + .rawQuery("from products as p select counter(p, \\"ProductLikes\\")") + .all(); + +const rawquery2 = session.advanced + .rawQuery("from products select counter(\\"ProductLikes\\") as ProductLikesCount") + .all(); + +const rawquery3 = session.advanced + .rawQuery("from products where PricePerUnit > 50 select Name, counter(\\"ProductLikes\\")") + .all(); +`} + + + + +{`//An RQL expression sent to the server using counterRaw() +//Returned Counter value is distributed +const query = session.advanced + .rawQuery("from users as u select counterRaw(u, \\"downloads\\")") + .all(); +`} + + + +### Counters and Revisions + +A [document revision](../../document-extensions/revisions/overview.mdx) stores all the document Counters' +names and values when the revision was created. + +* **Stored Counter Values**: **Accumulated** + A revision stores a Counter's value as a single sum, with no specification of the Counter's value on each node. + +* Revisions-creation can be initiated by **Counter-name modification**. 
+ * When the Revisions feature is enabled, the creation or deletion of a Counter initiates the creation of a new document revision. + * Counter **value** modifications do **not** cause the creation of new revisions. +### Counters and Smuggler + +[Smuggler](../../client-api/smuggler/what-is-smuggler.mdx) is a DocumentStore property, that can be used +to [export](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerexportoptions) chosen +database items to an external file or to [import](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerimportoptions) +database items from an existing file into the database. + +* **Transferred Counter Value**: **Distributed** + Smuggler transfers the entire series of values that the different nodes maintain for a Counter. +* To make Smuggler handle Counters, use `DatabaseItemType` entities to import or export. + +`DatabaseItemType` options: +* `None` +* `Documents` +* `RevisionDocuments` +* `Indexes` +* `Identities` +* `Tombstones` +* `LegacyAttachments` +* `Conflicts` +* `CompareExchange` +* `LegacyDocumentDeletions` +* `LegacyAttachmentDeletions` +* `DatabaseRecord` +* `Unknown` +* `Attachments` +* `CounterGroups` +* `Subscriptions` +* `CompareExchangeTombstones` +* `TimeSeries` +### Counters and Changes API + +[Changes API](../../client-api/changes/what-is-changes-api.mdx#changes-api) is a Push service, that can inform you of various changes on the Server, including [changes in Counter values](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#changes-api--how-to-subscribe-to-counter-changes). +You can target all Counters, or specify the ones you wish to follow. + +* **Pushed Counter Value**: **Accumulated** + `Changes API` methods return a Counter's value as a single sum, without specifying its value on each node. +* The service is initiated by **Counter Value Modification**. +### Counters and Ongoing Tasks: + +Each [ongoing task](../../studio/database/tasks/ongoing-tasks/general-info.mdx) relates to Counters in its own way. + +* **Counters** and the **Backup task** + There are two [backup](../../studio/database/tasks/backup-task.mdx) types: **logical-backup** and **snapshot**. + Both types store and restore **all** data, including Counters. + Both types operate as an ongoing backup routine, with a pre-set time schedule. + * Logical Backup: + **Backed-up Counter values**: **Distributed** + A logical backup is a higher-level implementation of [Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler). + As with Smuggler, Counters are backed-up and restored including their values on all nodes. + * Snapshot: + A snapshot stores all data and settings as a single binary image. + All components, including Counters, are restored to the exact same state they've been at during backup. + +* **Counters** and the **External Replication task** + The ongoing [external replication](../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx) task replicates all data, including Counters. + * **Replicated Counter Value**: **Distributed** + Counters are replicated along with their values on all nodes. + * Replication can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + +* **Counters** and the **ETL task** + [ETL](../../server/ongoing-tasks/etl/basics.mdx) is used to export data from RavenDB to an external (either Raven or SQL) database. + * [SQL ETL](../../server/ongoing-tasks/etl/sql.mdx) is **not supported**. 
+ Counters cannot be exported to an SQL database over SQL ETL. + * [RavenDB ETL](../../server/ongoing-tasks/etl/raven.mdx) **is supported**. + Counters [are](../../server/ongoing-tasks/etl/raven.mdx#counters) exported over RavenDB ETL. + * Export can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + * **Exported Counter Value**: **Distributed** + Counters are exported along with their values on all nodes. + * Counters can be [exported using a script](../../server/ongoing-tasks/etl/raven.mdx#adding-counter-explicitly-in-a-script). + **Default behavior**: When an ETL script is not provided, Counters are exported. + +* **Counters** and the **[Data Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscriptions) task** + Data Subscriptions can be initiated by document changes, including those caused by **Counter Name updates**. + Documents will **not** be delivered in reaction to Counter Value modification. + + + +### Counters and Other Features: Summary + +Use this table to see whether and how various RavenDB features are triggered by Counters, +and how the various features handle Counter values. + +* In the **Triggered By** column: + * _Document Change_ - Feature is triggered by a Counter Name update. + * _Counter Value Modification_ - Feature is triggered by a Counter Value modification. + * _Time Schedule_ - Feature is invoked by a pre-set time routine. + * _No Trigger_ - Feature is executed manually, through the Studio or by a Client. +* In the **Counter Value** column: + * _Accumulated_ - Counter Value is handled as a single accumulated sum. + * _Distributed_ - Counter Value is handled as a series of values maintained by cluster nodes. + +| **Feature** | **Triggered by** | **Counter Value** | +|-------------|:-------------|:-------------| +| [Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) | _Document Change_ | doesn't handle values | +| [LINQ Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) | _No trigger_ | _Accumulated_ | +| [Raw Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) | _No trigger_ | `counter()` - _Accumulated_
`counterRaw()` - _Distributed_ | +| [Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler) | _No trigger_ | _Distributed_ | +| [Backup Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Time Schedule_ | _Distributed_ | +| [RavenDB ETL Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Counter Value Modification_ | _Distributed_ | +| [External Replication Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Counter Value Modification_ | _Distributed_ | +| [Data Subscriptions Update Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_ | | +| [Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) | _Counter Value Modification_ | _Accumulated_ | +| [Revision creation](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) | _Document Change_ | _Accumulated_ | +
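+ +The Changes API section above has no Node.js sample; here is a hedged sketch, assuming the Node.js client exposes counter-change subscriptions (`forCountersOfDocument`) with the event-emitter style used elsewhere in the Changes API (the document ID, counter name, and camelCase field names are assumptions): + + + +{`// A sketch: follow changes to all Counters of one document +const changes = store.changes(); +const counterChanges = changes.forCountersOfDocument("products/1-A"); + +counterChanges.on("data", change => \{ + // The pushed value is the Counter's accumulated sum + console.log(change.name + " = " + change.value); +\}); + +counterChanges.on("error", err => \{ + console.error(err); +\}); `} + +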
+### Counters Bulk-Insert +`store.bulkInsert` is RavenDB's high-performance data insertion operation. +Use its `countersFor` interface's `increment` method to add or update counters with great speed. + +* Usage + + * `countersFor` + + +{`const countersFor = bulkInsert.countersFor("documentId"); `} + + + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `id` | `string` | Document ID | + + * `increment` + + +{`await bulkInsert.countersFor("documentId").increment("likes", 100); `} + + + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `name` | `string` | Counter Name | + | `delta` | `long` | Default: 1L | + +* Usage Flow + + * Create a `store.bulkInsert` instance. + * Pass the document ID to the instance's `countersFor` method. + * Call `increment` as many times as you like, passing it + the Counter Name and the Value (delta) to be added. + +* Usage Sample + In this sample, we attach a counter to each queried User document. + + +{`const session = store.openSession(); +const query = session.query(\{collection: "User"\}) + .whereBetween("Age", 0, 30); +const result = await query.all(); + +const bulkInsert = store.bulkInsert(); +for (let user = 0; user < result.length; user++) \{ + let userId = result[user].id; + + // Choose document + let countersFor = bulkInsert.countersFor(userId); + + // Add or Increment a counter + await bulkInsert.countersFor(userId).increment("downloaded", 100); +\} `} + + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-php.mdx b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-php.mdx new file mode 100644 index 0000000000..aae19c14fd --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-php.mdx @@ -0,0 +1,165 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This section describes the relationships between Counters and other RavenDB features: + * How Counters are supported by the different features. + * How Counters trigger features' execution. + +* In this page: + * [Counters and Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) + * [Counters and Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) + * [Counters and Revisions](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) + * [Counters and Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) + * [Counters and Ongoing Tasks](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) - `Backup`, `External replication`, `ETL` + * [Counters and Other Features: summary](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-other-features-summary) + + + +### Counters and Indexing + +Indexing Counters can speed up finding them and the documents that contain them. + +* **Indexing Counter Values** + Dynamic indexes (aka auto-indexes) _cannot_ index counter values. To index counter values, + create a static index that inherits from `AbstractCountersIndexCreationTask` ([see here](../../document-extensions/counters/indexing.mdx)). + +* **Indexing Counter Names** + Re-indexing due to Counter-name modification is normally rare enough to pose no performance issues.
+ To index a document's Counters by name, use [CounterNamesFor](../../document-extensions/counters/indexing.mdx#section-1). +### Counters and Queries + +Send the server **raw queries** for execution. + +* You can query Counters **by name** but **not by value**. + This is because queries are generally [based on indexes](../../start/getting-started.mdx#example-iii---querying), and Counter values are [not indexed](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing). +* Counter values **can** be [projected](../../indexes/querying/projections.mdx) from query results, as demonstrated in the following examples. + This way a client can get counter values from a query without downloading whole documents. + +* Use [RawQuery](../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) to send the server raw RQL expressions for execution. + * You can use the `counter` method. + **Returned Counter Value**: **Accumulated** + + * You can use the `counterRaw` method. + **Returned Counter Value**: **Distributed** + A Counter's value is returned as a series of values, each maintained by one of the nodes. + * It is not expected of you to use this in your application. + Applications normally use the Counter's overall value, and very rarely refer to the value each node gives it. + + + `counter` and `counterRaw` samples: + + + +{`//Various RQL expressions sent to the server using counter() +//Returned Counter value is accumulated +$rawQuery1 = $session->advanced() + ->rawQuery(CounterResult::class, "from products as p select counter(p, \\"ProductLikes\\")") + ->toList(); + +$rawQuery2 = $session->advanced() + ->rawQuery(CounterResult::class,"from products select counter(\\"ProductLikes\\") as ProductLikesCount") + ->toList(); + +$rawQuery3 = $session->advanced() + ->rawQuery(CounterResult::class,"from products where PricePerUnit > 50 select Name, counter(\\"ProductLikes\\")") + ->toList(); +`} + + + + +{`//An RQL expression sent to the server using counterRaw() +//Returned Counter value is distributed +$query = $session->advanced() + ->rawQuery(CounterResultRaw::class, "from users as u select counterRaw(u, \\"downloads\\")") + ->toList(); +`} + + + +### Counters and Revisions + +A document revision stores all the document Counters' names and values when the revision was created. + +* **Stored Counter Values**: **Accumulated** + A revision stores a Counter's value as a single sum, with no specification of the Counter's value on each node. + +* Revisions-creation can be initiated by **Counter-name modification**. + * When the Revisions feature is enabled, the creation or deletion of a Counter initiates the creation of a new document revision. + * Counter **value** modifications do **not** cause the creation of new revisions. +### Counters and Changes API + +[Changes API](../../client-api/changes/what-is-changes-api.mdx#changes-api) is a Push service, that can inform you of various changes on the Server, including [changes in Counter values](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#changes-api--how-to-subscribe-to-counter-changes). +You can target all Counters, or specify the ones you wish to follow. + +* **Pushed Counter Value**: **Accumulated** + `Changes API` methods return a Counter's value as a single sum, without specifying its value on each node. +* The service is initiated by **Counter Value Modification**. 
+### Counters and Ongoing Tasks + +Each [ongoing task](../../studio/database/tasks/ongoing-tasks/general-info.mdx) relates to Counters in its own way. + +* **Counters** and the **Backup task** + There are two [backup](../../studio/database/tasks/backup-task.mdx) types: **logical-backup** and **snapshot**. + Both types store and restore **all** data, including Counters. + Both types operate as an ongoing backup routine with a pre-set time schedule. + * Logical Backup: + **Backed-up Counter values**: **Distributed** + A logical backup is a higher-level implementation of Smuggler. + As with Smuggler, Counters are backed-up and restored including their values on all nodes. + * Snapshot: + A snapshot stores all data and settings as a single binary image. + All components, including Counters, are restored to the exact state they were in when the backup was made. + +* **Counters** and the **External Replication task** + The ongoing [external replication](../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx) task replicates all data, including Counters. + * **Replicated Counter Value**: **Distributed** + Counters are replicated along with their values on all nodes. + * Replication can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + +* **Counters** and the **ETL task** + [ETL](../../server/ongoing-tasks/etl/basics.mdx) is used to export data from RavenDB to an external (either Raven or SQL) database. + * [SQL ETL](../../server/ongoing-tasks/etl/sql.mdx) is **not supported**. + Counters cannot be exported to an SQL database over SQL ETL. + * [RavenDB ETL](../../server/ongoing-tasks/etl/raven.mdx) **is supported**. + Counters [are](../../server/ongoing-tasks/etl/raven.mdx#counters) exported over RavenDB ETL. + * Export can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + * **Exported Counter Value**: **Distributed** + Counters are exported along with their values on all nodes. + * Counters can be [exported using a script](../../server/ongoing-tasks/etl/raven.mdx#adding-counter-explicitly-in-a-script). + **Default behavior**: When an ETL script is not provided, Counters are exported. + + +### Counters and Other Features: Summary + +Use this table to see whether and how various RavenDB features are triggered by Counters, +and how the various features handle Counter values. + +* In the **Triggered By** column: + * _Document Change_ - Feature is triggered by a Counter Name update. + * _Counter Value Modification_ - Feature is triggered by a Counter Value modification. + * _Time Schedule_ - Feature is invoked by a pre-set time routine. + * _No Trigger_ - Feature is executed manually, through the Studio or by a Client. +* In the **Counter Value** column: + * _Accumulated_ - Counter Value is handled as a single accumulated sum. + * _Distributed_ - Counter Value is handled as a series of values maintained by cluster nodes. + +| **Feature** | **Triggered by** | **Counter Value** | +|-------------|:-------------|:-------------| +| [Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) | _Document Change_ | doesn't handle values | +| [LINQ Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) | _No trigger_ | _Accumulated_ | +| [Raw Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) | _No trigger_ | `counter()` - _Accumulated_
`counterRaw()` - _Distributed_ | +| [Backup Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Time Schedule_ | _Distributed_ | +| [RavenDB ETL Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Counter Value Modification_ | _Distributed_ | +| [External Replication Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Counter Value Modification_ | _Distributed_ | +| [Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) | _Counter Value Modification_ | _Accumulated_ | +| [Revision creation](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) | _Document Change_ | _Accumulated_ | +
+ + + diff --git a/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-python.mdx b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-python.mdx new file mode 100644 index 0000000000..9b23a449d9 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_counters-and-other-features-python.mdx @@ -0,0 +1,242 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This section describes the relationships between Counters and other RavenDB features: + * How Counters are supported by the different features. + * How Counters trigger features' execution. + +* In this page: + * [Counters and Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) + * [Counters and Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) + * [Counters and Revisions](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) + * [Counters and Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) + * [Counters and Ongoing Tasks](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) - `Backup`, `External replication`, `ETL`, `Data Subscription` + * [Counters and Other Features: summary](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-other-features-summary) + * [Counters Bulk-Insert](../../document-extensions/counters/counters-and-other-features.mdx#counters-bulk-insert) + + + +### Counters and Indexing + +Indexing Counters can speed-up finding them and the documents that contain them. + +* **Indexing Counter Values** + Dynamic indexes (aka auto-indexes) _cannot_ index counter values. To index counter values, + create a static index that inherits from `AbstractCountersIndexCreationTask` ([see here](../../document-extensions/counters/indexing.mdx)). + +* **Indexing Counter Names** + Re-indexing due to Counter-name modification is normally rare enough to pause no performance issues. + To index a document's Counters by name, use [CounterNamesFor](../../document-extensions/counters/indexing.mdx#section-1). +### Counters and Queries + +Create queries **using code**, or send the server **raw queries** for execution. + +* Either way, you can query Counters **by name** but **not by value**. + This is because queries are generally [based on indexes](../../start/getting-started.mdx#example-iii---querying), and Counter values are [not indexed](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing). +* Counter values **can** be [projected](../../indexes/querying/projections.mdx) from query results, as demonstrated in the following examples. + This way a client can get Counter values from a query without downloading whole documents. + +* Use [session.query](../../client-api/session/querying/how-to-query.mdx#sessionquery) to code queries yourself. + * **Returned Counter Value**: **Accumulated** + A Counter's value is returned as a single sum, with no specification of the Counter's value on each node. 
+ + +{`_include_single_Counter + # include a single Counter + query = session.query(object_type=Product).include(lambda builder: builder.include_counter("ProductLikes")) +`} + + + +* Use [RawQuery](../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) to send the server raw RQL expressions for execution. + * You can use the `counter` method. + **Returned Counter Value**: **Accumulated** + + * You can use the `counterRaw` method. + **Returned Counter Value**: **Distributed** + A Counter's value is returned as a series of values, each maintained by one of the nodes. + * It is not expected of you to use this in your application. + Applications normally use the Counter's overall value, and very rarely refer to the value each node gives it. + + + `counter` and `counterRaw` samples: + + + +{`# Various RQL expressions sent to the server using counter() +# Returned Counter value is accumulated +rawquery1 = list(session.advanced.raw_query("from products as p select counter(p, 'ProductLikes')")) + +rawquery2 = list( + session.advanced.raw_query("from products select counter('ProductLikes') as ProductLikesCount") +) + +rawquery3 = list( + session.advanced.raw_query("from products where PricePerUnit > 50 select Name, counter('ProductLikes')") +) +`} + + + + +{`# An RQL expression sent to the server using counterRaw() +# Returned Counter value is distributed +query = list(session.advanced.raw_query("from users as u select counterRaw(u, 'downloads')")) +`} + + + +### Counters and Revisions + +A [document revision](../../document-extensions/revisions/overview.mdx) stores all the document Counters' +names and values when the revision was created. + +* **Stored Counter Values**: **Accumulated** + A revision stores a Counter's value as a single sum, with no specification of the Counter's value on each node. + +* Revisions-creation can be initiated by **Counter-name modification**. + * When the Revisions feature is enabled, the creation or deletion of a Counter initiates the creation of a new document revision. + * Counter **value** modifications do **not** cause the creation of new revisions. +### Counters and Changes API + +[Changes API](../../client-api/changes/what-is-changes-api.mdx#changes-api) is a Push service, that can inform you of various changes on the Server, including [changes in Counter values](../../client-api/changes/how-to-subscribe-to-counter-changes.mdx#changes-api--how-to-subscribe-to-counter-changes). +You can target all Counters, or specify the ones you wish to follow. + +* **Pushed Counter Value**: **Accumulated** + `Changes API` methods return a Counter's value as a single sum, without specifying its value on each node. +* The service is initiated by **Counter Value Modification**. +### Counters and Ongoing Tasks: + +Each [ongoing task](../../studio/database/tasks/ongoing-tasks/general-info.mdx) relates to Counters in its own way. + +* **Counters** and the **Backup task** + There are two [backup](../../studio/database/tasks/backup-task.mdx) types: **logical-backup** and **snapshot**. + Both types store and restore **all** data, including Counters. + Both types operate as an ongoing backup routine, with a pre-set time schedule. + * Logical Backup: + **Backed-up Counter values**: **Distributed** + A logical backup is a higher-level implementation of [Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler). + As with Smuggler, Counters are backed-up and restored including their values on all nodes. 
+ * Snapshot: + A snapshot stores all data and settings as a single binary image. + All components, including Counters, are restored to the exact state they were in when the backup was made. + +* **Counters** and the **External Replication task** + The ongoing [external replication](../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx) task replicates all data, including Counters. + * **Replicated Counter Value**: **Distributed** + Counters are replicated along with their values on all nodes. + * Replication can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + +* **Counters** and the **ETL task** + [ETL](../../server/ongoing-tasks/etl/basics.mdx) is used to export data from RavenDB to an external (either Raven or SQL) database. + * [SQL ETL](../../server/ongoing-tasks/etl/sql.mdx) is **not supported**. + Counters cannot be exported to an SQL database over SQL ETL. + * [RavenDB ETL](../../server/ongoing-tasks/etl/raven.mdx) **is supported**. + Counters [are](../../server/ongoing-tasks/etl/raven.mdx#counters) exported over RavenDB ETL. + * Export can be initiated by both **Counter-name update** _and_ **Counter-value modification**. + * **Exported Counter Value**: **Distributed** + Counters are exported along with their values on all nodes. + * Counters can be [exported using a script](../../server/ongoing-tasks/etl/raven.mdx#adding-counter-explicitly-in-a-script). + **Default behavior**: When an ETL script is not provided, Counters are exported. + +* **Counters** and the **[Data Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx#data-subscriptions) task** + Data Subscriptions can be initiated by document changes, including those caused by **Counter Name updates**. + Documents will **not** be delivered in reaction to Counter Value modification. + + + +### Counters and Other Features: Summary + +Use this table to see whether and how various RavenDB features are triggered by Counters, +and how the various features handle Counter values. + +* In the **Triggered By** column: + * _Document Change_ - Feature is triggered by a Counter Name update. + * _Counter Value Modification_ - Feature is triggered by a Counter Value modification. + * _Time Schedule_ - Feature is invoked by a pre-set time routine. + * _No Trigger_ - Feature is executed manually, through the Studio or by a Client. +* In the **Counter Value** column: + * _Accumulated_ - Counter Value is handled as a single accumulated sum. + * _Distributed_ - Counter Value is handled as a series of values maintained by cluster nodes. + +| **Feature** | **Triggered by** | **Counter Value** | +|-------------|:-------------|:-------------| +| [Indexing](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-indexing) | _Document Change_ | doesn't handle values | +| [LINQ Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) | _No trigger_ | _Accumulated_ | +| [Raw Queries](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-queries) | _No trigger_ | `counter()` - _Accumulated_
`counterRaw()` - _Distributed_ | +| [Smuggler](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-smuggler) | _No trigger_ | _Distributed_ | +| [Backup Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Time Schedule_ | _Distributed_ | +| [RavenDB ETL Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Countrer Value Modification_ | _Distributed_ | +| [External Replication task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_,
_Countrer Value Modification_ | _Distributed_ | +| [Data Subscriptions Update Task](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-ongoing-tasks) | _Document Change_ | | +| [Changes API](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-changes-api) | _Countrer Value Modification_ | _Accumulated_ | +| [Revision creation](../../document-extensions/counters/counters-and-other-features.mdx#counters-and-revisions) | _Document Change_ | _Accumulated_ | + +
+
+
+### Counters Bulk-Insert
+`store.bulk_insert` is RavenDB's high-performance data insertion operation.
+Use the `increment` method of its `counters_for` interface to add or update Counters at high speed.
+
+* Syntax
+
+  * `counters_for`
+
+
+{`def counters_for(self, id_: str) -> CountersBulkInsert:
+    ...
+`}
+
+
+
+    | Parameters | Type | Description |
+    |:-------------|:-------------|:-------------|
+    | **id_** | `str` | Document ID |
+
+  * `increment`
+
+
+{`def increment(self, name: str, delta: int = 1) -> None: ...
+`}
+
+
+
+    | Parameters | Type | Description |
+    |:-------------|:-------------|:-------------|
+    | **name** | `str` | Counter Name |
+    | **delta** | `int` | The value to add to the Counter. Default: 1 |
+
+* Usage Flow
+
+  * Create a `store.bulk_insert` instance.
+  * Pass the document ID to the instance's `counters_for` method.
+  * Call `increment` as many times as you like, passing it
+    the Counter name and the value (delta) to be added.
+
+* Usage Sample
+  In this sample, we attach a counter to the documents of all users under the age of 30.
+
+
+{`with store.open_session() as session:
+    result = list(session.query(object_type=User).where_less_than("Age", 30))
+    users_ids = [session.advanced.get_document_id(user) for user in result]
+
+with store.bulk_insert() as bulk_insert:
+    for user_id in users_ids:
+        # Choose the document
+        counters_for = bulk_insert.counters_for(user_id)
+
+        # Add or increment a counter
+        counters_for.increment("download", 100)
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-csharp.mdx b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-csharp.mdx
new file mode 100644
index 0000000000..c3df9e709a
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-csharp.mdx
@@ -0,0 +1,81 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `CountersFor.Increment` method to **create** a new Counter or **modify** an existing Counter's value.
+
+* If the Counter exists, `Increment` will add the specified number to the Counter's current value.
+  If the Counter doesn't exist, `Increment` will create it and set its initial value.
+
+* For all other `CountersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  - [`Increment` usage](../../document-extensions/counters/create-or-modify.mdx#increment-usage)
+  - [Example](../../document-extensions/counters/create-or-modify.mdx#example)
+  - [Syntax](../../document-extensions/counters/create-or-modify.mdx#syntax)
+
+
+## `Increment` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `CountersFor`.
+    * Either pass `CountersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.Query](../../client-api/session/querying/how-to-query.mdx) or from [session.Load](../../client-api/session/loading-entities.mdx#load).
+* Call `CountersFor.Increment`.
+* Call `session.SaveChanges` for the changes to take effect.
+
+**Note**:
+
+* Modifying a Counter using `Increment` only takes effect when `session.SaveChanges()` is executed.
+* To **decrease** a Counter's value, pass a negative number to the `Increment` method.
+
+
+
+## Example
+
+
+
+{`// 1. Open a session
+using (var session = docStore.OpenSession())
+\{
+    // 2. Pass CountersFor's constructor a document ID
+    var documentCounters = session.CountersFor("products/1-C");
+
+    // 3. Use \`CountersFor.Increment\`
+    documentCounters.Increment("ProductLikes");         // Increase "ProductLikes" by 1, or create it with a value of 1
+    documentCounters.Increment("ProductDislikes", 1);   // Increase "ProductDislikes" by 1, or create it with a value of 1
+    documentCounters.Increment("ProductPageViews", 15); // Increase "ProductPageViews" by 15, or create it with a value of 15
+    documentCounters.Increment("DaysLeftForSale", -10); // Decrease "DaysLeftForSale" by 10, or create it with a value of -10
+
+    // 4. Save changes to the session
+    session.SaveChanges();
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`void Increment(string counterName, long incrementValue = 1);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|---------|------------------------------------------------------------------------------------------------------------------|
+| `counterName` | string | Counter's name |
+| `incrementValue` | long | Increase Counter by this value.<br/>Default value is 1.<br/>For a new Counter, this number will be its initial value. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-java.mdx b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-java.mdx
new file mode 100644
index 0000000000..073949f313
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-java.mdx
@@ -0,0 +1,81 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `countersFor.increment` method to **create** a new Counter or **modify** an existing Counter's value.
+
+* If the Counter exists, `increment` will add the specified number to the Counter's current value.
+  If the Counter doesn't exist, `increment` will create it and set its initial value.
+
+* For all other `countersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  - [`increment` usage](../../document-extensions/counters/create-or-modify.mdx#increment-usage)
+  - [Example](../../document-extensions/counters/create-or-modify.mdx#example)
+  - [Syntax](../../document-extensions/counters/create-or-modify.mdx#syntax)
+
+
+## `increment` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an entity tracked by the session, e.g. a document object returned from `session.query` or from `session.load`.
+* Call `countersFor.increment`.
+* Call `session.saveChanges` for the changes to take effect.
+
+**Note**:
+
+* Modifying a Counter using `increment` only takes effect when `session.saveChanges()` is executed.
+* To **decrease** a Counter's value, pass a negative number to the `increment` method.
+
+
+
+## Example
+
+
+
+{`// 1. Open a session
+try (IDocumentSession session = docStore.openSession()) \{
+    // 2. Pass countersFor a document ID
+    ISessionDocumentCounters documentCounters = session.countersFor("products/1-C");
+
+    // 3. Use \`countersFor.increment\`
+    documentCounters.increment("productLikes");         // Increase "productLikes" by 1, or create it with a value of 1
+    documentCounters.increment("productDislikes", 1);   // Increase "productDislikes" by 1, or create it with a value of 1
+    documentCounters.increment("productPageViews", 15); // Increase "productPageViews" by 15, or create it with a value of 15
+    documentCounters.increment("daysLeftForSale", -10); // Decrease "daysLeftForSale" by 10, or create it with a value of -10
+
+    // 4. Save changes to the session
+    session.saveChanges();
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`void increment(String counterName);
+
+void increment(String counterName, long delta);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|--------|------------------------------------------------------------------------------------------------------------------|
+| `counterName` | String | Counter's name |
+| `delta` | long | Increase Counter by this value.<br/>Default value is 1.<br/>For a new Counter, this number will be its initial value. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-nodejs.mdx
new file mode 100644
index 0000000000..178d4519f6
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-nodejs.mdx
@@ -0,0 +1,87 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `countersFor.increment` method to **create** a new Counter or **modify** an existing Counter's value.
+
+* If the Counter exists, `increment` will add the specified number to the Counter's current value.
+  If the Counter doesn't exist, `increment` will create it and set its initial value.
+
+* For all other `countersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  - [`increment` usage](../../document-extensions/counters/create-or-modify.mdx#increment-usage)
+  - [Example](../../document-extensions/counters/create-or-modify.mdx#example)
+  - [Syntax](../../document-extensions/counters/create-or-modify.mdx#syntax)
+
+
+## `increment` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `countersFor.increment`.
+* Call `session.saveChanges` for the changes to take effect.
+
+**Note**:
+
+* Modifying a Counter using `increment` only takes effect when `session.saveChanges()` is executed.
+* To **decrease** a Counter's value, pass a negative number to the `increment` method.
+
+
+
+## Example
+
+
+
+{`// Open a session
+const session = documentStore.openSession();
+
+// Pass a document ID to the countersFor constructor
+const documentCounters = session.countersFor("products/1-A");
+
+// Use \`countersFor.increment\`:
+// ============================
+
+// Increase "ProductLikes" by 1, or create it if doesn't exist with a value of 1
+documentCounters.increment("ProductLikes", 1);
+
+// Increase "ProductPageViews" by 15, or create it if doesn't exist with a value of 15
+documentCounters.increment("ProductPageViews", 15);
+
+// Decrease "DaysLeftForSale" by 10, or create it if doesn't exist with a value of -10
+documentCounters.increment("DaysLeftForSale", -10);
+
+// Save changes
+await session.saveChanges();
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`increment(counter);
+increment(counter, delta);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|---------|----------------------------------------------------------------------------------------------------------------------|
+| `counter` | string | The counter's name |
+| `delta` | number | Increase Counter by this value.<br/>Default value is 1.<br/>For a new Counter, this number will be its initial value. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-php.mdx b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-php.mdx
new file mode 100644
index 0000000000..59aea48c6b
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-php.mdx
@@ -0,0 +1,99 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `countersFor.increment` method to **create** a new Counter or **modify** an existing Counter's value.
+
+* If the Counter exists, `increment` will add the specified number to the Counter's current value.
+  If the Counter doesn't exist, `increment` will create it and set its initial value.
+
+* For all other `countersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  - [`increment` usage](../../document-extensions/counters/create-or-modify.mdx#increment-usage)
+  - [Example](../../document-extensions/counters/create-or-modify.mdx#example)
+  - [Syntax](../../document-extensions/counters/create-or-modify.mdx#syntax)
+
+
+## `increment` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `countersFor.increment`.
+* Call `session.saveChanges` for the changes to take effect.
+
+**Note**:
+
+* Modifying a Counter using `increment` only takes effect when `session.saveChanges()` is executed.
+* To **decrease** a Counter's value, pass a negative number to the `increment` method.
+
+
+
+## Example
+
+
+
+{`// Open a session
+$session = $docStore->openSession();
+try \{
+    // Pass the countersFor constructor a document ID
+    $documentCounters = $session->countersFor("products/1-A");
+
+    // Use \`countersFor.increment\`:
+    // ============================
+
+    // Increase "ProductLikes" by 1, or create it if doesn't exist with a value of 1
+    $documentCounters->increment("ProductLikes");
+
+    // Increase "ProductPageViews" by 15, or create it if doesn't exist with a value of 15
+    $documentCounters->increment("ProductPageViews", 15);
+
+    // Decrease "DaysLeftForSale" by 10, or create it if doesn't exist with a value of -10
+    $documentCounters->increment("DaysLeftForSale", -10);
+
+    // Execute all changes by calling saveChanges
+    $session->saveChanges();
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`public function countersFor(string|object $idOrEntity): SessionDocumentCountersInterface;
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|-------|-----------------|
+| **idOrEntity** | `string` or `object` | The document ID or entity to create or modify Counters for |
+
+
+{`public function increment(?string $counter, int $delta = 1): void;
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|-------|-----------------|
+| **counter** | `string` | Counter name |
+| **delta** | `int` | Increase Counter by this value.<br/>Default value is 1.<br/>For a new Counter, this number will be its initial value. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-python.mdx b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-python.mdx
new file mode 100644
index 0000000000..9eb1ac9c70
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_create-or-modify-python.mdx
@@ -0,0 +1,85 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `counters_for.increment` method to **create** a new Counter or **modify** an existing Counter's value.
+
+* If the Counter exists, `increment` will add the specified number to the Counter's current value.
+  If the Counter doesn't exist, `increment` will create it and set its initial value.
+
+* For all other `counters_for` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  - [`increment` usage](../../document-extensions/counters/create-or-modify.mdx#increment-usage)
+  - [Example](../../document-extensions/counters/create-or-modify.mdx#example)
+  - [Syntax](../../document-extensions/counters/create-or-modify.mdx#syntax)
+
+
+## `increment` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `counters_for`.
+    * Either pass `counters_for` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `counters_for.increment`.
+* Call `session.save_changes` for the changes to take effect.
+
+**Note**:
+
+* Modifying a Counter using `increment` only takes effect when `session.save_changes()` is executed.
+* To **decrease** a Counter's value, pass a negative number to the `increment` method.
+
+
+
+## Example
+
+
+
+{`# Open a session
+with store.open_session() as session:
+    # Pass counters_for a document ID
+    document_counters = session.counters_for("products/1-A")
+
+    # Use 'counters_for.increment'
+    # ============================
+
+    # Increase "ProductLikes" by 1, or create it if doesn't exist with a value of 1
+    document_counters.increment("ProductLikes")
+
+    # Increase "ProductPageViews" by 15, or create it if doesn't exist with a value of 15
+    document_counters.increment("ProductPageViews", 15)
+
+    # Decrease "DaysLeftForSale" by 10, or create it if doesn't exist with a value of -10
+    document_counters.increment("DaysLeftForSale", -10)
+
+    # Execute all changes by calling save_changes
+    session.save_changes()
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`def increment(self, counter: str, delta: int = 1) -> None: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|-------|-----------------|
+| **counter** | `str` | Counter's name |
+| **delta** | `int` | Increase Counter by this value.<br/>Default value is 1.<br/>For a new Counter, this number will be its initial value. |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_delete-csharp.mdx b/versioned_docs/version-7.1/document-extensions/counters/_delete-csharp.mdx
new file mode 100644
index 0000000000..70370db71c
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_delete-csharp.mdx
@@ -0,0 +1,77 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `CountersFor.Delete` method to remove a specific Counter from a document.
+
+* All the document's Counters are deleted when the document itself is deleted.
+
+* For all other `CountersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  * [`Delete` usage](../../document-extensions/counters/delete.mdx#delete-usage)
+  * [Example](../../document-extensions/counters/delete.mdx#example)
+  * [Syntax](../../document-extensions/counters/delete.mdx#syntax)
+
+
+## `Delete` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `CountersFor`.
+    * Either pass `CountersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.Query](../../client-api/session/querying/how-to-query.mdx) or from [session.Load](../../client-api/session/loading-entities.mdx#load).
+* Call `CountersFor.Delete`.
+* Call `session.SaveChanges` for the changes to take effect.
+
+**Note**:
+
+* A Counter you deleted will be removed only after the execution of `SaveChanges()`.
+* `Delete` will **not** generate an error if the Counter doesn't exist.
+* Deleting a document deletes all its Counters as well.
+
+
+
+## Example
+
+
+
+{`// 1. Open a session
+using (var session = docStore.OpenSession())
+\{
+    // 2. Pass CountersFor's constructor a document ID
+    var documentCounters = session.CountersFor("products/1-C");
+
+    // 3. Delete the "ProductLikes" Counter
+    documentCounters.Delete("ProductLikes");
+
+    // 4. Save changes to the session
+    session.SaveChanges();
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`void Delete(string counterName);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|--------|----------------|
+| `counterName` | string | Counter's name |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_delete-java.mdx b/versioned_docs/version-7.1/document-extensions/counters/_delete-java.mdx
new file mode 100644
index 0000000000..9fe457ef7b
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_delete-java.mdx
@@ -0,0 +1,75 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `countersFor.delete` method to remove a specific Counter from a document.
+
+* All the document's Counters are deleted when the document itself is deleted.
+
+* For all other `countersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  * [`delete` usage](../../document-extensions/counters/delete.mdx#delete-usage)
+  * [Example](../../document-extensions/counters/delete.mdx#example)
+  * [Syntax](../../document-extensions/counters/delete.mdx#syntax)
+
+
+## `delete` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an entity tracked by the session, e.g. a document object returned from `session.query` or from `session.load`.
+* Call `countersFor.delete`.
+* Call `session.saveChanges` for the changes to take effect.
+
+**Note**:
+
+* A Counter you deleted will be removed only after the execution of `saveChanges()`.
+* `delete` will **not** generate an error if the Counter doesn't exist.
+* Deleting a document deletes all its Counters as well.
+
+
+
+## Example
+
+
+
+{`// 1. Open a session
+try (IDocumentSession session = docStore.openSession()) \{
+    // 2. Pass countersFor a document ID
+    ISessionDocumentCounters documentCounters = session.countersFor("products/1-C");
+
+    // 3. Delete the "productLikes" Counter
+    documentCounters.delete("productLikes");
+
+    // 4. Save changes to the session
+    session.saveChanges();
+\}
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`void delete(String counterName);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|--------|-----------------|
+| `counterName` | String | Counter's name |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_delete-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/counters/_delete-nodejs.mdx
new file mode 100644
index 0000000000..39ee32e66d
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_delete-nodejs.mdx
@@ -0,0 +1,76 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `countersFor.delete` method to remove a specific Counter from a document.
+
+* All the document's Counters are deleted when the document itself is deleted.
+
+* For all other `countersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  * [`delete` usage](../../document-extensions/counters/delete.mdx#delete-usage)
+  * [Example](../../document-extensions/counters/delete.mdx#example)
+  * [Syntax](../../document-extensions/counters/delete.mdx#syntax)
+
+
+## `delete` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `countersFor.delete`.
+* Call `session.saveChanges` for the changes to take effect.
+
+**Note**:
+
+* A Counter you deleted will be removed only after the execution of `saveChanges()`.
+* `delete` will **not** generate an error if the Counter doesn't exist.
+* Deleting a document deletes all its Counters as well.
+
+
+
+## Example
+
+
+
+{`// Open a session
+const session = documentStore.openSession();
+
+// Pass a document ID to the countersFor constructor
+const documentCounters = session.countersFor("products/1-A");
+
+// Delete the "ProductLikes" Counter
+documentCounters.delete("ProductLikes");
+
+// The 'delete' is executed upon calling saveChanges
+await session.saveChanges();
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`delete(counter);
+`}
+
+
+
+| Parameter | Type | Description |
+|----------------|--------|----------------|
+| `counter` | string | Counter's name |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_delete-php.mdx b/versioned_docs/version-7.1/document-extensions/counters/_delete-php.mdx
new file mode 100644
index 0000000000..db58469ca0
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_delete-php.mdx
@@ -0,0 +1,55 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `countersFor.delete` method to remove a specific Counter from a document.
+
+* All the document's Counters are deleted when the document itself is deleted.
+
+* For all other `countersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  * [`delete` usage](../../document-extensions/counters/delete.mdx#delete-usage)
+  * [Syntax](../../document-extensions/counters/delete.mdx#syntax)
+
+
+## `delete` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `countersFor.delete`.
+* Call `session.saveChanges` for the changes to take effect.
+
+**Note**:
+
+* A Counter you deleted will be removed only after the execution of `saveChanges()`.
+* `delete` will **not** generate an error if the Counter doesn't exist.
+* Deleting a document deletes all its Counters as well.
+
+
+
+## Syntax
+
+
+
+{`public function delete(?string $counter): void;
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|--------|----------------|
+| **counter** | `string` | Counter name ([see example](../../document-extensions/counters/overview.mdx#managing-counters)) |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_delete-python.mdx b/versioned_docs/version-7.1/document-extensions/counters/_delete-python.mdx
new file mode 100644
index 0000000000..7f61e2e770
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_delete-python.mdx
@@ -0,0 +1,55 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `counters_for.delete` method to remove a specific Counter from a document.
+
+* All the document's Counters are deleted when the document itself is deleted.
+
+* For all other `counters_for` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+  * [`delete` usage](../../document-extensions/counters/delete.mdx#delete-usage)
+  * [Syntax](../../document-extensions/counters/delete.mdx#syntax)
+
+
+## `delete` usage
+
+**Flow**:
+
+* Open a session.
+* Create an instance of `counters_for`.
+    * Either pass `counters_for` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `counters_for.delete`.
+* Call `session.save_changes` for the changes to take effect.
+
+**Note**:
+
+* A Counter you deleted will be removed only after the execution of `save_changes()`.
+* `delete` will **not** generate an error if the Counter doesn't exist.
+* Deleting a document deletes all its Counters as well.
+
+
+
+## Syntax
+
+
+
+{`def delete(self, counter: str) -> None: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|--------|----------------|
+| **counter** | `str` | Counter name ([see example](../../document-extensions/counters/overview.mdx#managing-counters)) |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_including-counters-csharp.mdx b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-csharp.mdx
new file mode 100644
index 0000000000..63d62b6c14
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-csharp.mdx
@@ -0,0 +1,732 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import ContentFrame from '@site/src/components/ContentFrame';
+import Panel from '@site/src/components/Panel';
+
+
+
+* Counters can be included when [loading entities](../../client-api/session/loading-entities)
+  or when making [queries](../../client-api/session/querying/how-to-query).
+
+* The _Session_ stores the included counters in its in-memory cache,
+  so their values can be accessed later in the same session without making additional requests to the server.
+
+* To see how to include counters when creating a subscription, see
+  [Create subscription - include counters](../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters).
+
+* In this article:
+  * [Sample data](../../document-extensions/counters/including-counters.mdx#sample-data)
+  * [Include counters when **loading**](../../document-extensions/counters/including-counters.mdx#include-counters-when-loading)
+      * [Include single counter](../../document-extensions/counters/including-counters.mdx#include-single-counter)
+      * [Include multiple counters](../../document-extensions/counters/including-counters.mdx#include-multiple-counters)
+      * [Include all counters](../../document-extensions/counters/including-counters.mdx#include-all-counters)
+  * [Include counters when **querying**](../../document-extensions/counters/including-counters.mdx#include-counters-when-querying)
+      * [Include single counter](../../document-extensions/counters/including-counters.mdx#include-single-counter-1)
+      * [Include multiple counters](../../document-extensions/counters/including-counters.mdx#include-multiple-counters-1)
+      * [Include all counters](../../document-extensions/counters/including-counters.mdx#include-all-counters-1)
+  * [Including multiple item types](../../document-extensions/counters/including-counters.mdx#including-multiple-item-types)
+  * [Include behavior and constraints](../../document-extensions/counters/including-counters.mdx#include-behavior-and-constraints)
+  * [Syntax](../../document-extensions/counters/including-counters.mdx#syntax)
+
+
+
+
+The examples in this article are based on the following **sample data**:
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    var documentCounters = session.CountersFor("products/1-A");
+
+    // Increase counter "Likes" by 1, or create it with a value of 1 if it doesn't exist
+    documentCounters.Increment("Likes");
+    // Increase counter "Dislikes" by 1, or create it with a value of 1 if it doesn't exist
+    documentCounters.Increment("Dislikes", 1);
+    // Increase counter "Downloads" by 15, or create it with a value of 15 if it doesn't exist
+    documentCounters.Increment("Downloads", 15);
+
+    session.SaveChanges();
+}
+```
+
+
+```csharp
+public class Product
+{
+    public string Id { get; set; }
+    public string Name { get; set; }
+    public string Supplier { get; set; }
+    public string Description { get; set; }
+}
+```
+
+
+
+
+
+
+
+
+### Include single counter:
+
+Include a single counter using `IncludeCounter()`:
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Load document "products/1-A"
+    var product = session.Load<Product>("products/1-A", includeBuilder =>
+        // Call 'IncludeCounter', pass the name of the counter to include
+        includeBuilder.IncludeCounter("Likes"));
+
+    // The included counter is now cached in the session's memory.
+    // Getting its value will NOT trigger a server request
+    var productLikes = session
+        .CountersFor("products/1-A")
+        .Get("Likes");
+
+    Console.WriteLine("Number of likes: " + productLikes);
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Load document "products/1-A"
+    var product = await asyncSession.LoadAsync<Product>("products/1-A", includeBuilder =>
+        // Call 'IncludeCounter', pass the name of the counter to include
+        includeBuilder.IncludeCounter("Likes"));
+
+    // The included counter is now cached in the session's memory.
+    // Getting its value will NOT trigger a server request.
+    var productLikes = await asyncSession
+        .CountersFor("products/1-A")
+        .GetAsync("Likes");
+
+    Console.WriteLine("Number of likes: " + productLikes);
+}
+```
+
+
+
+
+
+
+
+### Include multiple counters:
+
+Include multiple counters using `IncludeCounters()`:
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Load document "products/1-A"
+    var product = session.Load<Product>("products/1-A", includeBuilder =>
+        // Call 'IncludeCounters', pass a list of the counter names to include
+        includeBuilder.IncludeCounters(new[] { "Likes", "Downloads" }));
+
+    // The included counters are now cached in the session's memory.
+    // Getting their values will NOT trigger a server request.
+    var productDownloads = session
+        .CountersFor("products/1-A")
+        .Get("Downloads");
+
+    Console.WriteLine("Number of downloads: " + productDownloads);
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Load document "products/1-A"
+    var product = await asyncSession.LoadAsync<Product>("products/1-A", includeBuilder =>
+        // Call 'IncludeCounters' and pass a list of counter names to include
+        includeBuilder.IncludeCounters(new[] { "Likes", "Downloads" }));
+
+    // The included counters are now cached in the session's memory.
+    // Getting their values will NOT trigger a server request.
+    var productDownloads = await asyncSession
+        .CountersFor("products/1-A")
+        .GetAsync("Downloads");
+
+    Console.WriteLine("Number of downloads: " + productDownloads);
+}
+```
+
+
+
+You can also include multiple counters by chaining multiple `IncludeCounter()` calls:
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Load document "products/1-A"
+    var product = session.Load<Product>("products/1-A", includeBuilder =>
+        // Include multiple counters by chaining 'IncludeCounter' calls
+        includeBuilder
+            .IncludeCounter("Likes")
+            .IncludeCounter("Downloads"));
+
+    // The included counters are now cached in the session's memory.
+    // Getting their values will NOT trigger a server request.
+    var productDownloads = session
+        .CountersFor("products/1-A")
+        .Get("Downloads");
+
+    Console.WriteLine("Number of downloads: " + productDownloads);
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Load document "products/1-A"
+    var product = await asyncSession.LoadAsync<Product>("products/1-A", includeBuilder =>
+        // Include multiple counters by chaining 'IncludeCounter' calls
+        includeBuilder
+            .IncludeCounter("Likes")
+            .IncludeCounter("Downloads"));
+
+    // The included counters are now cached in the session's memory.
+    // Getting their values will NOT trigger a server request.
+    var productDownloads = await asyncSession
+        .CountersFor("products/1-A")
+        .GetAsync("Downloads");
+
+    Console.WriteLine("Number of downloads: " + productDownloads);
+}
+```
+
+
+
+
+
+
+
+### Include all counters:
+
+Include ALL counters using `IncludeAllCounters()`:
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Load document "products/1-A"
+    var product = session.Load<Product>("products/1-A", includeBuilder =>
+        // Call 'IncludeAllCounters' to include all counters
+        includeBuilder.IncludeAllCounters());
+
+    // All counters belonging to the document are now cached in the session's memory.
+    // Getting their values will NOT trigger a server request.
+    var productDislikes = session
+        .CountersFor("products/1-A")
+        .Get("Dislikes");
+
+    Console.WriteLine("Number of dislikes: " + productDislikes);
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Load document "products/1-A"
+    var product = await asyncSession.LoadAsync<Product>("products/1-A", includeBuilder =>
+        // Call 'IncludeAllCounters' to include all counters
+        includeBuilder.IncludeAllCounters());
+
+    // All counters belonging to the document are now cached in the session's memory.
+    // Getting their values will NOT trigger a server request.
+    var productDislikes = await asyncSession
+        .CountersFor("products/1-A")
+        .GetAsync("Dislikes");
+
+    Console.WriteLine("Number of dislikes: " + productDislikes);
+}
+```
+
+
+
+
+
+
+
+
+
+### Include single counter:
+
+Use `IncludeCounter()` to include a single counter for each resulting document.
+The counter with the specified name will be cached in the session's memory for each document returned.
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Query for product documents
+    var products = session.Query<Product>()
+        // Filter documents as needed
+        .Where(x => x.Supplier == "suppliers/1-A")
+        .Include(includeBuilder =>
+            // Include the "Likes" counter for each matching document
+            includeBuilder.IncludeCounter("Likes"))
+        .ToList();
+
+    foreach (var product in products)
+    {
+        // Access the counters included in the session
+        // Getting their values will NOT trigger a server request.
+        var productLikes = session.CountersFor(product).Get("Likes");
+
+        if (productLikes != null)
+            Console.WriteLine($"Product '{product.Id}' has {productLikes} likes.");
+        else
+            Console.WriteLine($"Product '{product.Id}' has no 'Likes' counter.");
+    }
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Query for product documents
+    var products = await asyncSession.Query<Product>()
+        // Filter documents as needed
+        .Where(x => x.Supplier == "suppliers/1-A")
+        .Include(includeBuilder =>
+            // Include the "Likes" counter for each matching document
+            includeBuilder.IncludeCounter("Likes"))
+        .ToListAsync();
+
+    foreach (var product in products)
+    {
+        // Access the counters included in the session
+        // Getting their values will NOT trigger a server request.
+        var productLikes = await asyncSession.CountersFor(product).GetAsync("Likes");
+
+        if (productLikes != null)
+            Console.WriteLine($"Product '{product.Id}' has {productLikes} likes.");
+        else
+            Console.WriteLine($"Product '{product.Id}' has no 'Likes' counter.");
+    }
+}
+```
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    var products = session.Advanced.RawQuery<Product>(@"
+        from Products as p
+        where p.Supplier == 'suppliers/1-A'
+        include counters(p, 'Likes')
+    ")
+    .ToList();
+
+    foreach (var product in products)
+    {
+        var likes = session.CountersFor(product).Get("Likes");
+
+        if (likes != null)
+            Console.WriteLine($"Product '{product.Id}' has {likes} likes.");
+        else
+            Console.WriteLine($"Product '{product.Id}' has no 'Likes' counter.");
+    }
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var products = await asyncSession.Advanced.AsyncRawQuery<Product>(@"
+        from Products as p
+        where p.Supplier == 'suppliers/1-A'
+        include counters(p, 'Likes')
+    ")
+    .ToListAsync();
+
+    foreach (var product in products)
+    {
+        var likes = await asyncSession.CountersFor(product).GetAsync("Likes");
+
+        if (likes != null)
+            Console.WriteLine($"Product '{product.Id}' has {likes} likes.");
+        else
+            Console.WriteLine($"Product '{product.Id}' has no 'Likes' counter.");
+    }
+}
+```
+
+
+```sql
+from "Products"
+where Supplier == "suppliers/1-A"
+include counters("Likes")
+```
+
+
+
+
+
+
+
+### Include multiple counters:
+
+Use `IncludeCounters()` to include multiple counters for each resulting document.
+Counters with the specified names will be cached in the session's memory for each document returned.
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Query for product documents
+    var products = session.Query<Product>()
+        // Filter documents as needed
+        .Where(x => x.Supplier == "suppliers/1-A")
+        .Include(includeBuilder =>
+            // Call 'IncludeCounters', pass a list of counter names to include
+            // Alternatively, you can chain individual calls:
+            // includeBuilder.IncludeCounter("Likes").IncludeCounter("Downloads")
+            includeBuilder.IncludeCounters(new[] { "Likes", "Downloads" }))
+        .ToList();
+
+    foreach (var product in products)
+    {
+        // Access the counters included in the session
+        // Getting their values will NOT trigger a server request.
+        var productLikes = session.CountersFor(product).Get("Likes");
+        var productDownloads = session.CountersFor(product).Get("Downloads");
+    }
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Query for product documents
+    var products = await asyncSession.Query<Product>()
+        // Filter documents as needed
+        .Where(x => x.Supplier == "suppliers/1-A")
+        .Include(includeBuilder =>
+            // Call 'IncludeCounters', pass a list of counter names to include
+            // Alternatively, you can chain individual calls:
+            // includeBuilder.IncludeCounter("Likes").IncludeCounter("Downloads")
+            includeBuilder.IncludeCounters(new[] { "Likes", "Downloads" }))
+        .ToListAsync();
+
+    foreach (var product in products)
+    {
+        // Access the counters included in the session
+        // Getting their values will NOT trigger a server request.
+        var productLikes = await asyncSession.CountersFor(product).GetAsync("Likes");
+        var productDownloads = await asyncSession.CountersFor(product).GetAsync("Downloads");
+    }
+}
+```
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    var products = session.Advanced.RawQuery<Product>(@"
+        from Products
+        where Supplier == 'suppliers/1-A'
+        include counters('Likes'), counters('Downloads')
+    ")
+    .ToList();
+
+    foreach (var product in products)
+    {
+        var likes = session.CountersFor(product).Get("Likes");
+        var downloads = session.CountersFor(product).Get("Downloads");
+    }
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var products = await asyncSession.Advanced.AsyncRawQuery<Product>(@"
+        from Products
+        where Supplier == 'suppliers/1-A'
+        include counters('Likes'), counters('Downloads')
+    ")
+    .ToListAsync();
+
+    foreach (var product in products)
+    {
+        var likes = await asyncSession.CountersFor(product).GetAsync("Likes");
+        var downloads = await asyncSession.CountersFor(product).GetAsync("Downloads");
+    }
+}
+```
+
+
+```sql
+from "Products"
+where Supplier = "suppliers/1-A"
+include counters("Likes"), counters("Downloads")
+```
+
+
+
+
+
+
+
+### Include all counters:
+
+Use `IncludeAllCounters()` to include ALL counters.
+All counters for each resulting document will be cached in the session's memory.
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Query for product documents
+    var products = session.Query<Product>()
+        // Filter documents as needed
+        .Where(x => x.Supplier == "suppliers/1-A")
+        .Include(includeBuilder =>
+            // Include ALL counters for each matching document
+            includeBuilder.IncludeAllCounters())
+        .ToList();
+
+    foreach (var product in products)
+    {
+        // All counters for the resulting documents are now cached in the session's memory
+        // Getting their values will NOT trigger a server request
+        var productDislikes = session.CountersFor(product).Get("Dislikes");
+    }
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    // Query for product documents
+    var products = await asyncSession.Query<Product>()
+        // Filter documents as needed
+        .Where(x => x.Supplier == "suppliers/1-A")
+        .Include(includeBuilder =>
+            // Include ALL counters for each matching document
+            includeBuilder.IncludeAllCounters())
+        .ToListAsync();
+
+    foreach (var product in products)
+    {
+        // All counters for the resulting documents are now cached in the session's memory
+        // Getting their values will NOT trigger a server request
+        var productDislikes = await asyncSession.CountersFor(product).GetAsync("Dislikes");
+    }
+}
+```
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    var products = session.Advanced.RawQuery<Product>(@"
+        from Products
+        where Supplier == 'suppliers/1-A'
+        include counters()
+    ")
+    .ToList();
+
+    foreach (var product in products)
+    {
+        var productDislikes = session.CountersFor(product).Get("Dislikes");
+    }
+}
+```
+
+
+```csharp
+using (var asyncSession = store.OpenAsyncSession())
+{
+    var products = await asyncSession.Advanced.AsyncRawQuery<Product>(@"
+        from Products
+        where Supplier == 'suppliers/1-A'
+        include counters()
+    ")
+    .ToListAsync();
+
+    foreach (var product in products)
+    {
+        var productDislikes = await asyncSession.CountersFor(product).GetAsync("Dislikes");
+    }
+}
+```
+
+
+```sql
+from "Products"
+where Supplier == "suppliers/1-A"
+include counters()
+```
+
+
+
+
+
+
+
+
+You can combine different include types **when loading or querying** a document using the fluent _includeBuilder_ syntax.
+For example, you can include counters, related documents, time series, compare-exchange values, or past revisions.
+This allows you to retrieve all related data in a single server call and avoid additional round trips later in the session.
+
+The order in which you chain the include methods does not matter. For example:
+
+
+```csharp
+using (var session = store.OpenSession(new SessionOptions
+{
+    // Required when including compare-exchange values
+    TransactionMode = TransactionMode.ClusterWide
+}))
+{
+    var product = session.Load<Product>("products/1-A", includeBuilder =>
+        includeBuilder
+            // include counter
+            .IncludeCounter("Likes")
+            // include related document
+            .IncludeDocuments(x => x.Supplier)
+            // include time series
+            .IncludeTimeSeries("HeartRates")
+            // include compare-exchange value
+            .IncludeCompareExchangeValue("document_property_that_holds_the_cmpxchg_Key")
+            // include past revisions from last month
+            .IncludeRevisions(DateTime.Now.AddMonths(-1))
+    );
+}
+```
+
+
+
+
+
+The following sections describe behavior that applies when including counters - whether during **load or query**:
+* [Counters that don't exist at include time](../../document-extensions/counters/including-counters.mdx#counters-that-dont-exist-at-include-time)
+* [Do not mix IncludeAllCounters with individual counter includes](../../document-extensions/counters/including-counters.mdx#do-not-mix-includeallcounters-with-individual-counter-includes)
+
+---
+
+### Counters that don't exist at include time
+
+If you include a counter that does not exist at the time of the load or query execution, this will not cause an error.
+However:
+* Its value will be null.
+* Accessing it later in the same session will not trigger a server request, because the counter is already cached in the session’s internal state.
+* Even if the counter is created after the _include_, no server call will be made when trying to access its value.
+* To retrieve the counter in this case, you can:
+    * Call [Clear](../../client-api/session/how-to/clear-a-session) to reset the session and discard all tracked state.
+    * Use [Evict](../../client-api/session/how-to/evict-entity-from-a-session) to remove the document (and its cached counters) from the session’s internal state.
+    * Or simply fetch the counter in a new session.
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Load and include specific counters - one of them doesn't exist yet
+    var product = session.Load<Product>("products/1-A", includeBuilder =>
+        includeBuilder.IncludeCounters(new[] { "Likes", "non-existent-counter" }));
+
+    // ...ASSUME the counter "non-existent-counter" was just created *After* the above load
+
+    // Trying to get the counter's value will NOT trigger a server call.
+    // The counter is already cached in the session's internal state.
+    var valueOfCounter = session.CountersFor(product).Get("non-existent-counter");
+    Console.WriteLine("Value of counter: " + valueOfCounter); // null
+
+    // You can call 'Evict' to remove the document (and its cached counters) from the session
+    session.Advanced.Evict(product);
+
+    // Now a server call will be made to fetch the counter.
+    // Note:
+    // * You must specify the document ID since the entity is no longer tracked
+    // * Despite the name, "non-existent-counter" now exists on the server :)
+    valueOfCounter = session.CountersFor("products/1-A").Get("non-existent-counter");
+    Console.WriteLine("Value of counter: " + valueOfCounter); // whatever the current value is
+}
+```
+
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    // Load and include ALL counters
+    var product = session.Load<Product>("products/1-A", includeBuilder =>
+        includeBuilder.IncludeAllCounters());
+
+    // ...ASSUME the counter "new-counter" was just created *After* the above load
+
+    // No server call is made here, even if "new-counter" exists now
+    var valueOfCounter = session.CountersFor(product).Get("new-counter");
+    Console.WriteLine("Value of counter: " + valueOfCounter); // null
+}
+```
+
+
+---
+
+### Do not mix IncludeAllCounters with individual counter includes
+
+Once you've called `IncludeAllCounters()`, you cannot include individual counters using `IncludeCounter()` or `IncludeCounters()` in the same include chain.
+Attempting to do so will throw an `InvalidOperationException` at runtime.
+
+
+```csharp
+using (var session = store.OpenSession())
+{
+    session.Load<Product>("products/1-A", includeBuilder => includeBuilder
+        .IncludeAllCounters()
+        .IncludeCounter("Likes")); // This will throw
+}
+```
+
+
+
+
+
+
+```csharp
+// Available overloads:
+IncludeCounter(string name);
+IncludeCounters(string[] names);
+IncludeAllCounters();
+
+```
+
+
+| Parameter | Type | Description |
+|------------|------------|------------------------------------------|
+| **name** | `string` | The name of a single counter to include. |
+| **names** | `string[]` | An array of counter names to include. |
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_including-counters-java.mdx b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-java.mdx
new file mode 100644
index 0000000000..967197a76d
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-java.mdx
@@ -0,0 +1,78 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Counters can be included when [loading entities](../../client-api/session/loading-entities)
+  or when making [queries](../../client-api/session/querying/how-to-query).
+
+* The _Session_ stores the included counters in its in-memory cache,
+  so their values can be accessed later in the same session without making additional requests to the server.
+
+* To see how to include counters when creating a subscription, see
+  [Create subscription - include counters](../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters).
+
+
+
+* **Including Counters when using `session.load`**:
+    * Include a single Counter using `includeCounter`.
+    * Include multiple Counters using `includeCounters`.
+
+  `includeCounter` and `includeCounters` usage samples:
+
+
+
+
+{`// include single Counters
+Product productPage = session
+    .load(Product.class, "products/1-C", includeBuilder -> {
+        includeBuilder.includeCounter("productLikes")
+            .includeCounter("productDislikes")
+            .includeCounter("productDownloads");
+    });
+`}
+
+
+
+
+{`// include multiple Counters
+// note that you can combine the inclusion of Counters and documents.
+
+Product productPage = session.load(Product.class, "orders/1-A", includeBuilder -> {
+    includeBuilder.includeDocuments("products/1-C")
+        .includeCounters(new String[]{"productLikes", "productDislikes"});
+});
+`}
+
+
+
+
+* **Including Counters when using `session.query`**:
+    * Include a single Counter using `includeCounter`.
+    * Include multiple Counters using `includeCounters`.
+
+  `includeCounter` and `includeCounters` usage samples:
+
+
+
+{`// include a single Counter
+IDocumentQuery<Product> query = session.query(Product.class)
+    .include(includeBuilder -> {
+        includeBuilder.includeCounter("productLikes");
+    });
+`}
+
+
+
+
+{`// include multiple Counters
+IDocumentQuery<Product> query = session.query(Product.class)
+    .include(includeBuilder -> {
+        includeBuilder.includeCounters(new String[]{"productLikes", "productDownloads"});
+    });
+`}
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_including-counters-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-nodejs.mdx
new file mode 100644
index 0000000000..68252b3b9e
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-nodejs.mdx
@@ -0,0 +1,515 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import ContentFrame from '@site/src/components/ContentFrame';
+import Panel from '@site/src/components/Panel';
+
+
+
+* Counters can be included when [loading entities](../../client-api/session/loading-entities)
+  or when making [queries](../../client-api/session/querying/how-to-query).
+
+* The _session_ stores the included counters in its in-memory cache,
+  so their values can be accessed later in the same session without making additional requests to the server.
+
+* To see how to include counters when creating a subscription, see
+  [Create subscription - include counters](../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters).
+ +* In this article: + * [Sample data](../../document-extensions/counters/including-counters.mdx#sample-data) + * [Include counters when **loading**](../../document-extensions/counters/including-counters.mdx#include-counters-when-loading) + * [Include single counter](../../document-extensions/counters/including-counters.mdx#include-single-counter) + * [Include multiple counters](../../document-extensions/counters/including-counters.mdx#include-multiple-counters) + * [Include all counters](../../document-extensions/counters/including-counters.mdx#include-all-counters) + * [Include counters when **querying**](../../document-extensions/counters/including-counters.mdx#include-counters-when-querying) + * [Include single counter](../../document-extensions/counters/including-counters.mdx#include-single-counter-1) + * [Include multiple counters](../../document-extensions/counters/including-counters.mdx#include-multiple-counters-1) + * [Include all counters](../../document-extensions/counters/including-counters.mdx#include-all-counters-1) + * [Including multiple item types](../../document-extensions/counters/including-counters.mdx#including-multiple-item-types) + * [Include behavior and constraints](../../document-extensions/counters/including-counters.mdx#include-behavior-and-constraints) + * [Syntax](../../document-extensions/counters/including-counters.mdx#syntax) + + + + + +The examples in this article are based on the following **sample data**: + + + +```js +const session = store.openSession(); + +// Increase counter "Likes" by 1, or create it with a value of 1 if it doesn't exist +await session.countersFor("products/1-A").increment("Likes"); + +// Increase counter "Dislikes" by 1, or create it with a value of 1 if it doesn't exist +await session.countersFor("products/1-A").increment("Dislikes", 1); + +// Increase counter "Downloads" by 15, or create it with a value of 15 if it doesn't exist +await session.countersFor("products/1-A").increment("Downloads", 15); + +await session.saveChanges(); +``` + + +```js +class Product { + constructor({ + Id = null, + Name = null, + Supplier = null, + Description = null + } = {}) { + Object.assign(this, { + Id, + Name, + Supplier, + Description + }); + } +} +``` + + + + + + + + + +### Include single counter: + +Include a single counter using `includeCounter()`: + + +```js +const session = store.openSession(); + +// Load document "products/1-A" +const product = await session.load("products/1-A", { + includes: includeBuilder => includeBuilder + // Call 'includeCounter', pass the name of the counter to include + .includeCounter("Likes") + } +); + +// The included counter is now cached in the session's memory. +// Getting its value will NOT trigger a server request. +const productLikes = await session + .countersFor("products/1-A") + .get("Likes"); + +console.log("Number of likes:", productLikes); +``` + + + + + + +### Include multiple counters: + +Include multiple counters using `includeCounters()`: + + +```js +const session = store.openSession(); + +// Load document "products/1-A" +const product = await session.load("products/1-A", { + includes: includeBuilder => includeBuilder + // Call 'includeCounters', pass a list of the counter names to include + .includeCounters(["Likes", "Downloads"]) +}); + +// The included counters are now cached in the session's memory. +// Getting their values will NOT trigger a server request. 
+const productDownloads = await session
+    .countersFor("products/1-A")
+    .get("Downloads");
+
+console.log("Number of downloads:", productDownloads);
+```
+
+
+You can also include multiple counters by chaining multiple `includeCounter()` calls:
+
+
+```js
+const session = store.openSession();
+
+// Load document "products/1-A"
+const product = await session.load("products/1-A", {
+    includes: includeBuilder => includeBuilder
+        // Include multiple counters by chaining 'includeCounter' calls
+        .includeCounter("Likes")
+        .includeCounter("Downloads")
+});
+
+// The included counters are now cached in the session's memory.
+// Getting their values will NOT trigger a server request.
+const productDownloads = await session.countersFor("products/1-A").get("Downloads");
+
+console.log("Number of downloads:", productDownloads);
+```
+
+
+
+
+
+### Include all counters:
+
+Include ALL counters using `includeAllCounters()`:
+
+
+```js
+const session = store.openSession();
+
+// Load document "products/1-A"
+const product = await session.load("products/1-A", {
+    includes: includeBuilder =>
+        // Call 'includeAllCounters' to include all counters
+        includeBuilder.includeAllCounters()
+});
+
+// All counters are now cached in the session's memory.
+// Getting their values will NOT trigger a server request.
+const productDislikes = await session.countersFor("products/1-A").get("Dislikes");
+
+console.log("Number of dislikes:", productDislikes);
+```
+
+
+
+
+
+
+
+
+### Include single counter:
+
+Use `includeCounter()` to include a single counter for each resulting document.
+The counter with the specified name will be cached in the session's memory for each document returned.
+
+
+
+```js
+const session = store.openSession();
+
+// Query for Product documents
+const products = await session
+    .query({ collection: "Products" })
+    // Filter documents as needed
+    .whereEquals("Supplier", "suppliers/1-A")
+    // Include the "Likes" counter for each matching document
+    .include(includeBuilder => includeBuilder.includeCounter("Likes"))
+    .all();
+
+for (const product of products) {
+    // Access the counters included in the session.
+    // Getting their values will NOT trigger a server request.
+    const productLikes = await session.countersFor(product).get("Likes");
+
+    if (productLikes !== null)
+        console.log(`Product '${product.id}' has ${productLikes} likes.`);
+    else
+        console.log(`Product '${product.id}' has no 'Likes' counter.`);
+}
+```
+
+
+```js
+const session = store.openSession();
+
+const products = await session.advanced
+    .rawQuery(`
+        from Products as p
+        where p.Supplier == "suppliers/1-A"
+        include counters(p, "Likes")
+    `)
+    .all();
+
+for (const product of products) {
+    const likes = await session.countersFor(product).get("Likes");
+
+    if (likes !== null)
+        console.log(`Product '${product.id}' has ${likes} likes.`);
+    else
+        console.log(`Product '${product.id}' has no 'Likes' counter.`);
+}
+```
+
+
+```sql
+from "Products"
+where Supplier == "suppliers/1-A"
+include counters("Likes")
+```
+
+
+
+
+
+
+
+### Include multiple counters:
+
+Use `includeCounters()` to include multiple counters for each resulting document.
+Counters with the specified names will be cached in the session's memory for each document returned.
+ + + +```js +const session = store.openSession(); + +// Query for Product documents +const products = await session + .query({ collection: "Products" }) + // Filter documents as needed + .whereEquals("Supplier", "suppliers/1-A") + // Include multiple counters ("Likes" and "Downloads") for each matching document + // Alternatively, you can chain individual calls: + // includeBuilder.includeCounter("Likes").includeCounter("Downloads") + .include(includeBuilder => + includeBuilder.includeCounters(["Likes", "Downloads"]) + ) + .all(); + +for (const product of products) { + // Access the counters included in the session + // Getting their values will NOT trigger a server request. + const productLikes = await session.countersFor(product).get("Likes"); + const productDownloads = await session.countersFor(product).get("Downloads"); +} +``` + + +```js +const session = store.openSession(); + +const products = await session.advanced + .rawQuery(` + from Products + where Supplier == "suppliers/1-A" + include counters("Likes"), counters("Downloads") + `) + .all(); + +for (const product of products) { + const likes = await session.countersFor(product).get("Likes"); + const downloads = await session.countersFor(product).get("Downloads"); +} +``` + + +```sql +from "Products" +where Supplier = "suppliers/1-A" +include counters("Likes"), counters("Downloads") +``` + + + + + + + +### Include all counters: + +Use `includeAllCounters()` to include ALL counters. +All counters for each resulting document will be cached in the session's memory. + + + +```js +const session = store.openSession(); + +// Query for Product documents +const products = await session + .query({ collection: "Products" }) + // Filter documents as needed + .whereEquals("Supplier", "suppliers/1-A") + // Include ALL counters for each matching document + .include(includeBuilder => includeBuilder + .includeAllCounters()) + .all(); + +for (const product of products) { + // All counters for the resulting documents are now cached in the session's memory. + // Getting their values will NOT trigger a server request. + const productDislikes = await session.countersFor(product).get("Dislikes"); +} +``` + + +```js +const session = store.openSession(); + +const products = await session.advanced + .rawQuery(` + from Products + where Supplier == "suppliers/1-A" + include counters() + `) + .all(); + +for (const product of products) { + const productDislikes = await session.countersFor(product).get("Dislikes"); +} +``` + + +```sql +from "Products" +where Supplier == "suppliers/1-A" +include counters() +``` + + + + + + + + +You can combine different include types **when loading or querying** a document using the fluent _includeBuilder_ syntax. +For example, you can include counters, related documents, time series, compare-exchange values, or past revisions. +This allows you to retrieve all related data in a single server call and avoid additional round trips later in the session. + +The order in which you chain the include methods does not matter. 
For example:
+
+
+```js
+const session = store.openSession({
+    // Required when including compare-exchange values
+    transactionMode: "ClusterWide"
+});
+
+// Load document "products/1-A" and include multiple related items
+const product = await session.load("products/1-A", {
+    includes: includeBuilder => includeBuilder
+        // Include counter
+        .includeCounter("Likes")
+        // Include related document
+        .includeDocuments("Supplier")
+        // Include time series
+        .includeTimeSeries("HeartRates")
+        // Include compare-exchange value
+        .includeCompareExchangeValue("document_property_that_holds_the_cmpxchg_Key")
+        // Include past revisions from last month
+        .includeRevisions(new Date(Date.now() - 30 * 24 * 60 * 60 * 1000))
+});
+```
+
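+
+All of the included items can then be read from the session cache without further trips to the server.
+A brief sketch (reusing the `product` entity and the field names from the sample data above):
+
+```js
+// Neither call triggers a request - both items arrived with the load above.
+const likes = await session.countersFor(product).get("Likes");
+const supplier = await session.load(product.Supplier);
+```
+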
+
+
+
+The following sections describe behavior that applies when including counters - whether during **load or query**:
+* [Counters that don't exist at include time](../../document-extensions/counters/including-counters.mdx#counters-that-dont-exist-at-include-time)
+* [Do not mix IncludeAllCounters with individual counter includes](../../document-extensions/counters/including-counters.mdx#do-not-mix-includeallcounters-with-individual-counter-includes)
+
+---
+
+### Counters that don't exist at include time
+
+If you include a counter that does not exist at the time the load or query executes, no error is raised.
+However:
+* Its value will be null.
+* Accessing it later in the same session will not trigger a server request, because the counter is already cached in the session’s internal state.
+* Even if the counter is created after the _include_, no server call will be made when trying to access its value.
+* To retrieve the counter in this case, you can:
+  * Call [Clear](../../client-api/session/how-to/clear-a-session) to reset the session and discard all tracked state.
+  * Use [Evict](../../client-api/session/how-to/evict-entity-from-a-session) to remove the document (and its cached counters) from the session’s internal state.
+  * Or simply fetch the counter in a new session - see the sketch at the end of this section.
+
+
+```js
+const session = store.openSession();
+
+// Load and include specific counters - one of them doesn't exist yet
+const product = await session.load("products/1-A", {
+    includes: includeBuilder => includeBuilder
+        .includeCounters(["Likes", "non-existent-counter"])
+});
+
+// ...ASSUME the counter "non-existent-counter" was just created *After* the above load
+
+// Trying to get the counter's value will NOT trigger a server call.
+// The counter is already cached in the session's internal state.
+let valueOfCounter = await session.countersFor(product).get("non-existent-counter");
+console.log("Value of counter:", valueOfCounter); // null
+
+// You can call 'evict' to remove the document (and its cached counters) from the session
+session.advanced.evict(product);
+
+// Now a server call will be made to fetch the counter.
+// Note:
+// * You must specify the document ID since the entity is no longer tracked
+// * Despite the name, "non-existent-counter" now exists on the server :)
+valueOfCounter = await session.countersFor("products/1-A").get("non-existent-counter");
+console.log("Value of counter:", valueOfCounter); // whatever the current value is
+```
+
+
+
+```js
+const session = store.openSession();
+
+// Load and include ALL counters
+const product = await session.load("products/1-A", {
+    includes: includeBuilder => includeBuilder.includeAllCounters()
+});
+
+// ...ASSUME the counter "new-counter" was just created *After* the above load
+
+// No server call is made here, even if "new-counter" exists now.
+// The counter is considered tracked in the session and will return null.
+const valueOfCounter = await session.countersFor(product).get("new-counter");
+console.log("Value of counter:", valueOfCounter); // null
+```
+
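+
+A minimal sketch of the remaining option - reading the counter from a fresh session
+(same document and counter name as in the example above):
+
+```js
+// A new session starts with no cached include state,
+// so this 'get' goes to the server and returns the current value.
+const freshSession = store.openSession();
+
+const counterValue = await freshSession
+    .countersFor("products/1-A")
+    .get("new-counter");
+
+console.log("Value of counter:", counterValue);
+```
+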
+
+---
+
+### Do not mix includeAllCounters with individual counter includes
+
+Once you've called `includeAllCounters()`, you cannot include individual counters using `includeCounter()` or `includeCounters()` in the same include chain.
+Attempting to do so will throw an `InvalidOperationException` at runtime.
+
+
+```js
+const session = store.openSession();
+
+await session.load("products/1-A", {
+    includes: includeBuilder => includeBuilder
+        .includeAllCounters()
+        .includeCounter("Likes") // This will throw
+});
+```
+
+
+
+## Syntax
+
+```js
+// Available methods:
+includeCounter(name);
+includeCounters(names);
+includeAllCounters();
+```
+
+
+| Parameter | Type | Description |
+|------------|------------|------------------------------------------|
+| **name** | `string` | The name of a single counter to include. |
+| **names** | `string[]` | An array of counter names to include. |
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_including-counters-php.mdx b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-php.mdx
new file mode 100644
index 0000000000..77a82c9455
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-php.mdx
@@ -0,0 +1,78 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Counters can be included when [loading entities](../../client-api/session/loading-entities)
+  or when making [queries](../../client-api/session/querying/how-to-query).
+
+* The _Session_ stores the included counters in its in-memory cache,
+  so their values can be accessed later in the same session without making additional requests to the server.
+
+* To see how to include counters when creating a subscription, see
+  [Create subscription - include counters](../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters).
+
+
+
+* **Including Counters when using [session.load](../../client-api/session/loading-entities.mdx#session--loading-entities)**:
+    * Include a single Counter using `includeCounter`
+    * Include multiple Counters using `includeCounters`
+
+      `includeCounter` and `includeCounters` usage samples:
+
+
+
+
+{`// include Counters one at a time by chaining 'includeCounter' calls
+$productPage = $session->load(Product::class, "products/1-C", function($includeBuilder) {
+    return $includeBuilder
+        ->includeCounter("ProductLikes")
+        ->includeCounter("ProductDislikes")
+        ->includeCounter("ProductDownloads");
+});
+`}
+
+
+
+
+{`// include multiple Counters
+// note that you can combine the inclusion of Counters and documents.
+$productPage = $session->load(Product::class, "products/1-C", function($includeBuilder) {
+    return $includeBuilder
+        ->includeDocuments("products/1-C")
+        ->includeCounters(["ProductLikes", "ProductDislikes"]);
+});
+`}
+
+
+
+
+* **Including Counters when using [session.query](../../client-api/session/querying/how-to-query.mdx#session--querying--how-to-query)**:
+    * Include a single Counter using `includeCounter`.
+    * Include multiple Counters using `includeCounters`.
+
+      `includeCounter` and `includeCounters` usage samples:
+
+
+
+{`// include a single Counter
+$query = $session->query(Product::class)
+    ->include(function($includeBuilder) {
+        return $includeBuilder->includeCounter("ProductLikes");
+    });
+`}
+
+
+
+
+{`// include multiple Counters
+$query = $session->query(Product::class)
+    ->include(function($includeBuilder) {
+        return $includeBuilder->includeCounters(["ProductLikes", "ProductDownloads"]);
+    });
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_including-counters-python.mdx b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-python.mdx
new file mode 100644
index 0000000000..d77671a273
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_including-counters-python.mdx
@@ -0,0 +1,74 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Counters can be included when [loading entities](../../client-api/session/loading-entities)
+  or when making [queries](../../client-api/session/querying/how-to-query).
+
+* The _Session_ stores the included counters in its in-memory cache,
+  so their values can be accessed later in the same session without making additional requests to the server.
+
+* To see how to include counters when creating a subscription, see
+  [Create subscription - include counters](../../client-api/data-subscriptions/creation/examples.mdx#create-subscription---include-counters).
+
+
+
+* **Including Counters when using [session.load](../../client-api/session/loading-entities.mdx#session--loading-entities)**:
+    * Include a single Counter using `include_counter`
+    * Include multiple Counters using `include_counters`
+
+      `include_counter` and `include_counters` usage samples:
+
+
+
+{`# include Counters one at a time by chaining 'include_counter' calls
+product_page = session.load(
+    "products/1-C",
+    include_builder=lambda builder: builder.include_counter("ProductLikes")
+    .include_counter("ProductDislikes")
+    .include_counter("ProductDownloads"),
+)
+`}
+
+
+
+
+{`# include multiple Counters
+# note that you can combine the inclusion of Counters and documents.
+
+product_page = session.load(
+    "orders/1-A",
+    include_builder=lambda builder: builder.include_documents("products/1-C").include_counters(
+        ["ProductLikes", "ProductDislikes"]
+    ),
+)
+`}
+
+
+
+
+* **Including Counters when using [session.query](../../client-api/session/querying/how-to-query.mdx#session--querying--how-to-query)**:
+    * Include a single Counter using `include_counter`.
+    * Include multiple Counters using `include_counters`.
+
+      `include_counter` and `include_counters` usage samples:
+
+
+
+{`# include a single Counter
+query = session.query(object_type=Product).include(lambda builder: builder.include_counter("ProductLikes"))
+`}
+
+
+
+
+{`# include multiple Counters
+query = session.query(object_type=Product).include(
+    lambda builder: builder.include_counters(["ProductLikes", "ProductDownloads"])
+)
+`}
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_indexing-csharp.mdx b/versioned_docs/version-7.1/document-extensions/counters/_indexing-csharp.mdx
new file mode 100644
index 0000000000..751e82a397
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_indexing-csharp.mdx
@@ -0,0 +1,203 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To index counters, create a [static index](../../indexes/creating-and-deploying.mdx#static-indexes)
+that inherits from `AbstractCountersIndexCreationTask` or `AbstractJavaScriptCountersIndexCreationTask`.
+
+* Auto-indexes for counters are not available at this time.
+
+* In this page:
+  * [Syntax](../../document-extensions/counters/indexing.mdx#syntax)
+  * [AbstractJavaScriptCountersIndexCreationTask](../../document-extensions/counters/indexing.mdx#section)
+  * [CounterNamesFor](../../document-extensions/counters/indexing.mdx#section-1)
+  * [Querying the Index](../../document-extensions/counters/indexing.mdx#querying-the-index)
+
+
+## Syntax
+
+To index counter values, create an index that inherits from `AbstractCountersIndexCreationTask`.
+Next, choose one of these two methods, which take the index map expression:
+
+
+
+{`protected void AddMapForAll(Expression<Func<IEnumerable<CounterEntry>, IEnumerable>> map)
+
+protected void AddMap(string counter, Expression<Func<IEnumerable<CounterEntry>, IEnumerable>> map)
+`}
+
+
+
+`AddMapForAll` indexes all the counters in the indexed documents. `AddMap` only indexes the counters with
+the specified name.
+
+Examples of indexes using each method:
+
+
+
+
+{`private class MyCounterIndex : AbstractCountersIndexCreationTask<Company>
+{
+    public MyCounterIndex()
+    {
+        AddMap("Likes", counters => from counter in counters
+                                    select new
+                                    {
+                                        Likes = counter.Value,
+                                        Name = counter.Name,
+                                        User = counter.DocumentId
+                                    });
+    }
+}
+`}
+
+
+
+
+{`private class MyCounterIndex_AllCounters : AbstractCountersIndexCreationTask<Company>
+{
+    public MyCounterIndex_AllCounters()
+    {
+        AddMapForAll(counters => from counter in counters
+                                 select new
+                                 {
+                                     Count = counter.Value,
+                                     Name = counter.Name,
+                                     User = counter.DocumentId
+                                 });
+    }
+}
+`}
+
+
+
+
+
+### `AbstractJavaScriptCountersIndexCreationTask`
+
+Creating an index inheriting from `AbstractJavaScriptCountersIndexCreationTask` allows
+you to write your map and reduce functions in JavaScript.
+Learn more about JavaScript indexes [here](../../indexes/javascript-indexes.mdx).
+
+
+
+{`public class AbstractJavaScriptCountersIndexCreationTask : AbstractCountersIndexCreationTask
+\{
+    public HashSet<string> Maps;
+    protected string Reduce;
+\}
+`}
+
+
+
+| Property | Type | Description |
+| - | - | - |
+| **Maps** | `HashSet<string>` | The set of javascript map functions |
+| **Reduce** | `string` | The javascript reduce function |
+
+Example:
+
+
+
+{`private class MyMultiMapCounterIndex : AbstractJavaScriptCountersIndexCreationTask
+\{
+    public MyMultiMapCounterIndex()
+    \{
+        Maps = new HashSet<string>
+        \{
+            @"counters.map('Blogposts', 'Likes', function (counter) \{
+                return \{
+                    Likes: counter.Value,
+                    Name: counter.Name,
+                    BlogPost: counter.DocumentId
+                \};
+            \})"
+        \};
+    \}
+\}
+`}
+
+
+### `CounterNamesFor`
+
+While indexes inheriting from `AbstractIndexCreationTask` cannot index counter _values_, the `CounterNamesFor`
+method is available which returns the names of all counters for a specified document:
+
+
+
+{`IEnumerable<string> CounterNamesFor(object doc);
+`}
+
+
+
+Example of index using `CounterNamesFor`:
+
+
+
+{`public class Companies_ByCounterNames : AbstractIndexCreationTask<Employee>
+\{
+    public class Result
+    \{
+        public string[] CounterNames \{ get; set; \}
+    \}
+
+    public Companies_ByCounterNames()
+    \{
+        Map = employees => from e in employees
+                           let counterNames = CounterNamesFor(e)
+                           select new Result
+                           \{
+                               CounterNames = counterNames.ToArray()
+                           \};
+    \}
+\}
+`}
+
+
+
+
+
+## Querying the Index
+
+
+
+
+{`// return all companies that have 'Likes' counter
+List<Company> companies = session
+    .Query<Companies_ByCounterNames.Result, Companies_ByCounterNames>()
+    .Where(x => x.CounterNames.Contains("Likes"))
+    .OfType<Company>()
+    .ToList();
+`}
+
+
+
+
+{`// return all companies that have 'Likes' counter
+List<Company> companies = await asyncSession
+    .Query<Companies_ByCounterNames.Result, Companies_ByCounterNames>()
+    .Where(x => x.CounterNames.Contains("Likes"))
+    .OfType<Company>()
+    .ToListAsync();
+`}
+
+
+
+
+{`// return all companies that have 'Likes' counter
+List<Company> companies = session
+    .Advanced
+    .DocumentQuery<Company, Companies_ByCounterNames>()
+    .ContainsAny("CounterNames", new[] { "Likes" })
+    .ToList();
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_indexing-java.mdx b/versioned_docs/version-7.1/document-extensions/counters/_indexing-java.mdx
new file mode 100644
index 0000000000..1023bc6684
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_indexing-java.mdx
@@ -0,0 +1,178 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To index counters, create a [static index](../../indexes/creating-and-deploying.mdx#static-indexes)
+that inherits from `AbstractCountersIndexCreationTask` or `AbstractJavaScriptCountersIndexCreationTask`.
+
+* Auto-indexes for counters are not available at this time.
+
+* In this page:
+  * [Syntax](../../document-extensions/counters/indexing.mdx#syntax)
+  * [AbstractJavaScriptCountersIndexCreationTask](../../document-extensions/counters/indexing.mdx#section)
+  * [CounterNamesFor](../../document-extensions/counters/indexing.mdx#section-1)
+  * [Querying the Index](../../document-extensions/counters/indexing.mdx#querying-the-index)
+
+
+## Syntax
+
+To index counter values, create an index that inherits from `AbstractCountersIndexCreationTask`.
+Next, use this method, which takes the index map string:
+
+
+
+{`protected void addMap(String map)
+`}
+
+
+
+`addMap` only indexes the counters with
+the specified name.
+
+Example of an index using this method:
+
+
+
+
+{`public static class MyCounterIndex extends AbstractCountersIndexCreationTask {
+    public MyCounterIndex() {
+        map = "counters.Companies.HeartRate.Select(counter => new {\\n" +
+            "    heartBeat = counter.Value,\\n" +
+            "    name = counter.Name,\\n" +
+            "    user = counter.DocumentId\\n" +
+            "})";
+    }
+}
+`}
+
+
+
+
+
+### `AbstractJavaScriptCountersIndexCreationTask`
+
+Creating an index inheriting from `AbstractJavaScriptCountersIndexCreationTask` allows
+you to write your map and reduce functions in JavaScript.
+Learn more about JavaScript indexes [here](../../indexes/javascript-indexes.mdx).
+
+
+
+{`public class AbstractJavaScriptCountersIndexCreationTask extends AbstractIndexCreationTaskBase<CountersIndexDefinition>
+\{
+    private final CountersIndexDefinition _definition = new CountersIndexDefinition();
+
+    public Set<String> getMaps() \{
+        return _definition.getMaps();
+    \}
+
+    public void setMaps(Set<String> maps) \{
+        _definition.setMaps(maps);
+    \}
+
+    public Map<String, IndexFieldOptions> getFields() \{
+        return _definition.getFields();
+    \}
+
+    public void setFields(Map<String, IndexFieldOptions> fields) \{
+        _definition.setFields(fields);
+    \}
+
+    protected String getReduce() \{
+        return _definition.getReduce();
+    \}
+
+    protected void setReduce(String reduce) \{
+        _definition.setReduce(reduce);
+    \}
+
+    @Override
+    public boolean isMapReduce() \{
+        return getReduce() != null;
+    \}
+
+    /**
+     * @return If not null, then each reduce result will be created as a document in the specified collection name.
+     */
+    protected String getOutputReduceToCollection() \{
+        return _definition.getOutputReduceToCollection();
+    \}
+\}
+`}
+
+
+
+| Property | Type | Description |
+| - | - | - |
+| **Maps** | `Set<String>` | The set of javascript map functions |
+| **Reduce** | `String` | The javascript reduce function |
+
+Example:
+
+
+
+{`public static class MyMultiMapCounterIndex extends AbstractJavaScriptCountersIndexCreationTask \{
+    public MyMultiMapCounterIndex() \{
+        setMaps(Collections.singleton("counters.map('Blogposts', 'Likes', function (counter) \{\\n" +
+            "return \{\\n" +
+            "    likes: counter.Value,\\n" +
+            "    name: counter.Name,\\n" +
+            "    blogPost: counter.DocumentId\\n" +
+            "\};\\n" +
+            "\})"));
+    \}
+\}
+`}
+
+
+### `CounterNamesFor`
+
+While indexes inheriting from `AbstractIndexCreationTask` cannot index counter _values_, the `counterNamesFor`
+method is available which returns the names of all counters for a specified document:
+
+
+
+{`List<String> counterNamesFor(Object doc);
+`}
+
+
+
+Example of index using `counterNamesFor`:
+
+
+
+{`public static class Companies_ByCounterNames extends AbstractIndexCreationTask \{
+    public Companies_ByCounterNames() \{
+        map = "from e in docs.Employees\\n" +
+            "let counterNames = counterNamesFor(e)\\n" +
+            "select new\\n" +
+            "\{\\n" +
+            "    counterNames = counterNames.ToArray()\\n" +
+            "\}";
+    \}
+\}
+`}
+
+
+
+
+
+## Querying the Index
+
+
+
+{`// return all companies that have 'Likes' counter
+List<Company> companies = session
+    .query(Company.class, Companies_ByCounterNames.class)
+    .containsAny("counterNames", Arrays.asList("companies_likes"))
+    .selectFields(Company.class, "likes")
+    .toList();
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_indexing-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/counters/_indexing-nodejs.mdx
new file mode 100644
index 0000000000..f73eec52ed
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_indexing-nodejs.mdx
@@ -0,0 +1,124 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To index counters, create a [static index](../../indexes/creating-and-deploying.mdx#static-indexes)
+that inherits from `AbstractCountersIndexCreationTask` or `AbstractRawJavaScriptCountersIndexCreationTask`.
+
+* Auto-indexes for counters are not available at this time.
+
+* In this page:
+  * [Usage](../../document-extensions/counters/indexing.mdx#usage)
+  * [AbstractRawJavaScriptCountersIndexCreationTask](../../document-extensions/counters/indexing.mdx#section)
+  * [CounterNamesFor](../../document-extensions/counters/indexing.mdx#section-1)
+  * [Querying the Index](../../document-extensions/counters/indexing.mdx#querying-the-index)
+
+
+## Usage
+
+To index counter values, create an index that inherits from `AbstractCountersIndexCreationTask`.
+Next, set the index's `map` string in the constructor:
+
+
+
+{`this.map = "from counter in docs.counters select new { ... }";
+`}
+
+
+
+The `map` string determines which counters are indexed and which fields are extracted from them.
+
+Example:
+
+
+
+
+{`export class MyCounterIndex extends AbstractCountersIndexCreationTask {
+    constructor() {
+        super();
+        this.map = \`from counter in docs.counters select new {
+            Likes = counter.Value,
+            Name = counter.Name,
+            User = counter.DocumentId
+        }\`;
+    }
+}
+`}
+
+
+
+
+
+### `AbstractRawJavaScriptCountersIndexCreationTask`
+
+Creating an index inheriting from `AbstractRawJavaScriptCountersIndexCreationTask` allows
+you to write your map and reduce functions in JavaScript.
+Learn more about JavaScript indexes [here](../../indexes/javascript-indexes.mdx).
+
+
+
+{`class MyCounterIndex extends AbstractRawJavaScriptCountersIndexCreationTask \{
+    public constructor() \{
+        super();
+
+        this.maps.add(
+            "counters.map('Companies', 'HeartRate', function (counter) \{\\n" +
+            "return \{\\n" +
+            "    heartBeat: counter.Value,\\n" +
+            "    name: counter.Name,\\n" +
+            "    user: counter.DocumentId\\n" +
+            "\};\\n" +
+            "\})"
+        );
+    \}
+\}
+`}
+
+
+### `CounterNamesFor`
+
+While indexes inheriting from `AbstractIndexCreationTask` cannot index counter _values_, the `counterNamesFor()`
+method is available which returns the names of all counters for a specified document.
+
+Example of index using `counterNamesFor`:
+
+
+
+{`class Companies_ByCounterNames extends AbstractCountersIndexCreationTask \{
+    constructor() \{
+        super();
+        this.map = "docs.Companies.Select(e => new \{\\n" +
+            "    e = e,\\n" +
+            "    counterNames = this.CounterNamesFor(e)\\n" +
+            "\}).Select(this0 => new \{\\n" +
+            "    CounterNames = Enumerable.ToArray(this0.counterNames)\\n" +
+            "\})";
+    \}
+\}
+`}
+
+
+
+
+
+## Querying the Index
+
+
+
+
+{`const companies = await session
+    .query({index: "Companies_ByCounterNames"})
+    .containsAny("counterNames", ["Likes"])
+    .all();
+`}
+
+
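+
+Once the matching documents are back, each one's counter values can be read through the session.
+A minimal sketch (assuming the `Companies_ByCounterNames` index above; `company.name` is an illustrative field):
+
+{`for (const company of companies) {
+    // Counter values are not stored in the index results,
+    // so this 'get' call fetches each value from the server.
+    const likes = await session.countersFor(company).get("Likes");
+    console.log(company.name, "has", likes, "likes");
+}
+`}
+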
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_indexing-php.mdx b/versioned_docs/version-7.1/document-extensions/counters/_indexing-php.mdx
new file mode 100644
index 0000000000..83fa325b5d
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_indexing-php.mdx
@@ -0,0 +1,139 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To index counters, create a [static index](../../indexes/creating-and-deploying.mdx#static-indexes)
+that inherits from `AbstractCountersIndexCreationTask` or `AbstractJavaScriptCountersIndexCreationTask`.
+
+* Auto-indexes for counters are not available at this time.
+
+* In this page:
+  * [Syntax](../../document-extensions/counters/indexing.mdx#syntax)
+  * [AbstractJavaScriptCountersIndexCreationTask](../../document-extensions/counters/indexing.mdx#section)
+  * [CounterNamesFor](../../document-extensions/counters/indexing.mdx#section-1)
+
+
+## Syntax
+
+To index counter values, create an index that inherits from `AbstractCountersIndexCreationTask`
+and set a map as follows.
+
+
+{`class MyCounterIndex extends AbstractCountersIndexCreationTask
+\{
+    public function __construct()
+    \{
+        parent::__construct();
+
+        $this->setMap(
+            "from counter in counters " .
+            "select new \{ " .
+            "   Likes = counter.Value, " .
+            "   Name = counter.Name, " .
+            "   User = counter.DocumentId " .
+            "\}"
+        );
+    \}
+\}
+`}
+
+
+### AbstractJavaScriptCountersIndexCreationTask
+
+Creating an index inheriting from `AbstractJavaScriptCountersIndexCreationTask` allows
+you to write your map and reduce functions in JavaScript.
+Learn more about JavaScript indexes [here](../../indexes/javascript-indexes.mdx).
+
+
+
+{`public class AbstractJavaScriptCountersIndexCreationTask : AbstractCountersIndexCreationTask
+\{
+    public HashSet<string> Maps;
+    protected string Reduce;
+\}
+`}
+
+
+
+| Property | Type | Description |
+| - | - | - |
+| **Maps** | `HashSet<string>` | The set of javascript map functions |
+| **Reduce** | `string` | The javascript reduce function |
+
+Example:
+
+
+
+{`class MyMultiMapCounterIndex extends AbstractJavaScriptCountersIndexCreationTask
+\{
+    public function __construct()
+    \{
+        parent::__construct();
+
+        $this->setMaps([
+            "counters.map('Blogposts', 'Likes', function (counter) \{
+                return \{
+                    Likes: counter.Value,
+                    Name: counter.Name,
+                    BlogPost: counter.DocumentId
+                \};
+            \})"
+        ]);
+    \}
+\}
+`}
+
+
+### `CounterNamesFor`
+
+While indexes inheriting from `AbstractIndexCreationTask` cannot index counter _values_, the `counterNamesFor`
+method is available which returns the names of all counters for a specified document:
+
+
+
+{`List<String> counterNamesFor(Object doc);
+`}
+
+
+
+Example of index using `counterNamesFor`:
+
+
+
+{`class Companies_ByCounterNames_Result
+\{
+    public ?StringArray $counterNames = null;
+
+    public function getCounterNames(): ?StringArray
+    \{
+        return $this->counterNames;
+    \}
+
+    public function setCounterNames(?StringArray $counterNames): void
+    \{
+        $this->counterNames = $counterNames;
+    \}
+\}
+class Companies_ByCounterNames extends AbstractIndexCreationTask
+\{
+    public function __construct()
+    \{
+        parent::__construct();
+
+        $this->map = "from e in docs.Employees " .
+            "let counterNames = counterNamesFor(e) " .
+            "select new \{" .
+            "   CounterNames = counterNames.ToArray() " .
+            "\}";
+    \}
+\}
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_indexing-python.mdx b/versioned_docs/version-7.1/document-extensions/counters/_indexing-python.mdx
new file mode 100644
index 0000000000..ef4810d30f
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_indexing-python.mdx
@@ -0,0 +1,110 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* To index counters, create a [static index](../../indexes/creating-and-deploying.mdx#static-indexes)
+that inherits from `AbstractCountersIndexCreationTask` or `AbstractJavaScriptCountersIndexCreationTask`.
+
+* Auto-indexes for counters are not available at this time.
+
+* In this page:
+  * [Syntax](../../document-extensions/counters/indexing.mdx#syntax)
+  * [AbstractJavaScriptCountersIndexCreationTask](../../document-extensions/counters/indexing.mdx#section)
+  * [CounterNamesFor](../../document-extensions/counters/indexing.mdx#section-1)
+
+
+## Syntax
+
+To index counter values, create an index that inherits from `AbstractCountersIndexCreationTask`
+and use `map` as follows.
+
+
+{`class MyCounterIndex(AbstractCountersIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from counter in counters "
+            "select new \{ likes = counter.Value, name = counter.Name, user = counter.DocumentId \}"
+        )
+`}
+
+
+### AbstractJavaScriptCountersIndexCreationTask
+
+Creating an index inheriting from `AbstractJavaScriptCountersIndexCreationTask` allows
+you to write your map and reduce functions in JavaScript.
+Learn more about JavaScript indexes [here](../../indexes/javascript-indexes.mdx).
+
+
+
+{`public class AbstractJavaScriptCountersIndexCreationTask : AbstractCountersIndexCreationTask
+\{
+    public HashSet<string> Maps;
+    protected string Reduce;
+\}
+`}
+
+
+
+| Property | Type | Description |
+| - | - | - |
+| **maps** | `Set[str]` | The set of javascript map functions |
+| **reduce** | `str` | The javascript reduce function |
+
+Example:
+
+
+
+{`class MyMultiMapCounterIndex(AbstractJavaScriptCountersIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.maps = """
+        counters.map('Blogposts', 'Likes', function (counter) \{
+            return \{
+                Likes: counter.Value,
+                Name: counter.Name,
+                BlogPost: counter.DocumentId
+            \};
+        \})
+        """
+`}
+
+
+### `CounterNamesFor`
+
+While indexes inheriting from `AbstractIndexCreationTask` cannot index counter _values_, the `CounterNamesFor`
+method is available which returns the names of all counters for a specified document:
+
+
+
+{`IEnumerable<string> CounterNamesFor(object doc);
+`}
+
+
+
+Example of index using `CounterNamesFor`:
+
+
+
+{`class Companies_ByCounterNames(AbstractIndexCreationTask):
+    class Result:
+        def __init__(self, counter_names: List[str] = None):
+            self.counter_names = counter_names
+
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from e in docs.Employees "
+            "let counterNames = CounterNamesFor(e) "
+            "select new \{ counter_names = counterNames.ToArray() \}"
+        )
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_overview-csharp.mdx b/versioned_docs/version-7.1/document-extensions/counters/_overview-csharp.mdx
new file mode 100644
index 0000000000..371ba47ab2
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/counters/_overview-csharp.mdx
@@ -0,0 +1,217 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* RavenDB's distributed counters, **Counters** for short, are numeric data variables that can be added to documents.
+  Use a Counter to count anything that needs counting, like:
+  * Sold products
+  * Voting results
+  * Any event related to the document
+
+* Counters interact with and can trigger other RavenDB features.
+  To find out how to use counters with other features, read [Counters and Other Features](../../document-extensions/counters/counters-and-other-features.mdx).
+
+* Create and manage Counters using API methods, or through the [Studio](../../studio/database/document-extensions/counters.mdx).
+ +* In this page: + * [Why use Counters?](../../document-extensions/counters/overview.mdx#why-use-counters?) + * [Overview](../../document-extensions/counters/overview.mdx#overview) + * [Managing Counters](../../document-extensions/counters/overview.mdx#managing-counters) + * [Counter Methods and the `CountersFor` object](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object) + * [Managing Counters using `Operations`](../../document-extensions/counters/overview.mdx#managing-counters-using-) + +## Why use Counters? + +#### Convenient Counting Mechanism + +Counters are very easy to manage, using simple API methods or through the Studio. + +E.g. Use counters when you want to - + +- Keep track of the number of times a document has been viewed or rated. +- Count how many visitors from certain countries or regions read a document. +- Continuously record the number of visitors on an event page. +- Avoid having to update the whole document for just a numeric value change. +- Have a need for a high-throughput counter (also see **Distributed Values** below). +#### Distributed Values + +A Counter's value is [distributed between cluster nodes](../../document-extensions/counters/counters-in-clusters.mdx). +Among the advantages of this: + +* The cluster **remains available** even when nodes crash. +* Any node can provide or modify a Counter's value immediately, without checking or coordinating this with other nodes. +#### High Performance, Low Resources + +A document includes the Counter's _name_, while the Counter's actual _value_ is kept in a separate location. +Modifying a Counter's value doesn't require the modification of the document itself. +This results in highly efficient operation. +#### High-Frequency Counting + +Counters are especially useful when a very large number of counting operations is required, +because of their speed and low resources usage. + +E.g. Use Counters - + +- For an online election page, to continuously update a Number-Of-Votes Counter for each candidate. +- To continuously update Counters with the number of visitors in different sections of a big online store. + + + +## Overview + +#### Design + +A document's metadata contains only the ***Counters' names-list*** for this document. +***Counter Values*** are not kept in the document's metadata, but in a separate location. + +Therefore, changes like adding a new counter or deleting an existing counter trigger a document change, +while simply modifying the Counter Value does not. +#### Cumulative Counter Actions + +* Counter value-modification actions are cumulative, the order in which they are executed doesn't matter. + E.g., It doesn't matter if a Counter has been incremented by 2 and then by 7, or by 7 first and then by 2. + +* When a Counter is deleted, the sequence of Counter actions becomes non-cumulative and may require + [special attention](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment). +#### Counters and Conflicts + +Counter actions (for either name or value) almost never cause conflicts. +The only exception to this is [concurrent `Delete` and `Increment`](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment) +actions by multiple cluster nodes. + +- Counter actions can be executed concurrently or in any order, without causing a conflict. +- You can successfully modify Counters while their document is being modified by a different client. 
+ + +Counter actions **can still be performed** when their related documents are in a conflicted state. + +#### Counters Cost + +Counters are designated to lower the cost of counting, but do come with a price. + +* **All the names** of a document's Counters are added to its content, increasing its size. +* **Counter values** occupy storage space. + + +Be aware that the negligible amount of resources required by a few Counters, +may become significant when there are many. +A single document with thousands of Counters is probably an indication of a modeling mistake, +for example. + +#### Counters Naming Convention + +* Valid characters: All visible characters, [including Unicode symbols](../../studio/database/document-extensions/counters.mdx#section) +* Length: Up to 512 bytes +* Encoding: UTF-8 +#### Counter Values + +* Valid range: Signed 64-bit integer (-9223372036854775808 to 9223372036854775807) +* Only integer additions are supported (no floats or other mathematical operations). +#### Number of Counters Per Document + +RavenDB doesn't limit the number of Counters you can create. + + +Note that the Counter names are stored in the document metadata and [do impact the size of the document](../../document-extensions/counters/overview.mdx#counters-cost). + +#### The `HasCounters` Flag + +When a Counter is added to a document, RavenDB automatically sets a `HasCounters` Flag in the document's metadata. +When all Counters are removed from a document, the server automatically removes this flag. + + + +## Managing Counters + +#### Counter Methods and the `CountersFor` Object + +Managing Counters is performed using the `CountersFor` Session object. + +* **Counter methods**: + + | Method | Description | + |-------------------------|-----------------------------------------------------------------------------------------| + | `CountersFor.Increment` | Increment the value of an existing Counter, or create a new Counter if it doesn't exist | + | `CountersFor.Delete` | Delete a Counter | + | `CountersFor.Get` | Get the current value of a Counter | + | `CountersFor.GetAll` | Get *all* the Counters of a document and their values | + + +* **Usage flow**: + * Open a session. + * Create an instance of `CountersFor`. + * Either pass `CountersFor` an explicit document ID, -or- + * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx), + e.g. a document object returned from [session.Query](../../client-api/session/querying/how-to-query.mdx) or from [session.Load](../../client-api/session/loading-entities.mdx#load). + * Use Counter methods to manage the document's Counters. + * If you execute [Increment](../../document-extensions/counters/create-or-modify.mdx) or [Delete](../../document-extensions/counters/delete.mdx), call `session.SaveChanges` for the action to take effect on the server. + +* **Success and failure**: + * As long as the document exists, Counter actions (Increment, Get, Delete etc.) always succeed. + * When a transaction that includes a Counter modification fails for any reason (e.g. a document concurrency conflict), + the Counter modification is reverted. + +* **`CountersFor` usage samples**: + * You can Use `CountersFor` by **explicitly passing it a document ID** (without pre-loading the document). + * You can also use `CountersFor` by passing it **the document object**. + + + + +{`// Use CountersFor without loading a document + +// 1. Open a session +using (var session = docStore.OpenSession()) +{ + // 2. 
pass an explicit document ID to the CountersFor constructor + var documentCounters = session.CountersFor("products/1-C"); + + // 3. Use \`CountersFor\` methods to manage the product document's Counters + documentCounters.Delete("ProductLikes"); // Delete the "ProductLikes" Counter + documentCounters.Increment("ProductModified", 15); // Add 15 to Counter "ProductModified" + var counter = documentCounters.Get("DaysLeftForSale"); // Get "DaysLeftForSale"'s value + + // 4. Execute all changes by calling SaveChanges + session.SaveChanges(); +} +`} + + + + +{`// Use CountersFor by passing it a document object + +// 1. Open a session +using (var session = docStore.OpenSession()) +{ + // 2. Use the session to load a document. + var document = session.Load("products/1-C"); + + // 3. Create an instance of \`CountersFor\` + // Pass the document object returned from session.Load as a param. + var documentCounters = session.CountersFor(document); + + // 4. Use \`CountersFor\` methods to manage the product document's Counters + documentCounters.Delete("ProductLikes"); // Delete the "ProductLikes" Counter + documentCounters.Increment("ProductModified", 15); // Add 15 to Counter "ProductModified" + var counter = documentCounters.Get("DaysLeftForSale"); // Get value of "DaysLeftForSale" + + // 5. Execute all changes by calling SaveChanges + session.SaveChanges(); +} +`} + + + +#### Managing Counters using `Operations` + +* In addition to working with the high-level Session, you can manage Counters using the low-level [Operations](../../client-api/operations/what-are-operations.mdx). + +* [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx) +can operate on a set of Counters of different documents in a single request. + + + diff --git a/versioned_docs/version-7.1/document-extensions/counters/_overview-java.mdx b/versioned_docs/version-7.1/document-extensions/counters/_overview-java.mdx new file mode 100644 index 0000000000..8459d10ec1 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_overview-java.mdx @@ -0,0 +1,215 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB's distributed counters, **Counters** for short, are numeric data variables that can be added to documents. + Use a Counter to count anything that needs counting, like: + * Sold products + * Voting results + * Any event related to the document + +* Counters interact with and can trigger other RavenDB features. + To find out how to use counters with other features, read [Counters and Other Features](../../../java/document-extensions/counters/counters-and-other-features.mdx). + +* Create and manage Counters using API methods, or through the [Studio](../../studio/database/document-extensions/counters.mdx). + +* In this page: + * [Why use Counters?](../../document-extensions/counters/overview.mdx#why-use-counters?) + * [Overview](../../document-extensions/counters/overview.mdx#overview) + * [Managing Counters](../../document-extensions/counters/overview.mdx#managing-counters) + * [Counter Methods and the `CountersFor` object](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object) + * [Managing Counters using `Operations`](../../document-extensions/counters/overview.mdx#managing-counters-using-) + +## Why use Counters? + +#### Convenient Counting Mechanism + +Counters are very easy to manage, using simple API methods or through the Studio. + +E.g. 
Use counters when you want to - + +- Keep track of the number of times a document has been viewed or rated. +- Count how many visitors from certain countries or regions read a document. +- Continuously record the number of visitors on an event page. +- Avoid having to update the whole document for just a numeric value change. +- Have a need for a high-throughput counter (also see **Distributed Values** below). +#### Distributed Values + +A Counter's value is [distributed between cluster nodes](../../document-extensions/counters/counters-in-clusters.mdx). +Among the advantages of this: + +* The cluster **remains available** even when nodes crash. +* Any node can provide or modify a Counter's value immediately, without checking or coordinating this with other nodes. +#### High Performance, Low Resources + +A document includes the Counter's _name_, while the Counter's actual _value_ is kept in a separate location. +Modifying a Counter's value doesn't require the modification of the document itself. +This results in highly efficient operation. +#### High-Frequency Counting + +Counters are especially useful when a very large number of counting operations is required, +because of their speed and low resources usage. + +E.g. Use Counters - + +- For an online election page, to continuously update a Number-Of-Votes Counter for each candidate. +- To continuously update Counters with the number of visitors in different sections of a big online store. + + + +## Overview + +#### Design + +A document's metadata contains only the ***Counters' names-list*** for this document. +***Counter Values*** are not kept in the document's metadata, but in a separate location. + +Therefore, changes like adding a new counter or deleting an existing counter trigger a document change, +while simply modifying the Counter Value does not. +#### Cumulative Counter Actions + +- Counter value-modification actions are cumulative, the order in which they are executed doesn't matter. + E.g., It doesn't matter if a Counter has been incremented by 2 and then by 7, or by 7 first and then by 2. + +- When a Counter is deleted, the sequence of Counter actions becomes non-cumulative and may require + [special attention](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment). +#### Counters and Conflicts + +Counter actions (for either name or value) almost never cause conflicts. +The only exception to this is [concurrent `Delete` and `Increment`](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment) +actions by multiple cluster nodes. + +- Counter actions can be executed concurrently or in any order, without causing a conflict. +- You can successfully modify Counters while their document is being modified by a different client. + + +Counter actions **can still be performed** when their related documents are in a conflicted state. + +#### Counters Cost + +Counters are designated to lower the cost of counting, but do come with a price. + +* **All the names** of a document's Counters are added to its content, increasing its size. +* **Counter values** occupy storage space. + + +Be aware that the negligible amount of resources required by a few Counters, +may become significant when there are many. +A single document with thousands of Counters is probably an indication of a modeling mistake, +for example. 
+ +#### Counters Naming Convention + +* Valid characters: All visible characters, including Unicode symbols +* Length: Up to 512 bytes +* Encoding: UTF-8 +#### Counter Values + +* Valid range: Signed 64-bit integer (-9223372036854775808 to 9223372036854775807) +* Only integer additions are supported (no floats or other mathematical operations). +#### Number of Counters Per Document + +RavenDB doesn't limit the number of Counters you can create. + + +Note that the Counter names are stored in the document metadata and [do impact the size of the document](../../document-extensions/counters/overview.mdx#counters-cost). + +#### The `HasCounters` Flag + +When a Counter is added to a document, RavenDB automatically sets a `HasCounters` Flag in the document's metadata. +When all Counters are removed from a document, the server automatically removes this flag. + + + +## Managing Counters + +#### Counter Methods and the `CountersFor` Object + +Managing Counters is performed using the `CountersFor` Session object. + +* **Counter methods**: + + - `CountersFor.Increment`: Increment the value of an existing Counter, or create a new Counter if it doesn't exist. + - `CountersFor.Delete`: Delete a Counter. + - `CountersFor.Get`: Get the current value of a Counter. + - `CountersFor.GetAll`: Get _all_ the Counters of a document and their values. + +* **Usage flow**: + + * Open a session. + * Create an instance of `countersFor`. + * Either pass `countersFor` an explicit document ID, -or- + * Pass it an entity tracked by the session, e.g. a document object + returned from `session.query` or from `session.load`. + * Use Counter methods to manage the document's Counters. + * If you execute [Increment](../../document-extensions/counters/create-or-modify.mdx) or [Delete](../../document-extensions/counters/delete.mdx), call `session.SaveChanges` for the action to take effect on the server. + +* **Success and failure**: + + - As long as the document exists, Counter actions (Increment, Get, Delete etc.) always succeed. + - When a transaction that includes a Counter modification fails for any reason (e.g. a document concurrency conflict), + the Counter modification is reverted. + +* **`CountersFor` usage samples**: + + - You can Use `CountersFor` by **explicitly passing it a document ID** (without pre-loading the document). + - You can also use `CountersFor` by passing it **the document object**. + + + + +{`// Use CountersFor without loading a document + +// 1. Open a session +try (IDocumentSession session = docStore.openSession()) { + // 2. pass an explicit document ID to the countersFor constructor + ISessionDocumentCounters documentCounters = session.countersFor("products/1-C"); + + // 3. Use \`countersFor\` methods to manage the product document's Counters + documentCounters.delete("productLikes"); // Delete the "productLikes" Counter + documentCounters.increment("productModified", 15); // Add 15 to Counter "productModified" + Long counter = documentCounters.get("daysLeftForSale");// Get "daysLeftForSale"'s value + + // 4. Save changes to the session + session.saveChanges(); +} +`} + + + + +{`// Use countersFor by passing it a document object + +// 1. Open a session +try (IDocumentSession session = docStore.openSession()) { + // 2. Use the session to load a document. + Product document = session.load(Product.class, "products/1-C"); + + // 3. Create an instance of \`countersFor\` + // Pass the document object returned from session.load as a param. 
+ ISessionDocumentCounters documentCounters = session.countersFor(document); + + // 4. Use \`countersFor\` methods to manage the product document's Counters + documentCounters.delete("productLikes"); // Delete the "productLikes" Counter + documentCounters.increment("productModified", 15); // Add 15 to Counter "productModified" + Long counter = documentCounters.get("daysLeftForSale");// Get value of "daysLeftForSale" + + // 5. Save the changes to the session + session.saveChanges(); +} +`} + + + +#### Managing Counters using `Operations` + +* In addition to working with the high-level Session, you can manage Counters using the low-level [Operations](../../client-api/operations/what-are-operations.mdx). + +* [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx) +can operate on a set of Counters of different documents in a single request. + + + diff --git a/versioned_docs/version-7.1/document-extensions/counters/_overview-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/counters/_overview-nodejs.mdx new file mode 100644 index 0000000000..4ee3a25481 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_overview-nodejs.mdx @@ -0,0 +1,216 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB's distributed counters, **Counters** for short, are numeric data variables that can be added to documents. + Use a Counter to count anything that needs counting, like: + * Sold products + * Voting results + * Any event related to the document + +* Counters interact with and can trigger other RavenDB features. + To find out how to use counters with other features, read [Counters and Other Features](../../../csharp/document-extensions/counters/counters-and-other-features.mdx). + +* Create and manage Counters using API methods, or through the [Studio](../../studio/database/document-extensions/counters.mdx). + +* In this page: + * [Why use Counters?](../../document-extensions/counters/overview.mdx#why-use-counters?) + * [Overview](../../document-extensions/counters/overview.mdx#overview) + * [Managing Counters](../../document-extensions/counters/overview.mdx#managing-counters) + * [Counter Methods and the `countersFor` object](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object) + * [Managing Counters using `Operations`](../../document-extensions/counters/overview.mdx#managing-counters-using-) + +## Why use Counters? + +#### Convenient Counting Mechanism + +Counters are very easy to manage, using simple API methods or through the Studio. + +E.g. Use counters when you want to - + +- Keep track of the number of times a document has been viewed or rated. +- Count how many visitors from certain countries or regions read a document. +- Continuously record the number of visitors on an event page. +- Avoid having to update the whole document for just a numeric value change. +- Have a need for a high-throughput counter (also see **Distributed Values** below). +#### Distributed Values + +A Counter's value is [distributed between cluster nodes](../../document-extensions/counters/counters-in-clusters.mdx). +Among the advantages of this: + +* The cluster **remains available** even when nodes crash. +* Any node can provide or modify a Counter's value immediately, without checking or coordinating this with other nodes. 
+#### High Performance, Low Resources
+
+A document includes the Counter's _name_, while the Counter's actual _value_ is kept in a separate location.
+Modifying a Counter's value doesn't require the modification of the document itself.
+This results in highly efficient operation.
+#### High-Frequency Counting
+
+Counters are especially useful when a very large number of counting operations is required,
+because of their speed and low resource usage.
+
+E.g., use Counters -
+
+- For an online election page, to continuously update a Number-Of-Votes Counter for each candidate.
+- To continuously update Counters with the number of visitors in different sections of a big online store.
+
+
+
+## Overview
+
+#### Design
+
+A document's metadata contains only the ***Counters' names-list*** for this document.
+***Counter Values*** are not kept in the document's metadata, but in a separate location.
+
+Therefore, changes like adding a new counter or deleting an existing counter trigger a document change,
+while simply modifying the Counter Value does not.
+#### Cumulative Counter Actions
+
+* Counter value-modification actions are cumulative: the order in which they are executed doesn't matter.
+  E.g., it doesn't matter if a Counter has been incremented by 2 and then by 7, or by 7 first and then by 2.
+
+* When a Counter is deleted, the sequence of Counter actions becomes non-cumulative and may require
+  [special attention](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment).
+#### Counters and Conflicts
+
+Counter actions (for either name or value) almost never cause conflicts.
+The only exception to this is [concurrent `delete` and `increment`](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment)
+actions by multiple cluster nodes.
+
+- Counter actions can be executed concurrently or in any order, without causing a conflict.
+- You can successfully modify Counters while their document is being modified by a different client.
+
+
+Counter actions **can still be performed** when their related documents are in a conflicted state.
+
+#### Counters Cost
+
+Counters are designed to lower the cost of counting, but do come with a price.
+
+* **All the names** of a document's Counters are added to its content, increasing its size.
+* **Counter values** occupy storage space.
+
+
+Be aware that the negligible amount of resources required by a few Counters
+may become significant when there are many.
+A single document with thousands of Counters, for example, is probably an indication of a modeling mistake.
+
+#### Counters Naming Convention
+
+* Valid characters: All visible characters, [including Unicode symbols](../../studio/database/document-extensions/counters.mdx#section)
+* Length: Up to 512 bytes
+* Encoding: UTF-8
+#### Counter Values
+
+* Valid range: Signed 64-bit integer (-9223372036854775808 to 9223372036854775807)
+* Only integer additions are supported (no floats or other mathematical operations).
+#### Number of Counters Per Document
+
+RavenDB doesn't limit the number of Counters you can create.
+
+
+Note that the Counter names are stored in the document metadata and [do impact the size of the document](../../document-extensions/counters/overview.mdx#counters-cost).
+
+#### The `HasCounters` Flag
+
+When a Counter is added to a document, RavenDB automatically sets a `HasCounters` flag in the document's metadata.
+When all Counters are removed from a document, the server automatically removes this flag.
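+
+As a minimal sketch of how the flag can be observed from the Node.js client - assuming
+`session.advanced.getMetadataFor` and the `@flags` metadata entry (verify both against your
+client version) - incrementing a Counter and re-loading the document should expose the flag:
+
+{`// A minimal sketch, not part of the examples below:
+// add a Counter, then inspect the document's metadata.
+const session = documentStore.openSession();
+session.countersFor("products/1-A").increment("productViews", 1);
+await session.saveChanges();
+
+// Re-load in a fresh session so the server-set metadata is visible
+const session2 = documentStore.openSession();
+const product = await session2.load("products/1-A");
+const metadata = session2.advanced.getMetadataFor(product);
+console.log(metadata["@flags"]); // expected to include "HasCounters"
+`}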
+ + + +## Managing Counters + +#### Counter Methods and the `countersFor` Object + +Managing Counters is performed using the `countersFor` Session object. + +* **Counter methods**: + + | Method | Description | + |-------------------------|-----------------------------------------------------------------------------------------| + | `countersFor.increment` | Increment the value of an existing Counter, or create a new Counter if it doesn't exist | + | `countersFor.delete` | Delete a Counter | + | `countersFor.get` | Get the current value of a Counter | + | `countersFor.getAll` | Get *all* the Counters of a document and their values | + + +* **Usage flow**: + * Open a session. + * Create an instance of `countersFor`. + * Either pass `countersFor` an explicit document ID, -or- + * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx), + e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load). + * Use Counter methods to manage the document's Counters. + * If you execute [increment](../../document-extensions/counters/create-or-modify.mdx) or [delete](../../document-extensions/counters/delete.mdx), call `session.saveChanges` for the action to take effect on the server. + +* **Success and failure**: + * As long as the document exists, Counter actions (Increment, Get, Delete etc.) always succeed. + * When a transaction that includes a Counter modification fails for any reason (e.g. a document concurrency conflict), + the Counter modification is reverted. + +* **`countersFor` usage samples**: + * You can Use `countersFor` by **explicitly passing it a document ID** (without pre-loading the document). + * You can also use `countersFor` by passing it **the document object**. + + + + +{`// Use countersFor without loading a document: +// =========================================== + +// Open a session +const session = documentStore.openSession(); + +// Pass an explicit document ID to the countersFor constructor +const documentCounters = session.countersFor("products/1-A"); + +// Use \`CountersFor\` methods to manage the document's Counters +documentCounters.delete("ProductLikes"); // Delete the "ProductLikes" Counter +documentCounters.increment("ProductModified", 15); // Add 15 to Counter "ProductModified" +const counter = await documentCounters.get("DaysLeftForSale"); // Get value for "DaysLeftForSale" + +// Save changes +await session.saveChanges(); +`} + + + + +{`// Use countersFor by passing it a document entity: +// ================================================ + +// Open a session +const session = documentStore.openSession(); + +// Load a document +const product = await session.load("products/1-A"); + +// Pass the entity returned from session.load as a param. +const documentCounters = session.countersFor(product); + +// Use \`countersFor\` methods to manage the document's Counters +documentCounters.delete("ProductLikes"); // Delete the "ProductLikes" Counter +documentCounters.increment("ProductModified", 15); // Add 15 to Counter "ProductModified" +const counter = await documentCounters.get("DaysLeftForSale"); // Get value for "DaysLeftForSale" + +// Save changes +await session.saveChanges(); +`} + + + +#### Managing Counters using `Operations` + +* In addition to working with the high-level Session, you can manage Counters using the low-level [Operations](../../client-api/operations/what-are-operations.mdx). 
+
+* [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx)
+can operate on a set of Counters of different documents in a single request.
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_overview-php.mdx b/versioned_docs/version-7.1/document-extensions/counters/_overview-php.mdx new file mode 100644 index 0000000000..707d2ca948 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_overview-php.mdx @@ -0,0 +1,221 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* RavenDB's distributed counters, **Counters** for short, are numeric data variables that can be added to documents.
+  Use a Counter to count anything that needs counting, like:
+  * Sold products
+  * Voting results
+  * Any event related to the document
+
+* Counters interact with and can trigger other RavenDB features.
+  To find out how to use counters with other features, read [Counters and Other Features](../../document-extensions/counters/counters-and-other-features.mdx).
+
+* Create and manage Counters using API methods, or through the [Studio](../../studio/database/document-extensions/counters.mdx).
+
+* In this page:
+  * [Why use Counters?](../../document-extensions/counters/overview.mdx#why-use-counters)
+  * [Overview](../../document-extensions/counters/overview.mdx#overview)
+  * [Managing Counters](../../document-extensions/counters/overview.mdx#managing-counters)
+  * [Counter Methods and the `countersFor` object](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object)
+  * [Managing Counters using `Operations`](../../document-extensions/counters/overview.mdx#managing-counters-using-)
+
+## Why use Counters?
+
+#### Convenient Counting Mechanism
+
+Counters are very easy to manage, using simple API methods or through the Studio.
+
+E.g., use counters when you want to:
+
+- Keep track of the number of times a document has been viewed or rated.
+- Count how many visitors from certain countries or regions read a document.
+- Continuously record the number of visitors on an event page.
+- Avoid having to update the whole document for just a numeric value change.
+- Maintain a high-throughput counter (also see **Distributed Values** below).
+#### Distributed Values
+
+A Counter's value is [distributed between cluster nodes](../../document-extensions/counters/counters-in-clusters.mdx).
+Among the advantages of this:
+
+* The cluster **remains available** even when nodes crash.
+* Any node can provide or modify a Counter's value immediately, without checking or coordinating this with other nodes.
+#### High Performance, Low Resources
+
+A document includes the Counter's _name_, while the Counter's actual _value_ is kept in a separate location.
+Modifying a Counter's value doesn't require the modification of the document itself.
+This results in highly efficient operation.
+#### High-Frequency Counting
+
+Counters are especially useful when a very large number of counting operations is required,
+because of their speed and low resource usage.
+
+E.g., use Counters -
+
+- For an online election page, to continuously update a Number-Of-Votes Counter for each candidate.
+- To continuously update Counters with the number of visitors in different sections of a big online store.
+
+
+
+## Overview
+
+#### Design
+
+A document's metadata contains only the ***Counters' names-list*** for this document.
+***Counter Values*** are not kept in the document's metadata, but in a separate location.
+
+Therefore, changes like adding a new counter or deleting an existing counter trigger a document change,
+while simply modifying the Counter Value does not.
+#### Cumulative Counter Actions
+
+* Counter value-modification actions are cumulative: the order in which they are executed doesn't matter.
+  E.g., it doesn't matter if a Counter has been incremented by 2 and then by 7, or by 7 first and then by 2.
+
+* When a Counter is deleted, the sequence of Counter actions becomes non-cumulative and may require
+  [special attention](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment).
+#### Counters and Conflicts
+
+Counter actions (for either name or value) almost never cause conflicts.
+The only exception to this is [concurrent `delete` and `increment`](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment)
+actions by multiple cluster nodes.
+
+- Counter actions can be executed concurrently or in any order, without causing a conflict.
+- You can successfully modify Counters while their document is being modified by a different client.
+
+
+Counter actions **can still be performed** when their related documents are in a conflicted state.
+
+#### Counters Cost
+
+Counters are designed to lower the cost of counting, but do come with a price.
+
+* **All the names** of a document's Counters are added to its content, increasing its size.
+* **Counter values** occupy storage space.
+
+
+Be aware that the negligible amount of resources required by a few Counters
+may become significant when there are many.
+A single document with thousands of Counters, for example, is probably an indication of a modeling mistake.
+
+#### Counters Naming Convention
+
+* Valid characters: All visible characters, [including Unicode symbols](../../studio/database/document-extensions/counters.mdx#section)
+* Length: Up to 512 bytes
+* Encoding: UTF-8
+#### Counter Values
+
+* Valid range: Signed 64-bit integer (-9223372036854775808 to 9223372036854775807)
+* Only integer additions are supported (no floats or other mathematical operations).
+#### Number of Counters Per Document
+
+RavenDB doesn't limit the number of Counters you can create.
+
+
+Note that the Counter names are stored in the document metadata and [do impact the size of the document](../../document-extensions/counters/overview.mdx#counters-cost).
+
+#### The `HasCounters` Flag
+
+When a Counter is added to a document, RavenDB automatically sets a `HasCounters` flag in the document's metadata.
+When all Counters are removed from a document, the server automatically removes this flag.
+
+
+
+## Managing Counters
+
+#### Counter Methods and the `countersFor` Object
+
+Managing Counters is performed using the `countersFor` session object.
+
+* **Counter methods**:
+
+  | Method                  | Description                                                                               |
+  |-------------------------|-------------------------------------------------------------------------------------------|
+  | `countersFor.increment` | Increment the value of an existing Counter, or create a new Counter if it doesn't exist   |
+  | `countersFor.delete`    | Delete a Counter                                                                           |
+  | `countersFor.get`       | Get the current value of a Counter                                                         |
+  | `countersFor.getAll`    | Get *all* the Counters of a document and their values                                      |
+
+
+* **Usage flow**:
+  * Open a session.
+  * Create an instance of `countersFor`.
+ * Either pass `countersFor` an explicit document ID, -or- + * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx), + e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load). + * Use Counter methods to manage the document's Counters. + * If you execute [increment](../../document-extensions/counters/create-or-modify.mdx) or [delete](../../document-extensions/counters/delete.mdx), call `session.saveChanges` for the action to take effect on the server. + +* **Success and failure**: + * As long as the document exists, Counter actions (Increment, Get, Delete etc.) always succeed. + * When a transaction that includes a Counter modification fails for any reason (e.g. a document concurrency conflict), + the Counter modification is reverted. + +* **`countersFor` usage samples**: + * You can Use `countersFor` by **explicitly passing it a document ID** (without pre-loading the document). + * You can also use `countersFor` by passing it **the document object**. + + + + +{`// Use countersFor without loading a document + +// 1. Open a session +$session = $docStore->openSession(); +try { + // 2. pass an explicit document ID to the CountersFor constructor + $documentCounters = $session->countersFor("products/1-C"); + + // 3. Use \`countersFor\` methods to manage the product document's Counters + $documentCounters->delete("ProductLikes"); // delete the "ProductLikes" Counter + $documentCounters->increment("ProductModified", 15); // Add 15 to Counter "ProductModified" + $counter = $documentCounters->get("DaysLeftForSale"); // Get "DaysLeftForSale"'s value + + // 4. Execute all changes by calling saveChanges + $session->saveChanges(); +} finally { + $session->close(); +} +`} + + + + +{`// Use CountersFor by passing it a document object + +// 1. Open a session +$session = $docStore->openSession(); +try { + // 2. Use the session to load a document. + $document = $session->load(Product::class, "products/1-C"); + + // 3. Create an instance of \`countersFor\` + // Pass the document object returned from session.load as a param. + $documentCounters = $session->countersFor($document); + + // 4. Use \`countersFor\` methods to manage the product document's Counters + $documentCounters->delete("ProductLikes"); // delete the "ProductLikes" Counter + $documentCounters->increment("ProductModified", 15); // Add 15 to Counter "ProductModified" + $counter = $documentCounters->get("DaysLeftForSale"); // Get value of "DaysLeftForSale" + + // 5. Execute all changes by calling SaveChanges + $session->saveChanges(); +} finally { + $session->close(); +} +`} + + + +#### Managing Counters using `Operations` + +* In addition to working with the high-level Session, you can manage Counters using the low-level [Operations](../../client-api/operations/what-are-operations.mdx). + +* [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx) +can operate on a set of Counters of different documents in a single request. 
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_overview-python.mdx b/versioned_docs/version-7.1/document-extensions/counters/_overview-python.mdx new file mode 100644 index 0000000000..b7eb0ad21d --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_overview-python.mdx @@ -0,0 +1,213 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* RavenDB's distributed counters, **Counters** for short, are numeric data variables that can be added to documents.
+  Use a Counter to count anything that needs counting, like:
+  * Sold products
+  * Voting results
+  * Any event related to the document
+
+* Counters interact with and can trigger other RavenDB features.
+  To find out how to use counters with other features, read [Counters and Other Features](../../document-extensions/counters/counters-and-other-features.mdx).
+
+* Create and manage Counters using API methods, or through the [Studio](../../studio/database/document-extensions/counters.mdx).
+
+* In this page:
+  * [Why use Counters?](../../document-extensions/counters/overview.mdx#why-use-counters)
+  * [Overview](../../document-extensions/counters/overview.mdx#overview)
+  * [Managing Counters](../../document-extensions/counters/overview.mdx#managing-counters)
+  * [Counter Methods and the `counters_for` object](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object)
+  * [Managing Counters using `Operations`](../../document-extensions/counters/overview.mdx#managing-counters-using-)
+
+## Why use Counters?
+
+#### Convenient Counting Mechanism
+
+Counters are very easy to manage, using simple API methods or through the Studio.
+
+E.g., use counters when you want to:
+
+- Keep track of the number of times a document has been viewed or rated.
+- Count how many visitors from certain countries or regions read a document.
+- Continuously record the number of visitors on an event page.
+- Avoid having to update the whole document for just a numeric value change.
+- Maintain a high-throughput counter (also see **Distributed Values** below).
+#### Distributed Values
+
+A Counter's value is [distributed between cluster nodes](../../document-extensions/counters/counters-in-clusters.mdx).
+Among the advantages of this:
+
+* The cluster **remains available** even when nodes crash.
+* Any node can provide or modify a Counter's value immediately, without checking or coordinating this with other nodes.
+#### High Performance, Low Resources
+
+A document includes the Counter's _name_, while the Counter's actual _value_ is kept in a separate location.
+Modifying a Counter's value doesn't require the modification of the document itself.
+This results in highly efficient operation.
+#### High-Frequency Counting
+
+Counters are especially useful when a very large number of counting operations is required,
+because of their speed and low resource usage.
+
+E.g., use Counters -
+
+- For an online election page, to continuously update a Number-Of-Votes Counter for each candidate.
+- To continuously update Counters with the number of visitors in different sections of a big online store.
+
+
+
+## Overview
+
+#### Design
+
+A document's metadata contains only the ***Counters' names-list*** for this document.
+***Counter Values*** are not kept in the document's metadata, but in a separate location.
+
+Therefore, changes like adding a new counter or deleting an existing counter trigger a document change,
+while simply modifying the Counter Value does not.
+#### Cumulative Counter Actions
+
+* Counter value-modification actions are cumulative: the order in which they are executed doesn't matter.
+  E.g., it doesn't matter if a Counter has been incremented by 2 and then by 7, or by 7 first and then by 2.
+
+* When a Counter is deleted, the sequence of Counter actions becomes non-cumulative and may require
+  [special attention](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment).
+#### Counters and Conflicts
+
+Counter actions (for either name or value) almost never cause conflicts.
+The only exception to this is [concurrent `delete` and `increment`](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment)
+actions by multiple cluster nodes.
+
+- Counter actions can be executed concurrently or in any order, without causing a conflict.
+- You can successfully modify Counters while their document is being modified by a different client.
+
+
+Counter actions **can still be performed** when their related documents are in a conflicted state.
+
+#### Counters Cost
+
+Counters are designed to lower the cost of counting, but do come with a price.
+
+* **All the names** of a document's Counters are added to its content, increasing its size.
+* **Counter values** occupy storage space.
+
+
+Be aware that the negligible amount of resources required by a few Counters
+may become significant when there are many.
+A single document with thousands of Counters, for example, is probably an indication of a modeling mistake.
+
+#### Counters Naming Convention
+
+* Valid characters: All visible characters, [including Unicode symbols](../../studio/database/document-extensions/counters.mdx#section)
+* Length: Up to 512 bytes
+* Encoding: UTF-8
+#### Counter Values
+
+* Valid range: Signed 64-bit integer (-9223372036854775808 to 9223372036854775807)
+* Only integer additions are supported (no floats or other mathematical operations).
+#### Number of Counters Per Document
+
+RavenDB doesn't limit the number of Counters you can create.
+
+
+Note that the Counter names are stored in the document metadata and [do impact the size of the document](../../document-extensions/counters/overview.mdx#counters-cost).
+
+#### The `HasCounters` Flag
+
+When a Counter is added to a document, RavenDB automatically sets a `HasCounters` flag in the document's metadata.
+When all Counters are removed from a document, the server automatically removes this flag.
+
+
+
+## Managing Counters
+
+#### Counter Methods and the `counters_for` Object
+
+Managing Counters is performed using the `counters_for` session object.
+
+* **Counter methods**:
+
+  | Method                   | Description                                                                               |
+  |--------------------------|-------------------------------------------------------------------------------------------|
+  | `counters_for.increment` | Increment the value of an existing Counter, or create a new Counter if it doesn't exist   |
+  | `counters_for.delete`    | Delete a Counter                                                                           |
+  | `counters_for.get`       | Get the current value of a Counter                                                         |
+  | `counters_for.get_all`   | Get *all* the Counters of a document and their values                                      |
+
+
+* **Usage flow**:
+  * Open a session.
+  * Create an instance of `counters_for`.
+  * Either pass `counters_for` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g.
a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load). + * Use Counter methods to manage the document's Counters. + * If you execute [increment](../../document-extensions/counters/create-or-modify.mdx) or [delete](../../document-extensions/counters/delete.mdx), call `session.save_changes` for the action to take effect on the server. + +* **Success and failure**: + * As long as the document exists, Counter actions (Increment, Get, Delete etc.) always succeed. + * When a transaction that includes a Counter modification fails for any reason (e.g. a document concurrency conflict), + the Counter modification is reverted. + +* **`counters_for` usage samples**: + * You can Use `counters_for` by **explicitly passing it a document ID** (without pre-loading the document). + * You can also use `counters_for` by passing it **the document object**. + + + + +{`# Use CountersFor without loading a document + +# 1. Open a session +with store.open_session() as session: + # 2. pass an explicit document ID to the CountersFor constructor + document_counters = session.counters_for("products/1-C") + + # 3. Use 'CountersFor' methods to manage the product document's counters + document_counters.delete("ProductLikes") # Delete the "ProductLikes" counter + document_counters.increment("ProductModified", 15) # Add 15 to Counter "ProductModified" + counter = document_counters.get("DaysLeftForSale") # Get "DaysLeftForSale"'s value + + # 4. Execute all changes by calling save_changes + session.save_changes() +`} + + + + +{`# Use counters_for_entity by passing it a document object + +# 1. Open a session +with store.open_session() as session: + # 2. Use the session to load a document. + document = session.load("products/1-C") + + # 3. Create an instance of 'CountersFor' + # Pass the document object returned from session.load as a param. + document_counters = session.counters_for_entity(document) + + # 4. Use 'CountersFor' methods to manage the product document's counters + document_counters.delete("ProductLikes") # Delete the "ProductLikes" counter + document_counters.increment("ProductModified", 15) # Add 15 to Counter "ProductModified" + counter = document_counters.get("DaysLeftForSale") # Get value of "DaysLeftForSale" + + # 5. Execute all changes by calling save_changes + session.save_changes() +`} + + + +#### Managing Counters using `Operations` + +* In addition to working with the high-level Session, you can manage Counters using the low-level [Operations](../../client-api/operations/what-are-operations.mdx). + +* [CounterBatchOperation](../../client-api/operations/counters/counter-batch.mdx) +can operate on a set of Counters of different documents in a single request. + + + diff --git a/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-csharp.mdx b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-csharp.mdx new file mode 100644 index 0000000000..50c5b01b26 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-csharp.mdx @@ -0,0 +1,118 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `CountersFor.Get` to retrieve the value of a **single Counter**, + or `CountersFor.GetAll` to retrieve the names and values of **all Counters** associated with a document. 
+
+* For all other `CountersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+
+  * [Get a single Counter's value](../../document-extensions/counters/retrieve-counter-values.mdx#get-a-single-counter)
+      * [Get usage](../../document-extensions/counters/retrieve-counter-values.mdx#get-usage)
+      * [Get example](../../document-extensions/counters/retrieve-counter-values.mdx#get-example)
+      * [Get syntax](../../document-extensions/counters/retrieve-counter-values.mdx#get-syntax)
+
+  * [Get all Counters of a document](../../document-extensions/counters/retrieve-counter-values.mdx#get-all-counters-of-a-document)
+      * [GetAll usage](../../document-extensions/counters/retrieve-counter-values.mdx#getall-usage)
+      * [GetAll example](../../document-extensions/counters/retrieve-counter-values.mdx#getall-example)
+      * [GetAll Syntax](../../document-extensions/counters/retrieve-counter-values.mdx#getall-syntax)
+
+
+## Get a single Counter's value
+
+#### Get usage:
+
+* Open a session.
+* Create an instance of `CountersFor`.
+    * Either pass `CountersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.Query](../../client-api/session/querying/how-to-query.mdx) or from [session.Load](../../client-api/session/loading-entities.mdx#load).
+* Call `CountersFor.Get` to retrieve the current value of a single Counter.
+#### Get example:
+
+
+
+{`// 1. Open a session
+using (var session = docStore.OpenSession())
+\{
+    // 2. Pass CountersFor's constructor a document ID
+    var documentCounters = session.CountersFor("products/1-C");
+
+    // 3. Use \`CountersFor.Get\` to retrieve a Counter's value
+    var daysLeft = documentCounters.Get("DaysLeftForSale");
+    Console.WriteLine("Days Left For Sale: " + daysLeft);
+\}
+`}
+
+
+#### Get syntax:
+
+
+
+{`long Get(string counterName);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|--------|----------------|
+| `counterName` | string | Counter's name |
+
+| Return Type | Description |
+|--------------|-------------------------|
+| `long` | Counter's current value |
+
+
+
+## Get all Counters of a document
+#### GetAll usage:
+
+* Open a session.
+* Create an instance of `CountersFor`.
+    * Either pass `CountersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.Query](../../client-api/session/querying/how-to-query.mdx) or from [session.Load](../../client-api/session/loading-entities.mdx#load).
+* Call `CountersFor.GetAll` to retrieve the names and values of all counters associated with the document.
+#### GetAll example:
+
+
+
+{`// 1. Open a session
+using (var session = docStore.OpenSession())
+\{
+    // 2. Pass CountersFor's constructor a document ID
+    var documentCounters = session.CountersFor("products/1-C");
+
+    // 3. Use GetAll to retrieve all of the document's Counters' names and values.
+    var counters = documentCounters.GetAll();
+
+    // List counters' names and values
+    foreach (var counter in counters)
+    \{
+        Console.WriteLine("counter name: " + counter.Key + ", counter value: " + counter.Value);
+    \}
+\}
+`}
+
+
+#### GetAll syntax:
+
+
+
+{`Dictionary<string, long> GetAll();
+`}
+
+
+
+| Return Type | Description |
+|--------------------------|---------------------------------|
+| Dictionary<string, long> | Map of Counter names and values |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-java.mdx b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-java.mdx new file mode 100644 index 0000000000..467a513214 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-java.mdx @@ -0,0 +1,112 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `countersFor.get` to retrieve the value of a **single Counter**,
+  or `countersFor.getAll` to retrieve the names and values of **all Counters** associated with a document.
+
+* For all other `countersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+
+  * [Get a single Counter's value](../../document-extensions/counters/retrieve-counter-values.mdx#get-a-single-counter)
+      * [Get usage](../../document-extensions/counters/retrieve-counter-values.mdx#get-usage)
+      * [Get example](../../document-extensions/counters/retrieve-counter-values.mdx#get-example)
+      * [Get syntax](../../document-extensions/counters/retrieve-counter-values.mdx#get-syntax)
+
+  * [Get all Counters of a document](../../document-extensions/counters/retrieve-counter-values.mdx#get-all-counters-of-a-document)
+      * [GetAll usage](../../document-extensions/counters/retrieve-counter-values.mdx#getall-usage)
+      * [GetAll example](../../document-extensions/counters/retrieve-counter-values.mdx#getall-example)
+      * [GetAll Syntax](../../document-extensions/counters/retrieve-counter-values.mdx#getall-syntax)
+
+
+## Get a single Counter's value
+#### Get usage:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an entity tracked by the session, e.g. a document object returned from `session.query` or from `session.load`.
+* Call `countersFor.get` to retrieve the current value of a single Counter.
+#### Get example:
+
+
+
+{`// 1. Open a session
+try (IDocumentSession session = docStore.openSession()) \{
+    // 2. Pass countersFor's constructor a document ID
+    ISessionDocumentCounters documentCounters = session.countersFor("products/1-C");
+
+    // 3. Use \`countersFor.get\` to retrieve a Counter's value
+    Long daysLeft = documentCounters.get("daysLeftForSale");
+    System.out.println("Days Left For Sale: " + daysLeft);
+\}
+`}
+
+
+#### Get syntax:
+
+
+
+{`Long get(String counterName);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|---------|----------------|
+| `counterName` | String | Counter's name |
+
+| Return Type | Description |
+|--------------|-------------------------|
+| `Long` | Counter's current value |
+
+
+
+## Get all Counters of a document
+#### GetAll usage:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an entity tracked by the session, e.g.
a document object returned from `session.query` or from `session.load`.
+* Call `countersFor.getAll` to retrieve the names and values of all counters associated with the document.
+#### GetAll example:
+
+
+
+{`// 1. Open a session
+try (IDocumentSession session = docStore.openSession()) \{
+    // 2. Pass countersFor's constructor a document ID
+    ISessionDocumentCounters documentCounters = session.countersFor("products/1-C");
+
+    // 3. Use getAll to retrieve all of the document's Counters' names and values.
+    Map<String, Long> counters = documentCounters.getAll();
+
+    // List counters' names and values
+    for (Map.Entry<String, Long> kvp : counters.entrySet()) \{
+        System.out.println("counter name: " + kvp.getKey() + ", counter value: " + kvp.getValue());
+    \}
+\}
+`}
+
+
+#### GetAll syntax:
+
+
+
+{`Map<String, Long> getAll();
+`}
+
+
+
+| Return Type | Description |
+|-------------------|---------------------------------|
+| Map<String, Long> | Map of Counter names and values |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-nodejs.mdx new file mode 100644 index 0000000000..4d380cd50b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-nodejs.mdx @@ -0,0 +1,138 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `countersFor.get` to retrieve the value of a **single Counter**,
+  or `countersFor.getAll` to retrieve the names and values of **all Counters** associated with a document.
+
+* For all other `countersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+
+  * [Get a single Counter's value](../../document-extensions/counters/retrieve-counter-values.mdx#get-a-single-counter)
+      * [Get usage](../../document-extensions/counters/retrieve-counter-values.mdx#get-usage)
+      * [Get example](../../document-extensions/counters/retrieve-counter-values.mdx#get-example)
+      * [Get syntax](../../document-extensions/counters/retrieve-counter-values.mdx#get-syntax)
+
+  * [Get all Counters of a document](../../document-extensions/counters/retrieve-counter-values.mdx#get-all-counters-of-a-document)
+      * [GetAll usage](../../document-extensions/counters/retrieve-counter-values.mdx#getall-usage)
+      * [GetAll example](../../document-extensions/counters/retrieve-counter-values.mdx#getall-example)
+      * [GetAll Syntax](../../document-extensions/counters/retrieve-counter-values.mdx#getall-syntax)
+
+
+## Get a single Counter's value
+
+
+
+  **Get usage**:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `countersFor.get` to retrieve the current value of a single Counter.
+
+
+
+
+  **Get example**:
+
+
+
+{`// Open a session
+const session = documentStore.openSession();
+
+// Pass a document ID to the countersFor constructor
+const documentCounters = session.countersFor("products/1-A");
+
+// Call \`get\` to retrieve a Counter's value
+const daysLeft = await documentCounters.get("DaysLeftForSale");
+
+console.log("Days Left For Sale: " + daysLeft);
+`}
+
+
+
+
+
+
+  **Get syntax**:
+
+
+
+{`get(counter);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|--------|----------------|
+| `counter` | string | Counter's name |
+
+| Return Type | Description |
+|--------------------------------|----------------------------------------------------------------------------------------------|
+| `Promise<number>` | A `Promise` resolving to the Counter's current value, or to `null` if the counter doesn't exist. |
+
+
+
+
+## Get all Counters of a document
+
+
+
+  **GetAll usage**:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `countersFor.getAll` to retrieve the names and values of all counters associated with the document.
+
+
+
+
+  **GetAll example**:
+
+
+
+{`// Open a session
+const session = documentStore.openSession();
+
+// Pass a document ID to the countersFor constructor
+const documentCounters = session.countersFor("products/1-A");
+
+// Call \`getAll\` to retrieve all of the document's Counters' names and values
+const allCounters = await documentCounters.getAll();
+
+for (const counter in allCounters) \{
+    console.log("counter name: " + counter + ", counter value: " + allCounters[counter]);
+\}
+`}
+
+
+
+
+
+
+  **GetAll syntax**:
+
+
+
+{`getAll();
+`}
+
+
+
+| Return Type | Description |
+|-------------------|--------------------------------------------------------------------------|
+| `Promise<Record<string, number>>` | A `Promise` resolving to a dictionary of counter values by counter names |
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-php.mdx b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-php.mdx new file mode 100644 index 0000000000..7e01254287 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-php.mdx @@ -0,0 +1,127 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `countersFor.get` to retrieve the values of **specific counters**,
+  or `countersFor.getAll` to retrieve the names and values of **all counters** associated with a document.
+
+* For all other `countersFor` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object).
+
+* In this page:
+
+  * [Get values of specific counters](../../document-extensions/counters/retrieve-counter-values.mdx#get-values-of-specific-counters)
+      * [`get` usage](../../document-extensions/counters/retrieve-counter-values.mdx#usage)
+      * [`get` example](../../document-extensions/counters/retrieve-counter-values.mdx#example)
+      * [`get` syntax](../../document-extensions/counters/retrieve-counter-values.mdx#syntax)
+
+  * [Get all Counters of a document](../../document-extensions/counters/retrieve-counter-values.mdx#get-all-counters-of-a-document)
+      * [`getAll` usage](../../document-extensions/counters/retrieve-counter-values.mdx#usage-1)
+      * [`getAll` example](../../document-extensions/counters/retrieve-counter-values.mdx#example-1)
+      * [`getAll` Syntax](../../document-extensions/counters/retrieve-counter-values.mdx#syntax-1)
+
+
+## Get values of specific counters
+
+#### `get` usage:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `countersFor.get` to retrieve counter values.
+    * `get("CounterName")` will return a single `int` value for the specified counter.
+    * `get(["counter1", "counter2"])` will return an array with values for all listed counters.
+      E.g., `[ "counter1" => 1, "counter2" => 5 ]`
+#### `get` example:
+
+
+
+{`// 1. Open a session
+$session = $docStore->openSession();
+try \{
+    // 2. Pass the countersFor constructor a document ID
+    $documentCounters = $session->countersFor("products/1-C");
+
+    // 3. Use \`countersFor.get\` to retrieve a counter's value
+    $daysLeft = $documentCounters->get("DaysLeftForSale");
+
+    echo "Days Left For Sale: " . $daysLeft . PHP_EOL;
+\} finally \{
+    $session->close();
+\}
+`}
+
+
+#### `get` syntax:
+
+
+
+{`public function get(string|StringList|array $counters): null|int|array;
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|--------|----------------|
+| `counters` | `string` or `StringList` or `array` | Counter names |
+
+| Return Type | Description |
+|--------------|-------------------------|
+| `int` or `array` | The current value of a single counter, or an array of values for multiple counters |
+
+
+
+## Get all Counters of a document
+#### `getAll` usage:
+
+* Open a session.
+* Create an instance of `countersFor`.
+    * Either pass `countersFor` an explicit document ID, -or-
+    * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx),
+      e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load).
+* Call `countersFor.getAll` to retrieve the names and values of all counters associated with the document.
+#### `getAll` example:
+
+
+
+{`// 1. Open a session
+$session = $docStore->openSession();
+try \{
+    // 2. Pass the countersFor constructor a document ID
+    $documentCounters = $session->countersFor("products/1-C");
+
+    // 3. Use getAll to retrieve all of the document's Counters' names and values
+    $counters = $documentCounters->getAll();
+
+    // List counters' names and values
+
+    foreach ($counters as $counterKey => $counterValue)
+    \{
+        echo "counter name: " . $counterKey . ", counter value: " .
$counterValue; + \} +\} finally \{ + $session->close(); +\} +`} + + +#### `getAll` syntax: + + + +{`public function getAll(): array; +`} + + + +| Return Type | Description | +|------------------|-----------------------------| +| `array` | An array of Counter names and values | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-python.mdx b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-python.mdx new file mode 100644 index 0000000000..f618ecdc93 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/_retrieve-counter-values-python.mdx @@ -0,0 +1,112 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `counters_for.get` to retrieve the value of a **single Counter**, + or `counters_for.get_all` to retrieve the names and values of **all Counters** associated with a document. + +* For all other `counters_for` methods see this [Overview](../../document-extensions/counters/overview.mdx#counter-methods-and-the--object). + +* In this page: + + * [Get a single Counter's value](../../document-extensions/counters/retrieve-counter-values.mdx#get-a-single-counter) + * [`get` usage](../../document-extensions/counters/retrieve-counter-values.mdx#usage) + * [`get` example](../../document-extensions/counters/retrieve-counter-values.mdx#example) + * [`get` syntax](../../document-extensions/counters/retrieve-counter-values.mdx#syntax) + + * [Get all Counters of a document](../../document-extensions/counters/retrieve-counter-values.mdx#get-all-counters-of-a-document) + * [`get_all` usage](../../document-extensions/counters/retrieve-counter-values.mdx#usage-1) + * [`get_all` example](../../document-extensions/counters/retrieve-counter-values.mdx#example-1) + * [`get_all` Syntax](../../document-extensions/counters/retrieve-counter-values.mdx#syntax-1) + + +## Get a single Counter's value + +#### `get` usage: + +* Open a session +* Create an instance of `counters_for`. + * Either pass `counters_for` an explicit document ID, -or- + * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx), + e.g. a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load). +* Call `counters_for.get` to retrieve the current value of a single Counter. +#### `get` example: + + + +{`# 1. Open a session +with store.open_session() as session: + # 2. pass CountersFor's constructor a document ID + document_counters = session.counters_for("products/1-C") + + # 3. Use 'CountersFor.Get' to retrieve a Counter's value + days_left = document_counters.get("DaysLeftForSale") + print(f"Days Left For Sale: \{days_left\}") +`} + + +#### `get` syntax: + + + +{`def get(self, counter) -> int: ... +`} + + + +| Parameter | Type | Description | +|---------------|--------|----------------| +| `counter` | str | Counter name | + +| Return Type | Description | +|--------------|-------------------------| +| `int` | Counter's current value | + + + +## Get all Counters of a document +#### `get_all` usage: + +* Open a session. +* Create an instance of `counters_for`. + * Either pass `counters_for` an explicit document ID, -or- + * Pass it an [entity tracked by the session](../../client-api/session/loading-entities.mdx), + e.g. 
a document object returned from [session.query](../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../client-api/session/loading-entities.mdx#load). +* Call `counters_for.get_all` to retrieve the names and values of all counters associated with the document. +#### `get_all` example: + + + +{`# 1. Open a session +with store.open_session() as session: + # 2. pass CountersFor's constructor a document ID + document_counters = session.counters_for("products/1-C") + + # 3. Use GetAll to retrieve all of the document's counters' names and values + counters = document_counters.get_all() + + # list counters' names and values + for counter_name, counter_value in counters.items(): + print(f"counter name: \{counter_name\}, counter value: \{counter_value\}") +`} + + +#### `get_all` syntax: + + + +{`def get_all(self) -> Dict[str, int]: ... +`} + + + +| Return Type | Description | +|------------------|---------------------------------| +| `Dict[str, int]` | Map of Counter names and values | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/counters/counters-and-other-features.mdx b/versioned_docs/version-7.1/document-extensions/counters/counters-and-other-features.mdx new file mode 100644 index 0000000000..03a69f38ba --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/counters-and-other-features.mdx @@ -0,0 +1,57 @@ +--- +title: "Counters and Other Features" +hide_table_of_contents: true +sidebar_label: Counters and Other Features +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CountersAndOtherFeaturesCsharp from './_counters-and-other-features-csharp.mdx'; +import CountersAndOtherFeaturesJava from './_counters-and-other-features-java.mdx'; +import CountersAndOtherFeaturesPython from './_counters-and-other-features-python.mdx'; +import CountersAndOtherFeaturesPhp from './_counters-and-other-features-php.mdx'; +import CountersAndOtherFeaturesNodejs from './_counters-and-other-features-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/counters/counters-in-clusters.mdx b/versioned_docs/version-7.1/document-extensions/counters/counters-in-clusters.mdx new file mode 100644 index 0000000000..2728c46126 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/counters-in-clusters.mdx @@ -0,0 +1,146 @@ +--- +title: "Counters in a Cluster" +hide_table_of_contents: true +sidebar_label: Counters in a Cluster +sidebar_position: 7 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; +import ContentFrame from '@site/src/components/ContentFrame'; +import Panel from '@site/src/components/Panel'; + + + +* This article explains how Counters behave in a cluster environment: + * How Counter values are modified and replicated across nodes + * How multiple clients can modify the same Counter concurrently without causing conflicts + * When conflicting actions (such as _Delete_ and _Increment_) may occur, and how they are resolved + +* In this article: + * [Counter value 
modification](../../document-extensions/counters/counters-in-clusters.mdx#counter-value-modification)
+    * [Value modification and replication flow](../../document-extensions/counters/counters-in-clusters.mdx#value-modification-and-replication-flow)
+    * [Reading a Counter's value](../../document-extensions/counters/counters-in-clusters.mdx#reading-a-counters-value)
+  * [Counter name modification](../../document-extensions/counters/counters-in-clusters.mdx#counter-name-modification)
+  * [Concurrent value modification](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-value-modification)
+  * [Concurrent `Delete` and `Increment`](../../document-extensions/counters/counters-in-clusters.mdx#concurrent-delete-and-increment)
+    * [In a single-node system](../../document-extensions/counters/counters-in-clusters.mdx#in-a-single-node-system)
+    * [In a multi-node cluster](../../document-extensions/counters/counters-in-clusters.mdx#in-a-multi-node-cluster)
+
+
+
+
+
+### Value modification and replication flow
+
+Each node **manages its own portion** of a Counter's total value, independently of the other nodes.
+In the following 3-node cluster example:
+  * The total value of the "ProductLikes" Counter is 80.
+  * Each node manages its own share of that total.
+    | Counter Name | Node Tag  | Counter value on this node |
+    |--------------|-----------|----------------------------|
+    | ProductLikes | A         | 42                         |
+    | ProductLikes | B         | 28                         |
+    | ProductLikes | C         | 10                         |
+
+When a client **modifies** a Counter’s value, only the portion stored **on the node the client writes to** is updated.
+The Counter values on the other nodes remain unchanged.
+In the following example:
+  * A client writes to node B and increments the "ProductLikes" Counter by 5.
+  * Only node B’s portion is incremented (from 28 to 33).
+    | Counter Name | Node Tag  | Counter value on this node |
+    |--------------|-----------|----------------------------|
+    | ProductLikes | A         | 42                         |
+    | ProductLikes | **B**     | **33**                     |
+    | ProductLikes | C         | 10                         |
+
+After a Counter’s value is modified on a node, that node [replicates](../../document-extensions/counters/counters-in-clusters.mdx#value-modification-and-replication-flow)
+the updated value to all other nodes in the cluster.
+In the above example:
+  * The "ProductLikes" Counter was incremented by 5 on node B.
+  * Node B **replicates** its updated value (33) to nodes A and C.
+  * As a result, all nodes now store the same set of per-node values:
+    each node still manages only its own portion, but also stores the portions maintained by the other nodes.
+
+This ensures that each node is kept up to date with the portion of the Counter's value maintained by every other node,
+allowing it to compute the full total when the Counter is read.
+
+
+Note that **only the Counter's value** is replicated.
+The document itself hasn't been modified and does not require replication.
+
+
+---
+
+### Reading a Counter's value
+
+When a client requests a Counter's value, the server returns a single accumulated sum.
+In the following example:
+  * A request for the value of the "ProductLikes" Counter will return **85**.
+ | Counter Name | Node Tag | Counter value on this node | + |--------------|-----------|------------------------------------| + | ProductLikes | A | **42** | + | ProductLikes | B | **33** | + | ProductLikes | C | **10** | + | | | **Total value: 42 + 33 + 10 = 85** | + + + + + +**Modifying a Counter name triggers document replication**: + +* Creating or deleting a Counter adds or removes the Counter name from the document’s metadata. + This is considered a document-level change, which triggers **document replication** across the database group. +* As a result, the entire document - including the new Counter value - is replicated to all nodes in the database group. +* Existing Counters are not replicated, since they already exist on the other nodes and their values remain unchanged. + + + + + +**The same Counter can be concurrently modified by multiple clients**. + +As explained in the [Value modification and replication flow](../../document-extensions/counters/counters-in-clusters.mdx#value-modification-and-replication-flow) section, +each node manages its own portion of a Counter’s value independently. +As a result: + +* Multiple clients can modify the same Counter at the same time. +* Nodes do not need to coordinate these modifications with each other. +* Concurrent value modifications do not cause conflicts. + For more details, see [Counters and conflicts](../../document-extensions/counters/overview#counters-and-conflicts). + + + + + +A sequence of Counter actions is [cumulative](../../document-extensions/counters/overview.mdx#cumulative-counter-actions), +as long as the Counter is not [Deleted](../../document-extensions/counters/delete.mdx). +When a _Delete_ is involved, the order of execution becomes significant. + +If [Increment](../../document-extensions/counters/create-or-modify.mdx) and [Delete](../../document-extensions/counters/delete.mdx) are called concurrently, +their execution order is unpredictable, and the outcome depends on the system architecture: +whether it's a single-node system or a multi-node cluster. + +### In a single-node system + +Different ***clients*** may simultaneously attempt to _Delete_ and _Increment_ the same Counter. +The outcome depends on the **server’s execution order**: + + * If _Delete_ is executed **last**, the Counter is permanently deleted. + * If _Delete_ is executed **before** _Increment_, + the Counter is deleted but then re-created with the incremented value as its new initial value. + +### In a multi-node cluster + +Different ***nodes*** may concurrently _Delete_ and _Increment_ the same Counter. + +* This is considered a conflict, and RavenDB resolves it in favor of the **increment** action. +* The incrementing node will ignore the delete action. +* The deleting node will delete the Counter locally, but re-create it when it receives the updated value through replication. 
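+
+As a quick illustration of the per-node arithmetic described in this article, the node shares can be
+modeled in a few lines of plain JavaScript (illustrative only - this is not client API; the node tags
+and values are taken from the examples above):
+
+{`// Each node manages only its own share of "ProductLikes";
+// a read accumulates every share into a single total.
+const productLikes = \{ A: 42, B: 28, C: 10 \};
+
+// A client writing to node B increments only B's share
+productLikes.B += 5; // B: 28 -> 33
+
+// Reading the Counter sums all shares: 42 + 33 + 10 = 85
+const total = Object.values(productLikes).reduce((sum, v) => sum + v, 0);
+console.log(total); // 85
+`}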
+ + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/counters/create-or-modify.mdx b/versioned_docs/version-7.1/document-extensions/counters/create-or-modify.mdx new file mode 100644 index 0000000000..5ea6048549 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/create-or-modify.mdx @@ -0,0 +1,57 @@ +--- +title: "Create or Modify Counters" +hide_table_of_contents: true +sidebar_label: Create or Modify Counters +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CreateOrModifyCsharp from './_create-or-modify-csharp.mdx'; +import CreateOrModifyJava from './_create-or-modify-java.mdx'; +import CreateOrModifyPython from './_create-or-modify-python.mdx'; +import CreateOrModifyPhp from './_create-or-modify-php.mdx'; +import CreateOrModifyNodejs from './_create-or-modify-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/counters/delete.mdx b/versioned_docs/version-7.1/document-extensions/counters/delete.mdx new file mode 100644 index 0000000000..2ade2889db --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/delete.mdx @@ -0,0 +1,57 @@ +--- +title: "Delete Counter" +hide_table_of_contents: true +sidebar_label: Delete Counter +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeleteCsharp from './_delete-csharp.mdx'; +import DeleteJava from './_delete-java.mdx'; +import DeletePython from './_delete-python.mdx'; +import DeletePhp from './_delete-php.mdx'; +import DeleteNodejs from './_delete-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/counters/including-counters.mdx b/versioned_docs/version-7.1/document-extensions/counters/including-counters.mdx new file mode 100644 index 0000000000..63c4aac959 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/including-counters.mdx @@ -0,0 +1,55 @@ +--- +title: "Including Counters" +hide_table_of_contents: true +sidebar_label: Including Counters +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IncludingCountersCsharp from './_including-counters-csharp.mdx'; +import IncludingCountersJava from './_including-counters-java.mdx'; +import IncludingCountersPython from './_including-counters-python.mdx'; +import IncludingCountersPhp from './_including-counters-php.mdx'; +import IncludingCountersNodejs from './_including-counters-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/counters/indexing.mdx b/versioned_docs/version-7.1/document-extensions/counters/indexing.mdx new file mode 100644 index 0000000000..efdd2f8432 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/indexing.mdx @@ -0,0 +1,57 @@ +--- +title: 
"Indexing Counters" +hide_table_of_contents: true +sidebar_label: Indexing Counters +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingCsharp from './_indexing-csharp.mdx'; +import IndexingJava from './_indexing-java.mdx'; +import IndexingPython from './_indexing-python.mdx'; +import IndexingPhp from './_indexing-php.mdx'; +import IndexingNodejs from './_indexing-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/counters/overview.mdx b/versioned_docs/version-7.1/document-extensions/counters/overview.mdx new file mode 100644 index 0000000000..79841a25a6 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/overview.mdx @@ -0,0 +1,57 @@ +--- +title: "Counters Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OverviewCsharp from './_overview-csharp.mdx'; +import OverviewJava from './_overview-java.mdx'; +import OverviewPython from './_overview-python.mdx'; +import OverviewPhp from './_overview-php.mdx'; +import OverviewNodejs from './_overview-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/counters/retrieve-counter-values.mdx b/versioned_docs/version-7.1/document-extensions/counters/retrieve-counter-values.mdx new file mode 100644 index 0000000000..908d8d7793 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/counters/retrieve-counter-values.mdx @@ -0,0 +1,57 @@ +--- +title: "Get Counter Values" +hide_table_of_contents: true +sidebar_label: Get Counter Values +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import RetrieveCounterValuesCsharp from './_retrieve-counter-values-csharp.mdx'; +import RetrieveCounterValuesJava from './_retrieve-counter-values-java.mdx'; +import RetrieveCounterValuesPython from './_retrieve-counter-values-python.mdx'; +import RetrieveCounterValuesPhp from './_retrieve-counter-values-php.mdx'; +import RetrieveCounterValuesNodejs from './_retrieve-counter-values-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/overview-extensions.mdx b/versioned_docs/version-7.1/document-extensions/overview-extensions.mdx new file mode 100644 index 0000000000..52c28db0f2 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/overview-extensions.mdx @@ -0,0 +1,82 @@ +--- +title: "Document Extensions Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from 
"@site/src/components/LanguageContent"; + +# Document Extensions Overview + + +* Document extensions are data entities associated with documents. + +* Document extensions are stored separately so that **modifying** an extension value (e.g. a counter + or a time series entry) will not modify its parent document. + +* **Creating or deleting** an extension changes the parent document's meta-data. + This document change may trigger indexing, ETL tasks, and various other operations. + +* On a [sharded database](../sharding/overview.mdx), document extensions + are stored in the same bucket as the document that owns them. + Read about document extensions on a sharded database in the section + [dedicated to this subject](../sharding/document-extensions.mdx). + +* In this page: + * [Document Extensions](../document-extensions/overview-extensions.mdx#document-extensions) + * [Studio Document Extension Views](../document-extensions/overview-extensions.mdx#studio-document-extension-views) + + +## Document Extensions + +* [Counters](../document-extensions/counters/overview.mdx) + RavenDB's distributed counters are numeric data variables that can be added to documents and used + for various counting tasks. + +* [Attachments](../document-extensions/attachments/what-are-attachments.mdx) + Attachments are binary streams (videos, images, PDF, etc.) that can be bound to an existing document. + +* [Time Series](../document-extensions/timeseries/overview.mdx) + Time series are vectors of data that collect values over time, store the values consecutively across the cluster, + and manage the collected data with high efficiency and performance. + +* [Revisions](../document-extensions/revisions/overview.mdx) + Document Revisions are snapshots of documents and their extensions that can be created to give access to a document's history. + + + + +## Studio Document Extension Views + +#### Document Extensions Flags + +![Document Extensions in Collections View](./assets/extensions-collections-view.png) + +1. **Documents Tab** + Select to view document options. +2. **Collection** + Select a documents collection. +3. **Extensions** + View which types of extensions are added to the documents. + ![Document Extensions Icons](./assets/extensions-logos.png) +#### Document Extensions View + +Select a specific document to manage the extensions. + +In Studio > click Documents Tab > select specific Collection > select specific DocumentID to navigate to the following view: + +![Managing Document Extensions in Studio](./assets/extensions-managing-single-doc.png) + +1. Attachments Settings +2. Counters Settings +3. Time-Series Settings +4. 
Revisions Settings
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/revisions/_category_.json b/versioned_docs/version-7.1/document-extensions/revisions/_category_.json
new file mode 100644
index 0000000000..f404496811
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/revisions/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 4,
+  "label": "Revisions"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/revisions/_overview-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/_overview-csharp.mdx
new file mode 100644
index 0000000000..0f0c9f41b9
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/revisions/_overview-csharp.mdx
@@ -0,0 +1,327 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Document Revisions** are snapshots of documents and their extensions:
+
+  * The trail of revisions created for a document can be inspected to track changes made in the document over time.
+  * A document's live version can be [reverted](../../document-extensions/revisions/revert-revisions.mdx) to any of its recorded revisions.
+
+  For example, tracking document revisions allows you to check how an employee's contract has changed over time,
+  restore a single corrupted document without requiring a backup file, or conduct a full-scale audit of your data.
+
+* Managed via the Client API or from the Studio, revisions can be created **automatically** or **manually**:
+
+  * **Automatic revisions creation**:
+    When revisions settings are defined and enabled for a collection, a document revision is automatically created whenever documents are created, modified, or deleted.
+    To configure revisions settings, and set limits for the number of revisions retained per document,
+    apply a [Revisions configuration](../../document-extensions/revisions/overview.mdx#revisions-configuration) to all and/or specific collections.
+
+  * **Manual revisions creation**:
+    When revisions settings are disabled, you can still create revisions manually.
+    See [Force revision creation](../../document-extensions/revisions/overview.mdx#force-revision-creation) below.
+* In this page: + * [Revisions configuration](../../document-extensions/revisions/overview.mdx#revisions-configuration) + * [Defining a revisions configuration](../../document-extensions/revisions/overview.mdx#defining-a-revisions-configuration) + * [Default settings and collection-specific configurations](../../document-extensions/revisions/overview.mdx#default-settings-and-collection-specific-configurations) + * [Revisions configuration options](../../document-extensions/revisions/overview.mdx#revisions-configuration-options) + * [Revisions configuration execution](../../document-extensions/revisions/overview.mdx#revisions-configuration-execution) + * [Enabling and disabling revisions for existing documents](../../document-extensions/revisions/overview.mdx#enabling-and-disabling-revisions-for-existing-documents) + * [How it works](../../document-extensions/revisions/overview.mdx#how-it-works) + * [Revisions storage](../../document-extensions/revisions/overview.mdx#revisions-storage) + * [Force revision creation](../../document-extensions/revisions/overview.mdx#force-revision-creation) + + +## Revisions configuration + +* The revisions configuration enables or disables the creation and purging of revisions for documents, + and optionally limits the number of revisions retained per document. + +* By default, the revisions feature is **disabled** for all collections: no revisions are created or purged for any document. + You can modify this behavior and other revisions settings by applying a revisions configuration to the database. + The revisions configuration is stored in the database record. + + + + #### Conflict Revisions + + Revisions created for **conflicting documents** are a special case that is not covered in this article. + + * Conflict revisions are **enabled** by default. + * Read about the conflict revisions API here: + [Conflict Revisions Configuration](../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx) + * Read about managing conflict revisions via the Studio here: + [Editing the Conflicting Document Defaults](../../studio/database/settings/document-revisions.mdx#editing-the-conflicting-document-defaults) + + +#### Defining a revisions configuration + +You can apply a revisions configuration using the Studio or the Client API: + +* Via Studio: + * Manage the revisions configuration in the [Document Revisions Settings](../../studio/database/settings/document-revisions.mdx) view. + * Inspect existing revisions and manually create a new revision in the [Revisions tab](../../studio/database/document-extensions/revisions/revisions-overview.mdx#revisions-tab) in the Studio's Document View. +* Via Client API: + * Use the [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) _Store_ operation to define and apply a revisions configuration. +#### Default settings and collection-specific configurations + +The revisions configuration consists of default settings and/or collection-specific configurations: + +* **Default settings**: + The default settings apply to all documents for which a collection-specific configuration is not defined. + +* **Collection-specific configurations**: + Collection-specific configurations apply only to documents of the collections they are defined for, + overriding the default settings for these collections. + + If no default settings are applied, revisions will be **disabled** for any collection where a collection-specific configuration is not defined. 
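+
+For example, a minimal sketch of applying such a configuration with the `ConfigureRevisionsOperation` _Store_ operation
+(the collection name "Orders" and the limits used here are hypothetical):
+
+```csharp
+var configuration = new RevisionsConfiguration
+{
+    // Default settings - apply to every collection
+    // that has no collection-specific configuration
+    Default = new RevisionsCollectionConfiguration
+    {
+        Disabled = false,             // enable revisions creation
+        MinimumRevisionsToKeep = 100  // purge revisions beyond this limit
+    },
+
+    // Collection-specific configurations - override the default settings
+    Collections = new Dictionary<string, RevisionsCollectionConfiguration>
+    {
+        ["Orders"] = new RevisionsCollectionConfiguration
+        {
+            Disabled = true           // no revisions for Orders documents
+        }
+    }
+};
+
+store.Maintenance.Send(new ConfigureRevisionsOperation(configuration));
+```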
+ +#### Revisions configuration options + +A revisions configuration defines - + +* Whether to enable or disable revisions creation: + * If the revisions configuration is **Enabled** for a collection, + creating, modifying, or deleting any document in this collection will trigger the automatic creation of a new document revision, + and optionally the purging of existing revisions for the document. + * If the revisions configuration is **Disabled** for a collection, + RavenDB will **not** automatically create or purge revisions for documents in this collection. + +* Whether to limit the number of revisions that can be kept per document. + RavenDB will only purge revisions if they exceed the limits you set. + +* Learn more about the available configuration options in [Configure revisions operations](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx). +#### Revisions configuration execution + +Creating a revisions configuration does **not** immediately trigger its execution. +Default and collection-specific configurations are executed when - + +1. **Documents are Created, Modified, or Deleted**. + When a document is created, modified, or deleted, the configuration (either default or collection-specific) that applies to its collection is examined. + If the revisions configuration is enabled for this collection: + * A revision of the document will be created. + * Existing revisions will optionally be purged according to the limits set in the configuration. + +2. **Enforce Configuration is applied**. + [Enforcing the configuration](../../studio/database/settings/document-revisions.mdx#enforce-configuration) applies the defined revisions configuration immediately throughout the database, **purging** all the revisions pending purging + according to default settings or collection-specific configurations, and **deleting** all revisions that no configuration applies to. + + * Large databases and collections may contain numerous revisions pending purging, which Enforcing Configuration will purge all at once. + Be aware that this operation may require substantial server resources, so time it accordingly. + * Revisions that were created over time but to which no configuration currently applies will be deleted. + Make sure that your configuration includes the default settings and collection-specific configurations needed to retain the revisions you want to keep. + +#### Enabling and disabling revisions for existing documents + +* When revisions creation is **Enabled** for a populated collection: + * The first revision will be created for an existing document the next time the document is modified + (recording the document **after** its modification), or when the document is deleted. + +* When revisions creation is **Disabled** for a collection after revisions have been created: + * The creation of new revisions and the purging of existing revisions will stop. + * Existing revisions will remain intact. + + + +## How it works + +Let's play with revisions a little to get a taste of its advantages. + +1. **Enable Revisions** so we can experiment with the feature. + Revisions can be enabled from the [Studio](../../studio/database/settings/document-revisions.mdx) + or using the [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) _Store_ operation. + + ![Enable Revisions for the Users Collection](./assets/revisions-1.png) + +2. **Create a new document in the `Users` collection**. 
+ We will follow the automatic creation of revisions for this document. + You can create the document in the [Studio](../../studio/database/documents/create-new-document.mdx#create-new-document) + or using the [session.Store](../../client-api/session/storing-entities.mdx#example) method. + + ![Create a Document](./assets/revisions-2.png) + +3. **Inspect the new document's [Revisions Tab](../../studio/database/document-extensions/revisions/revisions-overview.mdx#revisions-tab)** in the Studio. + Creating the document also created its first revision. + + ![Revision for Document Creation](./assets/revisions-3.png) + + Click the _"See the current document"_ button to return to the parent document view. + +4. **Modify and Save the document**. + This will create a second revision. + + ![Revision for Document Modification](./assets/revisions-4.png) + +5. **Delete the document**. + Though you deleted the document, its **audit trail** is **not lost**: all its revisions are moved to the [Revisions Bin](../../studio/database/document-extensions/revisions/revisions-bin.mdx), + including a new revision (called "Delete Revision"), created to indicate that the document was deleted. + + + * A "Delete Revision" is created only if the deleted document has revisions. + * If a document has no revisions, a "Delete Revision" will be created only if the Revisions Configuration is set and enabled for its collection. + + + To see the revisions created for the document before it was deleted: + * Open the `Documents > Revisions Bin` section in the Studio + * Click the deleted document's ID + + ![Revisions Bin](./assets/revisions-5.png) + +6. **Restore the document**. + Even after a document is deleted, you can still restore it from one of its revisions. + To do so, open the revision containing the content you want to restore. + Click _Clone_ to create a new document from that revision. + + ![Revisions Bin](./assets/revisions-6.png) + + Save the new document using the exact **same ID** as the deleted document. + This will restore all revisions of the deleted document from the Revisions Bin and associate them with the new document. + + Opening the document’s Revisions Tab will show the full audit trail, + including the "Delete Revision" created when the original document was deleted and the new revision created when the restored document was saved. + + ![Restored Revisions](./assets/revisions-7.png) + + + +## Revisions storage + +##### Revisions storage + +When a document revision is created, a full version of the modified document is stored in the revisions storage, +using the same blittable JSON document format as regular documents. +##### Revisions Compression + +* By default, revisions are compressed. + This setting can be customized server-wide via the [CompressRevisionsDefault](../../server/configuration/database-configuration.mdx#databasescompressioncompressrevisionsdefault) configuration key. +* At the database level, revisions compression can be customized via the database record, + as shown in [this example](../../server/storage/documents-compression.mdx#set-compression-for-selected-collections). +* Individual fields are compressed as they are compressed in regular documents: + any text field exceeding 128 bytes is compressed. + Learn more about documents compression in [Documents Compression](../../server/storage/documents-compression.mdx). 
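+
+As a minimal sketch, revisions compression at the database level can be toggled with the documents-compression
+operation (the exact options available should be verified against the linked articles):
+
+```csharp
+// Enable revisions compression for this database (a hedged sketch)
+var compressionConfiguration = new DocumentsCompressionConfiguration(
+    compressRevisions: true);
+
+store.Maintenance.Send(
+    new UpdateDocumentsCompressionConfigurationOperation(compressionConfiguration));
+```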
+##### Storage of document extensions in revisions
+
+Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-time-series) about revisions and **time series**.
+Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-counters) about revisions and **counters**.
+Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-attachments) about revisions and **attachments**.
+
+
+
+## Force revision creation
+
+So far we've discussed the automatic creation of revisions when the feature is enabled.
+However, you can also **force the creation** of a document revision, whether the feature is enabled or not.
+
+This is useful when you choose to disable automatic revisions creation but still want to create a revision for a specific document,
+e.g. take a snapshot of the document as a precaution before editing it.
+
+* You can force the creation of a revision via the [Studio](../../document-extensions/revisions/overview.mdx#force-revision-creation-via-the-studio)
+  or use the [Client API](../../document-extensions/revisions/overview.mdx#force-revision-creation-via-the-client-api).
+* A revision **will** be created even if the revisions configuration is disabled for the document's collection.
+* A revision **will** be created even if the document has not been modified
+  (unless the document has revisions and the latest revision contains the current document contents).
+* Similar to revisions created automatically due to the revisions configuration,
+  deleting a document with a manually created revision will move the revision to the [Revisions Bin](../../studio/database/document-extensions/revisions/revisions-bin.mdx),
+  and a "Delete Revision" will be created.
+##### Force revision creation via the Studio
+
+To create a revision manually via the Studio,
+click the **Create Revision** button in the Revisions Tab in the document view.
+
+![Create a revision manually](./assets/revisions-8.png)
+##### Force revision creation via the Client API
+
+To create a revision manually via the Client API, use the advanced session `ForceRevisionCreationFor` method,
+followed by a call to `SaveChanges`.
+
+**Example**:
+
+
+
+{`// Force revision creation by entity
+// =================================
+
+var company = new Company
+{
+    Name = "CompanyName"
+};
+
+session.Store(company);
+companyId = company.Id;
+session.SaveChanges();
+
+// Forcing the creation of a revision by entity can be performed
+// only when the entity is tracked, after the document is stored.
+session.Advanced.Revisions.ForceRevisionCreationFor(company);
+
+// Call SaveChanges for the revision to be created
+session.SaveChanges();
+
+var revisionsCount = session.Advanced.Revisions.GetFor<Company>(companyId).Count;
+Assert.Equal(1, revisionsCount);
+`}
+
+
+
+{`// Force revision creation by ID
+// =============================
+
+session.Advanced.Revisions.ForceRevisionCreationFor(companyId);
+session.SaveChanges();
+
+var revisionsCount = session.Advanced.Revisions.GetFor<Company>(companyId).Count;
+Assert.Equal(1, revisionsCount);
+`}
+
+
+
+**Syntax**:
+
+
+
+{`// Available overloads:
+// ====================
+
+// Force revision creation by entity.
+// Can be used with tracked entities only.
+void ForceRevisionCreationFor<T>(T entity,
+    ForceRevisionStrategy strategy = ForceRevisionStrategy.Before);
+
+// Force revision creation by document ID.
+void ForceRevisionCreationFor(string id, + ForceRevisionStrategy strategy = ForceRevisionStrategy.Before); +`} + + + +| Parameter | Type | Description | +|--------------|------------------------------|--------------------------------------------------------------------------------------------------| +| **entity** | `T` | The tracked entity for which you want to create a revision. | +| **id** | `string` | The ID of the document for which you want to create a revision. | +| **strategy** | `enum ForceRevisionStrategy` | Defines the revision creation strategy (see below).
Default: `ForceRevisionStrategy.Before` | + + + +{`public enum ForceRevisionStrategy +\{ + // Do not force a revision + None, + + // Create a forced revision from the document currently in store + // BEFORE applying any changes made by the user. + // The only exception is for a new document, + // where a revision will be created AFTER the update. + Before +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/_overview-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/revisions/_overview-nodejs.mdx new file mode 100644 index 0000000000..c51d9e1b8f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/_overview-nodejs.mdx @@ -0,0 +1,324 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Document Revisions** are snapshots of documents and their extensions: + + * The trail of revisions created for a document can be inspected to track changes made in the document over time. + * A document's live version can be [reverted](../../document-extensions/revisions/revert-revisions.mdx) to any of its recorded revisions. + + For example, tracking document revisions allows you to check how an employee's contract has changed over time, + restore a single corrupted document without requiring a backup file, or conduct a full-scale audit of your data. + +* Managed via the Client API or from the Studio, revisions can be created **automatically** or **manually**: + + * **Automatic revisions creation**: + When revisions settings are defined and enabled for a collection, a document revision is automatically created whenever documents are created, modified, or deleted. + To configure revisions settings, and set limits for the number of revisions retained per document, + apply a [Revisions configuration](../../document-extensions/revisions/overview.mdx#revisions-configuration) to all and/or specific collections. + + * **Manual revisions creation**: + When revisions settings are disabled, you can still create revisions manually. + See [Force revision creation](../../document-extensions/revisions/overview.mdx#force-revision-creation) below. +* In this page: + * [Revisions configuration](../../document-extensions/revisions/overview.mdx#revisions-configuration) + * [Defining a revisions configuration](../../document-extensions/revisions/overview.mdx#defining-a-revisions-configuration) + * [Default settings and collection-specific configurations](../../document-extensions/revisions/overview.mdx#default-settings-and-collection-specific-configurations) + * [Revisions configuration options](../../document-extensions/revisions/overview.mdx#revisions-configuration-options) + * [Revisions configuration execution](../../document-extensions/revisions/overview.mdx#revisions-configuration-execution) + * [Enabling and disabling revisions for existing documents](../../document-extensions/revisions/overview.mdx#enabling-and-disabling-revisions-for-existing-documents) + * [How it works](../../document-extensions/revisions/overview.mdx#how-it-works) + * [Revisions storage](../../document-extensions/revisions/overview.mdx#revisions-storage) + * [Force revision creation](../../document-extensions/revisions/overview.mdx#force-revision-creation) + + +## Revisions configuration + +* The revisions configuration enables or disables the creation and purging of revisions for documents, + and optionally limits the number of revisions retained per document. 
+ +* By default, the revisions feature is **disabled** for all collections: no revisions are created or purged for any document. + You can modify this behavior and other revisions settings by applying a revisions configuration to the database. + The revisions configuration is stored in the database record. + + + + #### Conflict Revisions + + Revisions created for **conflicting documents** are a special case that is not covered in this article. + + * Conflict revisions are **enabled** by default. + * Read about the conflict revisions API here: + [Conflict Revisions Configuration](../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx) + * Read about managing conflict revisions via the Studio here: + [Editing the Conflicting Document Defaults](../../studio/database/settings/document-revisions.mdx#editing-the-conflicting-document-defaults) + + +#### Defining a revisions configuration + +You can apply a revisions configuration using the Studio or the Client API: + +* Via Studio: + * Manage the revisions configuration in the [Document Revisions Settings](../../studio/database/settings/document-revisions.mdx) view. + * Inspect existing revisions and manually create a new revision in the [Revisions tab](../../studio/database/document-extensions/revisions/revisions-overview.mdx#revisions-tab) in the Studio's Document View. +* Via Client API: + * Use the [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) _Store_ operation to define and apply a revisions configuration. +#### Default settings and collection-specific configurations + +The revisions configuration consists of default settings and/or collection-specific configurations: + +* **Default settings**: + The default settings apply to all documents for which a collection-specific configuration is not defined. + +* **Collection-specific configurations**: + Collection-specific configurations apply only to documents of the collections they are defined for, + overriding the default settings for these collections. + + If no default settings are applied, revisions will be **disabled** for any collection where a collection-specific configuration is not defined. + +#### Revisions configuration options + +A revisions configuration defines - + +* Whether to enable or disable revisions creation: + * If the revisions configuration is **Enabled** for a collection, + creating, modifying, or deleting any document in this collection will trigger the automatic creation of a new document revision, + and optionally the purging of existing revisions for the document. + * If the revisions configuration is **Disabled** for a collection, + RavenDB will **not** automatically create or purge revisions for documents in this collection. + +* Whether to limit the number of revisions that can be kept per document. + RavenDB will only purge revisions if they exceed the limits you set. + +* Learn more about the available configuration options in [Configure revisions operations](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx). +#### Revisions configuration execution + +Creating a revisions configuration does **not** immediately trigger its execution. +Default and collection-specific configurations are executed when - + +1. **Documents are Created, Modified, or Deleted**. + When a document is created, modified, or deleted, the configuration (either default or collection-specific) that applies to its collection is examined. 
+ If the revisions configuration is enabled for this collection: + * A revision of the document will be created. + * Existing revisions will optionally be purged according to the limits set in the configuration. + +2. **Enforce Configuration is applied**. + [Enforcing the configuration](../../studio/database/settings/document-revisions.mdx#enforce-configuration) applies the defined revisions configuration immediately throughout the database, **purging** all the revisions pending purging + according to default settings or collection-specific configurations, and **deleting** all revisions that no configuration applies to. + + * Large databases and collections may contain numerous revisions pending purging, which Enforcing Configuration will purge all at once. + Be aware that this operation may require substantial server resources, so time it accordingly. + * Revisions that were created over time but to which no configuration currently applies will be deleted. + Make sure that your configuration includes the default settings and collection-specific configurations needed to retain the revisions you want to keep. + +#### Enabling and disabling revisions for existing documents + +* When revisions creation is **Enabled** for a populated collection: + * The first revision will be created for an existing document the next time the document is modified + (recording the document **after** its modification), or when the document is deleted. + +* When revisions creation is **Disabled** for a collection after revisions have been created: + * The creation of new revisions and the purging of existing revisions will stop. + * Existing revisions will remain intact. + + + +## How it works + +Let's play with revisions a little to get a taste of its advantages. + +1. **Enable Revisions** so we can experiment with the feature. + Revisions can be enabled from the [Studio](../../studio/database/settings/document-revisions.mdx) + or using the [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) _Store_ operation. + + ![Enable Revisions for the Users Collection](./assets/revisions-1.png) + +2. **Create a new document in the `Users` collection**. + We will follow the automatic creation of revisions for this document. + You can create the document in the [Studio](../../studio/database/documents/create-new-document.mdx#create-new-document) + or using the [session.Store](../../client-api/session/storing-entities.mdx#example) method. + + ![Create a Document](./assets/revisions-2.png) + +3. **Inspect the new document's [Revisions Tab](../../studio/database/document-extensions/revisions/revisions-overview.mdx#revisions-tab)** in the Studio. + Creating the document also created its first revision. + + ![Revision for Document Creation](./assets/revisions-3.png) + + Click the _"See the current document"_ button to return to the parent document view. + +4. **Modify and Save the document**. + This will create a second revision. + + ![Revision for Document Modification](./assets/revisions-4.png) + +5. **Delete the document**. + Though you deleted the document, its **audit trail** is **not lost**: all its revisions were moved to the [Revisions Bin](../../studio/database/document-extensions/revisions/revisions-bin.mdx), + including a new revision (called "Delete Revision"), created to indicate that the document was deleted. + + + * A "Delete Revision" is created only if the deleted document has revisions. 
+ * If a document has no revisions, a "Delete Revision" will be created only if the Revisions Configuration is set and enabled for its collection. + + + To see the revisions created for the document before it was deleted: + * Open the `Documents > Revisions Bin` section in the Studio + * Click the deleted document's ID + + ![Revisions Bin](./assets/revisions-5.png) + +6. **Restore the document**. + Even after a document is deleted, you can still restore it from one of its revisions. + To do so, open the revision containing the content you want to restore. + Click _Clone_ to create a new document from that revision. + + ![Revisions Bin](./assets/revisions-6.png) + + Save the new document using the exact **same ID** as the deleted document. + This will restore all revisions of the deleted document from the Revisions Bin and associate them with the new document. + + Opening the document’s Revisions Tab will show the full audit trail, + including the "Delete Revision" created when the original document was deleted and the new revision created when the restored document was saved. + + ![Restored Revisions](./assets/revisions-7.png) + + + +## Revisions storage + +##### Revisions storage + +When a document revision is created, a full version of the modified document is stored in the revisions storage, +using the same blittable JSON document format as regular documents. +##### Revisions Compression + +* By default, revisions are compressed. + This setting can be customized server-wide via the [CompressRevisionsDefault](../../server/configuration/database-configuration.mdx#databasescompressioncompressrevisionsdefault) configuration key. +* At the database level, revisions compression can be customized via the database record, + as shown in [this example](../../server/storage/documents-compression.mdx#set-compression-for-selected-collections). +* Individual fields are compressed as they are compressed in regular documents: + any text field exceeding 128 bytes is compressed. + Learn more about documents compression in [Documents Compression](../../server/storage/documents-compression.mdx). +##### Storage of document extensions in revisions + +Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-time-series) about revisions and **time series**. +Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-counters) about revisions and **counters**. +Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-attachments) about revisions and **attachments**. + + + +## Force revision creation + +So far we've discussed the automatic creation of revisions when the feature is enabled. +However, you can also **force the creation** of a document revision, whether the feature is enabled or not. + +This is useful when you choose to disable automatic revisions creation but still want to create a revision for a specific document, +e.g. take a snapshot of the document as a precaution before editing it. + +* You can force the creation of a revision via the [Studio](../../document-extensions/revisions/overview.mdx#force-revision-creation-via-the-studio) + or use the [Client API](../../document-extensions/revisions/overview.mdx#force-revision-creation-via-the-client-api). +* A revision **will** be created even if the revisions configuration is disabled for the document's collection. 
+* A revision **will** be created even if the document has not been modified + (unless the document has revisions and the latest revision contains the current document contents). +* Similar to revisions created automatically due to the revisions configuration, + deleting a document with a manually created revision will move the revision to the [Revisions Bin](../../studio/database/document-extensions/revisions/revisions-bin.mdx), + and a "Delete Revision" will be created. +##### Force revision creation via the Studio + +To create a revision manually via the Studio, +click the **Create Revision** button in the Revisions Tab in the document view. + +![Create a revision manually](./assets/revisions-8.png) +##### Force revision creation via the Client API + +To create a revision manually via the Client API, use the advanced session `forceRevisionCreationFor` method, +followed by a call to `saveChanges`. + +**Example**: + + + + +{`// Force revision creation by entity +// ================================= + +const company = new Company(); +company.name = "CompanyName"; + +const session = documentStore.openSession(); +await session.store(company); +await session.saveChanges(); + +// Forcing the creation of a revision by entity can be performed +// only when the entity is tracked, after the document is stored. +await session.advanced.revisions.forceRevisionCreationFor(company); + +// Must call 'saveChanges' for the revision to be created +await session.saveChanges(); + +// Get existing revisions: +const revisions = await session.advanced.revisions.getFor(company.id); +const revisionsCount = revisions.length; + +assert.equal(revisionsCount, 1); +`} + + + + +{`const company = new Company(); +company.name = "CompanyName"; + +const session = documentStore.openSession(); +await session.store(company); +await session.saveChanges(); + +// Force revision creation by ID +const companyId = company.id; +await session.advanced.revisions.forceRevisionCreationFor(companyId); +await session.saveChanges(); + +const revisions = await session.advanced.revisions.getFor(company.id); +const revisionsCount = revisions.length; + +assert.equal(revisionsCount, 1); +`} + + + + +**Syntax**: + + + +{`// Available overloads: +// ==================== +forceRevisionCreationFor(entity); +forceRevisionCreationFor(entity, strategy); +forceRevisionCreationFor(id); +forceRevisionCreationFor(id, strategy); +`} + + + +| Parameter | Type | Description | +|--------------|----------|-------------------------------------------------------------------------------------------------| +| **entity** | `object` | The tracked entity for which you want to create a revision. | +| **id** | `string` | The ID of the document for which you want to create a revision. | +| **strategy** | `string` | Defines the revision creation strategy.
Can be `"None"` or `"Before"`
Default: `"Before"` | + +**Strategy**: + +`None`: +Do not force a revision + +`Before`: +Create a forced revision from the document currently in store BEFORE applying any changes made by the user. +The only exception is for a new document, where a revision will be created AFTER the update. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/_overview-python.mdx b/versioned_docs/version-7.1/document-extensions/revisions/_overview-python.mdx new file mode 100644 index 0000000000..8cb585b701 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/_overview-python.mdx @@ -0,0 +1,287 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Document Revisions** are snapshots of documents and their extensions: + + * The trail of revisions created for a document can be inspected to track changes made in the document over time. + * A document's live version can be [reverted](../../document-extensions/revisions/revert-revisions.mdx) to any of its recorded revisions. + + For example, tracking document revisions allows you to check how an employee's contract has changed over time, + restore a single corrupted document without requiring a backup file, or conduct a full-scale audit of your data. + +* Managed via the Client API or from the Studio, revisions can be created **automatically** or **manually**: + + * **Automatic revisions creation**: + When revisions settings are defined and enabled for a collection, a document revision is automatically created whenever documents are created, modified, or deleted. + To configure revisions settings, and set limits for the number of revisions retained per document, + apply a [Revisions configuration](../../document-extensions/revisions/overview.mdx#revisions-configuration) to all and/or specific collections. + + * **Manual revisions creation**: + When revisions settings are disabled, you can still create revisions manually. + See [Force revision creation](../../document-extensions/revisions/overview.mdx#force-revision-creation) below. +* In this page: + * [Revisions configuration](../../document-extensions/revisions/overview.mdx#revisions-configuration) + * [Defining a revisions configuration](../../document-extensions/revisions/overview.mdx#defining-a-revisions-configuration) + * [Default settings and collection-specific configurations](../../document-extensions/revisions/overview.mdx#default-settings-and-collection-specific-configurations) + * [Revisions configuration options](../../document-extensions/revisions/overview.mdx#revisions-configuration-options) + * [Revisions configuration execution](../../document-extensions/revisions/overview.mdx#revisions-configuration-execution) + * [Enabling and disabling revisions for existing documents](../../document-extensions/revisions/overview.mdx#enabling-and-disabling-revisions-for-existing-documents) + * [How it works](../../document-extensions/revisions/overview.mdx#how-it-works) + * [Revisions storage](../../document-extensions/revisions/overview.mdx#revisions-storage) + * [Force revision creation](../../document-extensions/revisions/overview.mdx#force-revision-creation) + + +## Revisions configuration + +* The revisions configuration enables or disables the creation and purging of revisions for documents, + and optionally limits the number of revisions retained per document. 
+ +* By default, the revisions feature is **disabled** for all collections: no revisions are created or purged for any document. + You can modify this behavior and other revisions settings by applying a revisions configuration to the database. + The revisions configuration is stored in the database record. + + + + #### Conflict Revisions + + Revisions created for **conflicting documents** are a special case that is not covered in this article. + + * Conflict revisions are **enabled** by default. + * Read about the conflict revisions API here: + [Conflict Revisions Configuration](../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx) + * Read about managing conflict revisions via the Studio here: + [Editing the Conflicting Document Defaults](../../studio/database/settings/document-revisions.mdx#editing-the-conflicting-document-defaults) + + +#### Defining a revisions configuration + +You can apply a revisions configuration using the Studio or the Client API: + +* Via Studio: + * Manage the revisions configuration in the [Document Revisions Settings](../../studio/database/settings/document-revisions.mdx) view. + * Inspect existing revisions and manually create a new revision in the [Revisions tab](../../studio/database/document-extensions/revisions/revisions-overview.mdx#revisions-tab) in the Studio's Document View. +* Via Client API: + * Use the [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) _Store_ operation to define and apply a revisions configuration. +#### Default settings and collection-specific configurations + +The revisions configuration consists of default settings and/or collection-specific configurations: + +* **Default settings**: + The default settings apply to all documents for which a collection-specific configuration is not defined. + +* **Collection-specific configurations**: + Collection-specific configurations apply only to documents of the collections they are defined for, + overriding the default settings for these collections. + + If no default settings are applied, revisions will be **disabled** for any collection where a collection-specific configuration is not defined. + +#### Revisions configuration options + +A revisions configuration defines - + +* Whether to enable or disable revisions creation: + * If the revisions configuration is **Enabled** for a collection, + creating, modifying, or deleting any document in this collection will trigger the automatic creation of a new document revision, + and optionally the purging of existing revisions for the document. + * If the revisions configuration is **Disabled** for a collection, + RavenDB will **not** automatically create or purge revisions for documents in this collection. + +* Whether to limit the number of revisions that can be kept per document. + RavenDB will only purge revisions if they exceed the limits you set. + +* Learn more about the available configuration options in [Configure revisions operations](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx). +#### Revisions configuration execution + +Creating a revisions configuration does **not** immediately trigger its execution. +Default and collection-specific configurations are executed when - + +1. **Documents are Created, Modified, or Deleted**. + When a document is created, modified, or deleted, the configuration (either default or collection-specific) that applies to its collection is examined. 
+ If the revisions configuration is enabled for this collection: + * A revision of the document will be created. + * Existing revisions will optionally be purged according to the limits set in the configuration. + +2. **Enforce Configuration is applied**. + [Enforcing the configuration](../../studio/database/settings/document-revisions.mdx#enforce-configuration) applies the defined revisions configuration immediately throughout the database, **purging** all the revisions pending purging + according to default settings or collection-specific configurations, and **deleting** all revisions that no configuration applies to. + + * Large databases and collections may contain numerous revisions pending purging, which Enforcing Configuration will purge all at once. + Be aware that this operation may require substantial server resources, so time it accordingly. + * Revisions that were created over time but to which no configuration currently applies will be deleted. + Make sure that your configuration includes the default settings and collection-specific configurations needed to retain the revisions you want to keep. + +#### Enabling and disabling revisions for existing documents + +* When revisions creation is **Enabled** for a populated collection: + * The first revision will be created for an existing document the next time the document is modified + (recording the document **after** its modification), or when the document is deleted. + +* When revisions creation is **Disabled** for a collection after revisions have been created: + * The creation of new revisions and the purging of existing revisions will stop. + * Existing revisions will remain intact. + + + +## How it works + +Let's play with revisions a little to get a taste of its advantages. + +1. **Enable Revisions** so we can experiment with the feature. + Revisions can be enabled from the [Studio](../../studio/database/settings/document-revisions.mdx) + or using the [ConfigureRevisionsOperation](../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) _Store_ operation. + + ![Enable Revisions for the Users Collection](./assets/revisions-1.png) + +2. **Create a new document in the `Users` collection**. + We will follow the automatic creation of revisions for this document. + You can create the document in the [Studio](../../studio/database/documents/create-new-document.mdx#create-new-document) + or using the [session.Store](../../client-api/session/storing-entities.mdx#example) method. + + ![Create a Document](./assets/revisions-2.png) + +3. **Inspect the new document's [Revisions Tab](../../studio/database/document-extensions/revisions/revisions-overview.mdx#revisions-tab)** in the Studio. + Creating the document also created its first revision. + + ![Revision for Document Creation](./assets/revisions-3.png) + + Click the _"See the current document"_ button to return to the parent document view. + +4. **Modify and Save the document**. + This will create a second revision. + + ![Revision for Document Modification](./assets/revisions-4.png) + +5. **Delete the document**. + Though you deleted the document, its **audit trail** is **not lost**: all its revisions were moved to the [Revisions Bin](../../studio/database/document-extensions/revisions/revisions-bin.mdx), + including a new revision (called "Delete Revision"), created to indicate that the document was deleted. + + + * A "Delete Revision" is created only if the deleted document has revisions. 
+  * If a document has no revisions, a "Delete Revision" will be created only if the Revisions Configuration is set and enabled for its collection.
+
+
+   To see the revisions created for the document before it was deleted:
+   * Open the `Documents > Revisions Bin` section in the Studio
+   * Click the deleted document's ID
+
+   ![Revisions Bin](./assets/revisions-5.png)
+
+6. **Restore the document**.
+   Even after a document is deleted, you can still restore it from one of its revisions.
+   To do so, open the revision containing the content you want to restore.
+   Click _Clone_ to create a new document from that revision.
+
+   ![Revisions Bin](./assets/revisions-6.png)
+
+   Save the new document using the exact **same ID** as the deleted document.
+   This will restore all revisions of the deleted document from the Revisions Bin and associate them with the new document.
+
+   Opening the document's Revisions Tab will show the full audit trail,
+   including the "Delete Revision" created when the original document was deleted and the new revision created when the restored document was saved.
+
+   ![Restored Revisions](./assets/revisions-7.png)
+
+
+
+## Revisions storage
+
+##### Revisions storage
+
+When a document revision is created, a full version of the modified document is stored in the revisions storage,
+using the same blittable JSON document format as regular documents.
+##### Revisions Compression
+
+* By default, revisions are compressed.
+  This setting can be customized server-wide via the [CompressRevisionsDefault](../../server/configuration/database-configuration.mdx#databasescompressioncompressrevisionsdefault) configuration key.
+* At the database level, revisions compression can be customized via the database record,
+  as shown in [this example](../../server/storage/documents-compression.mdx#set-compression-for-selected-collections).
+* Individual fields are compressed as they are compressed in regular documents:
+  any text field exceeding 128 bytes is compressed.
+  Learn more about documents compression in [Documents Compression](../../server/storage/documents-compression.mdx).
+##### Storage of document extensions in revisions
+
+Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-time-series) about revisions and **time series**.
+Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-counters) about revisions and **counters**.
+Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-attachments) about revisions and **attachments**.
+
+## Revisions storage
+
+##### Revisions documents storage
+
+When a document revision is created, a full version of the modified document is stored in the revisions storage,
+using the same blittable JSON document format as regular documents.
+
+##### Revisions Compression
+
+* By default, revisions are compressed.
+ This setting can be customized server-wide via the [CompressRevisionsDefault](../../server/configuration/database-configuration.mdx#databasescompressioncompressrevisionsdefault) configuration key.
+* At the database level, revisions compression can be customized via the database record,
+ as shown in [this example](../../server/storage/documents-compression.mdx#set-compression-for-selected-collections).
+* Individual fields are compressed as they are compressed in regular documents:
+ any text field exceeding 128 bytes is compressed.
+ Learn more about documents compression in [Documents Compression](../../server/storage/documents-compression.mdx).
+
+##### Storage of document extensions in revisions
+
+Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-time-series) about revisions and **time series**.
+Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-counters) about revisions and **counters**.
+Read [here](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-attachments) about revisions and **attachments**.
+
+
+
+## Force revision creation
+
+So far we've discussed the automatic creation of revisions when the feature is enabled.
+However, you can also **force the creation** of a document revision, whether the feature is enabled or not.
+
+This is useful when you choose to disable automatic revisions creation but still want to create a revision for a specific document,
+e.g., to take a snapshot of the document as a precaution before editing it.
+
+* You can force the creation of a revision via the [Studio](../../document-extensions/revisions/overview.mdx#force-revision-creation-via-the-studio)
+ or use the [Client API](../../document-extensions/revisions/overview.mdx#force-revision-creation-via-the-client-api).
+* A revision **will** be created even if the revisions configuration is disabled for the document's collection.
+* A revision **will** be created even if the document has not been modified
+ (unless the document has revisions and the latest revision contains the current document contents).
+* As with revisions created automatically by the revisions configuration,
+ deleting a document with a manually created revision will move the revision to the [Revisions Bin](../../studio/database/document-extensions/revisions/revisions-bin.mdx),
+ and a "Delete Revision" will be created.
+
+##### Force revision creation via the Studio
+
+To create a revision manually via the Studio,
+click the **Create Revision** button in the Revisions Tab in the document view.
+
+![Create a revision manually](./assets/revisions-8.png)
+
+##### Force revision creation via the Client API
+
+To create a revision manually via the API, use the advanced session `force_revision_creation_for` method.
+
+
+
+{`with self.store.open_session() as session:
+    company = session.load(company_id, Company)
+    company.name = "HR V2"
+
+    session.advanced.revisions.force_revision_creation_for(company)
+    session.save_changes()
+
+    revisions = session.advanced.revisions.get_for(company.Id, Company)
+    revisions_count = len(revisions)
+
+    self.assertEqual(1, revisions_count)
+    # Assert that the revision contains the document as it was BEFORE the change:
+    self.assertEqual("HR", revisions[0].name)
+`}
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-csharp.mdx new file mode 100644 index 0000000000..1906658a54 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-csharp.mdx @@ -0,0 +1,264 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This page describes relationships between Revisions and other RavenDB features, including:
+ * When revisions creation is triggered by other features
+ * How revisions are supported by other features
+
+* In this page:
+ * [Revisions and Counters](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-counters)
+ * [Revisions and Time Series](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-time-series)
+ * [Revisions and Attachments](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-attachments)
+ * [Revisions and Replication](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-replication)
+ * [Revisions and ETL](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-etl)
+ * [Revisions and Backup](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-backup)
+ * [Revisions and Data Subscriptions](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-data-subscriptions)
+ * [Revisions Import and Export](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-import-and-export)
+
+
+## Revisions and Counters
+
+### Revisions creation:
+
+* A document revision will be created when:
+ * A new counter is **created** on the document.
+ * A counter is **deleted** from the document.
+
+* A revision will Not be created upon modifying the value of an existing [counter](../../document-extensions/counters/overview.mdx).
+
+### Stored data:
+
+* A revision that is created for a document that contains counters
+ will have the `@counters-snapshot` property in its **metadata**.
+
+* This property holds the counters' names and values at the time when the revision was created.
+
+* The counter's value stored in the revision's metadata is the **accumulated value** from all nodes.
+ It does not specify the value of the counter on each individual node.
+
+* Sample revision metadata:
+
+
+{`\{
+    ...
+    "@metadata": \{
+        "@counters-snapshot": \{
+            "counterName1": 7,
+            "counterName2": 42
+        \},
+        ...
+    \}
+\}
+`}
+
+
+### Reverted data:
+
+When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has counters,
+the counters from the revision are restored, along with their values.
+
+### Extract counters data from revisions:
+
+Use [GetMetadataFor](../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata) to get the revisions metadata for a specified document,
+and then extract the counters' data.
+
+
+
+{`// Use GetMetadataFor to get revisions metadata for document 'orders/1-A'
+List<MetadataAsDictionary> revisionsMetadata = session
+    .Advanced.Revisions.GetMetadataFor(id: "orders/1-A");
+
+// Extract the counters data from the metadata
+List<MetadataAsDictionary> countersDataInRevisions = revisionsMetadata
+    .Where(metadata =>
+        metadata.ContainsKey(Constants.Documents.Metadata.RevisionCounters))
+    .Select(metadata =>
+        (MetadataAsDictionary)metadata[Constants.Documents.Metadata.RevisionCounters])
+    .ToList();
+`}
+
+
+
+{`// Use GetMetadataForAsync to get revisions metadata for document 'orders/1-A'
+List<MetadataAsDictionary> revisionsMetadata = await asyncSession
+    .Advanced.Revisions.GetMetadataForAsync(id: "orders/1-A");
+
+// Extract the counters data from the metadata
+List<MetadataAsDictionary> countersDataInRevisions = revisionsMetadata
+    .Where(metadata =>
+        metadata.ContainsKey(Constants.Documents.Metadata.RevisionCounters))
+    .Select(metadata =>
+        (MetadataAsDictionary)metadata[Constants.Documents.Metadata.RevisionCounters])
+    .ToList();
+`}
+
+
+
+
+
+## Revisions and Time Series
+
+### Revisions Creation:
+
+* A document revision will be created when:
+ * A new time series is **created** on the document.
+ * A time series is **deleted** from the document.
+ (A time series is deleted from a document when all its entries are deleted.)
+
+* A revision will Not be created upon modifying the values of an existing [time series](../../document-extensions/timeseries/overview.mdx).
+
+### Stored Data:
+
+* A revision that is created for a document that contains time series
+ will have the `@timeseries-snapshot` property in its **metadata**.
+
+* This property does Not hold the time series values.
+ It only contains the following information for the time when the revision was created:
+ * The time series names
+ * The number of entries in each time series
+ * Dates of the first and last entry in each time series
+
+* Read more about Revisions and Time Series [here](../../document-extensions/timeseries/time-series-and-other-features.mdx#revisions).
+
+* Sample revision metadata:
+
+
+{`\{
+    ...
+    "@metadata": \{
+        "@timeseries-snapshot": \{
+            "timeSeriesName1": \{
+                "Count": 5,
+                "Start": "2023-03-22T11:25:00.9110000Z",
+                "End": "2023-03-22T11:28:34.9110000Z"
+            \},
+            "timeSeriesName2": \{
+                "Count": 10,
+                "Start": "2023-03-22T11:26:00.3950000Z",
+                "End": "2023-03-22T11:28:48.3950000Z"
+            \}
+        \},
+        ...
+    \}
+\}
+`}
+
+
+### Reverted Data:
+
+When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has a time series:
+
+* If the current document **contains** a time series name as in the revision:
+ * The reverted document will keep the time series entries & values as they were in the current document.
+ * Time series entries and values from the revision are Not restored.
+
+* If the current document **doesn't contain** a time series name as in the revision,
+ or if the document itself was deleted:
+ * The reverted document will have the time series from the revision.
+ * However, the entries count will be 0.
+
+
+
+## Revisions and Attachments
+
+### Revisions Creation:
+
+A document revision will be created when:
+
+ * A new [attachment](../../document-extensions/attachments/what-are-attachments.mdx) is **added** to the document.
+ * An attachment is **deleted** from the document.
+
+### Stored Data:
+
+* A revision that is created for a document with attachments
+ will have the `@attachments` property in its **metadata**.
+ +* This property does Not hold the actual attachments, as the files are stored in **separate storage**. + The property only contains the following information for each attachment the document had when the revision was created: + * Attachment file name + * File content type + * File size + * A hash string (a reference to the file in the storage) + +* Existing attachment files in the storage are Not duplicated per revision that is created when the document itself is modified. + +* An attachment file is removed from RavenDB's storage only when there is no live document or a revision that refers to it. + +* Sample revision metadata: + + +{`\{ + ... + "@metadata": \{ + "@attachments": [ + \{ + "Name": "attachmentFileName.png", + "ContentType": "image/png", + "Size": 33241, + "Hash": "iFg0o6D38pUcWGVlP71ddDp8SCcoEal47kG3LtWx0+Y=", + \}, + ], + ... + \} +\} +`} + + +### Reverted Data: + +When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has attachments, +the attachments are restored to their state when the revision was created. + + + +## Revisions and Replication + +* Revisions are transferred during [replication](../../server/clustering/replication/replication-overview.mdx) from one database instance to another. + +* The revisions will be replicated by all replication types: + * [Internal replication](../../server/clustering/replication/replication-overview.mdx#internal-replication) + * [External replication](../../server/clustering/replication/replication-overview.mdx#external-replication) + * [Hub/Sink replication](../../server/clustering/replication/replication-overview.mdx#hubsink-replication) + +* Revisions can [help keep data consistency](../../server/clustering/replication/replication-overview.mdx#how-revisions-replication-help-data-consistency). + + + +## Revisions and ETL + +* An [ETL](../../server/ongoing-tasks/etl/raven.mdx) ongoing task does Not send revisions to the destination database. + +* However, if revisions are enabled on the destination database, + whenever the ETL process sends a modified document and the target document is overwritten, + a new revision will be created for the document in the target database as expected. + + + +## Revisions and Backup + +Revisions are [backed up](../../server/ongoing-tasks/backup-overview.mdx#backup-contents) both by a logical-backup and by a snapshot. + + + +## Revisions and Data Subscriptions + +* Learn about revisions and data subscriptions [here](../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx). + + + +## Revisions Import and Export + +* Revisions can be imported and exported with a `.ravendbdump` file: + * Using [the Client API](../../client-api/smuggler/what-is-smuggler.mdx) + * From the [import](../../studio/database/tasks/import-data/import-data-file.mdx#import-options) and [export](../../studio/database/tasks/export-database.mdx#export-options) views in the Studio + +* Revisions can be imported when migrating data from another [live RavenDB server](../../studio/database/tasks/import-data/import-from-ravendb.mdx#step-#4:-set-import-options). 
+
+ ![Import from Live Server](./assets/import-from-live-server.png)
+
+
+
+
 diff --git a/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-java.mdx b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-java.mdx new file mode 100644 index 0000000000..f04d22d644 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-java.mdx @@ -0,0 +1,33 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When the revisions feature is enabled, a snapshot of a document that holds *Counters* will include in its metadata a `@counters-snapshot` property
+that holds all the document's counter names **and values** (at the time of the revision creation).
+This can be useful when historic records of the documents and their counter values are needed.
+
+
+Creation of a new counter and deletion of a counter modify the parent document (documents hold
+their counter names in the metadata) and will trigger a revision creation.
+Incrementing an existing counter does not modify the parent document and will not trigger a revision creation.
+
+
+### Example
+
+
+
+{`List<MetadataAsDictionary> orderRevisionsMetadata = session.advanced()
+    .revisions()
+    .getMetadataFor("orders/1-A", 0, 10);
+
+List<MetadataAsDictionary> orderCountersSnapshots = orderRevisionsMetadata
+    .stream()
+    .filter(x -> x.containsKey("@counters-snapshot"))
+    .map(x -> (MetadataAsDictionary) x.get("@counters-snapshot"))
+    .collect(Collectors.toList());
+`}
+
+
+
 diff --git a/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-nodejs.mdx new file mode 100644 index 0000000000..266c485b5e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-nodejs.mdx @@ -0,0 +1,308 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This page describes relationships between Revisions and other RavenDB features, including:
+ * When revisions creation is triggered by other features
+ * How revisions are supported by other features
+
+* In this page:
+ * [Revisions and Counters](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-counters)
+ * [Revisions and Time Series](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-time-series)
+ * [Revisions and Attachments](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-attachments)
+ * [Revisions and Replication](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-replication)
+ * [Revisions and ETL](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-etl)
+ * [Revisions and Backup](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-backup)
+ * [Revisions and Data Subscriptions](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-data-subscriptions)
+ * [Revisions Import and Export](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-import-and-export)
+
+
+## Revisions and Counters
+
+
+
+**Revisions creation**
+
+* A document revision will be created when:
+ * A new counter is
**created** on the document. + * A counter is **deleted** from the document. + +* A revision will Not be created upon modifying the value of an existing counter. + + + + + +**Stored data** + +* A revision that is created for a document that contains counters + will have the `@counters-snapshot` property in its **metadata**. + +* This property holds the counters' names and values at the time when the revision was created. + +* The counter's value stored in the revision's metadata is the **accumulated value** from all nodes. + It does not specify the value of the counter on each individual node. + +* Sample revision metadata: + + +{`\{ + ... + "@metadata": \{ + "@counters-snapshot": \{ + "counterName1": 7, + "counterName2": 42 + \}, + ... + \} +\} +`} + + + + + + + +**Reverted data** + +* When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has counters, + the counters from the revision are restored to functionality along with their values. + + + + + +**Extract counters data from revisions** + +* Use [getMetadataFor](../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata) to get the revisions metadata for a specified document, + and then extract the counters' data. + + + +{`// Use getMetadataFor to get revisions metadata for document 'orders/1-A' +const revisionsMetadata = await session.advanced.revisions.getMetadataFor("orders/1-A"); + +// Extract the counters data from the metadata +const countersDataInRevisions = revisionsMetadata + .filter(x => !!x[CONSTANTS.Documents.Metadata.REVISION_COUNTERS]) + .map(x => x[CONSTANTS.Documents.Metadata.REVISION_COUNTERS]); +`} + + + + + + + +## Revisions and Time Series + + + +**Revisions Creation** + +* A document revision will be created when: + * A new time series is **created** on the document. + * A time series is **deleted** from the document. + (A time series is deleted from a document when all its entries are deleted) + +* A revision will Not be created upon modifying the values of an existing [time series](../../document-extensions/timeseries/overview.mdx). + + + + + +**Stored Data** + +* A revision that is created for a document that contains time series + will have the `@timeseries-snapshot` property in its **metadata**. + +* This property does Not hold the time series values data, + it only contains the following information for the time when the revision was created: + * The time series names + * The number of entries in each time series + * Dates of the first and last entry in each time series + +* Read more about Revisions and Time Series [here](../../document-extensions/timeseries/time-series-and-other-features.mdx#revisions). + +* Sample revision metadata: + + +{`\{ + ... + "@metadata": \{ + "@timeseries-snapshot": \{ + "timeSeriesName1": \{ + "Count": 5, + "Start": "2023-03-22T11:25:00.9110000Z", + "End": "2023-03-22T11:28:34.9110000Z" + \}, + "timeSeriesName2": \{ + "Count": 10, + "Start": "2023-03-22T11:26:00.3950000Z", + "End": "2023-03-22T11:28:48.3950000Z" + \} + \}, + ... + \} +\} +`} + + + + + + + +**Reverted Data** + +When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has a time series: + +* If the current document **contains** a time series name as in the revision: + * The reverted document will keep the time series entries & values as it was in the current document. + * Time series entries and values from the revision are Not restored. 
+ +* If the current document **doesn't contain** a time series name as in the revision, + or if the document itself was deleted: + * The reverted document will have the time series from the revision + * However, the entries count will be 0 + + + + + +## Revisions and Attachments + + + +**Revisions Creation** + +* A document revision will be created when: + * A new [attachment](../../document-extensions/attachments/what-are-attachments.mdx) is **added** to the document. + * An attachment is **deleted** from the document. + + + + + +**Stored Data** + +* A revision that is created for a document with attachments + will have the `@attachments` property in its **metadata**. + +* This property does Not hold the actual attachments, as the files are stored in **separate storage**. + The property only contains the following information for each attachment the document had when the revision was created: + * Attachment file name + * File content type + * File size + * A hash string (a reference to the file in the storage) + +* Existing attachment files in the storage are Not duplicated per revision that is created when the document itself is modified. + +* An attachment file is removed from RavenDB's storage only when there is no live document or a revision that refers to it. + +* Sample revision metadata: + + +{`\{ + ... + "@metadata": \{ + "@attachments": [ + \{ + "Name": "attachmentFileName.png", + "ContentType": "image/png", + "Size": 33241, + "Hash": "iFg0o6D38pUcWGVlP71ddDp8SCcoEal47kG3LtWx0+Y=", + \}, + ], + ... + \} +\} +`} + + + + + + + +**Reverted Data** + +* When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has attachments, + the attachments are restored to their state when the revision was created. + + + + + +## Revisions and Replication + + + +* Revisions are transferred during [replication](../../server/clustering/replication/replication-overview.mdx) from one database instance to another. + +* The revisions will be replicated by all replication types: + * [Internal replication](../../server/clustering/replication/replication-overview.mdx#internal-replication) + * [External replication](../../server/clustering/replication/replication-overview.mdx#external-replication) + * [Hub/Sink replication](../../server/clustering/replication/replication-overview.mdx#hubsink-replication) + +* Revisions can [help keep data consistency](../../server/clustering/replication/replication-overview.mdx#how-revisions-replication-help-data-consistency). + + + + + +## Revisions and ETL + + + +* An [ETL](../../server/ongoing-tasks/etl/raven.mdx) ongoing task does Not send revisions to the destination database. + +* However, if revisions are enabled on the destination database, + whenever the ETL process sends a modified document and the target document is overwritten, + a new revision will be created for the document in the target database as expected. + + + + + +## Revisions and Backup + + + +* Revisions are [backed up](../../server/ongoing-tasks/backup-overview.mdx#backup-contents) both by a logical-backup and by a snapshot. + + + + + +## Revisions and Data Subscriptions + + + +* Learn about revisions and data subscriptions [here](../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx). 
+
+
+
+
+## Revisions Import and Export
+
+
+
+* Revisions can be imported and exported with a `.ravendbdump` file:
+ * Using [the Client API](../../client-api/smuggler/what-is-smuggler.mdx)
+ * From the [import](../../studio/database/tasks/import-data/import-data-file.mdx#import-options) and [export](../../studio/database/tasks/export-database.mdx#export-options) views in the Studio
+
+* Revisions can be imported when migrating data from another [live RavenDB server](../../studio/database/tasks/import-data/import-from-ravendb.mdx#step-#4:-set-import-options).
+
+ ![Import from Live Server](./assets/import-from-live-server.png)
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-php.mdx b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-php.mdx new file mode 100644 index 0000000000..3c8783a8ad --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-php.mdx @@ -0,0 +1,217 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This page describes relationships between Revisions and other RavenDB features, including:
+ * When revisions creation is triggered by other features
+ * How revisions are supported by other features
+
+* In this page:
+ * [Revisions and Counters](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-counters)
+ * [Revisions and Time Series](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-time-series)
+ * [Revisions and Attachments](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-attachments)
+ * [Revisions and Replication](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-replication)
+ * [Revisions and ETL](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-etl)
+ * [Revisions and Backup](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-backup)
+ * [Revisions Import and Export](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-import-and-export)
+
+
+## Revisions and Counters
+
+### Revisions creation:
+
+* A document revision will be created when:
+ * A new counter is **created** on the document.
+ * A counter is **deleted** from the document.
+
+* A revision will Not be created upon modifying the value of an existing [counter](../../document-extensions/counters/overview.mdx).
+
+### Stored data:
+
+* A revision that is created for a document that contains counters
+ will have the `@counters-snapshot` property in its **metadata**.
+
+* This property holds the counters' names and values at the time when the revision was created.
+
+* The counter's value stored in the revision's metadata is the **accumulated value** from all nodes.
+ It does not specify the value of the counter on each individual node.
+
+* Sample revision metadata:
+
+
+{`\{
+    ...
+    "@metadata": \{
+        "@counters-snapshot": \{
+            "counterName1": 7,
+            "counterName2": 42
+        \},
+        ...
+    \}
+\}
+`}
+
+
+### Reverted data:
+
+When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has counters,
+the counters from the revision are restored, along with their values.
+ + + +## Revisions and Time Series + +### Revisions Creation: + +* A document revision will be created when: + * A new time series is **created** on the document. + * A time series is **deleted** from the document. + (A time series is deleted from a document when all its entries are deleted) + +* A revision will Not be created upon modifying the values of an existing [time series](../../document-extensions/timeseries/overview.mdx). +### Stored Data: + +* A revision that is created for a document that contains time series + will have the `@timeseries-snapshot` property in its **metadata**. + +* This property does Not hold the time series values data, + it only contains the following information for the time when the revision was created: + * The time series names + * The number of entries in each time series + * Dates of the first and last entry in each time series + +* Read more about Revisions and Time Series [here](../../document-extensions/timeseries/time-series-and-other-features.mdx#revisions). + +* Sample revision metadata: + + +{`\{ + ... + "@metadata": \{ + "@timeseries-snapshot": \{ + "timeSeriesName1": \{ + "Count": 5, + "Start": "2023-03-22T11:25:00.9110000Z", + "End": "2023-03-22T11:28:34.9110000Z" + \}, + "timeSeriesName2": \{ + "Count": 10, + "Start": "2023-03-22T11:26:00.3950000Z", + "End": "2023-03-22T11:28:48.3950000Z" + \} + \}, + ... + \} +\} +`} + + +### Reverted Data: + +When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has a time series: + +* If the current document **contains** a time series name as in the revision: + * The reverted document will keep the time series entries & values as it was in the current document. + * Time series entries and values from the revision are Not restored. + +* If the current document **doesn't contain** a time series name as in the revision, + or if the document itself was deleted: + * The reverted document will have the time series from the revision + * However, the entries count will be 0 + + + +## Revisions and Attachments + +### Revisions Creation: + +A document revision will be created when: + + * A new [attachment](../../document-extensions/attachments/what-are-attachments.mdx) is **added** to the document. + * An attachment is **deleted** from the document. +### Stored Data: + +* A revision that is created for a document with attachments + will have the `@attachments` property in its **metadata**. + +* This property does Not hold the actual attachments, as the files are stored in **separate storage**. + The property only contains the following information for each attachment the document had when the revision was created: + * Attachment file name + * File content type + * File size + * A hash string (a reference to the file in the storage) + +* Existing attachment files in the storage are Not duplicated per revision that is created when the document itself is modified. + +* An attachment file is removed from RavenDB's storage only when there is no live document or a revision that refers to it. + +* Sample revision metadata: + + +{`\{ + ... + "@metadata": \{ + "@attachments": [ + \{ + "Name": "attachmentFileName.png", + "ContentType": "image/png", + "Size": 33241, + "Hash": "iFg0o6D38pUcWGVlP71ddDp8SCcoEal47kG3LtWx0+Y=", + \}, + ], + ... 
+ \} +\} +`} + + +### Reverted Data: + +When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has attachments, +the attachments are restored to their state when the revision was created. + + + +## Revisions and Replication + +* Revisions are transferred during [replication](../../server/clustering/replication/replication-overview.mdx) from one database instance to another. + +* The revisions will be replicated by all replication types: + * [Internal replication](../../server/clustering/replication/replication-overview.mdx#internal-replication) + * [External replication](../../server/clustering/replication/replication-overview.mdx#external-replication) + * [Hub/Sink replication](../../server/clustering/replication/replication-overview.mdx#hubsink-replication) + +* Revisions can [help keep data consistency](../../server/clustering/replication/replication-overview.mdx#how-revisions-replication-help-data-consistency). + + + +## Revisions and ETL + +* An [ETL](../../server/ongoing-tasks/etl/raven.mdx) ongoing task does Not send revisions to the destination database. + +* However, if revisions are enabled on the destination database, + whenever the ETL process sends a modified document and the target document is overwritten, + a new revision will be created for the document in the target database as expected. + + + +## Revisions and Backup + +Revisions are [backed up](../../server/ongoing-tasks/backup-overview.mdx#backup-contents) both by a logical-backup and by a snapshot. + + + +## Revisions Import and Export + +* Revisions can be imported and exported with a `.ravendbdump` file + from the [import](../../studio/database/tasks/import-data/import-data-file.mdx#import-options) and [export](../../studio/database/tasks/export-database.mdx#export-options) views in the Studio + +* Revisions can be imported when migrating data from another [live RavenDB server](../../studio/database/tasks/import-data/import-from-ravendb.mdx#step-#4:-set-import-options). 
+
+ ![Import from Live Server](./assets/import-from-live-server.png)
+
+
+
+
 diff --git a/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-python.mdx b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-python.mdx new file mode 100644 index 0000000000..d42399f08e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/_revisions-and-other-features-python.mdx @@ -0,0 +1,224 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This page describes relationships between Revisions and other RavenDB features, including:
+ * When revisions creation is triggered by other features
+ * How revisions are supported by other features
+
+* In this page:
+ * [Revisions and Counters](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-counters)
+ * [Revisions and Time Series](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-time-series)
+ * [Revisions and Attachments](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-attachments)
+ * [Revisions and Replication](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-replication)
+ * [Revisions and ETL](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-etl)
+ * [Revisions and Backup](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-backup)
+ * [Revisions and Data Subscriptions](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-and-data-subscriptions)
+ * [Revisions Import and Export](../../document-extensions/revisions/revisions-and-other-features.mdx#revisions-import-and-export)
+
+
+## Revisions and Counters
+
+### Revisions creation:
+
+* A document revision will be created when:
+ * A new counter is **created** on the document.
+ * A counter is **deleted** from the document.
+
+* A revision will Not be created upon modifying the value of an existing [counter](../../document-extensions/counters/overview.mdx).
+
+### Stored data:
+
+* A revision that is created for a document that contains counters
+ will have the `@counters-snapshot` property in its **metadata**.
+
+* This property holds the counters' names and values at the time when the revision was created.
+
+* The counter's value stored in the revision's metadata is the **accumulated value** from all nodes.
+ It does not specify the value of the counter on each individual node.
+
+* Sample revision metadata:
+
+
+{`\{
+    ...
+    "@metadata": \{
+        "@counters-snapshot": \{
+            "counterName1": 7,
+            "counterName2": 42
+        \},
+        ...
+    \}
+\}
+`}
+
+
+### Reverted data:
+
+When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has counters,
+the counters from the revision are restored, along with their values.
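+
+### Extract counters data from revisions:
+
+Use [get_metadata_for](../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata) to get the revisions metadata for a specified document,
+and then extract the counters' data.
+The snippet below is a minimal sketch; it assumes an open session and a document `orders/1-A` that holds counters,
+and that the returned metadata objects support dict-style access:
+
+
+
+{`# Use get_metadata_for to get revisions metadata for document 'orders/1-A'
+revisions_metadata = session.advanced.revisions.get_metadata_for("orders/1-A")
+
+# Extract the counters data from the metadata
+# (assumes the metadata objects support dict-style access)
+counters_data_in_revisions = [
+    metadata["@counters-snapshot"]
+    for metadata in revisions_metadata
+    if "@counters-snapshot" in metadata
+]
+`}
+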
+
+
+
+## Revisions and Time Series
+
+### Revisions Creation:
+
+* A document revision will be created when:
+ * A new time series is **created** on the document.
+ * A time series is **deleted** from the document.
+ (A time series is deleted from a document when all its entries are deleted.)
+
+* A revision will Not be created upon modifying the values of an existing [time series](../../document-extensions/timeseries/overview.mdx).
+
+### Stored Data:
+
+* A revision that is created for a document that contains time series
+ will have the `@timeseries-snapshot` property in its **metadata**.
+
+* This property does Not hold the time series values.
+ It only contains the following information for the time when the revision was created:
+ * The time series names
+ * The number of entries in each time series
+ * Dates of the first and last entry in each time series
+
+* Read more about Revisions and Time Series [here](../../document-extensions/timeseries/time-series-and-other-features.mdx#revisions).
+
+* Sample revision metadata:
+
+
+{`\{
+    ...
+    "@metadata": \{
+        "@timeseries-snapshot": \{
+            "timeSeriesName1": \{
+                "Count": 5,
+                "Start": "2023-03-22T11:25:00.9110000Z",
+                "End": "2023-03-22T11:28:34.9110000Z"
+            \},
+            "timeSeriesName2": \{
+                "Count": 10,
+                "Start": "2023-03-22T11:26:00.3950000Z",
+                "End": "2023-03-22T11:28:48.3950000Z"
+            \}
+        \},
+        ...
+    \}
+\}
+`}
+
+
+### Reverted Data:
+
+When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has a time series:
+
+* If the current document **contains** a time series name as in the revision:
+ * The reverted document will keep the time series entries & values as they were in the current document.
+ * Time series entries and values from the revision are Not restored.
+
+* If the current document **doesn't contain** a time series name as in the revision,
+ or if the document itself was deleted:
+ * The reverted document will have the time series from the revision.
+ * However, the entries count will be 0.
+
+
+
+## Revisions and Attachments
+
+### Revisions Creation:
+
+A document revision will be created when:
+
+ * A new [attachment](../../document-extensions/attachments/what-are-attachments.mdx) is **added** to the document.
+ * An attachment is **deleted** from the document.
+
+### Stored Data:
+
+* A revision that is created for a document with attachments
+ will have the `@attachments` property in its **metadata**.
+
+* This property does Not hold the actual attachments, as the files are stored in **separate storage**.
+ The property only contains the following information for each attachment the document had when the revision was created:
+ * Attachment file name
+ * File content type
+ * File size
+ * A hash string (a reference to the file in the storage)
+
+* Existing attachment files in the storage are Not duplicated per revision that is created when the document itself is modified.
+
+* An attachment file is removed from RavenDB's storage only when there is no live document or revision that refers to it.
+
+* Sample revision metadata:
+
+
+{`\{
+    ...
+    "@metadata": \{
+        "@attachments": [
+            \{
+                "Name": "attachmentFileName.png",
+                "ContentType": "image/png",
+                "Size": 33241,
+                "Hash": "iFg0o6D38pUcWGVlP71ddDp8SCcoEal47kG3LtWx0+Y="
+            \}
+        ],
+        ...
+    \}
+\}
+`}
+
+
+### Reverted Data:
+
+When a document is [reverted](../../document-extensions/revisions/revert-revisions.mdx) to a revision that has attachments,
+the attachments are restored to their state when the revision was created.
+
+
+
+## Revisions and Replication
+
+* Revisions are transferred during [replication](../../server/clustering/replication/replication-overview.mdx) from one database instance to another.
+ +* The revisions will be replicated by all replication types: + * [Internal replication](../../server/clustering/replication/replication-overview.mdx#internal-replication) + * [External replication](../../server/clustering/replication/replication-overview.mdx#external-replication) + * [Hub/Sink replication](../../server/clustering/replication/replication-overview.mdx#hubsink-replication) + +* Revisions can [help keep data consistency](../../server/clustering/replication/replication-overview.mdx#how-revisions-replication-help-data-consistency). + + + +## Revisions and ETL + +* An [ETL](../../server/ongoing-tasks/etl/raven.mdx) ongoing task does Not send revisions to the destination database. + +* However, if revisions are enabled on the destination database, + whenever the ETL process sends a modified document and the target document is overwritten, + a new revision will be created for the document in the target database as expected. + + + +## Revisions and Backup + +Revisions are [backed up](../../server/ongoing-tasks/backup-overview.mdx#backup-contents) both by a logical-backup and by a snapshot. + + + +## Revisions and Data Subscriptions + +* Learn about revisions and data subscriptions [here](../../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx). + + + +## Revisions Import and Export + +* Revisions can be imported and exported with a `.ravendbdump` file + from the [import](../../studio/database/tasks/import-data/import-data-file.mdx#import-options) and [export](../../studio/database/tasks/export-database.mdx#export-options) views in the Studio + +* Revisions can be imported when migrating data from another [live RavenDB server](../../studio/database/tasks/import-data/import-from-ravendb.mdx#step-#4:-set-import-options). 
+ + ![Import from Live Server](./assets/import-from-live-server.png) + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/import-from-live-server.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/import-from-live-server.png new file mode 100644 index 0000000000..8487a3c648 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/import-from-live-server.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revert-revisions-1.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revert-revisions-1.png new file mode 100644 index 0000000000..c2724ad33e Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revert-revisions-1.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revert-revisions-2.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revert-revisions-2.png new file mode 100644 index 0000000000..a3db14a7f7 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revert-revisions-2.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-1.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-1.png new file mode 100644 index 0000000000..07f2beb97d Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-1.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-2.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-2.png new file mode 100644 index 0000000000..0f386ee78f Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-2.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-3.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-3.png new file mode 100644 index 0000000000..bb94a96c2f Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-3.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-4.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-4.png new file mode 100644 index 0000000000..a54aaf94b7 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-4.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-5.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-5.png new file mode 100644 index 0000000000..6473cd3204 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-5.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-6.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-6.png new file mode 100644 index 0000000000..8221fb94da Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-6.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-7.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-7.png new file mode 100644 index 0000000000..1fee283b6c Binary files /dev/null and 
b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-7.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-8.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-8.png new file mode 100644 index 0000000000..cb7c6c6eeb Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-8.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-bin-cleaner.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-bin-cleaner.png new file mode 100644 index 0000000000..b81382b595 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/revisions-bin-cleaner.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/set-point-in-time.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/set-point-in-time.png new file mode 100644 index 0000000000..72edbfa203 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/set-point-in-time.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/troubleshooting_rev_db-record.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/troubleshooting_rev_db-record.png new file mode 100644 index 0000000000..5db07d3bee Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/troubleshooting_rev_db-record.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/troubleshooting_rev_stats-DB-ID.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/troubleshooting_rev_stats-DB-ID.png new file mode 100644 index 0000000000..ad1a067f02 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/troubleshooting_rev_stats-DB-ID.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/assets/troubleshooting_rev_unused-db-IDs.png b/versioned_docs/version-7.1/document-extensions/revisions/assets/troubleshooting_rev_unused-db-IDs.png new file mode 100644 index 0000000000..169b545716 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/assets/troubleshooting_rev_unused-db-IDs.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/_category_.json b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_category_.json new file mode 100644 index 0000000000..2b13474af7 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_category_.json @@ -0,0 +1,4 @@ +{
+  "position": 1,
+  "label": "Client API"
+} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-csharp.mdx new file mode 100644 index 0000000000..2e94d618c4 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-csharp.mdx @@ -0,0 +1,46 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* For a general revisions overview see: [Revisions Overview](../../../document-extensions/revisions/overview.mdx).
+ +* Document revisions can be managed from the [Studio](../../../studio/database/settings/document-revisions.mdx) or from the **Client API**. + +* From the **Client API**, revisions can be configured and managed by: + * [Store Operations](../../../document-extensions/revisions/client-api/overview.mdx#revisions-store-operations) + * [Session Methods](../../../document-extensions/revisions/client-api/overview.mdx#revisions-session-methods) + + +## Revisions Store Operations + +* [ConfigureRevisionsOperation](../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) - apply a revision configuration +* [ConfigureRevisionsForConflictsOperation](../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx) - manage conflicting documents revisions +* [GetRevisionsOperation](../../../document-extensions/revisions/client-api/operations/get-revisions.mdx) - get revisions + + + +## Revisions Session methods + +* **Get revisions**: + + * [GetFor](../../../document-extensions/revisions/client-api/session/loading.mdx#get-all-revisions) - retrieve all revisions kept for a document. + * [GetMetadataFor](../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata) - retrieve metadata for all revisions kept for a document. + * [Get](../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-creation-time) - retrieve revisions by change vector or creation time. + * Read [here](../../../client-api/session/how-to/perform-operations-lazily.mdx#getrevisions) about **lazy versions** of `GetFor`, `GetMetadataFor`, and `Get`. +* **Include revisions**: + + * [IncludeRevisions](../../../document-extensions/revisions/client-api/session/including.mdx#section) - include revisions when retrieving documents via `Session.Load` or `Session.Query`. + * [RawQuery](../../../document-extensions/revisions/client-api/session/including.mdx#including-revisions-with-sessionadvancedrawquery) - Learn how to include revisions when retrieving documents via raw queries. +* **Count revisions**: + + * [GetCountFor](../../../document-extensions/revisions/client-api/session/counting.mdx#getcountfor) - get the number of revisions kept for a document. +* **Force revision creation**: + + * [ForceRevisionCreationFor](../../../document-extensions/revisions/overview.mdx#force-revision-creation-via-api) - create a revision even if revision configuration is disabled. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-java.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-java.mdx new file mode 100644 index 0000000000..e4a26f9b66 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-java.mdx @@ -0,0 +1,51 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The Revisions client API includes a set of session methods and store operations + that you can use to [configure](../../../document-extensions/revisions/overview.mdx#revisions-configuration), + manage and use document revisions. 
+* Learn about revisions here: [Document Revisions Overview](../../../document-extensions/revisions/overview.mdx)
+
+* In this page:
+ * [Revisions Store Operations](../../../document-extensions/revisions/client-api/overview.mdx#revisions-store-operations)
+ * [Revisions Session methods](../../../document-extensions/revisions/client-api/overview.mdx#revisions-session-methods)
+
+
+## Revisions Store Operations
+
+* [Creating a Revisions Configuration](../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx)
+ * [ConfigureRevisionsOperation](../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#section)
+ Use this operation to apply a
+ [Revisions configuration](../../../document-extensions/revisions/overview.mdx#revisions-configuration)
+ to your database.
+* [Getting and Counting Revisions](../../../document-extensions/revisions/client-api/operations/get-revisions.mdx)
+ * [GetRevisionsOperation](../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#getrevisionsoperation)
+ Use this operation to get and count revisions.
+
+
+
+## Revisions Session methods
+
+* [Loading revisions and their metadata](../../../document-extensions/revisions/client-api/session/loading.mdx)
+ * [GetFor](../../../document-extensions/revisions/client-api/session/loading.mdx#getfor)
+ Use this method to retrieve all the revisions that are kept for a specified document.
+ * [GetMetadataFor](../../../document-extensions/revisions/client-api/session/loading.mdx#getmetadatafor)
+ Use this method to retrieve metadata for all revisions kept for a specified document.
+ * [Get](../../../document-extensions/revisions/client-api/session/loading.mdx#get)
+ Use this method to retrieve revisions by change vector or creation time.
+* [Counting Revisions](../../../document-extensions/revisions/client-api/session/counting.mdx)
+ * [GetCountFor](../../../document-extensions/revisions/client-api/session/counting.mdx#getcountfor)
+ Use this method to count the revisions kept for a document.
+* [Including revisions](../../../document-extensions/revisions/client-api/session/including.mdx)
+ * [IncludeRevisions](../../../document-extensions/revisions/client-api/session/including.mdx#section)
+ Use this method to include document revisions when retrieving documents via `Session.Load` or `Session.Query`.
+ * [RawQuery](../../../document-extensions/revisions/client-api/session/including.mdx#including-revisions-with-sessionadvancedrawquery)
+ Learn here how to include revisions with documents retrieved via raw queries.
+
+
+
 diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-nodejs.mdx new file mode 100644 index 0000000000..3b7049a1d1 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-nodejs.mdx @@ -0,0 +1,46 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* For a general revisions overview see: [Revisions Overview](../../../document-extensions/revisions/overview.mdx).
+
+* Document revisions can be managed from the [Studio](../../../studio/database/settings/document-revisions.mdx) or from the **Client API**.
+
+* From the **Client API**, revisions can be configured and managed by:
+ * [Store Operations](../../../document-extensions/revisions/client-api/overview.mdx#revisions-store-operations)
+ * [Session Methods](../../../document-extensions/revisions/client-api/overview.mdx#revisions-session-methods)
+
+
+## Revisions Store Operations
+
+* [ConfigureRevisionsOperation](../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) - apply a revision configuration
+* [ConfigureRevisionsForConflictsOperation](../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx) - manage conflicting documents revisions
+* [GetRevisionsOperation](../../../document-extensions/revisions/client-api/operations/get-revisions.mdx) - get revisions
+
+
+
+## Revisions Session methods
+
+* **Get revisions**:
+
+ * [getFor](../../../document-extensions/revisions/client-api/session/loading.mdx#get-all-revisions) - retrieve all revisions kept for a document.
+ * [getMetadataFor](../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata) - retrieve metadata for all revisions kept for a document.
+ * [get](../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-creation-time) - retrieve revisions by change vector or creation time.
+ * Read [here](../../../client-api/session/how-to/perform-operations-lazily.mdx#getrevisions) about **lazy versions** of `getFor`, `getMetadataFor`, and `get`.
+* **Include revisions**:
+
+ * [includeRevisions](../../../document-extensions/revisions/client-api/session/including.mdx#section) - include revisions when retrieving documents via `session.load` or `session.query`.
+ * [rawQuery](../../../document-extensions/revisions/client-api/session/including.mdx#including-revisions-with-sessionadvancedrawquery) - learn how to include revisions when retrieving documents via raw queries.
+* **Count revisions**:
+
+ * [getCountFor](../../../document-extensions/revisions/client-api/session/counting.mdx#getcountfor) - get the number of revisions kept for a document.
+* **Force revision creation**:
+
+ * [forceRevisionCreationFor](../../../document-extensions/revisions/overview.mdx#force-revision-creation-via-api) - create a revision even if revision configuration is disabled.
+
+
+
 diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-php.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-php.mdx new file mode 100644 index 0000000000..236ec99dce --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-php.mdx @@ -0,0 +1,47 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* For a general revisions overview see: [Revisions Overview](../../../document-extensions/revisions/overview.mdx).
+
+* Document revisions can be managed using the [Studio](../../../studio/database/settings/document-revisions.mdx) or the **Client API**.
+
+* Find below the methods by which revisions can be configured and managed:
+ * [Store Operations](../../../document-extensions/revisions/client-api/overview.mdx#revisions-store-operations)
+ * [Session Methods](../../../document-extensions/revisions/client-api/overview.mdx#revisions-session-methods)
+
+
+## Revisions `store` Operations
+
+* [ConfigureRevisionsOperation](../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) - apply a revision configuration
+* [GetRevisionsOperation](../../../document-extensions/revisions/client-api/operations/get-revisions.mdx) - get revisions
+
+
+
+## Revisions `session` methods
+
+#### Get revisions:
+
+* Use [getFor](../../../document-extensions/revisions/client-api/session/loading.mdx#get-all-revisions)
+ to retrieve **all** revisions kept for a document.
+* Use [getMetadataFor](../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata)
+ to retrieve **metadata** for all revisions kept for a document.
+* Use [getBeforeDate](../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-creation-time)
+ to retrieve revisions by **creation time**.
+* Read [here](../../../client-api/session/how-to/perform-operations-lazily.mdx#get-revisions)
+ about **lazy versions** of revision methods.
+
+#### Count revisions:
+
+Use [getCountFor](../../../document-extensions/revisions/client-api/session/counting.mdx#getcountfor)
+to get the number of revisions kept for a document.
+
+#### Force revision creation:
+
+Read [here](../../../document-extensions/revisions/overview.mdx#force-revision-creation-via-api)
+about creating a revision even if revision configuration is disabled.
+
+
+
 diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-python.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-python.mdx new file mode 100644 index 0000000000..84bf4757af --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/_overview-python.mdx @@ -0,0 +1,47 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* For a general revisions overview see: [Revisions Overview](../../../document-extensions/revisions/overview.mdx).
+
+* Document revisions can be managed using the [Studio](../../../studio/database/settings/document-revisions.mdx) or the **Client API**.
+
+* Find below the methods by which revisions can be configured and managed:
+ * [Store Operations](../../../document-extensions/revisions/client-api/overview.mdx#revisions-store-operations)
+ * [Session Methods](../../../document-extensions/revisions/client-api/overview.mdx#revisions-session-methods)
+
+
+## Revisions `store` Operations
+
+* [ConfigureRevisionsOperation](../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx) - apply a revision configuration
+* [GetRevisionsOperation](../../../document-extensions/revisions/client-api/operations/get-revisions.mdx) - get revisions
+
+
+
+## Revisions `session` methods
+
+#### Get revisions:
+
+* Use [get_for](../../../document-extensions/revisions/client-api/session/loading.mdx#get-all-revisions)
+ to retrieve **all** revisions kept for a document.
+* Use [get_metadata_for](../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata)
+ to retrieve **metadata** for all revisions kept for a document.
+* Use [get_by_before_date](../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-creation-time) + to retrieve revisions by **creation time**. +* Read [here](../../../client-api/session/how-to/perform-operations-lazily.mdx#getrevisions) + about **lazy versions** of `get_for`, `get_metadata_for`, and `get_by_before_date`. +#### Count revisions: + +Use [get_count_for](../../../document-extensions/revisions/client-api/session/counting.mdx#getcountfor) +to get the number of revisions kept for a document. +#### Force revision creation: + +Read [here](../../../document-extensions/revisions/overview.mdx#force-revision-creation-via-api) +about creating a revision even if revision configuration is disabled. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_category_.json b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_category_.json new file mode 100644 index 0000000000..fbb9b4b328 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": "Operations" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-csharp.mdx new file mode 100644 index 0000000000..d4807de208 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-csharp.mdx @@ -0,0 +1,336 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `ConfigureRevisionsOperation` to apply the following [revisions configuration](../../../../studio/database/settings/document-revisions.mdx#revisions-configuration) to the database: + * **Default configuration** - applies to all document collections. + * **Collection-specific configurations** - override the default settings for these collections. + * To apply a configuration for conflict document revisions see [configure conflict revisions](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx). + +* The configuration passed to this operation will **REPLACE** the current revisions configuration in the database. + To **MODIFY** the existing configuration, fetch the current configuration from the database record first. + +* After applying the configuration, + revisions are created and purged for a document whenever the document is created, modified, or deleted. + +* To create a revision when there is no configuration defined (or enabled) see: [force revision creation](../../../../document-extensions/revisions/overview.mdx#force-revision-creation). + +* By default, the operation will be applied to the [default database](../../../../client-api/setting-up-default-database.mdx). + To operate on a different database see [switch operations to different database](../../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx).
+ +* In this page: + * [Replace configuration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#replace-configuration) + * [Modify configuration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#modify-configuration) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#syntax) + + +## Replace configuration + +* In this example, we create a new `RevisionsConfiguration` for the database. + If revisions configuration already exists in the database - it will be **replaced**. + + + + +{`// ============================================================================== +// Define default settings that will apply to ALL collections +// Note: this is optional +var defaultRevConfig = new RevisionsCollectionConfiguration() +{ + MinimumRevisionsToKeep = 100, + MinimumRevisionAgeToKeep = new TimeSpan(days: 7, 0, 0, 0), + MaximumRevisionsToDeleteUponDocumentUpdate = 15, + PurgeOnDelete = false, + Disabled = false + + // With this configuration: + // ------------------------ + // * A revision will be created anytime a document is modified or deleted. + // * Revisions of a deleted document can be accessed in the Revisions Bin view. + // * Only the latest 100 revisions will be kept. Older ones will be discarded. + // * Older revisions will be removed if they exceed 7 days on next revision creation. + // * A maximum of 15 revisions will be deleted each time a document is updated, + // until the defined '# of revisions to keep' limit is reached. +}; + +// ============================================================================== +// Define a specific configuration for the EMPLOYEES collection +// This will override the default settings +var employeesRevConfig = new RevisionsCollectionConfiguration() +{ + MinimumRevisionsToKeep = 50, + MinimumRevisionAgeToKeep = new TimeSpan(hours: 12, minutes: 0, seconds: 0), + PurgeOnDelete = true, + Disabled = false + + // With this configuration: + // ------------------------ + // * A revision will be created anytime an Employee document is modified. + // * When a document is deleted all its revisions will be removed. + // * At least 50 of the latest revisions will be kept. + // * Older revisions will be removed if they exceed 12 hours on next revision creation. 
+}; + +// ============================================================================== +// Define a specific configuration for the PRODUCTS collection +// This will override the default settings +var productsRevConfig = new RevisionsCollectionConfiguration() +{ + Disabled = true + // No revisions will be created for the Products collection, + // even though default configuration is enabled +}; + +// ============================================================================== +// Combine all configurations in the RevisionsConfiguration object +var revisionsConfig = new RevisionsConfiguration() +{ + Default = defaultRevConfig, + Collections = new Dictionary<string, RevisionsCollectionConfiguration>() + { + { "Employees", employeesRevConfig }, + { "Products", productsRevConfig } + } +}; + +// ============================================================================== +// Define the configure revisions operation, pass the configuration +var configureRevisionsOp = new ConfigureRevisionsOperation(revisionsConfig); + +// Execute the operation by passing it to Maintenance.Send +// Any existing configuration will be replaced with the new configuration passed +documentStore.Maintenance.Send(configureRevisionsOp); +`} + + + + +{`// ============================================================================== +// Define default settings that will apply to ALL collections +// Note: this is optional +var defaultRevConfig = new RevisionsCollectionConfiguration() +{ + MinimumRevisionsToKeep = 100, + MinimumRevisionAgeToKeep = new TimeSpan(days: 7, 0, 0, 0), + MaximumRevisionsToDeleteUponDocumentUpdate = 15, + PurgeOnDelete = false, + Disabled = false, + + // With this configuration: + // ------------------------ + // * A revision will be created anytime a document is modified or deleted. + // * Revisions of a deleted document can be accessed in the Revisions Bin view. + // * Only the latest 100 revisions will be kept. Older ones will be discarded. + // * Older revisions will be removed if they exceed 7 days on next revision creation. + // * A maximum of 15 revisions will be deleted each time a document is updated, + // until the defined '# of revisions to keep' limit is reached. +}; + +// ============================================================================== +// Define a specific configuration for the EMPLOYEES collection +// This will override the default settings +var employeesRevConfig = new RevisionsCollectionConfiguration() +{ + MinimumRevisionsToKeep = 50, + MinimumRevisionAgeToKeep = new TimeSpan(hours: 12, minutes: 0, seconds: 0), + PurgeOnDelete = true, + Disabled = false + + // With this configuration: + // ------------------------ + // * A revision will be created anytime an Employee document is modified. + // * When a document is deleted all its revisions will be removed. + // * At least 50 of the latest revisions will be kept. + // * Older revisions will be removed if they exceed 12 hours on next revision creation.
+}; + +// ============================================================================== +// Define a specific configuration for the PRODUCTS collection +// This will override the default settings +var productsRevConfig = new RevisionsCollectionConfiguration() +{ + Disabled = true + // No revisions will be created for the Products collection, + // even though default configuration is enabled +}; + +// ============================================================================== +// Combine all configurations in the RevisionsConfiguration object +var revisionsConfig = new RevisionsConfiguration() +{ + Default = defaultRevConfig, + Collections = new Dictionary<string, RevisionsCollectionConfiguration>() + { + { "Employees", employeesRevConfig }, + { "Products", productsRevConfig } + } +}; + +// ============================================================================== +// Define the configure revisions operation, pass the configuration +var configureRevisionsOp = new ConfigureRevisionsOperation(revisionsConfig); + +// Execute the operation by passing it to Maintenance.SendAsync +// Any existing configuration will be replaced with the new configuration passed +await documentStore.Maintenance.SendAsync(configureRevisionsOp); +`} + + + + + + +## Modify configuration + +* In this example, we fetch the existing revisions configuration from the database record and **modify** it. + + + + +{`// ============================================================================== +// Define the get database record operation: +var getDatabaseRecordOp = new GetDatabaseRecordOperation(documentStore.Database); +// Get the current revisions configuration from the database record: +RevisionsConfiguration revisionsConfig = + documentStore.Maintenance.Server.Send(getDatabaseRecordOp).Revisions; + +// ============================================================================== +// If no revisions configuration exists, then create a new configuration +if (revisionsConfig == null) +{ + revisionsConfig = new RevisionsConfiguration() + { + Default = defaultRevConfig, + Collections = new Dictionary<string, RevisionsCollectionConfiguration>() + { + { "Employees", employeesRevConfig }, + { "Products", productsRevConfig } + } + }; +} + +// ============================================================================== +// If a revisions configuration already exists, then modify it +else +{ + revisionsConfig.Default = defaultRevConfig; + revisionsConfig.Collections["Employees"] = employeesRevConfig; + revisionsConfig.Collections["Products"] = productsRevConfig; +} + +// ============================================================================== +// Define the configure revisions operation, pass the configuration +var configureRevisionsOp = new ConfigureRevisionsOperation(revisionsConfig); + +// Execute the operation by passing it to Maintenance.Send +// The existing configuration will be updated +documentStore.Maintenance.Send(configureRevisionsOp); +`} + + + + +{`// ============================================================================== +// Define the get database record operation: +var getDatabaseRecordOp = new GetDatabaseRecordOperation(documentStore.Database); +// Get the current revisions configuration from the database record: +RevisionsConfiguration revisionsConfig = + documentStore.Maintenance.Server.Send(getDatabaseRecordOp).Revisions; + +// ============================================================================== +// If no revisions configuration exists, then create a new configuration +if (revisionsConfig == null) +{ + revisionsConfig = new RevisionsConfiguration() + { + 
Default = defaultRevConfig, + Collections = new Dictionary<string, RevisionsCollectionConfiguration>() + { + { "Employees", employeesRevConfig }, + { "Products", productsRevConfig } + } + }; +} + +// ============================================================================== +// If a revisions configuration already exists, then modify it +else +{ + revisionsConfig.Default = defaultRevConfig; + revisionsConfig.Collections["Employees"] = employeesRevConfig; + revisionsConfig.Collections["Products"] = productsRevConfig; +} + +// ============================================================================== +// Define the configure revisions operation, pass the configuration +var configureRevisionsOp = new ConfigureRevisionsOperation(revisionsConfig); + +// Execute the operation by passing it to Maintenance.SendAsync +// The existing configuration will be updated +await documentStore.Maintenance.SendAsync(configureRevisionsOp); +`} + + + + + + +## Syntax + + + +{`public ConfigureRevisionsOperation(RevisionsConfiguration configuration); +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **configuration** | `RevisionsConfiguration` | The revisions configuration to apply | + + + +{`public class RevisionsConfiguration +\{ + public RevisionsCollectionConfiguration Default; + public Dictionary<string, RevisionsCollectionConfiguration> Collections; +\} +`} + + + +| Property | Type | Description | +| - | - | - | +| **Default** | `RevisionsCollectionConfiguration` | Optional default settings that apply to any collection not specified in `Collections`. | +| **Collections** | `Dictionary<string, RevisionsCollectionConfiguration>` | A Dictionary of collection-specific configurations<br/>The `keys` are the collection names<br/>The `values` are the corresponding configurations.<br/>Overrides the default settings for the collections defined. | + + + +{`public class RevisionsCollectionConfiguration +\{ + public long? MinimumRevisionsToKeep \{ get; set; \} + public TimeSpan? MinimumRevisionAgeToKeep \{ get; set; \} + public long? MaximumRevisionsToDeleteUponDocumentUpdate \{ get; set; \} + public bool PurgeOnDelete \{ get; set; \} + public bool Disabled \{ get; set; \} +\} +`} + + + + + +| Property | Type | Description | +| - | - | - | +| **MinimumRevisionsToKeep** | `long` | <ul><li>This number of revisions will be kept per document.</li><li>Older revisions exceeding this number will be purged upon the next document modification.</li><li> **Default** : `null` = no limit</li></ul> | +| **MinimumRevisionAgeToKeep** | `TimeSpan` | <ul><li>Limit the number of revisions kept per document by their age.</li><li>Revisions that are older than this time will be purged upon the next document modification.</li><li> **Default** : `null` = no age limit</li></ul> | +| **MaximumRevisionsToDeleteUponDocumentUpdate** | `long` | <ul><li>The maximum number of revisions to delete upon each document modification.</li><li> **Default** : `null` = no limit,<br/>all revisions pending purge will be deleted.</li></ul> | +| **PurgeOnDelete** | `bool` | <ul><li>`false` ( **Default** ) - Revisions of a deleted document are moved to the [Revisions Bin](../../../../studio/database/document-extensions/revisions/revisions-bin.mdx).</li><li>`true` - When a document is deleted all its revisions are also deleted.</li></ul> | +| **Disabled** | `bool` | <ul><li>`false` ( **Default** ) - Revisions will be created and purged according to the configuration.</li><li>`true` - No revisions will be created or purged.</li></ul> | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-java.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-java.mdx new file mode 100644 index 0000000000..27ac8cf0b1 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-java.mdx @@ -0,0 +1,157 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* [Revisions](../../../../document-extensions/revisions/overview.mdx) are snapshots of documents that + are taken automatically each time a document is updated or deleted. + +* Revisions can be stored indefinitely, or they can be deleted when certain conditions are met. These conditions can be set + using the Configure Revisions Operation. + +* In this page: + * [Syntax](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#syntax) + * [RevisionsCollectionConfiguration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#revisionscollectionconfiguration) + * [RevisionsConfiguration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#revisionsconfiguration) + * [ConfigureRevisionsOperation](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#configurerevisionsoperation) + * [Example](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#example) + + +## Syntax + +The `ConfigureRevisionsOperation` modifies the revisions settings for a particular [database](../../../../studio/database/settings/manage-database-group.mdx). +Within that database, each [collection](../../../../client-api/faq/what-is-a-collection.mdx) can have its own separate revisions +settings. + +To configure the revisions settings for a database and/or the collections in that database, follow these steps: + +[1.](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#revisionscollectionconfiguration) +Create a `RevisionsCollectionConfiguration` object for each desired collection. + +[2.](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#revisionsconfiguration) +Put those `RevisionsCollectionConfiguration` objects in a `RevisionsConfiguration` object. + +[3.](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#configurerevisionsoperation) +Send that `RevisionsConfiguration` to the server, as sketched below.
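+ +A minimal sketch of these three steps, using the same API as the full example at the end of this page (the `Orders` collection name and the `documentStore` variable are illustrative assumptions): + +{`// 1. Create a RevisionsCollectionConfiguration for each desired collection +RevisionsCollectionConfiguration ordersRevConfig = new RevisionsCollectionConfiguration(); +ordersRevConfig.setMinimumRevisionsToKeep(10L); + +// 2. Put the collection configurations in a RevisionsConfiguration +Map<String, RevisionsCollectionConfiguration> collections = new HashMap<>(); +collections.put("Orders", ordersRevConfig); +RevisionsConfiguration revisionsConfig = new RevisionsConfiguration(); +revisionsConfig.setCollections(collections); + +// 3. Send the RevisionsConfiguration to the server +documentStore.maintenance().send(new ConfigureRevisionsOperation(revisionsConfig)); +`} + 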
+### RevisionsCollectionConfiguration + +This object contains the four revisions settings for a particular collection: + + + +{`public class RevisionsCollectionConfiguration +\{ + private boolean disabled; + private Duration minimumRevisionAgeToKeep; + private Long minimumRevisionsToKeep; + private boolean purgeOnDelete; +\} +`} + + + +| Configuration Option | Description | Default | +| - | - | - | +| **minimumRevisionsToKeep** | The minimum number of revisions to keep per document | `null` - unlimited | +| **minimumRevisionAgeToKeep** | The minimum amount of time to keep each revision. [Format of `Duration`](https://docs.oracle.com/javase/8/docs/api/java/time/Duration.html). | `null` - unlimited | +| **disabled** | Indicates whether to completely disable revisions for documents in this collection | `false` | +| **purgeOnDelete** | When a document is deleted, this indicates whether all of its revisions should be deleted as well | `false` | + +A revision is only deleted if both the document's `minimumRevisionsToKeep` count is exceeded **and** the revision is +older than the `minimumRevisionAgeToKeep` limit. The oldest revisions are deleted first. + +* By default, both these options are set to `null`, meaning that an unlimited number of revisions will be saved +indefinitely. + +* If only `minimumRevisionsToKeep` is null, revisions will be deleted only when they are older than +`minimumRevisionAgeToKeep`. + +* If only `minimumRevisionAgeToKeep` is null, revisions will be deleted each time there are more revisions than +`minimumRevisionsToKeep`. + +These deletions will only take place _when a new revision is added_ to a document. Until a new revision is added, that +document's revisions can exceed these limits. +### RevisionsConfiguration + +This object contains a `Map` of revision configurations for each collection in the database, plus an optional default +configuration. + + + +{`public class RevisionsConfiguration +\{ + private Map<String, RevisionsCollectionConfiguration> collections; + private RevisionsCollectionConfiguration defaultConfig; +\} +`} + + + +| Property | Description | Default | +| - | - | - | +| **collections** | A map in which the keys are collection names, and the values are the corresponding configurations | `null` | +| **defaultConfig** | An optional default configuration that applies to any collection not listed in `collections` | `null` | + +Note that when this object is sent to the server, it overrides the configurations for **all** collections, including all existing +configurations currently on the server. If a collection is not listed in `collections` and `defaultConfig` has not been set, the +default values listed in the table [above](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#revisionscollectionconfiguration) +are applied. +### ConfigureRevisionsOperation + +Lastly, the operation itself sends the `RevisionsConfiguration` to the server, overriding **all** existing collection configurations. +You'll want to store these configurations on the client side so they don't have to be created from scratch each time you want to +modify them. + + + +{`public ConfigureRevisionsOperation(RevisionsConfiguration configuration); +`} + + + +| Parameter | Description | +| - | - | +| **configuration** | The new revision settings for a particular database | + + + +## Example + +The following code sample updates the settings of the Document Store's [default database](../../../../client-api/setting-up-default-database.mdx) +- which in this case is a database named "Northwind".
To update the configuration of a different database, use the +[`forDatabase()` method](../../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). + + + +{`// Create a configuration for the Employees collection +RevisionsCollectionConfiguration employeesRevConfig = new RevisionsCollectionConfiguration(); +employeesRevConfig.setMinimumRevisionAgeToKeep(Duration.ofDays(1)); +employeesRevConfig.setMinimumRevisionsToKeep(42L); +employeesRevConfig.setPurgeOnDelete(true); + +// Add the Employees configuration to a map +Map<String, RevisionsCollectionConfiguration> collections = new HashMap<>(); +collections.put("Employees", employeesRevConfig); + +// Create a default collection configuration +RevisionsCollectionConfiguration defaultRevConfig = new RevisionsCollectionConfiguration(); +defaultRevConfig.setMinimumRevisionAgeToKeep(Duration.ofDays(7)); +defaultRevConfig.setMinimumRevisionsToKeep(100L); +defaultRevConfig.setPurgeOnDelete(false); + +// Combine to create a configuration for the database +RevisionsConfiguration northwindRevConfig = new RevisionsConfiguration(); +northwindRevConfig.setCollections(collections); +northwindRevConfig.setDefaultConfig(defaultRevConfig); + +// Execute the operation to update the database +documentStore.maintenance().send(new ConfigureRevisionsOperation(northwindRevConfig)); +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-nodejs.mdx new file mode 100644 index 0000000000..d8647734f4 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-nodejs.mdx @@ -0,0 +1,203 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `ConfigureRevisionsOperation` to apply the following [revisions configuration](../../../../studio/database/settings/document-revisions.mdx#revisions-configuration) to the database: + * **Default configuration** - applies to all document collections. + * **Collection-specific configurations** - override the default settings for these collections. + * To apply a configuration for conflict document revisions see [configure conflict revisions](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx). + +* The configuration passed to this operation will **REPLACE** the current revisions configuration in the database. + To **MODIFY** the existing configuration, fetch the current configuration from the database record first. + +* After applying the configuration, + revisions are created and purged for a document whenever the document is created, modified, or deleted. + +* To create a revision when there is no configuration defined (or enabled) see: [force revision creation](../../../../document-extensions/revisions/overview.mdx#force-revision-creation). + +* By default, the operation will be applied to the [default database](../../../../client-api/setting-up-default-database.mdx). + To operate on a different database see [switch operations to different database](../../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx).
+ +* In this page: + * [Replace configuration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#replace-configuration) + * [Modify configuration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#modify-configuration) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#syntax) + + +## Replace configuration + +* In this example, we create a new `RevisionsConfiguration` for the database. + If revisions configuration already exists in the database - it will be **replaced**. + + + +{`// ============================================================================== +// Define default settings that will apply to ALL collections +// Note: this is optional +const defaultRevConfig = new RevisionsCollectionConfiguration(); +defaultRevConfig.minimumRevisionsToKeep = 100; +defaultRevConfig.minimumRevisionAgeToKeep = TimeUtil.millisToTimeSpan(3600 * 1000 * 24 * 7) // 7 days +defaultRevConfig.maximumRevisionsToDeleteUponDocumentUpdate = 15; +defaultRevConfig.purgeOnDelete = false; +defaultRevConfig.disabled = false; + +// With this configuration: +// ------------------------ +// * A revision will be created anytime a document is modified or deleted. +// * Revisions of a deleted document can be accessed in the Revisions Bin view. +// * Only the latest 100 revisions will be kept. Older ones will be discarded. +// * Older revisions will be removed if they exceed 7 days on next revision creation. +// * A maximum of 15 revisions will be deleted each time a document is updated, +// until the defined '# of revisions to keep' limit is reached. + +// ============================================================================== +// Define a specific configuration for the EMPLOYEES collection +// This will override the default settings +const employeesRevConfig = new RevisionsCollectionConfiguration(); +employeesRevConfig.minimumRevisionsToKeep = 50; +employeesRevConfig.minimumRevisionAgeToKeep = TimeUtil.millisToTimeSpan(3600 * 1000 * 12); // 12 hrs +employeesRevConfig.purgeOnDelete = true; +employeesRevConfig.disabled = false; + +// With this configuration: +// ------------------------ +// * A revision will be created anytime an Employee document is modified. +// * When a document is deleted all its revisions will be removed. +// * At least 50 of the latest revisions will be kept. +// * Older revisions will be removed if they exceed 12 hours on next revision creation. 
+ +// ============================================================================== +// Define a specific configuration for the PRODUCTS collection +// This will override the default settings +const productsRevConfig = new RevisionsCollectionConfiguration(); +productsRevConfig.disabled = true; + +// With this configuration: +// ------------------------ +// No revisions will be created for the Products collection, +// even though default configuration is enabled + +// ============================================================================== +// Combine all configurations in the RevisionsConfiguration object +const revisionsConfig = new RevisionsConfiguration(); +revisionsConfig.defaultConfig = defaultRevConfig; +revisionsConfig.collections = new Map(); +revisionsConfig.collections.set("Employees", employeesRevConfig); +revisionsConfig.collections.set("Products", productsRevConfig); + +// ============================================================================== +// Define the configure revisions operation, pass the configuration +const configureRevisionsOp = new ConfigureRevisionsOperation(revisionsConfig); + +// Execute the operation by passing it to maintenance.send +// Any existing configuration will be replaced with the new configuration passed +await documentStore.maintenance.send(configureRevisionsOp); +`} + + + + + +## Modify configuration + +* In this example, we fetch the existing revisions configuration from the database record and **modify** it. + + + +{`// ============================================================================== +// Define the get database record operation: +const getDatabaseRecordOp = new GetDatabaseRecordOperation(documentStore.database); +// Get the current revisions configuration from the database record: +const record = await documentStore.maintenance.server.send(getDatabaseRecordOp); +let revisionsConfig = record.revisions; + +// ============================================================================== +// If no revisions configuration exists, then create a new configuration +if (!revisionsConfig) \{ + revisionsConfig = new RevisionsConfiguration(); + revisionsConfig.defaultConfig = defaultRevConfig; + revisionsConfig.collections = new Map(); + revisionsConfig.collections.set("Employees", employeesRevConfig); + revisionsConfig.collections.set("Products", productsRevConfig); +\} + +// ============================================================================== +// If a revisions configuration already exists, then modify it +else \{ + revisionsConfig.defaultConfig = defaultRevConfig; + revisionsConfig.collections.set("Employees", employeesRevConfig); + revisionsConfig.collections.set("Products", productsRevConfig); +\} + +// ============================================================================== +// Define the configure revisions operation, pass the configuration +const configureRevisionsOp = new ConfigureRevisionsOperation(revisionsConfig); + +// Execute the operation by passing it to maintenance.send +// The existing configuration will be updated +await documentStore.maintenance.send(configureRevisionsOp); +`} + + + + + +## Syntax + + + +{`const configureRevisionsOp = new ConfigureRevisionsOperation(configuration); +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **configuration** | `RevisionsConfiguration` | The revisions configuration to apply | + + + +{`class RevisionsConfiguration +\{ + defaultConfig; + collections; +\} +`} + + + +| Property | Type | Description | +| - | - | - | +| **defaultConfig** | `RevisionsCollectionConfiguration` | Optional default settings that apply to any collection not specified in `collections`. | +| **collections** | `Map<string, RevisionsCollectionConfiguration>` | A Map of collection-specific configurations<br/>The `keys` are the collection names<br/>The `values` are the corresponding configurations.<br/>Overrides the default settings for the collections defined. | + + + +{`class RevisionsCollectionConfiguration +\{ + minimumRevisionsToKeep; + minimumRevisionAgeToKeep; + maximumRevisionsToDeleteUponDocumentUpdate; + purgeOnDelete; + disabled; +\} +`} + + + + + +| Property | Type | Description | +| - | - | - | +| **minimumRevisionsToKeep** | `number` | <ul><li>This number of revisions will be kept per document.</li><li>Older revisions exceeding this number will be purged upon the next document modification.</li><li> **Default** : `null` = no limit</li></ul> | +| **minimumRevisionAgeToKeep** | `string` | <ul><li>Limit the number of revisions kept per document by their age.</li><li>Revisions that are older than this time will be purged upon the next document modification.</li><li> **Default** : `null` = no age limit</li></ul> | +| **maximumRevisionsToDeleteUponDocumentUpdate** | `number` | <ul><li>The maximum number of revisions to delete upon each document modification.</li><li> **Default** : `null` = no limit,<br/>all revisions pending purge will be deleted.</li></ul> | +| **purgeOnDelete** | `boolean` | <ul><li>`false` ( **Default** ) - Revisions of a deleted document are moved to the [Revisions Bin](../../../../studio/database/document-extensions/revisions/revisions-bin.mdx).</li><li>`true` - When a document is deleted all its revisions are also deleted.</li></ul> | +| **disabled** | `boolean` | <ul><li>`false` ( **Default** ) - Revisions will be created and purged according to the configuration.</li><li>`true` - No revisions will be created or purged.</li></ul> | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-php.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-php.mdx new file mode 100644 index 0000000000..f343b7a31d --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-php.mdx @@ -0,0 +1,211 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `ConfigureRevisionsOperation` to apply the following [revisions configuration](../../../../studio/database/settings/document-revisions.mdx#revisions-configuration) to the database: + * **Default configuration** - applies to all document collections. + * **Collection-specific configurations** - override the default settings for these collections. + * To apply a configuration for conflict document revisions see [configure conflict revisions](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx). + +* The configuration passed to this operation will **REPLACE** the current revisions configuration in the database. + To **MODIFY** the existing configuration, fetch the current configuration from the database record first. + +* After applying the configuration, + revisions are created and purged for a document whenever the document is created, modified, or deleted. + +* To create a revision when there is no configuration defined (or enabled) see: [force revision creation](../../../../document-extensions/revisions/overview.mdx#force-revision-creation). + +* By default, the operation will be applied to the [default database](../../../../client-api/setting-up-default-database.mdx). + To operate on a different database see [switch operations to different database](../../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). + +* In this page: + * [Replace configuration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#replace-configuration) + * [Modify configuration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#modify-configuration) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#syntax) + + +## Replace configuration + +In this example, we create a new `RevisionsConfiguration` for the database. +If a revisions configuration already exists in the database, it will be **replaced**. + + +{`// ============================================================================== +// Define default settings that will apply to ALL collections +// Note: this is optional +$defaultRevConfig = new RevisionsCollectionConfiguration(); + +// With this configuration: +// ------------------------ +// * A revision will be created anytime a document is modified or deleted.
+// * Revisions of a deleted document can be accessed in the Revisions Bin view. +// * At least 100 of the latest revisions will be kept. +// * Older revisions will be removed if they exceed 7 days on next revision creation. +// * A maximum of 15 revisions will be deleted each time a document is updated, +// until the defined '# of revisions to keep' limit is reached. +$defaultRevConfig->setMinimumRevisionsToKeep(100); +$defaultRevConfig->setMinimumRevisionAgeToKeep(Duration::ofDays(7)); +$defaultRevConfig->setMaximumRevisionsToDeleteUponDocumentUpdate(15); +$defaultRevConfig->setPurgeOnDelete(false); +$defaultRevConfig->setDisabled(false); + + +// ============================================================================== +// Define a specific configuration for the EMPLOYEES collection +// This will override the default settings +$employeesRevConfig = new RevisionsCollectionConfiguration(); + +// With this configuration: +// ------------------------ +// * A revision will be created anytime an Employee document is modified. +// * When a document is deleted all its revisions will be removed. +// * At least 50 of the latest revisions will be kept. +// * Older revisions will be removed if they exceed 12 hours on next revision creation. + +$employeesRevConfig->setMinimumRevisionsToKeep(50); +$employeesRevConfig->setMinimumRevisionAgeToKeep(Duration::ofHours(12)); +$employeesRevConfig->setPurgeOnDelete(true); +$employeesRevConfig->setDisabled(false); + +// ============================================================================== +// Define a specific configuration for the PRODUCTS collection +// This will override the default settings +$productsRevConfig = new RevisionsCollectionConfiguration(); + +// No revisions will be created for the Products collection, +// even though default configuration is enabled +$productsRevConfig->setDisabled(true); + +// ============================================================================== +// Combine all configurations in the RevisionsConfiguration object +$revisionsConfig = new RevisionsConfiguration(); +$revisionsConfig->setDefaultConfig($defaultRevConfig); +$revisionsConfig->setCollections([ + "Employees" => $employeesRevConfig, + "Products" => $productsRevConfig +]); + +// ============================================================================== +// Define the configure revisions operation, pass the configuration +$configureRevisionsOp = new ConfigureRevisionsOperation($revisionsConfig); + +// Execute the operation by passing it to Maintenance.Send +// Any existing configuration will be replaced with the new configuration passed +$documentStore->maintenance()->send($configureRevisionsOp); +`} + + + + + +## Modify configuration + +In this example, we fetch the existing revisions configuration from the database record and **modify** it. 
+ + +{`// ============================================================================== +// Define the get database record operation: +$getDatabaseRecordOp = new GetDatabaseRecordOperation($documentStore->getDatabase()); +// Get the current revisions configuration from the database record: +/** @var RevisionsConfiguration $revisionsConfig */ +$revisionsConfig = $documentStore->maintenance()->server()->send($getDatabaseRecordOp)->getRevisions(); + +// ============================================================================== +// If no revisions configuration exists, then create a new configuration +if ($revisionsConfig == null) +\{ + $revisionsConfig = new RevisionsConfiguration(); + $revisionsConfig->setDefaultConfig($defaultRevConfig); + $revisionsConfig->setCollections([ + "Employees" => $employeesRevConfig, + "Products" => $productsRevConfig + ]); +\} + +// ============================================================================== +// If a revisions configuration already exists, then modify it +else +\{ + $revisionsConfig->setDefaultConfig($defaultRevConfig); + $collections = $revisionsConfig->getCollections(); + $collections["Employees"] = $employeesRevConfig; + $collections["Products"] = $productsRevConfig; + $revisionsConfig->setCollections($collections); +\} + +// ============================================================================== +// Define the configure revisions operation, pass the configuration +$configureRevisionsOp = new ConfigureRevisionsOperation($revisionsConfig); + +// Execute the operation by passing it to Maintenance.Send +// The existing configuration will be updated +$documentStore->maintenance()->send($configureRevisionsOp); +`} + + + + + +## Syntax + + + +{`new ConfigureRevisionsOperation(?RevisionsConfiguration $configuration); +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **configuration** | `?RevisionsConfiguration` | The revisions configuration to apply | + + + +{`class RevisionsConfiguration +\{ + public function getDefaultConfig(): ?RevisionsCollectionConfiguration; + public function setDefaultConfig(?RevisionsCollectionConfiguration $defaultConfig): void; + public function getCollections(): ?RevisionsCollectionConfigurationArray; + public function setCollections(null|RevisionsCollectionConfigurationArray|array $collections): void; +\} +`} + + + +| Property | Type | Description | +| - | - | - | +| **defaultConfig** | `RevisionsCollectionConfiguration` | Optional default settings that apply to any collection not specified in `collections`. | +| **collections** | `null` or `RevisionsCollectionConfigurationArray` or `array` | An array of collection-specific configurations<br/>The `keys` are the collection names<br/>The `values` are the corresponding configurations.<br/>Overrides the default settings for the collections defined. | + + + +{`class RevisionsCollectionConfiguration +\{ + private ?int $minimumRevisionsToKeep = null; + private ?Duration $minimumRevisionAgeToKeep = null; + private bool $disabled = false; + private bool $purgeOnDelete = false; + private ?int $maximumRevisionsToDeleteUponDocumentUpdate = null; + + // ... getters and setters ... +\} +`} + + + + + +| Property | Type | Description | +| - | - | - | +| **minimumRevisionsToKeep** | `?int` | <ul><li>This number of revisions will be kept per document.</li><li>Older revisions exceeding this number will be purged upon the next document modification.</li><li> **Default** : `null` = no limit</li></ul> | +| **minimumRevisionAgeToKeep** | `?Duration` | <ul><li>Limit the number of revisions kept per document by their age.</li><li>Revisions that are older than this time will be purged upon the next document modification.</li><li> **Default** : `null` = no age limit</li></ul> | +| **disabled** | `bool` | <ul><li>`false` ( **Default** ) - Revisions will be created and purged according to the configuration.</li><li>`true` - No revisions will be created or purged.</li></ul> | +| **purgeOnDelete** | `bool` | <ul><li>`false` ( **Default** ) - Revisions of a deleted document are moved to the [Revisions Bin](../../../../studio/database/document-extensions/revisions/revisions-bin.mdx).</li><li>`true` - When a document is deleted all its revisions are also deleted.</li></ul> | +| **maximumRevisionsToDeleteUponDocumentUpdate** | `?int` | <ul><li>The maximum number of revisions to delete upon each document modification.</li><li> **Default** : `null` = no limit,<br/>all revisions pending purge will be deleted.</li></ul> | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-python.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-python.mdx new file mode 100644 index 0000000000..e85626a97d --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_configure-revisions-python.mdx @@ -0,0 +1,198 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `ConfigureRevisionsOperation` to apply the following [revisions configuration](../../../../studio/database/settings/document-revisions.mdx#revisions-configuration) to the database: + * **Default configuration** - applies to all document collections. + * **Collection-specific configurations** - override the default settings for these collections. + * To apply a configuration for conflict document revisions see [configure conflict revisions](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx). + +* The configuration passed to this operation will **REPLACE** the current revisions configuration in the database. + To **MODIFY** the existing configuration, fetch the current configuration from the database record first. + +* After applying the configuration, + revisions are created and purged for a document whenever the document is created, modified, or deleted. + +* To create a revision when there is no configuration defined (or enabled) see: [force revision creation](../../../../document-extensions/revisions/overview.mdx#force-revision-creation). + +* By default, the operation will be applied to the [default database](../../../../client-api/setting-up-default-database.mdx). + To operate on a different database see [switch operations to different database](../../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). + +* In this page: + * [Replace configuration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#replace-configuration) + * [Modify configuration](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#modify-configuration) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#syntax) + + +## Replace configuration + +In this example, we create a new `RevisionsConfiguration` for the database. +If a revisions configuration already exists in the database, it will be **replaced**. + + +{`# ============================================================================== +# Define default settings that will apply to ALL collections +# Note: this is optional +default_rev_config = RevisionsCollectionConfiguration( + minimum_revisions_to_keep=100, + minimum_revisions_age_to_keep=timedelta(days=7), + maximum_revisions_to_delete_upon_document_creation=15, + purge_on_delete=False, + disabled=False, + # With this configuration: + # ------------------------ + # * A revision will be created anytime a document is modified or deleted. + # * Revisions of a deleted document can be accessed in the Revisions Bin view. + # * Only the latest 100 revisions will be kept. Older ones will be discarded. + # * Older revisions will be removed if they exceed 7 days on next revision creation.
+ # * A maximum of 15 revisions will be deleted each time a document is updated, + # until the defined '# of revisions to keep' limit is reached. +) + +# ============================================================================== +# Define a specific configuration for the EMPLOYEES collection +# This will override the default settings +employees_rev_config = RevisionsCollectionConfiguration( + minimum_revisions_to_keep=50, + minimum_revisions_age_to_keep=timedelta(hours=12), + purge_on_delete=True, + disabled=False, + # With this configuration: + # ------------------------ + # * A revision will be created anytime an Employee document is modified. + # * When a document is deleted all its revisions will be removed. + # * At least 50 of the latest revisions will be kept. + # * Older revisions will be removed if they exceed 12 hours on next revision creation. +) + +# ============================================================================== +# Define a specific configuration for the PRODUCTS collection +# This will override the default settings +products_rev_config = RevisionsCollectionConfiguration( + disabled=True + # No revisions will be created for the Products collection, + # even though default configuration is enabled +) + +# ============================================================================== +# Combine all configurations in the RevisionsConfiguration object +revisions_config = RevisionsConfiguration( + default_config=default_rev_config, + collections=\{"Employees": employees_rev_config, "Products": products_rev_config\}, +) + +# ============================================================================== +# Define the configure revisions operation, pass the configuration +configure_revisions_op = ConfigureRevisionsOperation(revisions_config) + +# Execute the operation by passing it to maintenance.send +# Any existing configuration will be replaced with the new configuration passed +store.maintenance.send(configure_revisions_op) +`} + + + + + +## Modify configuration + +In this example, we fetch the existing revisions configuration from the database record and **modify** it.
+ + +{`# ============================================================================== +# Define the get database record operation: +get_database_record_op = GetDatabaseRecordOperation(store.database) +# Get the current revisions configuration from the database record: +revisions_config = store.maintenance.server.send(get_database_record_op).revisions + +# ============================================================================== +# If no revisions configuration exists, then create a new configuration +if revisions_config is None: + revisions_config = RevisionsConfiguration( + default_config=default_rev_config, + collections=\{"Employees": employees_rev_config, "Products": products_rev_config\}, + ) + +# ============================================================================== +# If a revisions configuration already exists, then modify it +else: + revisions_config.default_config = default_rev_config + revisions_config.collections["Employees"] = employees_rev_config + revisions_config.collections["Products"] = products_rev_config + +# ============================================================================== +# Define the configure revisions operation, pass the configuration +configure_revisions_op = ConfigureRevisionsOperation(revisions_config) + +# Execute the operation by passing it to maintenance.send +# The existing configuration will be updated +store.maintenance.send(configure_revisions_op) +`} + + + + + +## Syntax + + + +{`class ConfigureRevisionsOperation(MaintenanceOperation[ConfigureRevisionsOperationResult]): + def __init__(self, configuration: RevisionsConfiguration): ... +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **configuration** | `RevisionsConfiguration` | The revisions configuration to apply | + + + +{`class RevisionsConfiguration: + def __init__( + self, + default_config: RevisionsCollectionConfiguration = None, + collections: Dict[str, RevisionsCollectionConfiguration] = None, + ): ... +`} + + + +| Property | Type | Description | +| - | - | - | +| **default_config** | `RevisionsCollectionConfiguration` | Optional default settings that apply to any collection not specified in `collections`. | +| **collections** | `Dict[str, RevisionsCollectionConfiguration]` | A Dictionary of collection-specific configurations<br/>The `keys` are the collection names<br/>The `values` are the corresponding configurations.<br/>Overrides the default settings for the collections defined. | + + + +{`class RevisionsCollectionConfiguration: + def __init__( + self, + minimum_revisions_to_keep: int = None, + minimum_revisions_age_to_keep: timedelta = None, + disabled: bool = False, + purge_on_delete: bool = False, + maximum_revisions_to_delete_upon_document_creation: int = None, + ): ... +`} + + + + + +| Property | Type | Description | +| - | - | - | +| **minimum_revisions_to_keep** | `int` | <ul><li>This number of revisions will be kept per document.</li><li>Older revisions exceeding this number will be purged upon the next document modification.</li><li> **Default** : `None` = no limit</li></ul> | +| **minimum_revisions_age_to_keep** | `timedelta` | <ul><li>Limit the number of revisions kept per document by their age.</li><li>Revisions that are older than this time will be purged upon the next document modification.</li><li> **Default** : `None` = no age limit</li></ul> | +| **disabled** | `bool` | <ul><li>`False` ( **Default** ) - Revisions will be created and purged according to the configuration.</li><li>`True` - No revisions will be created or purged.</li></ul> | +| **purge_on_delete** | `bool` | <ul><li>`False` ( **Default** ) - Revisions of a deleted document are moved to the [Revisions Bin](../../../../studio/database/document-extensions/revisions/revisions-bin.mdx).</li><li>`True` - When a document is deleted all its revisions are also deleted.</li></ul> | +| **maximum_revisions_to_delete_upon_document_creation** | `int` | <ul><li>The maximum number of revisions to delete upon each document modification.</li><li> **Default** : `None` = no limit,<br/>all revisions pending purge will be deleted.</li></ul> | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_conflict-revisions-configuration-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_conflict-revisions-configuration-csharp.mdx new file mode 100644 index 0000000000..7e3955da2a --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_conflict-revisions-configuration-csharp.mdx @@ -0,0 +1,131 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, RavenDB creates **revisions for conflict documents** for all collections + when conflicts occur and when they are resolved. + +* Use `ConfigureRevisionsForConflictsOperation` to disable the feature or modify the configuration. + +* If you define [default configuration](../../../../studio/database/settings/document-revisions.mdx#define-default-configuration), + then these settings will **override** the conflict revisions configuration. + +* If you define any [collection-specific configuration](../../../../studio/database/settings/document-revisions.mdx#define-collection-specific-configuration), + then these settings will also **override** the conflict revisions configuration for that collection. + * E.g., if the conflict revisions configuration defines that revisions created for conflicting documents will not be purged, + but a collection-specific configuration defines an age limit for revisions, + revisions for conflicting documents of this collection that exceed this age will be purged. + +* In this page: + * [Configure revisions for conflicts - Example](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx#configure-revisions-for-conflicts---example) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx#syntax) + * [Storage consideration](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx#storage-consideration) + +## Configure revisions for conflicts - Example + + + + +{`// Define the settings that will apply for conflict revisions (for all collections) +var conflictRevConfig = new RevisionsCollectionConfiguration +{ + PurgeOnDelete = true, + MinimumRevisionAgeToKeep = new TimeSpan(days: 45, 0, 0, 0) + + // With this configuration: + // ------------------------ + // * A revision will be created for conflict documents + // * When the parent document is deleted all its revisions will be removed. + // * Revisions that exceed 45 days will be removed on next revision creation.
+}; + +// Define the configure conflict revisions operation, pass the configuration +var configureConflictRevisionsOp = + new ConfigureRevisionsForConflictsOperation(documentStore.Database, conflictRevConfig); + +// Execute the operation by passing it to Maintenance.Server.Send +// The existing conflict revisions configuration will be replaced by the configuration passed +documentStore.Maintenance.Server.Send(configureConflictRevisionsOp); +`} + + + + +{`// Define the settings that will apply for conflict revisions (for all collections) +var conflictRevConfig = new RevisionsCollectionConfiguration +{ + PurgeOnDelete = true, + MinimumRevisionAgeToKeep = new TimeSpan(days: 45, 0, 0, 0) + + // With this configuration: + // ------------------------ + // * A revision will be created for conflict documents + // * When the parent document is deleted all its revisions will be removed. + // * Revisions that exceed 45 days will be removed on next revision creation. +}; + +// Define the configure conflict revisions operation, pass the configuration +var configureConflictRevisionsOp = + new ConfigureRevisionsForConflictsOperation(documentStore.Database, conflictRevConfig); + +// Execute the operation by passing it to Maintenance.Server.Send +// The existing conflict revisions configuration will be replaced by the configuration passed +await documentStore.Maintenance.Server.SendAsync(configureConflictRevisionsOp); +`} + + + + + + +## Syntax + + + +{`public ConfigureRevisionsForConflictsOperation(string database, RevisionsCollectionConfiguration configuration) +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **database** | `string` | The name of the database whose conflict revisions you want to manage | +| **configuration** | `RevisionsCollectionConfiguration` | The conflict revisions configuration to apply | + + + +{`public class RevisionsCollectionConfiguration +\{ + public long? MinimumRevisionsToKeep \{ get; set; \} + public TimeSpan? MinimumRevisionAgeToKeep \{ get; set; \} + public long? MaximumRevisionsToDeleteUponDocumentUpdate \{ get; set; \} + public bool PurgeOnDelete \{ get; set; \} + public bool Disabled \{ get; set; \} +\} +`} + + + +* See properties explanation and default values [here](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#revisions-collection-configuration-object). + + + + + +#### Storage consideration + +Automatic creation of conflict revisions can help track document conflicts and understand their reasons. +However, it can also lead to a significant increase in the database size if many conflicts occur unexpectedly. + +* Consider limiting the number of conflict revisions kept per document using: + `MinimumRevisionsToKeep` and/or `MinimumRevisionAgeToKeep`. + +* Revisions are purged upon [modification of their parent documents](../../../../document-extensions/revisions/overview.mdx#revisions-configuration-execution). + If you want to purge a large number of revisions at once, you can **cautiously** [enforce configuration](../../../../studio/database/settings/document-revisions.mdx#enforce-configuration). 
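+ +For example, a minimal sketch of such a cap, using the same operation as the example above (the limit values are illustrative assumptions; choose limits that fit your expected conflict rate): + +{`// Cap how many conflict revisions accumulate per document +var cappedConflictRevConfig = new RevisionsCollectionConfiguration +{ + MinimumRevisionsToKeep = 10, + MinimumRevisionAgeToKeep = new TimeSpan(days: 30, 0, 0, 0) +}; + +// Apply the capped configuration to conflict revisions +documentStore.Maintenance.Server.Send( + new ConfigureRevisionsForConflictsOperation(documentStore.Database, cappedConflictRevConfig)); +`} + 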
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_conflict-revisions-configuration-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_conflict-revisions-configuration-nodejs.mdx new file mode 100644 index 0000000000..62b91d3fac --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_conflict-revisions-configuration-nodejs.mdx @@ -0,0 +1,102 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, RavenDB creates **revisions for conflict documents** for all collections + when conflicts occur and when they are resolved. + +* Use `ConfigureRevisionsForConflictsOperation` to disable the feature or modify the configuration. + +* If you define [default configuration](../../../../studio/database/settings/document-revisions.mdx#define-default-configuration), + then these settings will **override** the conflict revisions configuration. + +* If you define any [collection-specific configuration](../../../../studio/database/settings/document-revisions.mdx#define-collection-specific-configuration), + then these settings will also **override** the conflict revisions configuration for that collection. + * E.g., if the conflict revisions configuration defines that revisions created for conflicting documents will not be purged, + but a collection-specific configuration defines an age limit for revisions, + revisions for conflicting documents of this collection that exceed this age will be purged. + +* In this page: + * [Configure revisions for conflicts - Example](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx#configure-revisions-for-conflicts---example) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx#syntax) + * [Storage consideration](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx#storage-consideration) + +## Configure revisions for conflicts - Example + + + +{`// Define the settings that will apply for conflict revisions (for all collections) +const conflictRevConfig = new RevisionsCollectionConfiguration(); +conflictRevConfig.minimumRevisionAgeToKeep = TimeUtil.millisToTimeSpan(3600 * 1000 * 24 * 45) // 45 days +conflictRevConfig.purgeOnDelete = true; + +// With this configuration: +// ------------------------ +// * A revision will be created for conflict documents +// * When the parent document is deleted all its revisions will be removed. +// * Revisions that exceed 45 days will be removed on next revision creation. 
+ +// Define the configure conflict revisions operation, pass the configuration +const configureConflictRevisionsOp = + new ConfigureRevisionsForConflictsOperation(documentStore.database, conflictRevConfig); + +// Execute the operation by passing it to maintenance.server.send +// The existing conflict revisions configuration will be replaced by the configuration passed +await documentStore.maintenance.server.send(configureConflictRevisionsOp); +`} + + + + + +## Syntax + + + +{`const configureRevisionsOp = new ConfigureRevisionsForConflictsOperation(database, configuration); +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **database** | `string` | The name of the database whose conflict revisions you want to manage | +| **configuration** | `RevisionsCollectionConfiguration` | The conflict revisions configuration to apply | + + + +{`class RevisionsCollectionConfiguration +\{ + minimumRevisionsToKeep; + minimumRevisionAgeToKeep; + maximumRevisionsToDeleteUponDocumentUpdate; + purgeOnDelete; + disabled; +\} +`} + + + +* See properties explanation and default values [here](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#revisions-collection-configuration-object). + + + + + +#### Storage consideration + +Automatic creation of conflict revisions can help track document conflicts and understand their reasons. +However, it can also lead to a significant increase in the database size if many conflicts occur unexpectedly. + +* Consider limiting the number of conflict revisions kept per document using: + `minimumRevisionsToKeep` and/or `minimumRevisionAgeToKeep`. + +* Revisions are purged upon [modification of their parent documents](../../../../document-extensions/revisions/overview.mdx#revisions-configuration-execution). + If you want to purge a large number of revisions at once, you can **cautiously** [enforce configuration](../../../../studio/database/settings/document-revisions.mdx#enforce-configuration). + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_conflict-revisions-configuration-php.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_conflict-revisions-configuration-php.mdx new file mode 100644 index 0000000000..feecc330e2 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_conflict-revisions-configuration-php.mdx @@ -0,0 +1,104 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, RavenDB creates **revisions for conflict documents** for all collections + when conflicts occur and when they are resolved. + +* Use `ConfigureRevisionsForConflictsOperation` to disable the feature or modify the configuration. + +* If you define [default configuration](../../../../studio/database/settings/document-revisions.mdx#define-default-configuration), + then these settings will **override** the conflict revisions configuration. + +* If you define any [collection-specific configuration](../../../../studio/database/settings/document-revisions.mdx#define-collection-specific-configuration), + then these settings will also **override** the conflict revisions configuration for that collection. 
+ * E.g., if the conflict revisions configuration defines that revisions created for conflicting documents will not be purged, + but a collection-specific configuration defines an age limit for revisions, + revisions for conflicting documents of this collection that exceed this age will be purged. + +* In this page: + * [Configure revisions for conflicts - Example](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx#configure-revisions-for-conflicts---example) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx#syntax) + * [Storage consideration](../../../../document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx#storage-consideration) + +## Configure revisions for conflicts - Example + + + +{`// Define the settings that will apply for conflict revisions (for all collections) +$conflictRevConfig = new RevisionsCollectionConfiguration(); + +// With this configuration: +// ------------------------ +// * A revision will be created for conflict documents +// * When the parent document is deleted all its revisions will be removed. +// * Revisions that exceed 45 days will be removed on next revision creation. +$conflictRevConfig->setPurgeOnDelete(true); +$conflictRevConfig->setMinimumRevisionAgeToKeep(Duration::ofDays(45)); + +// Define the configure conflict revisions operation, pass the configuration +$configureConflictRevisionsOp = + new ConfigureRevisionsForConflictsOperation($documentStore->getDatabase(), $conflictRevConfig); + +// Execute the operation by passing it to Maintenance.Server.Send +// The existing conflict revisions configuration will be replaced by the configuration passed +$documentStore->maintenance()->server()->send($configureConflictRevisionsOp); +`} + + + + + +## Syntax + + + +{`new ConfigureRevisionsForConflictsOperation(?string $database, ?RevisionsCollectionConfiguration $configuration) +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **database** | `string` | The name of the database whose conflict revisions you want to manage | +| **configuration** | `RevisionsCollectionConfiguration` | The conflict revisions configuration to apply | + + + +{`class RevisionsCollectionConfiguration +\{ + private ?int $minimumRevisionsToKeep = null; + private ?Duration $minimumRevisionAgeToKeep = null; + private bool $disabled = false; + private bool $purgeOnDelete = false; + private ?int $maximumRevisionsToDeleteUponDocumentUpdate = null; + + // ... getters and setters ... +\} +`} + + + +* See properties explanation and default values [here](../../../../document-extensions/revisions/client-api/operations/configure-revisions.mdx#revisions-collection-configuration-object). + + + + + +#### Storage consideration + +Automatic creation of conflict revisions can help track document conflicts and understand their reasons. +However, it can also lead to a significant increase in the database size if many conflicts occur unexpectedly. + +* Consider limiting the number of conflict revisions kept per document using: + `MinimumRevisionsToKeep` and/or `MinimumRevisionAgeToKeep`. + +* Revisions are purged upon [modification of their parent documents](../../../../document-extensions/revisions/overview.mdx#revisions-configuration-execution). + If you want to purge a large number of revisions at once, you can **cautiously** [enforce configuration](../../../../studio/database/settings/document-revisions.mdx#enforce-configuration). 
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_delete-revisions-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_delete-revisions-csharp.mdx new file mode 100644 index 0000000000..22d6d21f6f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_delete-revisions-csharp.mdx @@ -0,0 +1,252 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `DeleteRevisionsOperation` to delete document revisions. + The document itself is not deleted; only its revisions are deleted, as specified by the operation's parameters. + +* Existing revisions will be deleted regardless of the current [revisions settings](../../../../studio/database/settings/document-revisions.mdx), + even if these settings are disabled. + +* When working with a secure server: + * The delete revisions action will be logged in the [audit log](../../../../server/security/audit-log/audit-log-overview.mdx). + * This operation is only available for a client certificate with a [security clearance](../../../../server/security/authorization/security-clearance-and-permissions.mdx) of _DatabaseAdmin_ or higher. + +* By default, the operation will be applied to the [default database](../../../../client-api/setting-up-default-database.mdx). + To operate on a different database, see [switch operations to different database](../../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). + +* In this page: + * [Delete all revisions - single document](../../../../document-extensions/revisions/client-api/operations/delete-revisions.mdx#delete-all-revisions---single-document) + * [Delete revisions - multiple documents](../../../../document-extensions/revisions/client-api/operations/delete-revisions.mdx#delete-revisions---multiple-documents) + * [Delete revisions by time frame](../../../../document-extensions/revisions/client-api/operations/delete-revisions.mdx#delete-revisions-by-time-frame) + * [Delete revisions by change vectors](../../../../document-extensions/revisions/client-api/operations/delete-revisions.mdx#delete-revisions-by-change-vectors) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/delete-revisions.mdx#syntax) + + +## Delete all revisions - single document + +In this example, we delete ALL revisions of a document. +Both types of revisions, those resulting from the [revisions settings](../../../../studio/database/settings/document-revisions.mdx) and those generated manually via +[force revision creation](../../../../document-extensions/revisions/overview.mdx#force-revision-creation), will be deleted.
+ + + +{`// Define the delete revisions operation: + +// Delete ALL existing revisions for document "orders/830-A" +var deleteRevisionsOp = new DeleteRevisionsOperation(documentId: "orders/830-A", + // Revisions that were created manually will also be removed + removeForceCreatedRevisions: true); + +// Execute the operation by passing it to Maintenance.Send +var numberOfRevisionsDeleted = store.Maintenance.Send(deleteRevisionsOp); + +// Running the above code on RavenDB's sample data results in the removal of 29 revisions +Assert.Equal(29, numberOfRevisionsDeleted.TotalDeletes); +`} + + + + +{`// Define the delete revisions operation: + +// Delete ALL existing revisions for document "orders/830-A" +var deleteRevisionsOp = new DeleteRevisionsOperation(documentId: "orders/830-A", + // Revisions that were created manually will also be removed + removeForceCreatedRevisions: true); + +// Execute the operation by passing it to Maintenance.SendAsync +var numberOfRevisionsDeleted = await store.Maintenance.SendAsync(deleteRevisionsOp); + +// Running the above code on RavenDB's sample data results in the removal of 29 revisions +Assert.Equal(29, numberOfRevisionsDeleted.TotalDeletes); +`} + + + + + + +## Delete revisions - multiple documents + +You can specify multiple documents from which to delete revisions. + + + + +{`// Delete existing revisions for the specified documents +var deleteRevisionsOp = new DeleteRevisionsOperation( + documentIds: new List<string>() { "orders/829-A", "orders/828-A", "orders/827-A" }, + // Revisions that were created manually will Not be removed + removeForceCreatedRevisions: false); + +var numberOfRevisionsDeleted = store.Maintenance.Send(deleteRevisionsOp); + +// Running the above on RavenDB's sample data results in the removal of 19 revisions +Assert.Equal(19, numberOfRevisionsDeleted.TotalDeletes); +`} + + + + +{`// Delete existing revisions for the specified documents +var deleteRevisionsOp = new DeleteRevisionsOperation( + documentIds: new List<string>() { "orders/829-A", "orders/828-A", "orders/827-A" }, + // Revisions that were created manually will Not be removed + removeForceCreatedRevisions: false); + +var numberOfRevisionsDeleted = await store.Maintenance.SendAsync(deleteRevisionsOp); + +// Running the above on RavenDB's sample data results in the removal of 19 revisions +Assert.Equal(19, numberOfRevisionsDeleted.TotalDeletes); +`} + + + + + + +## Delete revisions by time frame + +You can specify a time frame from which to delete revisions. +Only revisions that were created within that time frame (inclusive) will be deleted. +The time should be specified in UTC.
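+If your range starts out in local time, it can be converted to UTC before being passed to the operation. A minimal sketch of this conversion (not part of the original example; the date values are arbitrary), with the full example following below:
+
+{`// Local times (illustrative values) converted to UTC for the operation
+var localFrom = new DateTime(2018, 7, 27, 11, 0, 0, DateTimeKind.Local);
+var localTo = new DateTime(2018, 7, 27, 12, 0, 0, DateTimeKind.Local);
+
+var deleteRevisionsOp = new DeleteRevisionsOperation(
+    documentId: "orders/826-A",
+    from: localFrom.ToUniversalTime(), // convert to UTC
+    to: localTo.ToUniversalTime());
+`}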
+ + + +{`var deleteFrom = DateTime.Parse("2018-07-27T09:11:52.0Z"); +var deleteTo = DateTime.Parse("2018-07-27T09:11:54.0Z"); + +// Delete existing revisions within the specified time frame +var deleteRevisionsOp = + new DeleteRevisionsOperation(documentId: "orders/826-A", from: deleteFrom, to: deleteTo); + +var numberOfRevisionsDeleted = store.Maintenance.Send(deleteRevisionsOp); +`} + + + + +{`var deleteFrom = DateTime.Parse("2018-07-27T09:11:52.0Z"); +var deleteTo = DateTime.Parse("2018-07-27T09:11:54.0Z"); + +// Delete existing revisions within the specified time frame +var deleteRevisionsOp = + new DeleteRevisionsOperation(documentId: "orders/826-A", from: deleteFrom, to: deleteTo); + +var numberOfRevisionsDeleted = await store.Maintenance.SendAsync(deleteRevisionsOp); +`} + + + + + + +## Delete revisions by change vectors + +Each revision has its own unique [change vector](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-change-vector). +You can specify which revisions to delete by providing their corresponding change vectors. +No exception is thrown if a change vector doesn’t match any revision. + + + + +{`// Get the change-vectors for the revisions of the specified document +var revisionsChangeVectors = session.Advanced.Revisions + .GetMetadataFor("orders/825-A") + .Select(m => m.GetString(Constants.Documents.Metadata.ChangeVector)) + .ToList(); + +// Delete the revisions by their change-vector +var revisionsToDelete = + new List<string>() { revisionsChangeVectors[0], revisionsChangeVectors[1] }; + +var deleteRevisionsOp = + new DeleteRevisionsOperation(documentId: "orders/825-A", revisionsToDelete); + +var numberOfRevisionsDeleted = store.Maintenance.Send(deleteRevisionsOp); +`} + + + + +{`// Get the change-vectors for the revisions of the specified document +var metadata = await asyncSession.Advanced.Revisions + .GetMetadataForAsync("orders/825-A"); + +var revisionsChangeVectors = metadata + .Select(m => m.GetString(Constants.Documents.Metadata.ChangeVector)) + .ToList(); + +// Delete the revisions by their change-vector +var revisionsToDelete = + new List<string>() { revisionsChangeVectors[0], revisionsChangeVectors[1] }; + +var deleteRevisionsOp = + new DeleteRevisionsOperation(documentId: "orders/825-A", revisionsToDelete); + +var numberOfRevisionsDeleted = await store.Maintenance.SendAsync(deleteRevisionsOp); +`} + + + + + + +Avoid deleting a "Delete Revision" using the `DeleteRevisionsOperation` operation. +Consider the following scenario: + + 1. A document that has revisions is deleted. + + 2. A "Delete Revision" is created for the document, and it will be listed in the [Revisions Bin](../../../../studio/database/document-extensions/revisions/revisions-bin.mdx). + + 3. The revisions of this deleted document remain accessible via the Revisions Bin. + + 4. If you remove this "Delete Revision" by providing its change vector to `DeleteRevisionsOperation`, + the "Delete Revision" will be removed from the Revisions Bin, causing the associated revisions to become orphaned. + However, you will still be able to access these orphaned revisions from the [All Revisions](../../../../studio/database/document-extensions/revisions/all-revisions.mdx) view. + + + + +## Syntax + + + +{`Available overloads: +==================== +public DeleteRevisionsOperation(string documentId, + bool removeForceCreatedRevisions = false); + +public DeleteRevisionsOperation(string documentId, + DateTime? from, DateTime?
to, bool removeForceCreatedRevisions = false); + +public DeleteRevisionsOperation(List<string> documentIds, + bool removeForceCreatedRevisions = false); + +public DeleteRevisionsOperation(List<string> documentIds, + DateTime? from, DateTime? to, bool removeForceCreatedRevisions = false); + +public DeleteRevisionsOperation(string documentId, + List<string> revisionsChangeVectors, bool removeForceCreatedRevisions = false); +`} + + + +| Parameter | Type | Description | | - | - | - | | **documentId** | `string` | The ID of the document whose revisions you want to delete. | | **documentIds** | `List<string>` | A list of document IDs whose revisions you want to delete. | | **removeForceCreatedRevisions** | `bool` | `true` - Include [force-created revisions](../../../../document-extensions/revisions/overview.mdx#force-revision-creation) in the deletion.
`false` - Exclude force-created revisions. | | **from** | `DateTime` | The start of the date range for the revisions to delete (inclusive). | | **to** | `DateTime` | The end of the date range for the revisions to delete (inclusive). | | **revisionsChangeVectors** | `List<string>` | A list of change vectors corresponding to the revisions that you want to delete. | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-csharp.mdx new file mode 100644 index 0000000000..bb3bcfa612 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-csharp.mdx @@ -0,0 +1,197 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetRevisionsOperation` to GET the document's revisions. + +* To only COUNT the number of revisions without getting them, use the [GetCountFor](../../../../document-extensions/revisions/client-api/session/counting.mdx) session method. + +* In this page: + * [Get all revisions](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#get-all-revisions) + * [Paging results](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#paging-results) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#syntax) + + +## Get all revisions + + + + +{`// Define the get revisions operation, pass the document id +var getRevisionsOp = new GetRevisionsOperation<Company>("Companies/1-A"); + +// Execute the operation by passing it to Operations.Send +RevisionsResult<Company> revisions = documentStore.Operations.Send(getRevisionsOp); + +// The revisions info: +List<Company> allRevisions = revisions.Results; // All the revisions +int revisionsCount = revisions.TotalResults; // Total number of revisions +`} + + + + +{`// Define the get revisions operation, pass the document id +var getRevisionsOp = new GetRevisionsOperation<Company>("Companies/1-A"); + +// Execute the operation by passing it to Operations.SendAsync +RevisionsResult<Company> revisions = await documentStore.Operations.SendAsync(getRevisionsOp); + +// The revisions info: +List<Company> allRevisions = revisions.Results; // All the revisions +int revisionsCount = revisions.TotalResults; // Total number of revisions +`} + + + + + + +## Paging results + +* Get and process revisions, one page at a time: + + + + +{`var start = 0; +var pageSize = 100; + +while (true) +{ + // Execute the get revisions operation + // Pass the document id, start & page size to get + RevisionsResult<Company> revisions = documentStore.Operations.Send( + new GetRevisionsOperation<Company>("Companies/1-A", start, pageSize)); + + { + // Process the retrieved revisions here + } + + if (revisions.Results.Count < pageSize) + break; // No more revisions to retrieve + + // Increment 'start' by page-size, to get the "next page" in next iteration + start += pageSize; +} +`} + + + + +{`var start = 0; +var pageSize = 100; + +while (true) +{ + // Execute the get revisions operation + // Pass the document id, start & page size to get + RevisionsResult<Company> revisions = await documentStore.Operations.SendAsync( + new GetRevisionsOperation<Company>("Companies/1-A", start, pageSize)); + { + // Process the retrieved revisions here + } + + if (revisions.Results.Count < pageSize) + break; // No more revisions to retrieve + + // Increment 'start' by page-size,
to get the "next page" in next iteration + start += pageSize; +} +`} + + + + +* The document ID, start & page size can be wrapped in a `Parameter` object: + + + + +{`var parameters = new GetRevisionsOperation.Parameters +{ + Id = "Companies/1-A", + Start = 0, + PageSize = 100 +}; + +RevisionsResult revisions = documentStore.Operations.Send( + new GetRevisionsOperation(parameters)); +`} + + + + +{`var parameters = new GetRevisionsOperation.Parameters +{ + Id = "Companies/1-A", + Start = 0, + PageSize = 100 +}; + +RevisionsResult revisions = await documentStore.Operations.SendAsync( + new GetRevisionsOperation(parameters)); +`} + + + + + + +## Syntax + +Available overloads: + + + +{`// Get all revisions for the specified document: +public GetRevisionsOperation(string id); + +// Page revisions: +public GetRevisionsOperation(string id, int start, int pageSize); +public GetRevisionsOperation(Parameters parameters) +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **id** | `string` | Document ID for which to get revisions | +| **start** | `int` | Revision number to start from | +| **pageSize** | `int` | Number of revisions to get | +| **parameters** | `Parameters` | An object that wraps `Id`, `Start`, and `PageSize` (see below) | + + + +{`public class Parameters +\{ + public string Id \{ get; set; \} // Document ID for which to get revisions + public int? Start \{ get; set; \} // Revision number to start from + public int? PageSize \{ get; set; \} // Number of revisions to get +\} +`} + + + +| Return value of `store.Operations.Send(getRevisionsOp)` | | +| - | - | +| `RevisionsResult` | Object with revisions results | + + + + +{`public class RevisionsResult +\{ + public List Results \{ get; set; \} // The retrieved revisions + public int TotalResults \{ get; set; \} // Total number of revisions that exist for the document +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-nodejs.mdx new file mode 100644 index 0000000000..210f6e0416 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-nodejs.mdx @@ -0,0 +1,115 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetRevisionsOperation` to GET the document's revisions. + +* To only COUNT the number of revisions without getting them, use the [getCountFor](../../../../document-extensions/revisions/client-api/session/counting.mdx) session method. 
+ +* In this page: + * [Get all revisions](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#get-all-revisions) + * [Paging results](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#paging-results) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#syntax) + + +## Get all revisions + + + +{`// Define the get revisions operation, pass the document id +const getRevisionsOp = new GetRevisionsOperation("Companies/1-A"); + +// Execute the operation by passing it to operations.send +const revisions = await documentStore.operations.send(getRevisionsOp); + +// The revisions info: +const allRevisions = revisions.results; // All the revisions +const revisionsCount = revisions.totalResults; // Total number of revisions +`} + + + + + +## Paging results + +* Get and process revisions, one page at a time: + + + +{`const parameters = \{ + start: 0, + pageSize: 100 +\}; + +while (true) +\{ + // Execute the get revisions operation + // Pass parameters with document id, start & page size + const revisions = await documentStore.operations.send( + new GetRevisionsOperation("Companies/1-A", parameters)); + + \{ + // Process the retrieved revisions here + \} + + if (revisions.results.length < parameters.pageSize) + break; // No more revisions to retrieve + + // Increment 'start' by page-size, to get the "next page" in next iteration + parameters.start += parameters.pageSize; +\} +`} + + + + + +## Syntax + + + +{`// Available overloads: +const getRevisionsOp = new GetRevisionsOperation(id); +const getRevisionsOp = new GetRevisionsOperation(id, parameters); +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **id** | `string` | Document ID for which to get revisions | +| **parameters** | `object` | An object that wraps `start` and `pageSize` (see below) | + + + +{`// The parameters object +\{ + start, // Revision number to start from + pageSize // Number of revisions to get +\} +`} + + + +| Return value of `store.operations.send(getRevisionsOp)` | | +| - | - | +| `RevisionsResult` | Object with revisions results | + + + +{`class RevisionsResult +\{ + results; // The retrieved revisions + totalResults; // Total number of revisions that exist for the document +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-php.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-php.mdx new file mode 100644 index 0000000000..232e9372d2 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-php.mdx @@ -0,0 +1,95 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetRevisionsOperation` to GET the document's revisions. + +* To only COUNT the number of revisions without getting them, use the [get_count_for](../../../../document-extensions/revisions/client-api/session/counting.mdx) session method. 
+ +* In this page: + * [Get all revisions](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#get-all-revisions) + * [Paging results](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#paging-results) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#syntax) + + +## Get all revisions + + + +{`// Define the get revisions operation, pass the document id +$getRevisionsOp = new GetRevisionsOperation(Company::class, "Companies/1-A"); + +// Execute the operation by passing it to Operations.Send +/** @var RevisionsResult $revisions */ +$revisions = $documentStore->operations()->send($getRevisionsOp); + +// The revisions info: +/** @var array $allRevisions */ +$allRevisions = $revisions->getResults(); // All the revisions +$revisionsCount = $revisions->getTotalResults(); // Total number of revisions +`} + + + + + +## Paging results + +* Get and process revisions, one page at a time: + + +{`$start = 0; +$pageSize = 100; + +while (true) +\{ + // Execute the get revisions operation + // Pass the document id, start & page size to get + /** @var RevisionsResult $revisions */ + $revisions = $documentStore->operations()->send( + new GetRevisionsOperation(Company::class, "Companies/1-A", $start, $pageSize)); + + // Process the retrieved revisions here + + if (count($revisions->getResults()) < $pageSize) + break; // No more revisions to retrieve + + // Increment 'start' by page-size, to get the "next page" in next iteration + $start += $pageSize; +\} +`} + + + +* The document ID, start & page size can be wrapped in a `Parameters` object: + + +{`$parameters = new GetRevisionsOperationParameters(); +$parameters->setId("Companies/1-A"); +$parameters->setStart(0); +$parameters->setPageSize(100); + +/** @var RevisionsResult $revisions */ +$revisions = $documentStore->operations->send( + new GetRevisionsOperation(Company::class, $parameters)); +`} + + + + + +## Syntax + +| Parameter | Type | Description | +| - | - | - | +| **id** | `string` | ID of the document to get revisions for | +| **start** | `int` | Revision number to start from | +| **pageSize** | `int` | Number of revisions to get | +| **parameters** | `Parameters` | An object that wraps `id`, `start`, and `pageSize` | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-python.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-python.mdx new file mode 100644 index 0000000000..1c090d9c40 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_get-revisions-python.mdx @@ -0,0 +1,84 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetRevisionsOperation` to GET the document's revisions. + +* To only COUNT the number of revisions without getting them, use the [get_count_for](../../../../document-extensions/revisions/client-api/session/counting.mdx) session method. 
+ +* In this page: + * [Get all revisions](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#get-all-revisions) + * [Paging results](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#paging-results) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/get-revisions.mdx#syntax) + + +## Get all revisions + + + +{`# Define the get revisions operation, pass the document id +get_revisions_op = GetRevisionsOperation("companies/1-A") + +# Execute the operation by passing it to Operations.Send +revisions = store.operations.send(get_revisions_op) + +# The revisions info: +all_revisions = revisions.results # All the revisions +revisions_count = revisions.total_results # Total number of revisions +`} + + + + + +## Paging results + +* Get and process revisions, one page at a time: + + +{`start = 0 +page_size = 100 + +while True: + # Execute the get revisions operation + # Pass the document id, start & page size to get + revisions = store.operations.send(GetRevisionsOperation("comapnies/1-A", Company, start, page_size)) + + # Process the retrieved revisions here + + if len(revisions.results) < page_size: + break # No more revisions to retrieve + + # Increment 'start' by page-size, to get the "next page" in next iteration + start += page_size +`} + + + +* The document ID, start & page size can be wrapped in a `Parameters` object: + + +{`parameters = GetRevisionsOperation.Parameters("companies/1-A", 0, 100) + +revisions = store.operations.send(GetRevisionsOperation.from_parameters(parameters)) +`} + + + + + +## Syntax + +| Parameter | Type | Description | +| - | - | - | +| **id** | `str` | ID of the document to get revisions for | +| **start** | `int` | Revision number to start from | +| **page_size** | `int` | Number of revisions to get | +| **parameters** | `Parameters` | An object that wraps `id`, `start`, and `page_size` | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_revert-document-to-revision-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_revert-document-to-revision-csharp.mdx new file mode 100644 index 0000000000..9a1c343d9e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/_revert-document-to-revision-csharp.mdx @@ -0,0 +1,211 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article describes how to revert specific documents to specific revisions using the `RevertRevisionsByIdOperation` operation. + +* To revert documents from all collections (or from selected collections) to a specified point in time, + see [Revert documents to revisions](../../../../document-extensions/revisions/revert-revisions.mdx). + +* By default, the operation will be applied to the [default database](../../../../client-api/setting-up-default-database.mdx). + To operate on a different database see [switch operations to different database](../../../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). 
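+  A minimal sketch of the latter (illustrative only; the database name is a placeholder):
+
+{`// Direct the operation at a specific database instead of the store's default
+// ('changeVector' would be obtained as shown in the examples below)
+store.Operations.ForDatabase("AuditDb")
+    .Send(new RevertRevisionsByIdOperation("orders/1-A", changeVector));
+`}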
+ +* In this page: + * [Overview](../../../../document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx#overview) + * [Revert single document](../../../../document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx#revert-single-document) + * [Revert multiple documents](../../../../document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx#revert-multiple-documents) + * [Syntax](../../../../document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx#syntax) + + +## Overview + +* To revert a document to a specific revision, provide the document ID and the change-vector of the target revision to the `RevertRevisionsByIdOperation` operation. + The document content will be overwritten by the content of the specified revision. + +* An exception will be thrown if the revision's change-vector is not found, does not exist for the specified document, or belongs to a different document. + +* Reverting a document with this operation can be executed even if the revisions configuration is disabled: + * When revisions are **enabled**: + Reverting the document creates a new revision containing the content of the target revision. + * When revisions are **disabled**: + The document is reverted to the target revision without creating a new revision. + +* In addition to the document itself, reverting will impact Document Extensions as follows: + * **Attachments**: + If the target revision owns attachments, they are restored to their state when the revision was created. + * **Counters**: + If the target revision owns counters, they are restored to functionality with their values at the time the revision was created. + * **Time series**: + Time series data is Not reverted. Learn more [here](../../../../document-extensions/revisions/revisions-and-other-features.mdx#reverted-data-1). + +* When executing this operation on a document that had revisions and was deleted, placing it in the Revisions Bin, + the document will be **recreated** with the content of the specified target revision and will be removed from the Revisions Bin. +##### How to obtain a revision's change-vector: + +The change-vector of a revision can be obtained via: + + * The Client API - follow the code in the examples [below](../../../../document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx#revert-single-document) + * Or from the document view in the Studio + +![Get revision CV](./assets/get-cv-for-revision.png) + +1. Go to the Revisions tab in the document view. +2. Click a revision to view +3. The document view will display the content of the revision. + This top label indicates that you are viewing a revision and not the current document. +4. Click the copy button in the Properties pane to copy this revision's change-vector to your clipboard. + + + +## Revert single document + +Using RavenDB's sample data, document _orders/1-A_ has a total of 7 revisions. +In this example, we revert document _orders/1-A_ to its very first revision. 
+ + + + +{`using (var session = store.OpenSession()) +{ + // Get the revisions metadata for the document you wish to revert + // ============================================================== + + var revisionsMetadata = session.Advanced.Revisions + .GetMetadataFor(id: "orders/1-A"); + + // Get the CV of the revision you wish to revert to: + // ================================================= + + // Note: revisionsMetadata[0] is the latest revision, + // so specify the index of the revision you want. + // In this example, it will be the very first revision of the document: + + var numberOfRevisions = revisionsMetadata.Count(); + var changeVector = revisionsMetadata[numberOfRevisions-1] + .GetString(Constants.Documents.Metadata.ChangeVector); + + // Execute the operation + store.Operations.Send( + // Pass the document ID and the change-vector of the revision to revert to + new RevertRevisionsByIdOperation("orders/1-A", changeVector)); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + // Get the revisions metadata for the document you wish to revert + // ============================================================== + + var revisionsMetadata = await asyncSession.Advanced.Revisions + .GetMetadataForAsync(id: "Orders/1-A"); + + // Get the CV of the revision you wish to revert to: + // ================================================= + + // Note: revisionsMetadata[0] is the latest revision, + // so specify the index of the revision you want. + // In this example, it will be the very first revision of the document: + + var numberOfRevisions = revisionsMetadata.Count(); + var changeVector = revisionsMetadata[numberOfRevisions-1] + .GetString(Constants.Documents.Metadata.ChangeVector); + + // Execute the operation + await store.Operations.SendAsync( + // Pass the document ID and the change-vector of the revision to revert to + new RevertRevisionsByIdOperation("Orders/1-A", changeVector)); +} +`} + + + + + + +## Revert multiple documents + +You can use the operation to revert multiple documents. +Note: The documents do not need to belong to the same collection. 
+ + + +{`using (var session = store.OpenSession()) +{ + // Get the revisions metadata for the documents you wish to revert + var revisionsMetadata1 = session.Advanced.Revisions + .GetMetadataFor(id: "orders/1-A"); + var revisionsMetadata2 = session.Advanced.Revisions + .GetMetadataFor(id: "users/999"); + + // Get the CV of the revisions you wish to revert to + var changeVector1 = revisionsMetadata1[2] + .GetString(Constants.Documents.Metadata.ChangeVector); + var changeVector2 = revisionsMetadata2[3] + .GetString(Constants.Documents.Metadata.ChangeVector); + + // Execute the operation + store.Operations.Send( + // Pass the document IDs and the change-vectors of the revisions to revert to + new RevertRevisionsByIdOperation(new Dictionary<string, string>() + { { "orders/1-A", changeVector1 }, { "users/999", changeVector2 } })); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + // Get the revisions metadata for the documents you wish to revert + var revisionsMetadata1 = await asyncSession.Advanced.Revisions + .GetMetadataForAsync(id: "orders/1-A"); + var revisionsMetadata2 = await asyncSession.Advanced.Revisions + .GetMetadataForAsync(id: "users/999"); + + // Get the CV of the revisions you wish to revert to + var changeVector1 = revisionsMetadata1[2] + .GetString(Constants.Documents.Metadata.ChangeVector); + var changeVector2 = revisionsMetadata2[3] + .GetString(Constants.Documents.Metadata.ChangeVector); + + // Execute the operation + await store.Operations.SendAsync( + // Pass the document IDs and the change-vectors of the revisions to revert to + new RevertRevisionsByIdOperation(new Dictionary<string, string>() + { { "orders/1-A", changeVector1 }, { "users/999", changeVector2 } })); +} +`} + + + + + + +## Syntax + + + +{`Available overloads: +==================== +public RevertRevisionsByIdOperation(string id, string cv); +public RevertRevisionsByIdOperation(Dictionary<string, string> idToChangeVector); +`} + + + +| Parameter | Type | Description | | - | - | - | | **id** | `string` | The ID of the document to revert. | | **cv** | `string` | The change vector of the revision to which the document should be reverted. | | **idToChangeVector** | `Dictionary<string, string>` | A dictionary where each key is a document ID, and each value is the change-vector of the revision to which the document should be reverted.
| + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/assets/get-cv-for-revision.png b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/assets/get-cv-for-revision.png new file mode 100644 index 0000000000..a0c6326ca9 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/assets/get-cv-for-revision.png differ diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/configure-revisions.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/configure-revisions.mdx new file mode 100644 index 0000000000..8020b49624 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/configure-revisions.mdx @@ -0,0 +1,64 @@ +--- +title: "Configure Revisions Operation" +hide_table_of_contents: true +sidebar_label: Configure Revisions +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ConfigureRevisionsCsharp from './_configure-revisions-csharp.mdx'; +import ConfigureRevisionsJava from './_configure-revisions-java.mdx'; +import ConfigureRevisionsPython from './_configure-revisions-python.mdx'; +import ConfigureRevisionsPhp from './_configure-revisions-php.mdx'; +import ConfigureRevisionsNodejs from './_configure-revisions-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx new file mode 100644 index 0000000000..640d53abe1 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/conflict-revisions-configuration.mdx @@ -0,0 +1,49 @@ +--- +title: "Configure Conflict Revisions Operation" +hide_table_of_contents: true +sidebar_label: Configure Conflict Revisions +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ConflictRevisionsConfigurationCsharp from './_conflict-revisions-configuration-csharp.mdx'; +import ConflictRevisionsConfigurationPhp from './_conflict-revisions-configuration-php.mdx'; +import ConflictRevisionsConfigurationNodejs from './_conflict-revisions-configuration-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "php", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/delete-revisions.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/delete-revisions.mdx new file mode 100644 index 0000000000..e733377eb7 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/delete-revisions.mdx @@ -0,0 +1,43 @@ +--- +title: "Delete Revisions Operation" +hide_table_of_contents: true +sidebar_label: Delete Revisions +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeleteRevisionsCsharp from 
'./_delete-revisions-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/get-revisions.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/get-revisions.mdx new file mode 100644 index 0000000000..d756488821 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/get-revisions.mdx @@ -0,0 +1,55 @@ +--- +title: "Get Revisions Operation" +hide_table_of_contents: true +sidebar_label: Get Revisions +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetRevisionsCsharp from './_get-revisions-csharp.mdx'; +import GetRevisionsPython from './_get-revisions-python.mdx'; +import GetRevisionsPhp from './_get-revisions-php.mdx'; +import GetRevisionsNodejs from './_get-revisions-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx new file mode 100644 index 0000000000..cc59ec813d --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx @@ -0,0 +1,39 @@ +--- +title: "Revert Document to Revision Operation" +hide_table_of_contents: true +sidebar_label: Revert Document to Revision +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import RevertDocumentToRevisionCsharp from './_revert-document-to-revision-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/overview.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/overview.mdx new file mode 100644 index 0000000000..317d2e10ef --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/overview.mdx @@ -0,0 +1,53 @@ +--- +title: "Revisions Client API Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OverviewCsharp from './_overview-csharp.mdx'; +import OverviewJava from './_overview-java.mdx'; +import OverviewPython from './_overview-python.mdx'; +import OverviewPhp from './_overview-php.mdx'; +import OverviewNodejs from './_overview-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_category_.json b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_category_.json new file mode 100644 index 0000000000..75fd17712e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 1, + "label": "Session"
+} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-csharp.mdx new file mode 100644 index 0000000000..bcf3549695 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-csharp.mdx @@ -0,0 +1,55 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can get the number of revisions a document has by using the advanced session method `GetCountFor`. + +* In this page: + * [Get revisions count](../../../../document-extensions/revisions/client-api/session/counting.mdx#get-revisions-count) + * [syntax](../../../../document-extensions/revisions/client-api/session/counting.mdx#syntax) + + +## Get revisions count + + + + +{`// Get the number of revisions for document 'companies/1-A' +var revisionsCount = session.Advanced.Revisions.GetCountFor("companies/1-A"); +`} + + + + +{`// Get the number of revisions for document 'companies/1-A' +var revisionsCount = await asyncSession.Advanced.Revisions.GetCountForAsync("companies/1-A"); +`} + + + + + + +## Syntax + + + +{`long GetCountFor(string id); +`} + + + +| Parameter | Type | Description | | - | - | - | | **id** | string | Document ID for which revisions are counted | + +| Return value | | | - | - | | `long` | The number of revisions for the specified document | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-nodejs.mdx new file mode 100644 index 0000000000..1c0b07795c --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-nodejs.mdx @@ -0,0 +1,46 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can get the number of revisions a document has by using the advanced session method `getCountFor`.
+ +* In this page: + * [Get revisions count](../../../../document-extensions/revisions/client-api/session/counting.mdx#get-revisions-count) + * [syntax](../../../../document-extensions/revisions/client-api/session/counting.mdx#syntax) + + +## Get revisions count + + + +{`// Get the number of revisions for document 'companies/1-A" +const revisionsCount = await session.advanced.revisions.getCountFor("companies/1-A"); +`} + + + + + +## Syntax + + + +{`await session.advanced.revisions.getCountFor(id); +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **id** | string | Document ID for which revisions are counted | + +| Return value | | +| - | - | +| `Promise` | A `Promise` resolving to the number of revisions for the specified document | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-php.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-php.mdx new file mode 100644 index 0000000000..f695289992 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-php.mdx @@ -0,0 +1,46 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can get the number of revisions a document has by using the session `advanced` method `getCountFor`. + +* In this page: + * [Get revisions count](../../../../document-extensions/revisions/client-api/session/counting.mdx#get-revisions-count) + * [syntax](../../../../document-extensions/revisions/client-api/session/counting.mdx#syntax) + + +## Get revisions count + + + +{`// Get the number of revisions for document 'companies/1-A" +$revisionsCount = $session->advanced()->revisions()->getCountFor("companies/1-A"); +`} + + + + + +## Syntax + + + +{`function getCountFor(?string $id): int; +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **id** | `string` | The ID of the document that revisions are counted for | + +| Return value | | +| - | - | +| `int` | The number of revisions for the specified document | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-python.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-python.mdx new file mode 100644 index 0000000000..3955fa8c2b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_counting-python.mdx @@ -0,0 +1,46 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* You can get the number of revisions a document has by using the session `advanced` method `get_count_for`. + +* In this page: + * [Get revisions count](../../../../document-extensions/revisions/client-api/session/counting.mdx#get-revisions-count) + * [syntax](../../../../document-extensions/revisions/client-api/session/counting.mdx#syntax) + + +## Get revisions count + + + +{`# Get the number of revisions for document 'companies/1-A' +revisions_count = session.advanced.revisions.get_count_for("companies/1-A") +`} + + + + + +## Syntax + + + +{`def get_count_for(self, id_: str) -> int: ... 
+`} + + + +| Parameter | Type | Description | +| - | - | - | +| **id_** | `str` | The ID of the document that revisions are counted for | + +| Return value | | +| - | - | +| `int` | The number of revisions for the specified document | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_including-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_including-csharp.mdx new file mode 100644 index 0000000000..1586b42883 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_including-csharp.mdx @@ -0,0 +1,470 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Document revisions can be [included](../../../../client-api/how-to/handle-document-relationships.mdx#includes) in results when: + * **Making a query** (`Session.Query` / `Session.Advanced.RawQuery`) + * **Loading a document** (`Session.Load`) from the server + +* The revisions to include can be specified by: + * **Creation time** + * **Change vector** + +* In this page: + * [Overview:](../../../../document-extensions/revisions/client-api/session/including.mdx#overview) + * [Why include revisions](../../../../document-extensions/revisions/client-api/session/including.mdx#why-include-revisions) + * [Including by creation time](../../../../document-extensions/revisions/client-api/session/including.mdx#including-by-creation-time) + * [Including by change vector](../../../../document-extensions/revisions/client-api/session/including.mdx#including-by-change-vector) + * [Include revisions:](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-loading-document) + * [When Loading document](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-loading-document) + * [When making a Query](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-making-a-query) + * [When making a Raw Query](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-making-a-raw-query) + * [Syntax](../../../../document-extensions/revisions/client-api/session/including.mdx#syntax) + * [Patching the revision change vector](../../../../document-extensions/revisions/client-api/session/including.mdx#patching-the-revision-change-vector) + + +## Overview + +#### Why include revisions: + +* Including revisions may be useful, for example, when an auditing application loads or queries for a document. + The document's past revisions can be included with the document to make the document's history available for instant inspection. + +* Once loaded to the session, there are no additional trips to the server when accessing the revisions. + [Getting](../../../../document-extensions/revisions/client-api/session/loading.mdx) a revision that was included with the document will retrieve it directly from the session. + This also holds true when attempting to include revisions but none are found. +#### Including by Creation Time: + +* You can include a single revision by specifying its creation time, see examples below. + +* You can pass local time or UTC, either way the server will convert it to UTC. + +* **If the provided time matches** the creation time of a document revision, this revision will be included. 
+ +* **If no exact match is found**, then the first revision that precedes the specified time will be returned. +#### Including by Change Vector: + +* Each time a document is modified, its [Change Vector](../../../../server/clustering/replication/change-vector.mdx) is updated. + +* When a revision is created, + the revision's change vector is the change vector of the document at the time of the revision's creation. + +* To include single or multiple document revisions by their change vectors: + + * When modifying the document, store its updated change vector in a property in the document. + Can be done by [patching](../../../../document-extensions/revisions/client-api/session/including.mdx#patching-the-revision-change-vector) the document from the Client API or from the Studio. + + * Specify the **path** to this property when including the revisions, see examples below. + + * e.g.: + Each time an employee's contract document is modified (e.g. when their salary is raised), + you can add the current change vector of the document to a dedicated property in the document. + Whenever the time comes to re-evaluate an employee's terms and their contract is loaded, + its past revisions can be easily included with it by their change vectors. + + + +## Include revisions when Loading document + +#### Include a revision by Time: + + + + +{`// The revision creation time +// For example - looking for a revision from last month +var creationTime = DateTime.Now.AddMonths(-1); + +// Load a document: +var order = session.Load("orders/1-A", builder => builder + // Pass the revision creation time to 'IncludeRevisions' + // The revision will be 'loaded' to the session along with the document + .IncludeRevisions(creationTime)); + +// Get the revision by creation time - it will be retrieved from the SESSION +// No additional trip to the server is made +var revision = session + .Advanced.Revisions.Get("orders/1-A", creationTime); +`} + + + + +{`// The revision creation time +// For example - looking for a revision from last month +var creationTime = DateTime.Now.AddMonths(-1); + +// Load a document: +var order = await asyncSession.LoadAsync("orders/1-A", builder => builder + // Pass the revision creation time to 'IncludeRevisions' + // The revision will be 'loaded' to the session along with the document + .IncludeRevisions(creationTime)); + +// Get the revision by creation time - it will be retrieved from the SESSION +// No additional trip to the server is made +var revision = await asyncSession + .Advanced.Revisions.GetAsync("orders/1-A", creationTime); +`} + + + +#### Include revisions by Change Vector: + + + + +{`// Load a document: +var contract = session.Load("contracts/1-A", builder => builder + // Pass the path to the document property that contains the revision change vector(s) + // The revision(s) will be 'loaded' to the session along with the document + .IncludeRevisions(x => x.RevisionChangeVector) // Include a single revision + .IncludeRevisions(x => x.RevisionChangeVectors)); // Include multiple revisions + +// Get the revision(s) by change vectors - it will be retrieved from the SESSION +// No additional trip to the server is made +var revision = session + .Advanced.Revisions.Get(contract.RevisionChangeVector); +var revisions = session + .Advanced.Revisions.Get(contract.RevisionChangeVectors); +`} + + + + +{`// Load a document: +var contract = await asyncSession.LoadAsync("contracts/1-A",builder => builder + // Pass the path to the document property that contains the revision change vector(s) + // 
The revision(s) will be 'loaded' to the session along with the document + .IncludeRevisions(x => x.RevisionChangeVector) // Include a single revision + .IncludeRevisions(x => x.RevisionChangeVectors)); // Include multiple revisions + +// Get the revision(s) by change vectors - it will be retrieved from the SESSION +// No additional trip to the server is made +var revision = await asyncSession + .Advanced.Revisions.GetAsync(contract.RevisionChangeVector); +var revisions = await asyncSession + .Advanced.Revisions.GetAsync(contract.RevisionChangeVectors); +`} + + + + +
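+
+Since the builder methods return the builder itself ("Can be used in chaining", see the Syntax section below), both include flavors can be combined in a single `Load` call. The following is a minimal sketch, not one of the official samples; it assumes the `Contract` class defined below:
+
+<CodeBlock language="csharp">
+{`// A sketch - chain a time-based include with change-vector-based includes
+var creationTime = DateTime.Now.AddMonths(-1);
+
+var contract = session.Load<Contract>("contracts/1-A", builder => builder
+    .IncludeRevisions(creationTime)                   // revision by creation time
+    .IncludeRevisions(x => x.RevisionChangeVector)    // revision by a single change vector
+    .IncludeRevisions(x => x.RevisionChangeVectors)); // revisions by multiple change vectors
+`}
+</CodeBlock>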
+ + +{`// Sample Contract document +private class Contract +\{ + public string Id \{ get; set; \} + public string Name \{ get; set; \} + public string RevisionChangeVector \{ get; set; \} // A single change vector + public List RevisionChangeVectors \{ get; set; \} // A list of change vectors +\} +`} + + + + + +## Include revisions when making a Query + +#### Include revisions by Time: + + + + +{`// The revision creation time +// For example - looking for revisions from last month +var creationTime = DateTime.Now.AddMonths(-1); + +// Query for documents: +var orderDocuments = session.Query() + .Where(x => x.ShipTo.Country == "Canada") + // Pass the revision creation time to 'IncludeRevisions' + .Include(builder => builder.IncludeRevisions(creationTime)) + // For each document in the query results, + // the matching revision will be 'loaded' to the session along with the document + .ToList(); + +// Get a revision by its creation time for a document from the query results +// It will be retrieved from the SESSION - no additional trip to the server is made +var revision = session + .Advanced.Revisions.Get(orderDocuments[0].Id, creationTime); +`} + + + + +{`// The revision creation time +// For example - looking for revisions from last month +var creationTime = DateTime.Now.AddMonths(-1); + +// Query for documents: +var orderDocuments = await asyncSession.Query() + .Where(x => x.ShipTo.Country == "Canada") + // Pass the revision creation time to 'IncludeRevisions' + .Include(builder => builder.IncludeRevisions(creationTime)) + // For each document in the query results, + // the matching revision will be 'loaded' to the session along with the document + .ToListAsync(); + +// Get a revision by its creation time for a document from the query results +// It will be retrieved from the SESSION - no additional trip to the server is made +var revision = await asyncSession + .Advanced.Revisions.GetAsync(orderDocuments[0].Id, creationTime); +`} + + + +#### Include revisions by Change Vector: + + + + +{`// Query for documents: +var orderDocuments = session.Query() + // Pass the path to the document property that contains the revision change vector(s) + .Include(builder => builder + .IncludeRevisions(x => x.RevisionChangeVector) // Include a single revision + .IncludeRevisions(x => x.RevisionChangeVectors)) // Include multiple revisions + // For each document in the query results, + // the matching revisions will be 'loaded' to the session along with the document + .ToList(); + +// Get the revision(s) by change vectors - it will be retrieved from the SESSION +// No additional trip to the server is made +var revision = session. + Advanced.Revisions.Get(orderDocuments[0].RevisionChangeVector); +var revisions = session + .Advanced.Revisions.Get(orderDocuments[0].RevisionChangeVectors); +`} + + + + +{`// Query for documents: +var orderDocuments = await asyncSession.Query() + // Pass the path to the document property that contains the revision change vector(s) + .Include(builder => builder + .IncludeRevisions(x => x.RevisionChangeVector) // Include a single revision + .IncludeRevisions(x => x.RevisionChangeVectors)) // Include multiple revisions + // For each document in the query results, + // the matching revisions will be 'loaded' to the session along with the document + .ToListAsync(); + +// Get the revision(s) by change vectors - it will be retrieved from the SESSION +// No additional trip to the server is made +var revision = await asyncSession. 
+ Advanced.Revisions.GetAsync(orderDocuments[0].RevisionChangeVector); +var revisions = await asyncSession + .Advanced.Revisions.GetAsync(orderDocuments[0].RevisionChangeVectors); +`} + + + + +* See the _Contract_ class definition [above](../../../../document-extensions/revisions/client-api/session/including.mdx#sample-document). + + + +## Include revisions when making a Raw Query + +* Use `include revisions` in your RQL when making a raw query. + +* Pass either the revision creation time or the path to the document property containing the change vector(s), + RavenDB will figure out the parameter type passed and include the revisions accordingly. + +* Aliases (e.g. `from Users as U`) are Not supported by raw queries that include revisions. +#### Include revisions by Time: + + + + +{`// The revision creation time +// For example - looking for revisions from last month +var creationTime = DateTime.Now.AddMonths(-1); + +// Query for documents with Raw Query: +var orderDocuments = session.Advanced + // Use 'include revisions' in the RQL + .RawQuery("from Orders include revisions($p0)") + // Pass the revision creation time + .AddParameter("p0", creationTime) + // For each document in the query results, + // the matching revision will be 'loaded' to the session along with the document + .ToList(); + +// Get a revision by its creation time for a document from the query results +// It will be retrieved from the SESSION - no additional trip to the server is made +var revision = session + .Advanced.Revisions.Get(orderDocuments[0].Id, creationTime); +`} + + + + +{`// The revision creation time +// For example - looking for revisions from last month +var creationTime = DateTime.Now.AddMonths(-1); + +// Query for documents with Raw Query: +var orderDocuments = await asyncSession.Advanced + // Use 'include revisions' in the RQL + .AsyncRawQuery("from Orders include revisions($p0)") + // Pass the revision creation time + .AddParameter("p0", creationTime) + // For each document in the query results, + // the matching revision will be 'loaded' to the session along with the document + .ToListAsync(); + +// Get a revision by its creation time for a document from the query results +// It will be retrieved from the SESSION - no additional trip to the server is made +var revision = await asyncSession + .Advanced.Revisions.GetAsync(orderDocuments[0].Id, creationTime); +`} + + + +#### Include revisions by Change Vector: + + + + +{`// Query for documents with Raw Query: +var orderDocuments = session.Advanced + // Use 'include revisions' in the RQL + .RawQuery("from Contracts include revisions($p0, $p1)") + // Pass the path to the document properties containing the change vectors + .AddParameter("p0", "RevisionChangeVector") + .AddParameter("p1", "RevisionChangeVectors") + // For each document in the query results, + // the matching revisions will be 'loaded' to the session along with the document + .ToList(); + +// Get the revision(s) by change vectors - it will be retrieved from the SESSION +// No additional trip to the server is made +var revision = session. 
+    Advanced.Revisions.Get(orderDocuments[0].RevisionChangeVector);
+var revisions = session
+    .Advanced.Revisions.Get(orderDocuments[0].RevisionChangeVectors);
+`}
+
+
+
+
+{`// Query for documents with Raw Query:
+var orderDocuments = await asyncSession.Advanced
+    // Use 'include revisions' in the RQL
+    .AsyncRawQuery("from Contracts include revisions($p0, $p1)")
+    // Pass the path to the document properties containing the change vectors
+    .AddParameter("p0", "RevisionChangeVector")
+    .AddParameter("p1", "RevisionChangeVectors")
+    // For each document in the query results,
+    // the matching revisions will be 'loaded' to the session along with the document
+    .ToListAsync();
+
+// Get the revision(s) by change vectors - it will be retrieved from the SESSION
+// No additional trip to the server is made
+var revision = await asyncSession.
+    Advanced.Revisions.GetAsync(orderDocuments[0].RevisionChangeVector);
+var revisions = await asyncSession
+    .Advanced.Revisions.GetAsync(orderDocuments[0].RevisionChangeVectors);
+`}
+
+
+
+
+* See the _Contract_ class definition [above](../../../../document-extensions/revisions/client-api/session/including.mdx#sample-document).
+
+
+
+## Syntax
+
+
+
+{`// Include a single revision by Time
+TBuilder IncludeRevisions(DateTime before);
+
+// Include a single revision by Change Vector
+TBuilder IncludeRevisions(Expression<Func<T, string>> path);
+
+// Include an array of revisions by Change Vectors
+TBuilder IncludeRevisions(Expression<Func<T, IEnumerable<string>>> path);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **before** | `DateTime` | <ul><li>Creation time of the revision to be included.</li><li>Pass local time or UTC.
The server will convert the param to UTC.</li><li>If no revision was created at this time then the first revision that precedes it is returned.</li></ul> |
+| **path** | `Expression<Func<T, string>>` | <ul><li>The path to the document property that contains
**a single change vector** of the revision to be included.</li></ul> |
+| **path** | `Expression<Func<T, IEnumerable<string>>>` | <ul><li>The path to the document property that contains
**an array of change vectors** of the revisions to be included.</li></ul> | + +| Return value | | +| - | - | +| `TBuilder` | <ul><li>When **loading** a document:
A builder object that is used to build the include part in the Load request.</li><li>When **querying** for a document:
A builder object that is used to build the include part in the Query RQL expression.</li><li>Can be used in chaining.</li></ul> |
+
+
+
+## Patching the revision change vector
+
+* To include revisions when making a query or a raw query,
+  you need to specify the path to the document property that contains the revision change vector(s).
+
+* The example below shows how to get and patch a revision change vector to a document property.
+
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Get the revisions' metadata for document 'contracts/1-A'
+    List<MetadataAsDictionary> contractRevisionsMetadata =
+        session.Advanced.Revisions.GetMetadataFor("contracts/1-A");
+
+    // Get a change vector from the metadata
+    string changeVector =
+        contractRevisionsMetadata.First().GetString(Constants.Documents.Metadata.ChangeVector);
+
+    // Patch the document - add the revision change vector to a specific document property
+    session.Advanced
+        .Patch<Contract, string>("contracts/1-A", x => x.RevisionChangeVector, changeVector);
+
+    // Save your changes
+    session.SaveChanges();
+}
+`}
+
+
+
+{`using (var asyncSession = store.OpenAsyncSession())
+{
+    // Get the revisions' metadata for document 'contracts/1-A'
+    List<MetadataAsDictionary> contractRevisionsMetadata =
+        await asyncSession.Advanced.Revisions.GetMetadataForAsync("contracts/1-A");
+
+    // Get a change vector from the metadata
+    string changeVector =
+        contractRevisionsMetadata.First().GetString(Constants.Documents.Metadata.ChangeVector);
+
+    // Patch the document - add the revision change vector to a specific document property
+    asyncSession.Advanced
+        .Patch<Contract, string>("contracts/1-A", x => x.RevisionChangeVector, changeVector);
+
+    // Save your changes
+    await asyncSession.SaveChangesAsync();
+}
+`}
+
+
+
+
+* See the _Contract_ class definition [above](../../../../document-extensions/revisions/client-api/session/including.mdx#sample-document).
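+
+The "no additional trip to the server" behavior described in the overview can be verified through the session's request counter. The following is a minimal sketch, not one of the official samples; it assumes the `Contract` class from above and xunit-style assertions (as used elsewhere in these docs):
+
+<CodeBlock language="csharp">
+{`using (var session = store.OpenSession())
+\{
+    // One request: the document and the referenced revision arrive together
+    var contract = session.Load<Contract>("contracts/1-A", builder => builder
+        .IncludeRevisions(x => x.RevisionChangeVector));
+
+    var requestsAfterLoad = session.Advanced.NumberOfRequests;
+
+    // Retrieved from the session - no further request is expected
+    var revision = session
+        .Advanced.Revisions.Get<Contract>(contract.RevisionChangeVector);
+
+    Assert.Equal(requestsAfterLoad, session.Advanced.NumberOfRequests);
+\}
+`}
+</CodeBlock>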
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_including-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_including-nodejs.mdx new file mode 100644 index 0000000000..1e4daa422d --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_including-nodejs.mdx @@ -0,0 +1,324 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Document revisions can be included in results when: + * **Making a query** (`session.query` / `session.advanced.rawQuery`) + * **Loading a document** (`session.load`) from the server + +* The revisions to include can be specified by: + * **Creation time** + * **Change vector** + +* In this page: + * [Overview:](../../../../document-extensions/revisions/client-api/session/including.mdx#overview) + * [Why include revisions](../../../../document-extensions/revisions/client-api/session/including.mdx#why-include) + * [Including by creation time](../../../../document-extensions/revisions/client-api/session/including.mdx#include-by-time) + * [Including by change vector](../../../../document-extensions/revisions/client-api/session/including.mdx#include-by-change-vector) + * [Include revisions:](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-loading-document) + * [When Loading document](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-loading-document) + * [When making a Query](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-making-a-query) + * [When making a Raw Query](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-making-a-raw-query) + * [Syntax](../../../../document-extensions/revisions/client-api/session/including.mdx#syntax) + * [Patching the revision change vector](../../../../document-extensions/revisions/client-api/session/including.mdx#patching-the-revision-change-vector) + + +## Overview + + + +
**Why include revisions**: +* Including revisions may be useful, for example, when an auditing application loads or queries for a document. + The document's past revisions can be included with the document to make the document's history available for instant inspection. + +* Once loaded to the session, there are no additional trips to the server when accessing the revisions. + [Getting](../../../../document-extensions/revisions/client-api/session/loading.mdx) a revision that was included with the document will retrieve it directly from the session. + This also holds true when attempting to include revisions but none are found. + + + + + + **Including by Creation Time**: +* You can include a single revision by specifying its creation time, see examples below. + +* You can pass local time or UTC, either way the server will convert it to UTC. + +* **If the provided time matches** the creation time of a document revision, this revision will be included. + +* **If no exact match is found**, then the first revision that precedes the specified time will be returned. + + + + + + **Including by Change Vector**: +* Each time a document is modified, its [Change Vector](../../../../server/clustering/replication/change-vector.mdx) is updated. + +* When a revision is created, + the revision's change vector is the change vector of the document at the time of the revision's creation. + +* To include single or multiple document revisions by their change vectors: + + * When modifying the document, store its updated change vector in a property in the document. + Can be done by [patching](../../../../document-extensions/revisions/client-api/session/including.mdx#patching-the-revision-change-vector) the document from the Client API or from the Studio. + + * Specify the **path** to this property when including the revisions, see examples below. + + * e.g.: + Each time an employee's contract document is modified (e.g. when their salary is raised), + you can add the current change vector of the document to a dedicated property in the document. + Whenever the time comes to re-evaluate an employee's terms and their contract is loaded, + its past revisions can be easily included with it by their change vectors. 
+ + + + + +## Include revisions when Loading document + +**Include a revision by Time** + + + +{`// The revision creation time +// For example - looking for a revision from last month +const creationTime = new Date(); +creationTime.setMonth(creationTime.getMonth() - 1).toLocaleString(); + +// Load a document: +const order = await session.load("orders/1-A", \{ + // Pass the revision creation time to 'includeRevisions' + // The revision will be 'loaded' to the session along with the document + includes: builder => builder.includeRevisions(creationTime) +\}); + +// Get the revision by creation time - it will be retrieved from the SESSION +// No additional trip to the server is made +const revision = await session + .advanced.revisions.get("orders/1-A", creationTime); +`} + + +**Include revisions by Change Vector** + + + +{`// Load a document: +const contract = await session.load("contracts/1-A", \{ + includes: builder => builder + // Pass the path to the document property that contains the revision change vector(s) + // The revision(s) will be 'loaded' to the session along with the document + .includeRevisions("revisionChangeVector") // Include a single revision + .includeRevisions("revisionChangeVectors") // Include multiple revisions +\}); + +// Get the revision(s) by change vectors - it will be retrieved from the SESSION +// No additional trip to the server is made +const revision = await session + .advanced.revisions.get(contract.revisionChangeVector); +const revisions = await session + .advanced.revisions.get(contract.revisionChangeVectors); +`} + + + + + + +{`// Sample Contract document +class Contract \{ + id: string; + name: string; + revisionChangeVector: string; + revisionChangeVectors: string[]; +\} +`} + + + + + +## Include revisions when making a Query + +**Include revisions by Time** + + + +{`// The revision creation time +// For example - looking for a revision from last month +const creationTime = new Date(); +creationTime.setMonth(creationTime.getMonth() - 1).toLocaleString(); + +// Define the query: +const query = session.query(\{collection: "Orders"\}) + .whereEquals("ShipTo.Country", "Canada") + // Pass the revision creation time to 'includeRevisions' + .include(builder => builder.includeRevisions(creationTime)); + +// Execute the query: +// For each document in the query results, +// the matching revision will be 'loaded' to the session along with the document +const orderDocuments = await query.all(); + +// Get a revision by its creation time for a document from the query results +// It will be retrieved from the SESSION - no additional trip to the server is made +const revision = await session + .advanced.revisions.get(orderDocuments[0].id, creationTime); +`} + + +**Include revisions by Change Vector** + + + +{`// Define the query: +const query = session.query(\{collection: "Contracts"\}) + // Pass the path to the document property that contains the revision change vector(s) + .include(builder => \{ + builder + .includeRevisions("revisionChangeVector") // Include a single revision + .includeRevisions("revisionChangeVectors") // Include multiple revisions + \}); + +// Execute the query: +// For each document in the query results, +// the matching revisions will be 'loaded' to the session along with the document +const orderDocuments = await query.all(); + +// Get the revision(s) by change vectors - it will be retrieved from the SESSION +// No additional trip to the server is made +const revision = await session + 
.advanced.revisions.get(orderDocuments[0].revisionChangeVector);
+const revisions = await session
+    .advanced.revisions.get(orderDocuments[0].revisionChangeVectors);
+`}
+
+
+
+* See the _Contract_ class definition [above](../../../../document-extensions/revisions/client-api/session/including.mdx#sample-document).
+
+
+
+## Include revisions when making a Raw Query
+
+* Use `include revisions` in your RQL when making a raw query.
+
+* Pass either the revision creation time or the path to the document property containing the change vector(s);
+  RavenDB will figure out the type of the parameter passed and include the revisions accordingly.
+
+* Aliases (e.g. `from Users as U`) are Not supported by raw queries that include revisions.
+
+**Include revisions by Time**
+
+
+
+{`// The revision creation time
+// For example - looking for a revision from last month
+const creationTime = new Date();
+creationTime.setMonth(creationTime.getMonth() - 1);
+
+// Define the Raw Query:
+const rawQuery = session.advanced
+    // Use 'include revisions' in the RQL
+    .rawQuery("from Orders include revisions($p0)")
+    // Pass the revision creation time
+    .addParameter("p0", creationTime);
+
+// Execute the query:
+// For each document in the query results,
+// the matching revision will be 'loaded' to the session along with the document
+const orderDocuments = await rawQuery.all();
+
+// Get a revision by its creation time for a document from the query results
+// It will be retrieved from the SESSION - no additional trip to the server is made
+const revision = await session
+    .advanced.revisions.get(orderDocuments[0].id, creationTime);
+`}
+
+**Include revisions by Change Vector**
+
+
+
+{`// Define the Raw Query:
+const rawQuery = session.advanced
+    // Use 'include revisions' in the RQL
+    .rawQuery("from Contracts include revisions($p0, $p1)")
+    // Pass the path to the document properties containing the change vectors
+    .addParameter("p0", "revisionChangeVector")
+    .addParameter("p1", "revisionChangeVectors");
+
+// Execute the raw query:
+// For each document in the query results,
+// the matching revisions will be 'loaded' to the session along with the document
+const orderDocuments = await rawQuery.all();
+
+// Get the revision(s) by change vectors - it will be retrieved from the SESSION
+// No additional trip to the server is made
+const revision = await session
+    .advanced.revisions.get(orderDocuments[0].revisionChangeVector);
+const revisions = await session
+    .advanced.revisions.get(orderDocuments[0].revisionChangeVectors);
+`}
+
+
+
+* See the _Contract_ class definition [above](../../../../document-extensions/revisions/client-api/session/including.mdx#sample-document).
+
+
+
+## Syntax
+
+
+
+{`object includeRevisions(before);
+object includeRevisions(path);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **before** | `string` | <ul><li>Creation time of the revision to be included.</li><li>Pass local time or UTC.
The server will convert the param to UTC.</li><li>If no revision was created at this time then the first revision that precedes it is returned.</li></ul> | +| **path** | `string` | <ul><li>The path to the document property that contains
**a single change vector** or **an array of change vectors**
of the revisions to be included.</li></ul> | + +| Return value | | +| - | - | +| `object` | <ul><li>When **loading** a document:
A builder object that is used to build the include part in the Load request.</li><li>When **querying** for a document:
A builder object that is used to build the include part in the Query RQL expression.</li><li>Can be used in chaining.</li></ul> |
+
+
+
+## Patching the revision change vector
+
+* To include revisions when making a query or a raw query,
+  you need to specify the path to the document property that contains the revision change vector(s).
+
+* The example below shows how to get and patch a revision change vector to a document property.
+
+
+
+{`// Get the revisions' metadata for document 'contracts/1-A'
+const contractRevisionsMetadata = await session
+    .advanced.revisions.getMetadataFor("contracts/1-A");
+
+// Get a change vector from the metadata
+const metadata = contractRevisionsMetadata[0];
+const changeVector = metadata[CONSTANTS.Documents.Metadata.CHANGE_VECTOR];
+
+// Patch the document - add the revision change vector to a specific document property
+await session.advanced.patch("contracts/1-A", "revisionChangeVector", changeVector);
+
+// Save your changes
+await session.saveChanges();
+`}
+
+
+
+* See the _Contract_ class definition [above](../../../../document-extensions/revisions/client-api/session/including.mdx#sample-document).
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_including-php.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_including-php.mdx
new file mode 100644
index 0000000000..e24768684d
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_including-php.mdx
@@ -0,0 +1,348 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Document revisions can be [included](../../../../client-api/how-to/handle-document-relationships.mdx#includes) in results when:
+  * **Making a query** (`session.query`/`session.advanced.rawQuery`)
+  * **Loading a document** (`session.load`) from the server
+
+* The revisions to include can be specified by:
+  * **Creation time**
+  * **Change vector**
+
+* In this page:
+  * [Overview:](../../../../document-extensions/revisions/client-api/session/including.mdx#overview)
+  * [Why include revisions](../../../../document-extensions/revisions/client-api/session/including.mdx#why-include-revisions)
+  * [Including by creation time](../../../../document-extensions/revisions/client-api/session/including.mdx#including-by-creation-time)
+  * [Including by change vector](../../../../document-extensions/revisions/client-api/session/including.mdx#including-by-change-vector)
+  * [Include revisions:](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-loading-document)
+  * [When Loading document](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-loading-document)
+  * [When making a Query](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-making-a-query)
+  * [When making a Raw Query](../../../../document-extensions/revisions/client-api/session/including.mdx#include-revisions-when-making-a-raw-query)
+  * [Syntax](../../../../document-extensions/revisions/client-api/session/including.mdx#syntax)
+  * [Patching the revision change vector](../../../../document-extensions/revisions/client-api/session/including.mdx#patching-the-revision-change-vector)
+
+
+## Overview
+
+#### Why include revisions:
+
+* Including revisions may be useful, for example, when an auditing application loads or
queries for a document. + The document's past revisions can be included with the document to make the document's history available for instant inspection. + +* Once loaded to the session, there are no additional trips to the server when accessing the revisions. + [Getting](../../../../document-extensions/revisions/client-api/session/loading.mdx) a revision that was included with the document will retrieve it directly from the session. + This also holds true when attempting to include revisions but none are found. +#### Including by Creation Time: + +* You can include a single revision by specifying its creation time, see examples below. + +* You can pass local time or UTC, either way the server will convert it to UTC. + +* **If the provided time matches** the creation time of a document revision, this revision will be included. + +* **If no exact match is found**, then the first revision that precedes the specified time will be returned. +#### Including by Change Vector: + +* Each time a document is modified, its [Change Vector](../../../../server/clustering/replication/change-vector.mdx) is updated. + +* When a revision is created, + the revision's change vector is the change vector of the document at the time of the revision's creation. + +* To include single or multiple document revisions by their change vectors: + + * When modifying the document, store its updated change vector in a property in the document. + Can be done by [patching](../../../../document-extensions/revisions/client-api/session/including.mdx#patching-the-revision-change-vector) the document from the Client API or from the Studio. + + * Specify the **path** to this property when including the revisions, see examples below. + + * e.g.: + Each time an employee's contract document is modified (e.g. when their salary is raised), + you can add the current change vector of the document to a dedicated property in the document. + Whenever the time comes to re-evaluate an employee's terms and their contract is loaded, + its past revisions can be easily included with it by their change vectors. 
+
+
+## Include revisions when Loading document
+
+#### Include a revision by Time:
+
+
+
+{`// The revision creation time
+// For example - looking for a revision from last month
+$creationTime = (new DateTime())->sub(new DateInterval("P1M"));
+
+// Load a document:
+$order = $session->load(Order::class, "orders/1-A", function($builder) use ($creationTime) \{
+    return $builder
+        // Pass the revision creation time to 'includeRevisionsBefore'
+        // The revision will be 'loaded' to the session along with the document
+        ->includeRevisionsBefore($creationTime);
+\});
+
+// Get the revision by creation time - it will be retrieved from the SESSION
+// No additional trip to the server is made
+$revision = $session
+    ->advanced()->revisions()->getBeforeDate(Order::class, "orders/1-A", $creationTime);
+`}
+
+
+#### Include revisions by Change Vector:
+
+
+
+{`// Load a document:
+$contract = $session->load(Contract::class, "contracts/1-A", function($builder) \{
+    return $builder
+        // Pass the path to the document property that contains the revision change vector(s)
+        // The revision(s) will be 'loaded' to the session along with the document
+        ->includeRevisions("RevisionChangeVector")
+        ->includeRevisions("RevisionChangeVectors");
+\});
+
+// Get the revision(s) by change vectors - it will be retrieved from the SESSION
+// No additional trip to the server is made
+$revision = $session->advanced()->revisions()->get(Contract::class, $contract->getRevisionChangeVector());
+$revisions = $session->advanced()->revisions()->get(Contract::class, $contract->getRevisionChangeVectors());
+`}
+
+
+
+
+
+{`// Sample Contract document
+class Contract
+\{
+    private ?string $id = null;
+    private ?string $name = null;
+    private ?string $revisionChangeVector = null; // A single change vector
+    private ?array $revisionChangeVectors = null; // A list of change vectors
+
+    public function getId(): ?string
+    \{
+        return $this->id;
+    \}
+
+    public function setId(?string $id): void
+    \{
+        $this->id = $id;
+    \}
+
+    public function getName(): ?string
+    \{
+        return $this->name;
+    \}
+
+    public function setName(?string $name): void
+    \{
+        $this->name = $name;
+    \}
+
+    public function getRevisionChangeVector(): ?string
+    \{
+        return $this->revisionChangeVector;
+    \}
+
+    public function setRevisionChangeVector(?string $revisionChangeVector): void
+    \{
+        $this->revisionChangeVector = $revisionChangeVector;
+    \}
+
+    public function getRevisionChangeVectors(): ?array
+    \{
+        return $this->revisionChangeVectors;
+    \}
+
+    public function setRevisionChangeVectors(?array $revisionChangeVectors): void
+    \{
+        $this->revisionChangeVectors = $revisionChangeVectors;
+    \}
+\}
+`}
+
+
+
+
+## Include revisions when making a Query
+
+#### Include revisions by Time:
+
+
+
+{`// The revision creation time
+// For example - looking for revisions from last month
+$creationTime = (new DateTime())->sub(new DateInterval("P1M"));
+
+// Query for documents:
+$orderDocuments = $session->query(Order::class)
+    ->whereEquals("ShipTo.Country", "Canada")
+    // Pass the revision creation time to 'includeRevisionsBefore'
+    ->include(function($builder) use ($creationTime) \{ return $builder->includeRevisionsBefore($creationTime); \})
+    // For each document in the query results,
+    // the matching revision will be 'loaded' to the session along with the document
+    ->toList();
+
+// Get a revision by its creation time for a document from the query results
+// It will be retrieved from the SESSION - no additional trip to the server is made
+$revision = $session
+    ->advanced()->revisions()->getBeforeDate(Order::class, $orderDocuments[0]->getId(), $creationTime);
+`}
+
+
+#### Include revisions by Change Vector:
+
+
+
+{`// Query for documents:
+$orderDocuments = $session->query(Contract::class)
+    // Pass the path to the document property that contains the revision change vector(s)
+    ->include(function($builder) \{
+        return $builder
+            ->includeRevisions("getRevisionChangeVector")   // Include a single revision
+            ->includeRevisions("getRevisionChangeVectors"); // Include multiple revisions
+    \})
+    // For each document in the query results,
+    // the matching revisions will be 'loaded' to the session along with the document
+    ->toList();
+
+// Get the revision(s) by change vectors - it will be retrieved from the SESSION
+// No additional trip to the server is made
+$revision = $session
+    ->advanced()->revisions()->get(Contract::class, $orderDocuments[0]->getRevisionChangeVector());
+$revisions = $session
+    ->advanced()->revisions()->get(Contract::class, $orderDocuments[0]->getRevisionChangeVectors());
+`}
+
+
+
+* See the _Contract_ class definition [above](../../../../document-extensions/revisions/client-api/session/including.mdx#sample-document).
+
+
+
+## Include revisions when making a Raw Query
+
+* Use `include revisions` in your RQL when making a raw query.
+
+* Pass either the revision creation time or the path to the document property containing the change vector(s);
+  RavenDB will figure out the type of the parameter passed and include the revisions accordingly.
+
+* Aliases (e.g. `from Users as U`) are Not supported by raw queries that include revisions.
+
+#### Include revisions by Time:
+
+
+
+{`// The revision creation time
+// For example - looking for revisions from last month
+$creationTime = (new DateTime())->sub(new DateInterval("P1M"));
+
+// Query for documents with Raw Query:
+$orderDocuments = $session->advanced()
+    // Use 'include revisions' in the RQL
+    ->rawQuery(Order::class, "from Orders include revisions(\\$p0)")
+    // Pass the revision creation time
+    ->addParameter("p0", $creationTime)
+    // For each document in the query results,
+    // the matching revision will be 'loaded' to the session along with the document
+    ->toList();
+
+// Get a revision by its creation time for a document from the query results
+// It will be retrieved from the SESSION - no additional trip to the server is made
+$revision = $session
+    ->advanced()->revisions()->getBeforeDate(Order::class, $orderDocuments[0]->getId(), $creationTime);
+`}
+
+
+#### Include revisions by Change Vector:
+
+
+
+{`// Query for documents with Raw Query:
+$orderDocuments = $session->advanced()
+    // Use 'include revisions' in the RQL
+    ->rawQuery(Contract::class, "from Contracts include revisions(\\$p0, \\$p1)")
+    // Pass the path to the document properties containing the change vectors
+    ->addParameter("p0", "RevisionChangeVector")
+    ->addParameter("p1", "RevisionChangeVectors")
+    // For each document in the query results,
+    // the matching revisions will be 'loaded' to the session along with the document
+    ->toList();
+
+// Get the revision(s) by change vectors - it will be retrieved from the SESSION
+// No additional trip to the server is made
+$revision = $session
+    ->advanced()->revisions()->get(Contract::class, $orderDocuments[0]->getRevisionChangeVector());
+$revisions = $session
+    ->advanced()->revisions()->get(Contract::class, $orderDocuments[0]->getRevisionChangeVectors());
+`}
+
+
+
+* See the _Contract_ class definition [above](../../../../document-extensions/revisions/client-api/session/including.mdx#sample-document).
+
+
+
+## Syntax
+
+
+
+{`// Include a single revision by Time
+public function includeRevisionsBefore(DateTime $before): IncludeBuilderInterface;
+
+// Include a single revision by Change Vector path(s)
+public function includeRevisions(string $changeVectorPaths): IncludeBuilderInterface;
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **before** | `DateTime` | <ul><li>Creation time of the revision to be included.</li><li>Pass local time or UTC.
The server will convert the param to UTC.</li><li>If no revision was created at this time then the first revision that precedes it is returned.</li></ul> | +| **changeVectorPaths** | `string` | <ul><li>The path to the document property that contains
**an array of change vectors** of the revisions to be included.</li></ul> | + + + +## Patching the revision change vector + +* To include revisions when making a query or a raw query, + you need to specify the path to the document property that contains the revision change vector(s). + +* The below example shows how to get and patch a revision change vector to a document property. + + + +{`$session = $store->openSession(); +try \{ + // Get the revisions' metadata for document 'contracts/1-A' + /** @var array $contractRevisionsMetadata */ + $contractRevisionsMetadata = + $session->advanced()->revisions()->getMetadataFor("contracts/1-A"); + + // Get a change vector from the metadata + $changeVector = $contractRevisionsMetadata[array_key_first($contractRevisionsMetadata)]->getString(DocumentsMetadata::CHANGE_VECTOR); + + // Patch the document - add the revision change vector to a specific document property + $session->advanced() + ->patch( "contracts/1-A", "RevisionChangeVector", $changeVector); + + // Save your changes + $session->saveChanges(); +\} finally \{ + $session->close(); +\} +`} + + + +* See the _Contract_ class definition [above](../../../../document-extensions/revisions/client-api/session/including.mdx#sample-document). + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-csharp.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-csharp.mdx new file mode 100644 index 0000000000..004919ec8f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-csharp.mdx @@ -0,0 +1,237 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Using the Advanced Session methods you can **retrieve revisions and their metadata** + from the database for the specified document. + +* These methods can also be executed lazily, see [get revisions lazily](../../../../client-api/session/how-to/perform-operations-lazily.mdx#getrevisions). + +* In this page: + * [Get all revisions](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-all-revisions) + * [Get revisions metadata](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata) + * [Get revisions by creation time](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-creation-time) + * [Get revisions by change vector](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-change-vector) + + +## Get all revisions + +* Use `GetFor` to retrieve all of the revisions currently kept for the specified document. 
+
+**Example**:
+
+
+
+
+{`// Get revisions for document 'orders/1-A'
+// Revisions will be ordered by most recent revision first
+List<Order> orderRevisions = session
+    .Advanced
+    .Revisions
+    .GetFor<Order>(id: "orders/1-A", start: 0, pageSize: 10);
+`}
+
+
+
+
+{`// Get revisions for document 'orders/1-A'
+// Revisions will be ordered by most recent revision first
+List<Order> orderRevisions = await asyncSession
+    .Advanced
+    .Revisions
+    .GetForAsync<Order>(id: "orders/1-A", start: 0, pageSize: 10);
+`}
+
+
+
+
+**Syntax**:
+
+
+
+{`List<T> GetFor<T>(string id, int start = 0, int pageSize = 25);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **id** | `string` | Document ID for which to retrieve revisions |
+| **start** | `int` | First revision to retrieve, used for paging |
+| **pageSize** | `int` | Number of revisions to retrieve per results page |
+
+
+
+## Get revisions metadata
+
+* Use `GetMetadataFor` to retrieve the metadata for all the revisions currently kept for the specified document.
+
+**Example**:
+
+
+
+
+{`// Get revisions' metadata for document 'orders/1-A'
+List<MetadataAsDictionary> orderRevisionsMetadata = session
+    .Advanced
+    .Revisions
+    .GetMetadataFor(id: "orders/1-A", start: 0, pageSize: 10);
+
+// Each item returned is a revision's metadata, as can be verified in the @flags key
+var metadata = orderRevisionsMetadata[0];
+var flagsValue = metadata.GetString(Constants.Documents.Metadata.Flags);
+
+Assert.Contains("Revision", flagsValue);
+`}
+
+
+
+
+{`// Get revisions' metadata for document 'orders/1-A'
+List<MetadataAsDictionary> orderRevisionsMetadata = await asyncSession
+    .Advanced
+    .Revisions
+    .GetMetadataForAsync(id: "orders/1-A", start: 0, pageSize: 10);
+
+// Each item returned is a revision's metadata, as can be verified in the @flags key
+var metadata = orderRevisionsMetadata[0];
+var flagsValue = metadata.GetString(Constants.Documents.Metadata.Flags);
+
+Assert.Contains("Revision", flagsValue);
+`}
+
+
+
+
+**Syntax**:
+
+
+
+{`List<MetadataAsDictionary> GetMetadataFor(string id, int start = 0, int pageSize = 25);
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **id** | `string` | Document ID for which to retrieve revisions' metadata |
+| **start** | `int` | First revision to retrieve metadata for, used for paging |
+| **pageSize** | `int` | Number of revisions to retrieve per results page |
+
+
+
+## Get revisions by creation time
+
+* Use `Get` to retrieve a revision by its **creation time**.
+
+**Example**:
+
+
+
+
+{`// Get a revision by its creation time
+Order revisionFromLastYear = session
+    .Advanced
+    .Revisions
+    // If no revision was created at the specified time,
+    // then the first revision that precedes it will be returned
+    .Get<Order>("orders/1-A", DateTime.Now.AddYears(-1));
+`}
+
+
+
+
+{`// Get a revision by its creation time
+Order revisionFromLastYear = await asyncSession
+    .Advanced
+    .Revisions
+    // If no revision was created at the specified time,
+    // then the first revision that precedes it will be returned
+    .GetAsync<Order>("orders/1-A", DateTime.Now.AddYears(-1));
+`}
+
+
+
+**Syntax**:
+
+
+
+{`T Get<T>(string id, DateTime date);
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **id** | `string` | Document ID for which to retrieve the revision by creation time |
+| **date** | `DateTime` | The revision's creation time |
+
+
+
+## Get revisions by change vector
+
+* Use `Get` to retrieve a revision or multiple revisions by their **change vectors**.
+
+**Example**:
+
+
+
+
+{`// Get revisions metadata
+List<MetadataAsDictionary> revisionsMetadata = session
+    .Advanced
+    .Revisions
+    .GetMetadataFor("orders/1-A", start: 0, pageSize: 25);
+
+// Get the change-vector from the metadata
+var changeVector = revisionsMetadata[0].GetString(Constants.Documents.Metadata.ChangeVector);
+
+// Get the revision by its change-vector
+Order revision = session
+    .Advanced
+    .Revisions
+    .Get<Order>(changeVector);
+`}
+
+
+
+
+{`// Get revisions metadata
+List<MetadataAsDictionary> revisionsMetadata = await asyncSession
+    .Advanced
+    .Revisions
+    .GetMetadataForAsync("orders/1-A", start: 0, pageSize: 25);
+
+// Get the change-vector from the metadata
+var changeVector = revisionsMetadata[0].GetString(Constants.Documents.Metadata.ChangeVector);
+
+// Get the revision by its change-vector
+Order revision = await asyncSession
+    .Advanced
+    .Revisions
+    .GetAsync<Order>(changeVector);
+`}
+
+
+
+
+**Syntax**:
+
+
+
+{`// Get a revision by its change vector
+T Get<T>(string changeVector);
+
+// Get multiple revisions by their change vectors
+Dictionary<string, T> Get<T>(IEnumerable<string> changeVectors);
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **changeVector** | `string` | The revision's change vector |
+| **changeVectors** | `IEnumerable<string>` | Change vectors of multiple revisions |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-java.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-java.mdx
new file mode 100644
index 0000000000..219e516a29
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-java.mdx
@@ -0,0 +1,116 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+There are a few methods that allow you to retrieve revisions from a database:
+
+- **session.advanced().revisions().getFor**
+    - can be used to return all previous revisions for a specified document
+- **session.advanced().revisions().getMetadataFor**
+    - can be used to return metadata of all previous revisions for a specified document
+- **session.advanced().revisions().get**
+    - can be used to retrieve a revision(s) using a change vector(s)
+
+## getFor
+
+### Syntax
+
+
+
+{`<T> List<T> getFor(Class<T> clazz, String id);
+
+<T> List<T> getFor(Class<T> clazz, String id, int start);
+
+<T> List<T> getFor(Class<T> clazz, String id, int start, int pageSize);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **id** | String | document ID for which the revisions will be returned |
+| **start** | int | used for paging |
+| **pageSize** | int | used for paging |
+
+### Example
+
+
+
+{`List<Order> orderRevisions = session
+    .advanced()
+    .revisions()
+    .getFor(Order.class, "orders/1-A", 0, 10);
+`}
+
+
+
+
+
+## getMetadataFor
+
+### Syntax
+
+
+
+{`List<MetadataAsDictionary> getMetadataFor(String id);
+
+List<MetadataAsDictionary> getMetadataFor(String id, int start);
+
+List<MetadataAsDictionary> getMetadataFor(String id, int start, int pageSize);
+`}
+
+
+
+| Parameters | | |
+| ------------- | ------------- | ----- |
+| **id** | String | document ID for which the revisions will be returned |
+| **start** | int | used for paging |
+| **pageSize** | int | used for paging |
+
+### Example
+
+
+
+{`List<MetadataAsDictionary> orderRevisionsMetadata = session
+    .advanced()
+    .revisions()
+    .getMetadataFor("orders/1-A", 0, 10);
+`}
+
+
+
+
+
+## get
+
+### Syntax
+
+
+
+{`<T> T get(Class<T> clazz, String changeVector);
+
+<T> Map<String, T> get(Class<T> clazz, String[] changeVectors);
+`}
+
+
+
+| Parameters | | 
| +| ------------- | ------------- | ----- | +| **changeVector** or **changeVectors**| `String` or `String[]` | one or many revision change vectors | + +### Example + + + +{`Order orderRevision = session + .advanced() + .revisions() + .get(Order.class, orderRevisionChangeVector); +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-nodejs.mdx new file mode 100644 index 0000000000..a7f4b918c7 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-nodejs.mdx @@ -0,0 +1,212 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Using the Advanced Session methods you can **retrieve revisions and their metadata** + from the database for the specified document. + +* These methods can also be executed lazily, see [get revisions lazily](../../../../client-api/session/how-to/perform-operations-lazily.mdx#getrevisions). + +* In this page: + * [Get all revisions](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-all-revisions) + * [Get revisions metadata](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata) + * [Get revisions by creation time](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-creation-time) + * [Get revisions by change vector](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-change-vector) + + +## Get all revisions + +* Use `getFor` to retrieve all of the revisions currently kept for the specified document. +**Example**: + + + +{`// Get revisions for document 'orders/1-A' +const orderRevisions = await session.advanced.revisions + .getFor("orders/1-A", \{ + start: 0, + pageSize: 10 + \}); +`} + + + +**Syntax**: + + + +{`// Available overloads: +await session.advanced.revisions.getFor(id); +await session.advanced.revisions.getFor(id, options); +`} + + + +| Parameters | Type | Description | +| - | - |- | +| **id** | string | Document ID for which to retrieve revisions | +| **options** | options object | Used for paging | + + + +{`// options object +\{ + start, // The first revision to retrieve, used for paging. Default is 0. + pageSize // Number of revisions to retrieve per results page. Default is 25. +\} +`} + + + +| Return value | | +| - | - | +| `Promise` | A `Promise` resolving to the document's revisions.
Revisions will be ordered by most recent revision first. | + + + +## Get revisions metadata + +* Use `getMetadataFor` to retrieve the metadata for all the revisions currently kept for the specified document. +**Example**: + + + +{`// Get revisions' metadata for document 'orders/1-A' +const orderRevisionsMetadata = await session.advanced.revisions + .getMetadataFor("orders/1-A", \{ + start: 0, + pageSize: 10 + \}); + +// Each item returned is a revision's metadata, as can be verified in the @flags key +const metadata = orderRevisionsMetadata[0]; +const flagsValue = metadata[CONSTANTS.Documents.Metadata.FLAGS]; + +assertThat(flagsValue).contains("Revision"); +`} + + + +**Syntax**: + + + +{`// Available overloads: +await session.advanced.revisions.getMetadataFor(id); +await session.advanced.revisions.getMetadataFor(id, options); +`} + + + +| Parameters | Type | Description | +| - | - |- | +| **id** | string | Document ID for which to retrieve revisions' metadata | +| **options** | options object | Used for paging | + + + +{`// options object +\{ + start, // The first revision to retrieve, used for paging. Default is 0. + pageSize // Number of revisions to retrieve per results page. Default is 25. +\} +`} + + + +| Return value | | +| - | - | +| `Promise` | A `Promise` resolving to a list of the revisions metadata. | + + + +## Get revisions by creation time + +* Use `get` to retrieve a revision by its **creation time**. +**Example**: + + + +{`// Creation time to use, e.g. last year: +const creationTime = new Date(); +creationTime.setFullYear(creationTime.getFullYear() - 1); + +// Get a revision by its creation time +// If no revision was created at the specified time, +// then the first revision that precedes it will be returned +const orderRevision = await session.advanced.revisions + .get("orders/1-A", creationTime.toLocaleDateString()); +`} + + + +**Syntax**: + + + +{`await session.advanced.revisions.get(id, date); +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **id** | string | Document ID for which to retrieve the revision by creation time | +| **date** | string | The revision's creation time | + +| Return value | | +| - | - | +| `Promise` | A `Promise` resolving to the revision.
If no revision was created at the specified time, then the first revision that precedes it will be returned. | + + + +## Get revisions by change vector + +* Use `get` to retrieve a revision or multiple revisions by their **change vectors**. +**Example**: + + + +{`// Get revisions metadata +const revisionsMetadata = await session.advanced.revisions + .getMetadataFor("orders/1-A", \{ + start: 0, + pageSize: 25 + \}); + +// Get the change-vector from the metadata +var changeVector = revisionsMetadata[0][CONSTANTS.Documents.Metadata.CHANGE_VECTOR]; + +// Get the revision by its change-vector +const orderRevision = await session.advanced.revisions + .get(changeVector); +`} + + + +**Syntax**: + + + +{`// Available overloads: +await session.advanced.revisions.get(changeVector); +await session.advanced.revisions.get(changeVectors); +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **changeVector** | string | The revision's change vector | +| **changeVectors** | string[] | Change vectors of multiple revisions | + +| Return value | | +| - | - | +| `Promise` | A `Promise` resolving to the matching revision(s). | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-php.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-php.mdx new file mode 100644 index 0000000000..3c79cd1e7c --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-php.mdx @@ -0,0 +1,168 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Using the Advanced Session methods you can **retrieve revisions and their metadata** + from the database for the specified document. + +* These methods can also be executed lazily, see [get revisions lazily](../../../../client-api/session/how-to/perform-operations-lazily.mdx#getrevisions). + +* In this page: + * [Get all revisions](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-all-revisions) + * [Get revisions metadata](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata) + * [Get revisions by creation time](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-creation-time) + * [Get revisions by change vector](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-change-vector) + + +## Get all revisions + +Use `getFor` to retrieve all of the revisions currently kept for the specified document. 
+ +* **Example**: + + +{`// Get revisions for document 'orders/1-A' +// Revisions will be ordered by most recent revision first +/** @var array $orderRevisions */ +$orderRevisions = $session + ->advanced() + ->revisions() + ->getFor(Order::class, id: "orders/1-A", start: 0, pageSize: 10); +`} + + +* **Syntax**: + + +{`public function getFor(?string $className, ?string $id, int $start = 0, int $pageSize = 25): array; +`} + + + + | Parameters | Type | Description | + | - | - |- | + | **id** | `string` | Document ID for which to retrieve revisions | + | **className** | `string` | The type of the object whose revisions we want to retrieve | + | **start** | `int` | First revision to retrieve, used for paging | + | **pageSize** | `int` | Number of revisions to retrieve per results page | + + + +## Get revisions metadata + +Use `getMetadataFor` to retrieve the metadata for all the revisions currently kept for the specified document. + +* **Example**: + + +{`// Get revisions' metadata for document 'orders/1-A' +/** @var array $orderRevisionsMetadata */ +$orderRevisionsMetadata = $session + ->advanced() + ->revisions() + ->getMetadataFor(id: "orders/1-A", start: 0, pageSize: 10); + +// Each item returned is a revision's metadata, as can be verified in the @flags key +$metadata = $orderRevisionsMetadata[0]; +$flagsValue = $metadata[DocumentsMetadata::FLAGS]; + +$this->assertContains("Revision", $flagsValue); +`} + + +* **Syntax**: + + +{`public function getMetadataFor(?string $id, int $start = 0, int $pageSize = 25): array; +`} + + + + | Parameters | Type | Description | + | - | - |- | + | **id** | `string` | Document ID for which to retrieve revisions' metadata | + | **start** | `int` | First revision to retrieve metadata for, used for paging | + | **pageSize** | `int` | Number of revisions to retrieve per results page | + + + +## Get revisions by creation time + +Use `getBeforeDate` to retrieve a revision by its **creation time**. + +* **Example**: + + +{`// Get a revision by its creation time +$revisionFromLastYear = $session + ->advanced() + ->revisions() + // If no revision was created at the specified time, + // then the first revision that precedes it will be returned + ->getBeforeDate(Order::class, "orders/1-A", (new DateTime())->sub(new DateInterval("P1Y"))); +`} + + +* **Syntax**: + + +{`function getBeforeDate(?string $className, ?string $id, DateTime $date): ?object; +`} + + + + | Parameter | Type | Description | + | - | - | - | + | **id** | `string` | The ID of the document whose revisions we want to retrieve by creation time | + | **date** | `DateTime` | Revision creation time | + | **className** | `string` | The type of the object whose revisions we want to retrieve | + + + + +## Get revisions by change vector + +To retrieve a revision or multiple revisions by **change vectors** use `getMetadataFor`, +extract the change vector from the metadata, and `get` the revision using the change vector. 
+ +* **Example**: + + +{`// Get revisions metadata +/** @var array $revisionsMetadata */ +$revisionsMetadata = $session + ->advanced() + ->revisions() + ->getMetadataFor("orders/1-A", start: 0, pageSize: 25); + +// Get the change-vector from the metadata +$changeVector = $revisionsMetadata[0][DocumentsMetadata::CHANGE_VECTOR]; + +// Get the revision by its change-vector +$revision = $session + ->advanced() + ->revisions() + ->get(Order::class, $changeVector); +`} + + +* **Syntax**: + + +{`// Get a revision(s) by its change vector(s) +public function get(?string $className, null|string|array|StringArray $changeVectors): mixed; +`} + + + + | Parameter | Type | Description | + | - | - | - | + | **changeVectors** | `null` or `string` or `array` or `StringArray` | A list of change vector strings | + | **className** | `className` | The types of the objects whose revisions we want to retrieve | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-python.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-python.mdx new file mode 100644 index 0000000000..6bb6368f60 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/_loading-python.mdx @@ -0,0 +1,156 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Using the Advanced Session methods you can **retrieve revisions and their metadata** + from the database for the specified document. + +* These methods can also be executed lazily, see [get revisions lazily](../../../../client-api/session/how-to/perform-operations-lazily.mdx#getrevisions). + +* In this page: + * [Get all revisions](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-all-revisions) + * [Get revisions metadata](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-metadata) + * [Get revisions by creation time](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-creation-time) + * [Get revisions by change vector](../../../../document-extensions/revisions/client-api/session/loading.mdx#get-revisions-by-change-vector) + + +## Get all revisions + +Use `get_for` to retrieve all of the revisions currently kept for the specified document. + +* **Example**: + + +{`# Get revisions for document 'orders/1-A' +# Revisions will be ordered by most recent revision first +order_revisions = session.advanced.revisions.get_for("orders/1-A", Order, 0, 10) +`} + + +* **Syntax**: + + +{`def get_for(self, id_: str, object_type: Type[_T] = None, start: int = 0, page_size: int = 25) -> List[_T]: ... +`} + + + + | Parameters | Type | Description | + | - | - |- | + | **id_** | `str` | Document ID for which to retrieve revisions | + | **object_type** | `Type[_T]` | The type of the object whose revisions we want to retrieve | + | **start** | `int` | First revision to retrieve, used for paging | + | **page_size** | `int` | Number of revisions to retrieve per results page | + + + +## Get revisions metadata + +Use `get_metadata_for` to retrieve the metadata for all the revisions currently kept for the specified document. 
+ +* **Example**: + + +{`# Get 'revisions' metadata for document 'orders/1-A' +order_revisions_metadata = session.advanced.revisions.get_metadata_for("orders/1-A", 0, 10) + +# Each item returned is a revision's metadata, as can be verified in the @flags key +metadata = order_revisions_metadata[0] +flags_value = metadata[constants.Documents.Metadata.FLAGS] + +self.assertIn("Revision", flags_value) +`} + + +* **Syntax**: + + +{`def get_metadata_for(self, id_: str, start: int = 0, page_size: int = 25) -> List["MetadataAsDictionary"]: ... +`} + + + + | Parameters | Type | Description | + | - | - |- | + | **id_** | `str` | Document ID for which to retrieve revisions' metadata | + | **start** | `int` | First revision to retrieve metadata for, used for paging | + | **page_size** | `int` | Number of revisions to retrieve per results page | + + + +## Get revisions by creation time + +Use `get_by_before_date` to retrieve a revision by its **creation time**. + +* **Example**: + + +{`# Get a revision by its creation time +revision_from_last_year = ( + session.advanced.revisions + # If no revision was created at the specified time, + # then the first revision that precedes it will be returned + .get_by_before_date("orders/1-A", datetime.datetime.utcnow() - datetime.timedelta(days=365)) +) +`} + + +* **Syntax**: + + +{`def get_by_before_date(self, id_: str, before_date: datetime.datetime, object_type: Type[_T] = None) -> _T: ... +`} + + + + | Parameter | Type | Description | + | - | - | - | + | **id_** | `str` | The ID of the document whose revisions we want to retrieve by creation time | + | **before_date** | `datetime.datetime` | Revision creation time | + | **object_type** | `Type[_T]` | The type of the object whose revisions we want to retrieve | + + + + +## Get revisions by change vector + +To retrieve a revision or multiple revisions by **change vectors**, get the +change vector using `get_metadata_for` and the revisions using `get_by_change_vector`. + +* **Example**: + + +{`# Get revisions metadata +revisions_metadata = session.advanced.revisions.get_metadata_for("orders/1-A", 0, 25) + +# Get the change-vector from the metadata +change_vector = revisions_metadata[0][constants.Documents.Metadata.CHANGE_VECTOR] + +# Get the revision by its change-vector +revision = session.advanced.revisions.get_by_change_vector(change_vector, Order) +`} + + +* **Syntax**: + + +{`# Get a revision by its change vector +def get_by_change_vector(self, change_vector: str, object_type: Type[_T] = None) -> _T: ... + +# Get multiple revisions by their change vectors +def get_by_change_vectors(self, change_vectors: List[str], object_type: Type[_T] = None) -> Dict[str, _T]: ... 
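+
+# A hedged usage sketch (illustrative names, not from the API reference):
+# revision = session.advanced.revisions.get_by_change_vector(cv, Order)
+# revisions_by_cv = session.advanced.revisions.get_by_change_vectors([cv_1, cv_2], Order)
+# 'revisions_by_cv' maps each change vector to its matching revision.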
+`} + + + + | Parameter | Type | Description | + | - | - | - | + | **change_vectors** | `List[str]` | A list of change vector strings | + | **object_type** | `Type[_T]` | The types of the objects whose revisions we want to retrieve | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/counting.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/counting.mdx new file mode 100644 index 0000000000..f35f07fb71 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/counting.mdx @@ -0,0 +1,54 @@ +--- +title: "Count Revisions" +hide_table_of_contents: true +sidebar_label: Count Revisions +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CountingCsharp from './_counting-csharp.mdx'; +import CountingPython from './_counting-python.mdx'; +import CountingPhp from './_counting-php.mdx'; +import CountingNodejs from './_counting-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/including.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/including.mdx new file mode 100644 index 0000000000..a49cda7df3 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/including.mdx @@ -0,0 +1,49 @@ +--- +title: "Include Revisions" +hide_table_of_contents: true +sidebar_label: Include Revisions +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IncludingCsharp from './_including-csharp.mdx'; +import IncludingPhp from './_including-php.mdx'; +import IncludingNodejs from './_including-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "php", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/loading.mdx b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/loading.mdx new file mode 100644 index 0000000000..a802572b7a --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/client-api/session/loading.mdx @@ -0,0 +1,59 @@ +--- +title: "Get Revisions" +hide_table_of_contents: true +sidebar_label: Get Revisions +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import LoadingCsharp from './_loading-csharp.mdx'; +import LoadingJava from './_loading-java.mdx'; +import LoadingPython from './_loading-python.mdx'; +import LoadingPhp from './_loading-php.mdx'; +import LoadingNodejs from './_loading-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/overview.mdx b/versioned_docs/version-7.1/document-extensions/revisions/overview.mdx new file mode 100644 index 0000000000..600aa0d606 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/overview.mdx @@ -0,0 +1,47 @@ +--- +title: "Document Revisions Overview" 
+hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OverviewCsharp from './_overview-csharp.mdx'; +import OverviewPython from './_overview-python.mdx'; +import OverviewNodejs from './_overview-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/revert-revisions.mdx b/versioned_docs/version-7.1/document-extensions/revisions/revert-revisions.mdx new file mode 100644 index 0000000000..4e072851d0 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/revert-revisions.mdx @@ -0,0 +1,207 @@ +--- +title: "Revert Documents to Revisions" +hide_table_of_contents: true +sidebar_label: Revert Documents to Revisions +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Revert Documents to Revisions + + +* You can **revert the database to its state at a specified point in time** + by reverting documents to their revisions as they were at that time. + +* You can choose whether to revert documents from specific collections + or revert documents from all collections as explained below. + +* To revert a single document (or multiple documents) to a specific revision, + see [Revert document to revision operation](../../document-extensions/revisions/client-api/operations/revert-document-to-revision.mdx). + +* Being able to restore the database to a previous state can simplify auditing, enhance understanding of changes made over time, + and facilitate instant recovery without the need to search and retrieve a stored backup. + +* In this page: + * [The revisions settings view](../../document-extensions/revisions/revert-revisions.mdx#the-revisions-settings-view) + * [Revert documents](../../document-extensions/revisions/revert-revisions.mdx#revert-documents) + * [Point in time](../../document-extensions/revisions/revert-revisions.mdx#point-in-time) + * [Time Window](../../document-extensions/revisions/revert-revisions.mdx#time-window) + * [Content reverted](../../document-extensions/revisions/revert-revisions.mdx#content-reverted) + + +## The revisions settings view + +![Document Revisions View](./assets/revert-revisions-1.png) + +1. **The revisions settings view**: + In the Studio, go to _Settings > Document Revisions_. From this view you can: + * [Set](../../studio/database/settings/document-revisions.mdx#revisions-configuration) revision configurations + * [Enforce](../../studio/database/settings/document-revisions.mdx#enforce-configuration) revision configurations + * [Revert](../../document-extensions/revisions/revert-revisions.mdx#revert-documents) documents to revisions + +2. **Revision configurations**: + * The ability to revert documents to their past revisions depends on revisions being created. + * When no default configuration or collection-specific configurations are defined and enabled, + no revisions will be created for any document. + * Make sure that a configuration that suits your needs is defined. + +3. 
**Revert documents**:
+  Click the _Revert Revisions_ button to revert documents from all or selected collections to a specified point in time.
+  Learn more [below](../../document-extensions/revisions/revert-revisions.mdx#revert-documents).
+
+
+
+## Revert documents
+
+![Revert Revisions](./assets/revert-revisions-2.png)
+
+1. Enter the **Point in Time** to which you would like to revert the documents. Learn more [below](../../document-extensions/revisions/revert-revisions.mdx#point-in-time).
+
+2. Enter the **Time Window**. Learn more [below](../../document-extensions/revisions/revert-revisions.mdx#time-window).
+
+3. Revert **All collections** in the database, or toggle to select **Specific collections** to revert.
+
+
+
+## Point in time
+
+![Set point in time](./assets/set-point-in-time.png)
+
+* Select or enter the point in time (LOCAL) to revert documents to.
+  The information text on the right will display the expected behavior in [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time).
+* When the revert process is executed:
+
+
+
+  Documents created **AFTER** the specified Point in Time will be **moved to the Revision Bin**.
+
+
+
+
+
+  Documents created **BEFORE** this time that were modified after this time:
+
+  * Any of these documents that own revisions will be **reverted**
+    to the revision created at the specified Point in Time, or to the latest revision preceding this time.
+
+  * When [setting a limit](../../studio/database/settings/document-revisions.mdx#limit-revisions) on the number of revisions that are kept (by number or by age),
+    only the most recent revisions are kept for the document.
+    If all of these revisions were created AFTER the Point in Time, then the oldest revision will be
+    the one we revert to, even though that revision is newer than the specified time.
+    By doing so we make sure that all the documents that existed before the specified Point in Time
+    still exist after the revert process.
+
+
+
+
+
+  Documents created **BEFORE** this time that were Not modified after this time are **not reverted**.
+
+
+
+
+
+## Time window
+
+* Revisions are ordered in the _revisions storage_ by their change-vector, and Not by creation time.
+
+* When reverting the database (or selected collections) to some previous date,
+  RavenDB will iterate through the revisions, starting from the most recent revision,
+  and search for revisions that should be restored.
+  For each revision found - its matching document will be reverted to that revision.
+
+* To avoid conducting unnecessarily long searches, or reverting to revisions that are too old,
+  RavenDB sets a limit on this search.
+  The search stops once we hit a revision that was created prior to: `Point in Time - Time Window`
+  (see the short code sketch after the example walkthrough below).
+
+* The default Time Window value is 96 hours (4 days).
+
+
+
+**Example**:
+
+* **Point in Time** to revert to: `15.2.2023 02:00`
+  Documents will be reverted to their latest revision that was created prior to this Point in Time.
+
+* **Time Window**: `4 days`
+  We will stop searching the revisions once we hit a revision with creation time prior to:
+  `11.2.2023 02:00`
+
+* **Sample revisions**:
+  * The list below contains revisions of all documents; it is not just revisions of a single document.
+  * The revisions are Not ordered by creation time; the order is set by their change-vector.
+ +| Revision | Creation time | | +|---------------|-----------------|--------------------------------------------------------------------------------------------------------| +| 1) `Users/1` | 20.2.2023 01:00 | | +| 2) `Users/5` | 19.2.2023 01:00 | | +| 3) `Users/3` | 14.2.2023 01:00 | => Document Users/3 stays with this revision content | +| 4) `Users/4` | 17.2.2023 01:00 | => Document Users/4 will be moved to Revisions Bin | +| 5) `Users/1` | 18.2.2023 01:00 | | +| 6) `Users/1` | 13.2.2023 01:00 | => Document Users/1 will be reverted to this revision | +| 7) `Users/5` | 11.2.2023 01:00 | => Document Users/5 will be reverted to this revision +
STOP the search for more Users/5 revisions | +| 8) `Users/5` | 11.2.2023 03:00 | | +| 9) `Users/9` | 10.2.2023 01:00 | => Document Users/9 will Not be reverted to this revision +
STOP the search in this list |
+| 10) `Users/6` | 11.2.2023 01:00 | |
+| . . .         |                 | |
+
+* (line 1)
+  We iterate on the revisions starting from the `Users/1` revision created on `20.2.2023 01:00`.
+  We search for a relevant revision for document `Users/1` by iterating on all `Users/1` revisions.
+  The revision that will be restored for `Users/1` is the one from `13.2.2023 01:00` (line 6),
+  since it is the latest one prior to `15.2.2023 02:00`.
+
+* (line 2)
+  Next, we search for a relevant revision for document `Users/5` by iterating on all `Users/5` revisions,
+  and we reach line 7.
+  Here the search for `Users/5` revisions STOPS, since this revision was created prior to `11.2.2023 02:00`.
+  We revert the document to this revision, since it is the latest one prior to `15.2.2023 02:00`.
+  The following revision for `Users/5`, from `11.2.2023 03:00` (line 8), is Not restored.
+
+* (line 3)
+  Next, document `Users/3` is Not modified by the revert process, since it wasn't modified after `15.2.2023 02:00`.
+
+* (line 4)
+  Next, `Users/4` has NO revisions prior to `15.2.2023 02:00`,
+  which means it was created AFTER this Point in Time,
+  so this document is moved to the Revisions Bin.
+
+* (line 9)
+  Next, we reach the `Users/9` revision created on `10.2.2023 01:00`, which is PRIOR to `11.2.2023 02:00`.
+  The search on this list now STOPS.
+  No further revisions are taken into account, not even the `Users/6` revision created on `11.2.2023 01:00`.
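+
+Below is a minimal sketch (not RavenDB API code) of the cutoff rule illustrated above.
+The variable names are ours; the dates are taken from the example:
+
+
+
+{`using System;
+
+// Point in Time to revert to, and the Time Window:
+var pointInTime = new DateTime(2023, 2, 15, 2, 0, 0);
+var timeWindow = TimeSpan.FromDays(4);
+
+// The search stops once it reaches a revision created before this cutoff:
+var searchCutoff = pointInTime - timeWindow; // 11.2.2023 02:00
+
+bool StopsSearch(DateTime revisionCreationTime) => revisionCreationTime < searchCutoff;
+
+Console.WriteLine(StopsSearch(new DateTime(2023, 2, 10, 1, 0, 0))); // True  - line 9 stops the search
+Console.WriteLine(StopsSearch(new DateTime(2023, 2, 13, 1, 0, 0))); // False - line 6 is still searched
+`}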
+ + + +## Content reverted + + + +* When reverting a document to one of its revisions, RavenDB actually creates a new revision for the document. + The content of this new revision is a copy of the historical revision content, and it becomes the current version of the document. + +* Database items other than documents, such as ongoing tasks, indexes, and compare-exchange, + are Not reverted by this process. + +* Document extensions: + * **Time series** + Time series data is Not reverted. Learn more [here](../../document-extensions/revisions/revisions-and-other-features.mdx#reverted-data-1). + * **Attachments** + When a document is reverted to a revision that owns attachments, + the attachments are restored to their state when the revision was created. + * **Counters** + When a document is reverted to a revision that owns counters, + the counters are restored to functionality along with their values from that revision. + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/revisions-and-other-features.mdx b/versioned_docs/version-7.1/document-extensions/revisions/revisions-and-other-features.mdx new file mode 100644 index 0000000000..bb670eb7ac --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/revisions-and-other-features.mdx @@ -0,0 +1,64 @@ +--- +title: "Revisions and Other Features" +hide_table_of_contents: true +sidebar_label: Revisions and Other Features +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import RevisionsAndOtherFeaturesJava from './_revisions-and-other-features-java.mdx'; +import RevisionsAndOtherFeaturesCsharp from './_revisions-and-other-features-csharp.mdx'; +import RevisionsAndOtherFeaturesPython from './_revisions-and-other-features-python.mdx'; +import RevisionsAndOtherFeaturesPhp from './_revisions-and-other-features-php.mdx'; +import RevisionsAndOtherFeaturesNodejs from './_revisions-and-other-features-nodejs.mdx'; + +export const supportedLanguages = ["java", "csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/revisions/revisions-bin-cleaner.mdx b/versioned_docs/version-7.1/document-extensions/revisions/revisions-bin-cleaner.mdx new file mode 100644 index 0000000000..e9edfc38c1 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/revisions-bin-cleaner.mdx @@ -0,0 +1,155 @@ +--- +title: "Revisions Bin Cleaner" +hide_table_of_contents: true +sidebar_label: Revisions Bin Cleaner +sidebar_position: 4 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Revisions Bin Cleaner + + +* The [Revisions Bin](../../studio/database/document-extensions/revisions/revisions-bin.mdx) stores revisions of deleted documents, ensuring they remain accessible. + +* While you can manually delete these revisions directly from the Revisions Bin, + you can also set up a cleaner task to remove them automatically, as described below. + +* For a complete list of deletion methods, see [All ways to delete revisions](../../studio/database/document-extensions/revisions/all-revisions.mdx#all-ways-to-delete-revisions). 
+ +* In this page: + * [The revisions bin cleaner](../../document-extensions/revisions/revisions-bin-cleaner.mdx#the-revisions-bin-cleaner) + * [Setting the revisions bin cleaner - from the Studio](../../document-extensions/revisions/revisions-bin-cleaner.mdx#setting-the-revisions-bin-cleaner---from-the-studio) + * [Setting the revisions bin cleaner - from the Client API](../../document-extensions/revisions/revisions-bin-cleaner.mdx#setting-the-revisions-bin-cleaner---from-the-client-api) + * [Syntax](../../document-extensions/revisions/revisions-bin-cleaner.mdx#syntax) + + +## The revisions bin cleaner + +Each entry in the [Revisions Bin](../../studio/database/document-extensions/revisions/revisions-bin.mdx) represents a "Delete Revision", +which is a revision that marks a document as deleted and provides access to the revisions that were created for the document before it was deleted. + + +When the cleaner removes a "Delete Revision" entry, +ALL the revisions associated with the deleted document are **permanently deleted**. + +* The Revisions Bin Cleaner is configured with the following parameters: + * **Frequency** - How often the cleaner runs. + * **Entries age to keep** - The cleaner deletes revision entries older than this value. + +* The cleaner task can be managed from: + * The [Revisions bin cleaner view](../../document-extensions/revisions/revisions-bin-cleaner.mdx#setting-the-revisions-bin-cleaner---from-the-studio) in the Studio + * The [Client API](../../document-extensions/revisions/revisions-bin-cleaner.mdx#setting-the-revisions-bin-cleaner---from-the-client-api). + +* When working with a secure server: + * Configuring the Revisions Bin Cleaner is logged in the [audit log](../../server/security/audit-log/audit-log-overview.mdx). + * Deleting revisions is only available to a client certificate with a security clearance of [Database Admin](../../server/security/authorization/security-clearance-and-permissions.mdx#section) or higher. + + + +## Setting the revisions bin cleaner - from the Studio + +![Revisions bin cleaner view](./assets/revisions-bin-cleaner.png) + +1. Go to **Settings > Revisions Bin Cleaner** +2. Toggle ON to enable the cleaner task. +3. Set the minimum entries age to keep: + * When toggled ON: + * Revisions Bin entries older than this value will be deleted. + * Default: `30` days. + * When toggled OFF: + * ALL Revisions Bin entries will be deleted. +4. Set the custom cleaner frequency: + * Define how often (in seconds) the Revisions Bin Cleaner runs. + * Default: `300` seconds (5 minutes). + + + +## Setting the revisions bin cleaner - from the Client API + +* Use `ConfigureRevisionsBinCleanerOperation` to configure the Revisions Bin Cleaner from the Client API. + +* By default, the operation will be applied to the [default database](../../client-api/setting-up-default-database.mdx). + To operate on a different database see [switch operations to different database](../../client-api/operations/how-to/switch-operations-to-a-different-database.mdx). + +* In this example, we enable the cleaner and configure its execution frequency and retention policy. 
+ + + + +{`//Define the revisions bin cleaner configuration +var config = new RevisionsBinConfiguration() +{ + // Enable the cleaner + Disabled = false, + + // Set the cleaner execution frequency + CleanerFrequencyInSec = 24 * 60 * 60, // one day (in seconds) + + // Revisions bin entries older than the following value will be deleted + MinimumEntriesAgeToKeepInMin = 24 * 60 // one day (in minutes) +}; + +// Define the operation +var configRevisionsBinCleanerOp = new ConfigureRevisionsBinCleanerOperation(config); + +// Execute the operation by passing it to Maintenance.Send +store.Maintenance.Send(configRevisionsBinCleanerOp); +`} + + + + +{`var config = new RevisionsBinConfiguration() +{ + Disabled = false, + CleanerFrequencyInSec = 24 * 60 * 60, + MinimumEntriesAgeToKeepInMin = 24 * 60 +}; + +var configRevisionsBinCleanerOp = new ConfigureRevisionsBinCleanerOperation(config); +await store.Maintenance.SendAsync(configRevisionsBinCleanerOp); +`} + + + + + + +## Syntax + + + +{`public ConfigureRevisionsBinCleanerOperation(RevisionsBinConfiguration configuration); +`} + + + + +{`public class RevisionsBinConfiguration +\{ + // Set to true to enable the revisions bin cleaner. + // Default: false (cleaner is disabled). + public bool Disabled \{ get; set; \} + + // The minimum age (in minutes) for revisions-bin entries to be kept in the database. + // The cleaner deletes entries older than this value. + // When set to 0: ALL revisions-bin entries will be removed from the Revisions Bin + // Default: 30 days. + public int MinimumEntriesAgeToKeepInMin \{ get; set; \} + + // The frequency (in seconds) at which the revisions bin cleaner executes. + // Default: 300 seconds (5 minutes). + public long CleanerFrequencyInSec \{ get; set; \} = 5 * 60; +\} +`} + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/revisions/troubleshooting.mdx b/versioned_docs/version-7.1/document-extensions/revisions/troubleshooting.mdx new file mode 100644 index 0000000000..c397a9db6e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/revisions/troubleshooting.mdx @@ -0,0 +1,107 @@ +--- +title: "Revisions Troubleshooting" +hide_table_of_contents: true +sidebar_label: Troubleshooting +sidebar_position: 5 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Revisions Troubleshooting + + +* In this page: + * [`ThrowRevisionKeyTooBig` exception](../../document-extensions/revisions/troubleshooting.mdx#throwrevisionkeytoobig-exception) + + +## `ThrowRevisionKeyTooBig` exception + +RavenDB allows **document revisions** and their **tombstones** to have IDs of up to **1,536 bytes**. +A `ThrowRevisionKeyTooBig` exception will be thrown when an attempt is made to store a revision or a revision tombstone +whose ID length exceeds this limit. + +* **How does a revision ID become too long?** + * RavenDB servers base the IDs they give revisions (and their tombstones) + on the revisions' [change vectors](../../server/clustering/replication/change-vector.mdx). + * Change vectors, on their part, are not limited and may grow [beyond the 1,536 bytes limit](../../client-api/operations/maintenance/clean-change-vector.mdx). 
+ * It may therefore happen that a server would try to give a revision an ID, find its + change vector longer than 1,536 bytes, and fail with a `ThrowRevisionKeyTooBig` exception. +* **How are Change Vectors bloated?** + * A revision's change vector is comprised of the IDs of databases that have handled the revision + over time. + * In some cases, such as transferring a revision via [import](../../studio/database/tasks/import-data/import-from-ravendb.mdx) + or [external replication](../../server/ongoing-tasks/external-replication.mdx), the ID of the + revision's Source database is no longer needed but is still added to the revision's change + vector on the Destination database. + * Repeatedly transferring a revision this way may bloat its change vector beyond the 1,536 bytes + limit. When the destination database attempts to give such a revision an ID to store it by, + it will fail with a `ThrowRevisionKeyTooBig` exception. +* **What to do?** + * Revision **IDs** can be shortened by minimizing revision **change vectors**. + * To shorten revision change vectors, register IDs of databases that are irrelevant to this + server via Studio's **Unused Database IDs** view, as shown below. IDs listed in this view + will be omitted from revision change vectors. + + ![Unused Database IDs List](./assets/troubleshooting_rev_unused-db-IDs.png) + + 1. **Unused Database IDs** + Click to open the **Unused Database IDs** view. + 2. **Save** + Click to save the current list of unused IDs. + 3. **Used IDs** + IDs of databases that are currently used by this database. + 4. **Enter database ID to add** + Use this bar to manually enter an unused ID. + Click the **Add ID** button to add the ID to the list. + 5. **List of unused IDs.** + To remove an ID from the list, click the trash bin to its right. + 6. **IDs that may be added to the list** + A. an ID that was already added to the list. + B. an ID that hasn't been added to the list yet. Click it to add the ID to the list. + +* **What to be aware of:** + When adding a database ID to the **Unused Database IDs** list via studio: + * **Do not** add IDs of databases that are currently in use. + The ID of a RavenDB database can be found in the Studio > **Stats** view. + + ![Studio Stats: Database ID](./assets/troubleshooting_rev_stats-DB-ID.png) + + * If an external replication task is running: + **Do not** add the IDs of databases that are used by the destination database. + **Add** the unused IDs on the **destination** database first, to prevent conflicts. + + * **Do not** use the IDs indicated by the database record `DatabaseTopologyIdBase64` and + `ClusterTransactionIdBase64` properties. + Find these IDs using the Studio > Settings > **Database Record** view. + + ![Database Record](./assets/troubleshooting_rev_db-record.png) +#### When is this check Enabled: + +* **New databases only** + Checking revisions ID length is enabled only for **new databases**. + * A database is regarded as **new**, and its revisions ID length **will** be checked, if + its version is not defined in the database record or the version is `6.0.107` or newer. + * Revisions ID length will **not** be checked for databases older than `6.0.107`. +* **Imported databases** + Importing a database is always regarded as the creation of a new database. + An exception **will** therefore be thrown if the ID of an imported revision + exceeds 1,536 bytes, regardless of the imported revision's database version. 
+* **Restoring database from backup**
+  * Revision ID length **will** be checked if the database version is not defined in its
+    restored database record or if the version is `6.0.107` or newer.
+  * Revision ID length will **not** be checked when restoring databases older than `6.0.107`.
+* **Restoring database from a snapshot**
+  Revision ID length will not be checked while restoring a snapshot, since snapshots are
+  restored as an image. If revision IDs longer than 1,536 bytes exist in the restored database,
+  they are in it because the database is of an older version than `6.0.107` and doesn't perform
+  this check.
+* **Receiving a revision via replication**
+  The check is not performed when receiving a revision or a revision tombstone via replication.
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/_category_.json
new file mode 100644
index 0000000000..3984ef32c1
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 1,
+    "label": "Time Series"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_design-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_design-csharp.mdx
new file mode 100644
index 0000000000..2ca3fb5f8c
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/_design-csharp.mdx
@@ -0,0 +1,174 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Time series are sequences of numerical values, associated with timestamps and sorted chronologically.
+
+* RavenDB time series are stored and managed as document extensions, achieving much greater speed and efficiency compared to storing them as JSON-formatted data within a document.
+
+* In this page:
+  * [Time series architecture](../../document-extensions/timeseries/design.mdx#time-series-architecture)
+    * [Time series as a document extension](../../document-extensions/timeseries/design.mdx#time-series-as-a--document-extension)
+    * [The `HasTimeSeries` flag](../../document-extensions/timeseries/design.mdx#the--flag)
+    * [The time series entry](../../document-extensions/timeseries/design.mdx#the-time-series-entry)
+    * [Segmentation](../../document-extensions/timeseries/design.mdx#segmentation)
+    * [Compression](../../document-extensions/timeseries/design.mdx#compression)
+  * [Updating time series](../../document-extensions/timeseries/design.mdx#updating-time-series)
+    * [Document change](../../document-extensions/timeseries/design.mdx#document-change)
+    * [No conflicts](../../document-extensions/timeseries/design.mdx#no-conflicts)
+    * [Transactions](../../document-extensions/timeseries/design.mdx#transactions)
+    * [Case insensitive](../../document-extensions/timeseries/design.mdx#case-insensitive)
+
+
+## Time series architecture
+### Time series as a document extension
+
+* Each time series belongs to, or _extends_, one particular document.
+
+* The document and the time series reference each other:
+  * The document's metadata keeps a reference to the time series **name**.
+    The time series **data** itself is stored in a separate location.
+  * The [segments](../../document-extensions/timeseries/design.mdx#segmentation) containing the time series data keep a reference to the document ID.
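+
+This two-way relation can also be inspected from the client side. Below is a minimal sketch,
+assuming an open document store (`store`) and a `User` entity class; `GetTimeSeriesFor` returns
+the time series names listed in the entity's metadata:
+
+
+
+{`using (var session = store.OpenSession())
+\{
+    var user = session.Load<User>("users/john");
+
+    // The names of the time series that extend this document,
+    // as listed in the document's metadata:
+    List<string> tsNames = session.Advanced.GetTimeSeriesFor(user);
+\}
+`}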
+### The `HasTimeSeries` flag
+
+* When a document has one or more time series,
+  RavenDB automatically adds the `HasTimeSeries` flag to the document's metadata under `@flags`.
+
+* When all time series are deleted from the document, RavenDB automatically removes the flag.
+
+
+
+{`\{
+    "Name": "Paul",
+    "@metadata": \{
+        "@collection": "Users",
+        "@timeseries": [
+            "my time series name"
+        ],
+        "@flags": "HasTimeSeries"
+    \}
+\}
+`}
+
+
+### The time series entry
+
+Each time series entry is composed of a `TimeSeriesEntry` object which contains:
+
+| Parameter | Type | Description |
+|---------------|------------------|--------------------------------------------------------------|
+| **Timestamp** | `DateTime (UTC)` | <ul><li>The time of the event represented by the entry.</li><li>Time is measured up to millisecond resolution.</li></ul> |
+| **Tag** | `string` | <ul><li>An optional tag for the entry.</li><li>Can be any string up to 255 bytes.</li><li>Possible uses for the tag:
Descriptions or metadata for individual entries.
Storing a related document id, which can then be referenced when querying the time series.</li></ul> | +| **Values** | `double[]` | <ul><li>An array of up to 32 `double` values.</li></ul> | +| **Value** | `double` | <ul><li>equivalent to Values[0].</li></ul> | + + + +Doubles with higher precision - i.e. more digits after the decimal point, are much less compressible. +In other words, `1.672` takes up more space than `1672`. + + +### Segmentation + +* At the server storage level, time series data is divided into **segments**. + +* Each segment contains a number of consecutive entries from the same time series and aggregated values that RavenDB automatically updates in the segment's header. + See [TimeSeriesSegment](../../document-extensions/timeseries/indexing.mdx#section-5) for the detailed class syntax. + +* **Segments size and limitations**: + * Segments have a maximum size of 2 KB. + What this limit practically means, is that a segment can only contain up to 32k entries. + Time series larger than that would always be stored in multiple segments. + * In practice, segments usually contain far less than 32k entries, depending on the size of the entries (after compression). + For example, in the [Northwind sample dataset](../../studio/database/tasks/create-sample-data.mdx), the _Companies_ documents all have a time series called _StockPrice_. + These time series are stored in segments that have ~10-20 entries each. + * The maximum time gap between the first and last entries in a segment is ~24.86 days (`int.MaxValue` milliseconds). + Adding an entry that is further than that from the first segment entry, would add it as the first entry of a new segment. + As a consequence, segments of sparsely-updated time series can be significantly smaller than 2 KB. + * The maximum number of unique tags allowed per segment, is 127. + A higher number than that, would cause the creation of a new segment. + +* **Aggregated values**: + RavenDB automatically stores and updates aggregated values in each segment's header. + These values summarize commonly-used values regarding the segment, including - + - The segment's **Max** value + - The segment's **Min** value + - The segment's values **Sum** + - The segment's **Count** of entries + - The segment's **First** timestamp + - The segment's **Last** timestamp + + + When segment entries store multiple values, e.g. each entry contains a _Latitude_ value and a _Longitude_ value, + the six aggregated values are stored for each value separately. + +### Compression + +Time series data is stored using a format called [Gorilla compression](https://www.vldb.org/pvldb/vol8/p1816-teller.pdf). +On top of the Gorilla compression, the time series segments are compressed using the [LZ4 algorithm](https://lz4.github.io/lz4/). + + + +## Updating Time Series +### Document-change event + +* **Time series name update**: + Creating/deleting a time series adds/removes its name to/from the metadata of the document it belongs to. + This modification triggers a document-change event, thereby initiating various processes within RavenDB such as ongoing tasks, revision creation, subscriptions, etc. 
+
+* **Time series entries updates**:
+  As long as a new time series is not created, or an existing one is not removed,
+  modifying time series entries does Not invoke a document-change event.
+### No conflicts
+
+Time series actions do not cause conflicts. Updating a time series is designed to succeed without causing a concurrency conflict:
+as long as the document it extends exists, updating a time series will always succeed.
+
+* **Updating time series concurrently by multiple cluster nodes**:
+
+  When a time series' data is replicated by multiple nodes, the data from all nodes is merged into a single series.
+
+  When multiple nodes append **different values** at the same timestamp:
+  * If the nodes try to append a **different number of values** for the same timestamp, the greater number of values is applied.
+  * If the nodes try to append the **same number of values**, the first values from each node are compared.
+    The append whose first value sorts higher [_lexicographically_](https://mathworld.wolfram.com/LexicographicOrder.html) (not numerically) is applied.
+    For example, lexicographic order would sort numbers like this: `1 < 10 < 100 < 2 < 21 < 22 < 3`
+  * If an existing value at a certain timestamp is deleted by one node and updated by another node, the deletion is applied.
+
+* **Updating time series by multiple clients to the same node**:
+  * When a time series' value at a certain timestamp is appended by multiple clients more or less simultaneously,
+    RavenDB uses the last-write strategy.
+  * When an existing value at a certain timestamp is deleted by a client and updated by another client,
+    RavenDB still uses the last-write strategy.
+### Transactions
+
+When a session transaction that includes a time series modification fails for any reason,
+the time series modification is reverted.
+### Case insensitive
+
+All time series operations are case insensitive. E.g. -
+
+
+
+{`session.TimeSeriesFor("users/john", "HeartRate")
+    .Delete(baseline.AddMinutes(1));
+`}
+
+
+
+is equivalent to
+
+
+
+{`session.TimeSeriesFor("users/john", "HEARTRATE")
+    .Delete(baseline.AddMinutes(1));
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_design-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_design-nodejs.mdx
new file mode 100644
index 0000000000..0e3af2be7c
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/_design-nodejs.mdx
@@ -0,0 +1,175 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Time series are sequences of numerical values, associated with timestamps and sorted chronologically.
+
+* RavenDB time series are stored and managed as document extensions, achieving much greater speed and efficiency compared to storing them as JSON-formatted data within a document.
+
+* In this page:
+  * [Time series architecture](../../document-extensions/timeseries/design.mdx#time-series-architecture)
+    * [Time series as a document extension](../../document-extensions/timeseries/design.mdx#time-series-as-a--document-extension)
+    * [The `HasTimeSeries` flag](../../document-extensions/timeseries/design.mdx#the--flag)
+    * [The time series entry](../../document-extensions/timeseries/design.mdx#the-time-series-entry)
+    * [Segmentation](../../document-extensions/timeseries/design.mdx#segmentation)
+    * [Compression](../../document-extensions/timeseries/design.mdx#compression)
+  * [Updating time series](../../document-extensions/timeseries/design.mdx#updating-time-series)
+    * [Document change](../../document-extensions/timeseries/design.mdx#document-change)
+    * [No conflicts](../../document-extensions/timeseries/design.mdx#no-conflicts)
+    * [Transactions](../../document-extensions/timeseries/design.mdx#transactions)
+    * [Case insensitive](../../document-extensions/timeseries/design.mdx#case-insensitive)
+
+
+## Time series architecture
+### Time series as a document extension
+
+* Each time series belongs to, or _extends_, one particular document.
+
+* The document and the time series reference each other:
+  * The document's metadata keeps a reference to the time series **name**.
+    The time series **data** itself is stored in a separate location.
+  * The [segments](../../document-extensions/timeseries/design.mdx#segmentation) containing the time series data keep a reference to the document ID.
+### The `HasTimeSeries` flag
+
+* When a document has one or more time series,
+  RavenDB automatically adds the `HasTimeSeries` flag to the document's metadata under `@flags`.
+
+* When all time series are deleted from the document, RavenDB automatically removes the flag.
+
+
+
+{`\{
+    "Name": "Paul",
+    "@metadata": \{
+        "@collection": "Users",
+        "@timeseries": [
+            "my time series name"
+        ],
+        "@flags": "HasTimeSeries"
+    \}
+\}
+`}
+
+
+### The time series entry
+
+Each time series entry is composed of a `TimeSeriesEntry` object which contains:
+
+| Parameter | Type | Description |
+|---------------|------------|--------------------------------------------------------------|
+| **timestamp** | `Date` | <ul><li>The time of the event represented by the entry.</li><li>Time is measured up to millisecond resolution.</li></ul> |
+| **tag** | `string` | <ul><li>An optional tag for the entry.</li><li>Can be any string up to 255 bytes.</li><li>Possible uses for the tag:
Descriptions or metadata for individual entries.
Storing a related document id, which can then be referenced when querying the time series.</li></ul> | +| **values** | `number[]` | <ul><li>An array of up to 32 values.</li></ul> | +| **value** | `number` | <ul><li>equivalent to values[0].</li></ul> | + + + +Doubles with higher precision - i.e. more digits after the decimal point, are much less compressible. +In other words, `1.672` takes up more space than `1672`. + + +### Segmentation + +* At the server storage level, time series data is divided into **segments**. + +* Each segment contains a number of consecutive entries from the same time series and aggregated values that RavenDB automatically updates in the segment's header. + See section [Segment properties](../../document-extensions/timeseries/indexing.mdx#segment-properties) for more details. + +* **Segments size and limitations**: + * Segments have a maximum size of 2 KB. + What this limit practically means, is that a segment can only contain up to 32k entries. + Time series larger than that would always be stored in multiple segments. + * In practice, segments usually contain far less than 32k entries, depending on the size of the entries (after compression). + For example, in the [Northwind sample dataset](../../studio/database/tasks/create-sample-data.mdx), the _Companies_ documents all have a time series called _StockPrice_. + These time series are stored in segments that have ~10-20 entries each. + * The maximum time gap between the first and last entries in a segment is ~24.86 days + (equivalent to 2147483647 milliseconds, the maximum value of a 32-bit signed integer in C#). + Adding an entry that is further than that from the first segment entry, would add it as the first entry of a new segment. + As a consequence, segments of sparsely-updated time series can be significantly smaller than 2 KB. + * The maximum number of unique tags allowed per segment, is 127. + A higher number than that, would cause the creation of a new segment. + +* **Aggregated values**: + RavenDB automatically stores and updates aggregated values in each segment's header. + These values summarize commonly-used values regarding the segment, including - + - The segment's **Max** value + - The segment's **Min** value + - The segment's values **Sum** + - The segment's **Count** of entries + - The segment's **First** timestamp + - The segment's **Last** timestamp + + + When segment entries store multiple values, e.g. each entry contains a _Latitude_ value and a _Longitude_ value, + the six aggregated values are stored for each value separately. + +### Compression + +Time series data is stored using a format called [Gorilla compression](https://www.vldb.org/pvldb/vol8/p1816-teller.pdf). +On top of the Gorilla compression, the time series segments are compressed using the [LZ4 algorithm](https://lz4.github.io/lz4/). + + + +## Updating Time Series +### Document-change event + +* **Time series name update**: + Creating/deleting a time series adds/removes its name to/from the metadata of the document it belongs to. + This modification triggers a document-change event, thereby initiating various processes within RavenDB such as ongoing tasks, revision creation, subscriptions, etc. 
+
+* **Time series entries updates**:
+  As long as a new time series is not created, or an existing one is not removed,
+  modifying time series entries does Not invoke a document-change event.
+### No conflicts
+
+Time series actions do not cause conflicts. Updating a time series is designed to succeed without causing a concurrency conflict:
+as long as the document it extends exists, updating a time series will always succeed.
+
+* **Updating time series concurrently by multiple cluster nodes**:
+
+  When a time series' data is replicated by multiple nodes, the data from all nodes is merged into a single series.
+
+  When multiple nodes append **different values** at the same timestamp:
+  * If the nodes try to append a **different number of values** for the same timestamp, the greater number of values is applied.
+  * If the nodes try to append the **same number of values**, the first values from each node are compared.
+    The append whose first value sorts higher [_lexicographically_](https://mathworld.wolfram.com/LexicographicOrder.html) (not numerically) is applied.
+    For example, lexicographic order would sort numbers like this: `1 < 10 < 100 < 2 < 21 < 22 < 3`
+  * If an existing value at a certain timestamp is deleted by one node and updated by another node, the deletion is applied.
+
+* **Updating time series by multiple clients to the same node**:
+  * When a time series' value at a certain timestamp is appended by multiple clients more or less simultaneously,
+    RavenDB uses the last-write strategy.
+  * When an existing value at a certain timestamp is deleted by a client and updated by another client,
+    RavenDB still uses the last-write strategy.
+### Transactions
+
+When a session transaction that includes a time series modification fails for any reason,
+the time series modification is reverted.
+### Case insensitive
+
+All time series operations are case insensitive. E.g. -
+
+
+
+{`session.timeSeriesFor("users/john", "HeartRate")
+    .deleteAt(timeStampOfEntry);
+`}
+
+
+
+is equivalent to
+
+
+
+{`session.timeSeriesFor("users/john", "HEARTRATE")
+    .deleteAt(timeStampOfEntry);
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-csharp.mdx
new file mode 100644
index 0000000000..71841b8985
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-csharp.mdx
@@ -0,0 +1,639 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* [Static](../../studio/database/indexes/indexes-overview.mdx#index-types) time series indexes can be created from your client application or from the Studio.
+
+* Indexing allows for fast retrieval of the indexed time series data when querying a time series.
+ +* In this page: + * [Time series indexes vs Document indexes](../../document-extensions/timeseries/indexing.mdx#time-series-indexes-vs-document-indexes) + * [Ways to create a time series index](../../document-extensions/timeseries/indexing.mdx#ways-to-create-a-time-series-index) + * [Examples of time series indexes](../../document-extensions/timeseries/indexing.mdx#examples-of-time-series-indexes) + * [Map index - index single time series from single collection](../../document-extensions/timeseries/indexing.mdx#map-index---index-single-time-series-from-single-collection) + * [Map index - index all time series from single collection](../../document-extensions/timeseries/indexing.mdx#map-index---index-all-time-series-from-single-collection) + * [Map index - index all time series from all collections](../../document-extensions/timeseries/indexing.mdx#map-index---index-all-time-series-from-all-collections) + * [Multi-Map index - index time series from several collections](../../document-extensions/timeseries/indexing.mdx#multi-map-index---index-time-series-from-several-collections) + * [Map-Reduce index](../../document-extensions/timeseries/indexing.mdx#map-reduce-index) + * [Syntax](../../document-extensions/timeseries/indexing.mdx#syntax) + + +## Time series indexes vs Document indexes + +#### Auto-Indexes: + +* Time series index: + Dynamic time series indexes are Not created in response to queries. + +* Document index: + [Auto-indexes](../../studio/database/indexes/indexes-overview.mdx#indexes-types) are created in response to dynamic queries. +#### Data source: + +* Time series index: + + * Time series indexes process **[segments](../../document-extensions/timeseries/design.mdx#segmentation)** that contain time series entries. + The entries are indexed through the segment they are stored in, for example, using a LINQ syntax that resembles this one: + + + +{`from segment in timeseries +from entry in segment +... +`} + + + + * The following items can be indexed per index-entry in a time series index: + * Values & timestamp of a time series entry + * The entry tag + * Content from a document referenced by the tag + * Properties of the containing segment (see **[`TimeSeriesSegment`](../../document-extensions/timeseries/indexing.mdx#section-5)**) + +* Document index: + + * The index processes fields from your JSON documents. + Documents are indexed through the collection they belong to, for example, using this LINQ syntax: + + + +{`from employee in employees +... +`} + + +#### Query results: + +* Time series index: + When [querying](../../document-extensions/timeseries/querying/using-indexes.mdx) a time series index, each result item corresponds to the type defined by the **index-entry** in the index definition, + (unless results are [projected](../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)). The documents themselves are not returned. + +* Document index: + The resulting objects are the document entities (unless results are [projected](../../indexes/querying/projections.mdx)). + + + +## Ways to create a time series index + +There are two main ways to create a time series index: + +1. Create a class that inherits from one of the following abstract index creation task classes: + * [`AbstractTimeSeriesIndexCreationTask`](../../document-extensions/timeseries/indexing.mdx#section) + for [map](../../indexes/map-indexes.mdx) and [map-reduce](../../indexes/map-reduce-indexes.mdx) time series indexes. 
+  * [`AbstractMultiMapTimeSeriesIndexCreationTask`](../../document-extensions/timeseries/indexing.mdx#section-1)
+    for [multi-map](../../indexes/multi-map-indexes.mdx) time series indexes.
+  * [`AbstractJavaScriptTimeSeriesIndexCreationTask`](../../document-extensions/timeseries/indexing.mdx#section-2)
+    for static [javascript indexes](../../indexes/javascript-indexes.mdx).
+
+2. Deploy a time series index definition via [PutIndexesOperation](../../client-api/operations/maintenance/indexes/put-indexes.mdx):
+  * Create a [`TimeSeriesIndexDefinition`](../../document-extensions/timeseries/indexing.mdx#section-3) directly.
+  * Create a strongly typed index definition using [`TimeSeriesIndexDefinitionBuilder`](../../document-extensions/timeseries/indexing.mdx#section-4).
+
+
+
+## Examples of time series indexes
+
+#### Map index - index single time series from single collection:
+
+* In this index, we index data from the "StockPrices" time series entries in the "Companies" collection (`TradeVolume`, `Date`).
+
+* In addition, we index the containing document id (`CompanyID`), which is obtained from the segment,
+  and some content from the document referenced by the entry's Tag (`EmployeeName`).
+
+* Each tab below presents one of the different [ways](../../document-extensions/timeseries/indexing.mdx#ways-to-create-a-time-series-index) the index can be defined.
+
+
+
+{`public class StockPriceTimeSeriesFromCompanyCollection : AbstractTimeSeriesIndexCreationTask<Company>
+{
+    // The index-entry:
+    // ================
+    public class IndexEntry
+    {
+        // The index-fields:
+        // =================
+        public double TradeVolume { get; set; }
+        public DateTime Date { get; set; }
+        public string CompanyID { get; set; }
+        public string EmployeeName { get; set; }
+    }
+
+    public StockPriceTimeSeriesFromCompanyCollection()
+    {
+        // Call 'AddMap', specify the time series name to be indexed
+        AddMap("StockPrices", timeseries =>
+            from segment in timeseries
+            from entry in segment.Entries
+
+            // Can load the document referenced in the TAG:
+            let employee = LoadDocument<Employee>(entry.Tag)
+
+            // Define the content of the index-fields:
+            // =======================================
+            select new IndexEntry()
+            {
+                // Retrieve content from the time series ENTRY:
+                TradeVolume = entry.Values[4],
+                Date = entry.Timestamp.Date,
+
+                // Retrieve content from the SEGMENT:
+                CompanyID = segment.DocumentId,
+
+                // Retrieve content from the loaded DOCUMENT:
+                EmployeeName = employee.FirstName + " " + employee.LastName
+            });
+    }
+}
+`}
+
+
+
+{`public class StockPriceTimeSeriesFromCompanyCollection_NonTyped : AbstractTimeSeriesIndexCreationTask
+{
+    public override TimeSeriesIndexDefinition CreateIndexDefinition()
+    {
+        return new TimeSeriesIndexDefinition
+        {
+            Name = "StockPriceTimeSeriesFromCompanyCollection_NonTyped",
+            Maps =
+            {
+                @"
+                from segment in timeSeries.Companies.StockPrices
+                from entry in segment.Entries
+
+                let employee = LoadDocument(entry.Tag, ""Employees"")
+
+                select new
+                {
+                    TradeVolume = entry.Values[4],
+                    Date = entry.Timestamp.Date,
+                    CompanyID = segment.DocumentId,
+                    EmployeeName = employee.FirstName + ' ' + employee.LastName
+                }"
+            }
+        };
+    }
+}
+`}
+
+
+
+{`public class StockPriceTimeSeriesFromCompanyCollection_JS :
+    AbstractJavaScriptTimeSeriesIndexCreationTask
+{
+    public StockPriceTimeSeriesFromCompanyCollection_JS()
+    {
+        Maps = new HashSet<string> { @"
+            timeSeries.map('Companies', 'StockPrices', function (segment) {
+
+                return segment.Entries.map(entry => {
+                    let employee = load(entry.Tag, 'Employees');
+
+                    return {
+                        TradeVolume: entry.Values[4],
+                        Date: new Date(entry.Timestamp.getFullYear(),
+                                       entry.Timestamp.getMonth(),
+                                       entry.Timestamp.getDate()),
+                        CompanyID: segment.DocumentId,
+                        EmployeeName: employee.FirstName + ' ' + employee.LastName
+                    };
+                });
+            })"
+        };
+    }
+}
+`}
+
+
+
+{`// Define the 'index definition'
+var indexDefinition = new TimeSeriesIndexDefinition
+    {
+        Name = "StockPriceTimeSeriesFromCompanyCollection",
+        Maps =
+        {
+            @"
+            from segment in timeSeries.Companies.StockPrices
+            from entry in segment.Entries
+
+            let employee = LoadDocument(entry.Tag, ""Employees"")
+
+            select new
+            {
+                TradeVolume = entry.Values[4],
+                Date = entry.Timestamp.Date,
+                CompanyID = segment.DocumentId,
+                EmployeeName = employee.FirstName + ' ' + employee.LastName
+            }"
+        }
+    };
+
+// Deploy the index to the server via 'PutIndexesOperation'
+documentStore.Maintenance.Send(new PutIndexesOperation(indexDefinition));
+`}
+
+
+
+{`// Create the index builder
+var TSIndexDefBuilder =
+    new TimeSeriesIndexDefinitionBuilder<Company>("StockPriceTimeSeriesFromCompanyCollection");
+
+TSIndexDefBuilder.AddMap("StockPrices", timeseries =>
+    from segment in timeseries
+    from entry in segment.Entries
+
+    // Note:
+    // Class TimeSeriesIndexDefinitionBuilder does not support the 'LoadDocument' API method.
+    // Use one of the other index creation methods if needed.
+
+    select new
+    {
+        TradeVolume = entry.Values[4],
+        Date = entry.Timestamp.Date,
+        CompanyID = segment.DocumentId
+    });
+
+// Build the index definition
+var indexDefinitionFromBuilder = TSIndexDefBuilder.ToIndexDefinition(documentStore.Conventions);
+
+// Deploy the index to the server via 'PutIndexesOperation'
+documentStore.Maintenance.Send(new PutIndexesOperation(indexDefinitionFromBuilder));
+`}
+
+
+
+* Querying this index, you can retrieve the indexed time series data while filtering by any of the index-fields.
+
+
+
+{`using (var session = documentStore.OpenSession())
+{
+    // Retrieve time series data for the specified company:
+    // ====================================================
+    List<StockPriceTimeSeriesFromCompanyCollection.IndexEntry> results = session
+        .Query<StockPriceTimeSeriesFromCompanyCollection.IndexEntry,
+               StockPriceTimeSeriesFromCompanyCollection>()
+        .Where(x => x.CompanyID == "Companies/91-A")
+        .ToList();
+}
+
+// Results will include data from all 'StockPrices' entries in document 'Companies/91-A'.
+`}
+
+
+
+{`from index "StockPriceTimeSeriesFromCompanyCollection"
+where "CompanyID" == "Companies/91-A"
+`}
+
+
+
+{`from index "StockPriceTimeSeriesFromCompanyCollection"
+where "TradeVolume" > 150_000_000
+select distinct CompanyID
+`}
+
+
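+
+* The index-fields can also be combined in a single filter. For example - a minimal sketch (the date and volume thresholds below are illustrative, and the index above is assumed to be deployed):
+
+
+
+{`using (var session = documentStore.OpenSession())
+{
+    // Filter by several index-fields at once:
+    // entries of a specific company, from 2020 on, with a high trade volume
+    List<StockPriceTimeSeriesFromCompanyCollection.IndexEntry> results = session
+        .Query<StockPriceTimeSeriesFromCompanyCollection.IndexEntry,
+               StockPriceTimeSeriesFromCompanyCollection>()
+        .Where(x => x.CompanyID == "Companies/91-A" &&
+                    x.Date >= new DateTime(2020, 1, 1) &&
+                    x.TradeVolume > 150_000_000)
+        .ToList();
+}
+`}
+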
+
+#### Map index - index all time series from single collection:
+
+
+
+{`public class AllTimeSeriesFromCompanyCollection : AbstractTimeSeriesIndexCreationTask<Company>
+{
+    public class IndexEntry
+    {
+        public double Value { get; set; }
+        public DateTime Date { get; set; }
+    }
+
+    public AllTimeSeriesFromCompanyCollection()
+    {
+        // Call 'AddMapForAll' to index ALL the time series in the 'Companies' collection
+        // ==============================================================================
+        AddMapForAll(timeseries =>
+            from segment in timeseries
+            from entry in segment.Entries
+
+            select new IndexEntry()
+            {
+                Value = entry.Value,
+                Date = entry.Timestamp.Date
+            });
+    }
+}
+`}
+
+
+
+#### Map index - index all time series from all collections:
+
+
+
+{`// Inherit from AbstractTimeSeriesIndexCreationTask<object>
+// Specify <object> as the type to index from ALL collections
+// ==========================================================
+
+public class AllTimeSeriesFromAllCollections : AbstractTimeSeriesIndexCreationTask<object>
+{
+    public class IndexEntry
+    {
+        public double Value { get; set; }
+        public DateTime Date { get; set; }
+        public string DocumentID { get; set; }
+    }
+
+    public AllTimeSeriesFromAllCollections()
+    {
+        AddMapForAll(timeseries =>
+            from segment in timeseries
+            from entry in segment.Entries
+
+            select new IndexEntry()
+            {
+                Value = entry.Value,
+                Date = entry.Timestamp.Date,
+                DocumentID = segment.DocumentId
+            });
+    }
+}
+`}
+
+
+
+#### Multi-Map index - index time series from several collections:
+
+
+
+{`public class Vehicles_ByLocation : AbstractMultiMapTimeSeriesIndexCreationTask
+{
+    public class IndexEntry
+    {
+        public double Latitude { get; set; }
+        public double Longitude { get; set; }
+        public DateTime Date { get; set; }
+        public string DocumentID { get; set; }
+    }
+
+    public Vehicles_ByLocation()
+    {
+        // Call 'AddMap' for each collection you wish to index
+        // ===================================================
+
+        AddMap<Plane>(
+            "GPS_Coordinates", timeSeries =>
+                from segment in timeSeries
+                from entry in segment.Entries
+                select new IndexEntry()
+                {
+                    Latitude = entry.Values[0],
+                    Longitude = entry.Values[1],
+                    Date = entry.Timestamp.Date,
+                    DocumentID = segment.DocumentId
+                });
+
+        AddMap<Ship>(
+            "GPS_Coordinates", timeSeries =>
+                from segment in timeSeries
+                from entry in segment.Entries
+                select new IndexEntry()
+                {
+                    Latitude = entry.Values[0],
+                    Longitude = entry.Values[1],
+                    Date = entry.Timestamp.Date,
+                    DocumentID = segment.DocumentId
+                });
+    }
+}
+`}
+
+
+
+#### Map-Reduce index:
+
+
+
+{`public class TradeVolume_PerDay_ByCountry :
+    AbstractTimeSeriesIndexCreationTask<Company, TradeVolume_PerDay_ByCountry.Result>
+{
+    public class Result
+    {
+        public double TotalTradeVolume { get; set; }
+        public DateTime Date { get; set; }
+        public string Country { get; set; }
+    }
+
+    public TradeVolume_PerDay_ByCountry()
+    {
+        // Define the Map part:
+        AddMap("StockPrices", timeSeries =>
+            from segment in timeSeries
+            from entry in segment.Entries
+
+            let company = LoadDocument<Company>(segment.DocumentId)
+
+            select new Result
+            {
+                Date = entry.Timestamp.Date,
+                Country = company.Address.Country,
+                TotalTradeVolume = entry.Values[4]
+            });
+
+        // Define the Reduce part:
+        Reduce = results =>
+            from r in results
+            group r by new {r.Date, r.Country}
+            into g
+            select new Result
+            {
+                Date = g.Key.Date,
+                Country = g.Key.Country,
+                TotalTradeVolume = g.Sum(x => x.TotalTradeVolume)
+            };
+    }
+}
+`}
+
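+
+* As with the map indexes above, the reduce results can be queried directly. A minimal sketch (the country value is illustrative, and the index above is assumed to be deployed):
+
+
+
+{`using (var session = documentStore.OpenSession())
+{
+    // Retrieve the daily trade volume totals aggregated for one country:
+    List<TradeVolume_PerDay_ByCountry.Result> results = session
+        .Query<TradeVolume_PerDay_ByCountry.Result, TradeVolume_PerDay_ByCountry>()
+        .Where(x => x.Country == "USA")
+        .OrderBy(x => x.Date)
+        .ToList();
+}
+`}
+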
+
+
+
+## Syntax
+### `AbstractTimeSeriesIndexCreationTask`
+
+
+
+{`// To define a Map index inherit from:
+// ===================================
+public abstract class AbstractTimeSeriesIndexCreationTask<TDocument> \{ \}
+// Time series that belong to documents of the specified \`TDocument\` type will be indexed.
+
+// To define a Map-Reduce index inherit from:
+// ==========================================
+public abstract class AbstractTimeSeriesIndexCreationTask<TDocument, TReduceResult> \{ \}
+// Specify both the document type and the reduce type
+
+// Methods available in AbstractTimeSeriesIndexCreationTask class:
+// ===============================================================
+
+// Set a map function for the specified time series
+protected void AddMap(string timeSeries,
+    Expression<Func<IEnumerable<TimeSeriesSegment>, IEnumerable>> map);
+
+// Set a map function for all time series
+protected void AddMapForAll(
+    Expression<Func<IEnumerable<TimeSeriesSegment>, IEnumerable>> map);
+`}
+
+### `AbstractMultiMapTimeSeriesIndexCreationTask`
+
+
+
+{`// To define a Multi-Map index inherit from:
+// =========================================
+public abstract class AbstractMultiMapTimeSeriesIndexCreationTask \{ \}
+
+// Methods available in AbstractMultiMapTimeSeriesIndexCreationTask class:
+// =======================================================================
+
+// Set a map function for all time series with the specified name
+// that belong to documents of type \`TSource\`
+protected void AddMap<TSource>(string timeSeries,
+    Expression<Func<IEnumerable<TimeSeriesSegment>, IEnumerable>> map);
+
+// Set a map function for all time series that belong to documents of type \`TBase\`
+// or any type that inherits from \`TBase\`
+protected void AddMapForAll<TBase>(
+    Expression<Func<IEnumerable<TimeSeriesSegment>, IEnumerable>> map);
+`}
+
+### `AbstractJavaScriptTimeSeriesIndexCreationTask`
+
+
+
+{`// To define a JavaScript index inherit from:
+// ==========================================
+public abstract class AbstractJavaScriptTimeSeriesIndexCreationTask
+\{
+    public HashSet<string> Maps; // The set of JavaScript map functions for this index
+    protected string Reduce;     // The JavaScript reduce function
+\}
+`}
+
+
+Learn more about JavaScript indexes in [JavaScript Indexes](../../indexes/javascript-indexes.mdx).
+### `TimeSeriesIndexDefinition`
+
+
+
+{`public class TimeSeriesIndexDefinition : IndexDefinition
+`}
+
+
+
+While `TimeSeriesIndexDefinition` is currently functionally equivalent to the regular [`IndexDefinition`](../../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinition) class from which it inherits,
+it is recommended to use `TimeSeriesIndexDefinition` when creating a time series index definition in case additional functionality is added in future versions of RavenDB.
+### `TimeSeriesIndexDefinitionBuilder`
+
+
+
+{`public class TimeSeriesIndexDefinitionBuilder<TDocument>
+\{
+    public TimeSeriesIndexDefinitionBuilder(string indexName = null)
+\}
+`}
+
+
+
+**Note**:
+
+* Currently, class `TimeSeriesIndexDefinitionBuilder` does Not support API methods from abstract class `AbstractCommonApiForIndexes`,
+  such as `LoadDocument` or `Recurse`.
+
+* Use one of the other index creation methods if needed.
+
+### `TimeSeriesSegment`
+
+* Segment properties include the entries data and aggregated values that RavenDB automatically updates in the segment's header.
+ +* The following segment properties can be indexed: + + + +{`public sealed class TimeSeriesSegment +\{ + // The ID of the document this time series belongs to + public string DocumentId \{ get; set; \} + + // The name of the time series this segment belongs to + public string Name \{ get; set; \} + + // The smallest values from all entries in the segment + // The first array item is the Min of all first values, etc. + public double[] Min \{ get; set; \} + + // The largest values from all entries in the segment + // The first array item is the Max of all first values, etc. + public double[] Max \{ get; set; \} + + // The sum of all values from all entries in the segment + // The first array item is the Sum of all first values, etc. + public double[] Sum \{ get; set; \} + + // The number of entries in the segment + public int Count \{ get; set; \} + + // The timestamp of the first entry in the segment + public DateTime Start \{ get; set; \} + + // The timestamp of the last entry in the segment + public DateTime End \{ get; set; \} + + // The segment's entries themselves + public TimeSeriesEntry[] Entries \{ get; set; \} +\} +`} + + + +* These are the properties of a `TimeSeriesEntry` which can be indexed: + + + +{`public class TimeSeriesEntry +\{ + public DateTime Timestamp; + public string Tag; + public double[] Values; + + // This is exactly equivalent to Values[0] + public double Value; +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-nodejs.mdx new file mode 100644 index 0000000000..b68703e663 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-nodejs.mdx @@ -0,0 +1,427 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* [Static](../../studio/database/indexes/indexes-overview.mdx#index-types) time series indexes can be created from your client application or from the Studio. + +* Indexing allows for fast retrieval of the indexed time series data when querying a time series. 
+ +* In this page: + * [Time series indexes vs Document indexes](../../document-extensions/timeseries/indexing.mdx#time-series-indexes-vs-document-indexes) + * [Ways to create a time series index](../../document-extensions/timeseries/indexing.mdx#ways-to-create-a-time-series-index) + * [Examples of time series indexes](../../document-extensions/timeseries/indexing.mdx#examples-of-time-series-indexes) + * [Map index - index single time series from single collection](../../document-extensions/timeseries/indexing.mdx#map-index---index-single-time-series-from-single-collection) + * [Map index - index all time series from single collection](../../document-extensions/timeseries/indexing.mdx#map-index---index-all-time-series-from-single-collection) + * [Map index - index all time series from all collections](../../document-extensions/timeseries/indexing.mdx#map-index---index-all-time-series-from-all-collections) + * [Multi-Map index - index time series from several collections](../../document-extensions/timeseries/indexing.mdx#multi-map-index---index-time-series-from-several-collections) + * [Map-Reduce index](../../document-extensions/timeseries/indexing.mdx#map-reduce-index) + * [Syntax](../../document-extensions/timeseries/indexing.mdx#syntax) + + +## Time series indexes vs Document indexes + +#### Auto-Indexes: + +* Time series index: + Dynamic time series indexes are Not created in response to queries. + +* Document index: + [Auto-indexes](../../studio/database/indexes/indexes-overview.mdx#indexes-types) are created in response to dynamic queries. +#### Data source: + +* Time series index: + + * Time series indexes process **[segments](../../document-extensions/timeseries/design.mdx#segmentation)** that contain time series entries. + The entries are indexed through the segment they are stored in. + * The following items can be indexed per index-entry in a time series index: + * Values & timestamp of a time series entry + * The entry tag + * Content from a document referenced by the tag + * Properties of the containing segment (see **[segment properties](../../document-extensions/timeseries/indexing.mdx#segment-properties)**) + +* Document index: + + * The index processes fields from your JSON documents. + Documents are indexed through the collection they belong to. +#### Query results: + +* Time series index: + When [querying](../../document-extensions/timeseries/querying/using-indexes.mdx) a time series index, each result item corresponds to the type defined by the **index-entry** in the index definition, + (unless results are [projected](../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)). The documents themselves are not returned. + +* Document index: + The resulting objects are the document entities (unless results are [projected](../../indexes/querying/projections.mdx)). + + + +## Ways to create a time series index + +There are two main ways to create a time series index: + +1. Create a class that inherits from the abstract index class [`AbstractRawJavaScriptTimeSeriesIndexCreationTask`](../../document-extensions/timeseries/indexing.mdx#section). + +2. Create a [`TimeSeriesIndexDefinition`](../../document-extensions/timeseries/indexing.mdx#section-1) + and deploy the time series index definition via [PutIndexesOperation](../../client-api/operations/maintenance/indexes/put-indexes.mdx). 
+
+
+
+## Examples of time series indexes
+
+#### Map index - index single time series from single collection:
+
+* In this index, we index data from the "StockPrices" time series entries in the "Companies" collection (`tradeVolume`, `date`).
+
+* In addition, we index the containing document id (`companyID`), which is obtained from the segment,
+  and some content from the document referenced by the entry's Tag (`employeeName`).
+
+
+
+{`class StockPriceTimeSeriesFromCompanyCollection extends
+    AbstractRawJavaScriptTimeSeriesIndexCreationTask {
+
+    constructor() {
+        super();
+
+        this.maps.add(\`
+            // Call timeSeries.map(), pass:
+            // * The collection to index
+            // * The time series name
+            // * The function that defines the index-entries
+            // ============================================
+            timeSeries.map("Companies", "StockPrices", function (segment) {
+
+                // Return the index-entries:
+                // =========================
+                return segment.Entries.map(entry => {
+                    let employee = load(entry.Tag, "Employees");
+
+                    // Define the index-fields per entry:
+                    // ==================================
+
+                    return {
+                        // Retrieve content from the time series ENTRY:
+                        tradeVolume: entry.Values[4],
+                        date: new Date(entry.Timestamp),
+
+                        // Retrieve content from the SEGMENT:
+                        companyID: segment.DocumentId,
+
+                        // Retrieve content from the loaded DOCUMENT:
+                        employeeName: employee.FirstName + " " + employee.LastName
+                    };
+                });
+            })
+        \`);
+    }
+}
+`}
+
+
+
+{`const timeSeriesIndexDefinition = new TimeSeriesIndexDefinition();
+
+timeSeriesIndexDefinition.name = "StockPriceTimeSeriesFromCompanyCollection";
+
+timeSeriesIndexDefinition.maps = new Set([\`
+    from segment in timeSeries.Companies.StockPrices
+    from entry in segment.Entries
+
+    let employee = LoadDocument(entry.Tag, "Employees")
+
+    select new
+    {
+        tradeVolume = entry.Values[4],
+        date = entry.Timestamp.Date,
+        companyID = segment.DocumentId,
+        employeeName = employee.FirstName + " " + employee.LastName
+    }\`
+]);
+
+// Deploy the index to the server via 'PutIndexesOperation'
+await documentStore.maintenance.send(new PutIndexesOperation(timeSeriesIndexDefinition));
+`}
+
+
+
+* Querying this index, you can retrieve the indexed time series data while filtering by any of the index-fields.
+
+
+
+{`const results = await session
+    // Retrieve time series data for the specified company:
+    // ====================================================
+    .query({ indexName: "StockPriceTimeSeriesFromCompanyCollection" })
+    .whereEquals("companyID", "Companies/91-A")
+    .all();
+
+// Results will include data from all 'StockPrices' entries in document 'Companies/91-A'.
+`} + + + + +{`from index "StockPriceTimeSeriesFromCompanyCollection" +where "companyID" == "Comapnies/91-A" +`} + + + + +{`from index "StockPriceTimeSeriesFromCompanyCollection" +where "companyID" == "Comapnies/91-A" +`} + + + + +{`from index "StockPriceTimeSeriesFromCompanyCollection" +where "tradeVolume" > 150_000_000 +select distinct companyID +`} + + + +#### Map index - index all time series from single collection: + + + + +{`class AllTimeSeriesFromCompanyCollection extends AbstractRawJavaScriptTimeSeriesIndexCreationTask { + + constructor() { + super(); + + this.maps.add(\` + // Call timeSeries.map(), pass: + // * The collection to index and the function that defines the index-entries + // * No time series is specified - so ALL time series from the collection will be indexed + // ====================================================================================== + timeSeries.map("Companies", function (segment) { + + return segment.Entries.map(entry => ({ + value: entry.Value, + date: new Date(entry.Timestamp) + })); + }) + \`); + } +} +`} + + + +#### Map index - index all time series from all collections: + + + + +{`class AllTimeSeriesFromAllCollections extends AbstractRawJavaScriptTimeSeriesIndexCreationTask { + + constructor() { + super(); + + this.maps.add(\` + // No collection and time series are specified - + // so ALL time series from ALL collections will be indexed + // ======================================================= + timeSeries.map(function (segment) { + + return segment.Entries.map(entry => ({ + value: entry.Value, + date: new Date(entry.Timestamp), + documentID: segment.DocumentId, + })); + }) + \`); + } +} +`} + + + +#### Multi-Map index - index time series from several collections: + + + + +{`class Vehicles_ByLocation extends AbstractRawJavaScriptTimeSeriesIndexCreationTask { + + constructor() { + super(); + + // Call 'timeSeries.map()' for each collection you wish to index + // ============================================================= + + this.maps.add(\` + timeSeries.map("Planes", "GPS_Coordinates", function (segment) { + + return segment.Entries.map(entry => ({ + latitude: entry.Values[0], + longitude: entry.Values[1], + date: new Date(entry.Timestamp), + documentID: segment.DocumentId + })); + }) + \`); + + this.maps.add(\` + timeSeries.map("Ships", "GPS_Coordinates", function (segment) { + + return segment.Entries.map(entry => ({ + latitude: entry.Values[0], + longitude: entry.Values[1], + date: new Date(entry.Timestamp), + documentID: segment.DocumentId + })); + }) + \`); + } +} +`} + + + +#### Map-Reduce index: + + + + +{`class TradeVolume_PerDay_ByCountry extends AbstractRawJavaScriptTimeSeriesIndexCreationTask { + + constructor() { + super(); + + // Define the Map part: + this.maps.add(\` + timeSeries.map("Companies", "StockPrices", function (segment) { + + return segment.Entries.map(entry => { + let company = load(segment.DocumentId, "Companies"); + + return { + date: new Date(entry.Timestamp), + country: company.Address.Country, + totalTradeVolume: entry.Values[4], + }; + }); + }) + \`); + + // Define the Reduce part: + this.reduce = \` + groupBy(x => ({date: x.date, country: x.country})) + .aggregate(g => { + return { + date: g.key.date, + country: g.key.country, + totalTradeVolume: g.values.reduce((sum, x) => x.totalTradeVolume + sum, 0) + }; + }) + \`; + } +} +`} + + + + + + +## Syntax +### `AbstractRawJavaScriptTimeSeriesIndexCreationTask` + + + +{`// To define a raw JavaScript index extend the following class: +// 
============================================================ +abstract class AbstractRawJavaScriptTimeSeriesIndexCreationTask +\{ + // The set of JavaScript map functions for this index + maps; // Set + + // The JavaScript reduce function + reduce; // string +\} +`} + + +### `TimeSeriesIndexDefinition` + + + +{`class TimeSeriesIndexDefinition extends IndexDefinition +`} + + + +While `TimeSeriesIndexDefinition` is currently functionally equivalent to the regular [`IndexDefinition`](../../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinition) class from which it inherits, +it is recommended to use `TimeSeriesIndexDefinition` when creating a time series index definition in case additional functionality is added in future versions of RavenDB. +### Segment properties + +* Segment properties include the entries data and aggregated values that RavenDB automatically updates in the segment's header. + +* **Unlike the C# client**, class `TimeSeriesSegment` is Not defined in the Node.js client. + However, the following are the segment properties that can be indexed from your raw javascript index definition which the server recognizes: + + + +{`// The ID of the document this time series belongs to +DocumentId; // string + +// The name of the time series this segment belongs to +Name; // string + +// The smallest values from all entries in the segment +// The first array item is the Min of all first values, etc. +Min; // number[] + +// The largest values from all entries in the segment +// The first array item is the Max of all first values, etc. +Max; // number[] + +// The sum of all values from all entries in the segment +// The first array item is the Sum of all first values, etc. +Sum; // number[] + +// The number of entries in the segment +Count; // number + +// The timestamp of the first entry in the segment +Start; // Date + +// The timestamp of the last entry in the segment +End; // Date + +// The segment's entries themselves +Entries; // TimeSeriesEntry[] +`} + + + +* These are the properties of a `TimeSeriesEntry` which can be indexed: + + + +{`class TimeSeriesEntry +\{ + timestamp; // Date + tag; // string + values; // number[] + + // This is equivalent to values[0] + value; // number +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-php.mdx new file mode 100644 index 0000000000..b32fe250cc --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-php.mdx @@ -0,0 +1,616 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* [Static](../../studio/database/indexes/indexes-overview.mdx#index-types) time series indexes can be created from your client application or from the Studio. + +* Indexing allows for fast retrieval of the indexed time series data when querying a time series. 
+ +* In this page: + * [Time series indexes vs Document indexes](../../document-extensions/timeseries/indexing.mdx#time-series-indexes-vs-document-indexes) + * [Ways to create a time series index](../../document-extensions/timeseries/indexing.mdx#ways-to-create-a-time-series-index) + * [Examples of time series indexes](../../document-extensions/timeseries/indexing.mdx#examples-of-time-series-indexes) + * [Map index - index single time series from single collection](../../document-extensions/timeseries/indexing.mdx#map-index---index-single-time-series-from-single-collection) + * [Multi-Map index - index time series from several collections](../../document-extensions/timeseries/indexing.mdx#multi-map-index---index-time-series-from-several-collections) + * [Map-Reduce index](../../document-extensions/timeseries/indexing.mdx#map-reduce-index) + * [Syntax](../../document-extensions/timeseries/indexing.mdx#syntax) + + +## Time series indexes vs Document indexes + +#### Auto-Indexes: + +* Time series index: + Dynamic time series indexes are Not created in response to queries. + +* Document index: + [Auto-indexes](../../studio/database/indexes/indexes-overview.mdx#indexes-types) are created in response to dynamic queries. +#### Data source: + +* Time series index: + + * Time series indexes process **segments** that contain time series entries. + The entries are indexed through the segment they are stored in, for example, using a LINQ syntax that resembles this one: + + + +{`from segment in timeseries +from entry in segment +... +`} + + + + * The following items can be indexed per index-entry in a time series index: + * Values & timestamp of a time series entry + * The entry tag + * Content from a document referenced by the tag + * Properties of the containing segment + +* Document index: + + * The index processes fields from your JSON documents. + Documents are indexed through the collection they belong to, for example, using this LINQ syntax: + + + +{`from employee in employees +... +`} + + +#### Query results: + +* Time series index: + When [querying](../../document-extensions/timeseries/querying/using-indexes.mdx) a time series index, each result item corresponds to the type defined by the **index-entry** in the index definition, + (unless results are [projected](../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)). The documents themselves are not returned. + +* Document index: + The resulting objects are the document entities (unless results are projected). + + + +## Ways to create a time series index + +There are two main ways to create a time series index: + +1. Create a class that inherits from one of the following abstract index creation task classes: + * `AbstractTimeSeriesIndexCreationTask` + for [map](../../indexes/map-indexes.mdx) and [map-reduce](../../indexes/map-reduce-indexes.mdx) time series indexes. + * `AbstractMultiMapTimeSeriesIndexCreationTask` + for [multi-map](../../indexes/multi-map-indexes.mdx) time series indexes. + * `AbstractJavaScriptTimeSeriesIndexCreationTask` + for static javascript indexes. + +2. Deploy a time series index definition via [PutIndexesOperation](../../client-api/operations/maintenance/indexes/put-indexes.mdx): + * Create a `TimeSeriesIndexDefinition` directly. + * Create a strongly typed index definition using `TimeSeriesIndexDefinitionBuilder`. 
+
+
+
+## Examples of time series indexes
+
+#### Map index - index single time series from single collection:
+
+* In this index, we index data from the "StockPrices" time series entries in the "Companies" collection (`TradeVolume`, `Date`).
+
+* In addition, we index the containing document id (`CompanyId`), which is obtained from the segment,
+  and some content from the document referenced by the entry's Tag (`EmployeeName`).
+
+* Each tab below presents one of the different [ways](../../document-extensions/timeseries/indexing.mdx#ways-to-create-a-time-series-index) the index can be defined.
+
+
+
+{`class StockPriceTimeSeriesFromCompanyCollection_IndexEntry
+{
+    // The index-fields:
+    // =================
+    public ?float $tradeVolume = null;
+    public ?DateTime $date = null;
+    public ?string $companyID = null;
+    public ?string $employeeName = null;
+
+    public function getTradeVolume(): ?float
+    {
+        return $this->tradeVolume;
+    }
+
+    public function setTradeVolume(?float $tradeVolume): void
+    {
+        $this->tradeVolume = $tradeVolume;
+    }
+
+    public function getDate(): ?DateTime
+    {
+        return $this->date;
+    }
+
+    public function setDate(?DateTime $date): void
+    {
+        $this->date = $date;
+    }
+
+    public function getCompanyID(): ?string
+    {
+        return $this->companyID;
+    }
+
+    public function setCompanyID(?string $companyID): void
+    {
+        $this->companyID = $companyID;
+    }
+
+    public function getEmployeeName(): ?string
+    {
+        return $this->employeeName;
+    }
+
+    public function setEmployeeName(?string $employeeName): void
+    {
+        $this->employeeName = $employeeName;
+    }
+}
+class StockPriceTimeSeriesFromCompanyCollection extends AbstractTimeSeriesIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map =
+            "from segment in timeSeries.Companies.StockPrices " .
+            "from entry in segment.Entries " .
+
+            // Can load the document referenced in the TAG:
+            "let employee = LoadDocument(entry.Tag, \\"Employees\\") " .
+
+            // Define the content of the index-fields:
+            // =======================================
+            "select new " .
+            "{" .
+            // Retrieve content from the time series ENTRY:
+            " TradeVolume = entry.Values[4]," .
+            " Date = entry.Timestamp.Date," .
+            // Retrieve content from the SEGMENT:
+            " CompanyId = segment.DocumentId," .
+            // Retrieve content from the loaded DOCUMENT:
+            " EmployeeName = employee.FirstName + \\" \\" + employee.LastName" .
+            "}";
+    }
+}
+`}
+
+
+
+{`class StockPriceTimeSeriesFromCompanyCollection_JS extends AbstractJavaScriptTimeSeriesIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->setMaps(["
+            timeSeries.map('Companies', 'StockPrices', function (segment) {
+
+                return segment.Entries.map(entry => {
+                    let employee = load(entry.Tag, 'Employees');
+
+                    return {
+                        TradeVolume: entry.Values[4],
+                        Date: new Date(entry.Timestamp.getFullYear(),
+                                       entry.Timestamp.getMonth(),
+                                       entry.Timestamp.getDate()),
+                        CompanyID: segment.DocumentId,
+                        EmployeeName: employee.FirstName + ' ' + employee.LastName
+                    };
+                });
+            })"
+        ]);
+    }
+}
+`}
+
+
+
+{`// Define the 'index definition'
+$indexDefinition = new TimeSeriesIndexDefinition();
+$indexDefinition->setName("StockPriceTimeSeriesFromCompanyCollection");
+$indexDefinition->setMaps(["
+    from segment in timeSeries.Companies.StockPrices
+    from entry in segment.Entries
+
+    let employee = LoadDocument(entry.Tag, \\"Employees\\")
+
+    select new
+    {
+        TradeVolume = entry.Values[4],
+        Date = entry.Timestamp.Date,
+        CompanyId = segment.DocumentId,
+        EmployeeName = employee.FirstName + ' ' + employee.LastName
+    }"
+]);
+
+// Deploy the index to the server via 'PutIndexesOperation'
+$documentStore->maintenance()->send(new PutIndexesOperation($indexDefinition));
+`}
+
+
+
+{`// Create the index builder
+$TSIndexDefBuilder = new TimeSeriesIndexDefinitionBuilder("StockPriceTimeSeriesFromCompanyCollection");
+
+// "StockPrices"
+$TSIndexDefBuilder->setMap("
+    from segment in timeSeries.Companies.StockPrices
+    from entry in segment.Entries
+    select new
+    {
+        TradeVolume = entry.Values[4],
+        Date = entry.Timestamp.Date,
+        CompanyId = segment.DocumentId,
+    }
+");
+
+// Build the index definition
+$indexDefinitionFromBuilder = $TSIndexDefBuilder->toIndexDefinition($documentStore->getConventions());
+
+// Deploy the index to the server via 'PutIndexesOperation'
+$documentStore->maintenance()->send(new PutIndexesOperation($indexDefinitionFromBuilder));
+`}
+
+
+
+* Querying this index, you can retrieve the indexed time series data while filtering by any of the index-fields.
+
+
+
+{`$session = $documentStore->openSession();
+try {
+    // Retrieve time series data for the specified company:
+    // ====================================================
+    /** @var array $results */
+    $results = $session
+        ->query(StockPriceTimeSeriesFromCompanyCollection_IndexEntry::class,
+                StockPriceTimeSeriesFromCompanyCollection::class)
+        ->whereEquals("CompanyId", "Companies/91-A")
+        ->toList();
+} finally {
+    $session->close();
+}
+
+// Results will include data from all 'StockPrices' entries in document 'Companies/91-A'.
+`}
+
+
+
+{`from index "StockPriceTimeSeriesFromCompanyCollection"
+where "CompanyId" == "Companies/91-A"
+`}
+
+
+
+{`from index "StockPriceTimeSeriesFromCompanyCollection"
+where "CompanyId" == "Companies/91-A"
+`}
+
+
+
+{`from index "StockPriceTimeSeriesFromCompanyCollection"
+where "TradeVolume" > 150_000_000
+select distinct CompanyId
+`}
+
+
+
+#### Multi-Map index - index time series from several collections:
+
+
+
+{`class Vehicles_ByLocation_IndexEntry
+{
+    private ?float $latitude = null;
+    private ?float $longitude = null;
+    private ?DateTime $date = null;
+    private ?string $documentId = null;
+
+    public function getLatitude(): ?float
+    {
+        return $this->latitude;
+    }
+
+    public function setLatitude(?float $latitude): void
+    {
+        $this->latitude = $latitude;
+    }
+
+    public function getLongitude(): ?float
+    {
+        return $this->longitude;
+    }
+
+    public function setLongitude(?float $longitude): void
+    {
+        $this->longitude = $longitude;
+    }
+
+    public function getDate(): ?DateTime
+    {
+        return $this->date;
+    }
+
+    public function setDate(?DateTime $date): void
+    {
+        $this->date = $date;
+    }
+
+    public function getDocumentId(): ?string
+    {
+        return $this->documentId;
+    }
+
+    public function setDocumentId(?string $documentId): void
+    {
+        $this->documentId = $documentId;
+    }
+}
+class Vehicles_ByLocation extends AbstractMultiMapTimeSeriesIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        // Call 'addMap' for each collection you wish to index
+        // ===================================================
+        // "GPS_Coordinates"
+        $this->addMap("
+            from segment in timeSeries.Planes.GPS_Coordinates
+            from entry in segment.Entries
+            select new
+            {
+                Latitude = entry.Values[0],
+                Longitude = entry.Values[1],
+                Date = entry.Timestamp.Date,
+                DocumentId = segment.DocumentId
+            }
+        ");
+
+        $this->addMap("
+            from segment in timeSeries.Ships.GPS_Coordinates
+            from entry in segment.Entries
+            select new
+            {
+                Latitude = entry.Values[0],
+                Longitude = entry.Values[1],
+                Date = entry.Timestamp.Date,
+                DocumentId = segment.DocumentId
+            }
+        ");
+    }
+}
+`}
+
+
+
+#### Map-Reduce index:
+
+
+
+{`class TradeVolume_PerDay_ByCountry_Result
+{
+    private ?float $totalTradeVolume = null;
+    private ?DateTime $date = null;
+    private ?string $country = null;
+
+    public function getTotalTradeVolume(): ?float
+    {
+        return $this->totalTradeVolume;
+    }
+
+    public function setTotalTradeVolume(?float $totalTradeVolume): void
+    {
+        $this->totalTradeVolume = $totalTradeVolume;
+    }
+
+    public function getDate(): ?DateTime
+    {
+        return $this->date;
+    }
+
+    public function setDate(?DateTime $date): void
+    {
+        $this->date = $date;
+    }
+
+    public function getCountry(): ?string
+    {
+        return $this->country;
+    }
+
+    public function setCountry(?string $country): void
+    {
+        $this->country = $country;
+    }
+}
+class TradeVolume_PerDay_ByCountry extends AbstractTimeSeriesIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        // Define the Map part:
+        // "StockPrices"
+        $this->map = "
+            from segment in timeSeries.Companies.StockPrices
+            from entry in segment.Entries
+
+            let company = LoadDocument(segment.DocumentId, 'Companies')
+
+            select new
+            {
+                Date = entry.Timestamp.Date,
+                Country = company.Address.Country,
+                TotalTradeVolume = entry.Values[4],
+            }
+        ";
+
+        // Define the Reduce part:
+        $this->reduce = "
+            from r in results
+            group r by new {r.Date, r.Country}
+            into g
+            select new
+            {
+                Date = g.Key.Date,
+                Country = g.Key.Country,
+                TotalTradeVolume = g.Sum(x => x.TotalTradeVolume)
+            }
+        ";
+    }
+}
+`}
+
+
+
+
+
+## Syntax
+### `AbstractJavaScriptTimeSeriesIndexCreationTask`
+
+
+
+{`class AbstractJavaScriptTimeSeriesIndexCreationTask(AbstractIndexCreationTaskBase[TimeSeriesIndexDefinition]):
+    def __init__(
+        self,
+        conventions: DocumentConventions = None,
+        priority: IndexPriority = None,
+        lock_mode: IndexLockMode = None,
+        deployment_mode: IndexDeploymentMode = None,
+        state: IndexState = None,
+    ):
+        super().__init__(conventions, priority, lock_mode, deployment_mode, state)
+        self._definition = TimeSeriesIndexDefinition()
+
+    @property
+    def maps(self) -> Set[str]:
+        return self._definition.maps
+
+    @maps.setter
+    def maps(self, maps: Set[str]):
+        self._definition.maps = maps
+
+    @property
+    def reduce(self) -> str:
+        return self._definition.reduce
+
+    @reduce.setter
+    def reduce(self, reduce: str):
+        self._definition.reduce = reduce
+`}
+
+### `TimeSeriesIndexDefinition`
+
+
+
+{`class TimeSeriesIndexDefinition(IndexDefinition):
+    @property
+    def source_type(self) -> IndexSourceType:
+        return IndexSourceType.TIME_SERIES
+`}
+
+
+
+While `TimeSeriesIndexDefinition` is currently functionally equivalent to the regular
+[`IndexDefinition`](../../indexes/creating-and-deploying.mdx#using-maintenance-operations)
+class from which it inherits, it is recommended to use `TimeSeriesIndexDefinition` when
+creating a time series index definition in case additional functionality is added in
+future versions of RavenDB.
+### `TimeSeriesIndexDefinitionBuilder`
+
+
+
+{`class TimeSeriesIndexDefinitionBuilder(AbstractIndexDefinitionBuilder[TimeSeriesIndexDefinition]):
+    def __init__(self, index_name: Optional[str] = None):
+        super().__init__(index_name)
+        self.map: Optional[str] = None
+`}
+
+### `TimeSeriesSegment`
+
+* Segment properties include the entries data and aggregated values that RavenDB automatically updates in the segment's header.
+
+* The following segment properties can be indexed:
+
+
+
+{`public sealed class TimeSeriesSegment
+\{
+    // The ID of the document this time series belongs to
+    public string DocumentId \{ get; set; \}
+
+    // The name of the time series this segment belongs to
+    public string Name \{ get; set; \}
+
+    // The smallest values from all entries in the segment
+    // The first array item is the Min of all first values, etc.
+    public double[] Min \{ get; set; \}
+
+    // The largest values from all entries in the segment
+    // The first array item is the Max of all first values, etc.
+    public double[] Max \{ get; set; \}
+
+    // The sum of all values from all entries in the segment
+    // The first array item is the Sum of all first values, etc.
+ public double[] Sum \{ get; set; \} + + // The number of entries in the segment + public int Count \{ get; set; \} + + // The timestamp of the first entry in the segment + public DateTime Start \{ get; set; \} + + // The timestamp of the last entry in the segment + public DateTime End \{ get; set; \} + + // The segment's entries themselves + public TimeSeriesEntry[] Entries \{ get; set; \} +\} +`} + + + +* These are the properties of a `TimeSeriesEntry` which can be indexed: + + + +{`public class TimeSeriesEntry +\{ + public DateTime Timestamp; + public string Tag; + public double[] Values; + + // This is exactly equivalent to Values[0] + public double Value; +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-python.mdx new file mode 100644 index 0000000000..bb98df601b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/_indexing-python.mdx @@ -0,0 +1,485 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* [Static](../../studio/database/indexes/indexes-overview.mdx#index-types) time series indexes can be created from your client application or from the Studio. + +* Indexing allows for fast retrieval of the indexed time series data when querying a time series. + +* In this page: + * [Time series indexes vs Document indexes](../../document-extensions/timeseries/indexing.mdx#time-series-indexes-vs-document-indexes) + * [Ways to create a time series index](../../document-extensions/timeseries/indexing.mdx#ways-to-create-a-time-series-index) + * [Examples of time series indexes](../../document-extensions/timeseries/indexing.mdx#examples-of-time-series-indexes) + * [Map index - index single time series from single collection](../../document-extensions/timeseries/indexing.mdx#map-index---index-single-time-series-from-single-collection) + * [Multi-Map index - index time series from several collections](../../document-extensions/timeseries/indexing.mdx#multi-map-index---index-time-series-from-several-collections) + * [Map-Reduce index](../../document-extensions/timeseries/indexing.mdx#map-reduce-index) + * [Syntax](../../document-extensions/timeseries/indexing.mdx#syntax) + + +## Time series indexes vs Document indexes + +#### Auto-Indexes: + +* Time series index: + Dynamic time series indexes are Not created in response to queries. + +* Document index: + [Auto-indexes](../../studio/database/indexes/indexes-overview.mdx#indexes-types) are created in response to dynamic queries. +#### Data source: + +* Time series index: + + * Time series indexes process [segments](../../document-extensions/timeseries/design.mdx#segmentation) that contain time series entries. + The entries are indexed through the segment they are stored in, for example, using a LINQ syntax that resembles this one: + + + +{`from segment in timeseries +from entry in segment +... +`} + + + + * The following items can be indexed per index-entry in a time series index: + * Values & timestamp of a time series entry + * The entry tag + * Content from a document referenced by the tag + * Properties of the containing segment + +* Document index: + + * The index processes fields from your JSON documents. + Documents are indexed through the collection they belong to, for example, using this LINQ syntax: + + + +{`from employee in employees +... 
+`}
+
+
+#### Query results:
+
+* Time series index:
+  When [querying](../../document-extensions/timeseries/querying/using-indexes.mdx) a time series index, each result item corresponds to the type defined by the **index-entry** in the index definition,
+  (unless results are [projected](../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)). The documents themselves are not returned.
+
+* Document index:
+  The resulting objects are the document entities (unless results are projected).
+
+
+
+## Ways to create a time series index
+
+There are two main ways to create a time series index:
+
+1. Create a class that inherits from one of the following abstract index creation task classes:
+  * `AbstractTimeSeriesIndexCreationTask`
+    for [map](../../indexes/map-indexes.mdx) and [map-reduce](../../indexes/map-reduce-indexes.mdx) time series indexes.
+  * `AbstractMultiMapTimeSeriesIndexCreationTask`
+    for [multi-map](../../indexes/multi-map-indexes.mdx) time series indexes.
+  * `AbstractJavaScriptTimeSeriesIndexCreationTask`
+    for static [javascript indexes](../../indexes/javascript-indexes.mdx).
+
+2. Deploy a time series index definition via [PutIndexesOperation](../../client-api/operations/maintenance/indexes/put-indexes.mdx):
+  * Create a `TimeSeriesIndexDefinition` directly.
+  * Create a strongly typed index definition using `TimeSeriesIndexDefinitionBuilder`.
+
+
+
+## Examples of time series indexes
+
+#### Map index - index single time series from single collection:
+
+* In this index, we index data from the "StockPrices" time series entries in the "Companies" collection (`trade_volume`, `date`).
+
+* In addition, we index the containing document id (`company_id`), which is obtained from the segment,
+  and some content from the document referenced by the entry's Tag (`employee_name`).
+
+* Each tab below presents one of the different [ways](../../document-extensions/timeseries/indexing.mdx#ways-to-create-a-time-series-index) the index can be defined.
+ + + + +{`class StockPriceTimeSeriesFromCompanyCollection(AbstractTimeSeriesIndexCreationTask): + # The index-entry: + # ================ + class IndexEntry: + def __init__( + self, trade_volume: float = None, date: datetime = None, company_id: str = None, employee_name: str = None + ): + # The index-fields: + # ================= + self.trade_volume = trade_volume + self.date = date + self.company_id = company_id + self.employee_name = employee_name + + def __init__(self): + super().__init__() + self.map = """ + from segment in timeSeries.Companies.StockPrices + from entry in segment.Entries + + let employee = LoadDocument(entry.Tag, "Employees") + + select new + { + trade_volume = entry.Values[4], + date = entry.Timestamp.Date, + company_id = segment.DocumentId, + employee_name = employee.FirstName + " " + employee.LastName + } + """ +`} + + + + +{`class StockPriceTimeSeriesFromCompanyCollection_JS(AbstractJavaScriptTimeSeriesIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + """ + timeSeries.map('Companies', 'StockPrices', function (segment) { + + return segment.Entries.map(entry => { + let employee = load(entry.Tag, 'Employees'); + + return { + trade_volume: entry.Values[4], + date: new Date(entry.Timestamp.getFullYear(), + entry.Timestamp.getMonth(), + entry.Timestamp.getDate()), + company_id: segment.DocumentId, + employee_name: employee.FirstName + ' ' + employee.LastName + }; + }); + }) + """ + } +`} + + + + +{`# Define the 'index definition' +index_definition = TimeSeriesIndexDefinition( + name="StockPriceTimeSeriesFromCompanyCollection", + maps={ + """ + from segment in timeSeries.Companies.StockPrices + from entry in segment.Entries + + let employee = LoadDocument(entry.Tag, "Employees") + + select new + { + trade_volume = entry.Values[4], + date = entry.Timestamp.Date, + company_id = segment.DocumentId, + employee_name = employee.FirstName + ' ' + employee.LastName + } + """ + }, +) + +# Deploy the index to the server via 'PutIndexesOperation' +store.maintenance.send(PutIndexesOperation(index_definition)) +`} + + + + +{`# Create the index builder +ts_index_def_builder = TimeSeriesIndexDefinitionBuilder("StockPriceTimeSeriesFromCompanyCollection") + +ts_index_def_builder.map = """ + from segment in timeSeries.Companies.StockPrices + from entry in segment.Entries + select new + { + trade_volume = entry.Values[4], + date = entry.Timestamp.Date, + company_id = segment.DocumentId, + } +""" +# Build the index definition +index_definition_from_builder = ts_index_def_builder.to_index_definition(store.conventions) + +# Deploy the index to the server via 'PutIndexesOperation' +store.maintenance.send(PutIndexesOperation(index_definition_from_builder)) +`} + + + + +* Querying this index, you can retrieve the indexed time series data while filtering by any of the index-fields. 
+
+
+
+{`with store.open_session() as session:
+    # Retrieve time series data for the specified company:
+    # ====================================================
+    results = list(
+        session.query_index_type(
+            StockPriceTimeSeriesFromCompanyCollection, StockPriceTimeSeriesFromCompanyCollection.IndexEntry
+        ).where_equals("company_id", "Companies/91-A")
+    )
+
+    # Results will include data from all 'StockPrices' entries in document 'Companies/91-A'
+`}
+
+
+
+{`from index "StockPriceTimeSeriesFromCompanyCollection"
+where "company_id" == "Companies/91-A"
+`}
+
+
+
+{`from index "StockPriceTimeSeriesFromCompanyCollection"
+where "company_id" == "Companies/91-A"
+`}
+
+
+
+{`from index "StockPriceTimeSeriesFromCompanyCollection"
+where "trade_volume" > 150_000_000
+select distinct company_id
+`}
+
+
+
+#### Multi-Map index - index time series from several collections:
+
+
+
+{`class Vehicles_ByLocation(AbstractMultiMapTimeSeriesIndexCreationTask):
+    class IndexEntry:
+        def __init__(
+            self, latitude: float = None, longitude: float = None, date: datetime = None, document_id: str = None
+        ):
+            self.latitude = latitude
+            self.longitude = longitude
+            self.date = date
+            self.document_id = document_id
+
+    def __init__(self):
+        super().__init__()
+        self._add_map(
+            """
+            from segment in timeSeries.Planes.GPS_Coordinates
+            from entry in segment.Entries
+            select new
+            {
+                latitude = entry.Values[0],
+                longitude = entry.Values[1],
+                date = entry.Timestamp.Date,
+                document_id = segment.DocumentId
+            }
+            """
+        )
+        self._add_map(
+            """
+            from segment in timeSeries.Ships.GPS_Coordinates
+            from entry in segment.Entries
+            select new
+            {
+                latitude = entry.Values[0],
+                longitude = entry.Values[1],
+                date = entry.Timestamp.Date,
+                document_id = segment.DocumentId
+            }
+            """
+        )
+`}
+
+
+
+#### Map-Reduce index:
+
+
+
+{`class TradeVolume_PerDay_ByCountry(AbstractTimeSeriesIndexCreationTask):
+    class Result:
+        def __init__(self, total_trade_volume: float = None, date: datetime = None, country: str = None):
+            self.total_trade_volume = total_trade_volume
+            self.date = date
+            self.country = country
+
+    def __init__(self):
+        super().__init__()
+        # Define the Map part:
+        self.map = """
+            from segment in timeSeries.Companies.StockPrices
+            from entry in segment.Entries
+
+            let company = LoadDocument(segment.DocumentId, 'Companies')
+
+            select new
+            {
+                date = entry.Timestamp.Date,
+                country = company.Address.Country,
+                total_trade_volume = entry.Values[4],
+            }
+        """
+
+        # Define the Reduce part:
+        self._reduce = """
+            from r in results
+            group r by new {r.date, r.country}
+            into g
+            select new
+            {
+                date = g.Key.date,
+                country = g.Key.country,
+                total_trade_volume = g.Sum(x => x.total_trade_volume)
+            }
+        """
+`}
+
+
+
+
+
+## Syntax
+### `AbstractJavaScriptTimeSeriesIndexCreationTask`
+
+
+
+{`class AbstractJavaScriptTimeSeriesIndexCreationTask(AbstractIndexCreationTaskBase[TimeSeriesIndexDefinition]):
+    def __init__(
+        self,
+        conventions: DocumentConventions = None,
+        priority: IndexPriority = None,
+        lock_mode: IndexLockMode = None,
+        deployment_mode: IndexDeploymentMode = None,
+        state: IndexState = None,
+    ):
+        super().__init__(conventions, priority, lock_mode, deployment_mode, state)
+        self._definition = TimeSeriesIndexDefinition()
+
+    @property
+    def maps(self) -> Set[str]:
+        return self._definition.maps
+
+    @maps.setter
+    def maps(self, maps: Set[str]):
+        self._definition.maps = maps
+
+    @property
+    def reduce(self) -> str:
+        return self._definition.reduce
+
+    @reduce.setter
+    def reduce(self, reduce: 
str): + self._definition.reduce = reduce +`} + + + +Learn more about JavaScript indexes in [JavaScript Indexes](../../indexes/javascript-indexes.mdx). +### `TimeSeriesIndexDefinition` + + + +{`class TimeSeriesIndexDefinition(IndexDefinition): + @property + def source_type(self) -> IndexSourceType: + return IndexSourceType.TIME_SERIES +`} + + + +While `TimeSeriesIndexDefinition` is currently functionally equivalent to the regular +[`IndexDefinition`](../../indexes/creating-and-deploying.mdx#using-maintenance-operations) +class from which it inherits, it is recommended to use `TimeSeriesIndexDefinition` when +creating a time series index definition in case additional functionality is added in +future versions of RavenDB. +### `TimeSeriesIndexDefinitionBuilder` + + + +{`class TimeSeriesIndexDefinitionBuilder(AbstractIndexDefinitionBuilder[TimeSeriesIndexDefinition]): + def __init__(self, index_name: Optional[str] = None): + super().__init__(index_name) + self.map: Optional[str] = None +`} + + +### `TimeSeriesSegment` + +* Segment properties include the entries data and aggregated values that RavenDB automatically updates in the segment's header. + +* The following segment properties can be indexed: + + + +{`public sealed class TimeSeriesSegment +\{ + // The ID of the document this time series belongs to + public string DocumentId \{ get; set; \} + + // The name of the time series this segment belongs to + public string Name \{ get; set; \} + + // The smallest values from all entries in the segment + // The first array item is the Min of all first values, etc. + public double[] Min \{ get; set; \} + + // The largest values from all entries in the segment + // The first array item is the Max of all first values, etc. + public double[] Max \{ get; set; \} + + // The sum of all values from all entries in the segment + // The first array item is the Sum of all first values, etc. + public double[] Sum \{ get; set; \} + + // The number of entries in the segment + public int Count \{ get; set; \} + + // The timestamp of the first entry in the segment + public DateTime Start \{ get; set; \} + + // The timestamp of the last entry in the segment + public DateTime End \{ get; set; \} + + // The segment's entries themselves + public TimeSeriesEntry[] Entries \{ get; set; \} +\} +`} + + + +* These are the properties of a `TimeSeriesEntry` which can be indexed: + + + +{`public class TimeSeriesEntry +\{ + public DateTime Timestamp; + public string Tag; + public double[] Values; + + // This is exactly equivalent to Values[0] + public double Value; +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-csharp.mdx new file mode 100644 index 0000000000..5972ad8ca5 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-csharp.mdx @@ -0,0 +1,300 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Many time series applications produce massive amounts of data at a steady rate. +**Time Series Policies** help you manage your data in two ways: + +* Creating **Rollups**: + Summarizing time series data by aggregating it into the form of a new, lower-resolution time series. + +* Limiting **Retention**: + Controlling the duration for which time series data is kept before deletion. 
+ +* In this page: + * [Time series policies](../../document-extensions/timeseries/rollup-and-retention.mdx#time-series-policies) + * [Examples](../../document-extensions/timeseries/rollup-and-retention.mdx#examples) + * [Create time series policies](../../document-extensions/timeseries/rollup-and-retention.mdx#create-time-series-policies) + * [Retrieve rollup data](../../document-extensions/timeseries/rollup-and-retention.mdx#retrieve-rollup-data) + * [Syntax](../../document-extensions/timeseries/rollup-and-retention.mdx#syntax) + + +## Time series policies + +#### What are rollups? + +A rollup is a time series that summarizes the data from another time series, +with each rollup entry representing a specific time frame in the original time series. +Each rollup entry contains 6 values that aggregate the data from all the entries in the original time frame: + +* *First* - the value of the first entry in the frame. +* *Last* - the value of the last entry. +* *Min* - the smallest value. +* *Max* - the largest value. +* *Sum* - the sum of all the values in the frame. +* *Count* - the total number of entries in the frame. + +This results in a much more compact time series that still contains useful information about the original time series (also called the "raw" time series). + +#### Rollup policies: + +Rollup time series are created automatically according to rollup policies that can be defined from the Studio or client code. + +* A rollup policy applies to all time series of every document in the given collection. + +* Each collection can be configured to have multiple policies which are applied sequentially: + * The raw time series is first rolled up using the policy with the shortest aggregation frame. + * Subsequently, the resulting rollup time series is further aggregated using the policy with the next shortest aggregation frame, + and so on. + +[Querying with group-by](../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) +will transparently traverse over the rollups to retrieve the relevant results. + +Let's look at an example of rollup data: + +!["Rollup time series entries"](./assets/rollup-1.png) + +**1) Name:** +The name of a rollup time series has this format: `<time-series-name>@<policy-name>` +It is a combination of the name of the raw time series and the name of the time series policy, separated by `@`. +In the image above these are "HeartRates" and "byHour" respectively. +For this reason, neither a time series name nor a policy name can have the character `@` in it. + +**2) Timestamp:** +The aggregation frame always begins at a round number of one of these time units: a second, minute, hour, day, week, month, or year. +So the frame includes all entries starting at a round number of time units, and ending at a round number *minus one millisecond* +(since milliseconds are the minimal resolution in RavenDB time series). +The timestamp for a rollup entry is the beginning of the frame it represents. + +For example, if the aggregation frame is three days, a frame will start and end at timestamps like: +`2020-01-01 00:00:00` - `2020-01-03 23:59:59.999`. + +**3) Values:** +Each group of six values represents one value from the original entries. +If the raw time series has `n` values per entry, the rollup time series will have `6 * n` values per entry: +the first six summarize the first raw value, the next six summarize the next raw value, and so on. +For example, a raw series with two values per entry produces twelve values per rollup entry. +The aggregated values have the names: `"First (<value name>)", "Last (<value name>)", ...` respectively.
+ +Because time series entries are limited to 32 values, rollups are limited to the first five values of an original time series entry, or 30 aggregate values. + + + +## Examples + +#### Create time series policies: + + + +{`var oneWeek = TimeValue.FromDays(7); +var fiveYears = TimeValue.FromYears(5); + +// Define a policy on the RAW time series data: +// ============================================ +var rawPolicy = new RawTimeSeriesPolicy(fiveYears); // Retain entries for five years + +// Define a ROLLUP policy: +// ======================= +var rollupPolicy = new TimeSeriesPolicy( + "By1WeekFor1Year", // Name of policy + oneWeek, // Aggregation time, roll-up the data for each week + fiveYears); // Retention time, keep data for five years + +// Define the time series configuration for collection "Companies" (use above policies): +// ===================================================================================== +var timeSeriesConfig = new TimeSeriesConfiguration(); +timeSeriesConfig.Collections["Companies"] = new TimeSeriesCollectionConfiguration +\{ + Policies = new List \{ rollupPolicy \}, + RawPolicy = rawPolicy +\}; + +// Deploy the time series configuration to the server +// by sending the 'ConfigureTimeSeriesOperation' operation: +// ======================================================== +store.Maintenance.Send(new ConfigureTimeSeriesOperation(timeSeriesConfig)); + +// NOTE: +// The time series entries in the RavenDB sample data are dated up to the year 2020. +// To ensure that you see the rollup time series created when running this example, +// the retention time should be set to exceed that year. +`} + + +#### Retrieve rollup data: + +* Retrieving entries from a rollup time series is similar to getting the raw time series data. + +* Learn more about using `TimeSeriesFor.Get` in [Get time series entries](../../document-extensions/timeseries/client-api/session/get/get-entries.mdx). + + + +{`// Get all data from the RAW time series: +// ====================================== + +var rawData = session + .TimeSeriesFor("companies/91-A", "StockPrices") + .Get(DateTime.MinValue, DateTime.MaxValue); + +// Get all data from the ROLLUP time series: +// ========================================= + +// Either - pass the rollup name explicitly to 'TimeSeriesFor': +var rollupData = session + .TimeSeriesFor("companies/91-A", "StockPrices@By1WeekFor1Year") + .Get(DateTime.MinValue, DateTime.MaxValue); + +// Or - get the rollup name by calling 'GetTimeSeriesName': +rollupData = session + .TimeSeriesFor("companies/91-A", rollupPolicy.GetTimeSeriesName("StockPrices")) + .Get(DateTime.MinValue, DateTime.MaxValue); + +// The raw time series has 100 entries +Assert.Equal(rawData.Length, 100); +Assert.Equal(rawData[0].IsRollup, false); + +// The rollup time series has only 22 entries +// as each entry aggregates 1 week's data from the raw time series +Assert.Equal(rollupData.Length, 22); +Assert.Equal(rollupData[0].IsRollup, true); +`} + + + + + +## Syntax + +### The time series policies + +* Raw policy: + * Used to define the retention time of the raw time series. + * Only one such policy per collection can be defined. + * Does not perform aggregation. + +* Rollup policy: + * Used to define the aggregation time frame and retention time for the rollup time series. + * Multiple policies can be defined per collection. 
+ + + +{`public class RawTimeSeriesPolicy : TimeSeriesPolicy +\{ + public TimeValue RetentionTime; +\} + +public class TimeSeriesPolicy +\{ + public string Name; + public TimeValue RetentionTime \{ get; protected set; \} + public TimeValue AggregationTime \{ get; private set; \} +\} +`} + + + +| Property | Description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Name** | This string is used to create the name of the rollup time series.
`Name` is added to the raw time series name - with `@` as a separator,
e.g.: `<time-series-name>@<policy-name>` | +| **RetentionTime** | Time series entries older than this time span (see `TimeValue` below) are automatically deleted. | +| **AggregationTime** | The time series data being rolled up is divided into parts of this length of time, rounded to the nearest time unit. Each part is aggregated into an entry of the rollup time series. | + + + +{`public struct TimeValue +\{ + public static TimeValue FromSeconds(int seconds); + public static TimeValue FromMinutes(int minutes); + public static TimeValue FromHours(int hours); + public static TimeValue FromDays(int days); + public static TimeValue FromMonths(int months); + public static TimeValue FromYears(int years); +\} +`} + + + +Each of the above `TimeValue` methods returns a `TimeValue` object representing a whole number of the specified time units. +These methods are used to define the aggregation and retention spans in time series policies. + + +The main reason we use `TimeValue` rather than something like `TimeSpan` is that `TimeSpan` doesn't have a notion of 'months' +because a calendar month is not a standard unit of time (as it can range from 28 to 31 days). +`TimeValue` enables you to define retention and aggregation spans specifically tailored to calendar months. + +### The time series configuration object + + + +{`public class TimeSeriesConfiguration +\{ + public Dictionary<string, TimeSeriesCollectionConfiguration> Collections; +\} + +public class TimeSeriesCollectionConfiguration +\{ + public bool Disabled; + public List<TimeSeriesPolicy> Policies; + public RawTimeSeriesPolicy RawPolicy; +\} +`} + + + +| Property | Description | +|-----------------|---------------------------------------------------------------------------------------------------------------------------| +| **Collections** | Populate this `Dictionary` with the collection names and their corresponding `TimeSeriesCollectionConfiguration` objects. | +| **Disabled** | If set to `true`, rollup processes will stop, and time series data will not be deleted by retention policies. | +| **Policies** | Populate this `List` with your rollup policies. | +| **RawPolicy** | The `RawTimeSeriesPolicy`, the retention policy for the raw time series. | +### The time series configuration operation + + + +{`public ConfigureTimeSeriesOperation(TimeSeriesConfiguration configuration); +`} + + + +Learn more about operations in: [What are operations](../../client-api/operations/what-are-operations.mdx). +### Casting time series entries + +Time series entries are of one of the following classes: + + + +{`public class TimeSeriesEntry \{ \} +public class TimeSeriesEntry<TValues> : TimeSeriesEntry \{ \} +public class TimeSeriesRollupEntry<TValues> : TimeSeriesEntry<TValues> \{ \} +`} + + + +If you have an existing rollup entry of type `TimeSeriesEntry<TValues>`, +you can cast it to a `TimeSeriesRollupEntry<TValues>` using `AsRollupEntry()`. + + + +{`public static TimeSeriesRollupEntry<TValues> AsRollupEntry<TValues>(this TimeSeriesEntry<TValues> entry); +`} + + + +You can cast a `TimeSeriesRollupEntry<TValues>` to a `TimeSeriesEntry<TValues>` directly. +Its values will consist of all the `First` values of the rollup entry. + + + +{`var rollupEntry = new TimeSeriesRollupEntry<StockPrice>(new DateTime(2020,1,1)); +TimeSeriesEntry<StockPrice> TSEntry = (TimeSeriesEntry<StockPrice>)rollupEntry; +`} + + + +Read more about time series with generic types [here](../../document-extensions/timeseries/client-api/named-time-series-values.mdx).
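+ +As a closing note, here is a query sketch we added for illustration (it is not one of the original examples): group-by queries are written against the raw series name, and the server can answer them from a matching rollup such as `By1WeekFor1Year`. It assumes the sample `Companies` collection, the `StockPrices` series, and an open `session`. + + + +{`// A minimal sketch: a weekly aggregation over "StockPrices". +// The query targets the raw series; rollups are traversed transparently. +var weeklyStats = session.Query<Company>() + .Where(c => c.Id == "companies/91-A") + .Select(c => RavenQuery.TimeSeries(c, "StockPrices") + .GroupBy(g => g.Days(7)) + .Select(g => new + \{ + Min = g.Min(), + Max = g.Max() + \}) + .ToList()) + .ToList(); +`} +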
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-nodejs.mdx new file mode 100644 index 0000000000..2c0c4c1a5b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-nodejs.mdx @@ -0,0 +1,257 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Many time series applications produce massive amounts of data at a steady rate. +**Time Series Policies** help you manage your data in two ways: + +* Creating **Rollups**: + Summarizing time series data by aggregating it into the form of a new, lower-resolution time series. + +* Limiting **Retention**: + Controlling the duration for which time series data is kept before deletion. + +* In this page: + * [Time series policies](../../document-extensions/timeseries/rollup-and-retention.mdx#time-series-policies) + * [Examples](../../document-extensions/timeseries/rollup-and-retention.mdx#examples) + * [Create time series policies](../../document-extensions/timeseries/rollup-and-retention.mdx#create-time-series-policies) + * [Retrieve rollup data](../../document-extensions/timeseries/rollup-and-retention.mdx#retrieve-rollup-data) + * [Syntax](../../document-extensions/timeseries/rollup-and-retention.mdx#syntax) + + +## Time series policies + +#### What are rollups? + +A rollup is a time series that summarizes the data from another time series, +with each rollup entry representing a specific time frame in the original time series. +Each rollup entry contains 6 values that aggregate the data from all the entries in the original time frame: + +* *First* - the value of the first entry in the frame. +* *Last* - the value of the last entry. +* *Min* - the smallest value. +* *Max* - the largest value. +* *Sum* - the sum of all the values in the frame. +* *Count* - the total number of entries in the frame. + +This results in a much more compact time series that still contains useful information about the original time series (also called the "raw" time series). + +#### Rollup policies: + +Rollup time series are created automatically according to rollup policies that can be defined from the Studio or client code. + +* A rollup policy applies to all time series of every document in the given collection. + +* Each collection can be configured to have multiple policies which are applied sequentially: + * The raw time series is first rolled up using the policy with the shortest aggregation frame. + * Subsequently, the resulting rollup time series is further aggregated using the policy with the next shortest aggregation frame, + and so on. + +[Querying with group-by](../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) +will transparently traverse over the rollups to retrieve the relevant results. + +Let's look at an example of rollup data: + +!["Rollup time series entries"](./assets/rollup-1.png) + +**1) Name:** +The name of a rollup time series has this format: `<time-series-name>@<policy-name>` +It is a combination of the name of the raw time series and the name of the time series policy, separated by `@`. +In the image above these are "HeartRates" and "byHour" respectively. +For this reason, neither a time series name nor a policy name can have the character `@` in it.
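+ +For instance, instead of concatenating the name yourself, you can let the policy build it. This is a minimal sketch we added; `TimeSeriesPolicy` and `getTimeSeriesName` are shown later on this page: + + + +{`// Sketch: a policy named "byHour" maps the raw series "HeartRates" +// to the rollup series name "HeartRates@byHour". +const byHour = new TimeSeriesPolicy( + "byHour", // Name of policy + TimeValue.ofHours(1), // Aggregation time + TimeValue.ofDays(90)); // Retention time + +const rollupName = byHour.getTimeSeriesName("HeartRates"); // "HeartRates@byHour" +`} +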
+ +**2) Timestamp:** +The aggregation frame always begins at a round number of one of these time units: a second, minute, hour, day, week, month, or year. +So the frame includes all entries starting at a round number of time units, and ending at a round number *minus one millisecond* +(since milliseconds are the minimal resolution in RavenDB time series). +The timestamp for a rollup entry is the beginning of the frame it represents. + +For example, if the aggregation frame is three days, a frame will start and end at timestamps like: +`2020-01-01 00:00:00` - `2020-01-03 23:59:59.999`. + +**3) Values:** +Each group of six values represents one value from the original entries. +If the raw time series has `n` values per entry, the rollup time series will have `6 * n` values per entry: +the first six summarize the first raw value, the next six summarize the next raw value, and so on. +For example, a raw series with two values per entry produces twelve values per rollup entry. +The aggregated values have the names: `"First (<value name>)", "Last (<value name>)", ...` respectively. + +Because time series entries are limited to 32 values, rollups are limited to the first five values of an original time series entry, or 30 aggregate values. + + + +## Examples + +#### Create time series policies: + + + +{`// Define a policy on the RAW time series data: +// ============================================ +const rawPolicy = new RawTimeSeriesPolicy(TimeValue.ofYears(5)); // Retain data for five years + +// Define a ROLLUP policy: +// ======================= +const rollupPolicy = new TimeSeriesPolicy( + "By1WeekFor1Year", // Name of policy + TimeValue.ofDays(7), // Aggregation time, roll-up the data for each week + TimeValue.ofYears(5)); // Retention time, keep data for five years + +// Define the time series configuration for collection "Companies" (use above policies): +// ===================================================================================== +const collectionConfig = new TimeSeriesCollectionConfiguration(); +collectionConfig.rawPolicy = rawPolicy; +collectionConfig.policies = [rollupPolicy]; + +const timeSeriesConfig = new TimeSeriesConfiguration(); +timeSeriesConfig.collections.set("Companies", collectionConfig); + +// Deploy the time series configuration to the server +// by sending the 'ConfigureTimeSeriesOperation' operation: +// ======================================================== +await documentStore.maintenance.send(new ConfigureTimeSeriesOperation(timeSeriesConfig)); + +// NOTE: +// The time series entries in the RavenDB sample data are dated up to the year 2020. +// To ensure that you see the rollup time series created when running this example, +// the retention time should be set to exceed that year. +`} + + +#### Retrieve rollup data: + +* Retrieving entries from a rollup time series is similar to getting the raw time series data. + +* Learn more about using `timeSeriesFor.get` in [Get time series entries](../../document-extensions/timeseries/client-api/session/get/get-entries.mdx).
+ + + +{`// Get all data from the RAW time series: +// ====================================== + +const rawData = await session + .timeSeriesFor("companies/91-A", "StockPrices") + .get(); + +// Get all data from the ROLLUP time series: +// ========================================= + +// Either - pass the rollup name explicitly to 'TimeSeriesFor': +let rollupData = await session + .timeSeriesFor("companies/91-A", "StockPrices@By1WeekFor1Year") + .get(); + +// Or - get the rollup name by calling 'GetTimeSeriesName': +rollupData = await session + .timeSeriesFor("companies/91-A", rollupPolicy.getTimeSeriesName("StockPrices")) + .get(); + +// The raw time series has 100 entries +assert.equal(rawData.length, 100); +assert.equal(rawData[0].isRollup, false); + +// The rollup time series has only 22 entries +// as each entry aggregates 1 week's data from the raw time series +assert.equal(rollupData.length, 22); +assert.equal(rollupData[0].isRollup, true); +`} + + + + + +## Syntax +### The time series policies + +* Raw policy: + * Used to define the retention time of the raw time series. + * Only one such policy per collection can be defined. + * Does not perform aggregation. + +* Rollup policy: + * Used to define the aggregation time frame and retention time for the rollup time series. + * Multiple policies can be defined per collection. + + + +{`class RawTimeSeriesPolicy extends TimeSeriesPolicy \{ + retentionTime; // TimeValue +\} + +class TimeSeriesPolicy \{ + name; // string; + retentionTime // TimeValue + aggregationTime // TimeValue +\} +`} + + + +| Property | Description | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **name** | This string is used to create the name of the rollup time series.
`name` is added to the raw time series name - with `@` as a separator,
e.g.: `<time-series-name>@<policy-name>` | +| **retentionTime** | Time series entries older than this `TimeValue` are automatically deleted. | +| **aggregationTime** | The time series data being rolled up is divided into parts of this length of time, rounded to the nearest time unit. Each part is aggregated into an entry of the rollup time series. | + + + +{`class TimeValue \{ + static ofSeconds(seconds); + static ofMinutes(minutes); + static ofHours(hours); + static ofDays(days); + static ofMonths(months); + static ofYears(years); +\} +`} + + + + +The main reason we use `TimeValue` rather than something like `TimeSpan` is that `TimeSpan` doesn't have a notion of 'months' +because a calendar month is not a standard unit of time (as it can range from 28 to 31 days). +`TimeValue` enables you to define retention and aggregation spans specifically tailored to calendar months. + +### The time series configuration object + + + +{`class TimeSeriesConfiguration \{ + collections; // Map<string, TimeSeriesCollectionConfiguration> +\} + +class TimeSeriesCollectionConfiguration \{ + disabled; // boolean + policies; // TimeSeriesPolicy[] + rawPolicy; // RawTimeSeriesPolicy +\} +`} + + + +| Property | Description | +|-----------------|-------------------------------------------------------------------------------------------------------------------------| +| **collections** | Populate this map with the collection names and their corresponding `TimeSeriesCollectionConfiguration` objects. | +| **disabled** | If set to `true`, rollup processes will stop, and time series data will not be deleted by retention policies. | +| **policies** | Populate this array with your rollup policies. | +| **rawPolicy** | The `RawTimeSeriesPolicy`, the retention policy for the raw time series. | +### The time series configuration operation + + + +{`ConfigureTimeSeriesOperation(configuration); +`} + + + +| Parameter | Description | +|-------------------|---------------------------------------------------------------| +| **configuration** | The `TimeSeriesConfiguration` object to deploy to the server. | + +Learn more about operations in: [What are operations](../../client-api/operations/what-are-operations.mdx). + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-php.mdx new file mode 100644 index 0000000000..af9e9480f6 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-php.mdx @@ -0,0 +1,334 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Many time series applications produce massive amounts of data at a steady rate. +**Time Series Policies** help you manage your data in two ways: + +* Creating **Rollups**: + Summarizing time series data by aggregating it into the form of a new, lower-resolution time series. + +* Limiting **Retention**: + Controlling the duration for which time series data is kept before deletion.
+ +* In this page: + * [Time series policies](../../document-extensions/timeseries/rollup-and-retention.mdx#time-series-policies) + * [Examples](../../document-extensions/timeseries/rollup-and-retention.mdx#examples) + * [Create time series policies](../../document-extensions/timeseries/rollup-and-retention.mdx#create-time-series-policies) + * [Retrieve rollup data](../../document-extensions/timeseries/rollup-and-retention.mdx#retrieve-rollup-data) + * [Syntax](../../document-extensions/timeseries/rollup-and-retention.mdx#syntax) + + +## Time series policies + +#### What are rollups? + +A rollup is a time series that summarizes the data from another time series, +with each rollup entry representing a specific time frame in the original time series. +Each rollup entry contains 6 values that aggregate the data from all the entries in the original time frame: + +* `First` - the value of the first entry in the frame. +* `Last` - the value of the last entry. +* `Min` - the smallest value. +* `Max` - the largest value. +* `Sum` - the sum of all the values in the frame. +* `Count` - the total number of entries in the frame. + +This results in a much more compact time series that still contains useful information about the original time series (also called the "raw" time series). + +#### Rollup policies: + +Rollup time series are created automatically according to rollup policies that can be defined from the Studio or client code. + +* A rollup policy applies to all time series of every document in the given collection. + +* Each collection can be configured to have multiple policies which are applied sequentially: + * The raw time series is first rolled up using the policy with the shortest aggregation frame. + * Subsequently, the resulting rollup time series is further aggregated using the policy with the next shortest aggregation frame, + and so on. + +[Querying with group-by](../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#examples) +will transparently traverse over the rollups to retrieve the relevant results. + +Let's look at an example of rollup data: + +!["Rollup time series entries"](./assets/rollup-1.png) + +**1) Name:** +The name of a rollup time series has this format: `<time-series-name>@<policy-name>` +It is a combination of the name of the raw time series and the name of the time series policy, separated by `@`. +In the image above these are "HeartRates" and "byHour" respectively. +For this reason, neither a time series name nor a policy name can have the character `@` in it. + +**2) Timestamp:** +The aggregation frame always begins at a round number of one of these time units: a second, minute, hour, day, week, month, or year. +So the frame includes all entries starting at a round number of time units, and ending at a round number *minus one millisecond* +(since milliseconds are the minimal resolution in RavenDB time series). +The timestamp for a rollup entry is the beginning of the frame it represents. + +For example, if the aggregation frame is three days, a frame will start and end at timestamps like: +`2020-01-01 00:00:00` - `2020-01-03 23:59:59.999`. + +**3) Values:** +Each group of six values represents one value from the original entries. +If the raw time series has `n` values per entry, the rollup time series will have `6 * n` values per entry: +the first six summarize the first raw value, the next six summarize the next raw value, and so on. +For example, a raw series with two values per entry produces twelve values per rollup entry. +The aggregated values have the names: `"First (<value name>)", "Last (<value name>)", ...` respectively.
+ +Because time series entries are limited to 32 values, rollups are limited to the first five values of an original time series entry, or 30 aggregate values. + + + +## Examples + +#### Create time series policies: + + + +{`$oneWeek = TimeValue::ofDays(7); +$fiveYears = TimeValue::ofYears(5); + +// Define a policy on the RAW time series data: +// ============================================ +$rawPolicy = new RawTimeSeriesPolicy($fiveYears); // Retain entries for five years + +// Define a ROLLUP policy: +// ======================= +$rollupPolicy = new TimeSeriesPolicy( + "By1WeekFor1Year", // Name of policy + $oneWeek, // Aggregation time, roll-up the data for each week + $fiveYears); // Retention time, keep data for five years + +// Define the time series configuration for collection "Companies" (use above policies): +// ===================================================================================== +$companyConfig = new TimeSeriesCollectionConfiguration(); +$companyConfig->setPolicies(TimeSeriesPolicyArray::fromArray([ $rollupPolicy ])); +$companyConfig->setRawPolicy($rawPolicy); + +$timeSeriesConfig = new TimeSeriesConfiguration(); +$timeSeriesConfig->setCollections([ + "Companies" => $companyConfig +]); + +// Deploy the time series configuration to the server +// by sending the 'ConfigureTimeSeriesOperation' operation: +// ======================================================== +$store->maintenance()->send(new ConfigureTimeSeriesOperation($timeSeriesConfig)); + +// NOTE: +// The time series entries in the RavenDB sample data are dated up to the year 2020. +// To ensure that you see the rollup time series created when running this example, +// the retention time should be set to exceed that year. +`} + + +#### Retrieve rollup data: + +* Retrieving entries from a rollup time series is similar to getting the raw time series data. + +* Learn more about using `timeSeriesFor.get` in [Get time series entries](../../document-extensions/timeseries/client-api/session/get/get-entries.mdx). + + + +{`// Get all data from the RAW time series: +// ====================================== + +$rawData = $session + ->timeSeriesFor("companies/91-A", "StockPrices") + ->get(); + +// Get all data from the ROLLUP time series: +// ========================================= + +// Either - pass the rollup name explicitly to 'TimeSeriesFor': +$rollupData = $session + ->timeSeriesFor("companies/91-A", "StockPrices@By1WeekFor1Year") + ->get(); + +// Or - get the rollup name by calling 'GetTimeSeriesName': +$rollupData = $session + ->timeSeriesFor("companies/91-A", $rollupPolicy->GetTimeSeriesName("StockPrices")) + ->get(); + +// The raw time series has 100 entries +$this->assertCount(100, $rawData); +$this->assertFalse($rawData[0]->isRollup()); + +// The rollup time series has only 22 entries +// as each entry aggregates 1 week's data from the raw time series +$this->assertCount(22, $rollupData); +$this->assertTrue($rollupData[0]->isRollup()); +`} + + + + + +## Syntax + +### The time series policies + +* `rawPolicy` + * Used to define the retention time of the raw time series. + * Only one such policy per collection can be defined. + * Does not perform aggregation. + +* Rollup policy: + * Used to define the aggregation time frame and retention time for the rollup time series. + * Multiple policies can be defined per collection. + + + +{`class RawTimeSeriesPolicy(TimeSeriesPolicy): + def __init__(self, retention_time: TimeValue = TimeValue.MAX_VALUE()): + ... 
+ +class TimeSeriesPolicy: + def __init__( + self, + name: Optional[str] = None, + aggregation_time: Optional[TimeValue] = None, + retention_time: TimeValue = TimeValue.MAX_VALUE(), + ): + ... +`} + + + +| Property | Type | Description | +|--------------------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **name** (Optional) | `str` | This string is used to create the name of the rollup time series.
`name` is added to the raw time series name - with `@` as a separator,
e.g.: `<time-series-name>@<policy-name>` | +| **retention_time** | `TimeValue` | Time series entries older than this time value (see `TimeValue` below) are automatically deleted. | +| **aggregation_time** (Optional) | `TimeValue` | The time series data being rolled up is divided into parts of this length of time, rounded to the nearest time unit. Each part is aggregated into an entry of the rollup time series. | + + + +{`class TimeValue: + def __init__(self, value: int, unit: TimeValueUnit): + self.value = value + self.unit = unit + + @classmethod + def of_seconds(cls, seconds: int) -> TimeValue: + return cls(seconds, TimeValueUnit.SECOND) + + @classmethod + def of_minutes(cls, minutes: int) -> TimeValue: + return cls(minutes * 60, TimeValueUnit.SECOND) + + @classmethod + def of_hours(cls, hours: int) -> TimeValue: + return cls(hours * 3600, TimeValueUnit.SECOND) + + @classmethod + def of_days(cls, days: int) -> TimeValue: + return cls(days * cls.SECONDS_PER_DAY, TimeValueUnit.SECOND) + + @classmethod + def of_months(cls, months: int) -> TimeValue: + return cls(months, TimeValueUnit.MONTH) + + @classmethod + def of_years(cls, years: int) -> TimeValue: + return cls(12 * years, TimeValueUnit.MONTH) +`} + + + +Each of the above `TimeValue` methods returns a `TimeValue` object representing a whole number of the specified time units. +These methods are used to define the aggregation and retention spans in time series policies. +### The time series configuration object + + + +{`class TimeSeriesConfiguration: + def __init__(self): + self.collections: Dict[str, TimeSeriesCollectionConfiguration] = \{\} + self.policy_check_frequency: Optional[datetime.timedelta] = None + self.named_values: Optional[Dict[str, Dict[str, List[str]]]] = None + +class TimeSeriesCollectionConfiguration: + def __init__( + self, + disabled: Optional[bool] = False, + policies: Optional[List[TimeSeriesPolicy]] = None, + raw_policy: Optional[RawTimeSeriesPolicy] = RawTimeSeriesPolicy.DEFAULT_POLICY(), + ): + self.disabled = disabled + self.policies = policies + self.raw_policy = raw_policy +`} + + + +| Property | Type | Description | +|-----------------|------|-------------| +| **collections** | `Dict[str, TimeSeriesCollectionConfiguration]` | Populate this `Dict` with the collection names and their corresponding `TimeSeriesCollectionConfiguration` objects. | +| **disabled** (Optional) | `bool` | If set to `true`, rollup processes will stop, and time series data will not be deleted by retention policies. | +| **policies** (Optional) | `List[TimeSeriesPolicy]` | Populate this `List` with your rollup policies. | +| **raw_policy** (Optional) | `RawTimeSeriesPolicy` | The `RawTimeSeriesPolicy`, the retention policy for the raw time series. | +### The time series configuration operation + + + +{`class ConfigureTimeSeriesOperation(MaintenanceOperation[ConfigureTimeSeriesOperationResult]) +`} + + + +Learn more about operations in: [What are operations](../../client-api/operations/what-are-operations.mdx).
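+ +Note that the signatures above are shown in their Python form. For reference, here is a minimal PHP usage sketch, reusing the `$timeSeriesConfig` object built in the example earlier on this page: + + + +{`// Deploy the configuration built in the "Create time series policies" example. +$store->maintenance()->send(new ConfigureTimeSeriesOperation($timeSeriesConfig)); +`} +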
+### Time series entries + +Time series entries are of one of the following classes: + + + +{`class TimeSeriesEntry: + def __init__( + self, timestamp: datetime.datetime = None, tag: str = None, values: List[int] = None, rollup: bool = None + ): + self.timestamp = timestamp + self.tag = tag + self.values = values + self.rollup = rollup + +class TypedTimeSeriesEntry(Generic[_T_TSBindable]): + def __init__( + self, + timestamp: datetime.datetime = None, + tag: str = None, + values: List[int] = None, + is_rollup: bool = None, + value: _T_TSBindable = None, + ): + self.timestamp = timestamp + self.tag = tag + self.values = values + self.is_rollup = is_rollup + self.value = value + + +class TypedTimeSeriesRollupEntry(Generic[_T_Values]): + def __init__(self, object_type: Type[_T_Values], timestamp: datetime.datetime): + self._object_type = object_type + self.tag: Optional[str] = None + self.rollup = True + self.timestamp = timestamp + + self._first: Optional[_T_Values] = None + self._last: Optional[_T_Values] = None + self._max: Optional[_T_Values] = None + self._min: Optional[_T_Values] = None + self._sum: Optional[_T_Values] = None + self._count: Optional[_T_Values] = None + self._average: Optional[_T_Values] = None +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-python.mdx new file mode 100644 index 0000000000..aa992fed57 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/_rollup-and-retention-python.mdx @@ -0,0 +1,309 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Many time series applications produce massive amounts of data at a steady rate. +**Time Series Policies** help you manage your data in two ways: + +* Creating **Rollups**: + Summarizing time series data by aggregating it into the form of a new, lower-resolution time series. + +* Limiting **Retention**: + Controlling the duration for which time series data is kept before deletion. + +* In this page: + * [Time series policies](../../document-extensions/timeseries/rollup-and-retention.mdx#time-series-policies) + * [Examples](../../document-extensions/timeseries/rollup-and-retention.mdx#examples) + * [Create time series policy](../../document-extensions/timeseries/rollup-and-retention.mdx#create-time-series-policies) + * [Retrieve rollup data](../../document-extensions/timeseries/rollup-and-retention.mdx#retrieve-rollup-data) + * [Syntax](../../document-extensions/timeseries/rollup-and-retention.mdx#syntax) + + +## Time series policies + +#### What are rollups? + +A rollup is a time series that summarizes the data from another time series, +with each rollup entry representing a specific time frame in the original time series. +Each rollup entry contains 6 values that aggregate the data from all the entries in the original time frame: + +* `First` - the value of the first entry in the frame. +* `Last` - the value of the last entry. +* `Min` - the smallest value. +* `Max` - the largest value. +* `Sum` - the sum of all the values in the frame. +* `Count` - the total number of entries in the frame. + +This results in a much more compact time series that still contains useful information about the original time series (also called "raw" time series). 
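+ +Since each rollup entry stores the six aggregates listed above for every value of the original entry, you can compute where a given aggregate lives in a rollup entry's flat values list. The helper below is our own illustrative sketch, and the layout order is an assumption based on the list above, not part of the official API: + + + +{`# Hypothetical helper: locate an aggregate in a rollup entry's flat values list, +# assuming six aggregates per raw value, in the order listed above. +AGGREGATES = ["First", "Last", "Min", "Max", "Sum", "Count"] + +def rollup_value_index(raw_value_index: int, aggregate: str) -> int: + return raw_value_index * len(AGGREGATES) + AGGREGATES.index(aggregate) + +print(rollup_value_index(0, "Max")) # 3 - the Max of the first raw value +print(rollup_value_index(1, "Sum")) # 10 - the Sum of the second raw value +`} +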
+ +#### Rollup policies: + +Rollup time series are created automatically according to rollup policies that can be defined from the Studio or client code. + +* A rollup policy applies to all time series of every document in the given collection. + +* Each collection can be configured to have multiple policies which are applied sequentially: + * The raw time series is first rolled up using the policy with the shortest aggregation frame. + * Subsequently, the resulting rollup time series is further aggregated using the policy with the next shortest aggregation frame, + and so on. + +[Querying with group-by](../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) +will transparently traverse over the rollups to retrieve the relevant results. + +Let's look at an example of rollup data: + +!["Rollup time series entries"](./assets/rollup-1.png) + +**1) Name:** +The name of a rollup time series has this format: `<time-series-name>@<policy-name>` +It is a combination of the name of the raw time series and the name of the time series policy, separated by `@`. +In the image above these are "HeartRates" and "byHour" respectively. +For this reason, neither a time series name nor a policy name can have the character `@` in it. + +**2) Timestamp:** +The aggregation frame always begins at a round number of one of these time units: a second, minute, hour, day, week, month, or year. +So the frame includes all entries starting at a round number of time units, and ending at a round number *minus one millisecond* +(since milliseconds are the minimal resolution in RavenDB time series). +The timestamp for a rollup entry is the beginning of the frame it represents. + +For example, if the aggregation frame is three days, a frame will start and end at timestamps like: +`2020-01-01 00:00:00` - `2020-01-03 23:59:59.999`. + +**3) Values:** +Each group of six values represents one value from the original entries. +If the raw time series has `n` values per entry, the rollup time series will have `6 * n` values per entry: +the first six summarize the first raw value, the next six summarize the next raw value, and so on. +For example, a raw series with two values per entry produces twelve values per rollup entry. +The aggregated values have the names: `"First (<value name>)", "Last (<value name>)", ...` respectively. + +Because time series entries are limited to 32 values, rollups are limited to the first five values of an original time series entry, or 30 aggregate values. + + + +## Examples + +#### Create time series policies: + + + +{`# Policy for the original ("raw") time-series, +# to keep the data for one week +one_week = TimeValue.of_days(7) +raw_retention = RawTimeSeriesPolicy(one_week) + +# Roll-up the data for each day, +# and keep the results for one year +one_day = TimeValue.of_days(1) +one_year = TimeValue.of_years(1) +daily_rollup = TimeSeriesPolicy("DailyRollupForOneYear", one_day, one_year) + +# Enter the above policies into a +# time-series collection configuration +# for the collection 'Sales' +sales_ts_config = TimeSeriesCollectionConfiguration(policies=[daily_rollup], raw_policy=raw_retention) + +# Enter the configuration for the Sales collection +# into a time-series configuration for the whole database +database_ts_config = TimeSeriesConfiguration() +database_ts_config.collections["Sales"] = sales_ts_config + +# Send the time-series configuration to the server +store.maintenance.send(ConfigureTimeSeriesOperation(database_ts_config)) +`} + + +#### Retrieve rollup data: + +* Retrieving entries from a rollup time series is similar to getting the raw time series data.
+ +* Learn more about using `time_series_for.get` in [Get time series entries](../../document-extensions/timeseries/client-api/session/get/get-entries.mdx). + + + +{`# Create local instance of the time-series "rawSales" +# in the document "sales/1" +raw_ts = session.time_series_for("sales/1", "rawSales") + +# Create local instance of the rollup time-series - first method: +daily_rollup_TS = session.time_series_for("sales/1", "rawSales@DailyRollupForOneYear") + +# Create local instance of the rollup time-series - second method: +# using the rollup policy itself and the raw time-series' name +rollup_time_series_2 = session.time_series_for("sales/1", daily_rollup.get_time_series_name("rawSales")) + +# Retrieve all the data from both time-series +raw_data = raw_ts.get(datetime.min, datetime.max) +rollup_data = daily_rollup_TS.get(datetime.min, datetime.max) +`} + + + + + +## Syntax + +### The time series policies + +* `raw_policy` + * Used to define the retention time of the raw time series. + * Only one such policy per collection can be defined. + * Does not perform aggregation. + +* Rollup policy: + * Used to define the aggregation time frame and retention time for the rollup time series. + * Multiple policies can be defined per collection. + + + +{`class RawTimeSeriesPolicy(TimeSeriesPolicy): + def __init__(self, retention_time: TimeValue = TimeValue.MAX_VALUE()): + ... + +class TimeSeriesPolicy: + def __init__( + self, + name: Optional[str] = None, + aggregation_time: Optional[TimeValue] = None, + retention_time: TimeValue = TimeValue.MAX_VALUE(), + ): + ... +`} + + + +| Property | Type | Description | +|----------------------------|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **name** (Optional) | `str` | This string is used to create the name of the rollup time series.
`name` is added to the raw time series name - with `@` as a separator,
e.g.: `<time-series-name>@<policy-name>` | +| **retention_time** | `TimeValue` | Time series entries older than this time value (see `TimeValue` below) are automatically deleted. | +| **aggregation_time** (Optional) | `TimeValue` | The time series data being rolled up is divided into parts of this length of time, rounded to the nearest time unit. Each part is aggregated into an entry of the rollup time series. | + + + +{`class TimeValue: + def __init__(self, value: int, unit: TimeValueUnit): + self.value = value + self.unit = unit + + @classmethod + def of_seconds(cls, seconds: int) -> TimeValue: + return cls(seconds, TimeValueUnit.SECOND) + + @classmethod + def of_minutes(cls, minutes: int) -> TimeValue: + return cls(minutes * 60, TimeValueUnit.SECOND) + + @classmethod + def of_hours(cls, hours: int) -> TimeValue: + return cls(hours * 3600, TimeValueUnit.SECOND) + + @classmethod + def of_days(cls, days: int) -> TimeValue: + return cls(days * cls.SECONDS_PER_DAY, TimeValueUnit.SECOND) + + @classmethod + def of_months(cls, months: int) -> TimeValue: + return cls(months, TimeValueUnit.MONTH) + + @classmethod + def of_years(cls, years: int) -> TimeValue: + return cls(12 * years, TimeValueUnit.MONTH) +`} + + + +Each of the above `TimeValue` methods returns a `TimeValue` object representing a whole number of the specified time units. +These methods are used to define the aggregation and retention spans in time series policies. +### The time series configuration object + + + +{`class TimeSeriesConfiguration: + def __init__(self): + self.collections: Dict[str, TimeSeriesCollectionConfiguration] = \{\} + self.policy_check_frequency: Optional[datetime.timedelta] = None + self.named_values: Optional[Dict[str, Dict[str, List[str]]]] = None + +class TimeSeriesCollectionConfiguration: + def __init__( + self, + disabled: Optional[bool] = False, + policies: Optional[List[TimeSeriesPolicy]] = None, + raw_policy: Optional[RawTimeSeriesPolicy] = RawTimeSeriesPolicy.DEFAULT_POLICY(), + ): + self.disabled = disabled + self.policies = policies + self.raw_policy = raw_policy +`} + + + +| Property | Type | Description | +|-----------------|------|-------------| +| **collections** | `Dict[str, TimeSeriesCollectionConfiguration]` | Populate this `Dict` with the collection names and their corresponding `TimeSeriesCollectionConfiguration` objects. | +| **disabled** (Optional) | `bool` | If set to `true`, rollup processes will stop, and time series data will not be deleted by retention policies. | +| **policies** (Optional) | `List[TimeSeriesPolicy]` | Populate this `List` with your rollup policies. | +| **raw_policy** (Optional) | `RawTimeSeriesPolicy` | The `RawTimeSeriesPolicy`, the retention policy for the raw time series. | +### The time series configuration operation + + + +{`class ConfigureTimeSeriesOperation(MaintenanceOperation[ConfigureTimeSeriesOperationResult]) +`} + + + +Learn more about operations in: [What are operations](../../client-api/operations/what-are-operations.mdx).
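+ +Putting the pieces above together, here is a minimal end-to-end sketch we added. It assumes an initialized `DocumentStore` named `store`, a collection named `Sensors`, and our reading of `policy_check_frequency` as the server's policy re-evaluation interval: + + + +{`from datetime import timedelta + +config = TimeSeriesConfiguration() +config.collections["Sensors"] = TimeSeriesCollectionConfiguration( + policies=[TimeSeriesPolicy("ByHour", TimeValue.of_hours(1), TimeValue.of_days(30))], + raw_policy=RawTimeSeriesPolicy(TimeValue.of_days(7)), +) + +# Assumption: how often the server re-evaluates rollup/retention policies. +config.policy_check_frequency = timedelta(minutes=10) + +store.maintenance.send(ConfigureTimeSeriesOperation(config)) +`} +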
+### Time series entries + +Time series entries are of one of the following classes: + + + +{`class TimeSeriesEntry: + def __init__( + self, timestamp: datetime.datetime = None, tag: str = None, values: List[int] = None, rollup: bool = None + ): + self.timestamp = timestamp + self.tag = tag + self.values = values + self.rollup = rollup + +class TypedTimeSeriesEntry(Generic[_T_TSBindable]): + def __init__( + self, + timestamp: datetime.datetime = None, + tag: str = None, + values: List[int] = None, + is_rollup: bool = None, + value: _T_TSBindable = None, + ): + self.timestamp = timestamp + self.tag = tag + self.values = values + self.is_rollup = is_rollup + self.value = value + + +class TypedTimeSeriesRollupEntry(Generic[_T_Values]): + def __init__(self, object_type: Type[_T_Values], timestamp: datetime.datetime): + self._object_type = object_type + self.tag: Optional[str] = None + self.rollup = True + self.timestamp = timestamp + + self._first: Optional[_T_Values] = None + self._last: Optional[_T_Values] = None + self._max: Optional[_T_Values] = None + self._min: Optional[_T_Values] = None + self._sum: Optional[_T_Values] = None + self._count: Optional[_T_Values] = None + self._average: Optional[_T_Values] = None +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_time-series-and-other-features-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_time-series-and-other-features-csharp.mdx new file mode 100644 index 0000000000..9e9c202531 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/_time-series-and-other-features-csharp.mdx @@ -0,0 +1,115 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This page describes how time series interact with various other RavenDB features. + +* Features not listed here either have no special behavior regarding time series, + or they have their own pages describing their interaction with time series (such as [indexing](../../document-extensions/timeseries/indexing.mdx)). + +* In this page: + * [General features](../../document-extensions/timeseries/time-series-and-other-features.mdx#general-features) + * [Smuggler](../../document-extensions/timeseries/time-series-and-other-features.mdx#smuggler) + * [Ongoing tasks](../../document-extensions/timeseries/time-series-and-other-features.mdx#ongoing-tasks) + * [Revisions](../../document-extensions/timeseries/time-series-and-other-features.mdx#revisions) + + +## General features + +* The Document Session [tracks](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) changes to time series data. +* The [Changes API](../../client-api/changes/what-is-changes-api.mdx) service is triggered by changes to time series data. +* Learn about how to **index** time series [here](../../document-extensions/timeseries/indexing.mdx). +* Learn about how to **query** time series data [here](../../document-extensions/timeseries/querying/overview-and-syntax.mdx). +* Learn how to **include** time series with `session.Load()` and in queries [here](../../document-extensions/timeseries/client-api/session/include/overview.mdx). + + + +## Smuggler + +[Smuggler](../../client-api/smuggler/what-is-smuggler.mdx) is a DocumentStore property that can be used to export selected database items to an external file +or import database items from an existing file into the database. 
+ +To [configure smuggler](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerexportoptions) to handle time series, +add the `DatabaseItemType.TimeSeries` flag to the `OperateOnTypes` property: + + + +{`OperateOnTypes = DatabaseItemType.Documents | DatabaseItemType.TimeSeries +`} + + + + + +## Ongoing tasks + +[Ongoing tasks](../../studio/database/tasks/ongoing-tasks/general-info.mdx) are various automatic processes that operate on the database. +Some of these apply to time series data, while others do not. + +#### Tasks that apply to time series + +* [External replication](../../server/ongoing-tasks/external-replication.mdx) creates a complete copy of a database, including documents and their extensions. +* [Hub/Sink replication](../../server/ongoing-tasks/hub-sink-replication.mdx) allows you to create a live replica of a database or a part of it, + including documents' time series, using Hub and Sink tasks. +* [Backups](../../client-api/operations/maintenance/backup/backup-overview.mdx) save the whole database at a certain point in time and can be used to restore the database later. + All kinds of backups include time series data: logical-backup and snapshot, full and incremental. +* [RavenDB ETL](../../server/ongoing-tasks/etl/raven.mdx#time-series) is a type of task that _extracts_ some portion of the data from a database, _transforms_ it according to a script, + and _loads_ it to another RavenDB database on another server. + +#### Tasks that cannot be applied to time series + +* [SQL ETL](../../server/ongoing-tasks/etl/basics.mdx), another type of ETL that can set a relational database as its target. +* [Data Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx) send data to "worker" clients in batches. + + + +Support for time series in ETL is planned for one of the next releases. + + + + + +## Revisions + +[Revisions](../../document-extensions/revisions/overview.mdx) are old versions of a document. +They can be created manually or by setting a policy that creates them automatically on selected collections. + +Revisions do not preserve time series data, and editing a time series does not trigger the creation of a new revision as editing a document would. +This is because time series are designed to accommodate frequent additions of new entries quickly, and creating revisions each time would significantly slow down this process. + +However, revisions _are_ created if a _new_ time series is added to the document, +or if an existing time series is deleted. (Remember that a time series is deleted by deleting all of its entries.) + +#### The `@timeseries-snapshot` metadata property + +While revisions don't contain the time series data themselves, they do include a few details about the time series the document had at the time. +These details appear in the `@timeseries-snapshot` property within the document's metadata. +When a revision is viewed in the Studio, this metadata property looks like this: + +![NoSQL Database Time Series Feature](./assets/TSSnapshot.png) + +This time series snapshot property can also be accessed by loading a revision in the client. +This is the general JSON format of the time series snapshot: + + + +{`"@metadata": \{ + ... + "@timeseries-snapshot": \{ + "<time series name>": \{ + "Count": <number of entries>, + "Start": "<first entry timestamp>", + "End": "<last entry timestamp>" + \}, + "<another time series name>": \{ ...
\} + \} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/_time-series-and-other-features-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/_time-series-and-other-features-nodejs.mdx new file mode 100644 index 0000000000..bb63670643 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/_time-series-and-other-features-nodejs.mdx @@ -0,0 +1,116 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This page describes how time series interact with various other RavenDB features. + +* Features not listed here either have no special behavior regarding time series, + or they have their own pages describing their interaction with time series (such as [indexing](../../document-extensions/timeseries/indexing.mdx)). + +* In this page: + * [General features](../../document-extensions/timeseries/time-series-and-other-features.mdx#general-features) + * [Smuggler](../../document-extensions/timeseries/time-series-and-other-features.mdx#smuggler) + * [Ongoing tasks](../../document-extensions/timeseries/time-series-and-other-features.mdx#ongoing-tasks) + * [Revisions](../../document-extensions/timeseries/time-series-and-other-features.mdx#revisions) + + +## General features + +* The Document Session [tracks](../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#tracking-changes) changes to time series data. +* The [Changes API](../../client-api/changes/what-is-changes-api.mdx) service is triggered by changes to time series data. +* Learn about how to **index** time series [here](../../document-extensions/timeseries/indexing.mdx). +* Learn about how to **query** time series data [here](../../document-extensions/timeseries/querying/overview-and-syntax.mdx). +* Learn how to **include** time series with `session.Load()` and in queries [here](../../document-extensions/timeseries/client-api/session/include/overview.mdx). + + + +## Smuggler + +[Smuggler](../../client-api/smuggler/what-is-smuggler.mdx) is a DocumentStore property that can be used to export selected database items to an external file +or import database items from an existing file into the database. + +To [configure smuggler](../../client-api/smuggler/what-is-smuggler.mdx#databasesmugglerexportoptions) to handle time series, +add the string `TimeSeries` to the `operateOnTypes` array: + + + +{`const options = new DatabaseSmugglerExportOptions(); +options.operateOnTypes = ["Documents", "TimeSeries"]; +`} + + + + + +## Ongoing tasks + +[Ongoing tasks](../../studio/database/tasks/ongoing-tasks/general-info.mdx) are various automatic processes that operate on the database. +Some of these apply to time series data, while others do not. + +#### Tasks that apply to time series + +* [External replication](../../server/ongoing-tasks/external-replication.mdx) creates a complete copy of a database, including documents and their extensions. +* [Hub/Sink replication](../../server/ongoing-tasks/hub-sink-replication.mdx) allows you to create a live replica of a database or a part of it, + including documents' time series, using Hub and Sink tasks. +* [Backups](../../client-api/operations/maintenance/backup/backup-overview.mdx) save the whole database at a certain point in time and can be used to restore the database later. + All kinds of backups include time series data: logical-backup and snapshot, full and incremental. 
+* [RavenDB ETL](../../server/ongoing-tasks/etl/raven.mdx#time-series) is a type of task that _extracts_ some portion of the data from a database, _transforms_ it according to a script, + and _loads_ it to another RavenDB database on another server. + +#### Tasks that cannot be applied to time series + +* [SQL ETL](../../server/ongoing-tasks/etl/basics.mdx), another type of ETL that can set a relational database as its target. +* [Data Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx) send data to "worker" clients in batches. + + + +Support for time series in ETL is planned for one of the next releases. + + + + + +## Revisions + +[Revisions](../../document-extensions/revisions/overview.mdx) are old versions of a document. +They can be created manually or by setting a policy that creates them automatically on selected collections. + +Revisions do not preserve time series data, and editing a time series does not trigger the creation of a new revision as editing a document would. +This is because time series are designed to accommodate frequent additions of new entries quickly, and creating revisions each time would significantly slow down this process. + +However, revisions _are_ created if a _new_ time series is added to the document, +or if an existing time series is deleted. (Remember that a time series is deleted by deleting all of its entries.) + +#### The `@timeseries-snapshot` metadata property + +While revisions don't contain the time series data themselves, they do include a few details about the time series the document had at the time. +These details appear in the `@timeseries-snapshot` property within the document's metadata. +When a revision is viewed in the Studio, this metadata property looks like this: + +![NoSQL Database Time Series Feature](./assets/TSSnapshot.png) + +This time series snapshot property can also be accessed by loading a revision in the client. +This is the general JSON format of the time series snapshot: + + + +{`"@metadata": \{ + ... + "@timeseries-snapshot": \{ + "<time series name>": \{ + "Count": <number of entries>, + "Start": "<first entry timestamp>", + "End": "<last entry timestamp>" + \}, + "<another time series name>": \{ ...
+        \}
+    \}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/assets/TSSnapshot.png b/versioned_docs/version-7.1/document-extensions/timeseries/assets/TSSnapshot.png
new file mode 100644
index 0000000000..d02259442a
Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/timeseries/assets/TSSnapshot.png differ
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/assets/rollup-1.png b/versioned_docs/version-7.1/document-extensions/timeseries/assets/rollup-1.png
new file mode 100644
index 0000000000..abe4dcab26
Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/timeseries/assets/rollup-1.png differ
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_category_.json
new file mode 100644
index 0000000000..2b13474af7
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 1,
+    "label": "Client API"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_javascript-support-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_javascript-support-csharp.mdx
new file mode 100644
index 0000000000..cc7e9524a3
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_javascript-support-csharp.mdx
@@ -0,0 +1,173 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* With the introduction of time series, RavenDB has extended its [JavaScript support](../../../server/kb/javascript-engine.mdx)
+  to include manipulations involving time series data when patching [single](../../../client-api/operations/patching/single-document.mdx#patching-how-to-perform-single-document-patch-operations)
+  or [multiple](../../../client-api/operations/patching/set-based.mdx) documents.
+
+* Time series capabilities can be achieved via JavaScript when using the following methods:
+    * [session.Advanced.Defer](../../../document-extensions/timeseries/client-api/session/patch.mdx) - perform patch via the _Session_
+    * [PatchOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchoperation) - perform patch via a _Store_ operation
+    * [PatchByQueryOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchbyqueryoperation) - perform query & patch via a _Store_ operation
+
+* The server treats timestamps passed in the scripts as **UTC**; no conversion to local time is applied by the client. A minimal sketch is shown below.
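+A minimal sketch of this UTC behavior, shown as a patch script only.
+The document, series name, timestamp, and tag below are illustrative assumptions, not values taken from this article:
+
+
+
+{`// The timestamp is passed as an ISO 8601 string with a 'Z' (UTC) suffix;
+// the server stores it as given, with no conversion to local time.
+timeseries(this, 'HeartRates')
+    .append(new Date('2024-01-01T12:00:00Z'), [72], 'watches/fitbit');
+`}
+
+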
+ +* In this page: + * [JavaScript time series API methods](../../../document-extensions/timeseries/client-api/javascript-support.mdx#javascript-time-series-api-methods) + * [timeseries - choose a time series](../../../document-extensions/timeseries/client-api/javascript-support.mdx#section) + * [timeseries.append - append an entry](../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-1) + * [timeseries.delete - delete entries](../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-2) + * [timeseries.get - get entries](../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-3) + * [Examples](../../../document-extensions/timeseries/client-api/javascript-support.mdx#examples) + + +## JavaScript time series API methods + +The JavaScript time series API includes these methods: +#### `timeseries (doc, name)` + +Choose a time series by the ID of its owner document and by the series name. + +| Parameter | Type | Description | +|-----------|-------------------------------------------|----------------------------------------------------------------------------------------------------------| +| **doc** | `string`
or<br/>`document instance` | Document ID, e.g. `timeseries('users/1-A', 'StockPrice')`<br/><br/>
e.g. `timeseries(this, 'StockPrice')` | +| **name** | `string` | Time Series Name | + +#### `timeseries.append` + +You can use two overloads, to append **tagged** or **untagged** time series entries. + +* `timeseries.append (timestamp, values)` +* `timeseries.append (timestamp, values, tag)` + +| Parameter | Type | Description | +|---------------|------------|--------------| +| **timestamp** | `DateTime` | Timestamp | +| **values** | `double[]` | Values | +| **tag** | `string` | Tag | + +#### `timeseries.delete (from, to)` + +Use this method to delete a range of entries from a document. + +| Parameter | Type | Description | +|---------------------|------------|--------------------------------------------------------------------------------------------| +| **from** (optional) | `DateTime` | Entries will be deleted starting at this timestamp (inclusive)
Default: `DateTime.Min` | +| **to** (optional) | `DateTime` | Entries will be deleted up to this timestamp (inclusive)
Default: `DateTime.Max` | + +#### `timeseries.get (from, to)` + +Use this method to retrieve a range of time series entries. + +| Parameter | Type | Description | +|---------------------|------------|---------------------------------------------------------------------------------------------| +| **from** (optional) | `DateTime` | Get time series entries starting from this timestamp (inclusive)
Default: `DateTime.Min` | +| **to** (optional) | `DateTime` | Get time series entries ending at this timestamp (inclusive)
Default: `DateTime.Max` | + +**Return Type**: +Values are returned in an array of time series entries, i.e. - + + + +{`[ + \{ + "Timestamp" : ... + "Tag": ... + "Values": ... + "IsRollup": ... + \}, + \{ + "Timestamp" : ... + "Tag": ... + "Values": ... + "IsRollup": ... + \} + ... +] +`} + + + + + +## Examples + +* This example shows a script that appends 100 entries to time series "HeartRates" in document "Users/john". + The script is passed to method [session.Advanced.Defer](../../../document-extensions/timeseries/client-api/session/patch.mdx). + + +{`var baseTime = DateTime.UtcNow; + +// Create arrays of timestamps and random values to patch +var values = new List(); +var timeStamps = new List(); + +for (var i = 0; i < 100; i++) +\{ + values.Add(68 + Math.Round(19 * new Random().NextDouble())); + timeStamps.Add(baseTime.AddMinutes(i)); +\} + +session.Advanced.Defer(new PatchCommandData("users/john", null, + new PatchRequest + \{ + Script = @" + var i = 0; + for(i = 0; i < $values.length; i++) + \{ + timeseries(id(this), $timeseries) + .append ( + new Date($timeStamps[i]), + $values[i], + $tag); + \}", + + Values = + \{ + \{ "timeseries", "HeartRates" \}, + \{ "timeStamps", timeStamps \}, + \{ "values", values \}, + \{ "tag", "watches/fitbit" \} + \} + \}, null)); + +session.SaveChanges(); +`} + + + +* This example shows a script that deletes time series "HeartRates" for documents that match the specified query. + The script is passed to the [PatchByQueryOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchbyqueryoperation) operation. + + +{`PatchByQueryOperation deleteByQueryOp = new PatchByQueryOperation(new IndexQuery +\{ + Query = @"from Users as u + where u.Age < 30 + update + \{ + timeseries(u, $name).delete($from, $to) + \}", + + QueryParameters = new Parameters + \{ + \{ "name", "HeartRates" \}, + \{ "from", DateTime.MinValue \}, + \{ "to", DateTime.MaxValue \} + \} +\}); + +// Execute the operation: +// Time series "HeartRates" will be deleted for all users with age < 30 +store.Operations.Send(deleteByQueryOp); +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_javascript-support-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_javascript-support-nodejs.mdx new file mode 100644 index 0000000000..308c1fcbc6 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_javascript-support-nodejs.mdx @@ -0,0 +1,181 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* With the introduction of time series, RavenDB has extended its [JavaScript support](../../../server/kb/javascript-engine.mdx) + to include manipulations involving time series data when patching [single](../../../client-api/operations/patching/single-document.mdx#patching-how-to-perform-single-document-patch-operations) + or [multiple](../../../client-api/operations/patching/set-based.mdx) documents. 
+ +* Time series capabilities can be achieved via JavaScript when using the following methods: + * [session.advanced.defer](../../../document-extensions/timeseries/client-api/session/patch.mdx) - perform patch via the _session_ + * [PatchOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchoperation) - perform patch via a _store_ operation + * [PatchByQueryOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchbyqueryoperation) - perform query & patch via a _store_ operation + +* The server treats timestamps passed in the scripts as **UTC**, no conversion is applied by the client to local time. + +* In this page: + * [JavaScript time series API methods](../../../document-extensions/timeseries/client-api/javascript-support.mdx#javascript-time-series-api-methods) + * [timeseries - choose a time series](../../../document-extensions/timeseries/client-api/javascript-support.mdx#section) + * [timeseries.append - append an entry](../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-1) + * [timeseries.delete - delete entries](../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-2) + * [timeseries.get - get entries](../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-3) + * [Examples](../../../document-extensions/timeseries/client-api/javascript-support.mdx#examples) + + +## JavaScript time series API methods + +The JavaScript time series API includes these methods: +#### `timeseries (doc, name)` + +Choose a time series by the ID of its owner document and by the series name. + +| Parameter | Type | Description | +|-----------|-------------------------------------------|----------------------------------------------------------------------------------------------------------| +| **doc** | `string`
or<br/>`document instance` | Document ID, e.g. `timeseries('users/1-A', 'StockPrice')`<br/><br/>
e.g. `timeseries(this, 'StockPrice')` | +| **name** | `string` | Time Series Name | + +#### `timeseries.append` + +You can use two overloads, to append **tagged** or **untagged** time series entries. + +* `timeseries.append (timestamp, values)` +* `timeseries.append (timestamp, values, tag)` + +| Parameter | Type | Description | +|---------------|------------|--------------| +| **timestamp** | `Date` | Timestamp | +| **values** | `number[]` | Values | +| **tag** | `string` | Tag | + +#### `timeseries.delete (from, to)` + +Use this method to delete a range of entries from a document. + +| Parameter | Type | Description | +|---------------------|----------|-----------------------------------------------------------------------------------------------------| +| **from** (optional) | `Date` | Entries will be deleted starting at this timestamp (inclusive).
Default: the minimum date value. | +| **to** (optional) | `Date` | Entries will be deleted up to this timestamp (inclusive).
Default: the maximum date value. | + +#### `timeseries.get (from, to)` + +Use this method to retrieve a range of time series entries. + +| Parameter | Type | Description | +|---------------------|----------|--------------------------------------------------------------------------------------------------------| +| **from** (optional) | `Date` | Get time series entries starting from this timestamp (inclusive).
Default: The minimum date value. | +| **to** (optional) | `Date` | Get time series entries ending at this timestamp (inclusive).
Default: The maximum date value. | + +**Return Type**: +Values are returned in an array of time series entries, i.e. - + + + +{`[ + \{ + "Timestamp" : ... + "Tag": ... + "Values": ... + "IsRollup": ... + \}, + \{ + "Timestamp" : ... + "Tag": ... + "Values": ... + "IsRollup": ... + \} + ... +] +`} + + + + + +## Examples + +* This example shows a script that appends 100 entries to time series "HeartRates" in document "Users/john". + The script is passed to method [session.Advanced.Defer](../../../document-extensions/timeseries/client-api/session/patch.mdx). + + +{`const baseTime = new Date(); + +// Prepare random values and timestamps to patch +const values = []; +const timeStamps = []; + +for (let i = 0; i < 100; i++) \{ + const randomValue = 65 + Math.round(20 * Math.random()); + values.push(randomValue); + + // NOTE: the timestamp passed in the patch request script should be in UTC + const timeStamp = new Date(baseTime.getTime() + 60_000 * i); + const utcDate = new Date(timeStamp.getTime() + timeStamp.getTimezoneOffset() * 60_000); + timeStamps.push(utcDate); +\} + +// Define the patch request +// ======================== + +const patchRequest = new PatchRequest(); + +// Provide a JavaScript script, use the 'append' method +// Note: "args." can be replaced with "$". E.g.: "args.tag" => "$tag" +patchRequest.script = \` + for(var i = 0; i < args.values.length; i++) + \{ + timeseries(id(this), args.timeseries) + .append ( + new Date(args.timeStamps[i]), + args.values[i], + args.tag); + \}\`; + +// Provide values for the params used within the script +patchRequest.values = \{ + timeseries: "HeartRates", + timeStamps: timeStamps, + values: values, + tag: "watches/fitbit" +\} + +// Define the patch command +const patchCommand = new PatchCommandData("users/john", null, patchRequest, null) + +// Pass the patch command to 'defer' +session.advanced.defer(patchCommand); + +// Call saveChanges for the patch request to execute on the server +await session.saveChanges(); +`} + + + +* This example shows a script that deletes time series "HeartRates" for documents that match the specified query. + The script is passed to the [PatchByQueryOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchbyqueryoperation) operation. + + +{`const indexQuery = new IndexQuery(); + +indexQuery.query = \` + from users as u + where u.age < 30 + update + \{ + timeseries(u, "HeartRates").delete() + \}\`; + +const deleteByQueryOp = new PatchByQueryOperation(indexQuery); + +// Execute the operation: +// Time series "HeartRates" will be deleted for all users with age < 30 +await documentStore.operations.send(deleteByQueryOp); +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_named-time-series-values-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_named-time-series-values-csharp.mdx new file mode 100644 index 0000000000..3ef3e53a10 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_named-time-series-values-csharp.mdx @@ -0,0 +1,268 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A time series entry consists of a **timestamp**, one or more **values**, and an optional **tag**. + Each value can be given a name to indicate what it represents, such as "Temperature", "Humidity", "Pressure", etc. 
+ +* Referring to these values by their names in time series methods (such as `Append`, `Get`, etc.) + makes your code more readable and easier to manage. + +* In order for the Studio to present the time series values by their names, as can be seen [here](../../../studio/database/document-extensions/time-series.mdx#time-series-view), + you need to register the time series types on the server. + +* In this page: + * [Named values](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#named-values) + * [Define time series type](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#define-time-series-type) + * [Examples](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#examples) + * [Register time series type](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#register-time-series-type) + * [Usage](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#usage) + * [Syntax](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#syntax) + + +## Named values + +* Many time series are populated with multiple values for each measurement. + For example, each GPS measurement in a route-tracking time series would include at least two values: + latitude and longitude. + +* You can ease the management of multi-value time series by - + * Naming time series values in model classes that can be used as time series types. + * Calling time series methods with your custom types to address and manage values by name. +#### Define time series type + +To define a class for use as a time series type, mark the class properties (which represent the values) +with consecutive `TimeSeriesValue` attributes: `TimeSeriesValue(0)`, `TimeSeriesValue(1)`, etc. + +E.g.: + + + +{`public class RoutePoint +\{ + // The Latitude and Longitude properties will contain the time series entry values. + // The names for these values will be "Latitude" and "Longitude" respectively. + [TimeSeriesValue(0)] public double Latitude; + [TimeSeriesValue(1)] public double Longitude; +\} +`} + + + +The class can then be used by time series methods like _Append_: + + + +{`// Append coordinates +session.TimeSeriesFor("users/john") + .Append(baseTime.AddHours(1), new RoutePoint + \{ + Latitude = 40.712776, + Longitude = -74.005974 + \}, "devices/Navigator"); +`} + + + + +A quick way of retrieving a time series entry's value, timestamp, and tag is to use `Deconstruct()`: + + + +{`public void Deconstruct(out DateTime timestamp, out T value); +public void Deconstruct(out DateTime timestamp, out T value, out string tag); +`} + + + +#### Examples + +* In this example, we define a StockPrice type and use it when appending StockPrice entries. 
+ + +{`public class StockPrice +\{ + [TimeSeriesValue(0)] public double Open; + [TimeSeriesValue(1)] public double Close; + [TimeSeriesValue(2)] public double High; + [TimeSeriesValue(3)] public double Low; + [TimeSeriesValue(4)] public double Volume; +\} +`} + + + + +{`using (var session = store.OpenSession()) +\{ + session.Store(new User \{ Name = "John" \}, "users/john"); + + // Call 'Append' with the custom StockPrice class + session.TimeSeriesFor("users/john") + .Append(baseTime.AddDays(1), new StockPrice + \{ + Open = 52, + Close = 54, + High = 63.5, + Low = 51.4, + Volume = 9824, + \}, "companies/kitchenAppliances"); + + session.TimeSeriesFor("users/john") + .Append(baseTime.AddDays(2), new StockPrice + \{ + Open = 54, + Close = 55, + High = 61.5, + Low = 49.4, + Volume = 8400, + \}, "companies/kitchenAppliances"); + + session.TimeSeriesFor("users/john") + .Append(baseTime.AddDays(3), new StockPrice + \{ + Open = 55, + Close = 57, + High = 65.5, + Low = 50, + Volume = 9020, + \}, "companies/kitchenAppliances"); + + session.SaveChanges(); +\} +`} + + + +* In this example, we get StockPrice values by name and check whether a stock's closing-time prices are ascending over time. + + +{`goingUp = false; + +using (var session = store.OpenSession()) +\{ + // Call 'Get' with the custom StockPrice class type + TimeSeriesEntry[] val = session.TimeSeriesFor("users/john") + .Get(); + + var closePriceDay1 = val[0].Value.Close; + var closePriceDay2 = val[1].Value.Close; + var closePriceDay3 = val[2].Value.Close; + + if ((closePriceDay2 > closePriceDay1) + && + (closePriceDay3 > closePriceDay2)) + goingUp = true; +\} +`} + + + +* In this query, we use the custom StockPrice type so we can address trade Volume by name. + + + +{`using (var session = store.OpenSession()) +{ + var query = + session.Query() + .Where(c => c.Address.City == "New York") + // Use the StockPrice type in the time series query + .Select(q => RavenQuery.TimeSeries(q, "StockPrices", baseTime, baseTime.AddDays(3)) + .Where(ts => ts.Tag == "companies/kitchenAppliances") + .ToList()); + + List> queryResults = query.ToList(); + + var tsEntries = queryResults[0].Results; + + double volumeDay1 = tsEntries[0].Value.Volume; + double volumeDay2 = tsEntries[1].Value.Volume; + double volumeDay3 = tsEntries[2].Value.Volume; +} +`} + + + + +{`from "companies" as c +where Address.City = $p0 +select timeseries( + from c.StockPrices + between $p1 and $p2 + where (Tag == $p3)) +{ + "p0":"New York", + "p1":"2024-06-03T10:47:00.7880000Z", + "p2":"2024-06-06T10:47:00.7880000Z", + "p3":"companies/kitchenAppliances" +} +`} + + + + + + +## Register time series type + +Registering a custom time series type on the server stores this information in the [database record](../../../studio/database/settings/database-record.mdx). +This allows the Studio to present time series values by name when you view and manage them. +#### Usage + +To register a time series type, call `store.TimeSeries.Register`, e.g.: + + + +{`// Register the StockPrice class type on the server +store.TimeSeries.Register(); +`} + + + +
+The time series entries will be listed in the Studio under their corresponding named values: + +!["Time series entries"](./assets/time-series-entries.png) + +
+The named values can be managed from the [Time Series Settings View](../../../studio/database/settings/time-series-settings.mdx) in the Studio: + +!["Time series settings view"](./assets/time-series-settings-view.png) +#### Syntax + + + +{`public void Register(string name = null) +`} + + + + +{`public void Register(string name, string[] valueNames) +`} + + + + +{`public void Register(string collection, string name, string[] valueNames) +`} + + + +
+ +| Parameter | Type | Description | +|----------------------|------------------|-------------------------------------------------------------------------| +| **TCollection** | Collection type | The time series collection | +| **TTimeSeriesEntry** | Time series type | The custom time series type | +| **collection** | `string` | The time series collection name
(when `TCollection` is not provided) | +| **name** | `string ` | Time series name | +| **valueNames** | `string[]` | Names to register (name per value) | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_named-time-series-values-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_named-time-series-values-nodejs.mdx new file mode 100644 index 0000000000..7a6a805741 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_named-time-series-values-nodejs.mdx @@ -0,0 +1,304 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A time series entry consists of a **timestamp**, one or more **values**, and an optional **tag**. + Each value can be given a name to indicate what it represents, such as "Temperature", "Humidity", "Pressure", etc. + +* Referring to these values by their names in time series methods (such as `append`, `get`, etc.) + makes your code more readable and easier to manage. + +* In order for the Studio to present the time series values by their names, as can be seen [here](../../../studio/database/document-extensions/time-series.mdx#time-series-view), + you need to register the named values on the server. + +* In this page: + * [Named values](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#named-values) + * [Define time series class with named values](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#define-time-series-class-with-named-values) + * [Examples](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#examples) + * [Register time series named values](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#register-time-series-named-values) + * [Usage](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#usage) + * [Syntax](../../../document-extensions/timeseries/client-api/named-time-series-values.mdx#syntax) + + +## Named values + +* Many time series are populated with multiple values for each measurement. + For example, each GPS measurement in a route-tracking time series would include at least two values: + latitude and longitude. + +* You can ease the management of multi-value time series by - + * Naming time series values in custom classes. + * Calling time series methods with your custom types to address and manage values by name. +#### Define time series class with named values + +To define a class with named values, add the static property `TIME_SERIES_VALUES` to the class. +E.g.: + + + +{`class RoutePoint \{ + + // Add the following static param: + static TIME_SERIES_VALUES = ["latitude", "longitude"]; + + // The Latitude and Longitude properties will contain the time series entry values. + // The names for these values will be "latitude" and "longitude" respectively. 
+ + constructor( + latitude = 0, + longitude = 0 + ) \{ + Object.assign(this, \{ + latitude, + longitude + \}); + \} +\} +`} + + + +The class can then be used by time series methods like _append_: + + + +{`const baseTime = new Date(); +const oneHour = 60 * 60 * 1000; +let nextHour = new Date(baseTime.getTime() + oneHour); + +const tsf = session.timeSeriesFor("users/john", "RoutePoints", RoutePoint); + +const routePoint = new RoutePoint(); +routePoint.latitude = 40.712776; +routePoint.longitude = -74.005974; + +// Append coordinates using the routePoint object +tsf.append(nextHour, routePoint, "devices/Navigator"); + +await session.saveChanges(); +`} + + +#### Examples + +* In this example, we define a StockPrice class and use it when appending StockPrice entries. + + +{`class StockPrice \{ + + // Define the names for the entry values + static TIME_SERIES_VALUES = ["open", "close", "high", "low", "volume"]; + + constructor( + open = 0, + close = 0, + high = 0, + low = 0, + volume = 0 + ) \{ + Object.assign(this, \{ + open, + close, + high, + low, + volume + \}); + \} +\} +`} + + + + +{`const session = documentStore.openSession(); +await session.store(new User("John"), "users/john"); + +// Get an instance of 'timeSeriesFor', pass: +// * the document ID +// * the time series name +// * the class that will hold the entry's values +const tsf = session.timeSeriesFor("users/john", "StockPrices", StockPrice); + +const optionalTag = "companies/kitchenAppliances"; +const baseTime = new Date(); +baseTime.setUTCHours(0); +const oneDay = 24 * 60 * 60 * 1000; + +// Provide the multiple values via the StockPrice class +const price1 = new StockPrice(); +price1.open = 52; +price1.close = 54; +price1.high = 63.5; +price1.low = 51.4; +price1.volume = 9824; + +// Call 'append' with the custom StockPrice class +let nextDay = new Date(baseTime.getTime() + oneDay); +tsf.append(nextDay, price1, optionalTag); + +const price2 = new StockPrice(); +price2.open = 54; +price2.close = 55; +price2.high = 61.5; +price2.low = 49.4; +price2.volume = 8400; + +nextDay = new Date(baseTime.getTime() + oneDay * 2); +tsf.append(nextDay, price2, optionalTag); + +const price3 = new StockPrice(); +price3.open = 55; +price3.close = 57; +price3.high = 65.5; +price3.low = 50; +price3.volume = 9020; + +nextDay = new Date(baseTime.getTime() + oneDay * 3); +tsf.append(nextDay, price3, optionalTag); + +await session.saveChanges(); +`} + + + +* In this example, we get StockPrice values by name and check whether a stock's closing-time prices are ascending over time. + + +{`let goingUp = false; + +const allEntries = await session + .timeSeriesFor("users/john", "StockPrices") + .get(); + +// Call 'asTypedEntry' to be able to access the entry's values by their names +// Pass the class type (StockPrice) +const typedEntry1 = allEntries[0].asTypedEntry(StockPrice); + +// Access the entry value by its StockPrice class property name (close) +const closePriceDay1 = typedEntry1.value.close; + +const typedEntry2 = allEntries[1].asTypedEntry(StockPrice); +const closePriceDay2 = typedEntry2.value.close; + +const typedEntry3 = allEntries[2].asTypedEntry(StockPrice); +const closePriceDay3 = typedEntry3.value.close; + +// Check if the stock's closing price is rising +if ((closePriceDay2 > closePriceDay1) && (closePriceDay3 > closePriceDay2)) \{ + goingUp = true; +\} +`} + + + +* In this query, we use the custom StockPrice type so we can address trade Volume by name. 
+ + + +{`const oneDay = 24 * 60 * 60 * 1000; +const startTime = new Date(); +const endTime = new Date(startTime.getTime() + 3 * oneDay); + +// Note: the 'where' clause must come after the 'between' clause +const tsQueryText = \` + from StockPrices + between $start and $end + where Tag == "AppleTech"\`; + +const query = session.query({ collection: "companies" }) + .whereEquals("address.city", "New York") + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult) + .addParameter("start", startTime) + .addParameter("end", endTime); + +// Execute the query: +const results = await query.all(); + +// Access entries results: +const tsEntries = results[0].results; + +// Call 'asTypedEntry' to be able to access the entry's values by their names +// Pass the class type (StockPrice) +const volumeDay1 = tsEntries[0].asTypedEntry(StockPrice).value.volume; +const volumeDay2 = tsEntries[1].asTypedEntry(StockPrice).value.volume; +const volumeDay3 = tsEntries[2].asTypedEntry(StockPrice).value.volume; +`} + + + + +{`from "companies" +where address.city = $p0 +select timeseries( + from StockPrices + between $start and $end + where Tag == "AppleTech") +{ + "p0":"New York", + "start":"2024-06-04T06:02:39.826Z", + "end":"2024-06-07T06:02:39.826Z" +} +`} + + + + + + +## Register time series named values + +Registering a custom time series type on the server stores this information in the [database record](../../../studio/database/settings/database-record.mdx). +This allows the Studio to present time series values by name when you view and manage them. +#### Usage + +To register a time series type, call `documentStore.timeSeries.register`, e.g.: + + + +{`// Register the named values for the 'StockPrices' series on the server +await documentStore.timeSeries.register("Users", + "StockPrices", ["open", "close", "high", "low", "volume"]); +`} + + + +
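+The class-based overloads listed under Syntax below can also be used.
+A minimal sketch, assuming the User and StockPrice classes from the examples above,
+and assuming the value names are picked up from StockPrice.TIME_SERIES_VALUES:
+
+
+
+{`// Register named values via the collection class and the entry class.
+// Assumption: the series name and value names are derived from the classes.
+await documentStore.timeSeries.register(User, StockPrice);
+`}
+
+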
+The time series entries will be listed in the Studio under their corresponding named values: + +!["Time series entries"](./assets/time-series-entries-js.png) + +
+The named values can be managed from the [Time Series Settings View](../../../studio/database/settings/time-series-settings.mdx) in the Studio: + +!["Time series settings view"](./assets/time-series-settings-view-js.png) +#### Syntax + + + +{`// Available overloads: +// ==================== + +register(collection, name, valueNames); +register(collectionClass, name, valueNames); +register(collectionClass, timeSeriesEntryClass); +register(collectionClass, timeSeriesEntryClass, name); +`} + + + +
+ +| Parameter | Type | Description | +|--------------------------|------------|------------------------------------| +| **collection** | `string` | The time series collection name | +| **name** | `string ` | Time series name | +| **valueNames** | `string[]` | Names to register (name per value) | +| **collectionClass** | `object` | The collection class | +| **timeSeriesEntryClass** | `object` | The custom time series entry class | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_overview-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_overview-csharp.mdx new file mode 100644 index 0000000000..c73301cadf --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_overview-csharp.mdx @@ -0,0 +1,105 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +The Time Series client API includes a set of [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) +methods and [store](../../../client-api/what-is-a-document-store.mdx) +[operations](../../../client-api/operations/what-are-operations.mdx). +You can use the API to **append** (create and update), **get**, +**delete**, **include**, **patch** and **query** time series data. + +* In this page: + * [Creating and Removing Time Series](../../../document-extensions/timeseries/client-api/overview.mdx#creating-and-removing-time-series) + * [`session` Methods -vs- `document-store` Operations](../../../document-extensions/timeseries/client-api/overview.mdx#session-methods--vs--document-store-operations) + * [Available Time Series `session` methods](../../../document-extensions/timeseries/client-api/overview.mdx#available-time-series-session-methods) + * [Available Time Series `store` Operations](../../../document-extensions/timeseries/client-api/overview.mdx#available-time-series-store-operations) + + +## Creating and Removing Time Series + +A time series is constructed of time series **entries**, which can +be created and deleted using the API. +There is no need to explicitly create or delete a time series: + + * A time series is created when the first entry is appended to it. + * A time series is deleted when all entries are deleted from it. + + + +## `session` Methods -vs- `document-store` Operations + +Some time series functions are available through both `session` methods +and `document-store` operations: +You can **append**, **delete**, **get** and **patch** time series data +through both interfaces. +There are also functionalities unique to each interface. + +* **Time series functionalities unique to the `session`interface**: + * `session` methods provide a **transactional guarantee**. + Use them when you want to guarantee that your actions would + be processed in a [single ACID transaction](../../../client-api/faq/transaction-support.mdx). + You can, for instance, gather multiple session actions + (e.g. the update of a time series and the modification + of a document) and execute them in a single transaction + by calling `session.SaveChanges`, to ensure that they + would all be completed or all be reverted. + * You can use `session` methods to `include` time series while + loading documents. 
+ Included time series data is held by the client's session, + and can be handed to the user instantly when requested + without issuing an additional request to the server +* **Time series functionalities unique to the `store`interface**: + * Getting the data of **multiple time series** in a single operation. + * Managing time series **rollup and retention policies**. + * Patching time series data to **multiple documents** located by a query. + + +## Available Time Series `session` methods + +* [TimeSeriesFor.Append](../../../document-extensions/timeseries/client-api/session/append.mdx) + Use this method to **Append entries to a time series** + (creating the series if it didn't previously exist). +* [TimeSeriesFor.Delete](../../../document-extensions/timeseries/client-api/session/delete.mdx) + Use this method to **delete a range of entries from a time series** + (removing the series completely if all entries have been deleted). +* [TimeSeriesFor.Get](../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx) + Use this method to **Retrieve raw time series entries** + for all entries or for a chosen entries range. +* [Advanced.GetTimeSeriesFor](../../../document-extensions/timeseries/client-api/session/get/get-names.mdx) + Use this method to **Retrieve time series Names**. + Series names are fetched by `GetTimeSeriesFor` directly from their parent documents' + metadata, requiring no additional server roundtrips. +* [session.Advanced.Defer](../../../document-extensions/timeseries/client-api/session/patch.mdx) + Use this method to **patch time series data to a document**. +* **To include time series data** - + * [Use IncludeTimeSeries while loading a document via session.Load](../../../document-extensions/timeseries/client-api/session/include/with-session-load.mdx) + * [Use IncludeTimeSeries while retrieving a document via session.Query](../../../document-extensions/timeseries/client-api/session/include/with-session-query.mdx) + * [Use RQL while running a raw query](../../../document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx) + + +## Available Time Series `store` Operations + +* [TimeSeriesBatchOperation](../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx) + Use this operation to **append and delete time series entries**. + You can bundle a series of Append and/or Delete operations in a list and + execute them in a single call. +* [GetTimeSeriesOperation](../../../document-extensions/timeseries/client-api/operations/get.mdx#gettimeseriesoperation) + Use this operation to Get entries from a single time series. +* [GetMultipleTimeSeriesOperation](../../../document-extensions/timeseries/client-api/operations/get.mdx#getmultipletimeseriesoperation) + Use this operation to Get entries from multiple time series. +* [ConfigureTimeSeriesOperation](../../../document-extensions/timeseries/rollup-and-retention.mdx) + Use this operation to **manage time series roll-up and retention policies**. +* [PatchOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchoperation) + Use this operation to append/delete time series entries to/from a single document. +* [PatchByQueryOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchbyqueryoperation) + Use this operation to run a query and append/delete time series entries to/from + matching documents. 
+* [BulkInsert.TimeSeriesFor.Append](../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx) + Use this operation to append time series entries in bulk. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_overview-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_overview-nodejs.mdx new file mode 100644 index 0000000000..929f1474c0 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/_overview-nodejs.mdx @@ -0,0 +1,105 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +The Time Series client API includes a set of [session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) +methods and [store](../../../client-api/what-is-a-document-store.mdx) +[operations](../../../client-api/operations/what-are-operations.mdx). +You can use the API to **append** (create and update), **get**, +**delete**, **include**, **patch** and **query** time series data. + +* In this page: + * [Creating and Removing Time Series](../../../document-extensions/timeseries/client-api/overview.mdx#creating-and-removing-time-series) + * [`session` Methods -vs- `document-store` Operations](../../../document-extensions/timeseries/client-api/overview.mdx#session-methods--vs--document-store-operations) + * [Available Time Series `session` methods](../../../document-extensions/timeseries/client-api/overview.mdx#available-time-series-session-methods) + * [Available Time Series `store` Operations](../../../document-extensions/timeseries/client-api/overview.mdx#available-time-series-store-operations) + + +## Creating and Removing Time Series + +A time series is constructed of time series **entries**, which can +be created and deleted using the API. +There is no need to explicitly create or delete a time series: + + * A time series is created when the first entry is appended to it. + * A time series is deleted when all entries are deleted from it. + + + +## `session` Methods -vs- `document-store` Operations + +Some time series functions are available through both `session` methods +and `document-store` operations: +You can **append**, **delete**, **get** and **patch** time series data +through both interfaces. +There are also functionalities unique to each interface. + +* **Time series functionalities unique to the `session`interface**: + * `session` methods provide a **transactional guarantee**. + Use them when you want to guarantee that your actions would + be processed in a [single ACID transaction](../../../client-api/faq/transaction-support.mdx). + You can, for instance, gather multiple session actions + (e.g. the update of a time series and the modification + of a document) and execute them in a single transaction + by calling `session.saveChanges`, to ensure that they + would all be completed or all be reverted. + * You can use `session` methods to `include` time series while + loading documents. + Included time series data is held by the client's session, + and can be handed to the user instantly when requested + without issuing an additional request to the server +* **Time series functionalities unique to the `store`interface**: + * Getting the data of **multiple time series** in a single operation. + * Managing time series **rollup and retention policies**. + * Patching time series data to **multiple documents** located by a query. 
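+For illustration, here is a minimal sketch of the single-transaction session flow described above,
+assuming an existing "users/john" document (the property, series name, value, and tag are arbitrary):
+
+
+
+{`const session = documentStore.openSession();
+
+// Modify the document and append a time series entry in the same session
+const user = await session.load("users/john");
+user.lastLogin = new Date();
+
+session.timeSeriesFor("users/john", "HeartRates")
+    .append(new Date(), 72, "watches/fitbit");
+
+// Both changes are sent together and processed as a single transaction
+await session.saveChanges();
+`}
+
+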
+ + +## Available Time Series `session` methods + +* [timeSeriesFor.append](../../../document-extensions/timeseries/client-api/session/append.mdx) + Use this method to **Append entries to a time series** + (creating the series if it didn't previously exist). +* [timeSeriesFor.delete](../../../document-extensions/timeseries/client-api/session/delete.mdx) + Use this method to **delete a range of entries from a time series** + (removing the series completely if all entries have been deleted). +* [timeSeriesFor.get](../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx) + Use this method to **Retrieve raw time series entries** + for all entries or for a chosen entries range. +* [advanced.getTimeSeriesFor](../../../document-extensions/timeseries/client-api/session/get/get-names.mdx) + Use this method to **Retrieve time series Names**. + Series names are fetched by `GetTimeSeriesFor` directly from their parent documents' + metadata, requiring no additional server roundtrips. +* [session.advanced.defer](../../../document-extensions/timeseries/client-api/session/patch.mdx) + Use this method to **patch time series data to a document**. +* **To include time series data** - + * [Use includeTimeSeries while loading a document via session.load](../../../document-extensions/timeseries/client-api/session/include/with-session-load.mdx) + * [Use includeTimeSeries while retrieving a document via session.query](../../../document-extensions/timeseries/client-api/session/include/with-session-query.mdx) + * [Use RQL while running a raw query](../../../document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx) + + +## Available Time Series `store` Operations + +* [TimeSeriesBatchOperation](../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx) + Use this operation to **append and delete time series entries**. + You can bundle a series of Append and/or Delete operations in a list and + execute them in a single call. +* [GetTimeSeriesOperation](../../../document-extensions/timeseries/client-api/operations/get.mdx#gettimeseriesoperation) + Use this operation to Get entries from a single time series. +* [GetMultipleTimeSeriesOperation](../../../document-extensions/timeseries/client-api/operations/get.mdx#getmultipletimeseriesoperation) + Use this operation to Get entries from multiple time series. +* [ConfigureTimeSeriesOperation](../../../document-extensions/timeseries/rollup-and-retention.mdx) + Use this operation to **manage time series roll-up and retention policies**. +* [PatchOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchoperation) + Use this operation to append/delete time series entries to/from a single document. +* [PatchByQueryOperation](../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchbyqueryoperation) + Use this operation to run a query and append/delete time series entries to/from + matching documents. +* [bulkInsert.timeSeriesFor.append](../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx) + Use this operation to append time series entries in bulk. 
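+As a rough sketch of the operations flow, one of the operations listed above might be sent like this
+(the document ID and series name are illustrative; check the operation's own page for the exact
+constructor arguments and result shape):
+
+
+
+{`const \{ GetTimeSeriesOperation \} = require("ravendb");
+
+// Fetch entries of the "HeartRates" series on document "users/john"
+const getOp = new GetTimeSeriesOperation("users/john", "HeartRates");
+const rangeResult = await documentStore.operations.send(getOp);
+
+console.log(rangeResult.entries.length);
+`}
+
+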
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-entries-js.png b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-entries-js.png new file mode 100644 index 0000000000..cb6c40ead5 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-entries-js.png differ diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-entries.png b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-entries.png new file mode 100644 index 0000000000..a156750622 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-entries.png differ diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-settings-view-js.png b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-settings-view-js.png new file mode 100644 index 0000000000..c523d86011 Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-settings-view-js.png differ diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-settings-view.png b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-settings-view.png new file mode 100644 index 0000000000..e0b3c70b7a Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/assets/time-series-settings-view.png differ diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/_append-in-bulk-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/_append-in-bulk-csharp.mdx new file mode 100644 index 0000000000..38ab191232 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/_append-in-bulk-csharp.mdx @@ -0,0 +1,198 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* `store.BulkInsert` is RavenDB's high-performance data insertion operation. + +* The `bulkInsert.TimeSeriesFor` interface provides similar functionality to the [session.TimeSeriesFor](../../../../document-extensions/timeseries/client-api/session/append.mdx), + but without the overhead associated with the _Session_, resulting in significantly improved performance. 
+ +* In this page: + * [Usage](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#usage) + * [Examples](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#examples) + * [Append single entry](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-single-entry) + * [Append multiple entries](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-multiple-entries) + * [Append multiple values per entry](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-multiple-values-per-entry) + * [Append multiple time series](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-multiple-time-series) + * [Syntax](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#syntax) + + + +## Usage + +**Flow**: + +* Call `store.BulkInsert` to create a `BulkInsertOperation` instance. +* Call `TimeSeriesFor` on that instance and pass it: + * The document ID + (An exception will be thrown if the specified document does Not exist). + * The time series name + (Appending entries to a time series that doesn't yet exist yet will create the time series). +* To append an entry, call `Append` and pass it: + * The entry's Timestamp + * The entry's Value or Values + * The entry's Tag (optional) + +**Note**: + +* To append multiple entries, call `Append` as many times as needed. +* Ensure there is at least a 1-millisecond interval between each timestamp. +* The client converts all timestamps to **UTC** before sending the batch to the server. +* Multiple time series can be appended in the same `BulkInsertOperation`. See this [example](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-multiple-time-series) below. + + + +## Examples + +#### Append single entry: + +In this example, we append a single entry with a single value to time series "HeartRates". + + +{`var baseTime = DateTime.Today; + +// Create a BulkInsertOperation instance +using (BulkInsertOperation bulkInsert = store.BulkInsert()) +\{ + // Create a TimeSeriesBulkInsert instance + using (TimeSeriesBulkInsert timeSeriesBulkInsert = + // Call 'TimeSeriesFor', pass it: + // * The document ID + // * The time series name + bulkInsert.TimeSeriesFor("users/john", "HeartRates")) + \{ + // Call 'Append' to add an entry, pass it: + // * The entry's Timestamp + // * The entry's Value or Values + // * The entry's Tag (optional) + timeSeriesBulkInsert.Append(baseTime.AddMinutes(1), 61d, "watches/fitbit"); + \} +\} +`} + + +#### Append multiple entries: + +In this example, we append 100 entries with a single value to time series "HeartRates". + + +{`using (BulkInsertOperation bulkInsert = store.BulkInsert()) +\{ + using (TimeSeriesBulkInsert timeSeriesBulkInsert = + bulkInsert.TimeSeriesFor("users/john", "HeartRates")) + \{ + Random rand = new Random(); + + for (int i = 0; i < 100; i++) + \{ + double randomValue = rand.Next(60, 91); + timeSeriesBulkInsert.Append(baseTime.AddMinutes(i), randomValue, "watches/fitbit"); + \} + \} +\} +`} + + +#### Append multiple values per entry: + +In this example, we append multiple values per entry in time series "HeartRates". 
+ + +{`using (BulkInsertOperation bulkInsert = store.BulkInsert()) +\{ + using (TimeSeriesBulkInsert timeSeriesBulkInsert = + bulkInsert.TimeSeriesFor("users/john", "HeartRates")) + \{ + var exerciseHeartRates = new List \{ 89d, 82d, 85d \}; + timeSeriesBulkInsert.Append(baseline.AddMinutes(1), exerciseHeartRates, "watches/fitbit"); + + var restingHeartRates = new List \{ 59d, 63d, 61d, 64d, 65d \}; + timeSeriesBulkInsert.Append(baseline.AddMinutes(2), restingHeartRates, "watches/apple-watch"); + \} +\} +`} + + +#### Append multiple time series: + +In this example, we append multiple time series in different documents in the same batch. + + +{`using (BulkInsertOperation bulkInsert = store.BulkInsert()) +\{ + // Append first time series + using (TimeSeriesBulkInsert timeSeriesBulkInsert = + bulkInsert.TimeSeriesFor("users/john", "HeartRates")) + \{ + timeSeriesBulkInsert.Append(baseTime.AddMinutes(1), 61d, "watches/fitbit"); + timeSeriesBulkInsert.Append(baseTime.AddMinutes(2), 62d, "watches/fitbit"); + \} + + // Append another time series + using (TimeSeriesBulkInsert timeSeriesBulkInsert = + bulkInsert.TimeSeriesFor("users/john", "ExerciseHeartRates")) + \{ + timeSeriesBulkInsert.Append(baseTime.AddMinutes(3), 81d, "watches/apple-watch"); + timeSeriesBulkInsert.Append(baseTime.AddMinutes(4), 82d, "watches/apple-watch"); + \} + + // Append time series in another document + using (TimeSeriesBulkInsert timeSeriesBulkInsert = + bulkInsert.TimeSeriesFor("users/jane", "HeartRates")) + \{ + timeSeriesBulkInsert.Append(baseTime.AddMinutes(1), 59d, "watches/fitbit"); + timeSeriesBulkInsert.Append(baseTime.AddMinutes(2), 60d, "watches/fitbit"); + \} +\} +`} + + + + + +## Syntax + +**`BulkInsert.TimeSeriesFor`** + + + +{`public TimeSeriesBulkInsert TimeSeriesFor(string id, string name) +`} + + + +| Parameter | Type | Description | +|-------------|----------|------------------| +| **id** | `string` | Document ID | +| **name** | `string` | Time Series Name | + +**`TimeSeriesFor.Append`** overloads: + + + +{`// Append a single value +public void Append(DateTime timestamp, double value, string tag = null) +`} + + + + +{`// Append multiple values +public void Append(DateTime timestamp, ICollection values, string tag = null) +`} + + + +| Parameter | Type | Description | +|---------------|-----------------------|---------------------------| +| **timestamp** | `DateTime` | TS-entry's timestamp | +| **value** | `double` | A single value | +| **values** | `ICollection` | Multiple values | +| **tag** | `string` | TS-entry's tag (optional) | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/_append-in-bulk-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/_append-in-bulk-nodejs.mdx new file mode 100644 index 0000000000..6f48bcb7d0 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/_append-in-bulk-nodejs.mdx @@ -0,0 +1,209 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* `bulkInsert` is RavenDB's high-performance data insertion operation. + +* The `bulkInsert.timeSeriesFor` interface provides similar functionality to the [session.timeSeriesFor](../../../../document-extensions/timeseries/client-api/session/append.mdx), + but without the overhead associated with the _Session_, resulting in significantly improved performance. 
+ +* In this page: + * [Usage](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#usage) + * [Examples](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#examples) + * [Append single entry](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-single-entry) + * [Append multiple entries](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-multiple-entries) + * [Append multiple values per entry](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-multiple-values-per-entry) + * [Append multiple time series](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-multiple-time-series) + * [Syntax](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#syntax) + + + +## Usage + +**Flow**: + +* Call `documentStore.bulkInsert` to create a `BulkInsertOperation` instance. +* Call `timeSeriesFor` on that instance and pass it: + * The document ID + (An exception will be thrown if the specified document does Not exist). + * The time series name + (Appending entries to a time series that doesn't yet exist yet will create the time series). +* To append an entry, call `append` and pass it: + * The entry's Timestamp + * The entry's Value or Values + * The entry's Tag (optional) + +**Note**: + +* To append multiple entries, call `append` as many times as needed. +* Ensure there is at least a 1-millisecond interval between each timestamp. +* The client converts all timestamps to **UTC** before sending the batch to the server. +* Multiple time series can be appended in the same `BulkInsertOperation`. See this [example](../../../../document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx#append-multiple-time-series) below. + + + +## Examples + +#### Append single entry: + +In this example, we append a single entry with a single value to time series "HeartRates". + + +{`const baseTime = new Date(); + +// Create a BulkInsertOperation instance +const bulkInsert = documentStore.bulkInsert(); + +\{ + // Call 'TimeSeriesFor', pass it: + // * The document ID + // * The time series name + const timeSeriesBulkInsert = bulkInsert.timeSeriesFor("users/john", "HeartRates"); + + // Call 'Append' to add an entry, pass it: + // * The entry's Timestamp + // * The entry's Value or Values + // * The entry's Tag (optional) + const nextMinute = new Date(baseTime.getTime() + 60_000 * 1); + await timeSeriesBulkInsert.append(nextMinute, 61, "watches/fitbit"); + + timeSeriesBulkInsert.dispose(); +\} + +// Call finish to send all data to the server +await bulkInsert.finish(); +`} + + +#### Append multiple entries: + +In this example, we append 100 entries with a single value to time series "HeartRates". + + +{`const baseTime = new Date(); + +const bulkInsert = documentStore.bulkInsert(); + +\{ + const timeSeriesBulkInsert = bulkInsert.timeSeriesFor("users/john", "HeartRates"); + + for (let i = 0; i < 100; i++) \{ + let randomValue = Math.floor(Math.random() * (29)) + 60; + let nextMinute = new Date(baseTime.getTime() + 60_000 * (i + 1)); + + await timeSeriesBulkInsert.append(nextMinute, randomValue, "watches/fitbit"); + \} + + timeSeriesBulkInsert.dispose(); +\} + +await bulkInsert.finish(); +`} + + +#### Append multiple values per entry: + +In this example, we append multiple values per entry in time series "HeartRates". 
+ + +{`const baseTime = new Date(); + +const bulkInsert = documentStore.bulkInsert(); + +\{ + const timeSeriesBulkInsert = bulkInsert.timeSeriesFor("users/john", "HeartRates"); + + const exerciseHeartRates = [89, 82, 85]; + await timeSeriesBulkInsert.append(new Date(baseTime.getTime() + 60_000), + exerciseHeartRates, "watches/fitbit"); + + const restingHeartRates = [59, 63, 61, 64, 65]; + await timeSeriesBulkInsert.append(new Date(baseTime.getTime() + 60_000 * 2), + restingHeartRates, "watches/fitbit"); + + timeSeriesBulkInsert.dispose(); +\} + +await bulkInsert.finish(); +`} + + +#### Append multiple time series: + +In this example, we append multiple time series in different documents in the same batch. + + +{`const baseTime = new Date(); + +const bulkInsert = documentStore.bulkInsert(); + +\{ + // Append first time series + const timeSeriesBulkInsert = bulkInsert.timeSeriesFor("users/john", "HeartRates"); + await timeSeriesBulkInsert.append(new Date(baseTime.getTime() + 60_000), 61, "watches/fitbit"); + await timeSeriesBulkInsert.append(new Date(baseTime.getTime() + 60_000 * 2), 62, "watches/fitbit"); + timeSeriesBulkInsert.dispose(); +\} +\{ + // Append another time series + const timeSeriesBulkInsert = bulkInsert.timeSeriesFor("users/john", "ExerciseHeartRates"); + await timeSeriesBulkInsert.append(new Date(baseTime.getTime() + 60_000 * 3), 81, "watches/apple-watch"); + await timeSeriesBulkInsert.append(new Date(baseTime.getTime() + 60_000 * 4), 82, "watches/apple-watch"); + timeSeriesBulkInsert.dispose(); +\} +\{ + // Append time series in another document + const timeSeriesBulkInsert = bulkInsert.timeSeriesFor("users/jane", "HeartRates"); + await timeSeriesBulkInsert.append(new Date(baseTime.getTime() + 60_000), 59, "watches/fitbit"); + await timeSeriesBulkInsert.append(new Date(baseTime.getTime() + 60_000 * 2), 60, "watches/fitbit"); + timeSeriesBulkInsert.dispose(); +\} + +await bulkInsert.finish(); +`} + + + + + +## Syntax + +**`bulkInsert.timeSeriesFor`** + + + +{`timeSeriesFor(id, name); +`} + + + +| Parameter | Type | Description | +|-------------|----------|------------------| +| **id** | `string` | Document ID | +| **name** | `string` | Time Series Name | + +**`timeSeriesFor.Append`** overloads: + + + +{`append(timestamp, value); +append(timestamp, value, tag); +append(timestamp, values); +append(timestamp, values, tag); +`} + + + +| Parameter | Type | Description | +|---------------|------------|---------------------------| +| **timestamp** | `Date` | TS-entry's timestamp | +| **value** | `number` | A single value | +| **values** | `number[]` | Multiple values | +| **tag** | `string` | TS-entry's tag (optional) | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/_category_.json new file mode 100644 index 0000000000..6b4e9e8ddc --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 3, + "label": Bulk Insert, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx new file mode 100644 index 0000000000..f45cb8b9b7 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/bulk-insert/append-in-bulk.mdx @@ -0,0 +1,42 @@ 
+--- +title: "Append Time Series with Bulk Insert" +hide_table_of_contents: true +sidebar_label: Append In Bulk +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import AppendInBulkCsharp from './_append-in-bulk-csharp.mdx'; +import AppendInBulkNodejs from './_append-in-bulk-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/javascript-support.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/javascript-support.mdx new file mode 100644 index 0000000000..679b1d8ea0 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/javascript-support.mdx @@ -0,0 +1,50 @@ +--- +title: "Time Series: JavaScript Support" +hide_table_of_contents: true +sidebar_label: JavaScript Support +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import JavascriptSupportCsharp from './_javascript-support-csharp.mdx'; +import JavascriptSupportNodejs from './_javascript-support-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/named-time-series-values.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/named-time-series-values.mdx new file mode 100644 index 0000000000..00b7a3dc37 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/named-time-series-values.mdx @@ -0,0 +1,43 @@ +--- +title: "Named Time Series Values" +hide_table_of_contents: true +sidebar_label: Named Time Series Values +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import NamedTimeSeriesValuesCsharp from './_named-time-series-values-csharp.mdx'; +import NamedTimeSeriesValuesNodejs from './_named-time-series-values-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_append-and-delete-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_append-and-delete-csharp.mdx new file mode 100644 index 0000000000..7d1bbbeb1f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_append-and-delete-csharp.mdx @@ -0,0 +1,259 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `TimeSeriesBatchOperation` to Append and Delete multiple time series entries on a single document. + A list of predefined Append and Delete actions will be executed in this single batch operation. + +* To Append and Delete multiple time series entries on multiple documents, see [PatchByQueryOperation](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchbyqueryoperation). + +* For a general _Operations_ overview, see [What are Operations](../../../../client-api/operations/what-are-operations.mdx). 
+
+* In this page:
+  * [Usage](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#usage)
+  * [Examples](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#examples)
+     * [Append multiple entries](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#append-multiple-entries)
+     * [Delete multiple entries](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#delete-multiple-entries)
+     * [Append & Delete entries in the same batch](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#append--delete-entries-in-the-same-batch)
+  * [Syntax](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#syntax)
+
+
+## Usage
+
+**Flow**:
+
+* Prepare the Append and Delete operations:
+  * Create an instance of `TimeSeriesOperation.AppendOperation` to define an Append action.
+  * Create an instance of `TimeSeriesOperation.DeleteOperation` to define a Delete action.
+* Create an instance of `TimeSeriesOperation` and pass it the time series name.
+  * Call `TimeSeriesOperation.Append` to add the Append operation.
+  * Call `TimeSeriesOperation.Delete` to add the Delete operation.
+* Create a `TimeSeriesBatchOperation` instance and pass it:
+  * The document ID
+  * The `TimeSeriesOperation` object
+* Execute the `TimeSeriesBatchOperation` by calling `store.Operations.Send`.
+
+**Note**:
+
+* All the added Append and Delete operations will be executed in a single-node transaction.
+* Delete actions are executed **before** Append actions, as shown in [this example](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#append--delete-entries-in-the-same-batch).
+* Appending entries to a time series that doesn't exist yet will create the time series.
+* An exception will be thrown if the specified document does Not exist.
+
+
+
+## Examples
+
+#### Append multiple entries:
+
+In this example, we append four entries to a time series.
+
+
+
+{`var baseTime = DateTime.Today;
+
+// Define the Append operations:
+// =============================
+var appendOp1 = new TimeSeriesOperation.AppendOperation
+\{
+    Timestamp = baseTime.AddMinutes(1), Values = new[] \{79d\}, Tag = "watches/fitbit"
+\};
+
+var appendOp2 = new TimeSeriesOperation.AppendOperation
+\{
+    Timestamp = baseTime.AddMinutes(2), Values = new[] \{82d\}, Tag = "watches/fitbit"
+\};
+
+var appendOp3 = new TimeSeriesOperation.AppendOperation
+\{
+    Timestamp = baseTime.AddMinutes(3), Values = new[] \{80d\}, Tag = "watches/fitbit"
+\};
+
+var appendOp4 = new TimeSeriesOperation.AppendOperation
+\{
+    Timestamp = baseTime.AddMinutes(4), Values = new[] \{78d\}, Tag = "watches/fitbit"
+\};
+
+// Define 'TimeSeriesOperation' and add the Append operations:
+// ===========================================================
+var timeSeriesOp = new TimeSeriesOperation
+\{
+    Name = "HeartRates"
+\};
+
+timeSeriesOp.Append(appendOp1);
+timeSeriesOp.Append(appendOp2);
+timeSeriesOp.Append(appendOp3);
+timeSeriesOp.Append(appendOp4);
+
+
+// Define 'TimeSeriesBatchOperation' and execute:
+// ==============================================
+var timeSeriesBatchOp = new TimeSeriesBatchOperation("users/john", timeSeriesOp);
+store.Operations.Send(timeSeriesBatchOp);
+`}
+
+
+#### Delete multiple entries:
+
+In this example, we delete a range of two entries from a time series.
+ + + +{`var baseTime = DateTime.Today; + +var deleteOp = new TimeSeriesOperation.DeleteOperation +\{ + From = baseTime.AddMinutes(2), To = baseTime.AddMinutes(3) +\}; + +var timeSeriesOp = new TimeSeriesOperation +\{ + Name = "HeartRates" +\}; + +timeSeriesOp.Delete(deleteOp); + +var timeSeriesBatchOp = new TimeSeriesBatchOperation("users/john", timeSeriesOp); + +store.Operations.Send(timeSeriesBatchOp); +`} + + +#### Append & Delete entries in the same batch: + +* In this example, we append and delete entries in the same batch operation. + +* Note: the Delete actions are executed **before** all Append actions. + + + +{`var baseTime = DateTime.Today; + +// Define some Append operations: +var appendOp1 = new TimeSeriesOperation.AppendOperation +\{ + Timestamp = baseTime.AddMinutes(1), Values = new[] \{79d\}, Tag = "watches/fitbit" +\}; + +var appendOp2 = new TimeSeriesOperation.AppendOperation +\{ + Timestamp = baseTime.AddMinutes(2), Values = new[] \{82d\}, Tag = "watches/fitbit" +\}; + +var appendOp3 = new TimeSeriesOperation.AppendOperation +\{ + Timestamp = baseTime.AddMinutes(3), Values = new[] \{80d\}, Tag = "watches/fitbit" +\}; + +// Define a Delete operation: +var deleteOp = new TimeSeriesOperation.DeleteOperation +\{ + From = baseTime.AddMinutes(2), To = baseTime.AddMinutes(3) +\}; + +var timeSeriesOp = new TimeSeriesOperation +\{ + Name = "HeartRates" +\}; + +// Add the Append & Delete operations to the list of actions +// Note: the Delete action will be executed BEFORE all the Append actions +// even though it is added last +timeSeriesOp.Append(appendOp1); +timeSeriesOp.Append(appendOp2); +timeSeriesOp.Append(appendOp3); +timeSeriesOp.Delete(deleteOp); + +var timeSeriesBatchOp = new TimeSeriesBatchOperation("users/john", timeSeriesOp); + +store.Operations.Send(timeSeriesBatchOp); + +// Results: +// All 3 entries that were appended will exist and are not deleted. +// This is because the Delete action occurs first, before all Append actions. +`} + + + + + +## Syntax + +#### `TimeSeriesBatchOperation` + + + +{`public TimeSeriesBatchOperation(string documentId, TimeSeriesOperation operation) +`} + + + +| Parameter | Type | Description | +|----------------|-----------------------|---------------------------------------------------------------------------------------| +| **documentId** | `string` | The ID of the document to which you want to Append/Delete time series data. | +| **operation** | `TimeSeriesOperation` | This class defines which Append/Delete actions to perform within the batch operation. 
| + +#### `TimeSeriesOperation` + + + +{`public class TimeSeriesOperation +\{ + public string Name; + public void Append(AppendOperation appendOperation) + public void Delete(DeleteOperation deleteOperation) +\} +`} + + + +| Property | Type | Description | +|------------|----------|--------------------------------------| +| **Name** | `string` | The time series name | +| **Append** | `method` | Add an `AppendOperation` to the list | +| **Delete** | `method` | Add a `DeleteOperation` to the list | + +#### `AppendOperation` + + + +{`public class AppendOperation +\{ + public DateTime Timestamp; + public double[] Values; + public string Tag; +\} +`} + + + +| Property | Type | Description | +|---------------|------------|----------------------------------------------------------| +| **Timestamp** | `DateTime` | The time series entry will be appended at this timestamp | +| **Values** | `double[]` | Entry values | +| **Tag** | `string` | Entry tag (optional) | + +#### `DeleteOperation` + + + +{`public class DeleteOperation +\{ + public DateTime? From, To; +\} +`} + + + +| Property | Type | Description | +|------------|-------------|------------------------------------------------------------------------------------------------| +| **From** | `DateTime?` | Entries will be deleted starting at this timestamp (inclusive)
Default: `DateTime.MinValue` | +| **To** | `DateTime?` | Entries will be deleted up to this timestamp (inclusive)
Default: `DateTime.MaxValue` |
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_append-and-delete-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_append-and-delete-nodejs.mdx
new file mode 100644
index 0000000000..19faddb3d8
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_append-and-delete-nodejs.mdx
@@ -0,0 +1,241 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use `TimeSeriesBatchOperation` to Append and Delete multiple time series entries on a single document.
+  A list of predefined Append and Delete actions will be executed in this single batch operation.
+
+* To Append and Delete multiple time series entries on multiple documents, see [PatchByQueryOperation](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#patchbyqueryoperation).
+
+* For a general _Operations_ overview, see [What are Operations](../../../../client-api/operations/what-are-operations.mdx).
+
+* In this page:
+  * [Usage](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#usage)
+  * [Examples](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#examples)
+     * [Append multiple entries](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#append-multiple-entries)
+     * [Delete multiple entries](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#delete-multiple-entries)
+     * [Append & delete entries in the same batch](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#append--delete-entries-in-the-same-batch)
+  * [Syntax](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#syntax)
+
+
+## Usage
+
+**Flow**:
+
+* Prepare the Append and Delete operations:
+  * Create an instance of `TimeSeriesOperation.AppendOperation` to define an Append action.
+  * Create an instance of `TimeSeriesOperation.DeleteOperation` to define a Delete action.
+* Create an instance of `TimeSeriesOperation` and pass it the time series name.
+  * Call `TimeSeriesOperation.append` to add the Append operation.
+  * Call `TimeSeriesOperation.delete` to add the Delete operation.
+* Create a `TimeSeriesBatchOperation` instance and pass it:
+  * The document ID
+  * The `TimeSeriesOperation` object
+* Execute the `TimeSeriesBatchOperation` by calling `store.operations.send`.
+
+**Note**:
+
+* All the added Append and Delete operations will be executed in a single-node transaction.
+* Delete actions are executed **before** Append actions, as shown in [this example](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx#append--delete-entries-in-the-same-batch).
+* Appending entries to a time series that doesn't exist yet will create the time series.
+* An exception will be thrown if the specified document does Not exist.
+
+
+
+## Examples
+
+#### Append multiple entries:
+
+In this example, we append four entries to a time series.
+ + + +{`const baseTime = new Date(); + +// Define the Append operations: +let nextMinute = new Date(baseTime.getTime() + 60_000); +const appendOp1 = new AppendOperation(nextMinute, [79], "watches/fitbit"); + +nextMinute = new Date(baseTime.getTime() + 60_000 * 2); +const appendOp2 = new AppendOperation(nextMinute, [82], "watches/fitbit"); + +nextMinute = new Date(baseTime.getTime() + 60_000 * 3); +const appendOp3 = new AppendOperation(nextMinute, [80], "watches/fitbit"); + +nextMinute = new Date(baseTime.getTime() + 60_000 * 4); +const appendOp4 = new AppendOperation(nextMinute, [78], "watches/fitbit"); + +// Define the 'TimeSeriesOperation': +const timeSeriesOp = new TimeSeriesOperation("HeartRates"); + +// Add the Append operations by calling 'append': +timeSeriesOp.append(appendOp1); +timeSeriesOp.append(appendOp2); +timeSeriesOp.append(appendOp3); +timeSeriesOp.append(appendOp4); + +// Define 'TimeSeriesBatchOperation': +const timeSeriesBatchOp = new TimeSeriesBatchOperation("users/john", timeSeriesOp); + +// Execute the batch operation: +await documentStore.operations.send(timeSeriesBatchOp); +`} + + +#### Delete multiple entries: + +In this example, we delete a range of two entries from a time series. + + + +{`const baseTime = new Date(); + +const from = new Date(baseTime.getTime() + 60_000 * 2); +const to = new Date(baseTime.getTime() + 60_000 * 3); + +// Define the Delete operation: +const deleteOp = new DeleteOperation(from, to); + +// Define the 'TimeSeriesOperation': +const timeSeriesOp = new TimeSeriesOperation("HeartRates"); + +// Add the Delete operation by calling 'delete': +timeSeriesOp.delete(deleteOp); + +// Define the 'TimeSeriesBatchOperation': +const timeSeriesBatchOp = new TimeSeriesBatchOperation("users/john", timeSeriesOp); + +// Execute the batch operation: +await documentStore.operations.send(timeSeriesBatchOp); +`} + + +#### Append & delete entries in the same batch: + +* In this example, we append and delete entries in the same batch operation. + +* Note: the Delete actions are executed **before** all Append actions. + + + +{`const baseTime = new Date(); + +// Define some Append operations: +let nextMinute = new Date(baseTime.getTime() + 60_000); +const appendOp1 = new AppendOperation(nextMinute, [79], "watches/fitbit"); + +nextMinute = new Date(baseTime.getTime() + 60_000 * 2); +const appendOp2 = new AppendOperation(nextMinute, [82], "watches/fitbit"); + +nextMinute = new Date(baseTime.getTime() + 60_000 * 3); +const appendOp3 = new AppendOperation(nextMinute, [80], "watches/fitbit"); + +const from = new Date(baseTime.getTime() + 60_000 * 2); +const to = new Date(baseTime.getTime() + 60_000 * 3); + +// Define a Delete operation: +const deleteOp = new DeleteOperation(from, to); + +// Define the 'TimeSeriesOperation': +const timeSeriesOp = new TimeSeriesOperation("HeartRates"); + +// Add the Append & Delete operations to the list of actions +// Note: the Delete action will be executed BEFORE all the Append actions + // even though it is added last +timeSeriesOp.append(appendOp1); +timeSeriesOp.append(appendOp2); +timeSeriesOp.append(appendOp3); +timeSeriesOp.delete(deleteOp); + +// Define the 'TimeSeriesBatchOperation': +const timeSeriesBatchOp = new TimeSeriesBatchOperation("users/john", timeSeriesOp); + +// Execute the batch operation: +await documentStore.operations.send(timeSeriesBatchOp); + +// Results: +// All 3 entries that were appended will exist and are not deleted. +// This is because the Delete action occurs first, before all Append actions. 
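+
+// Added clarification (not part of the original sample):
+// the delete range (minutes 2-3) is applied first, and appendOp2 and
+// appendOp3 then create entries at those timestamps afterwards - which is
+// why all three appended entries survive despite the overlapping range.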
+`} + + + + + +## Syntax + +#### `TimeSeriesBatchOperation` + + + +{`TimeSeriesBatchOperation(documentId, operation) +`} + + + +| Parameter | Type | Description | +|----------------|-----------------------|---------------------------------------------------------------------------------------| +| **documentId** | `string` | The ID of the document to which you want to Append/Delete time series data. | +| **operation** | `TimeSeriesOperation` | This class defines which Append/Delete actions to perform within the batch operation. | + +#### `TimeSeriesOperation` + + + +{`class TimeSeriesOperation \{ + name; + append; + delete; +\} +`} + + + +| Property | Type | Description | +|------------|----------|----------------------------------------------------------------------| +| **name** | `string` | The time series name | +| **append** | `method` | Pass a `AppendOperation` object to this method to add it to the list | +| **delete** | `method` | Pass a `DeleteOperation` object to this method to add it to the list | + +#### `AppendOperation` + + + +{`class AppendOperation \{ + timestamp; + values; + tag; +\} +`} + + + +| Property | Type | Description | +|---------------|------------|----------------------------------------------------------| +| **timestamp** | `Date` | The time series entry will be appended at this timestamp | +| **values** | `number[]` | Entry values | +| **tag** | `string` | Entry tag (optional) | + +#### `DeleteOperation` + + + +{`class DeleteOperation \{ + from; + to; +\} +`} + + + +| Property | Type | Description | +|-----------|--------|---------------------------------------------------------------------------------------------------| +| **from** | `Date` | Entries will be deleted starting at this timestamp (inclusive)
Default: the minimum date value | +| **to** | `Date` | Entries will be deleted up to this timestamp (inclusive)
Default: the maximum date value | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_category_.json new file mode 100644 index 0000000000..fbb9b4b328 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": Operations, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_get-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_get-csharp.mdx new file mode 100644 index 0000000000..3b7f92587e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_get-csharp.mdx @@ -0,0 +1,216 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetTimeSeriesOperation` to retrieve entries from a **single** time series. + You can also retrieve entries from a single time series using the Session's [TimesSeriesFor.Get](../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx) method. + +* Use `GetMultipleTimeSeriesOperation` to retrieve entries from **multiple** time series. + +* For a general _Operations_ overview, see [What are Operations](../../../../client-api/operations/what-are-operations.mdx). + +* In this page: + * [Get entries - from single time series](../../../../document-extensions/timeseries/client-api/operations/get.mdx#get-entries---from-single-time-series) + * [Example](../../../../document-extensions/timeseries/client-api/operations/get.mdx#example) + * [Syntax](../../../../document-extensions/timeseries/client-api/operations/get.mdx#syntax) + * [Get entries - from multiple time series](../../../../document-extensions/timeseries/client-api/operations/get.mdx#get-entries---from-multiple-time-series) + * [Example](../../../../document-extensions/timeseries/client-api/operations/get.mdx#example-1) + * [Syntax](../../../../document-extensions/timeseries/client-api/operations/get.mdx#syntax-1) + + +## Get entries - from single time series +### Example + +In this example, we retrieve all entries from a single time series. + + + +{`// Define the get operation +var getTimeSeriesOp = new GetTimeSeriesOperation( + "employees/1-A", // The document ID + "HeartRates", // The time series name + DateTime.MinValue, // Entries range start + DateTime.MaxValue); // Entries range end + +// Execute the operation by passing it to 'Operations.Send' +TimeSeriesRangeResult timeSeriesEntries = store.Operations.Send(getTimeSeriesOp); + +// Access entries +var firstEntryReturned = timeSeriesEntries.Entries[0]; +`} + + +### Syntax + + + +{`public GetTimeSeriesOperation( + string docId, + string timeseries, + DateTime? from = null, + DateTime? to = null, + int start = 0, + int pageSize = int.MaxValue, + bool returnFullResults = false) +`} + + + +| Parameter | Type | Description | +|-----------------------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **docId** | `string` | Document ID | +| **timeseries** | `string` | Time series name | +| **from** | `DateTime?` | Get time series entries starting from this timestamp (inclusive).
Default: `DateTime.MinValue` | +| **to** | `DateTime?` | Get time series entries ending at this timestamp (inclusive).
Default: `DateTime.MaxValue` | +| **start** | `int` | The position of the first result to retrieve (for paging)
Default: 0 | +| **pageSize** | `int` | Number of results per page to retrieve (for paging)
Default: `int.MaxValue` | +| **returnFullResults** | `bool` | This param is only relevant when [getting incremental time series data](../../../../document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx) | + +**Return Value**: + + + +{`public class TimeSeriesRangeResult +\{ + // Timestamp of first entry returned + public DateTime From; + + // Timestamp of last entry returned + public DateTime To; + + // The resulting entries + // Will be empty if requesting an entries range that does Not exist + public TimeSeriesEntry[] Entries; + + // The number of entries returned + // Will be undefined if not all entries of this time series were returned + public long? TotalResults; +\} +`} + + + + + +* Details of class `TimeSeriesEntry ` are listed in [this syntax section](../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#syntax). +* If the requested time series does Not exist, the returned object will be `null`. +* No exceptions are generated. + + + + + +## Get entries - from multiple time series +### Example + +In this example, we retrieve entries from the specified ranges of two time series. + + + +{`// Define the get operation +var getMultipleTimeSeriesOp = new GetMultipleTimeSeriesOperation("employees/1-A", + new List + \{ + new TimeSeriesRange + \{ + Name = "ExerciseHeartRates", From = baseTime.AddHours(1), To = baseTime.AddHours(10) + \}, + new TimeSeriesRange + \{ + Name = "RestHeartRates", From = baseTime.AddHours(11), To = baseTime.AddHours(20) + \} + \}); + +// Execute the operation by passing it to 'Operations.Send' +TimeSeriesDetails timesSeriesEntries = store.Operations.Send(getMultipleTimeSeriesOp); + +// Access entries +var timeSeriesEntry = timesSeriesEntries.Values["ExerciseHeartRates"][0].Entries[0]; +`} + + +### Syntax + + + +{`public GetMultipleTimeSeriesOperation( + string docId, + IEnumerable ranges, + int start = 0, + int pageSize = int.MaxValue, + bool returnFullResults = false) +`} + + + +| Parameter | Type | Description | +|-----------------------|--------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **docId** | `string` | Document ID | +| **ranges** | `IEnumerable` | Provide a `TimeSeriesRange` object for each time series from which you want to retrieve data | +| **start** | `int` | The position of the first result to retrieve (for paging)
Default: 0 | +| **pageSize** | `int` | Number of results per page to retrieve (for paging)
Default: `int.MaxValue` | +| **returnFullResults** | `bool` | This param is only relevant when [getting incremental time series data](../../../../document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx) | + + + +{`public class TimeSeriesRange +\{ + public string Name; // Name of time series + public DateTime From; // Get time series entries starting from this timestamp (inclusive). + public DateTime To; // Get time series entries ending at this timestamp (inclusive). +\} +`} + + + +**Return Value**: + + + +{`public class TimeSeriesDetails +\{ + // The document ID + public string Id \{ get; set; \} + + // Dictionary of time series name to the time series results + public Dictionary> Values \{ get; set; \} +\} +`} + + + + + +{`public class TimeSeriesRangeResult +\{ + // Timestamp of first entry returned + public DateTime From; + + // Timestamp of last entry returned + public DateTime To; + + // The resulting entries + // Will be empty if requesting an entries range that does Not exist + public TimeSeriesEntry[] Entries; + + // The number of entries returned + // Will be undefined if not all entries of this time series were returned + public long? TotalResults; +\} +`} + + + +* If any of the requested time series do not exist, the returned object will be `null`. + +* When an entries range that does not exist are requested, + the return value for the that range is a `TimeSeriesRangeResult` object with an empty `Entries` property. + +* No exceptions are generated. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_get-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_get-nodejs.mdx new file mode 100644 index 0000000000..054c2e4ff7 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_get-nodejs.mdx @@ -0,0 +1,221 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `GetTimeSeriesOperation` to retrieve entries from a **single** time series. + You can also retrieve entries from a single time series using the Session's [timesSeriesFor.get](../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx) method. + +* Use `GetMultipleTimeSeriesOperation` to retrieve entries from **multiple** time series. + +* For a general _Operations_ overview, see [What are Operations](../../../../client-api/operations/what-are-operations.mdx). + +* In this page: + * [Get entries - from single time series](../../../../document-extensions/timeseries/client-api/operations/get.mdx#get-entries---from-single-time-series) + * [Example](../../../../document-extensions/timeseries/client-api/operations/get.mdx#example) + * [Syntax](../../../../document-extensions/timeseries/client-api/operations/get.mdx#syntax) + * [Get entries - from multiple time series](../../../../document-extensions/timeseries/client-api/operations/get.mdx#get-entries---from-multiple-time-series) + * [Example](../../../../document-extensions/timeseries/client-api/operations/get.mdx#example-1) + * [Syntax](../../../../document-extensions/timeseries/client-api/operations/get.mdx#syntax-1) + + +## Get entries - from single time series +### Example + +In this example, we retrieve all entries from a single time series. 
+ + + +{`// Define the get operation +var getTimeSeriesOp = new GetTimeSeriesOperation( + "employees/1-A", // The document ID + "HeartRates"); // The time series name + +// Execute the operation by passing it to 'operations.send' +const timeSeriesEntries = await documentStore.operations.send(getTimeSeriesOp); + +// Access entries +const firstEntryReturned = timeSeriesEntries.entries[0]; +`} + + +### Syntax + + + +{`// Available overloads: +// ==================== +const getTimeSeriesOp = new GetTimeSeriesOperation(docId, timeseries); +const getTimeSeriesOp = new GetTimeSeriesOperation(docId, timeseries, from, to); +const getTimeSeriesOp = new GetTimeSeriesOperation(docId, timeseries, from, to, start); +const getTimeSeriesOp = new GetTimeSeriesOperation(docId, timeseries, from, to, start, pageSize); +`} + + + +| Parameter | Type | Description | +|-----------------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **docId** | `string` | Document ID | +| **timeseries** | `string` | Time series name | +| **from** | `Date` | Get time series entries starting from this timestamp (inclusive).
Default: The minimum date value will be used. | +| **to** | `Date` | Get time series entries ending at this timestamp (inclusive).
Default: The maximum date value will be used. | +| **start** | `number` | The position of the first result to retrieve (for paging).
Default: 0 | +| **pageSize** | `number` | Number of results per page to retrieve (for paging).
Default: `2,147,483,647` (equivalent to `int.MaxValue` in C#). |
+
+**Return Value**:
+
+
+
+{`class TimeSeriesRangeResult \{
+    // Timestamp of first entry returned
+    from; // Date;
+
+    // Timestamp of last entry returned
+    to; // Date;
+
+    // The resulting entries
+    // Will be empty if requesting an entries range that does Not exist
+    entries; // TimeSeriesEntry[];
+
+    // The number of entries returned
+    // Will be undefined if not all entries of this time series were returned
+    totalResults; // number
+\}
+`}
+
+
+
+
+
+* Details of class `TimeSeriesEntry` are listed in [this syntax section](../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#syntax).
+* If the requested time series does Not exist, a null object will be returned.
+* No exceptions are generated.
+
+
+
+
+
+## Get entries - from multiple time series
+### Example
+
+In this example, we retrieve entries from the specified ranges of two time series.
+
+
+
+{`const baseTime = new Date();
+
+const startTime1 = new Date(baseTime.getTime() + 60_000 * 5);
+const endTime1 = new Date(baseTime.getTime() + 60_000 * 10);
+
+const startTime2 = new Date(baseTime.getTime() + 60_000 * 15);
+const endTime2 = new Date(baseTime.getTime() + 60_000 * 20);
+
+// Define the get operation
+const getMultipleTimeSeriesOp = new GetMultipleTimeSeriesOperation(
+    "employees/1-A",
+    [
+        \{
+            name: "ExerciseHeartRates",
+            from: startTime1,
+            to: endTime1
+        \},
+        \{
+            name: "RestHeartRates",
+            from: startTime2,
+            to: endTime2
+        \}
+    ]);
+
+// Execute the operation by passing it to 'operations.send'
+const timesSeriesEntries = await documentStore.operations.send(getMultipleTimeSeriesOp);
+
+// Access entries
+const timeSeriesEntry = timesSeriesEntries.values.get("ExerciseHeartRates")[0].entries[0];
+`}
+
+
+### Syntax
+
+
+
+{`// Available overloads:
+// ====================
+const getMultipleTimeSeriesOp = new GetMultipleTimeSeriesOperation(docId, ranges);
+const getMultipleTimeSeriesOp = new GetMultipleTimeSeriesOperation(docId, ranges, start, pageSize);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------------|---------------------|---------------------------------------------------------------------------------------------------------------------------------|
+| **docId** | `string` | Document ID |
+| **ranges** | `TimeSeriesRange[]` | Provide a `TimeSeriesRange` object for each time series from which you want to retrieve data |
+| **start** | `number` | The position of the first result to retrieve (for paging)
Default: 0 | +| **pageSize** | `number` | Number of results per page to retrieve (for paging)
Default: `2,147,483,647` (equivalent to `int.MaxValue` in C#). |
+
+
+
+{`// The TimeSeriesRange object:
+// ===========================
+\{
+    // Name of time series
+    name, // string
+
+    // Get time series entries starting from this timestamp (inclusive).
+    from, // Date
+
+    // Get time series entries ending at this timestamp (inclusive).
+    to // Date
+\}
+`}
+
+
+
+**Return Value**:
+
+
+
+{`class TimeSeriesDetails \{
+    // The document ID
+    id; // string
+
+    // Map of time series name to the time series results
+    values; // Map<string, TimeSeriesRangeResult[]>
+\}
+`}
+
+
+
+
+
+{`class TimeSeriesRangeResult \{
+    // Timestamp of first entry returned
+    from; // Date;
+
+    // Timestamp of last entry returned
+    to; // Date;
+
+    // The resulting entries
+    // Will be empty if requesting an entries range that does Not exist
+    entries; // TimeSeriesEntry[];
+
+    // The number of entries returned
+    // Will be undefined if not all entries of this time series were returned
+    totalResults; // number
+\}
+`}
+
+
+
+* If any of the requested time series do not exist, the returned object will be `null`.
+
+* When an entries range that does not exist is requested,
+  the return value for that range is a `TimeSeriesRangeResult` object with an empty `entries` property.
+
+* No exceptions are generated.
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_patch-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_patch-csharp.mdx
new file mode 100644
index 0000000000..91df67b584
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_patch-csharp.mdx
@@ -0,0 +1,276 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+
+* Patching time series data (Append or Delete entries) can be performed via [Operations](../../../../client-api/operations/what-are-operations.mdx).
+  * Use [PatchOperation](../../../../client-api/operations/patching/single-document.mdx) to patch data on a **single** document.
+  * Use [PatchByQueryOperation](../../../../client-api/operations/patching/set-based.mdx) to patch data on **multiple** documents.
+
+* Patching time series entries on a single document can also be performed via the [Session](../../../../document-extensions/timeseries/client-api/session/patch.mdx).
+ +* In this page: + * [Patch time series data - single document](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#patch-time-series-data---single-document) + * [Usage](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#usage) + * [Examples](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#examples) + * [Syntax](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#syntax) + * [Patch time series data - multiple documents](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#patch-time-series-data---multiple-documents) + * [Usage](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#usage-1) + * [Examples](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#examples-1) + * [Syntax](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#syntax-1) + + +## Patch time series data - single document +### Usage + +* Create a `PatchRequest` instance: + * Define the Append or Delete action using the [JavaScript time series API](../../../../document-extensions/timeseries/client-api/javascript-support.mdx). + +* Create a `PatchOperation` instance and pass it: + * The ID of the document to patch + * The document change vector (or `null`) + * The `PatchRequest` object + +* Execute the `PatchOperation` operation by calling `store.Operations.Send` + +* NOTE: + * The server treats timestamps passed in the patch request script as **UTC**, no conversion is applied by the client to local time. + * Appending entries to a time series that doesn't yet exist yet will create the time series. + * No exception is thrown if the specified document does not exist. +### Examples + +* In this example, we **append** a single entry to time series "HeartRates" on the specified document. + + +{`var baseTime = DateTime.UtcNow; + +var patchRequest = new PatchRequest +\{ + // Define the patch request using JavaScript: + Script = "timeseries(this, $timeseries).append($timestamp, $values, $tag);", + + // Provide values for the parameters in the script: + Values = + \{ + \{ "timeseries", "HeartRates" \}, + \{ "timestamp", baseTime.AddMinutes(1) \}, + \{ "values", 59d \}, + \{ "tag", "watches/fitbit" \} + \} +\}; + +// Define the patch operation; +var patchOp = new PatchOperation("users/john", null, patchRequest); + +// Execute the operation: +store.Operations.Send(patchOp); +`} + + + +* In this example, we **append** 100 entries to time series "HeartRates" on the specified document. + Timestamps and values are drawn from an array and other arguments are provided in the "Values" property. 
+ + +{`var baseTime = DateTime.UtcNow; + +// Create arrays of timestamps and random values to patch +var values = new List(); +var timeStamps = new List(); + +for (var i = 0; i < 100; i++) +\{ + values.Add(68 + Math.Round(19 * new Random().NextDouble())); + timeStamps.Add(baseTime.AddMinutes(i)); +\} + +var patchRequest = new PatchRequest +\{ + Script = @"var i = 0; + for (i = 0; i < $values.length; i++) \{ + timeseries(id(this), $timeseries).append ( + $timeStamps[i], + $values[i], + $tag); + \}", + Values = + \{ + \{ "timeseries", "HeartRates" \}, + \{ "timeStamps", timeStamps \}, + \{ "values", values \}, + \{ "tag", "watches/fitbit" \} + \} +\}; + +var patchOp = new PatchOperation("users/john", null, patchRequest); +store.Operations.Send(patchOp); +`} + + + +* In this example, we **delete** a range of 50 entries from time series "HeartRates" on the specified document. + + +{`store.Operations.Send(new PatchOperation("users/john", null, + new PatchRequest + \{ + Script = "timeseries(this, $timeseries).delete($from, $to);", + Values = + \{ + \{ "timeseries", "HeartRates" \}, + \{ "from", baseTime \}, + \{ "to", baseTime.AddMinutes(49) \} + \} + \})); +`} + + +### Syntax + +* The detailed syntax of `PatchOperation` is listed under this [syntax section](../../../../client-api/operations/patching/single-document.mdx#operations-api). + +* The detailed syntax of `PatchRequest` is listed under this [syntax section](../../../../client-api/operations/patching/single-document.mdx#patchrequest). + +* The available JavaScript API methods are detailed in the [time series JavaScript support](../../../../document-extensions/timeseries/client-api/javascript-support.mdx) article. + + + +## Patch time series data - multiple documents +### Usage + +* In order to patch time series data on multiple documents, you need to: + * Define a query that retrieves the set of documents to be patched (can be a dynamic or an index query). + * Define the patching action that will be executed on the matching documents. + +* This is achieved by defining a string, or creating an instance of `IndexQuery` that contains such string, + with the following two parts: + * **The query**: provide an [RQL](../../../../client-api/session/querying/what-is-rql.mdx) code snippet to filter the documents you want to patch. + * **The patching script**: use the [JavaScript time series API](../../../../document-extensions/timeseries/client-api/javascript-support.mdx) to define the patching action. + +* Create a `PatchByQueryOperation` instance and pass it the `IndexQuery` object, or the defined string. + +* Execute the `PatchByQueryOperation` by calling `store.Operations.Send`. + * The patch operation will be executed only on documents that match the query. + * This type of operation can be awaited for completion. Learn more in [Manage length operations](../../../../client-api/operations/what-are-operations.mdx#manage-lengthy-operations). + +* NOTE: + * The server treats timestamps passed in the patch request script as **UTC**, no conversion is applied. + * No exception is thrown if any of the documents no longer exist during patching. +### Examples + +* In this example, we **append** an entry to time series "HeartRates" on ALL documents in the "Users" collection. 
+ + +{`var indexQuery = new IndexQuery +\{ + // Define the query and the patching action that follows the 'update' keyword: + Query = @"from Users as u + update + \{ + timeseries(u, $name).append($time, $values, $tag) + \}", + + // Provide values for the parameters in the script: + QueryParameters = new Parameters + \{ + \{ "name", "HeartRates" \}, + \{ "time", baseline.AddMinutes(1) \}, + \{ "values", new[] \{59d\} \}, + \{ "tag", "watches/fitbit" \} + \} +\}; + +// Define the patch operation: +var patchByQueryOp = new PatchByQueryOperation(indexQuery); + +// Execute the operation: +store.Operations.Send(patchByQueryOp); +`} + + + +* In this example, we **delete** the "HeartRates" time series from documents that match the query criteria. + + +{`PatchByQueryOperation deleteByQueryOp = new PatchByQueryOperation(new IndexQuery +\{ + Query = @"from Users as u + where u.Age < 30 + update + \{ + timeseries(u, $name).delete($from, $to) + \}", + + QueryParameters = new Parameters + \{ + \{ "name", "HeartRates" \}, + \{ "from", DateTime.MinValue \}, + \{ "to", DateTime.MaxValue \} + \} +\}); + +// Execute the operation: +// Time series "HeartRates" will be deleted for all users with age < 30 +store.Operations.Send(deleteByQueryOp); +`} + + + +* In this example, for each document in the "Users" collection, we patch a document field with data retrieved from its time series entries. + The document's time series data itself is Not patched. + The document `NumberOfUniqueTagsInTS` field will be updated with the number of unique tags in the user's "HeartRates" time series. + To do this, we use the JavaScript [get](../../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-3) method to get all the time series entries for each document + and extract each entry's tag. + + +{`PatchByQueryOperation patchNumOfUniqueTags = new PatchByQueryOperation(new IndexQuery +\{ + Query = @" + declare function patchDocumentField(doc) \{ + var differentTags = []; + var entries = timeseries(doc, $name).get($from, $to); + + for (var i = 0; i < entries.length; i++) \{ + var e = entries[i]; + + if (e.Tag !== null) \{ + if (!differentTags.includes(e.Tag)) \{ + differentTags.push(e.Tag); + \} + \} + \} + + doc.NumberOfUniqueTagsInTS = differentTags.length; + return doc; + \} + + from Users as u + update \{ + put(id(u), patchDocumentField(u)) + \}", + + QueryParameters = new Parameters + \{ + \{ "name", "HeartRates" \}, + \{ "from", DateTime.MinValue \}, + \{ "to", DateTime.MaxValue \} + \} +\}); + +// Execute the operation and Wait for completion: +var result = store.Operations.Send(patchNumOfUniqueTags).WaitForCompletion(); +`} + + +### Syntax + +* The detailed syntax of `PatchByQueryOperation` is listed under this [syntax section](../../../../client-api/operations/patching/set-based.mdx#syntax-overview). + +* The available JavaScript API methods are detailed in the [time series JavaScript support](../../../../document-extensions/timeseries/client-api/javascript-support.mdx) article. 
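+
+* As an additional illustration - a minimal sketch, not taken from the original examples, and assuming the `$values` script parameter accepts an array exactly as in the `PatchByQueryOperation` example above - a single entry holding multiple values can be appended to one document as follows:
+
+
+{`// Sketch (assumption): append one multi-value entry via PatchOperation
+var patchRequest = new PatchRequest
+\{
+    // Same JavaScript API as in the examples above:
+    Script = "timeseries(this, $timeseries).append($timestamp, $values, $tag);",
+    Values =
+    \{
+        \{ "timeseries", "HeartRates" \},
+        \{ "timestamp", DateTime.UtcNow \},
+        // Passing an array appends a single entry with multiple values
+        \{ "values", new[] \{ 59d, 63d, 71d \} \},
+        \{ "tag", "watches/fitbit" \}
+    \}
+\};
+
+store.Operations.Send(new PatchOperation("users/john", null, patchRequest));
+`}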
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_patch-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_patch-nodejs.mdx new file mode 100644 index 0000000000..e8a94ff94a --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/_patch-nodejs.mdx @@ -0,0 +1,271 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + + +* Patching time series data (Append or Delete entries) can be performed via [Operations](../../../../client-api/operations/what-are-operations.mdx). + * Use [PatchOperation](../../../../client-api/operations/patching/single-document.mdx) to patch data on a **single** document. + * Use [PatchByQueryOperation](../../../../client-api/operations/patching/set-based.mdx) to patch data on **multiple** documents. + +* Patching time series entries on a single document can also be performed via the [session](../../../../document-extensions/timeseries/client-api/session/patch.mdx). + +* In this page: + * [Patch time series data - single document](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#patch-time-series-data---single-document) + * [Usage](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#usage) + * [Examples](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#examples) + * [Syntax](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#syntax) + * [Patch time series data - multiple documents](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#patch-time-series-data---multiple-documents) + * [Usage](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#usage-1) + * [Examples](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#examples-1) + * [Syntax](../../../../document-extensions/timeseries/client-api/operations/patch.mdx#syntax-1) + + +## Patch time series data - single document +### Usage + +* Create a `PatchRequest` instance: + * Define the Append or Delete action using the [JavaScript time series API](../../../../document-extensions/timeseries/client-api/javascript-support.mdx). + +* Create a `PatchOperation` instance and pass it: + * The ID of the document to patch + * The document change vector (or `null`) + * The `PatchRequest` object + +* Execute the `PatchOperation` operation by calling `store.operations.send` + +* NOTE: + * The server treats timestamps passed in the patch request script as **UTC**, no conversion is applied by the client to local time. + * Appending entries to a time series that doesn't yet exist yet will create the time series. + * No exception is thrown if the specified document does not exist. +### Examples + +* In this example, we **append** a single entry to time series "HeartRates" on the specified document. 
+ + +{`const baseTime = new Date(); + +const patchRequest = new PatchRequest(); + +// Define the patch request using JavaScript: +patchRequest.script = "timeseries(this, $timeseries).append($timestamp, $values, $tag);"; + +// Provide values for the parameters in the script: +patchRequest.values = \{ + timeseries: "HeartRates", + timestamp: baseTime.toISOString(), + values: 59, + tag: "watches/fitbit" +\}; + +// Define the patch operation: +const patchOp = new PatchOperation("users/john", null, patchRequest); + +// Execute the operation: +await documentStore.operations.send(patchOp); +`} + + + +* In this example, we **append** 100 entries to time series "HeartRates" on the specified document. + Timestamps and values are drawn from an array and other arguments are provided in the "values" property. + + +{`const baseTime = new Date(); + +// Create arrays of timestamps and random values to patch +const values = []; +const timeStamps = []; + +for (let i = 0; i < 100; i++) \{ + const randomValue = 68 + Math.round(19 * Math.random()); + values.push(randomValue); + + const timeStamp = baseTime.getTime() + 60_000 * i; + timeStamps.push(new Date(timeStamp).toISOString()); +\} + +const patchRequest = new PatchRequest(); + +patchRequest.script = \` + for (let i = 0; i < $values.length; i++) \{ + timeseries(id(this), $timeseries).append( + $timeStamps[i], + $values[i], + $tag); + \}\`; + +patchRequest.values = \{ + timeseries: "HeartRates", + timeStamps: timeStamps, + values: values, + tag: "watches/fitbit" +\}; + +const patchOp = new PatchOperation("users/john", null, patchRequest); +await documentStore.operations.send(patchOp); +`} + + + +* In this example, we **delete** a range of 50 entries from time series "HeartRates" on the specified document. + + +{`const baseTime = new Date(); + +const patchRequest = new PatchRequest(); + +patchRequest.script = "timeseries(this, $timeseries).delete($from, $to);"; + +patchRequest.values = \{ + timeseries: "HeartRates", + from: baseTime.toISOString(), + to: new Date(baseTime.getTime() + 60_000 * 49).toISOString() +\}; + +const patchOp = new PatchOperation("users/john", null, patchRequest); +await documentStore.operations.send(patchOp); +`} + + +### Syntax + +* The detailed syntax of `PatchOperation` is listed under this [syntax section](../../../../client-api/operations/patching/single-document.mdx#operations-api-syntax). + +* The detailed syntax of `PatchRequest` is listed under this [syntax section](../../../../client-api/operations/patching/single-document.mdx#session-api-using-defer-syntax). + +* The available JavaScript API methods are detailed in the [time series JavaScript support](../../../../document-extensions/timeseries/client-api/javascript-support.mdx) article. + + + +## Patch time series data - multiple documents +### Usage + +* In order to patch time series data on multiple documents, you need to: + * Define a query that retrieves the set of documents to be patched (can be a dynamic or an index query). + * Define the patching action that will be executed on the matching documents. + +* This is achieved by defining a string, or creating an instance of `IndexQuery` that contains such string, + with the following two parts: + * **The query**: provide an [RQL](../../../../client-api/session/querying/what-is-rql.mdx) code snippet to filter the documents you want to patch. + * **The patching script**: use the [JavaScript time series API](../../../../document-extensions/timeseries/client-api/javascript-support.mdx) to define the patching action. 
+ +* Create a `PatchByQueryOperation` instance and pass it the `IndexQuery` object, or the defined string. + +* Execute the `PatchByQueryOperation` by calling `store.operations.send`. + * The patch operation will be executed only on documents that match the query. + * This type of operation can be awaited for completion. Learn more in [Manage length operations](../../../../client-api/operations/what-are-operations.mdx#manage-lengthy-operations). + +* NOTE: + * The server treats timestamps passed in the patch request script as **UTC**, no conversion is applied. + * No exception is thrown if any of the documents no longer exist during patching. +### Examples + +* In this example, we **append** an entry to time series "HeartRates" on ALL documents in the "Users" collection. + + +{`const indexQuery = new IndexQuery(); + +// Define the query & patch string: +indexQuery.query = \` + from users as u + update \{ + timeseries(u, $name).append($time, $values, $tag) + \}\`; + +// Provide values for the parameters in the script: +indexQuery.queryParameters = \{ + name: "HeartRates", + time: new Date(baseTime.getTime() + 60_000).toISOString(), + values: 59, + tag: "watches/fitbit" +\} + +// Define the patch operation: +const patchByQueryOp = new PatchByQueryOperation(indexQuery); + +// Execute the operation: +await documentStore.operations.send(patchByQueryOp); +`} + + + +* In this example, we **delete** the "HeartRates" time series from documents that match the query criteria. + + +{`const indexQuery = new IndexQuery(); + +indexQuery.query = \` + from users as u + where u.age < 30 + update + \{ + timeseries(u, "HeartRates").delete() + \}\`; + +const deleteByQueryOp = new PatchByQueryOperation(indexQuery); + +// Execute the operation: +// Time series "HeartRates" will be deleted for all users with age < 30 +await documentStore.operations.send(deleteByQueryOp); +`} + + + +* In this example, for each document in the "Users" collection, we patch a document field with data retrieved from its time series entries. + The document's time series data itself is Not patched. + The document `numberOfUniqueTagsInTS` field will be updated with the number of unique tags in the user's "HeartRates" time series. + To do this, we use the JavaScript [get](../../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-3) method to get all the time series entries for each document + and extract each entry's tag. + + +{`const indexQuery = new IndexQuery(); + +indexQuery.query = \` + declare function patchDocumentField(doc) \{ + var differentTags = []; + var entries = timeseries(doc, $name).get(); + + for (var i = 0; i < entries.length; i++) \{ + var e = entries[i]; + + if (e.Tag !== null) \{ + if (!differentTags.includes(e.Tag)) \{ + differentTags.push(e.Tag); + \} + \} + \} + + doc.numberOfUniqueTagsInTS = differentTags.length; + return doc; + \} + + from users as u + update \{ + put(id(u), patchDocumentField(u)) + \} +\`; + +indexQuery.queryParameters = \{ + name: "HeartRates" +\} + +const patchByQueryOp = new PatchByQueryOperation(indexQuery); + +// Execute the operation and wait for completion: +const asyncOp = await documentStore.operations.send(patchByQueryOp); +await asyncOp.waitForCompletion(); +`} + + +### Syntax + +* The detailed syntax of `PatchByQueryOperation` is listed under this [syntax section](../../../../client-api/operations/patching/set-based.mdx#patchbyqueryoperation-syntax). 
+ +* The available JavaScript API methods are detailed in the [time series JavaScript support](../../../../document-extensions/timeseries/client-api/javascript-support.mdx) article. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/append-and-delete.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/append-and-delete.mdx new file mode 100644 index 0000000000..9f5467e917 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/append-and-delete.mdx @@ -0,0 +1,42 @@ +--- +title: "Append & Delete Time Series Operations" +hide_table_of_contents: true +sidebar_label: Append & Delete +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import AppendAndDeleteCsharp from './_append-and-delete-csharp.mdx'; +import AppendAndDeleteNodejs from './_append-and-delete-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/get.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/get.mdx new file mode 100644 index 0000000000..f83e412775 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/get.mdx @@ -0,0 +1,42 @@ +--- +title: "Get Time Series Operation" +hide_table_of_contents: true +sidebar_label: Get +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetCsharp from './_get-csharp.mdx'; +import GetNodejs from './_get-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/patch.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/patch.mdx new file mode 100644 index 0000000000..b3a306af9c --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/operations/patch.mdx @@ -0,0 +1,45 @@ +--- +title: "Patch Time Series Operations" +hide_table_of_contents: true +sidebar_label: Patch +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PatchCsharp from './_patch-csharp.mdx'; +import PatchNodejs from './_patch-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/overview.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/overview.mdx new file mode 100644 index 0000000000..ccc044ad1a --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/overview.mdx @@ -0,0 +1,43 @@ +--- +title: "Time Series: Client API Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OverviewCsharp from './_overview-csharp.mdx'; +import OverviewNodejs from './_overview-nodejs.mdx'; + +export 
const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-csharp.mdx new file mode 100644 index 0000000000..877407ff31 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-csharp.mdx @@ -0,0 +1,183 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `TimeSeriesFor.Append` for the following actions: + * **Creating a new time series** + Appending an entry to a time series that doesn't exist yet + will create the time series and add the new entry to it. + * **Creating a new time series entry** + Appending a new entry to an existing time series + will add the entry to the series at the specified timestamp. + * **Modifying an existing time series entry** + Use `Append` to update the data of an existing entry with the specified timestamp. + +* Each call to `Append` handles a **single** [time series entry](../../../../document-extensions/timeseries/design.mdx#time-series-entries). + +* To append **multiple** entries in a single transaction you can: + * Call `Append` as many times as needed before calling `session.SaveChanges`, as shown in the examples below. + * Use patching to update the time series. Learn more in [Patch time series entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx). + * Append entries directly on the _Store_ via [Operations](../../../../client-api/operations/what-are-operations.mdx). + Learn more in [Append time series operations](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx). +* In this page: + * [`Append` usage](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-usage) + * [Examples](../../../../document-extensions/timeseries/client-api/session/append.mdx#examples) + * [Append entries with a single value](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-a-single-value) + * [Append entries with multiple values](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/append.mdx#syntax) + + +## `Append` usage + +**Flow**: + +* Open a session. +* Create an instance of `TimeSeriesFor` and pass it the following: + * Provide an explicit document ID, -or- + pass an [entity tracked by the session](../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.Query](../../../../client-api/session/querying/how-to-query.mdx) or from [session.Load](../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `TimeSeriesFor.Append` and pass it the time series entry details. +* Call `session.SaveChanges` for the action to take effect on the server. + +**Note**: + +* A `DocumentDoesNotExistException` exception is thrown if the specified document does not exist. + + + +## Examples + +#### Append entries with a single value: + +* In this example, entries are appended with a single value.
+* Although a loop is used to append multiple entries, + all entries are appended in a single transaction when `SaveChanges` is executed. + + + +{`var baseline = DateTime.Today; + +// Append 10 HeartRate values +using (var session = store.OpenSession()) +\{ + session.Store(new User \{ Name = "John" \}, "users/john"); + + ISessionDocumentTimeSeries tsf = session.TimeSeriesFor("users/john", "HeartRates"); + + for (int i = 0; i < 10; i++) + \{ + tsf.Append(baseline.AddSeconds(i), new[] \{ 67d \}, "watches/fitbit"); + \} + + session.SaveChanges(); +\} +`} + + +#### Append entries with multiple values: + +* In this example, we append multi-value StockPrice entries. +* Notice the clarity gained by [naming the values](../../../../document-extensions/timeseries/client-api/named-time-series-values.mdx). + + + + +{`var baseTime = DateTime.Today; + +using (var session = store.OpenSession()) +{ + session.Store(new User { Name = "John" }, "users/john"); + + session.TimeSeriesFor("users/john", "StockPrices") + .Append(baseTime.AddDays(1), + new[] { 52, 54, 63.5, 51.4, 9824 }, "companies/kitchenAppliances"); + + session.TimeSeriesFor("users/john", "StockPrices") + .Append(baseTime.AddDays(2), + new[] { 54, 55, 61.5, 49.4, 8400 }, "companies/kitchenAppliances"); + + session.TimeSeriesFor("users/john", "StockPrices") + .Append(baseTime.AddDays(3), + new[] { 55, 57, 65.5, 50, 9020 }, "companies/kitchenAppliances"); + + session.SaveChanges(); +} +`} + + + + +{`var baseTime = DateTime.Today; + +using (var session = store.OpenSession()) +{ + session.Store(new User { Name = "John" }, "users/john"); + + // Call 'Append' with the custom StockPrice class + session.TimeSeriesFor<StockPrice>("users/john") + .Append(baseTime.AddDays(1), new StockPrice + { + Open = 52, + Close = 54, + High = 63.5, + Low = 51.4, + Volume = 9824, + }, "companies/kitchenAppliances"); + + session.TimeSeriesFor<StockPrice>("users/john") + .Append(baseTime.AddDays(2), new StockPrice + { + Open = 54, + Close = 55, + High = 61.5, + Low = 49.4, + Volume = 8400, + }, "companies/kitchenAppliances"); + + session.TimeSeriesFor<StockPrice>("users/john") + .Append(baseTime.AddDays(3), new StockPrice + { + Open = 55, + Close = 57, + High = 65.5, + Low = 50, + Volume = 9020, + }, "companies/kitchenAppliances"); + + session.SaveChanges(); +} +`} + + + + + + +## Syntax + + + +{`// Append an entry with a single value (double) +void Append(DateTime timestamp, double value, string tag = null); +`} + + + + + +{`// Append an entry with multiple values (IEnumerable<double>) +void Append(DateTime timestamp, IEnumerable<double> values, string tag = null); +`} + + + +| Parameter | Type | Description | +|---------------|-----------------------|-------------------------------| +| **timestamp** | `DateTime` | Time series entry's timestamp | +| **value** | `double` | Entry's value | +| **values** | `IEnumerable<double>` | Entry's values | +| **tag** | `string` | An optional tag for the entry | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-nodejs.mdx new file mode 100644 index 0000000000..93fdf40117 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-nodejs.mdx @@ -0,0 +1,279 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `timeSeriesFor.append` for the following actions: + * **Creating a new time series** + Appending an entry to a time series that doesn't
exist yet + will create the time series and add the new entry to it. + * **Creating a new time series entry** + Appending a new entry to an existing time series + will add the entry to the series at the specified timestamp. + * **Modifying an existing time series entry** + Use `append` to update the data of an existing entry with the specified timestamp. + +* Each call to `append` handles a **single** [time series entry](../../../../document-extensions/timeseries/design.mdx#time-series-entries). + +* To append **multiple** entries in a single transaction you can: + * Call `append` as many times as needed before calling `session.saveChanges`, as shown in the examples below. + * Use patching to update the time series. Learn more in [Patch time series entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx). + * Append entries directly on the _Store_ via [Operations](../../../../client-api/operations/what-are-operations.mdx). + Learn more in [Append time series operations](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx). +* In this page: + * [`append` usage](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-usage) + * [Examples](../../../../document-extensions/timeseries/client-api/session/append.mdx#examples) + * [Append entries with a single value](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-a-single-value) + * [Append entries with multiple values](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/append.mdx#syntax) + + +## `append` usage + +**Flow**: + +* Open a session. +* Create an instance of `timeSeriesFor` and pass it the following: + * Provide an explicit document ID, -or- + pass an [entity tracked by the session](../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.query](../../../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `timeSeriesFor.append` and pass it the time series entry details. +* Call `session.saveChanges` for the action to take effect on the server. + +**Note**: + +* A `DocumentDoesNotExistException` exception is thrown if the specified document does not exist. + + + +## Examples + +#### Append entries with a single value: + +* In this example, entries are appended with a single value. +* Although a loop is used to append multiple entries, + all entries are appended in a single transaction when `saveChanges` is executed. 
+ + + +{`// Open a session and store a new document +const session = documentStore.openSession(); +await session.store(new User("John"), "users/john"); + +// Get an instance of 'timeSeriesFor' +// Pass the document ID and the time series name +const timeSeriesName = "HeartRates"; +const tsf = session.timeSeriesFor("users/john", timeSeriesName); + +// Create time series and add entries: +// =================================== + +// Define an optional tag and some base time for the first entry: +const optionalTag = "watches/fitbit"; +const baseTime = new Date(); +baseTime.setUTCHours(0); + +// The first 'append' call will create the 'HeartRates' time series on the document +// (since this series doesn't exist yet on the document) and insert the first entry +tsf.append(baseTime, 65, optionalTag); + +// The next 'append' calls will add more entries to the 'HeartRates' time series +for (let i = 1; i < 10; i++) +\{ + const nextMinute = new Date(baseTime.getTime() + 60_000 * i); + const nextMeasurement = 65 + i; + tsf.append(nextMinute, nextMeasurement, optionalTag); +\} + +// Modify an existing entry: +// ========================= + +// Modify the last entry that was added +// The entry with the specified time stamp will be updated +tsf.append(new Date(baseTime.getTime() + 60_000 * 9), 60, optionalTag); + +// Save changes +await session.saveChanges(); + +// Results: +// ======== +// * The document will contain a time series named "HeartRates" with 10 entries. +// * The entries' timestamps are saved on the server in UTC. +`} + + +#### Append entries with multiple values: + +* In this example, we append multi-value StockPrice entries. +* Notice the clarity gained by [naming the values](../../../../document-extensions/timeseries/client-api/named-time-series-values.mdx). + + + + +{`const session = documentStore.openSession(); +await session.store(new User("John"), "users/john"); + +const tsf = session.timeSeriesFor("users/john", "StockPrices"); + +const optionalTag = "companies/kitchenAppliances"; +const baseTime = new Date(); +baseTime.setUTCHours(0); + +const oneDay = 24 * 60 * 60 * 1000; +let nextDay = new Date(baseTime.getTime() + oneDay); + +// Provide multiple values to the entity +tsf.append(nextDay, [ 52, 54, 63.5, 51.4, 9824 ], optionalTag); + +nextDay = new Date(baseTime.getTime() + oneDay * 2); +tsf.append(nextDay, [ 54, 55, 61.5, 49.4, 8400 ], optionalTag); + +nextDay = new Date(baseTime.getTime() + oneDay * 3); +tsf.append(nextDay, [ 55, 57, 65.5, 50, 9020 ], optionalTag); + +await session.saveChanges(); + +// Results: +// ======== +// * The document will contain a time series called "StockPrices" with 3 entries. +// * Each entry will have 5 values. +// * The entries' timestamps are saved on the server in UTC. 
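+// * Note: a single entry can hold several values, as shown here (RavenDB allows up to 32 values per entry).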
+`} + + + + +{`// Register the named values for the 'StockPrices' series on the server +await documentStore.timeSeries.register("Users", + "StockPrices", ["open", "close", "high", "low", "volume"]); + +const session = documentStore.openSession(); +await session.store(new User("John"), "users/john"); + +// Get an instance of 'timeSeriesFor', pass: +// * the document ID +// * the time series name +// * the class that will hold the entry's values +const tsf = session.timeSeriesFor("users/john", "StockPrices", StockPrice); + +const optionalTag = "companies/kitchenAppliances"; +const baseTime = new Date(); +baseTime.setUTCHours(0); +const oneDay = 24 * 60 * 60 * 1000; + +// Provide the multiple values via the StockPrice class +const price1 = new StockPrice(); +price1.open = 52; +price1.close = 54; +price1.high = 63.5; +price1.low = 51.4; +price1.volume = 9824; + +let nextDay = new Date(baseTime.getTime() + oneDay); +tsf.append(nextDay, price1, optionalTag); + +const price2 = new StockPrice(); +price2.open = 54; +price2.close = 55; +price2.high = 61.5; +price2.low = 49.4; +price2.volume = 8400; + +nextDay = new Date(baseTime.getTime() + oneDay * 2); +tsf.append(nextDay, price2, optionalTag); + +const price3 = new StockPrice(); +price3.open = 55; +price3.close = 57; +price3.high = 65.5; +price3.low = 50; +price3.volume = 9020; + +nextDay = new Date(baseTime.getTime() + oneDay * 3); +tsf.append(nextDay, price3, optionalTag); + +await session.saveChanges(); + +// Results: +// ======== +// * The document will contain a time series called "StockPrices" with 3 entries. +// * Each entry will have 5 named values. +// * The entries' timestamps are saved on the server in UTC. +`} + + + + +{`// This class is used in the "Named Values" example +class StockPrice { + + // Define the names for the entry values + static TIME_SERIES_VALUES = ["open", "close", "high", "low", "volume"]; + + constructor( + open = 0, + close = 0, + high = 0, + low = 0, + volume = 0 + ) { + Object.assign(this, { + open, + close, + high, + low, + volume + }); + } +} +`} + + + + + + +## Syntax + + + +{`// Available overloads: +// ==================== + +append(timestamp, value); +append(timestamp, value, tag); +`} + + + + + +{`append(timestamp, values); +append(timestamp, values, tag); +`} + + + + + +{`append(timestamp, entry); +append(timestamp, entry, tag); +append(entry); +`} + + + +| Parameter | Type | Description | +|---------------|----------|--------------------------------| +| **timestamp** | Date | Time series entry's timestamp | +| **value** | number | Entry's value | +| **values** | number[] | Entry's values | +| **tag** | string | An optional tag for the entry | +| **entry** | object | object with the entry's values | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-php.mdx new file mode 100644 index 0000000000..b216cf67d2 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-php.mdx @@ -0,0 +1,187 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `timeSeriesFor.append` to: + * **Create a new time series** + Appending an entry to a time series that doesn't exist yet + will create the time series and add it the new entry. 
+ * **Create a new time series entry** + Appending a new entry to an existing time series + will add the entry to the series at the specified timestamp. + * **Modify an existing time series entry** + Use `append` to update the data of an existing entry with the specified timestamp. + +* To append multiple entries in a single transaction you can: + * Call `append` as many times as needed before calling `session.saveChanges`, as shown in the examples below. + * Use patching to update the time series. Learn more in [Patch time series entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx). + * Append entries directly on the _Store_ via [Operations](../../../../client-api/operations/what-are-operations.mdx). + +* In this page: + * [Usage](../../../../document-extensions/timeseries/client-api/session/append.mdx#usage) + * [Examples](../../../../document-extensions/timeseries/client-api/session/append.mdx#examples) + * [Append entries with a single value](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-a-single-value) + * [Append entries with multiple values](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values) + + +## Usage + +**Flow**: + +* Open a session. +* Create an instance of `timeSeriesFor` and pass it the following: + * Provide an explicit document ID, -or- + pass an [entity tracked by the session](../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.query](../../../../client-api/session/querying/how-to-query.mdx) + or from [session.load](../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `timeSeriesFor.append` and pass it the time series entry details. +* Call `session.saveChanges` for the action to take effect on the server. + +**Note**: + +* A `DocumentDoesNotExistException` exception is thrown if the specified document does not exist. + + + +## Examples + +#### Append entries with a single value: + +* In this example, entries are appended with a single value. +* Although a loop is used to append multiple entries, + all entries are appended in a single transaction when `saveChanges` is executed. + + + + +{`$baseTime = DateUtils::today(); + +// Append 10 HeartRate values +$session = $store->openSession(); +try { + $user = new User(); + $user->setName("John"); + $session->store($user, "users/john"); + + $tsf = $session->timeSeriesFor("users/john", "HeartRates"); + + for ($i = 0; $i < 10; $i++) + { + $tsf->append((clone $baseTime)->add(new DateInterval("PT" . $i . "S")), [ 67 ], "watches/fitbit"); + } + + $session->saveChanges(); +} finally { + $session->close(); +} +`} + + + +#### Append entries with multiple values: + +* In this example, we append multi-value StockPrice entries. +* Notice the clarity gained by naming the values. 
+ + + + +{`$session = $store->openSession(); +try { + $user = new User(); + $user->setName("John"); + $session->store($user, "users/john"); + + $session->timeSeriesFor("users/john", "StockPrices") + ->append( + (clone $baseTime)->add(new DateInterval("P1D")), + [ 52, 54, 63.5, 51.4, 9824 ], + "companies/kitchenAppliances" + ); + + $session->timeSeriesFor("users/john", "StockPrices") + ->append( + (clone $baseTime)->add(new DateInterval("P2D")), + [ 54, 55, 61.5, 49.4, 8400 ], + "companies/kitchenAppliances" + ); + + $session->timeSeriesFor("users/john", "StockPrices") + ->append( + (clone $baseTime)->add(new DateInterval("P3D")), + [ 55, 57, 65.5, 50, 9020 ], + "companies/kitchenAppliances" + ); + + $session->saveChanges(); +} finally { + $session->close(); +} +`} + + + + +{`$session = $store->openSession(); +try { + $user = new User(); + $user->setName("John"); + $session->store($user, "users/john"); + + // Call 'Append' with the custom StockPrice class + $sp = new StockPrice(); + $sp->setOpen(52); + $sp->setClose(54); + $sp->setHigh(63.5); + $sp->setLow(51.4); + $sp->setVolume(9824); + + $session->typedTimeSeriesFor(StockPrice::class, "users/john") + ->append( + (clone $baseTime)->add(new DateInterval("P1D")), + $sp, + "companies/kitchenAppliances" + ); + + $sp = new StockPrice(); + $sp->setOpen(54); + $sp->setClose(55); + $sp->setHigh(61.5); + $sp->setLow(49.4); + $sp->setVolume(8400); + $session->typedTimeSeriesFor(StockPrice::class, "users/john") + ->append( + (clone $baseTime)->add(new DateInterval("P2D")), + $sp, + "companies/kitchenAppliances" + ); + + $sp = new StockPrice(); + $sp->setOpen(55); + $sp->setClose(57); + $sp->setHigh(65.5); + $sp->setLow(50); + $sp->setVolume(9020); + $session->typedTimeSeriesFor(StockPrice::class, "users/john") + ->append( + (clone $baseTime)->add(new DateInterval("P3D")), + $sp, + "companies/kitchenAppliances" + ); + + $session->saveChanges(); +} finally { + $session->close(); +} +`} + + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-python.mdx new file mode 100644 index 0000000000..f5096a0d96 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_append-python.mdx @@ -0,0 +1,159 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `time_series_for.append_single` or `time_series_for.append` for the following actions: + * **Creating a new time series** + Appending an entry to a time series that doesn't exist yet + will create the time series and add it the new entry. + * **Creating a new time series entry** + Appending a new entry to an existing time series + will add the entry to the series at the specified timestamp. + * **Modifying an existing time series entry** + Use `append_single` or `append` to update the data of an existing entry with the specified timestamp. + +* Each call to `append_single` handles a **single** time series value at + a **single** [time series entry](../../../../document-extensions/timeseries/design.mdx#time-series-entries). + Each call to `append` can handle **multiple** time series values at + a **single** [time series entry](../../../../document-extensions/timeseries/design.mdx#time-series-entries). 
+ +* To append **multiple** entries in a single transaction you can: + * Call `append_single` or `append` as many times as needed before calling `session.save_changes`, as shown in the examples below. + * Use patching to update the time series. Learn more in [Patch time series entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx). + * Append entries directly on the _Store_ via [Operations](../../../../client-api/operations/what-are-operations.mdx). + Learn more in [Append time series operations](../../../../document-extensions/timeseries/client-api/operations/append-and-delete.mdx). + +* In this page: + * [Usage](../../../../document-extensions/timeseries/client-api/session/append.mdx#usage) + * [Examples](../../../../document-extensions/timeseries/client-api/session/append.mdx#examples) + * [Append entries with a single value](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-a-single-value) + * [Append entries with multiple values](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/append.mdx#syntax) + + +## Usage + +**Flow**: + +* Open a session. +* Create an instance of `time_series_for` and pass it the following: + * Provide an explicit document ID, -or- + pass an [entity tracked by the session](../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.query](../../../../client-api/session/querying/how-to-query.mdx) + or from [session.load](../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `time_series_for.append_single` or `time_series_for.append` and pass it the time series entry details. +* Call `session.save_changes` for the action to take effect on the server. + +**Note**: + +* A `DocumentDoesNotExistException` exception is thrown if the specified document does not exist. + + + +## Examples + +#### Append entries with a single value: + +* In this example, entries are appended with a single value. +* Although a loop is used to append multiple entries, + all entries are appended in a single transaction when `save_changes` is executed. + + + +{`base_line = datetime.utcnow() + +# Append 10 HeartRate values +with store.open_session() as session: + session.store(User(name="John"), "users/john") + + tsf = session.time_series_for("users/john", "HeartRates") + + for i in range(10): + tsf.append_single(base_line + timedelta(seconds=i), 67.0, "watches/fitbit") + + session.save_changes() +`} + + +#### Append entries with multiple values: + +* In this example, we append multi-value StockPrice entries. +* Notice the clarity gained by naming the values. 
+ + + +{`with store.open_session() as session: + session.store(User(name="John"), "users/john") + + session.time_series_for("users/john", "StockPrices").append( + base_line + timedelta(days=1), [52, 54, 63.5, 51.4, 9824], "companies/kitchenAppliances" + ) + + session.time_series_for("users/john", "StockPrices").append( + base_line + timedelta(days=2), [54, 55, 61.5, 49.4, 8400], "companies/kitchenAppliances" + ) + + session.time_series_for("users/john", "StockPrices").append( + base_line + timedelta(days=3), [55, 57, 65.5, 50, 9020], "companies/kitchenAppliances" + ) + + session.save_changes() +`} + + + + +{`with store.open_session() as session: + session.store(User(name="John"), "users/john") + + session.typed_time_series_for(StockPrice, "users/john").append_single( + base_line + timedelta(days=1), StockPrice(52, 54, 63.5, 51.4, 9824), "companies/kitchenAppliances" + ) + + session.typed_time_series_for(StockPrice, "users/john").append_single( + base_line + timedelta(days=2), StockPrice(54, 55, 61.5, 49.4, 8400), "companies/kitchenAppliances" + ) + + session.typed_time_series_for(StockPrice, "users/john").append_single( + base_line + timedelta(days=3), StockPrice(55, 57, 65.5, 50, 9020), "companies/kitchenAppliances" + ) + + session.save_changes() +`} + + + + + + +## Syntax + + + +{`def append_single(self, timestamp: datetime, value: float, tag: Optional[str] = None) -> None: ... +`} + + + + + +{`def append(self, timestamp: datetime, values: List[float], tag: Optional[str] = None) -> None: ... +`} + + + +| Parameter | Type | Description | +|-----------|------|-------------| +| **timestamp** | `datetime` | Time series entry's timestamp | +| **value** | `float` | Entry's value | +| **values** | `List[float]` | Entry's values | +| **tag** (Optional) | `str` | An optional tag for the entry | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_category_.json new file mode 100644 index 0000000000..75fd17712e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 1, + "label": "Session" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-csharp.mdx new file mode 100644 index 0000000000..a18701d575 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-csharp.mdx @@ -0,0 +1,110 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `TimeSeriesFor.Delete` for the following actions: + * **Delete a single time series entry** + * **Delete a range of entries** + * **Delete the whole time series**: + To remove the whole time series, simply delete all its entries (see the sketch below).
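+ +A quick illustration (a minimal sketch added here, reusing the `users/john` document and `HeartRates` series from this article's examples): calling `Delete` with no arguments covers the full range, which removes every entry and thus the series itself: + + +{`using (var session = store.OpenSession()) +\{ + // No 'from'/'to' arguments: the range defaults to + // DateTime.MinValue..DateTime.MaxValue, so ALL entries are deleted. + // Once all entries are gone, the time series itself is removed. + session.TimeSeriesFor("users/john", "HeartRates").Delete(); + + session.SaveChanges(); +\} +`} +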
+ +* In this page: + * [`Delete` usage](../../../../document-extensions/timeseries/client-api/session/delete.mdx#delete-usage) + * [Examples](../../../../document-extensions/timeseries/client-api/session/delete.mdx#examples) + * [Delete single entry](../../../../document-extensions/timeseries/client-api/session/delete.mdx#delete-single-entry) + * [Delete range of entries](../../../../document-extensions/timeseries/client-api/session/delete.mdx#delete-range-of-entries) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/delete.mdx#syntax) + + +## `Delete` usage + +**Flow**: + +* Open a session. +* Create an instance of `TimeSeriesFor` and pass it the following: + * Provide an explicit document ID, or - + pass an [entity tracked by the session](../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.Query](../../../../client-api/session/querying/how-to-query.mdx) or from [session.Load](../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `TimeSeriesFor.Delete`: + * Provide a single timestamp to delete a specific entry, or - + * Specify a range of timestamps to delete multiple entries. +* Call `session.SaveChanges` for the action to take effect on the server. + +**Note**: + +* If the specified document doesn't exist, a `DocumentDoesNotExistException` is thrown. +* Attempting to delete nonexistent entries results in a no-op and generates no exception. +* To delete the whole time series, simply delete all its entries. + The series is removed when all its entries are deleted. +* Deleting a document deletes all its time series as well. + + + +## Examples + +In the following examples we delete time series entries appended by sample code in the +[Append](../../../../document-extensions/timeseries/client-api/session/append.mdx) article. + +#### Delete single entry: + + + +{`// Delete a single entry +using (var session = store.OpenSession()) +\{ + session.TimeSeriesFor("users/john", "HeartRates") + .Delete(baseline.AddMinutes(1)); + + session.SaveChanges(); +\} +`} + + +#### Delete range of entries: + + + +{`// Delete a range of entries from the time series +using (var session = store.OpenSession()) +\{ + session.TimeSeriesFor("users/john", "HeartRates") + .Delete(baseline.AddSeconds(0), baseline.AddSeconds(9)); + + session.SaveChanges(); +\} +`} + + + + + +## Syntax + + + +{`// Delete a single time-series entry +void Delete(DateTime at); +`} + + + + + +{`// Delete a range of time-series entries +void Delete(DateTime? from = null, DateTime? to = null); +`} + + + +| Parameter | Type | Description | +|-----------|-------------|:--------------------------------------------| +| **at** | `DateTime` | Timestamp of a time series entry to delete. | +| **from** | `DateTime?` | Delete the time series entries range that starts at this timestamp (inclusive).<br/>Default: `DateTime.MinValue` | +| **to** | `DateTime?` | Delete the time series entries range that ends at this timestamp (inclusive).<br/>Default: `DateTime.MaxValue` | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-nodejs.mdx new file mode 100644 index 0000000000..4eb4928771 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-nodejs.mdx @@ -0,0 +1,125 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `timeSeriesFor.delete` for the following actions: + * **Delete a single time series entry** + * **Delete a range of entries** + * **Delete the whole time series** + +* In this page: + * [`delete` usage](../../../../document-extensions/timeseries/client-api/session/delete.mdx#delete-usage) + * [Examples](../../../../document-extensions/timeseries/client-api/session/delete.mdx#examples) + * [Delete single entry](../../../../document-extensions/timeseries/client-api/session/delete.mdx#delete-single-entry) + * [Delete range of entries](../../../../document-extensions/timeseries/client-api/session/delete.mdx#delete-range-of-entries) + * [Delete time series](../../../../document-extensions/timeseries/client-api/session/delete.mdx#delete-time-series-1) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/delete.mdx#syntax) + + +## `delete` usage + +**Flow**: + +* Open a session. +* Create an instance of `timeSeriesFor` and pass it the following: + * Provide an explicit document ID, or - + pass an [entity tracked by the session](../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.query](../../../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `timeSeriesFor.deleteAt` or `timeSeriesFor.delete`: + * Provide `deleteAt` with a single timestamp to delete a specific entry, or - + * Provide `delete` with a range of timestamps to delete multiple entries (calling it with no arguments deletes all entries). +* Call `session.saveChanges` for the action to take effect on the server. + +**Note**: + +* If the specified document doesn't exist, a `DocumentDoesNotExistException` is thrown. +* Attempting to delete nonexistent entries results in a no-op and generates no exception. +* To delete the whole time series, simply delete all its entries. + The series is removed when all its entries are deleted. +* Deleting a document deletes all its time series as well. + + + +## Examples + +In the following examples we delete time series entries appended by sample code in the +[Append](../../../../document-extensions/timeseries/client-api/session/append.mdx) article. +
+#### Delete single entry: + + + +{`// Get an instance of 'timeSeriesFor' +const tsf = session.timeSeriesFor("users/john", "HeartRates"); + +// Call 'deleteAt' to delete a specific entry +const timeStampOfEntry = new Date(baseTime.getTime() + 60_000); +tsf.deleteAt(timeStampOfEntry); + +// Save changes +await session.saveChanges(); +`} + + +#### Delete range of entries: + + + +{`// Get an instance of 'timeSeriesFor' +const tsf = session.timeSeriesFor("users/john", "HeartRates"); + +// Delete a range of 5 entries +const fromTimeStamp = new Date(baseTime.getTime()); +const toTimeStamp = new Date(baseTime.getTime() + 60_000 * 5); +tsf.delete(fromTimeStamp, toTimeStamp); + +// Save changes +await session.saveChanges(); +`} + + +#### Delete time series: + + + +{`// Get an instance of 'timeSeriesFor' +const tsf = session.timeSeriesFor("users/john", "HeartRates"); + +// Delete ALL entries +// The whole time series will be removed +tsf.delete(); + +// Save changes +await session.saveChanges(); +`} + + + + + +## Syntax + + + +{`// Available overloads: +// ==================== + +delete(); // Delete all entries +deleteAt(at); // Delete a specific entry +delete(from, to); // Delete a range of entries +`} + + + +| Parameter | Type | Description | +|-----------|--------|:----------------------------------------------| +| **at** | `Date` | Timestamp of the time series entry to delete. | +| **from** | `Date` | Delete the time series entries range that starts at this timestamp (inclusive).<br/>Pass `null` to use the minimum date value. | +| **to** | `Date` | Delete the time series entries range that ends at this timestamp (inclusive).<br/>Pass `null` to use the maximum date value. | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-php.mdx new file mode 100644 index 0000000000..5e807bd0e0 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-php.mdx @@ -0,0 +1,62 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `timeSeriesFor.delete` to delete time series entries. +* A time series is removed when all of its entries are deleted. + +* In this page: + * [usage](../../../../document-extensions/timeseries/client-api/session/delete.mdx#usage) + * [Example](../../../../document-extensions/timeseries/client-api/session/delete.mdx#example) + + +## usage + +**Flow**: + +* Open a session. +* Create an instance of `timeSeriesFor` and pass it the following: + * Provide an explicit document ID, or - + pass an [entity tracked by the session](../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.query](../../../../client-api/session/querying/how-to-query.mdx) + or from [session.load](../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `timeSeriesFor.delete` and provide the **timestamps range** of the entries you want to delete. +* Call `session.saveChanges` for the action to take effect on the server. + +**Note**: + +* If the specified document doesn't exist, a `DocumentDoesNotExistException` will be thrown. +* Attempting to delete nonexistent entries results in a no-op and generates no exception. +* To delete a whole time series, simply delete all its entries. + The series is removed when all its entries are deleted. +* Deleting a document deletes all its time series as well. + + + +## Example + +In the following example we delete a time series entry appended by sample code in the +[append](../../../../document-extensions/timeseries/client-api/session/append.mdx#examples) article. + + +{`// Delete a single entry +$session = $store->openSession(); +try \{ + $session->timeSeriesFor("users/john", "HeartRates") + ->delete((clone $baseTime)->add(new DateInterval("PT1M"))); + + $session->saveChanges(); +\} finally \{ + $session->close(); +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-python.mdx new file mode 100644 index 0000000000..3268625c51 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_delete-python.mdx @@ -0,0 +1,83 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `time_series_for.delete_at` to delete a time series entry. +* Use `time_series_for.delete` to delete a range of time series entries. +* A time series is removed when all of its entries are deleted.
+ +* In this page: + * [usage](../../../../document-extensions/timeseries/client-api/session/delete.mdx#usage) + * [Example](../../../../document-extensions/timeseries/client-api/session/delete.mdx#example) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/delete.mdx#syntax) + + +## usage + +**Flow**: + +* Open a session. +* Create an instance of `time_series_for` and pass it the following: + * Provide an explicit document ID, or - + pass an [entity tracked by the session](../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.query](../../../../client-api/session/querying/how-to-query.mdx) + or from [session.load](../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `time_series_for.delete_at` and provide the **timestamp** of an entry you want to delete, + -or- + Call `time_series_for.delete` and provide the **timestamps range** of the entries you want to delete. +* Call `session.save_changes` for the action to take effect on the server. + +**Note**: + +* If the specified document doesn't exist, a `DocumentDoesNotExistException` will be thrown. +* Attempting to delete nonexistent entries results in a no-op and generates no exception. +* To delete a whole time series simply delete all its entries. + The series is removed when all its entries are deleted. +* Deleting a document deletes all its time series as well. + + + +## Example + +In the following example we delete a time series entry appended by sample code in the +[Append](../../../../document-extensions/timeseries/client-api/session/append.mdx) article. + + +{`# Delete a single entry +with store.open_session() as session: + session.time_series_for("users/john", "HeartRates").delete(base_line + timedelta(minutes=1)) + session.save_changes() +`} + + + + + +## Syntax + + + +{`def delete_at(self, at: datetime) -> None: ... +`} + + + + + +{`def delete(self, datetime_from: Optional[datetime] = None, datetime_to: Optional[datetime] = None): ... +`} + + + +| Parameter | Type | Description | +|-----------|-------------|:-------------------------------------------| +| **at** | `datetime` | Timestamp of a time series entry to delete | +| **datetime_from** (Optional) | `datetime` | Delete the time series entries range that starts at this timestamp (inclusive) | +| **datetime_to** (Optional) | `datetime` | Delete the time series entries range that ends at this timestamp (inclusive) | + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-csharp.mdx new file mode 100644 index 0000000000..7928ba27a4 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-csharp.mdx @@ -0,0 +1,148 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Patching multiple time series entries (append or delete entries) can be performed via the _Session_ + using [session.Advanced.Defer](../../../../client-api/operations/patching/single-document.mdx#session-api-using-defer), as described below. + * You can handle a single document at a time. + * The patching action is defined by the provided [JavaScript](../../../../document-extensions/timeseries/client-api/javascript-support.mdx). 
+ +* Patching time series entries can also be done directly on the _Store_ via [Operations](../../../../client-api/operations/what-are-operations.mdx), + where multiple documents can be handled at a time. Learn more in [Patching time series operations](../../../../document-extensions/timeseries/client-api/operations/patch.mdx). + +* In this page: + * [Usage](../../../../document-extensions/timeseries/client-api/session/patch.mdx#usage) + * [Patching examples](../../../../document-extensions/timeseries/client-api/session/patch.mdx#patching-examples) + * [Append multiple entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx#append-multiple-entries) + * [Delete multiple entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx#delete-multiple-entries) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/patch.mdx#syntax) + + +## Usage + +* Open a session +* Construct a `PatchCommandData` instance and pass it the following: + * The document ID that contains the time series + * The document change vector (or `null`) + * A `PatchRequest` instance with a JavaScript that appends or removes time series entries +* Call `session.Advanced.Defer` and pass it the `PatchCommandData` command. + Note that you can call _Defer_ multiple times prior to calling _SaveChanges_. +* Call `session.SaveChanges()`. + All patch requests added via _Defer_ will be sent to the server for execution when _SaveChanges_ is called. + + + +## Patching examples + +#### Append multiple entries: + +In this example, we append 100 time series entries with random heart rate values to a document. + + + +{`var baseline = DateTime.Today; + +// Create arrays of timestamps and random values to patch +List<double> values = new List<double>(); +List<DateTime> timeStamps = new List<DateTime>(); + +// Reuse a single Random instance for all the values +var random = new Random(); + +for (var i = 0; i < 100; i++) +\{ + values.Add(68 + Math.Round(19 * random.NextDouble())); + timeStamps.Add(baseline.AddSeconds(i)); +\} + +session.Advanced.Defer(new PatchCommandData("users/1-A", null, + new PatchRequest + \{ + Script = @" + var i = 0; + for(i = 0; i < $values.length; i++) + \{ + timeseries(id(this), $timeseries) + .append ( + new Date($timeStamps[i]), + $values[i], + $tag); + \}", + + Values = + \{ + \{ "timeseries", "HeartRates" \}, + \{ "timeStamps", timeStamps\}, + \{ "values", values \}, + \{ "tag", "watches/fitbit" \} + \} + \}, null)); + +session.SaveChanges(); +`} + + +#### Delete multiple entries: + +In this example, we remove a range of 50 time series entries from a document. + + + +{`// Delete time-series entries +session.Advanced.Defer(new PatchCommandData("users/1-A", null, + new PatchRequest + \{ + Script = @"timeseries(this, $timeseries) + .delete( + $from, + $to + );", + Values = + \{ + \{ "timeseries", "HeartRates" \}, + \{ "from", baseline.AddSeconds(0) \}, + \{ "to", baseline.AddSeconds(49) \} + \} + \}, null)); + +session.SaveChanges(); +`} + + + + + +## Syntax + +**`PatchCommandData`** + + + +{`public PatchCommandData(string id, string changeVector, + PatchRequest patch, PatchRequest patchIfMissing) +`} + + + +Learn more about `PatchCommandData` [here](../../../../client-api/operations/patching/single-document.mdx#session-api-using-defer).
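+ +For illustration, here is a minimal sketch (hypothetical values, not taken from the examples above) that wires a `PatchRequest` into a `PatchCommandData`; `null` is passed for both the change vector (apply regardless of the document's current change vector) and `patchIfMissing` (do nothing if the document does not exist): + + +{`var command = new PatchCommandData( + id: "users/1-A", + changeVector: null, + patch: new PatchRequest + \{ + Script = @"timeseries(this, $timeseries).append($timestamp, $value, $tag);", + Values = + \{ + \{ "timeseries", "HeartRates" \}, + \{ "timestamp", DateTime.UtcNow \}, + \{ "value", 72d \}, + \{ "tag", "watches/fitbit" \} + \} + \}, + patchIfMissing: null); + +session.Advanced.Defer(command); +session.SaveChanges(); +`} +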
+**`PatchRequest`** + + + +{`public class PatchRequest +\{ + // The patching script + public string Script \{ get; set; \} + + // Values for the parameters used by the patching script + public Dictionary<string, object> Values \{ get; set; \} +\} +`} + + + +Learn more about `PatchRequest` [here](../../../../client-api/operations/patching/single-document.mdx#session-api-using-defer). + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-nodejs.mdx new file mode 100644 index 0000000000..5b93a2d11b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-nodejs.mdx @@ -0,0 +1,148 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Patching multiple time series entries (append or delete entries) can be performed via the _Session_ + using [session.advanced.defer](../../../../client-api/operations/patching/single-document.mdx#session-api-using-defer), as described below. + * You can handle a single document at a time. + * The patching action is defined by the provided [JavaScript](../../../../document-extensions/timeseries/client-api/javascript-support.mdx). + +* Patching time series entries can also be done directly on the _Store_ via [Operations](../../../../client-api/operations/what-are-operations.mdx), + where multiple documents can be handled at a time. Learn more in [Patching time series operations](../../../../document-extensions/timeseries/client-api/operations/patch.mdx). + +* In this page: + * [Usage](../../../../document-extensions/timeseries/client-api/session/patch.mdx#usage) + * [Patching examples](../../../../document-extensions/timeseries/client-api/session/patch.mdx#patching-examples) + * [Append multiple entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx#append-multiple-entries) + * [Delete multiple entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx#delete-multiple-entries) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/patch.mdx#syntax) + + +## Usage + +* Open a session +* Construct a `PatchCommandData` instance and pass it the following: + * The document ID that contains the time series + * The document change vector (or `null`) + * A `PatchRequest` instance with a JavaScript that appends or removes time series entries +* Call `session.advanced.defer` and pass it the `PatchCommandData` command. + Note that you can call _defer_ multiple times prior to calling _saveChanges_. +* Call `session.saveChanges()`. + All patch requests added via _defer_ will be sent to the server for execution when _saveChanges_ is called. + + + +## Patching examples + +#### Append multiple entries: + +In this example, we append 100 time series entries with random heart rate values to a document.
+ + + +{`const baseTime = new Date(); + +// Prepare random values and timestamps to patch +const values = []; +const timeStamps = []; + +for (let i = 0; i < 100; i++) \{ + const randomValue = 65 + Math.round(20 * Math.random()); + values.push(randomValue); + + // NOTE: the timestamp passed in the patch request script should be in UTC + const timeStamp = new Date(baseTime.getTime() + 60_000 * i); + const utcDate = new Date(timeStamp.getTime() + timeStamp.getTimezoneOffset() * 60_000); + timeStamps.push(utcDate); +\} + +// Define the patch request +// ======================== + +const patchRequest = new PatchRequest(); + +// Provide a JavaScript script, use the 'append' method +// Note: "args." can be replaced with "$". E.g.: "args.tag" => "$tag" +patchRequest.script = \` + for(var i = 0; i < args.values.length; i++) + \{ + timeseries(id(this), args.timeseries) + .append ( + new Date(args.timeStamps[i]), + args.values[i], + args.tag); + \}\`; + +// Provide values for the params used within the script +patchRequest.values = \{ + timeseries: "HeartRates", + timeStamps: timeStamps, + values: values, + tag: "watches/fitbit" +\} + +// Define the patch command +const patchCommand = new PatchCommandData("users/john", null, patchRequest, null) + +// Pass the patch command to 'defer' +session.advanced.defer(patchCommand); + +// Call saveChanges for the patch request to execute on the server +await session.saveChanges(); +`} + + +#### Delete multiple entries: + +In this example, we remove a range of 50 time series entries from a document. + + + +{`// Define the patch request +// ======================== + +const patchRequest = new PatchRequest(); + +// Provide a JavaScript script, use the 'delete' method +// Note: "args." can be replaced with "$". E.g.: "args.to" => "$to" +patchRequest.script = \`timeseries(this, args.timeseries) + .delete( + args.from, + args.to + );\`; + +// NOTE: the 'from' & 'to' params in the patch request script should be in UTC +const utcDate = new Date(baseTime.getTime() + baseTime.getTimezoneOffset() * 60_000); + +// Provide values for the params used within the script +patchRequest.values = \{ + timeseries: "HeartRates", + from: utcDate, + to: new Date(utcDate.getTime() + 60_000 * 49) +\} + +// Define the patch command +const patchCommand = new PatchCommandData("users/john", null, patchRequest, null) + +// Pass the patch command to 'defer' +session.advanced.defer(patchCommand); + +// Call saveChanges for the patch request to execute on the server +await session.saveChanges(); +`} + + + + + +## Syntax + +A detailed syntax description for `PatchCommandData` & `PatchRequest` can be found in the following section: +[Session API using defer syntax](../../../../client-api/operations/patching/single-document.mdx#session-api-using-defer-syntax). + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-php.mdx new file mode 100644 index 0000000000..54249efc05 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-php.mdx @@ -0,0 +1,140 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Patching multiple time series entries (append or delete entries) can be performed via the _Session_ + using `session.advanced.defer` as described below. 
+ * You can handle a single document at a time. + * The patching action is defined by the provided `JavaScript`. + +* Patching time series entries can also be done directly on the _Store_ via [Operations](../../../../client-api/operations/what-are-operations.mdx), + where multiple documents can be handled at a time. + +* In this page: + * [Usage](../../../../document-extensions/timeseries/client-api/session/patch.mdx#usage) + * [Patching examples](../../../../document-extensions/timeseries/client-api/session/patch.mdx#patching-examples) + * [Append multiple entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx#append-multiple-entries) + * [Delete multiple entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx#delete-multiple-entries) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/patch.mdx#syntax) + + +## Usage + +* Open a session +* Construct a `PatchCommandData` instance and pass it the following: + * The document ID that contains the time series + * The document change vector (or `null`) + * A `PatchRequest` instance with a JavaScript that appends or removes time series entries +* Call `session.advanced.defer` and pass it the `PatchCommandData` command. + Note that you can call _defer_ multiple times prior to calling _saveChanges_. +* Call `session.saveChanges`. + All patch requests added via _defer_ will be sent to the server for execution when _saveChanges_ is called. + + + +## Patching examples + +#### Append multiple entries: + +In this example, we append 100 time series entries with random heart rate values to a document. + + + +{`$baseTime = DateUtils::today(); + +// Create arrays of timestamps and random values to patch +$values = []; +$timeStamps = []; + +for ($i = 0; $i < 100; $i++) +{ + $values[] = 68 + rand(0, 19); + $timeStamps[] = (clone $baseTime)->add(new DateInterval("PT" . $i . "S")); +} + +$patchRequest = new PatchRequest(); +$patchRequest->setScript(" + var i = 0; + for(i = 0; i < \\$values.length; i++) + \{ + timeseries(id(this), \\$timeseries) + .append ( + new Date(\\$timeStamps[i]), + \\$values[i], + \\$tag); + \}"); +$patchRequest->setValues([ + "timeseries" => "HeartRates", + "timeStamps" => $timeStamps, + "values" => $values, + "tag" => "watches/fitbit" +]); + +$session->advanced()->defer(new PatchCommandData("users/1-A", null, $patchRequest, null)); + +$session->saveChanges(); +`} + + +#### Delete multiple entries: + +In this example, we remove a range of 50 time series entries from a document. + + + +{`// Delete time-series entries +$patchRequest = new PatchRequest(); +$patchRequest->setScript("timeseries(this, \\$timeseries) + .delete( + \\$from, + \\$to + );"); +$patchRequest->setValues([ + "timeseries" => "HeartRates", + "from" => $baseTime, + "to" => (clone $baseTime)->add(new DateInterval("PT49S")) +]); + +$session->advanced()->defer(new PatchCommandData("users/1-A", null, $patchRequest, null)); + +$session->saveChanges(); +`} + + + + + +## Syntax + +**`PatchCommandData`** + + + +{`new PatchCommandData(?string $id, ?string $changeVector, ?PatchRequest $patch, ?PatchRequest $patchIfMissing = null); +`} + + +**`PatchRequest`** + + + +{`class PatchRequest +\{ + // The patching script + private ?string $script = null; + + // Values for the parameters used by the patching script + private ?ObjectMap $values = null; + + // ... getters and setters ...
+\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-python.mdx new file mode 100644 index 0000000000..eb3942375f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_patch-python.mdx @@ -0,0 +1,148 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Patching multiple time series entries (append or delete entries) can be performed via the _Session_ + using [session.advanced.defer](../../../../client-api/operations/patching/single-document.mdx#session-api-using-defer), as described below. + * You can handle a single document at a time. + * The patching action is defined by the provided [JavaScript](../../../../document-extensions/timeseries/client-api/javascript-support.mdx). + +* Patching time series entries can also be done directly on the _Store_ via [Operations](../../../../client-api/operations/what-are-operations.mdx), + where multiple documents can be handled at a time. Learn more in [Patching time series operations](../../../../document-extensions/timeseries/client-api/operations/patch.mdx). + +* In this page: + * [Usage](../../../../document-extensions/timeseries/client-api/session/patch.mdx#usage) + * [Patching examples](../../../../document-extensions/timeseries/client-api/session/patch.mdx#patching-examples) + * [Append multiple entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx#append-multiple-entries) + * [Delete multiple entries](../../../../document-extensions/timeseries/client-api/session/patch.mdx#delete-multiple-entries) + * [Syntax](../../../../document-extensions/timeseries/client-api/session/patch.mdx#syntax) + + +## Usage + +* Open a session +* Construct a `PatchCommandData` instance and pass it the following: + * The document ID that contains the time series + * The document change vector (or `None`) + * A `PatchRequest` instance with a JavaScript that appends or removes time series entries +* Call `session.advanced.defer` and pass it the `PatchCommandData` command. + Note that you can call _Defer_ multiple times prior to calling _SaveChanges_. +* Call `session.save_changes`. + All patch requests added via _Defer_ will be sent to the server for execution when _SaveChanges_ is called. + + + +## Patching examples + +#### Append multiple entries: + +In this example, we append 100 time series entries with random heart rate values to a document. + + + +{`base_line = datetime.utcnow() + +# Create arrays of timestamps and random values to patch +values = [] +time_stamps = [] + +for i in range(100): + values.append(68 + round(19 * random.uniform(0.0, 1.0))) + time_stamps.append(base_line + timedelta(seconds=i)) + +session.advanced.defer( + PatchCommandData( + "users/1-A", + None, + PatchRequest( + script=( + "var i = 0;" + "for(i = 0; i < $values.length; i++)" + "\{" + " timeseries(id(this), $timeseries)" + " .append (" + " new Date($time_stamps[i])," + " $values[i]," + " $tag);" + "\}" + ), + values=\{ + "timeseries": "HeartRates", + "time_stamps": time_stamps, + "values": values, + "tag": "watches/fitbit", + \}, + ), + None, + ) +) + +session.save_changes() +`} + + +#### Delete multiple entries: + +In this example, we remove a range of 50 time series entries from a document. 
+ + + +{`# Delete time-series entries +session.advanced.defer( + PatchCommandData( + "users/1-A", + None, + PatchRequest( + script=("timeseries(this, $timeseries)" ".delete(" " $from," " $to" ");"), + values=\{ + "timeseries": "HeartRates", + "from": base_line, + "to": base_line + timedelta(seconds=49), + \}, + ), + None, + ) +) +`} + + + + + +## Syntax + +**`PatchCommandData`** + + + +{`class PatchCommandData(CommandData): + def __init__( + self, + key: str, + change_vector: Union[None, str], + patch: PatchRequest, + patch_if_missing: Optional[PatchRequest] = None, + ): ... +`} + + + +Learn more about `PatchCommandData` [here](../../../../client-api/operations/patching/single-document.mdx#session-api-using-defer). +**`PatchRequest`** + + + +{`class PatchRequest: + def __init__(self, script: Optional[str] = "", values: Optional[Dict[str, object]] = None): ... +`} + + + +Learn more about `PatchRequest` [here](../../../../client-api/operations/patching/single-document.mdx#session-api-using-defer). + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_querying-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_querying-csharp.mdx new file mode 100644 index 0000000000..22dd636e1f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_querying-csharp.mdx @@ -0,0 +1,536 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Time series data can be effectively queried in RavenDB, + allowing users to access and analyze information based on specific time intervals. + +* Time series queries can be made using: + * The high-level `Query` method utilizing LINQ, + * The lower-level API `DocumentQuery`, + * Or directly through [RQL](../../../../client-api/session/querying/what-is-rql.mdx), + which can be provided to a `RawQuery` or executed from the Studio's [Query view](../../../../studio/database/queries/query-view.mdx). 
+
+* In this page:
+  * [Query](../../../../document-extensions/timeseries/client-api/session/querying.mdx#query)
+      * [Query usage](../../../../document-extensions/timeseries/client-api/session/querying.mdx#query-usage)
+      * [Query examples](../../../../document-extensions/timeseries/client-api/session/querying.mdx#query-examples)
+      * [Query syntax](../../../../document-extensions/timeseries/client-api/session/querying.mdx#query-syntax)
+  * [DocumentQuery](../../../../document-extensions/timeseries/client-api/session/querying.mdx#documentquery)
+      * [DocumentQuery usage](../../../../document-extensions/timeseries/client-api/session/querying.mdx#documentquery-usage)
+      * [DocumentQuery examples](../../../../document-extensions/timeseries/client-api/session/querying.mdx#documentquery-examples)
+      * [DocumentQuery Syntax](../../../../document-extensions/timeseries/client-api/session/querying.mdx#documentquery-syntax)
+  * [RawQuery](../../../../document-extensions/timeseries/client-api/session/querying.mdx#rawquery)
+      * [RawQuery usage](../../../../document-extensions/timeseries/client-api/session/querying.mdx#rawquery-usage)
+      * [RawQuery examples](../../../../document-extensions/timeseries/client-api/session/querying.mdx#rawquery-examples)
+      * [RawQuery syntax](../../../../document-extensions/timeseries/client-api/session/querying.mdx#rawquery-syntax)
+
+
+
+
+Learn more about time series queries in the [section dedicated to this subject](../../../../document-extensions/timeseries/querying/overview-and-syntax.mdx).
+
+## Query
+
+### Query usage
+
+* Open a session
+* Call `session.Query`:
+  * Extend the query using LINQ expressions
+  * Provide a `Where` query predicate to locate documents whose time series you want to query
+  * Use `Select` to choose a time series and project time series data
+  * Execute the query
+* Results will be in the form:
+  * `TimeSeriesRawResult` for non-aggregated data, or -
+  * `TimeSeriesAggregationResult` for aggregated data
+* Note:
+  The RavenDB client translates the LINQ query to [RQL](../../../../client-api/session/querying/what-is-rql.mdx) before transmitting it to the server for execution.
+
+### Query examples
+
+* This LINQ query filters users by their age and retrieves their HeartRates time series.
+  The first occurrence of `Where` filters the documents.
+  The second `Where` filters the time series entries.
+
+
+
+{`using (var session = store.OpenSession())
+{
+    // Define the query:
+    var query = session.Query<User>()
+         // Filter the user documents
+        .Where(u => u.Age < 30)
+         // Call 'Select' to project the time series entries
+        .Select(q => RavenQuery.TimeSeries(q, "HeartRates")
+             // Filter the time series entries
+            .Where(ts => ts.Tag == "watches/fitbit")
+             // 'ToList' must be applied here to the inner time series query definition
+             // This will not trigger query execution at this point
+            .ToList());
+
+    // Execute the query:
+    // The following call to 'ToList' will trigger query execution
+    List<TimeSeriesRawResult> result = query.ToList();
+}
+`}
+
+
+
+{`from "Users" as q
+where q.Age < 30
+select timeseries(from q.HeartRates where (Tag == "watches/fitbit"))
+`}
+
+
+
+
+* In this example, we select a three-day range from the HeartRates time series.
+
+
+
+{`var baseTime = new DateTime(2020, 5, 17, 00, 00, 00);
+
+var query = session.Query<User>()
+    .Select(q => RavenQuery
+        .TimeSeries(q, "HeartRates", baseTime, baseTime.AddDays(3))
+        .ToList());
+
+List<TimeSeriesRawResult> result = query.ToList();
+`}
+
+
+
+{`from "Users" as q
+select timeseries(from q.HeartRates between "2020-05-17T00:00:00.0000000" and "2020-05-20T00:00:00.0000000")
+`}
+
+
+
+
+* In this example, we retrieve a company's stock trade data.
+  Note the usage of named values, so we may address trade Volume [by name](../../../../document-extensions/timeseries/client-api/named-time-series-values.mdx).
+
+
+
+{`using (var session = store.OpenSession())
+{
+    var query = session.Query<Company>()
+        .Where(c => c.Address.City == "New York")
+        .Select(q => RavenQuery.TimeSeries(q, "StockPrices", baseTime, baseTime.AddDays(3))
+            .Where(ts => ts.Tag == "companies/kitchenAppliances")
+            .ToList());
+
+    List<TimeSeriesRawResult> queryResults = query.ToList();
+
+    TimeSeriesEntry[] tsEntries = queryResults[0].Results;
+
+    double volumeDay1 = tsEntries[0].Values[4];
+    double volumeDay2 = tsEntries[1].Values[4];
+    double volumeDay3 = tsEntries[2].Values[4];
+}
+`}
+
+
+
+{`using (var session = store.OpenSession())
+{
+    var query =
+        session.Query<Company>()
+            .Where(c => c.Address.City == "New York")
+             // Use the StockPrice type in the time series query
+            .Select(q => RavenQuery.TimeSeries<StockPrice>(q, "StockPrices", baseTime, baseTime.AddDays(3))
+                .Where(ts => ts.Tag == "companies/kitchenAppliances")
+                .ToList());
+
+    List<TimeSeriesRawResult<StockPrice>> queryResults = query.ToList();
+
+    var tsEntries = queryResults[0].Results;
+
+    double volumeDay1 = tsEntries[0].Value.Volume;
+    double volumeDay2 = tsEntries[1].Value.Volume;
+    double volumeDay3 = tsEntries[2].Value.Volume;
+}
+`}
+
+
+
+
+* In this example, we group heart-rate data of people above the age of 72 into 1-day groups,
+  and retrieve each group's average heart rate and number of measurements.
+  The aggregated results are retrieved as `List<TimeSeriesAggregationResult>`.
+
+
+{`var query = session.Query<User>()
+    .Where(u => u.Age > 72)
+    .Select(q => RavenQuery.TimeSeries(q, "HeartRates", baseline, baseline.AddDays(10))
+        .Where(ts => ts.Tag == "watches/fitbit")
+        .GroupBy(g => g.Days(1))
+        .Select(g => new
+        \{
+            Avg = g.Average(),
+            Cnt = g.Count()
+        \})
+        .ToList());
+
+List<TimeSeriesAggregationResult> result = query.ToList();
+`}
+
+
+### Query syntax
+
+* The `session.Query` syntax is available [here](../../../../client-api/session/querying/how-to-query.mdx#syntax).
+
+* To define a time series query use `RavenQuery.TimeSeries` within the query `Select` clause.
+
+* `RavenQuery.TimeSeries` overloads:
+
+
+
+{`public static ITimeSeriesQueryable TimeSeries(object documentInstance,
+    string name)
+`}
+
+
+
+{`public static ITimeSeriesQueryable TimeSeries(object documentInstance,
+    string name, DateTime from, DateTime to)
+`}
+
+
+  | Parameter            | Type       | Description       |
+  |----------------------|------------|-------------------|
+  | **documentInstance** | `object`   | Document Instance |
+  | **name**             | `string`   | Time Series Name  |
+  | **from** (optional)  | `DateTime` | Range Start<br/>Default: `DateTime.MinValue` |
+  | **to** (optional)    | `DateTime` | Range End<br/>Default: `DateTime.MaxValue` |
+
+* `RavenQuery.TimeSeries` can be extended with the following time series methods:
+
+
+
+{`Offset(TimeSpan offset);
+Scale(double value);
+FromLast(Action<ITimePeriodBuilder> timePeriod);
+FromFirst(Action<ITimePeriodBuilder> timePeriod);
+LoadByTag<TTag>();
+GroupBy(string s);
+GroupBy(Action<ITimePeriodBuilder> timePeriod);
+Where(Expression<Func<TimeSeriesEntry, bool>> predicate);
+`}
+
+
+
+
+## DocumentQuery
+
+### DocumentQuery usage
+
+* Open a session
+* Call `session.Advanced.DocumentQuery`:
+  * Extend the query using RavenDB's fluent API methods
+  * Provide a `WhereEquals` query predicate to locate documents whose time series you want to query
+  * Use `SelectTimeSeries` to choose a time series and project time series data
+  * Execute the query
+* Results will be in the form:
+  * `TimeSeriesRawResult` for non-aggregated data, or -
+  * `TimeSeriesAggregationResult` for aggregated data
+* Note:
+  The RavenDB client translates the query to [RQL](../../../../client-api/session/querying/what-is-rql.mdx) before transmitting it to the server for execution.
+
+### DocumentQuery examples
+
+* A _DocumentQuery_ using only the `From()` method.
+  The query returns all entries from the 'HeartRates' time series.
+
+
+{`// Define the query:
+var query = session.Advanced.DocumentQuery<User>()
+    .SelectTimeSeries(builder => builder
+        .From("HeartRates")
+         // 'ToList' must be applied here to the inner time series query definition
+         // This will not trigger query execution at this point
+        .ToList());
+
+
+// Execute the query:
+// The following call to 'ToList' will trigger query execution
+List<TimeSeriesRawResult> results = query.ToList();
+`}
+
+
+
+* A _DocumentQuery_ using `Between()`.
+  The query returns only entries from the specified time range.
+
+
+{`var query = session.Advanced.DocumentQuery<User>()
+    .SelectTimeSeries(builder => builder
+        .From("HeartRates")
+        .Between(DateTime.Now, DateTime.Now.AddDays(1))
+        .ToList());
+
+List<TimeSeriesRawResult> results = query.ToList();
+`}
+
+
+
+* A _DocumentQuery_ using `FromFirst()`.
+  The query returns the first three days of the 'HeartRates' time series.
+
+
+{`var query = session.Advanced.DocumentQuery<User>()
+    .SelectTimeSeries(builder => builder
+        .From("HeartRates")
+        .FromFirst(x => x.Days(3))
+        .ToList());
+
+List<TimeSeriesRawResult> results = query.ToList();
+`}
+
+
+
+* A _DocumentQuery_ using `FromLast()`.
+  The query returns the last three days of the 'HeartRates' time series.
+
+
+{`var query = session.Advanced.DocumentQuery<User>()
+    .SelectTimeSeries(builder => builder
+        .From("HeartRates")
+        .FromLast(x => x.Days(3))
+        .ToList());
+
+List<TimeSeriesRawResult> results = query.ToList();
+`}
+
+
+
+* A _DocumentQuery_ that loads the related `Monitor` documents that are specified in the time entries tags.
+  The results are then filtered by their content.
+
+
+
+{`var query = session.Advanced.DocumentQuery<User>()
+    .SelectTimeSeries(builder => builder
+        .From("HeartRates")
+        .LoadByTag<Monitor>()
+        .Where((entry, monitor) => entry.Value <= monitor.Accuracy)
+        .ToList());
+
+List<TimeSeriesRawResult> results = query.ToList();
+`}
+
+
+
+{`public class Monitor
+{
+    public double Accuracy { get; set; }
+}
+`}
+
+
+
+### DocumentQuery syntax
+
+The session [DocumentQuery](../../../../client-api/session/querying/document-query/what-is-document-query.mdx),
+which is accessible from `session.Advanced`, can be extended with several useful time series methods.
+To access these methods, begin with method `SelectTimeSeries()`:
+
+
+
+{`IDocumentQuery<TTimeSeries> SelectTimeSeries<TTimeSeries>(Func<ITimeSeriesQueryBuilder, TTimeSeries> timeSeriesQuery);
+`}
+
+
+
+`SelectTimeSeries()` takes an `ITimeSeriesQueryBuilder`.
The builder has the following methods:
+
+
+
+{`From(string name);
+Between(DateTime start, DateTime end);
+FromLast(Action<ITimePeriodBuilder> timePeriod);
+FromFirst(Action<ITimePeriodBuilder> timePeriod);
+LoadByTag<TTag>();
+//LoadByTag is extended by a special version of Where():
+Where(Expression<Func<TimeSeriesEntry, TTag, bool>> predicate);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| **name** | `string` | The name of the time series (in one or more documents) to query |
+| **start** | `DateTime` | First parameter for `Between()`.<br/>The beginning of the time series range to filter. |
+| **end** | `DateTime` | Second parameter for `Between()`.<br/>The end of the time series range to filter. |
+| **timePeriod** | `Action<ITimePeriodBuilder>` | Expression returning a number of time units representing a time series range either at the beginning or end of the queried time series. |
+| `LoadByTag` type parameter | `TTag` | Time series entry tags can be just strings, but they can also be document IDs, representing a reference to a related document. `LoadByTag` takes the type of the entity. |
+| **predicate** | `Expression<Func<TimeSeriesEntry, TTag, bool>>` | A filter applied to the time series entries; when used after `LoadByTag`, the predicate also receives the loaded tag document. |
+
+`FromLast()` and `FromFirst()` take an `ITimePeriodBuilder`, which is used to represent a range of time from milliseconds to years:
+
+
+
+{`public interface ITimePeriodBuilder
+\{
+    Milliseconds(int duration);
+    Seconds(int duration);
+    Minutes(int duration);
+    Hours(int duration);
+    Days(int duration);
+    Months(int duration);
+    Quarters(int duration);
+    Years(int duration);
+\}
+`}
+
+
+
+#### Return Value:
+
+* **`List<TimeSeriesAggregationResult>`** for aggregated data.
+  When the query [aggregates time series entries](../../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx),
+  the results are returned in an aggregated array.
+
+* **`List<TimeSeriesRawResult>`** for non-aggregated data.
+  When the query doesn't aggregate time series entries, the results are returned in a list of time series results.
+
+
+
+## RawQuery
+
+### RawQuery usage
+
+* Open a session
+* Call `session.Advanced.RawQuery` and pass it the raw RQL that will be sent to the server
+* Results will be in the form:
+  * `TimeSeriesRawResult` for non-aggregated data, or -
+  * `TimeSeriesAggregationResult` for aggregated data
+* Note:
+  The raw query transmits the provided RQL to the server as is, without checking or altering its content.
+
+### RawQuery examples
+
+* In this example, we retrieve all HeartRates time series for all users under 30.
+
+
+
+{`// Raw query with no aggregation - Select syntax
+var query = session.Advanced.RawQuery<TimeSeriesRawResult>(@"
+    from Users where Age < 30
+    select timeseries (
+        from HeartRates
+    )");
+
+List<TimeSeriesRawResult> results = query.ToList();
+`}
+
+
+
+
+* In this example, a raw RQL query retrieves 24 hours of heart rate data from users under the age of 30.
+  The query does not aggregate data, so results are in the form of a `TimeSeriesRawResult` list.
+  We define an **offset** to adjust the retrieved results to the client's local time zone.
+
+
+
+{`var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); // May 17 2020, 00:00:00
+
+// Raw query with no aggregation - Declare syntax
+var query =
+    session.Advanced.RawQuery<TimeSeriesRawResult>(@"
+        declare timeseries getHeartRates(user)
+        {
+            from user.HeartRates
+                between $from and $to
+                offset '02:00'
+        }
+        from Users as u where Age < 30
+        select getHeartRates(u)
+        ")
+    .AddParameter("from", baseTime)
+    .AddParameter("to", baseTime.AddHours(24));
+
+List<TimeSeriesRawResult> results = query.ToList();
+`}
+
+
+
+{`var baseline = new DateTime(2020, 5, 17, 00, 00, 00); // May 17 2020, 00:00:00
+
+// Raw query with no aggregation - Select syntax
+var query =
+    session.Advanced.RawQuery<TimeSeriesRawResult>(@"
+        from Users as u where Age < 30
+        select timeseries (
+            from HeartRates
+            between $from and $to
+            offset '02:00'
+        )")
+    .AddParameter("from", baseline)
+    .AddParameter("to", baseline.AddHours(24));
+
+var results = query.ToList();
+`}
+
+
+
+
+* In this example, the query aggregates 7 days of HeartRates entries into 1-day groups.
+  From each group, two values are selected and projected to the client:
+  the **min** and **max** hourly HeartRates values.
+  The aggregated results are in the form of a `TimeSeriesAggregationResult` list.
+
+
+{`var baseline = new DateTime(2020, 5, 17, 00, 00, 00); // May 17 2020, 00:00:00
+
+// Raw Query with aggregation
+var query =
+    session.Advanced.RawQuery<TimeSeriesAggregationResult>(@"
+        from Users as u
+        select timeseries(
+            from HeartRates
+            between $start and $end
+            group by '1 days'
+            select min(), max()
+            offset '03:00')
+        ")
+    .AddParameter("start", baseline)
+    .AddParameter("end", baseline.AddDays(7));
+
+List<TimeSeriesAggregationResult> results = query.ToList();
+`}
+
+
+### RawQuery syntax
+
+
+
+{`IRawDocumentQuery<T> RawQuery<T>(string query);
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|----------|----------------------|
+| **query**  | `string` | The RQL query string |
+
+**Return Value**:
+
+* **`List<TimeSeriesAggregationResult>`** for aggregated data.
+  When the query [aggregates time series entries](../../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx),
+  the results are returned in an aggregated array.
+
+* **`List<TimeSeriesRawResult>`** for non-aggregated data.
+  When the query doesn't aggregate time series entries, the results are returned in a list of time series results.
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_querying-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_querying-nodejs.mdx
new file mode 100644
index 0000000000..bf454bb25a
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/_querying-nodejs.mdx
@@ -0,0 +1,489 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Time series data can be effectively queried in RavenDB,
+  allowing users to access and analyze information based on specific time intervals.
+
+* Time series queries can be made using:
+  * The `query` method
+  * Or directly through [RQL](../../../../client-api/session/querying/what-is-rql.mdx),
+    which can be provided to a `rawQuery` or executed from the Studio's [Query view](../../../../studio/database/queries/query-view.mdx).
+
+* In this page:
+  * [Query](../../../../document-extensions/timeseries/client-api/session/querying.mdx#query)
+      * [Query usage](../../../../document-extensions/timeseries/client-api/session/querying.mdx#query-usage)
+      * [Query examples](../../../../document-extensions/timeseries/client-api/session/querying.mdx#query-examples)
+      * [Query syntax](../../../../document-extensions/timeseries/client-api/session/querying.mdx#query-syntax)
+  * [RawQuery](../../../../document-extensions/timeseries/client-api/session/querying.mdx#rawquery)
+      * [RawQuery usage](../../../../document-extensions/timeseries/client-api/session/querying.mdx#rawquery-usage)
+      * [RawQuery examples](../../../../document-extensions/timeseries/client-api/session/querying.mdx#rawquery-examples)
+      * [RawQuery syntax](../../../../document-extensions/timeseries/client-api/session/querying.mdx#rawquery-syntax)
+
+
+
+
+Learn more about time series queries in the [section dedicated to this subject](../../../../document-extensions/timeseries/querying/overview-and-syntax.mdx).
+ +## Query + +### Query usage + +* Open a session +* Call `session.query`: + * Provide a query predicate to locate documents whose time series you want to query + * Use `selectTimeSeries` to choose a time series and project time series data + * Execute the query +* Results will be in the form: + * `TimeSeriesRawResult` for non-aggregated data, or - + * `TimeSeriesAggregationResult` for aggregated data +* Note: + The RavenDB client translates the query to [RQL](../../../../client-api/session/querying/what-is-rql.mdx) before transmitting it to the server for execution. +### Query examples + +* This query filters users by their age and retrieves their HeartRates time series. + + + +{`// Define the time series query part (expressed in RQL): +const tsQueryText = \` + from HeartRates + where Tag == "watches/fitbit"\`; + +// Define the high-level query: +const query = session.query({ collection: "users" }) + .whereLessThan("age", 30) + // Call 'selectTimeSeries' and pass it: + // * the time series query text + // * the \`TimeSeriesRawResult\` return type + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +// Execute the query: +const results = await query.all(); + +// Access entries results: +rawResults = results[0]; +assert.equal((rawResults instanceof TimeSeriesRawResult), true); + +const tsEntry = rawResults.results[0]; +assert.equal((tsEntry instanceof TimeSeriesEntry), true); + +const tsValue = tsEntry.value; +`} + + + + +{`from "users" +where age < 30 +select timeseries( + from "HeartRates" + where Tag == "watches/fitbit" +) +`} + + + + +* In this example, we select a 5-minute range from the HeartRates time series. + + + +{`const startTime = new Date(); +const endTime = new Date(startTime.getTime() + 5 * 60_000); + +// Define the time series query text: +const tsQueryText = \` + from HeartRates + between $start and $end\`; + +// Define the query: +const query = session.query({ collection: "users" }) + // Call 'selectTimeSeries' and pass it: + // * the time series query text + // * the \`TimeSeriesRawResult\` return type + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult) + // Add the parameters content + .addParameter("start", startTime) + .addParameter("end", endTime); + +// Execute the query: +const results = await query.all(); +`} + + + + +{`from "Users" +select timeseries( + from "HeartRates" + between "2024-05-19T18:13:17.466Z" and "2024-05-19T18:18:17.466Z" +) +`} + + + + +* In this example, we retrieve a company's stock trade data. + Note the usage of named values, so we may address trade Volume [by name](../../../../document-extensions/timeseries/client-api/named-time-series-values.mdx). + This example is based on the sample entries that were entered in [this example](../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values). 
+ + + +{`const oneDay = 24 * 60 * 60 * 1000; +const startTime = new Date(); +const endTime = new Date(startTime.getTime() + 3 * oneDay); + +// Note: the 'where' clause must come after the 'between' clause +const tsQueryText = \` + from StockPrices + between $start and $end + where Tag == "AppleTech"\`; + +const query = session.query({ collection: "companies" }) + .whereEquals("address.city", "New York") + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult) + .addParameter("start", startTime) + .addParameter("end", endTime); + +// Execute the query: +const results = await query.all(); + +// Access entries results: +const tsEntries = results[0].results; + +const volumeDay1 = tsEntries[0].values[4]; +const volumeDay2 = tsEntries[1].values[4]; +const volumeDay3 = tsEntries[2].values[4]; +`} + + + + +{`const oneDay = 24 * 60 * 60 * 1000; +const startTime = new Date(); +const endTime = new Date(startTime.getTime() + 3 * oneDay); + +// Note: the 'where' clause must come after the 'between' clause +const tsQueryText = \` + from StockPrices + between $start and $end + where Tag == "AppleTech"\`; + +const query = session.query({ collection: "companies" }) + .whereEquals("address.city", "New York") + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult) + .addParameter("start", startTime) + .addParameter("end", endTime); + +// Execute the query: +const results = await query.all(); + +// Access entries results: +const tsEntries = results[0].results; + +// Call 'asTypedEntry' to be able to access the entry's values by their names +// Pass the class type (StockPrice) +const volumeDay1 = tsEntries[0].asTypedEntry(StockPrice).value.volume; +const volumeDay2 = tsEntries[1].asTypedEntry(StockPrice).value.volume; +const volumeDay3 = tsEntries[2].asTypedEntry(StockPrice).value.volume; +`} + + + + +{`from "companies" +where address.city == "New York" +select timeseries( + from StockPrices + between $start and $end + where Tag == "AppleTech" +) +{"start":"2024-05-20T07:54:07.259Z","end":"2024-05-23T07:54:07.259Z"} +`} + + + + +* In this example, we group heart-rate data of people above the age of 72 into 1-day groups, + For each group, we retrieve the number of measurements, the minimum, maximum, and average heart rate. + + + +{`const oneDay = 24 * 60 * 60 * 1000; +const startTime = new Date(); +const endTime = new Date(startTime.getTime() + 10 * oneDay); + +const tsQueryText = \`from HeartRates between $start and $end + where Tag == "watches/fitbit" + group by "1 day" + select count(), min(), max(), avg()\`; + +const query = session.query({ collection: "users" }) + .whereGreaterThan("age", 72) + // Call 'selectTimeSeries' and pass it: + // * the time series query text + // * the \`TimeSeriesAggregationResult\` return type + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesAggregationResult) + .addParameter("start", startTime) + .addParameter("end", endTime); + +// Execute the query: +const results = await query.all(); +const aggregatedResults = results[0].results; + +const averageForDay1 = aggregatedResults[0].average[0]; +const averageForDay2 = aggregatedResults[1].average[0]; +`} + + + + +{`from "users" +where age > 72 +select timeseries( + from HeartRates between $start and $end + where Tag == "watches/fitbit" + group by '1 day' + select count(), min(), max(), avg() +) +{"start":"2024-05-20T09:32:58.951Z","end":"2024-05-30T09:32:58.951Z"} +`} + + + +### Query syntax + +The `session.query` syntax is available [here](../../../../client-api/session/querying/how-to-query.mdx#syntax). 
+ +Extend the `session.query` method with `selectTimeSeries()`. + + + +{`selectTimeSeries(timeSeriesQuery, projectionClass); +`} + + + +| Parameter | Type | Description | +|-----------------------|---------------------|---------------------------------------------------------------------------------| +| **timeSeriesQuery** | `(builder) => void` | The time series query builder | +| **projectionClass** | `object` | The query result type
`TimeSeriesRawResult` or `TimeSeriesAggregationResult` | + +The time series query builder has one method: + + + +{`raw(queryText); +`} + + + +| Parameter | Type | Description | +|----------------|----------|-----------------------------------------------| +| **queryText** | `string` | The time series query part, expressed in RQL. | + +| Return value | Description | +|---------------------------------|---------------------------------------------------------------------------------------------------------------------------| +| `TimeSeriesRawResult[]` | The returned value for non-aggregated data | +| `TimeSeriesAggregationResult[]` | The returned value for [aggregated data](../../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) | + + + +{`class TimeSeriesRawResult \{ + results; // TimeSeriesEntry[] + asTypedResult>(clazz); +\} + +class TimeSeriesAggregationResult extends TimeSeriesQueryResult \{ + results; // TimeSeriesRangeAggregation[]; + asTypedEntry(clazz); +\} +`} + + + + + +## RawQuery + +### RawQuery usage + +* Open a session +* Call `session.advanced.rawQuery`, pass it the raw RQL that will be sent to the server +* Results will be in the form: + * `TimeSeriesRawResult` for non-aggregated data, or - + * `TimeSeriesAggregationResult` for aggregated data +* Note: + The raw query transmits the provided RQL to the server as is, without checking or altering its content. +### RawQuery examples + +* In this example, we retrieve all HearRates time series for all users under 30. + + + +{`const rql = \`from users where age < 30 + select timeseries( + from HeartRates + )\`; + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult); + +const result = await query.all(); +`} + + + + +{`from users where age < 30 +select timeseries( + from HeartRates +) +`} + + + + +* In this example, a raw RQL query retrieves 24 hours of heart rate data from users under 30. + The query does not aggregate data, so results are in the form of a `TimeSeriesRawResult` list. + We define an **offset**, to adjust retrieved results to the client's local time-zone. 
+ + + +{`const rql = \` + declare timeseries getHeartRates(user) + { + from user.HeartRates + between $start and $end + offset "03:00" + } + + from users as u where age < 30 + select getHeartRates(u)\`; + +const startTime = new Date(); +const endTime = new Date(startTime.getTime() + 24 * 60 * 60 * 1000); + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult) + .addParameter("start", startTime) + .addParameter("end", endTime); + +const result = await query.all(); +`} + + + + +{`const rql = \` + from Users as u where Age < 30 + select timeseries ( + from HeartRates + between $start and $end + offset "03:00" + )\`; + +const startTime = new Date(); +const endTime = new Date(startTime.getTime() + 24 * 60 * 60 * 1000); + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult) + .addParameter("start", startTime) + .addParameter("end", endTime); + +const result = await query.all(); +`} + + + + +{`// declare syntax +// ============== + +declare timeseries getHeartRates(user) +{ + from user.HeartRates + between $start and $end + offset '03:00' +} + +from users as u where age < 30 +select getHeartRates(u) +{"start":"2024-05-20T11:52:22.316Z","end":"2024-05-21T11:52:22.316Z"} + +// select syntax +// ============= + +from Users as u where Age < 30 +select timeseries ( + from HeartRates + between $start and $end + offset "03:00" +) +{"start":"2024-05-20T11:55:56.701Z","end":"2024-05-21T11:55:56.701Z"} +`} + + + + +* In this example, the query aggregates 7 days of HeartRates entries into 1-day groups. + From each group, two values are selected and projected to the client: + the **min** and **max** hourly HeartRates values. + The aggregated results are in the form of a `TimeSeriesAggregationResult` list. + + + +{`const rql = \` + from users as u + select timeseries( + from HeartRates between $start and $end + group by '1 day' + select min(), max() + offset "03:00" + )\`; + +const oneDay = 24 * 60 * 60 * 1000; +const startTime = new Date(); +const endTime = new Date(startTime.getTime() + 7 * oneDay); + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult) + .addParameter("start", startTime) + .addParameter("end", endTime); + +const result = await query.all(); +`} + + + + +{`from users as u +select timeseries( + from HeartRates between $start and $end + group by '1 day' + select min(), max() + offset "03:00" +) +{"start":"2024-05-20T12:06:40.595Z","end":"2024-05-27T12:06:40.595Z"} +`} + + + +### RawQuery syntax + + + +{`session.rawQuery(query); +`} + + + +| Parameter | Type | Description | +|------------|----------|----------------------| +| **query** | `string` | The RQL query string | + +The return value is the same as listed under the [query syntax](../../../../document-extensions/timeseries/client-api/session/querying.mdx#query-syntax). 
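+
+As a minimal end-to-end sketch (using only the calls documented above; the `users` collection and its `HeartRates` series are assumptions carried over from this page's examples):
+
+
+
+{`const rql = \`
+    from users
+    select timeseries (
+        from HeartRates
+    )\`;
+
+// 'rawQuery' transmits the RQL as is;
+// the second argument sets the expected result type
+const query = session.advanced.rawQuery(rql, TimeSeriesRawResult);
+
+const results = await query.all();
+
+// Each TimeSeriesRawResult exposes its entries via its 'results' property
+for (const rawResult of results) \{
+    for (const entry of rawResult.results) \{
+        console.log(entry.timestamp, entry.values, entry.tag);
+    \}
+\}
+`}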
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/append.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/append.mdx new file mode 100644 index 0000000000..392d36d647 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/append.mdx @@ -0,0 +1,52 @@ +--- +title: "Append & Update Time Series" +hide_table_of_contents: true +sidebar_label: Append +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import AppendCsharp from './_append-csharp.mdx'; +import AppendPython from './_append-python.mdx'; +import AppendPhp from './_append-php.mdx'; +import AppendNodejs from './_append-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/delete.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/delete.mdx new file mode 100644 index 0000000000..82d42bc111 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/delete.mdx @@ -0,0 +1,52 @@ +--- +title: "Delete Time Series" +hide_table_of_contents: true +sidebar_label: Delete +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeleteCsharp from './_delete-csharp.mdx'; +import DeletePython from './_delete-python.mdx'; +import DeletePhp from './_delete-php.mdx'; +import DeleteNodejs from './_delete-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_category_.json new file mode 100644 index 0000000000..1200b0577f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": Get, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-csharp.mdx new file mode 100644 index 0000000000..ff6991bf91 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-csharp.mdx @@ -0,0 +1,223 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `TimeSeriesFor.Get` to retrieve a range of entries from a **single** time series. + To retrieve a range of entries from **multiple** series, + use the [GetMultipleTimeSeriesOperation](../../../../../document-extensions/timeseries/client-api/operations/get.mdx#getmultipletimeseriesoperation) operation. + +* The retrieved data can be paged to get the time series entries gradually, one custom-size page at a time. + +* By default, the session will track the retrieved time series data. 
+ See [disable tracking](../../../../../client-api/session/configuration/how-to-disable-tracking.mdx) to learn how to disable. + +* When getting the time series entries, + you can also _include_ the series' **parent document** and/or **documents referred to by the entry tag**. + Learn more [below](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#include-parent-and-tagged-documents). + +* Calling `TimeSeriesFor.Get` will result in a trip to the server unless the series' parent document was loaded + (or queried for) with the time series included beforehand. + Learn more in: [Including time series](../../../../../document-extensions/timeseries/client-api/session/include/overview.mdx). + +* In this page: + * [`Get` usage](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-usage) + * [Examples](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#examples) + * [Get all entries](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-all-entries) + * [Get range of entries](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-range-of-entries) + * [Get entries with multiple values](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-entries-with-multiple-values) + * [Include parent and tagged documents](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#include-parent-and-tagged-documents) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#syntax) + + +## `Get` usage + +* Open a session. +* Create an instance of `TimeSeriesFor` and pass it the following: + * Provide an explicit document ID, -or- + pass an [entity tracked by the session](../../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.Query](../../../../../client-api/session/querying/how-to-query.mdx) or from [session.Load](../../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `TimeSeriesFor.Get`. + + + +## Examples + +#### Get all entries: + +In this example, we retrieve all entries of the "Heartrate" time series. +The ID of the parent document is explicitly specified. + + + +{`// Get all time series entries +TimeSeriesEntry[] val = session.TimeSeriesFor("users/john", "HeartRates") + .Get(DateTime.MinValue, DateTime.MaxValue); +`} + + +#### Get range of entries: + +In this example, we query for a document and get its "Heartrate" time series data. + + + +{`// Query for a document with the Name property "John" +// and get its HeartRates time-series values +using (var session = store.OpenSession()) +\{ + var baseline = DateTime.Today; + + IRavenQueryable query = session.Query() + .Where(u => u.Name == "John"); + + var result = query.ToList(); + + TimeSeriesEntry[] val = session.TimeSeriesFor(result[0], "HeartRates") + .Get(DateTime.MinValue, DateTime.MaxValue); + + session.SaveChanges(); +\} +`} + + +#### Get entries with multiple values: + +* Here, we check whether a stock's closing-time price is rising from day to day (over three days). + This example is based on the sample entries that were entered in [this example](../../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values). 
+ +* Since each time series entry contains multiple StockPrice values, + we include a sample that uses [named time series values](../../../../../document-extensions/timeseries/client-api/named-time-series-values.mdx) + to make the code easier to read. + + + + +{`// Use Get without a named type +// Is the stock's closing-price rising? +bool goingUp = false; + +using (var session = store.OpenSession()) +{ + TimeSeriesEntry[] val = session.TimeSeriesFor("users/john", "StockPrices") + .Get(); + + var closePriceDay1 = val[0].Values[1]; + var closePriceDay2 = val[1].Values[1]; + var closePriceDay3 = val[2].Values[1]; + + if ((closePriceDay2 > closePriceDay1) + && + (closePriceDay3 > closePriceDay2)) + goingUp = true; +} +`} + + + + +{`goingUp = false; + +using (var session = store.OpenSession()) +{ + // Call 'Get' with the custom StockPrice class type + TimeSeriesEntry[] val = session.TimeSeriesFor("users/john") + .Get(); + + var closePriceDay1 = val[0].Value.Close; + var closePriceDay2 = val[1].Value.Close; + var closePriceDay3 = val[2].Value.Close; + + if ((closePriceDay2 > closePriceDay1) + && + (closePriceDay3 > closePriceDay2)) + goingUp = true; +} +`} + + + + + + +## Include parent and tagged documents + +* When retrieving time series entries using `TimeSeriesFor.Get`, + you can include the series' parent document and/or documents referred to by the entries [tags](../../../../../document-extensions/timeseries/overview.mdx#tags). + +* The included documents will be cached in the session, and instantly retrieved from memory if loaded by the user. + +* Use the following syntax to include the parent or tagged documents: + + + +{`// Get all time series entries +TimeSeriesEntry[] entries = + session.TimeSeriesFor("users/john", "HeartRates") + .Get(DateTime.MinValue, DateTime.MaxValue, + includes: builder => builder + // Include documents referred-to by entry tags + .IncludeTags() + // Include Parent Document + .IncludeDocument()); +`} + + + + + +## Syntax + + + +{`TimeSeriesEntry[] Get(DateTime? from = null, DateTime? to = null, + int start = 0, int pageSize = int.MaxValue); +`} + + + + + +{`//The stongly-typed API is used, to address time series values by name. +TimeSeriesEntry[] Get(DateTime? from = null, DateTime? to = null, +int start = 0, int pageSize = int.MaxValue); +`} + + + +| Parameter | Type | Description | +|--------------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **from** | `DateTime?` | Get the range of time series entries starting from this timestamp (inclusive).
Default: `DateTime.MinValue` | +| **to** | `DateTime?` | Get the range of time series entries ending at this timestamp (inclusive).
Default: `DateTime.MaxValue` | +| **start** | `int` | Paging first entry.
E.g. 50 means the first page would start at the 50th time series entry.
Default: 0, for the first time-series entry. | +| **pageSize** | `int` | Paging page-size.
E.g. set `pageSize` to 10 to retrieve pages of 10 entries.
Default: `int.MaxValue`, for all time series entries. | + +**Return Values** + +* **`TimeSeriesEntry[]`** - an array of time series entry classes. + + + +{`public class TimeSeriesEntry +\{ + public DateTime Timestamp \{ get; set; \} + public double[] Values \{ get; set; \} + public string Tag \{ get; set; \} + public bool IsRollup \{ get; set; \} + + public double Value; + + //.. +\} +`} + + + +* **`TimeSeriesEntry[]`** - Time series values that can be referred to [by name](../../../../../document-extensions/timeseries/client-api/named-time-series-values.mdx). + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-nodejs.mdx new file mode 100644 index 0000000000..2a124591f2 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-nodejs.mdx @@ -0,0 +1,242 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `timeSeriesFor.get` to retrieve a range of entries from a **single** time series. + To retrieve a range of entries from **multiple** series, + use the [GetMultipleTimeSeriesOperation](../../../../../document-extensions/timeseries/client-api/operations/get.mdx#getmultipletimeseriesoperation) operation. + +* The retrieved data can be paged to get the time series entries gradually, one custom-size page at a time. + +* By default, the session will track the retrieved time series data. + See [disable tracking](../../../../../client-api/session/configuration/how-to-disable-tracking.mdx) to learn how to disable. + +* When getting the time series entries, + you can also _include_ the series' **parent document** and/or **documents referred to by the entry tag**. + Learn more [below](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#include-parent-and-tagged-documents). + +* Calling `timeSeriesFor.get` will result in a trip to the server unless the series' parent document was loaded + (or queried for) with the time series included beforehand. + Learn more in [Including time series](../../../../../document-extensions/timeseries/client-api/session/include/overview.mdx). + +* In this page: + * [`get` usage](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-usage) + * [Examples](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#examples) + * [Get all entries](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-all-entries) + * [Get range of entries](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-range-of-entries) + * [Get entries with multiple values](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-entries-with-multiple-values) + * [Include parent and tagged documents](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#include-parent-and-tagged-documents) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#syntax) + + +## `get` usage + +* Open a session. 
+* Create an instance of `timeSeriesFor` and pass it the following: + * Provide an explicit document ID, -or- + pass an [entity tracked by the session](../../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.query](../../../../../client-api/session/querying/how-to-query.mdx) or from [session.load](../../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `timeSeriesFor.get`. + + + +## Examples + +#### Get all entries: + +In this example, we retrieve all entries of the "Heartrate" time series. +The ID of the parent document is explicitly specified. + + + +{`// Get all time series entries +const allEntries = await session + .timeSeriesFor("users/john", "HeartRates") + .get(); +`} + + +#### Get range of entries: + +In this example, we query for a document and get a range of entries from its "Heartrate" time series. + + + +{`// Query for a document +const user = await session.query(\{ collection: "users" \}) + .whereEquals("name", "John") + .first(); + +const from = new Date(); +const to = new Date(baseTime.getTime() + 60_000 * 5); + +// Retrieve a range of 6 entries +const tsEntries = await session + .timeSeriesFor(user, "HeartRates") + .get(from, to); +`} + + +#### Get entries with multiple values: + +* Here, we check if a stock's closing price is rising consecutively over three days. + This example is based on the sample entries that were entered in [this example](../../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values). + +* Since each time series entry contains multiple StockPrice values, + we include a sample that uses [named time series values](../../../../../document-extensions/timeseries/client-api/named-time-series-values.mdx) + to make the code easier to read. + + + + +{`let goingUp = false; + +const allEntries = await session + .timeSeriesFor("users/john", "StockPrices") + .get(); + +const closePriceDay1 = allEntries[0].values[1]; +const closePriceDay2 = allEntries[1].values[1]; +const closePriceDay3 = allEntries[2].values[1]; + +// Check if the stock's closing price is rising +if ((closePriceDay2 > closePriceDay1) && (closePriceDay3 > closePriceDay2)) { + goingUp = true; +} +`} + + + + +{`let goingUp = false; + +const allEntries = await session + .timeSeriesFor("users/john", "StockPrices") + .get(); + +// Call 'asTypedEntry' to be able to access the entry's values by their names +// Pass the class type (StockPrice) +const typedEntry1 = allEntries[0].asTypedEntry(StockPrice); + +// Access the entry value by its StockPrice class property name (close) +const closePriceDay1 = typedEntry1.value.close; + +const typedEntry2 = allEntries[1].asTypedEntry(StockPrice); +const closePriceDay2 = typedEntry2.value.close; + +const typedEntry3 = allEntries[2].asTypedEntry(StockPrice); +const closePriceDay3 = typedEntry3.value.close; + +// Check if the stock's closing price is rising +if ((closePriceDay2 > closePriceDay1) && (closePriceDay3 > closePriceDay2)) { + goingUp = true; +} +`} + + + + + + +## Include parent and tagged documents + +* When retrieving time series entries using `timeSeriesFor.get`, + you can include the series' parent document and/or documents referred to by the entries [tags](../../../../../document-extensions/timeseries/overview.mdx#tags). + +* The included documents will be cached in the session, and instantly retrieved from memory if loaded by the user. 
+ +* Use the following syntax to include the parent or tagged documents: + + + +{`const allEntries = await session + .timeSeriesFor("users/john", "HeartRates") + // Get all entries + .get(null, null, builder => builder + .includeDocument() // include the parent document + .includeTags()); // include documents referenced in the entries tags + +// The following 'load' call will not trigger a server request +const user = await session.load("users/john"); +`} + + + + + +## Syntax + + + +{`// Available overloads: +// ==================== + +get(); // Get all entries + +get(from, to); +get(from, to, start); + +get(start, pageSize); +get(from, to, start, pageSize); + +get(from, to, includes); +get(from, to, includes, start); +get(from, to, includes, start, pageSize); +`} + + + +| Parameter | Type | Description | +|--------------|----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------| +| **from** | `Date` | Get the range of time series entries starting from this timestamp (inclusive).
Pass `null` to use the minimum date value. | +| **to** | `Date` | Get the range of time series entries ending at this timestamp (inclusive).
Pass `null` to use the maximum date value. | +| **start** | `number` | Paging first entry.
E.g. 50 means the first page would start at the 50th time series entry.
Default: 0, for the first time-series entry. | +| **pageSize** | `number` | Paging page-size.
E.g. set `pageSize` to 10 to retrieve pages of 10 entries.
Default: the equivalent of C# `int.MaxValue`, for all time series entries. | +| **includes** | `(includeBuilder) => void` | Builder function with a fluent API
containing the `includeTags` and `includeDocument` methods. | + +| Return value | | +|------------------------------|----------------------------------------------| +| `Promise` | A `Promise` resolving to the list of entries | + + + +{`class TimeSeriesEntry \{ + // The entry's time stamp + timestamp; // Date + + // The entry's tag, can contain a related document ID + tag; // string + + // List of up to 32 values for this entry + values; // number[] + + // Is this an entry that belongs to a "rollup" time series + isRollup; // boolean + + // Nodes info for incremental time series + nodeValues; // Record; + + // A method that returns the entry as a typed entry (TypedTimeSeriesEntry) + asTypedEntry(clazz); // 'clazz' designates the type for the value +\} + +class TypedTimeSeriesEntry \{ + timestamp; // Date + tag; // string + values; // number[] + isRollup; // boolean + + // Access the value of a typed entry as an object + value; // object of type clazz +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-php.mdx new file mode 100644 index 0000000000..3a0fbb9fa3 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-php.mdx @@ -0,0 +1,215 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `timeSeriesFor.get` to retrieve a range of entries from a **single** time series. + To retrieve a range of entries from **multiple** series, + use the `GetMultipleTimeSeriesOperation`operation. + +* The retrieved data can be paged to get the time series entries gradually, one custom-size page at a time. + +* By default, the session will track the retrieved time series data. + See [disable tracking](../../../../../client-api/session/configuration/how-to-disable-tracking.mdx) to learn how to disable. + +* When getting the time series entries, + you can also _include_ the series' **parent document** and/or **documents referred to by the entry tag**. + Learn more [below](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#include-parent-and-tagged-documents). + +* Calling `timeSeriesFor.get` will result in a trip to the server unless the series' parent document was loaded + (or queried for) with the time series included beforehand. + +* In this page: + * [`get` usage](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-usage) + * [Examples](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#examples) + * [Get all entries](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-all-entries) + * [Get range of entries](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-range-of-entries) + * [Get entries with multiple values](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-entries-with-multiple-values) + * [Include parent and tagged documents](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#include-parent-and-tagged-documents) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#syntax) + + +## `get` usage + +* Open a session. 
+* Create an instance of `timeSeriesFor` and pass it the following: + * Provide an explicit document ID, -or- + pass an [entity tracked by the session](../../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. a document object returned from [session.query](../../../../../client-api/session/querying/how-to-query.mdx) + or from [session.load](../../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `timeSeriesFor.get`. + + + +## Examples + +#### Get all entries: + +In this example, we retrieve all entries of the "Heartrate" time series. +The ID of the parent document is explicitly specified. + + + +{`// Get all time series entries +/** @var TimeSeriesEntryArray $val */ +$val = $session->timeSeriesFor("users/john", "HeartRates") + ->get(); +`} + + +#### Get range of entries: + +In this example, we query for a document and get its "Heartrate" time series data. + + + +{`// Query for a document with the Name property "John" +// and get its HeartRates time-series values +$session = $store->openSession(); +try \{ + $baseTime = DateUtils::today(); + + $query = $session->query(User::class) + ->whereEquals("Name", "John"); + + $result = $query->toList(); + + /** @var TimeSeriesEntryArray $val */ + $val = $session->timeSeriesFor($result[0], "HeartRates") + ->get(); + + $session->saveChanges(); +\} finally \{ + $session->close(); +\} +`} + + +#### Get entries with multiple values: + +* Here, we check whether a stock's closing-time price is rising from day to day (over three days). + This example is based on the sample entries that were entered in [this example](../../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values). + +* Since each time series entry contains multiple StockPrice values, + we include a sample that uses [named](../../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values) + time series values to make the code easier to read. + + + +{`$goingUp = false; + +$session = $store->openSession(); +try \{ + // Call 'Get' with the custom StockPrice class type + /** @var TimeSeriesEntryArray $val */ + $val = $session->typedTimeSeriesFor(StockPrice::class, "users/john") + ->get(); + + $closePriceDay1 = $val[0]->getValue()->getClose(); + $closePriceDay2 = $val[1]->getValue()->getClose(); + $closePriceDay3 = $val[2]->getValue()->getClose(); + + if (($closePriceDay2 > $closePriceDay1) + && + ($closePriceDay3 > $closePriceDay2)) + $goingUp = true; +\} finally \{ + $session->close(); +\} +`} + + + + + +## Include parent and tagged documents + +* When retrieving time series entries using `timeSeriesFor.get`, + you can include the series parent document and/or documents referred to by the entries + [tags](../../../../../document-extensions/timeseries/overview.mdx#tags). + +* The included documents will be cached in the session, and instantly retrieved from memory if loaded by the user. 
+ +* Use the following syntax to include the parent or tagged documents: + + + +{`// Get all time series entries +/** @var TimeSeriesEntryArray $entries */ +$entries = + $session->timeSeriesFor("users/john", "HeartRates") + ->get(null, null, + includes: function($builder) \{ + return $builder + // Include documents referred-to by entry tags + ->includeTags() + // Include Parent Document + ->includeDocument(); + \}); +`} + + + + + +## Syntax + + + +{`public function get(?DateTimeInterface $from = null, ?DateTimeInterface $to = null, ?Closure $includes = null, int $start = 0, int $pageSize = PHP_INT_MAX): ?TimeSeriesEntryArray; +`} + + + + + +{`// The strongly-typed API is used to address time series values by name. + +/** + * Return the time series values for the provided range + * + * @param DateTimeInterface|null $from + * @param DateTimeInterface|null $to + * @param int $start + * @param int $pageSize + * @return TypedTimeSeriesEntryArray|null + */ +public function get(?DateTimeInterface $from = null, ?DateTimeInterface $to = null, int $start = 0, int $pageSize = PHP_INT_MAX): ?TypedTimeSeriesEntryArray; +`} + + + +| Parameter | Type | Description | +|-----------|------|-------------| +| **from** (Optional) | `DateTimeInterface` | Get the range of time series entries starting from this timestamp (inclusive). | +| **to** (Optional) | `DateTimeInterface` | Get the range of time series entries ending at this timestamp (inclusive). | +| **start** | `int` | Paging first entry.

E.g. 50 means the first page would start at the 50th time series entry.
Default: `0`, for the first time-series entry. | +| **pageSize** | `int` | Paging page-size.
E.g. set `pageSize` to 10 to retrieve pages of 10 entries.

Default: `PHP_INT_MAX`, for all time series entries. | + +**Return Values (Optional)** + +* `?TimeSeriesEntryArray` - an array of time series entry classes. + + +{`class TimeSeriesEntry +\{ + private ?DateTime $timestamp = null; + private ?string $tag = null; + private ?array $values = null; + private bool $rollup = false; + + private ?array $nodeValues = null; // Map + + + //.. +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-python.mdx new file mode 100644 index 0000000000..786c101b71 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-entries-python.mdx @@ -0,0 +1,209 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `time_series_for.get` to retrieve a range of entries from a **single** time series. + To retrieve a range of entries from **multiple** series, + use the [GetMultipleTimeSeriesOperation](../../../../../document-extensions/timeseries/client-api/operations/get.mdx#getmultipletimeseriesoperation) operation. + +* The retrieved data can be paged to get the time series entries gradually, one custom-size page at a time. + +* By default, the session will track the retrieved time series data. + See [disable tracking](../../../../../client-api/session/configuration/how-to-disable-tracking.mdx) to learn how to disable. + +* When getting the time series entries, + you can also _include_ the series' **parent document** and/or **documents referred to by the entry tag**. + Learn more [below](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#include-parent-and-tagged-documents). + +* Calling `time_series_for.get` will result in a trip to the server unless the series' parent document was loaded + (or queried for) with the time series included beforehand. + Learn more in: [Including time series](../../../../../document-extensions/timeseries/client-api/session/include/overview.mdx). + +* In this page: + * [`Get` usage](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-usage) + * [Examples](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#examples) + * [Get all entries](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-all-entries) + * [Get range of entries](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-range-of-entries) + * [Get entries with multiple values](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#get-entries-with-multiple-values) + * [Include parent and tagged documents](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#include-parent-and-tagged-documents) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#syntax) + + +## `Get` usage + +* Open a session. +* Create an instance of `time_series_for` and pass it the following: + * Provide an explicit document ID, -or- + pass an [entity tracked by the session](../../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx#unit-of-work-pattern), + e.g. 
a document object returned from [session.query](../../../../../client-api/session/querying/how-to-query.mdx) + or from [session.load](../../../../../client-api/session/loading-entities.mdx#load). + * Specify the time series name. +* Call `time_series_for.get`. + + + +## Examples + +#### Get all entries: + +In this example, we retrieve all entries of the "Heartrate" time series. +The ID of the parent document is explicitly specified. + + + +{`# Get all time series entries +val = session.time_series_for("users/john", "HeartRates").get(datetime.min, datetime.max) +`} + + +#### Get range of entries: + +In this example, we query for a document and get its "Heartrate" time series data. + + + +{`# Query for a document with the Name property "John" +# and get its HeartRates time-series values +with store.open_session() as session: + base_line = datetime.utcnow() + + query = session.query(object_type=User).where_equals("Name", "John") + + result = list(query) + + doc_id = session.advanced.get_document_id(result[0]) + + val = session.time_series_for(doc_id, "HeartRates").get(datetime.min, datetime.max) + + session.save_changes() +`} + + +#### Get entries with multiple values: + +* Here, we check whether a stock's closing-time price is rising from day to day (over three days). + This example is based on the sample entries that were entered in [this example](../../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values). + +* Since each time series entry contains multiple StockPrice values, + we include a sample that uses [named](../../../../../document-extensions/timeseries/client-api/session/append.mdx#append-entries-with-multiple-values) + time series values to make the code easier to read. + + + +{`going_up = False + +# Use Get with a Named type +with store.open_session() as session: + val = session.typed_time_series_for(StockPrice, "users/john").get() + + close_price_day_1 = val[0].value.close + close_price_day_2 = val[1].value.close + close_price_day_3 = val[2].value.close + if close_price_day_2 > close_price_day_1 and close_price_day_3 > close_price_day_2: + going_up = True +`} + + + + + +## Include parent and tagged documents + +* When retrieving time series entries using `time_series_for.get`, + you can include the series parent document and/or documents referred to by the entries + [tags](../../../../../document-extensions/timeseries/overview.mdx#tags). + +* The included documents will be cached in the session, and instantly retrieved from memory if loaded by the user. + +* Use the following syntax to include the parent or tagged documents: + + + +{`# Get all time series entries +entries = session.time_series_for("users/john", "HeartRates").get_with_include( + datetime.min, + datetime.max, + lambda builder: builder + # Include documents referred-to by entry tags + .include_tags() + # Include Parent Document + .include_document(), +) +`} + + + + + +## Syntax + + + +{`def get( + self, + from_date: Optional[datetime] = None, + to_date: Optional[datetime] = None, + start: int = 0, + page_size: int = int_max, +) -> Optional[List[TimeSeriesEntry]]: ... +`} + + + + + +{`# The strongly-typed API is used, to address time series values by name. +def get( + self, + from_date: Optional[datetime] = None, + to_date: Optional[datetime] = None, + start: int = 0, + page_size: int = int_max, +) -> Optional[List[TypedTimeSeriesEntry[_T_TS_Values_Bindable]]]: ... 
+`} + + + +| Parameter | Type | Description | +|--------------------------|------------|-----------------------------------------------------------------------------------------------------------------------------------------------------| +| **from_date** (Optional) | `datetime` | Get the range of time series entries starting from this timestamp (inclusive).
Default: `datetime.min` | +| **to_date** (Optional) | `datetime` | Get the range of time series entries ending at this timestamp (inclusive).
Default: `datetime.max` | +| **start** | `int` | Paging first entry.
E.g. 50 means the first page would start at the 50th time series entry.
Default: `0`, for the first time-series entry. | +| **page_size** | `int` | Paging page-size.
E.g. set `page_size` to 10 to retrieve pages of 10 entries.
Default: `int_max`, for all time series entries. | + +**Return Values (Optional)** + +* `List[TypedTimeSeriesEntry[_T_TS_Values_Bindable]]` - a list of typed time series entry objects. + + +{`class TimeSeriesEntry: + def __init__(self, timestamp: datetime = None, tag: str = None, values: List[int] = None, rollup: bool = None): + self.timestamp = timestamp + self.tag = tag + self.values = values + self.rollup = rollup + + @property + def value(self): + if len(self.values) == 1: + return self.values[0] + raise ValueError("Entry has more than one value.") + + @value.setter + def value(self, value: int): + if len(self.values) == 1: + self.values[0] = value + return + raise ValueError("Entry has more than one value") +`} + + + +* `TimeSeriesEntry[]` - Time series values that can be referred to by name. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-csharp.mdx new file mode 100644 index 0000000000..a84c6a1c63 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-csharp.mdx @@ -0,0 +1,72 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `Advanced.GetTimeSeriesFor` to get the names of all time series for the specified entity. + +* In this page: + * [`GetTimeSeriesFor` usage](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#gettimeseriesfor-usage) + * [Example](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#example) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#syntax) + + +## `GetTimeSeriesFor` usage + +**Flow**: + +* Open a session. +* Load an entity to the session either using [session.Load](../../../../../client-api/session/loading-entities.mdx#load) + or by querying for the document via [session.Query](../../../../../client-api/session/querying/how-to-query.mdx). + In both cases, the resulting entity will be tracked by the session. +* Call `Advanced.GetTimeSeriesFor`, pass the tracked entity. + +**Note**: + +* If the entity is Not tracked by the session, an `ArgumentException` exception is thrown. + + + +## Example + + + +{`// Open a session +using (var session = store.OpenSession()) +\{ + // Load a document entity to the session + User user = session.Load<User>("users/john"); + + // Call GetTimeSeriesFor, pass the entity + List<string> tsNames = session.Advanced.GetTimeSeriesFor(user); + + // Results will include the names of all time series associated with document 'users/john' +\} +`} + + + + + +## Syntax + + + +{`List<string> GetTimeSeriesFor<T>(T instance); +`} + + + +| Parameter | Type | Description | +|--------------|-------|-----------------------------------------------------| +| **instance** | `T` | The entity whose time series names you want to get. | +| Return value | | +|----------------|-------------------------------------------------------------------------------------------------------| +| `List<string>` | A list of names of all the time series associated with the entity, sorted alphabetically by the name. 

| + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-nodejs.mdx new file mode 100644 index 0000000000..3fb6267a35 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-nodejs.mdx @@ -0,0 +1,71 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `advanced.getTimeSeriesFor` to get the names of all time series for the specified entity. + +* In this page: + * [`GetTimeSeriesFor` usage](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#gettimeseriesfor-usage) + * [Example](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#example) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#syntax) + + +## `getTimeSeriesFor` usage + +**Flow**: + +* Open a session. +* Load an entity to the session either using [session.load](../../../../../client-api/session/loading-entities.mdx#load) + or by querying for the document via [session.query](../../../../../client-api/session/querying/how-to-query.mdx). + In both cases, the resulting entity will be tracked by the session. +* Call `advanced.getTimeSeriesFor`, pass the tracked entity. + +**Note**: + +* If the entity is Not tracked by the session, an `ArgumentException` exception is thrown. + + + +## Example + + + +{`// Open a session +const session = documentStore.openSession(); + +// Load a document entity to the session +const user = await session.load("users/john"); + +// Call getTimeSeriesFor, pass the entity +const tsNames = session.advanced.getTimeSeriesFor(user); + +// Results will include the names of all time series associated with document 'users/john' +`} + + + + + +## Syntax + + + +{`getTimeSeriesFor(instance); +`} + + + +| Parameter | Type | Description | +|--------------|----------|-----------------------------------------------------| +| **instance** | `object` | The entity whose time series names you want to get. | + +| Return value | | +|--------------|-------------------------------------------------------------------------------------------------------| +| `string[]` | A list of names of all the time series associated with the entity, sorted alphabetically by the name. | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-php.mdx new file mode 100644 index 0000000000..dc6bf65446 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-php.mdx @@ -0,0 +1,75 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `advanced.getTimeSeriesFor` to get the names of all time series for the specified entity. 
+ +* In this page: + * [`getTimeSeriesFor` usage](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#gettimeseriesfor-usage) + * [Example](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#example) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#syntax) + + +## `getTimeSeriesFor` usage + +**Flow**: + +* Open a session. +* Load an entity to the session either using [session.load](../../../../../client-api/session/loading-entities.mdx#load) + or by querying for the document via [session.query](../../../../../client-api/session/querying/how-to-query.mdx). + In both cases, the resulting entity will be tracked by the session. +* Call `advanced.getTimeSeriesFor`, pass the tracked entity. + +**Note**: + +* If the entity is Not tracked by the session, an `ArgumentException` exception is thrown. + + + +## Example + + + +{`// Open a session +$session = $store->openSession(); +try \{ + // Load a document entity to the session + $user = $session->load(User::class, "users/john"); + + // Call getTimeSeriesFor, pass the entity + $tsNames = $session->advanced()->getTimeSeriesFor($user); + + // Results will include the names of all time series associated with document 'users/john' +\} finally \{ + $session->close(); +\} +`} + + + + + +## Syntax + + + +{`public function getTimeSeriesFor(object $entity): array; +`} + + + +| Parameter | Type | Description | +|--------------|-------|-----------------------------------------------------| +| **entity** | `object` | The entity whose time series names you want to get | + +| Return value | | +|----------------|---------------------------------------------------------------------------------------------------| +| `array` | A list of names (strings) of all the time series associated with the entity, sorted alphabetically by the name | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-python.mdx new file mode 100644 index 0000000000..8ed40128d8 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/_get-names-python.mdx @@ -0,0 +1,71 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `advanced.get_time_series_for` to get the names of all time series for the specified entity. + +* In this page: + * [`get_time_series_for` usage](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#get_time_series_for-usage) + * [Example](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#example) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/get/get-names.mdx#syntax) + + +## `get_time_series_for` usage + +**Flow**: + +* Open a session. +* Load an entity to the session either using [session.load](../../../../../client-api/session/loading-entities.mdx#load) + or by querying for the document via [session.query](../../../../../client-api/session/querying/how-to-query.mdx). + In both cases, the resulting entity will be tracked by the session. +* Call `advanced.get_time_series_for`, pass the tracked entity. + +**Note**: + +* If the entity is Not tracked by the session, an `ArgumentException` exception is thrown. 

+ + + +## Example + + + +{`# Open a session +with store.open_session() as session: + # Load a document entity to the session + user = session.load("users/john") + + # Call GetTimeSeriesFor, pass the entity + ts_names = session.advanced.get_time_series_for(user) + + # Results will include the names of all time series associated with document 'users/john' +`} + + + + + +## Syntax + + + +{`def get_time_series_for(self, entity: object) -> List[str]: + ... +`} + + + +| Parameter | Type | Description | +|--------------|-------|-----------------------------------------------------| +| **entity** | `object` | The entity whose time series names you want to get | + +| Return value | | +|----------------|---------------------------------------------------------------------------------------------------| +| `List[str]` | A list of names of all the time series associated with the entity, sorted alphabetically by the name | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/get-entries.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/get-entries.mdx new file mode 100644 index 0000000000..12be497c2e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/get-entries.mdx @@ -0,0 +1,52 @@ +--- +title: "Get Time Series Entries" +hide_table_of_contents: true +sidebar_label: Get Entries +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetEntriesCsharp from './_get-entries-csharp.mdx'; +import GetEntriesPython from './_get-entries-python.mdx'; +import GetEntriesPhp from './_get-entries-php.mdx'; +import GetEntriesNodejs from './_get-entries-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/get-names.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/get-names.mdx new file mode 100644 index 0000000000..6dbca2ba6d --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/get/get-names.mdx @@ -0,0 +1,52 @@ +--- +title: "Get Time Series Names" +hide_table_of_contents: true +sidebar_label: Get Names +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GetNamesCsharp from './_get-names-csharp.mdx'; +import GetNamesPython from './_get-names-python.mdx'; +import GetNamesPhp from './_get-names-php.mdx'; +import GetNamesNodejs from './_get-names-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_category_.json new file mode 100644 index 0000000000..0e4fb9152a --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 3, + "label": Include, +} \ No newline at end of file diff --git 
a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_overview-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_overview-csharp.mdx new file mode 100644 index 0000000000..6e956b052b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_overview-csharp.mdx @@ -0,0 +1,19 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When retrieving documents that contain time series, you can request to _include_ their time series data. + +* The included time series data is held by the client's session, so it can be handed to the user instantly when requested without issuing an additional request to the server. + +* Time series data can be _included_ when - + * [Loading a document using `session.Load`](../../../../../document-extensions/timeseries/client-api/session/include/with-session-load.mdx) + * [Loading a document by query via `session.Query`](../../../../../document-extensions/timeseries/client-api/session/include/with-session-query.mdx) + * [Loading a document by raw query via `session.Advanced.RawQuery`](../../../../../document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx) + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_overview-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_overview-nodejs.mdx new file mode 100644 index 0000000000..745732c9ca --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_overview-nodejs.mdx @@ -0,0 +1,19 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When retrieving documents that contain time series, you can request to _include_ their time series data. + +* The included time series data is held by the client's session, so it can be handed to the user instantly when requested without issuing an additional request to the server. 
+ +* Time series data can be _included_ when - + * [Loading a document using `session.load`](../../../../../document-extensions/timeseries/client-api/session/include/with-session-load.mdx) + * [Loading a document by query via `session.query`](../../../../../document-extensions/timeseries/client-api/session/include/with-session-query.mdx) + * [Loading a document by raw query via `session.advanced.rawQuery`](../../../../../document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx) + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-raw-queries-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-raw-queries-csharp.mdx new file mode 100644 index 0000000000..6eb5bd786b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-raw-queries-csharp.mdx @@ -0,0 +1,74 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `include timeseries` in your RQL expression in order to include time series data when making + a raw query via [session.Advanced.RawQuery](../../../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery). + +* The included time series data is stored within the session and can be provided instantly when requested + without any additional server calls. + +* In this page: + * [Include time series when making a raw query](../../../../../document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx#include-time-series-when-making-a-raw-query) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx#syntax) + + +## Include time series when making a raw query + +In this example, we use a raw query to retrieve a document +and _include_ entries from the document's "HeartRates" time series. + + + +{`using (var session = store.OpenSession()) +\{ + var baseTime = DateTime.Today; + + var from = baseTime; + var to = baseTime.AddMinutes(5); + + // Define the Raw Query: + IRawDocumentQuery query = session.Advanced.RawQuery + // Use 'include timeseries' in the RQL + ("from Users include timeseries('HeartRates', $from, $to)") + // Pass optional parameters + .AddParameter("from", from) + .AddParameter("to", to); + + // Execute the query: + // For each document in the query results, + // the time series entries will be 'loaded' to the session along with the document + var users = query.ToList(); + + // The following call to 'Get' will Not trigger a server request, + // the entries will be retrieved from the session's cache. 
+ IEnumerable entries = session.TimeSeriesFor(users[0], "HeartRates") + .Get(from, to); +\} +`} + + + + + +## Syntax + +**`Advanced.RawQuery`** + + + +{`IRawDocumentQuery RawQuery(string query); +`} + + + +| Parameter | Type | Description | +|-----------|----------|-------------------| +| **query** | `string` | The raw RQL query | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-raw-queries-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-raw-queries-nodejs.mdx new file mode 100644 index 0000000000..e450bb634e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-raw-queries-nodejs.mdx @@ -0,0 +1,78 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use `include timeseries` in your RQL expression in order to include time series data when making + a raw query via [session.advanced.rawQuery](../../../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery). + +* The included time series data is stored within the session and can be provided instantly when requested + without any additional server calls. + +* In this page: + * [Include time series when making a raw query](../../../../../document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx#include-time-series-when-making-a-raw-query) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx#syntax) + + +## Include time series when making a raw query + +In this example, we use a raw query to retrieve a document +and _include_ entries from the document's "HeartRates" time series. + + + +{`const baseTime = new Date(); +const from = baseTime; +const to = new Date(baseTime.getTime() + 60_000 * 5); + +// Define the Raw Query: +const rawQuery = session.advanced + // Use 'include timeseries' in the RQL + .rawQuery("from users include timeseries('HeartRates', $from, $to)") + // Pass optional parameters + .addParameter("from", from) + .addParameter("to", to); + +// Execute the query: +// For each document in the query results, +// the time series entries will be 'loaded' to the session along with the document +const userDocuments = await rawQuery.all(); + +const numberOfRequests1 = session.advanced.numberOfRequests; + +// The following call to 'get' will Not trigger a server request, +// the entries will be retrieved from the session's cache. +const entries = await session.timeSeriesFor(userDocuments[0], "HeartRates") + .get(from, to); + +const entryValue = entries[0].value; + +const numberOfRequests2 = session.advanced.numberOfRequests; +assert.equal(numberOfRequests1, numberOfRequests2); +`} + + + + + +## Syntax + +**`advanced.rawQuery`** + + + +{`rawQuery(query, documentType?); +`} + + + +| Parameter | Type | Description | +|--------------------|----------|----------------------------------------------| +| **query** | `string` | The raw RQL query. | +| **documentType** | `object` | The document class type (an optional param). 
| + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-load-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-load-csharp.mdx new file mode 100644 index 0000000000..d06704fb42 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-load-csharp.mdx @@ -0,0 +1,81 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When loading a document via `session.Load`, + you can _include_ all entries of a time series or a specific range of entries. + +* The included time series data is stored within the session + and can be provided instantly when requested without any additional server calls. + +* In this page: + * [Include time series when loading a document](../../../../../document-extensions/timeseries/client-api/session/include/with-session-load.mdx#include-time-series-when-loading-a-document) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/include/with-session-load.mdx#syntax) + + +## Include time series when loading a document + +In this example, we load a document using `session.Load`, +and include a selected range of entries from time series "HeartRates". + + + +{`using (var session = store.OpenSession()) +\{ + var baseline = DateTime.Today; + + // Load a document + User user = session.Load<User>("users/john", includeBuilder => + // Call 'IncludeTimeSeries' to include time series entries, pass: + // * The time series name + // * Start and end timestamps indicating the range of entries to include + includeBuilder.IncludeTimeSeries("HeartRates", baseline.AddMinutes(3), baseline.AddMinutes(8))); + + // The following call to 'Get' will Not trigger a server request, + // the entries will be retrieved from the session's cache. + IEnumerable<TimeSeriesEntry> entries = session.TimeSeriesFor("users/john", "HeartRates") + .Get(baseline.AddMinutes(3), baseline.AddMinutes(8)); +\} +`} + + + + + +## Syntax + +**`session.Load`** + + + +{`T Load<T>(string id, Action<IIncludeBuilder<T>> includes); +`} + + + +| Parameter | Type | Description | +|--------------|------------------------------|----------------| +| **id** | `string` | Document ID | +| **includes** | `Action<IIncludeBuilder<T>>` | Include Object | + +**`Include`** builder methods: + + + +{`TBuilder IncludeTimeSeries(string name, DateTime? from = null, DateTime? to = null); +`} + + + +| Parameter | Type | Description | +|-----------|-------------|-----------------------------------------------------------------------| +| **name** | `string` | Time series name. | +| **from** | `DateTime?` | Time series range start (inclusive).

Default: `DateTime.MinValue`. | +| **to** | `DateTime?` | Time series range end (inclusive).
Default: `DateTime.MaxValue`. | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-load-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-load-nodejs.mdx new file mode 100644 index 0000000000..77bbb77d67 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-load-nodejs.mdx @@ -0,0 +1,90 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When loading a document via `session.load`, + you can _include_ all entries of a time series or a specific range of entries. + +* The included time series data is stored within the session + and can be provided instantly when requested without any additional server calls. + +* In this page: + * [Include time series when loading a document](../../../../../document-extensions/timeseries/client-api/session/include/with-session-load.mdx#include-time-series-when-loading-a-document) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/include/with-session-load.mdx#syntax) + + +## Include time series when loading a document + +In this example, we load a document using `session.load`, +and include a selected range of entries from time series "HeartRates". + + + +{`const session = documentStore.openSession(); + +const baseTime = new Date(); +const from = new Date(baseTime.getTime() + 60_000 * 3); +const to = new Date(baseTime.getTime() + 60_000 * 8); + +// Load a document entity to the session +const user = await session.load("users/john", \{ + // Call 'includeTimeSeries' to include time series entries, pass: + // * The time series name + // * Start and end timestamps indicating the range of entries to include + includes: builder => builder.includeTimeSeries("HeartRates", from, to) +\}); + +const numberOfRequests1 = session.advanced.numberOfRequests; + +// The following call to 'get' will Not trigger a server request, +// the entries will be retrieved from the session's cache. +const entries = await session + .timeSeriesFor("users/john", "HeartRates") + .get(from, to); + +const numberOfRequests2 = session.advanced.numberOfRequests; +assert.equal(numberOfRequests1, numberOfRequests2); +`} + + + + + +## Syntax + +**`session.load`** + + + +{`load(id, options?); +`} + + + +| Parameter | Type | Description | +|--------------|----------|-----------------------------------------------------------------------------------------------| +| **id** | `string` | Document ID to load. | +| **options** | `object` | object containing the `includes` builder that specifies which time series entries to include. | + +**`includes`** builder methods: + + + +{`includeTimeSeries(name); +includeTimeSeries(name, from, to); +`} + + + +| Parameter | Type | Description | +|-----------|----------|----------------------------------------------------------------------| +| **name** | `string` | Time series name. | +| **from** | `Date` | Time series range start (inclusive).
Default: minimum date value. | +| **to** | `Date` | Time series range end (inclusive).
Default: maximum date value. | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-query-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-query-csharp.mdx new file mode 100644 index 0000000000..7336dae3b1 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-query-csharp.mdx @@ -0,0 +1,64 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When querying via `session.Query` for documents that contain time series, + you can request to include their time series data in the server response. + +* The included time series data is stored within the session + and can be provided instantly when requested without any additional server calls. + +* In this page: + * [Include time series when making a query](../../../../../document-extensions/timeseries/client-api/session/include/with-session-query.mdx#include-time-series-when-making-a-query) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/include/with-session-query.mdx#syntax) + + +## Include time series when making a query + +In this example, we retrieve a document using `session.Query` +and _include_ entries from the document's "HeartRates" time series. + + + +{`using (var session = store.OpenSession()) +\{ + // Query for a document and include a whole time-series + User user = session.Query() + .Where(u => u.Name == "John") + .Include(includeBuilder => includeBuilder.IncludeTimeSeries("HeartRates")) + .FirstOrDefault(); + + // The following call to 'Get' will Not trigger a server request, + // the entries will be retrieved from the session's cache. + IEnumerable val = session.TimeSeriesFor(user, "HeartRates") + .Get(); +\} +`} + + + + + +## Syntax + +**`Include`** builder methods: + + + +{`TBuilder IncludeTimeSeries(string name, DateTime? from = null, DateTime? to = null); +`} + + + +| Parameter | Type | Description | +|-----------|-------------|-----------------------------------------------------------------------| +| **name** | `string` | Time series Name. | +| **from** | `DateTime?` | Time series range start (inclusive).
Default: `DateTime.MinValue`. | +| **to** | `DateTime?` | Time series range end (inclusive).
Default: `DateTime.MaxValue`. | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-query-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-query-nodejs.mdx new file mode 100644 index 0000000000..9f949253da --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/_with-session-query-nodejs.mdx @@ -0,0 +1,83 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When querying via `session.query` for documents that contain time series, + you can request to include their time series data in the server response. + +* The included time series data is stored within the session + and can be provided instantly when requested without any additional server calls. + +* In this page: + * [Include time series when making a query](../../../../../document-extensions/timeseries/client-api/session/include/with-session-query.mdx#include-time-series-when-making-a-query) + * [Syntax](../../../../../document-extensions/timeseries/client-api/session/include/with-session-query.mdx#syntax) + + +## Include time series when making a query + +In this example, we retrieve a document using `session.query` +and _include_ entries from the document's "HeartRates" time series. + + + + +{`// Query for a document and include a whole time-series +const user = await session.query({ collection: "users" }) + .whereEquals("name", "John") + // Call 'includeTimeSeries' to include the time series entries in the response + // Pass the time series name + // (find more include builder overloads under the Syntax section) + .include(includeBuilder => includeBuilder.includeTimeSeries("HeartRates")) + .first(); + +const numberOfRequests1 = session.advanced.numberOfRequests; + +// The following call to 'get' will Not trigger a server request, +// the entries will be retrieved from the session's cache. +const entries = await session.timeSeriesFor(user, "HeartRates") + .get(); + +const entryValue = entries[0].value; + +const numberOfRequests2 = session.advanced.numberOfRequests; +assert.equal(numberOfRequests1, numberOfRequests2); +`} + + + + +{`from "users" +where name = "John" +include timeseries("HeartRates", null, null) +limit null, 1 +`} + + + + + + +## Syntax + +**`include`** builder methods: + + + +{`includeTimeSeries(name); +includeTimeSeries(name, from, to); +`} + + + +| Parameter | Type | Description | +|-----------|----------|----------------------------------------------------------------------| +| **name** | `string` | Time series Name. | +| **from** | `Date` | Time series range start (inclusive).
Default: minimum date value. | +| **to** | `Date` | Time series range end (inclusive).
Default: maximum date value. | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/overview.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/overview.mdx new file mode 100644 index 0000000000..a4972c855a --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/overview.mdx @@ -0,0 +1,42 @@ +--- +title: "Including Time Series" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OverviewCsharp from './_overview-csharp.mdx'; +import OverviewNodejs from './_overview-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx new file mode 100644 index 0000000000..a6993941d0 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/with-raw-queries.mdx @@ -0,0 +1,42 @@ +--- +title: "Include Time Series with Raw Queries" +hide_table_of_contents: true +sidebar_label: With Raw Queries +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import WithRawQueriesCsharp from './_with-raw-queries-csharp.mdx'; +import WithRawQueriesNodejs from './_with-raw-queries-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/with-session-load.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/with-session-load.mdx new file mode 100644 index 0000000000..291cb41e50 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/with-session-load.mdx @@ -0,0 +1,42 @@ +--- +title: "Include Time Series with Load" +hide_table_of_contents: true +sidebar_label: With Load +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import WithSessionLoadCsharp from './_with-session-load-csharp.mdx'; +import WithSessionLoadNodejs from './_with-session-load-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/with-session-query.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/with-session-query.mdx new file mode 100644 index 0000000000..7333da64a3 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/include/with-session-query.mdx @@ -0,0 +1,42 @@ +--- +title: "Include Time Series with Query" +hide_table_of_contents: true +sidebar_label: With Query +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import WithSessionQueryCsharp from 
'./_with-session-query-csharp.mdx'; +import WithSessionQueryNodejs from './_with-session-query-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/patch.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/patch.mdx new file mode 100644 index 0000000000..70c44eb927 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/patch.mdx @@ -0,0 +1,55 @@ +--- +title: "Patch Time Series Entries" +hide_table_of_contents: true +sidebar_label: Patch +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PatchCsharp from './_patch-csharp.mdx'; +import PatchPython from './_patch-python.mdx'; +import PatchPhp from './_patch-php.mdx'; +import PatchNodejs from './_patch-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/querying.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/querying.mdx new file mode 100644 index 0000000000..442246ddd1 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/client-api/session/querying.mdx @@ -0,0 +1,42 @@ +--- +title: "Time Series Querying" +hide_table_of_contents: true +sidebar_label: Querying +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import QueryingCsharp from './_querying-csharp.mdx'; +import QueryingNodejs from './_querying-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/design.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/design.mdx new file mode 100644 index 0000000000..d857866c54 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/design.mdx @@ -0,0 +1,42 @@ +--- +title: "Time Series Design" +hide_table_of_contents: true +sidebar_label: Design +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DesignCsharp from './_design-csharp.mdx'; +import DesignNodejs from './_design-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/_category_.json new file mode 100644 index 0000000000..7801aaf2f5 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 7, + "label": Incremental Time Series, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/_category_.json new file mode 100644 index 
0000000000..2b13474af7 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 1, + "label": "Client API" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx new file mode 100644 index 0000000000..0eb7a6236b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx @@ -0,0 +1,144 @@ +--- +title: "Incremental Time Series: JavaScript Support" +hide_table_of_contents: true +sidebar_label: JavaScript Support +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Incremental Time Series: JavaScript Support + + + + +* RavenDB's time-series + [JavaScript Support](../../../../document-extensions/timeseries/client-api/javascript-support.mdx) + has been extended to support incremental time series. + +* You can use the JavaScript [timeseries.increment](../../../../document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx#timeseriesincrement) + method to create and modify incremental time series and their entries. + The method behaves the same way it does when it is called [using C#](../../../../document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx). + +* Incremental time series **cannot** use the non-incremental time series + [timeseries.append](../../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-1) method. + +* Other JavaScript methods available for an incremental time series: + * [timeseries.delete](../../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-2) + * [timeseries.get](../../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-3) + +* In this page: + * [The `timeseries` Interface](../../../../document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx#the-timeseries-interface) + * [`timeseries.increment`](../../../../document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx#timeseriesincrement) + * [Usage Sample](../../../../document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx#usage-sample) + + + +## The `timeseries` Interface + +* Use `timeseries(doc, name)` to choose a time series by the ID of its owner document and + by the series name. + + | Parameter | Type | Description | + |-----------|---------------------------------------------|-----------------------------------------------------------------------------| + | doc | `string`

or
`document instance` | Document ID, e.g. `users/1-A`,

-or- a document instance,
e.g. `this` | + | name | `string` | Incremental time series Name (e.g. `INC:StockPrice`) | + +* Use one of the following methods to access the chosen time series: + * [timeseries.increment](../../../../document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx#timeseriesincrement) + * [timeseries.delete](../../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-2) + * [timeseries.get](../../../../document-extensions/timeseries/client-api/javascript-support.mdx#section-3) + +## `timeseries.increment` + +* There are four `Increment` methods: + * Increment a time series entry's array of values at the provided timestamp. + + +{`// Increment a time series entry's array of values at the provided timestamp +void Increment(DateTime timestamp, IEnumerable<double> values); +`} + + + * Increment a time series entry's array of values at the current time. + + +{`// Increment a time series entry's array of values at the current time +void Increment(IEnumerable<double> values); +`} + + + * Increment an entry value at the provided timestamp. + (If the entry exists and has more than one value, only the first + value in its list will be incremented by the passed value.) + + +{`// Increment an entry value at the provided timestamp +void Increment(DateTime timestamp, double value); +`} + + + * Increment an entry value at the current time. + (If the entry exists and has more than one value, only the first + value in its list will be incremented by the passed value.) + + +{`// Increment an entry value at the current time +void Increment(double value); +`} + + + +* **Parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `timestamp` | DateTime | Time series entry's timestamp | + | `values` | IEnumerable<double> | A list of delta values to increment the entry values by | + | `value` | double | The delta to increment the entry value by | + +* **Exceptions** + If the document doesn't exist, a `DocumentDoesNotExistException` exception is thrown. + + + +## Usage Sample + +In this sample we use +[session.Advanced.Defer](../../../../document-extensions/timeseries/client-api/session/patch.mdx#patching-using-sessionadvanceddefer) +to patch an incremental time series. +We go over a series of collected stock prices and increment each collected stock price by a delta of **2**, +correcting values that were originally miscalculated. 

+ + +{`session.Advanced.Defer(new PatchCommandData("users/1-A", null, + new PatchRequest + \{ + Script = @" + var i = 0; + for(i = 0; i < $timeStamps.length; i++) + \{ + timeseries(id(this), $timeseries) + .increment ( + new Date($timeStamps[i]), + $factor); + \}", + + Values = + \{ + \{ "timeseries", "INC:StockPrice" \}, + \{ "timeStamps", timeStamps\}, + \{ "factor", 2 \}, + \} + \}, null)); +`} + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/operations/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/operations/_category_.json new file mode 100644 index 0000000000..226246e81b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/operations/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 1, + "label": Operations, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx new file mode 100644 index 0000000000..6f64c9e296 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx @@ -0,0 +1,130 @@ +--- +title: "Operations: Get Incremental Time Series" +hide_table_of_contents: true +sidebar_label: Get +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Operations: Get Incremental Time Series + + +* Get time series entries using `GetTimeSeriesOperation`. +* Using this method, you can retrieve node values from incremental time series entries. + +* In this page: + * [`GetTimeSeriesOperation`](../../../../../document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx#gettimeseriesoperation) + * [Syntax](../../../../../document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx#syntax) + * [Usage Flow](../../../../../document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx#usage-flow) + * [Code Samples](../../../../../document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx#usage-sample) + + +## `GetTimeSeriesOperation` + +Use `GetTimeSeriesOperation` to retrieve the distinct values stored per-node for the requested entries. +### Syntax + +* `GetTimeSeriesOperation` Definition: + + +{`public GetTimeSeriesOperation( + string docId, string timeseries, DateTime? @from = null, + DateTime? to = null, int start = 0, int pageSize = int.MaxValue, + bool returnFullResults = false) +`} + + + +* **Parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `docId` | `string` | Document ID | + | `timeseries` | `string` | Time series name | + | `from` (optional) | `DateTime?` | Range start
Default: `DateTime.MinValue` |
+  | `to` (optional) | `DateTime?` | Range end
Default: `DateTime.MaxValue` |
+  | `start` | `int` | Start of the first page |
+  | `pageSize` | `int` | Size of each page, counted in [entries with unique timestamps](../../../../../document-extensions/timeseries/incremental-time-series/overview.mdx#incremental-time-series-structure) |
+  | `returnFullResults` | `bool` | If true, retrieve the values stored per-node.
If false, return `null` in `TimeSeriesEntry.NodeValues`. |
+
+
+* **Return Value**: **`TimeSeriesRangeResult`**
+
+
+{`public class TimeSeriesRangeResult
+\{
+    public DateTime From, To;
+    public TimeSeriesEntry[] Entries;
+
+    // The number of unique values
+    public long? TotalResults;
+\}
+`}
+
+
+
+{`public class TimeSeriesEntry
+\{
+    public DateTime Timestamp \{ get; set; \}
+    public double[] Values \{ get; set; \}
+    public string Tag \{ get; set; \}
+    public bool IsRollup \{ get; set; \}
+
+    // The nodes distribution per each entry
+    public Dictionary<string, double[]> NodeValues \{ get; set; \}
+\}
+`}
+
+
+  * `TimeSeriesRangeResult.TotalResults` will contain the number of **unique** values.
+    If the time series contains entries with multiple values (for an incremental time series
+    this means duplicates of the same number at the same timestamp), all values will be
+    aggregated in `TotalResults` into a single unique value.
+  * Requesting a time series that doesn't exist will return `null`.
+  * Requesting an entries range that doesn't exist will return a `TimeSeriesRangeResult` object
+    with an empty `Entries` property.
+
+* **Exceptions**
+  Exceptions are not generated.
+### Usage Sample
+
+* In this sample we retrieve a page of up to 100 entries from an incremental time series
+  that contains two per-node values in each entry.
+  We then calculate where the next `Get` operation should start, and run another `Get`
+  operation starting there.
+
+
+{`int pageSize = 100;
+var entries = store.Operations
+    .Send(new GetTimeSeriesOperation("users/ayende",
+        "INC:Downloads", start: 0, pageSize: pageSize,
+        returnFullResults: true));
+
+// Load another page, starting with the first entry that wasn't read yet
+int nextStart = entries.Entries.Length;
+entries = store.Operations
+    .Send(new GetTimeSeriesOperation("users/ayende",
+        "INC:Downloads", start: nextStart, pageSize: pageSize,
+        returnFullResults: true));
+`}
+
+
+
+
+## `GetMultipleTimeSeriesOperation`
+
+To retrieve data from **multiple** time series,
+use [GetMultipleTimeSeriesOperation](../../../../../document-extensions/timeseries/client-api/operations/get.mdx#getmultipletimeseriesoperation).
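+
+For instance, a minimal sketch (the document ID and the second series name are assumed for illustration) of retrieving ranges from two time series in a single request:
+
+
+{`// Retrieve ranges from two time series of the same document in one call
+TimeSeriesDetails multipleSeries = store.Operations
+    .Send(new GetMultipleTimeSeriesOperation("users/ayende",
+        new List<TimeSeriesRange>
+        \{
+            new TimeSeriesRange \{ Name = "INC:Downloads", From = DateTime.MinValue, To = DateTime.MaxValue \},
+            // "INC:Visits" is a hypothetical second series, used here for illustration
+            new TimeSeriesRange \{ Name = "INC:Visits", From = DateTime.MinValue, To = DateTime.MaxValue \}
+        \}));
+`}
+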
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/_category_.json
new file mode 100644
index 0000000000..466eb27303
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 0,
+    "label": "Session"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/delete.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/delete.mdx
new file mode 100644
index 0000000000..af7c70d960
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/delete.mdx
@@ -0,0 +1,130 @@
+---
+title: "Session: Delete Incremental Time Series"
+hide_table_of_contents: true
+sidebar_label: Delete
+sidebar_position: 3
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Session: Delete Incremental Time Series
+
+
+Delete a range of incremental time series entries using `IncrementalTimeSeriesFor.Delete`.
+
+* You can delete a **single entry** or a **range of entries**.
+
+* In this page:
+  * [`IncrementalTimeSeriesFor.Delete`](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/delete.mdx#incrementaltimeseriesfordelete)
+  * [Syntax](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/delete.mdx#syntax)
+  * [Usage Flow](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/delete.mdx#usage-flow)
+  * [Code Samples](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/delete.mdx#code-samples)
+
+## `IncrementalTimeSeriesFor.Delete`
+
+`IncrementalTimeSeriesFor.Delete` is used for the removal of incremental time series and
+their entries.
+
+* There is no need to explicitly delete an incremental time series;
+  the series is deleted when all its entries are deleted.
+* Attempting to delete nonexistent entries results in a no-op,
+  generating no exception.
+
+
+
+
+## Syntax
+
+* There are two `IncrementalTimeSeriesFor.Delete` methods:
+  * Delete a range of time series entries
+    -or-
+    if the range boundaries are omitted, delete the entire series.
+
+
+{`// Delete the incremental time series entries in the range from .. to,
+// or, if both boundaries are omitted, delete the whole series.
+void Delete(DateTime? from = null, DateTime? to = null);
+`}
+
+
+  * Delete a single time series entry.
+
+
+{`// Delete the entry value at the specified timestamp
+void Delete(DateTime at);
+`}
+
+
+* **Parameters**
+
+  | Parameters | Type | Description |
+  |:-------------|:-------------|:-------------|
+  | `from` (optional) | `DateTime?` | Delete the range of entries starting at this timestamp. |
+  | `to` (optional) | `DateTime?` | Delete the range of entries ending at this timestamp. |
+  | `at` | `DateTime` | Timestamp of the entry to be deleted. |
+
+* **Return Value**
+  No return value.
+
+* **Exceptions**
+  * `DocumentDoesNotExistException` is thrown if the document doesn't exist.
+  * Attempting to delete nonexistent entries results in a no-op and does not generate an exception.
+
+
+
+## Usage Flow
+
+* Open a session
+* Create an instance of `IncrementalTimeSeriesFor` and pass it:
+  * An explicit document ID,
+    -or-
+    An [entity tracked by the session](../../../../../client-api/session/loading-entities.mdx),
+    e.g. a document object returned from [session.Query](../../../../../client-api/session/querying/how-to-query.mdx)
+    or from [session.Load](../../../../../client-api/session/loading-entities.mdx#load).
+  * The time series name.
+    The name **must** begin with "INC:" (can be upper or lower case) to identify the time series as incremental.
+* Call `IncrementalTimeSeriesFor.Delete`.
+* Call `session.SaveChanges` for the action to take effect on the server.
+
+
+
+## Code Samples
+
+* Delete a single entry:
+
+
+{`// Delete a single entry
+using (var session = store.OpenSession())
+\{
+    session.IncrementalTimeSeriesFor("companies/webstore", "INC:Downloads")
+        .Delete(baseline.AddMinutes(1));
+
+    session.SaveChanges();
+\}
+`}
+
+
+* Delete a range of entries:
+
+
+{`// Delete a range of entries from the time series
+using (var session = store.OpenSession())
+\{
+    session.IncrementalTimeSeriesFor("companies/webstore", "INC:Downloads")
+        .Delete(baseline.AddDays(0), baseline.AddDays(9));
+
+    session.SaveChanges();
+\}
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/get.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/get.mdx
new file mode 100644
index 0000000000..8004141e9c
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/get.mdx
@@ -0,0 +1,106 @@
+---
+title: "Session: Get Incremental Time Series Entries"
+hide_table_of_contents: true
+sidebar_label: Get
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Session: Get Incremental Time Series Entries
+
+
+* To get a range of incremental time series entries, use one of the `IncrementalTimeSeriesFor.Get` methods.
+* These methods retrieve only the **accumulated values** of incremental time series entries.
+  To retrieve the values stored by each node, use [GetTimeSeriesOperation](../../../../../document-extensions/timeseries/incremental-time-series/client-api/operations/get.mdx).
+
+
+
+* [Include](../../../../../document-extensions/timeseries/client-api/session/include/overview.mdx)
+  time series data while [loading](../../../../../document-extensions/timeseries/client-api/session/include/with-session-load.mdx)
+  or [querying](../../../../../document-extensions/timeseries/client-api/session/include/with-session-query.mdx)
+  documents, to keep the data locally in the client's session and avoid unnecessary additional trips to the server.
+* When caching is enabled, time series data is kept in the session cache as well.
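+
+A minimal sketch (the document ID and entity type are assumed for illustration) of including an incremental time series while loading its parent document:
+
+
+{`using (var session = store.OpenSession())
+\{
+    // Include the incremental time series when loading the document
+    var company = session.Load<Company>("companies/webstore",
+        include => include.IncludeTimeSeries("INC:Downloads"));
+
+    // The entries below are served from the session, with no extra server call
+    var entries = session.IncrementalTimeSeriesFor(company, "INC:Downloads")
+        .Get(DateTime.MinValue, DateTime.MaxValue);
+\}
+`}
+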
+ + + +* In this page: + * [`IncrementalTimeSeriesFor.Get`](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/get.mdx#incrementaltimeseriesforget) + * [Syntax](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/get.mdx#syntax) + * [Usage Flow](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/get.mdx#usage-flow) + * [Code Samples](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx#code-samples) + + +## `IncrementalTimeSeriesFor.Get` + +`IncrementalTimeSeriesFor.Get` retrieves a range of entries from a single time series. + +* To retrieve multiple series' data, use the + [GetMultipleTimeSeriesOperation](../../../../../document-extensions/timeseries/client-api/operations/get.mdx#getmultipletimeseriesoperation) + document-store operation. +* Retrieved data can be sliced to **pages** to get time series entries + gradually, one custom-size page at a time. + + + +## Syntax + +* `IncrementalTimeSeriesFor.Get` method: + + +{`// Return time series values for the provided range +TimeSeriesEntry[] Get(DateTime? from = null, DateTime? to = null, int start = 0, int pageSize = int.MaxValue); +`} + + + +* **Parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `from` | `DateTime?` | Range Start | + | `to` | `DateTime?` | Range End | + | `start` | `int` | Paging first entry.
E.g. 50 means the first page would start at the 50th time series entry.
Default: 0, for the first time-series entry. |
+ | `pageSize` | `int` | Paging page size.
E.g. set `pageSize` to 10 to retrieve pages of 10 entries.
Default: int.MaxValue, for all time series entries. | + +* **Return Values** + * **`TimeSeriesEntry[]`** - an array of time series entry classes. + + + +## Usage Flow + +* Open a session +* Create an instance of `IncrementalTimeSeriesFor` and pass it: + * An explicit document ID, + -or- + An [entity tracked by the session](../../../../../client-api/session/loading-entities.mdx), + e.g. a document object returned from [session.Query](../../../../../client-api/session/querying/how-to-query.mdx) + or from [session.Load](../../../../../client-api/session/loading-entities.mdx#load). + * The time series name. + The name **must** begin with "INC:" (can be upper or lower case) to identify the time series as incremental. +* Call `IncrementalTimeSeriesFor.Get`. + + + +## Code Samples + +* In this sample we retrieve all the entries of a time series. + + +{`// Get all time series entries +TimeSeriesEntry[] val = session.IncrementalTimeSeriesFor("companies/webstore", "INC:Downloads") +.Get(DateTime.MinValue, DateTime.MaxValue); +`} + + + +* Find additional samples [here](../../../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx). + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx new file mode 100644 index 0000000000..c168e7b02f --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx @@ -0,0 +1,135 @@ +--- +title: "Session: Create & Modify Incremental Time Series" +hide_table_of_contents: true +sidebar_label: Increment +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Session: Create & Modify Incremental Time Series + + +* Create and modify incremental time series and their entries using `IncrementalTimeSeriesFor.Increment`. + +* There is no need to explicitly create or delete a time series. + * A time series is created when its first entry is incremented. + * A time series is deleted when all entries are [deleted](../../../../../document-extensions/timeseries/client-api/session/delete.mdx) from it. + +* You can add a single [incremental time series entry](../../../../../document-extensions/timeseries/design.mdx#time-series-entries) at a time. + Note, however, that you can `Increment` as many times as you need to before calling + `session.SaveChanges`, to create multiple entries in a single transaction. 
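+
+A minimal sketch (document ID, series name, and the `baseline` timestamp are assumed for illustration) of batching several `Increment` calls into one transaction:
+
+
+{`using (var session = store.OpenSession())
+\{
+    var ts = session.IncrementalTimeSeriesFor("users/1-A", "INC:Downloads");
+
+    ts.Increment(baseline.AddMinutes(1), 1);  // creates the first entry
+    ts.Increment(baseline.AddMinutes(2), 5);  // creates a second entry
+    ts.Increment(baseline.AddMinutes(2), -2); // merged into the second entry's value
+
+    // All three increments take effect in a single transaction
+    session.SaveChanges();
+\}
+`}
+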
+
+* In this page:
+  * [`IncrementalTimeSeriesFor.Increment`](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx#incrementaltimeseriesforincrement)
+  * [Syntax](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx#syntax)
+  * [Usage Flow](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx#usage-flow)
+  * [Code Samples](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx#code-samples)
+
+## `IncrementalTimeSeriesFor.Increment`
+
+* `IncrementalTimeSeriesFor.Increment` is used for the creation of incremental time series and
+  their entries, and for the modification of entry values.
+  * **Creating a new Incremental Time Series**
+    Incrementing entry values for an incremental time series that doesn't exist yet will
+    create the new incremental time series with this entry.
+  * **Creating an Incremental Time Series Entry**
+    Incrementing an entry value for an entry that doesn't exist yet will add the entry to
+    this series at the specified timestamp.
+  * **Modifying Entry Values**
+    Increment a value for an existing entry by a number of your choice.
+
+
+
+## Syntax
+
+* There are four `IncrementalTimeSeriesFor.Increment` methods:
+  * Increment a time series entry's array of values at the provided timestamp.
+
+
+{`// Increment a time series entry's array of values at the provided timestamp
+void Increment(DateTime timestamp, IEnumerable<double> values);
+`}
+
+
+  * Increment a time series entry's array of values at the current time.
+
+
+{`// Increment a time series entry's array of values at the current time
+void Increment(IEnumerable<double> values);
+`}
+
+
+  * Increment an entry value at the provided timestamp.
+    (If the entry exists and has more than one value, only the first
+    value in its list will be incremented by the passed value.)
+
+
+{`// Increment an entry value at the provided timestamp
+void Increment(DateTime timestamp, double value);
+`}
+
+
+  * Increment an entry value at the current time.
+    (If the entry exists and has more than one value, only the first
+    value in its list will be incremented by the passed value.)
+
+
+{`// Increment an entry value at the current time
+void Increment(double value);
+`}
+
+
+* **Parameters**
+
+  | Parameters | Type | Description |
+  |:-------------|:-------------|:-------------|
+  | `timestamp` | DateTime | Time series entry's timestamp |
+  | `values` | IEnumerable&lt;double&gt; | A list of delta values to increment the entry values by |
+  | `value` | double | The delta to increment the entry value by |
+
+* **Exceptions**
+  If the document doesn't exist, a `DocumentDoesNotExistException` is thrown.
+
+
+
+## Usage Flow
+
+* Open a session
+* Create an instance of `IncrementalTimeSeriesFor` and pass it:
+  * An explicit document ID,
+    -or-
+    An [entity tracked by the session](../../../../../client-api/session/loading-entities.mdx),
+    e.g. a document object returned from [session.Query](../../../../../client-api/session/querying/how-to-query.mdx)
+    or from [session.Load](../../../../../client-api/session/loading-entities.mdx#load).
+  * The time series name.
+    The name **must** begin with "INC:" (can be upper or lower case) to identify the time series as incremental.
+* Call `IncrementalTimeSeriesFor.Increment`.
+* Call `session.SaveChanges` for the action to take effect on the server.
+ + + +## Code Samples + +* Increment an array of values in an incremental time series entry. + + +{`var ts = session.IncrementalTimeSeriesFor("companies/webstore", "INC:Downloads"); +ts.Increment(baseline.AddMinutes(1), new double[] \{ 10, -10, 0, 0 \}); +session.SaveChanges(); +`} + + + * If the time series doesn't exist, it will be created with this first entry. + * If the entry doesn't exist, it will be created with the provided values. + * If the entry exists, its values will be increased by the provided values. + * a negative number will decrease the current value. + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/overview.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/overview.mdx new file mode 100644 index 0000000000..7adc077c27 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/client-api/session/overview.mdx @@ -0,0 +1,67 @@ +--- +title: "Session: Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Session: Overview + + +* Incremental Time Series can be created and managed using a set of + [session](../../../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) + API methods, whose functionality is mostly identical to that of + [non-incremental time series session methods](../../../../../document-extensions/timeseries/client-api/overview.mdx#available-time-series-session-methods). + +* The incremental time series uses the + [Increment](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx) + method to - + * Create a new time series, + * Create a new time series entry, + * and Increase a value by some delta. + +* In this page: + * [Session API Methods](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/overview.mdx#session-api-methods) + * [IncrementalTimeSeriesFor Methods](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/overview.mdx#methods) + * [Additional Session Methods](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/overview.mdx#additional-session-methods) + +## Session API Methods +### `IncrementalTimeSeriesFor` Methods + +The `IncrementalTimeSeriesFor` class provides useful incremental time series +session API methods, including [Increment](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx), +[Get](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/get.mdx), +and [Delete](../../../../../document-extensions/timeseries/incremental-time-series/client-api/session/delete.mdx). + +To use it - + +* Open a session +* Create an instance of `IncrementalTimeSeriesFor` and pass it: + * An explicit document ID, + -or- + An [entity tracked by the session](../../../../../client-api/session/loading-entities.mdx), + e.g. 
a document object returned from [session.Query](../../../../../client-api/session/querying/how-to-query.mdx) + or from [session.Load](../../../../../client-api/session/loading-entities.mdx#load). + * The time series name. + The name **must** begin with "INC:" (can be upper or lower case) to identify the time series as incremental. +* Call an `IncrementalTimeSeriesFor` method +* Call `session.SaveChanges` for the action to take place. +### Additional Session Methods + +Additional session API methods handle incremental time series the +same way they do non-incremental time series, allowing you to - + +* [Include](../../../../../document-extensions/timeseries/client-api/session/include/overview.mdx) incremental time series, +* [Patch](../../../../../document-extensions/timeseries/client-api/session/patch.mdx) incremental time series, +* and [Query](../../../../../document-extensions/timeseries/client-api/session/querying.mdx) incremental time series. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/overview.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/overview.mdx new file mode 100644 index 0000000000..3b0f5d9c63 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/incremental-time-series/overview.mdx @@ -0,0 +1,152 @@ +--- +title: "Incremental Time Series: Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Incremental Time Series: Overview + + +* **Incremental Time Series** are [time series](../../../document-extensions/timeseries/overview.mdx) + whose values are designated to function as counters. + +* Similar to Counters, an incremental-time-series value can be increased/decreased by some + delta on any node. + Each node manages and stores its own accumulated local changes per value. + +* Simultaneous updates to the same value from multiple nodes/clients do not cause any conflict. + The value's total contents is simply the accumulation of that value's contents stored per-cluster node + for the same timestamp. + +* Incremental Time series can be created and managed using dedicated [API methods](../../../document-extensions/timeseries/incremental-time-series/client-api/session/overview.mdx) + and via [Studio](../../../studio/database/document-extensions/time-series.mdx#incremental-time-series). + +* In this page: + * [What are Incremental Time Series and Why Use Them](../../../document-extensions/timeseries/incremental-time-series/overview.mdx#what-are-incremental-time-series-and-why-use-them) + * [Incremental Time Series -vs- Non-incremental Time Series](../../../document-extensions/timeseries/incremental-time-series/overview.mdx#incremental-time-series--vs--non-incremental-time-series) + * [Incremental Time Series Structure](../../../document-extensions/timeseries/incremental-time-series/overview.mdx#incremental-time-series-structure) + + + + +## What are Incremental Time Series and Why Use Them + +* **A Time Series That Counts** + Many scenarios require us to continuously update a counter, and yet keep track of + the changes made to its value over time. 
+ [Counters](../../../document-extensions/counters/overview.mdx) let us count, but not keep track of changes. + [Time series](../../../document-extensions/timeseries/overview.mdx) let us record changes over time, + but are not designated for counting. + Incremental time series allow us to easily achieve both goals, by permitting clients + to **use entry values as counters, while storing their modifications over time in an evolving time series**. + + A web page admin can store the ongoing number of downloads made by visitors in an + incremental time series. + In addition to recording the number of downloads over time, the number of downloads + per timestamp can be incremented as needed. + The number of downloads can be queried at any time for hourly or daily changes over + the passing week or month, and a graph of the results can be plotted using Studio or any other tool. + + +* **Parallel Modification** + * An incremental time series entry can be **modified by multiple clients without conflict**. + * A node handling a request to increment a value stores the value's new contents locally. + This contents replicates to all other nodes but does not override this value's contents on + the other nodes. + Instead, **per timestamp, each node stores an incremental time series entry composed of + the value's contents per node**. + * When querying a time series, you can retrieve the **total** value contents, + accumulated from the per-node values, or get the **distinct** values per-node. + + + A real-life scenario that makes good use of this feature is a bunch of traffic cameras + installed in different directions of a large road intersection, counting passing cars. + Each camera reports to its own cluster node, and the cluster collects the data into + a single time series. Each time series entry contains data from all nodes. + An admin can then query both the accumulated values, counting all cars passing through + the junction at any given moment, and each camera's data separately for a detailed look + at its side of the junction. + + + + + +## Incremental Time Series -vs- Non-incremental Time Series + +* **Name Convention** + * Non-incremental: + Any name can be used, as long as it doesn't start with the dedicated incremental prefix. + * Incremental: + Name must start with `INC:` (can be either upper or lower case). + +* **General Usage** + * Non-incremental: + Record data over time with no intention of changing the stored values. + i.e. once stored, Heartrates or Stock prices values need no alteration. + * Incremental: + Record data over time and allow to increase/decrease each value per timestamp. + +* **Modified values & Replication** + * Non-incremental: + A value that is modified on one node is replicated to other nodes and will + replace the existing value's contents on the other nodes. + Upon concurrent updates for the same timestamp, the highest value from all + nodes takes over the value's contents. + * Incremental: + A request to increase a value contains the **delta** by which the value is to be + increased or decreased. + A node handling such request stores the value's new contents locally. + This contents replicates to all other nodes but doesn't override this value's + contents on the other nodes. + Instead, **per timestamp, each node stores an incremental time series entry + that is composed of the value's contents per node**. + +* **Tag per Entry** + * Non-incremental: + Any tag can be set by the user per entry. + * Incremental: + The user cannot set a tag per entry. 
+ The entry's tag is set by the server for inner usage only and is composed of the + Node tag and the database ID. + +* **Rollup Policies** + * Non-incremental: + Rollup policies can be created for speedy filtering and size reduction in the original series. + * Incremental: + Rollup policies can be created as above. + However, the resulting rollup time series is non incremental. + It can be handled via [TimeSeriesFor](../../../document-extensions/timeseries/client-api/session/append.mdx#timeseriesforappend), not + [IncrementalTimeSeriesFor](../../../document-extensions/timeseries/incremental-time-series/client-api/session/overview.mdx#methods), + and its values can no longer be [Incremented](../../../document-extensions/timeseries/incremental-time-series/client-api/session/increment.mdx). + + + +## Incremental Time Series Structure + +* The basic structure and behavior of incremental time series are similar + to those of [non-incremental time series](../../../document-extensions/timeseries/overview.mdx#time-series-data). +* An incremental time series is attached to a document just like a non-incremental + time series is. + In Studio, incremental time series are even accessed through the + familiar [Time Series View](../../../studio/database/document-extensions/time-series.mdx#creating-a-new-incremental-time-series-by-creating-its-first-entry). +* An incremental time series is divided into segments and entries the same way + a non-incremental time series is. +* The main structural difference is incremental time series' capacity to store multiple + per-node values in a single entry and manage them separately. +* Each entry is assigned a single unique timestamp. + However, when different nodes update the same value at the same timestamp, the entries + stored on those nodes will be assigned with the same timestamp. +* The number of values that can be stored per incremental time series entry + remains 32, but since each entry contains all values stored on all nodes the + entry's size may be much larger. 
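+
+A minimal sketch (document ID and series name are assumed for illustration) of reading both the accumulated values and the distinct per-node values of each entry, using the `GetTimeSeriesOperation` described in the API pages:
+
+
+{`var result = store.Operations.Send(new GetTimeSeriesOperation(
+    "users/1-A", "INC:Downloads", returnFullResults: true));
+
+foreach (var entry in result.Entries)
+\{
+    // The accumulated value, summed over all nodes
+    Console.WriteLine($"\{entry.Timestamp\}: total = \{entry.Values[0]\}");
+
+    // The distinct value stored per node (available when returnFullResults is true)
+    foreach (var nodeValue in entry.NodeValues)
+        Console.WriteLine($"  \{nodeValue.Key\} stored \{nodeValue.Value[0]\}");
+\}
+`}
+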
+ + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/indexing.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/indexing.mdx new file mode 100644 index 0000000000..3a1854fe91 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/indexing.mdx @@ -0,0 +1,48 @@ +--- +title: "Indexing Time Series" +hide_table_of_contents: true +sidebar_label: Indexing Time Series +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingCsharp from './_indexing-csharp.mdx'; +import IndexingPython from './_indexing-python.mdx'; +import IndexingPhp from './_indexing-php.mdx'; +import IndexingNodejs from './_indexing-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/overview.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/overview.mdx new file mode 100644 index 0000000000..0c5e2c0be7 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/overview.mdx @@ -0,0 +1,236 @@ +--- +title: "Time Series Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Time Series Overview + + +* A huge number of systems, including an expanding variety of IoT devices, + produce continuous streams of values that can be collected and used for + various needs. **Time series** are vectors of data points that are designated + to collect values over time, store them consecutively, and manage them + with high efficiency and performance. + +* RavenDB Time series can be managed and utilized using a thorough set of + [API methods](../../document-extensions/timeseries/client-api/overview.mdx), + the [Studio](../../studio/database/document-extensions/time-series.mdx), + and [various RavenDB features](../../document-extensions/timeseries/time-series-and-other-features.mdx). + +* Time series data is **compressed** to lower storage usage and transaction time. 
+ +* In this page: + * [Overview](../../document-extensions/timeseries/overview.mdx#overview) + * [RavenDB's Time Series Implementation](../../document-extensions/timeseries/overview.mdx#ravendbs-time-series-implementation) + * [Distributed Time Series](../../document-extensions/timeseries/overview.mdx#distributed-time-series) + * [Time Series as Document Extensions](../../document-extensions/timeseries/overview.mdx#time-series-as-document-extensions) + * [Time Series Features](../../document-extensions/timeseries/overview.mdx#time-series-features) + * [Time Series Data](../../document-extensions/timeseries/overview.mdx#time-series-data) + * [Separate Name and Data Storage](../../document-extensions/timeseries/overview.mdx#separate-name-and-data-storage) + * [Time Series Segments](../../document-extensions/timeseries/overview.mdx#time-series-segments) + * [Transactions Performance](../../document-extensions/timeseries/overview.mdx#transactions-performance) + * [Common-Queries Performance](../../document-extensions/timeseries/overview.mdx#common-queries-performance) + * [Time Series Entries](../../document-extensions/timeseries/overview.mdx#time-series-entries) + * [Timestamps](../../document-extensions/timeseries/overview.mdx#timestamps) + * [Values](../../document-extensions/timeseries/overview.mdx#values) + * [Tags](../../document-extensions/timeseries/overview.mdx#tags) + + +## Overview + +Time series can be **aggregated** and **queried** to illustrate process +behavior, predict future developments, track noticeable value changes, +and create other helpful statistics. + +Here are a few examples for value streams that can be easily and effectively +handled by time series. + +* _A sequence of heart rate values can be collected from a smart + wrist-watch_, and be used to build a person's training program. +* _Weather-stations' measurements_ collected over a chosen time period + can be compared to equivalent past periods to predict the weather. +* _Bandwidth usage reports of a home cable modem monitor_ can be used + to build a better charging plan. +* _Coordinates sent by delivery trucks' GPS trackers_ can be collected + and analyzed to secure the vehicles and improve the service. +* _Daily changes in stock prices_ can be used to build investment plans. +#### RavenDB's Time Series Implementation + +Time series functionality is fully integrated into RavenDB's +distributed environment and document model. +#### Distributed Time Series + +Distributed clients and nodes can modify time series concurrently; +the modifications are merged by the cluster [without conflict](../../document-extensions/timeseries/design.mdx#no-conflicts). +#### Time Series as Document Extensions + +RavenDB's Time Series, like its +[distributed counters](../../document-extensions/counters/overview.mdx), +[attachments](../../document-extensions/attachments/what-are-attachments.mdx) +and [document revisions](../../document-extensions/revisions/overview.mdx), +are **document extensions**. + +* A time series always extends a single specific document. + The context and source of the time series can be kept clear this way, + and time series management can use the comfort and strength of the + document interface. + A barometer's specifications document, for example, can be the parent + document for a time series that is populated with measurements taken + by a barometer of this specification. + +* Like the other document extensions, time series can take part in fully + transactional operations. 
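+
+A minimal sketch (the `User` entity and its `LastSeen` field are assumed for illustration) of a document modification and a time series append committed together:
+
+
+{`using (var session = store.OpenSession())
+\{
+    var user = session.Load<User>("users/1-A");
+    user.LastSeen = DateTime.UtcNow; // document modification
+
+    session.TimeSeriesFor(user, "HeartRates")
+        .Append(DateTime.UtcNow, 72d, "watches/fitbit"); // time series append
+
+    // Both changes are committed in a single transaction
+    session.SaveChanges();
+\}
+`}
+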
+#### Time Series Features + +Notable time series features include - + +* **Highly-Efficient Storage Management** + Time series data is [compressed](../../document-extensions/timeseries/design.mdx#compression) + and [segmented](../../document-extensions/timeseries/overview.mdx#time-series-segments) to minimize storage usage and transmission time. +* **A Thorough Set of API Methods** + The [time series API](../../document-extensions/timeseries/client-api/overview.mdx) includes a variety of `session methods` and `store operations`. +* **Full GUI Support** + Time series can be viewed and managed using the [Studio](../../studio/database/document-extensions/time-series.mdx). +* **Time Series Indexing** + Time series can be [indexed](../../document-extensions/timeseries/indexing.mdx). +* **Time Series Querying and Aggregation** + * [High-performance common queries](../../document-extensions/timeseries/overview.mdx#common-queries-performance) + The results of a set of common queries are prepared in advance in time series segments' headers, + so the response to querying for a series minimum value, for example, is returned nearly instantly. + * [LINQ and raw RQL queries](../../document-extensions/timeseries/querying/overview-and-syntax.mdx) + Flexible queries and aggregations can be executed using LINQ expressions and raw RQL over + time series **timestamps**, **values**, and **tags**. +* **Including Time Series** + You can [include (pre-fetch) time series data](../../document-extensions/timeseries/client-api/session/include/overview.mdx) when loading or querying for documents. + Included data is held by the client's session, and is delivered to the user with no additional server calls. +* **Patching** + You can patch time series data into your documents. + Learn more in [Patching time series](../../document-extensions/timeseries/client-api/session/patch.mdx). +* **Rollup and Retention Policies** + * [Rollup Policies](../../document-extensions/timeseries/rollup-and-retention.mdx) + You can set time series rollup policies to aggregate large series into smaller sets by your definitions. + * [Retention Policies](../../document-extensions/timeseries/rollup-and-retention.mdx) + You can set time series retention policies to automatically remove time series entries that have reached their expiration date/time. + + + +## Time Series Data + +Time series **names** are kept in their parent documents' metadata, while their +**data** is kept [separately](../../document-extensions/timeseries/overview.mdx#separate-name-and-data-storage). +Time series data is **compressed** and composed of consecutive +[segments](../../document-extensions/timeseries/overview.mdx#time-series-segments) and +[entries](../../document-extensions/timeseries/overview.mdx#time-series-entries). + + + +## Separate Name and Data Storage + +The separation of names and data prevents time series value updates from +invoking document-change events, keeping documents' availability and performance +whatever size their time series grow to be and however frequent their value-updates +are. + + + +## Time Series Segments + +Time series are composed of consecutive **segments**. +When a time series is created, its values are held in a single segment. +As the number of values grows (or when a certain amount of time has passed +since the last entry appendage), segments are added to the series. + + +Segments are managed automatically by RavenDB, clients do not need to do +anything in this regard. 
+ +#### Transactions Performance + +Time series segmentation heightens performance and minimizes transaction +and query time, since only the relevant segments of even a very long series +would be retrieved and queried, and only relevant segments would be updated. +#### Common Queries Performance +Segmentation also helps provide results for common queries extremely +fast, since results for such queries as `Min`, `Max` and others are +automatically stored and updated in segment headers, and are always +available for instant retrieval. + + + +## Time Series Entries + +Each time series segment is composed of consecutive **time series entries**. +Each entry is composed of a **timestamp**, 1 to 32 **values**, and an **optional tag**. +#### Timestamps + + +A single `DateTime` timestamp marks each entry in millisecond precision. + + +Timestamps are always indicated using [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time). + +Timestamps, and therefore time series entries, are always ordered **by time**, +from the oldest timestamp to the newest. +E.g. in a heart rate time series, timestamps would indicate the time in which each +heart rate measurement has been taken. +#### Values + + +Up to 32 `double` **values** can be appended per-entry. + + +We allow storing as many as 32 values per entry, since appending multiple +values may be a requirement for some time series. Here are a few examples. + +* A heart-rate time series + An entry with a single value (the heart-rate measurement taken by + a smart wrist-watch) is added to the time series every minute. + +* A truck-route time series + An entry with 2 values (the latitude and longitude reported by + a GPS device) is added to the time series every minute. + +* A stock-price time series + An entry with 5 values (stock price when the trade starts and + ends, its highest and lowest prices during the day, and its daily + trade volume) is added to the time series every day. +#### Tags + + +A single **optional** `string` tag can be added per entry. + + +Tags are designated to provide information regarding their entries. + +* **Descriptive Tags** + A tag can be a **short descriptive text**. + +* **Reference Tags** + A tag can also contain a document's ID, and function as a **reference to this document**. + + A reference-tag is preferable when we want the tag to be very short and yet refer us + to an unlimited source of information. + + Reference-tags can be used to [filter time series data](../../document-extensions/timeseries/querying/filtering.mdx#using-tags-as-references---) + during a query. + **E.g.**, the query can - + **1.** load time series entries whose tags refer to device-specification documents. + **2.** retrieve and examine the specification document referred to by each entry. + **3.** project to the client only values measured by Japanese devices. + + + Prefer re-using a few tags many times over using many unique tags, + to minimize memory and storage usage and optimize time series performance. 
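+
+A minimal sketch (document IDs, series names, and the `baseline` timestamp are assumed for illustration) of appending entries composed of a timestamp, one or more values, and an optional reference tag:
+
+
+{`using (var session = store.OpenSession())
+\{
+    // A single value per entry, tagged with a reference to a device document
+    session.TimeSeriesFor("users/1-A", "HeartRates")
+        .Append(baseline.AddMinutes(1), 72d, "watches/fitbit");
+
+    // Two values per entry (latitude and longitude), with no tag
+    session.TimeSeriesFor("trucks/1-A", "RoutePoints")
+        .Append(baseline.AddMinutes(1), new double[] \{ 40.7128, -74.0060 \});
+
+    session.SaveChanges();
+\}
+`}
+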
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_aggregation-and-projections-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_aggregation-and-projections-csharp.mdx new file mode 100644 index 0000000000..4732ebbdf5 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_aggregation-and-projections-csharp.mdx @@ -0,0 +1,590 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Time series queries can easily generate powerful statistics by applying an aggregation function + (such as Min, Max, Count, Average, etc.) to a group of entries within a chosen time frame, + such as an hour or a week. + +* For an overview of the available time series queries, please refer to [Time series querying](../../../document-extensions/timeseries/client-api/session/querying.mdx). + +* In this page: + * [Grouping and aggregation options](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#grouping-and-aggregation-options) + * [Examples](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#examples) + * [Aggregate entries with single value](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-with-single-value) + * [Aggregate entries with multiple values](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-with-multiple-values) + * [Aggregate entries without grouping by time frame](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-without-grouping-by-time-frame) + * [Aggregate entries filtered by referenced document](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-filtered-by-referenced-document) + * [Secondary grouping by tag](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#secondary-grouping-by-tag) + * [Group by dynamic criteria](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#group-by-dynamic-criteria) + * [Project document data in addition to aggregated data](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#project-document-data-in-addition-to-aggregated-data) + * [Syntax](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#syntax) + + +## Grouping and aggregation options + +* **Group entries by time frame**: + First, you can group the time series entries based on the specified time frame. + The following time units are available: + + * `Milliseconds` + * `Seconds` + * `Minutes` + * `Hours` + * `Days` + * `Months` + * `Quarters` + * `Years` + +* **Secondary grouping**: + After grouping by a time unit, you can also perform a _secondary grouping_ by the time series [tag](../../../document-extensions/timeseries/overview.mdx#tags). + +* **Aggregate values**: + You can select one or more aggregation functions to retrieve aggregated values for each group. + The resulting aggregated values are **projected** to the client in the query result. 
+ The following functions are available: + + * `Min()` - the lowest value + * `Max()` - the highest value + * `Sum()` - sum of all values + * `Average()` - the average value + * `First()` - value of first entry + * `Last()` - value of last entry + * `Percentile()` - The value under which the specified percentage of values fall + * `Slope()` - the change in value divided by the change in time between the first and last entries + * `StandardDeviation()` - the standard deviation of all values (a measure of how spread out the values are from the average) + * `Count()` - The result of Count() is always returned, even if you do not explicitly request it. + * When each entry has a single value: + Returns the number of entries. + * When each entry has multiple values: + Returns an array of the size of the number of values. + Each array element contains the number of entries having a measurement for that value. + + + +**Execute all aggregation functions**: +When a query groups entries by a time frame but does Not explicitly select a specific aggregation function, +the server will implicitly execute ALL available aggregation functions (except for Percentile, Slope, and StandardDeviation) for each group. + + + + +**Get aggregated values without grouping**: +When selecting aggregation functions WITHOUT first grouping the time series entries, +the aggregation calculations will be executed over the entire set of time series entries instead of per group of entries. + + + + +## Examples + +#### Aggregate entries with single value: + +* Each entry in the "HeartRates" time series within the Employees collection contains a single value. + +* In this example, for each employee document, we group entries from the "HeartRates" time series + by a 1-hour time frame and then project the lowest and highest values of each group. + + + + +{`var query = session.Query() + .Select(u => RavenQuery + .TimeSeries(u, "HeartRates") + // Call 'GroupBy' to group the time series entries by a time frame + .GroupBy(g => g.Hours(1)) + // Call 'Select' to choose aggregation functions that will be evaluated for each group + .Select(g => new + { + // Project the lowest and highest value of each group + Min = g.Min(), + Max = g.Max() + }) + .ToList()); + +// Execute the query +List results = query.ToList(); +`} + + + + +{`// Query collection Employees + from "Employees" + // Project the time series data: + select timeseries ( + from HeartRates + // Use 'group by' to group the time series entries by the specified time frame + group by "1 hour" // Group entries into consecutive 1-hour groups + // Use 'select' to choose aggregation functions that will be evaluated for each group + select min(), max() // Project the lowest and highest value of each group + ) +`} + + + +#### Aggregate entries with multiple values: + +* Each entry in the "StockPrices" time series within the Companies collection holds five values: + Values[0] - **Open** - stock price when trade opens + Values[1] - **Close** - stock price when trade ends + Values[2] - **High** - highest stock price during trade time + Values[3] - **Low** - lowest stock price during trade time + Values[4] - **Volume** - overall trade volume + +* In this example, for each company that is located in USA, we group entries from the "StockPrices" time series + by a 7-day time frame and then project the highest and lowest values of each group. 
+ + + + +{`var query = session.Query() + // Query only USA companies: + .Where(c => c.Address.Country == "USA") + .Select(u => RavenQuery + .TimeSeries(u, "StockPrices") + // Query stock price behavior when trade volume is high + .Where(ts => ts.Values[4] > 500000) + // Group entries into consecutive 7-day groups + .GroupBy(g => g.Days(7)) + // Project the lowest and highest value of each group + .Select(g => new + { + Min = g.Min(), + Max = g.Max() + }) + .ToList()); + +// Execute the query +List results = query.ToList(); +`} + + + + +{`declare timeseries SP(c) +{ + from c.StockPrices + where Values[4] > 500_000 // Query stock price behavior when trade volume is high + group by "7 days" // Group entries into consecutive 7-day groups + select max(), min() // Project the lowest and highest value of each group +} + +from "Companies" as c + // Query only USA companies: + where c.Address.Country == "USA" + // Project the time series data: + select SP(c) +`} + + + + +{`from "Companies" as c +// Query only USA companies: +where c.Address.Country = 'USA' + // Project the time series data: + select timeseries ( + from StockPrices + where Values[4] > 500000 // Query stock price behavior when trade volume is high + group by "7 day" // Group entries into consecutive 7-day groups + select max(), min() // Project the lowest and highest value of each group + ) +`} + + + + +* Since each entry holds 5 values, the query will project: + * 5 `Max` values for each group (the highest Values[0], highest Values[1], etc.) and + * 5 `Min` values for each group (the lowest Values[0], lowest Values[1], etc.) +#### Aggregate entries without grouping by time frame: + +* This example is similar to the one above, except that time series entries are Not grouped by a time frame. + +* The highest and lowest values are collected from the entire set of time series entries that match the query criteria. + + + + +{`var query = session.Query() + .Where(c => c.Address.Country == "USA") + .Select(u => RavenQuery + .TimeSeries(u, "StockPrices") + .Where(ts => ts.Values[4] > 500000) + // Project the lowest and highest value of ALL entries that match the query criteria + .Select(g => new + { + Min = g.Min(), + Max = g.Max() + }) + .ToList()); + +// Execute the query +List results = query.ToList(); +`} + + + + +{`declare timeseries SP(c) +{ + from c.StockPrices + where Values[4] > 500_000 + select max(), min() +} + +from "Companies" as c +where c.Address.Country == "USA" +select SP(c) +`} + + + + +* Since no grouping is done, results wil include the highest and lowest Open, Close, High, Low, and Volume values + for ALL entries in the time series that match the query criteria. +#### Aggregate entries filtered by referenced document: + +* The tag in each entry in the "StockPrices" series contains an Employee document ID. + +* In this example, we load this [referenced document](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-referenced-document) + and filter the entries by its properties. 
+ + + + +{`var query = session.Query() + .Select(u => RavenQuery + .TimeSeries(u, "StockPrices") + // Call 'LoadByTag' to load the document referenced by the tag + .LoadByTag() + // Filter entries: + // Process only entries that reference an employee with the Sales title + .Where((entry, employee) => employee.Title == "Sales Representative") + .GroupBy(g =>g.Months(1)) + .Select(g => new + { + Min = g.Min(), + Max = g.Max() + }) + .ToList()); + +// Execute the query +List results = query.ToList(); +`} + + + + +{`from "Companies" as c +select timeseries( + from StockPrices + // Load the referenced document into variable 'employee' + load Tag as employee + // Filter entries by the 'Title' field of the employee document + where employee.Title == "Sales Representative" + group by "1 month" + select min(), max() +) +`} + + + + +* Only entries that reference an employee with title 'Sales Representative' will be grouped by 1 month, + and the results will include the highest and lowest values for each group. +#### Secondary grouping by tag: + +* In this example, we perform secondary grouping by the entries' tags. + +* The tag in each entry in the "StockPrices" series contains an Employee document ID. + +* "StockPrices" entries are grouped by 6 months and then by the tags of the entries within that time frame. + + + + +{`var query = session.Query() + .Select(u => RavenQuery + .TimeSeries(u, "StockPrices") + .GroupBy(g => g + // First group by 6 months + .Months(6) + // Then group by tag + .ByTag()) + .Select(g => new + { + // Project the highest and lowest values of each group + Min = g.Min(), + Max = g.Max() + }) + .ToList()); + +// Execute the query +List results = query.ToList(); +`} + + + + +{`from "Companies" +select timeseries ( + from StockPrices + // Use the 'tag' keyword to perform a secondary grouping by the entries' tags. + group by "6 months", tag // Group by months and by tag + select max(), min() // Project the highest and lowest values of each group +) +`} + + + +#### Group by dynamic criteria: + +Starting in version 5.2, the LINQ method `GroupBy()` can take a switch statement or a method as an argument. + +* In this example, we pass a switch statement to the `GroupBy()` method. + + + + +{`var grouping = GroupingInterval.Year; // Dynamic input from the client + +var groupingAction = grouping switch +{ + GroupingInterval.Year => (Action)(builder => builder.Years(1)), + GroupingInterval.Month=> (Action)(builder => builder.Months(1)), + GroupingInterval.Day => (Action)(builder => builder.Days(1)) +}; + +var query = session.Query() + .Select(c => RavenQuery + .TimeSeries(c, "StockPrices") + .GroupBy(groupingAction) + .Select(g => new + { + Ave = g.Average() + }) + .ToList()); + +// Execute the query +List results = query.ToList(); +`} + + + + +{`public enum GroupingInterval +{ + Hour, + Day, + Month, + Year +} +`} + + + + +{`from "Companies" +select timeseries ( + from StockPrices + group by "1 year" + select average +) +`} + + + + +* In this example, we pass the `GroupingFunction()` metod to the `GroupBy()` method. 
+ + + + +{`var query = session.Query() + .Select(c => RavenQuery + .TimeSeries(c, "StockPrices") + .GroupBy(builder => GroupingFunction(builder, "year")) + .Select(g => new + { + Ave = g.Average() + }) + .ToList()); + +// Execute the query +List results = query.ToList(); +`} + + + + +{`public static ITimeSeriesAggregationOperations GroupingFunction(ITimePeriodBuilder builder, + string input) +{ + if (input == "year") + { + return builder.Years(1); + } + + if (input == "month") + { + return builder.Months(1); + } + + return builder.Days(1); +} +`} + + + + +{`from "Companies" +select timeseries ( + from StockPrices + group by "1 year" + select average +) +`} + + + +#### Project document data in addition to aggregated data: + +* In addition to projecting the aggregated time series data, you can project data from the parent document that contains the time series. + +* In this example, projecting the **company name** alongside the query results clearly associates each entry in the result set with a specific company. + This provides immediate context and makes it easier to interpret the time series data. + + + + +{`var query = session.Query() + .Select(company => new + { + // Projecting time series data: + MinMaxValues = RavenQuery.TimeSeries(company, "StockPrices") + .Where(e => e.Values[4] > 500_000) + .GroupBy(g => g.Days(7)) + .Select(x => new + { + Min = x.Min(), + Max = x.Max(), + }) + .ToList(), + + // Projecting the company name: + CompanyName = company.Name + }); + +// Execute the query +var results = query.ToList(); +`} + + + + +{`declare timeseries SP(c) +{ + from c.StockPrices + where Values[4] > 500_000 // Query stock price behavior when trade volume is high + group by "7 days" // Group entries into consecutive 7-day groups + select max(), min() // Project the lowest and highest value of each group +} + +from "Companies" as c +// Project the company's name along with the time series query results to make results more clear +select SP(c) as MinMaxValues, c.Name as CompanyName +`} + + + + +{`from "Companies" as c +select timeseries ( + from StockPrices + where Values[4] > 500000 + group by "7 day" + select max(), min() +) as MinMaxValues, +c.Name as CompanyName // Project property 'Name' from the company document +`} + + + + + + +## Syntax + + + +{`public interface ITimeSeriesQueryable +\{ + + ITimeSeriesQueryable Where(Expression> predicate); + + ITimeSeriesQueryable Offset(TimeSpan offset); + + ITimeSeriesQueryable Scale(double value); + + ITimeSeriesQueryable FromLast(Action timePeriod); + + ITimeSeriesQueryable FromFirst(Action timePeriod); + + ITimeSeriesLoadQueryable LoadByTag(); + + // GroupBy overloads: + ITimeSeriesAggregationQueryable GroupBy(string s); + ITimeSeriesAggregationQueryable GroupBy(Action timePeriod); + + // Select method: + ITimeSeriesAggregationQueryable Select( + Expression> selector); + + TimeSeriesRawResult ToList(); +\} +`} + + + + +{`public interface ITimePeriodBuilder +\{ + ITimeSeriesAggregationOperations Milliseconds(int duration); + ITimeSeriesAggregationOperations Seconds(int duration); + ITimeSeriesAggregationOperations Minutes(int duration); + ITimeSeriesAggregationOperations Hours(int duration); + ITimeSeriesAggregationOperations Days(int duration); + ITimeSeriesAggregationOperations Months(int duration); + ITimeSeriesAggregationOperations Quarters(int duration); + ITimeSeriesAggregationOperations Years(int duration); +\} +`} + + + + +{`public interface ITimeSeriesGrouping +\{ + double[] Max(); + double[] Min(); + double[] Sum(); + double[] 
Average(); + double[] First(); + double[] Last(); + long[] Count(); + double[] Percentile(double number); + double[] Slope(); + double[] StandardDeviation(); +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_aggregation-and-projections-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_aggregation-and-projections-nodejs.mdx new file mode 100644 index 0000000000..18451dc80c --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_aggregation-and-projections-nodejs.mdx @@ -0,0 +1,362 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Time series queries can easily generate powerful statistics by applying an aggregation function + (such as Min, Max, Count, Average, etc.) to a group of entries within a chosen time frame, + such as an hour or a week. + +* For an overview of the available time series queries, please refer to [Time series querying](../../../document-extensions/timeseries/client-api/session/querying.mdx). + +* In this page: + * [Grouping and aggregation options](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#grouping-and-aggregation-options) + * [Examples](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#examples) + * [Aggregate entries with single value](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-with-single-value) + * [Aggregate entries with multiple values](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-with-multiple-values) + * [Aggregate entries without grouping by time frame](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-without-grouping-by-time-frame) + * [Aggregate entries filtered by referenced document](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-filtered-by-referenced-document) + * [Secondary grouping by tag](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#secondary-grouping-by-tag) + * [Project document data in addition to aggregated data](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#project-document-data-in-addition-to-aggregated-data) + + +## Grouping and aggregation options + +* **Group entries by time frame**: + First, you can group the time series entries based on the specified time frame. + The following time units are available: + + * `milliseconds` ( milliseconds / milli / ms) + * `seconds` ( seconds/ second / s ) + * `minutes` ( minutes / minute / min ) + * `hours` ( hours / hour / h ) + * `days` ( days / day / d ) + * `months` ( months / month / mon / mo ) + * `quarters` ( quarters / quarter / q ) + * `years` ( years / year / y ) + +* **Secondary grouping**: + After grouping by a time unit, you can also perform a _secondary grouping_ by the time series [tag](../../../document-extensions/timeseries/overview.mdx#tags). + +* **Aggregate values**: + You can select one or more aggregation functions to retrieve aggregated values for each group. + The resulting aggregated values are **projected** to the client in the query result. 
+ The following functions are available: + + * `min()` - the lowest value + * `max()` - the highest value + * `sum()` - sum of all values + * `average()` - the average value + * `first()` - value of first entry + * `last()` - value of last entry + * `percentile()` - the value under which the specified percentage of values fall + * `slope()` - the change in value divided by the change in time between the first and last entries + * `standardDeviation()` - the standard deviation of all values (a measure of how spread out the values are from the average) + * `count()` - The result of Count() is always returned, even if you do not explicitly request it. + * When each entry has a single value: + Returns the number of entries. + * When each entry has multiple values: + Returns an array of the size of the number of values. + Each array element contains the number of entries having a measurement for that value. + + + +**Execute all aggregation functions**: +When a query groups entries by a time frame but does Not explicitly select a specific aggregation function, +the server will implicitly execute ALL available aggregation functions (except for Percentile, Slope, and StandardDeviation) for each group. + + + + +**Get aggregated values without grouping**: +When selecting aggregation functions WITHOUT first grouping the time series entries, +the aggregation calculations will be executed over the entire set of time series entries instead of per group of entries. + + + + +## Examples + +#### Aggregate entries with single value + +* Each entry in the "HeartRates" time series within the Employees collection contains a single value. + +* In this example, for each employee document, we group entries from the "HeartRates" time series + by a 1-hour time frame and then project the lowest and highest values of each group. + + + + +{`// Define the time series query part (expressed in RQL): +const tsQueryText = \` + from HeartRates + // Use 'group by' to group the time series entries by the specified time frame + group by "1 hour" + // Use 'select' to choose aggregation functions that will be evaluated + // Project the lowest and highest value of each group + select min(), max()\`; + +// Define the query: +const query = session.query({ collection: "employees" }) + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +// Execute the query: +const results = await query.all(); +`} + + + + +{`// Query collection Employees + from "Employees" + // Project the time series data: + select timeseries ( + from HeartRates + // Use 'group by' to group the time series entries by the specified time frame + group by "1 hour" // Group entries into consecutive 1-hour groups + // Use 'select' to choose aggregation functions that will be evaluated for each group + select min(), max() // Project the lowest and highest value of each group + ) +`} + + + +#### Aggregate entries with multiple values: + +* Each entry in the "StockPrices" time series within the Companies collection holds five values: + Values[0] - **Open** - stock price when trade opens + Values[1] - **Close** - stock price when trade ends + Values[2] - **High** - highest stock price during trade time + Values[3] - **Low** - lowest stock price during trade time + Values[4] - **Volume** - overall trade volume + +* In this example, for each company that is located in the USA, we group entries from the "StockPrices" time series + by a 7-day time frame and then project the highest and lowest values of each group.
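+
+(For context, a "StockPrices" entry with its five values and an employee tag might have been appended
+along these lines; this is an illustrative sketch only - the store variable, document ID, date, and
+numbers are assumptions, not part of the sample data:)
+
+
+{`// Assumes 'store' is an initialized DocumentStore
+const session = store.openSession();
+
+// Append one entry holding the five values: Open, Close, High, Low, Volume
+// The tag references an employee document
+session.timeSeriesFor("companies/91-A", "StockPrices")
+    .append(new Date(2020, 4, 17), [61.5, 62.9, 63.8, 61.2, 14700000], "employees/1-A");
+
+await session.saveChanges();
+`}
+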
+ + + + +{`const tsQueryText = \` + from StockPrices + // Query stock price behavior when trade volume is high + where Values[4] > 500000 + // Group entries into consecutive 7-day groups + group by "7 day" + // Project the lowest and highest value of each group + select max(), min()\`; + +const query = session.query({ collection: "companies" }) + .whereEquals("Address.Country", "USA") + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +const results = await query.all(); +`} + + + + +{`declare timeseries SP(c) +{ + from c.StockPrices + where Values[4] > 500_000 // Query stock price behavior when trade volume is high + group by "7 days" // Group entries into consecutive 7-day groups + select max(), min() // Project the lowest and highest value of each group +} + +from "Companies" as c + // Query only USA companies: + where c.Address.Country == "USA" + // Project the time series data: + select SP(c) +`} + + + + +{`from "Companies" as c +// Query only USA companies: +where c.Address.Country = 'USA' + // Project the time series data: + select timeseries ( + from StockPrices + where Values[4] > 500000 // Query stock price behavior when trade volume is high + group by "7 day" // Group entries into consecutive 7-day groups + select max(), min() // Project the lowest and highest value of each group + ) +`} + + + + +* Since each entry holds 5 values, the query will project: + * 5 `Max` values for each group (the highest Values[0], highest Values[1], etc.) and + * 5 `Min` values for each group (the lowest Values[0], lowest Values[1], etc.) +#### Aggregate entries without grouping by time frame: + +* This example is similar to the one above, except that time series entries are Not grouped by a time frame. + +* The highest and lowest values are collected from the entire set of time series entries that match the query criteria. + + + + +{`const tsQueryText = \` + from StockPrices + where Values[4] > 500_000 + select max(), min()\`; + +const query = session.query({ collection: "companies" }) + .whereEquals("Address.Country", "USA") + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +const results = await query.all(); +`} + + + + +{`declare timeseries SP(c) +{ + from c.StockPrices + where Values[4] > 500_000 + select max(), min() +} + +from "Companies" as c +where c.Address.Country == "USA" +select SP(c) +`} + + + + +* Since no grouping is done, results will include the highest and lowest Open, Close, High, Low, and Volume values + for ALL entries in the time series that match the query criteria. +#### Aggregate entries filtered by referenced document: + +* The tag in each entry in the "StockPrices" series contains an Employee document ID. + +* In this example, we load this [referenced document](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-referenced-document) + and filter the entries by its properties.
+ + + + +{`const tsQueryText = \` + from StockPrices + // Load the referenced document into variable 'employee' + load Tag as employee + // Filter entries by the 'Title' field of the employee document + where employee.Title == "Sales Representative" + group by "1 month" + select min(), max()\`; + +const query = session.query({ collection: "companies" }) + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +const results = await query.all(); +`} + + + + +{`from "Companies" as c +select timeseries( + from StockPrices + // Load the referenced document into variable 'employee' + load Tag as employee + // Filter entries by the 'Title' field of the employee document + where employee.Title == "Sales Representative" + group by "1 month" + select min(), max() +) +`} + + + + +* Only entries that reference an employee with title 'Sales Representative' will be grouped by 1 month, + and the results will include the highest and lowest values for each group. +#### Secondary grouping by tag: + +* In this example, we perform secondary grouping by the entries' tags. + +* The tag in each entry in the "StockPrices" series contains an Employee document ID. + +* "StockPrices" entries are grouped by 6 months and then by the tags of the entries within that time frame. + + + + +{`const tsQueryText = \` + from StockPrices + // Use the 'tag' keyword to perform a secondary grouping by the entries' tags + // Group by months and by tag + group by "6 months", tag + // Project the highest and lowest values of each group + select max(), min()\`; + +const query = session.query({ collection: "companies" }) + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +const results = await query.all(); +`} + + + + +{`from "Companies" +select timeseries ( + from StockPrices + // Use the 'tag' keyword to perform a secondary grouping by the entries' tags. + group by "6 months", tag // Group by months and by tag + select max(), min() // Project the highest and lowest values of each group +) +`} + + + +#### Project document data in addition to aggregated data: + +* In addition to projecting the aggregated time series data, you can project data from the parent document that contains the time series. + +* In this example, projecting the **company name** alongside the query results clearly associates each entry in the result set with a specific company. + This provides immediate context and makes it easier to interpret the time series data. 
+ + + + +{`declare timeseries SP(c) +{ + from c.StockPrices + where Values[4] > 500_000 // Query stock price behavior when trade volume is high + group by "7 days" // Group entries into consecutive 7-day groups + select max(), min() // Project the lowest and highest value of each group +} + +from "Companies" as c +// Project the company's name along with the time series query results to make results more clear +select SP(c) as MinMaxValues, c.Name as CompanyName +`} + + + + +{`from "Companies" as c +select timeseries ( + from StockPrices + where Values[4] > 500000 + group by "7 day" + select max(), min() +) as MinMaxValues, +c.Name as CompanyName // Project property 'Name' from the company document +`} + + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_aggregation-and-projections-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_aggregation-and-projections-php.mdx new file mode 100644 index 0000000000..bb03079745 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_aggregation-and-projections-php.mdx @@ -0,0 +1,368 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Time series queries can easily generate powerful statistics by applying an aggregation function + (such as Min, Max, Count, Average, etc.) to a group of entries within a chosen time frame, + such as an hour or a week. + +* In this page: + * [Grouping and aggregation options](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#grouping-and-aggregation-options) + * [Examples](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#examples) + * [Aggregate entries with single value](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-with-single-value) + * [Aggregate entries with multiple values](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-with-multiple-values) + * [Aggregate entries without grouping by time frame](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-without-grouping-by-time-frame) + * [Aggregate entries filtered by referenced document](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#aggregate-entries-filtered-by-referenced-document) + * [Secondary grouping by tag](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#secondary-grouping-by-tag) + * [Project document data in addition to aggregated data](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx#project-document-data-in-addition-to-aggregated-data) + + +## Grouping and aggregation options + +* **Group entries by time frame**: + First, you can group the time series entries based on the specified time frame. + The following time units are available: + + * `milliseconds` ( milliseconds / milli / ms) + * `seconds` ( seconds/ second / s ) + * `minutes` ( minutes / minute / min ) + * `hours` ( hours / hour / h ) + * `days` ( days / day / d ) + * `months` ( months / month / mon / mo ) + * `quarters` ( quarters / quarter / q ) + * `years` ( years / year / y ) + +* **Secondary grouping**: + After grouping by a time unit, you can also perform a _secondary grouping_ by the time series [tag](../../../document-extensions/timeseries/overview.mdx#tags). 
+ +* **Aggregate values**: + You can select one or more aggregation functions to retrieve aggregated values for each group. + The resulting aggregated values are **projected** to the client in the query result. + The following functions are available: + + * `min()` - the lowest value + * `max()` - the highest value + * `sum()` - sum of all values + * `average()` - the average value + * `first()` - value of first entry + * `last()` - value of last entry + * `percentile()` - The value under which the specified percentage of values fall + * `slope` - the change in value divided by the change in time between the first and last entries + * `standardDeviation()` - the standard deviation of all values (a measure of how spread out the values are from the average) + * `count()` - The result of Count() is always returned, even if you do not explicitly request it. + * When each entry has a single value: + Returns the number of entries. + * When each entry has multiple values: + Returns an array of the size of the number of values. + Each array element contains the number of entries having a measurement for that value. + + + +**Execute all aggregation functions**: +When a query groups entries by a time frame but does Not explicitly select a specific aggregation function, +the server will implicitly execute ALL available aggregation functions (except for Percentile, Slope, and StandardDeviation) for each group. + + + + +**Get aggregated values without grouping**: +When selecting aggregation functions WITHOUT first grouping the time series entries, +the aggregation calculations will be executed over the entire set of time series entries instead of per group of entries. + + + + +## Examples + +#### Aggregate entries with single value + +* Each entry in the "HeartRates" time series within the Employees collection contains a single value. + +* In this example, for each employee document, we group entries from the "HeartRates" time series + by a 1-hour time frame and then project the lowest and highest values of each group. 
+ + + + +{`// Define the time series query part (expressed in RQL): +$tsQueryText = " + from HeartRates + // Use 'group by' to group the time series entries by the specified time frame + group by \\"1 hour\\" + // Use 'select' to choose aggregation functions that will be evaluated + // Project the lowest and highest value of each group + select min(), max()"; + +$query = $session->query(Employee::class) + ->selectTimeSeries(TimeSeriesRawResult::class, function($b) use ($tsQueryText) { return $b->raw($tsQueryText);}); + +// Execute the query +/** @var array $results */ +$results = $query->toList(); +`} + + + + +{`// Query collection Employees + from "Employees" + // Project the time series data: + select timeseries ( + from HeartRates + // Use 'group by' to group the time series entries by the specified time frame + group by "1 hour" // Group entries into consecutive 1-hour groups + // Use 'select' to choose aggregation functions that will be evaluated for each group + select min(), max() // Project the lowest and highest value of each group + ) +`} + + + +#### Aggregate entries with multiple values: + +* Each entry in the "StockPrices" time series within the Companies collection holds five values: + Values[0] - **Open** - stock price when trade opens + Values[1] - **Close** - stock price when trade ends + Values[2] - **High** - highest stock price during trade time + Values[3] - **Low** - lowest stock price during trade time + Values[4] - **Volume** - overall trade volume + +* In this example, for each company that is located in the USA, we group entries from the "StockPrices" time series + by a 7-day time frame and then project the highest and lowest values of each group. + + + + +{`$tsQueryText = " + from StockPrices + // Query stock price behavior when trade volume is high + where Values[4] > 500000 + // Group entries into consecutive 7-day groups + group by \\"7 day\\" + // Project the lowest and highest value of each group + select max(), min()"; + +$query = $session->query(Company::class) + ->whereEquals("Address.Country", "USA") + ->selectTimeSeries(TimeSeriesRawResult::class, function($b) use ($tsQueryText) { return $b->raw($tsQueryText);}); + +// Execute the query +/** @var array $results */ +$results = $query->toList(); +`} + + + + +{`declare timeseries SP(c) +{ + from c.StockPrices + where Values[4] > 500_000 // Query stock price behavior when trade volume is high + group by "7 days" // Group entries into consecutive 7-day groups + select max(), min() // Project the lowest and highest value of each group +} + +from "Companies" as c + // Query only USA companies: + where c.Address.Country == "USA" + // Project the time series data: + select SP(c) +`} + + + + +{`from "Companies" as c +// Query only USA companies: +where c.Address.Country = 'USA' + // Project the time series data: + select timeseries ( + from StockPrices + where Values[4] > 500000 // Query stock price behavior when trade volume is high + group by "7 day" // Group entries into consecutive 7-day groups + select max(), min() // Project the lowest and highest value of each group + ) +`} + + + + +* Since each entry holds 5 values, the query will project: + * 5 `Max` values for each group (the highest Values[0], highest Values[1], etc.) and + * 5 `Min` values for each group (the lowest Values[0], lowest Values[1], etc.) +#### Aggregate entries without grouping by time frame: + +* This example is similar to the one above, except that time series entries are Not grouped by a time frame.
+ +* The highest and lowest values are collected from the entire set of time series entries that match the query criteria. + + + + +{`$tsQueryText = " + from StockPrices + where Values[4] > 500_000 + select max(), min()"; + +$query = $session->query(Company::class) + ->whereEquals("Address.Country", "USA") + ->selectTimeSeries(TimeSeriesRawResult::class, function($b) use ($tsQueryText) { return $b->raw($tsQueryText);}); + +// Execute the query +/** @var array $results */ +$results = $query->toList(); +`} + + + + +{`declare timeseries SP(c) +{ + from c.StockPrices + where Values[4] > 500_000 + select max(), min() +} + +from "Companies" as c +where c.Address.Country == "USA" +select SP(c) +`} + + + + +* Since no grouping is done, results will include the highest and lowest Open, Close, High, Low, and Volume values + for ALL entries in the time series that match the query criteria. +#### Aggregate entries filtered by referenced document: + +* The tag in each entry in the "StockPrices" series contains an Employee document ID. + +* In this example, we load this [referenced document](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-referenced-document) + and filter the entries by its properties. + + + + +{`$tsQueryText = " + from StockPrices + // Load the referenced document into variable 'employee' + load Tag as employee + // Filter entries by the 'Title' field of the employee document + where employee.Title == \\"Sales Representative\\" + group by \\"1 month\\" + select min(), max()"; + +$query = $session->query(Company::class) + ->selectTimeSeries(TimeSeriesRawResult::class, function($b) use ($tsQueryText) { return $b->raw($tsQueryText);}); + +// Execute the query +/** @var array $results */ +$results = $query->toList(); +`} + + + + +{`from "Companies" as c +select timeseries( + from StockPrices + // Load the referenced document into variable 'employee' + load Tag as employee + // Filter entries by the 'Title' field of the employee document + where employee.Title == "Sales Representative" + group by "1 month" + select min(), max() +) +`} + + + + +* Only entries that reference an employee with title 'Sales Representative' will be grouped by 1 month, + and the results will include the highest and lowest values for each group. +#### Secondary grouping by tag: + +* In this example, we perform secondary grouping by the entries' tags. + +* The tag in each entry in the "StockPrices" series contains an Employee document ID. + +* "StockPrices" entries are grouped by 6 months and then by the tags of the entries within that time frame. + + + + +{`$tsQueryText = " + from StockPrices + // Use the 'tag' keyword to perform a secondary grouping by the entries' tags + // Group by months and by tag + group by \\"6 months\\", tag + // Project the highest and lowest values of each group + select max(), min()"; + +$query = $session->query(Company::class) + ->selectTimeSeries(TimeSeriesRawResult::class, function($b) use ($tsQueryText) { return $b->raw($tsQueryText);}); + +// Execute the query +/** @var array $results */ +$results = $query->toList(); +`} + + + + +{`from "Companies" +select timeseries ( + from StockPrices + // Use the 'tag' keyword to perform a secondary grouping by the entries' tags.
+ group by "6 months", tag // Group by months and by tag + select max(), min() // Project the highest and lowest values of each group +) +`} + + + +#### Project document data in addition to aggregated data: + +* In addition to projecting the aggregated time series data, you can project data from the parent document that contains the time series. + +* In this example, projecting the **company name** alongside the query results clearly associates each entry in the result set with a specific company. + This provides immediate context and makes it easier to interpret the time series data. + + + + +{`declare timeseries SP(c) +{ + from c.StockPrices + where Values[4] > 500_000 // Query stock price behavior when trade volume is high + group by "7 days" // Group entries into consecutive 7-day groups + select max(), min() // Project the lowest and highest value of each group +} + +from "Companies" as c +// Project company name along with time series query results to make the results clearer +select SP(c) as MinMaxValues, c.Name as CompanyName +`} + + + + +{`from "Companies" as c +select timeseries ( + from StockPrices + where Values[4] > 500000 + group by "7 day" + select max(), min() +) as MinMaxValues, +c.Name as CompanyName // Project property 'Name' from the company document +`} + + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_category_.json b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_category_.json new file mode 100644 index 0000000000..73651575e4 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": "Querying" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-csharp.mdx new file mode 100644 index 0000000000..0b03ef2cdd --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-csharp.mdx @@ -0,0 +1,288 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Queries can retrieve data from the entire time series or from a specific range of entries, + such as those collected in the last 7 days. + +* For an overview of the available time series queries and their syntax, please refer to [Time series querying](../../../document-extensions/timeseries/client-api/session/querying.mdx). + +* In this page: + * [Choose range in a query](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#choose-range-in-a-query) + * [Specify range](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#specify-range) + * [Retrieve first or last entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#retrieve-first-or-last-entries) + * [Choose range - RQL syntax](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#choose-range---rql-syntax) + * [`between` and `and`](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#and-) + * [`first` and `last`](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#and--1) + + +## Choose range in a query + +#### Specify range: + +* Provide 'from' & 'to' DateTime values to the time series query to retrieve entries only from that range (inclusive).
+ Omitting these parameters will retrieve the entire series. + +* The provided DateTime values are handled by the server as UTC. + The client does Not perform any conversion to UTC prior to sending the request to the server. + +* In this example, we specify a 10-minute range from which we retrieve UK employees "HeartRates" entries. + + + + +{`var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var query = session + .Query<Employee>() + .Where(employee => employee.Address.Country == "UK") + .Select(employee => RavenQuery + // Specify the range: + // pass 'from' and 'to' DateTime values to the 'TimeSeries' method + .TimeSeries(employee, "HeartRates", from, to) + // Call 'Offset' to adjust the timestamps in the returned results to your local time (optional) + .Offset(TimeSpan.FromHours(3)) + .ToList()); + +// Execute the query +List<TimeSeriesRawResult> result = query.ToList(); +`} + + + + +{`var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var query = session.Advanced + .DocumentQuery<Employee>() + .WhereEquals(employee => employee.Address.Country, "UK") + .SelectTimeSeries(builder => builder.From("HeartRates") + // Specify the range: + // pass 'from' and 'to' DateTime values to the 'Between' method + .Between(from, to) + // Call 'Offset' to adjust the timestamps in the returned results to your local time (optional) + .Offset(TimeSpan.FromHours(3)) + .ToList()); + +// Execute the query +List<TimeSeriesRawResult> result = query.ToList(); +`} + + + + +{`var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); + +var query = session.Advanced + .RawQuery<TimeSeriesRawResult>(@" + from Employees + where Address.Country == 'UK' + select timeseries ( + from HeartRates + between $from and $to + offset '03:00' + )") + .AddParameter("from", baseTime) + .AddParameter("to", baseTime.AddMinutes(10)); + +// Execute the query +List<TimeSeriesRawResult> results = query.ToList(); +`} + + + + +{`from "Employees" as employee +where employee.Address.Country == "UK" +select timeseries( + from employee.HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + offset "03:00" +) +`} + + + +#### Retrieve first or last entries: + +* Use `FromFirst()` to specify the time frame from the start of the time series. + Use `FromLast()` to specify the time frame from the end of the time series. + A query function can use either `FromFirst` or `FromLast`, but not both. + +* In this example, we select only entries in the last 30 minutes of the "HeartRates" time series.
+ + + + +{`var query = session + .Query<Employee>() + .Select(p => RavenQuery + .TimeSeries(p, "HeartRates") + // Call 'FromLast' + // specify the time frame from the end of the time series + .FromLast(x => x.Minutes(30)) + .Offset(TimeSpan.FromHours(3)) + .ToList()); + +// Execute the query +List<TimeSeriesRawResult> result = query.ToList(); +`} + + + + +{`var query = session.Advanced + .DocumentQuery<Employee>() + .SelectTimeSeries(builder => builder.From("HeartRates") + // Call 'FromLast' + // specify the time frame from the end of the time series + .FromLast(x => x.Minutes(30)) + .Offset(TimeSpan.FromHours(3)) + .ToList()); + +// Execute the query +List<TimeSeriesRawResult> result = query.ToList(); +`} + + + + +{`var query = session.Advanced + // Provide the raw RQL to the RawQuery method: + .RawQuery<TimeSeriesRawResult>(@" + from Employees + select timeseries ( + from HeartRates + last 30 min + offset '03:00' + )"); + +// Execute the query +List<TimeSeriesRawResult> results = query.ToList(); +`} + + + + +{`from "Employees" as e +select timeseries( + from e.HeartRates + last 30 min + offset "03:00" +) +`} + + + + + + +## Choose range - RQL syntax + +#### `between` and `and`: + +* Use the `between` and `and` keywords to retrieve time series entries from the specified range (inclusive). + Provide the timestamps in UTC format. + E.g.: + + + +{`from "Employees" +where Address.Country == "UK" +select timeseries( + from HeartRates + between "2020-05-17T00:00:00.0000000Z" // start of range + and "2020-05-17T01:00:00.0000000Z" // end of range +) + +// Results will include only time series entries within the specified range for employees from UK. +`} + + + + +{`declare timeseries getHeartRates(employee) +{ + from HeartRates + between "2020-05-17T00:00:00.0000000Z" // start of range + and "2020-05-17T01:00:00.0000000Z" // end of range +} + +from "Employees" as e +where e.Address.Country == "UK" +select getHeartRates(e) + +// Results will include only time series entries within the specified range for employees from UK. +`} + + + + +* RQL queries can be executed from Studio's [query view](../../../studio/database/queries/query-view.mdx). + Using Studio, you can apply parameters as follows for a clearer query. + + +{`$from = "2020-05-17T00:00:00.0000000Z" +$to = "2020-05-17T01:00:00.0000000Z" + +from "Employees" +where Address.Country == "UK" +select timeseries( + from HeartRates + between $from and $to // using parameters +) +`} + + +#### `first` and `last`: + +* Use `first` to specify the time frame from the start of the time series. + Use `last` to specify the time frame from the end of the time series. + A query function can use either `first` or `last`, but not both. E.g.: + + + +{`// Retrieve all entries from the last day, starting from the end of time series "HeartRates" +from "Employees" +select timeseries( + from HeartRates + last 1 day +) +`} + + + + + +{`// Retrieve the first 10 minutes of entries from the beginning of time series "HeartRates" +from "Employees" +select timeseries( + from HeartRates + first 10 min +) +`} + + + +* The range is specified using a whole number of one of the following units. + + * **seconds** ( seconds/ second / s ) + * **minutes** ( minutes / minute / min ) + * **hours** ( hours / hour / h ) + * **days** ( days / day / d ) + * **months** ( months / month / mon / mo ) + * **quarters** ( quarters / quarter / q ) + * **years** ( years / year / y ) + * Note: **milliseconds** are currently not supported by 'first' and 'last' in a time series query.
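+
+As a side note, the `first` / `last` keywords correspond to the `FromFirst` / `FromLast` builders of the
+strongly-typed API shown earlier on this page. A minimal sketch, assuming the same `Employee` documents
+and "HeartRates" series used in the examples above:
+
+
+{`var query = session
+    .Query<Employee>()
+    .Select(e => RavenQuery
+        .TimeSeries(e, "HeartRates")
+        // Take only the first 10 minutes of entries, counted from the start of the series
+        .FromFirst(x => x.Minutes(10))
+        .ToList());
+
+// Execute the query
+List<TimeSeriesRawResult> results = query.ToList();
+`}
+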
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-nodejs.mdx new file mode 100644 index 0000000000..43a4162c15 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-nodejs.mdx @@ -0,0 +1,323 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Queries can retrieve data from the entire time series or from a specific range of entries, + such as those collected in the last 7 days. + +* For an overview of the available time series queries and their syntax, please refer to [Time series querying](../../../document-extensions/timeseries/client-api/session/querying.mdx). + +* In this page: + * [Choose range in a query](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#choose-range-in-a-query) + * [Specify range](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#specify-range) + * [Retrieve first or last entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#retrieve-first-or-last-entries) + * [Choose range - RQL syntax](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#choose-range---rql-syntax) + * [`between` and `and`](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#and-) + * [`first` and `last`](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#and--1) + + +## Choose range in a query + +#### Specify range: + +* Provide 'from' & 'to' DateTime values to the time series query to retrieve entries only from that range (inclusive). + Omitting these parameters will retrieve the entire series. + +* The provided DateTime values are handled by the server as UTC. + The client does Not perform any conversion to UTC prior to sending the request to the server. + +* Note: calling 'offset' will only adjust the timestamps in the returned results to your local time (optional). + +* In this example, we specify a 10-minute range from which we retrieve UK employees "HeartRates" entries. 
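+
+(To illustrate the UTC note above with a hypothetical snippet: since the server reads the supplied
+values as-is, building the range explicitly in UTC avoids local-timezone surprises. The dates below are
+illustrative only:)
+
+
+{`// Illustration only: build the query range explicitly in UTC
+const from = new Date(Date.UTC(2020, 4, 17, 0, 0, 0));  // 2020-05-17T00:00:00Z
+const to = new Date(Date.UTC(2020, 4, 17, 0, 10, 0));   // 2020-05-17T00:10:00Z
+`}
+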
+ + + + +{`// Define the time series query part (expressed in RQL): +const tsQueryText = \` + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + offset "03:00"\`; + +// Define the query: +const query = session.query({ collection: "employees" }) + .whereEquals("Address.Country", "UK") + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +// Execute the query: +const results = await query.all(); + +// Access entries results: +rawResults = results[0]; +assert.equal((rawResults instanceof TimeSeriesRawResult), true); + +const tsEntry = rawResults.results[0]; +assert.equal((tsEntry instanceof TimeSeriesEntry), true); + +const tsValue = tsEntry.value; +`} + + + + +{`const from = new Date("2020-05-17T00:00:00.0000000"); +const to = new Date("2020-05-17T00:10:00.0000000"); + +// Define the time series query part (expressed in RQL): +const tsQueryText = \` + from HeartRates + between $from and $to + offset "03:00"\`; + +// Define the query: +const query = session.query({ collection: "employees" }) + .whereEquals("Address.Country", "UK") + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult) + .addParameter("from", from) + .addParameter("to", to); + +// Execute the query: +const results = await query.all(); + +// Access entries results: +rawResults = results[0]; +assert.equal((rawResults instanceof TimeSeriesRawResult), true); + +const tsEntry = rawResults.results[0]; +assert.equal((tsEntry instanceof TimeSeriesEntry), true); + +const tsValue = tsEntry.value; +`} + + + + +{`const rql = \` + from "Employees" as employee + where employee.Address.Country == "UK" + select timeseries( + from employee.HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + offset "03:00" + )\`; + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult); + +const result = await query.all(); +`} + + + + +{`const rql = \` + from "Employees" as employee + where employee.Address.Country == "UK" + select timeseries( + from employee.HeartRates + between $from and $to + offset "03:00" + )\`; + +const from = new Date("2020-05-17T00:00:00.0000000"); +const to = new Date("2020-05-17T00:10:00.0000000"); + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult) + .addParameter("from", from) + .addParameter("to", to); + +const result = await query.all(); +`} + + + + +{`// RQL: +from "employees" as employee +where employee.Address.Country == "UK" +select timeseries( + from employee.HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + offset "03:00" +) + +// RQL with parameters: +from "employees" +where Address.Country = $p0 +select timeseries( + from HeartRates + between $from and $to + offset "03:00" +) +{ + "p0": "UK", + "from": "2020-05-17T00:00:00.0000000", + "to": "2020-05-17T00:10:00.0000000" +} +`} + + + +#### Retrieve first or last entries: + +* Use `first` to specify the time frame from the start of the time series. + Use `last` to specify the time frame from the end of the time series. + A query function can use either `first` or `last`, but not both. + +* In this example, we select only entries in the last 30 minutes of the "HeartRates" time series. 
+ + + + +{`// Define the time series query part (expressed in RQL): +const tsQueryText = \` + from HeartRates + last 30 min + offset "03:00"\`; + +// Define the query: +const query = session.query({ collection: "employees" }) + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +// Execute the query: +const results = await query.all(); +`} + + + + +{`const rql = \` + from "Employees" as employee + select timeseries( + from employee.HeartRates + last 30 min + offset "03:00" + )\`; + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult); + +const result = await query.all(); +`} + + + + +{`from "Employees" as e +select timeseries( + from e.HeartRates + last 30 min + offset "03:00" +) +`} + + + + + + +## Choose range - RQL syntax + +#### `between` and `and`: + +* Use the `between` and `and` keywords to retrieve time series entries from the specified range (inclusive). + Provide the timestamps in UTC format. + E.g.: + + + +{`from "Employees" +where Address.Country == "UK" +select timeseries( + from HeartRates + between "2020-05-17T00:00:00.0000000Z" // start of range + and "2020-05-17T01:00:00.0000000Z" // end of range +) + +// Results will include only time series entries within the specified range for employees from UK. +`} + + + + +{`declare timeseries getHeartRates(employee) +{ + from HeartRates + between "2020-05-17T00:00:00.0000000Z" // start of range + and "2020-05-17T01:00:00.0000000Z" // end of range +} + +from "Employees" as e +where e.Address.Country == "UK" +select getHeartRates(e) + +// Results will include only time series entries within the specified range for employees from UK. +`} + + + + +* RQL queries can be executed from Studio's [query view](../../../studio/database/queries/query-view.mdx). + Using Studio, you can apply parameters as follows for a clearer query. + + +{`$from = "2020-05-17T00:00:00.0000000Z" +$to = "2020-05-17T01:00:00.0000000Z" + +from "Employees" +where Address.Country == "UK" +select timeseries( + from HeartRates + between $from and $to // using parameters +) +`} + + +#### `first` and `last`: + +* Use `first` to specify the time frame from the start of the time series. + Use `last` to specify the time frame from the end of the time series. + A query function can use either `first` or `last`, but not both. E.g. - + + + +{`// Retrieve all entries from the last day, starting from the end of time series "HeartRates" +from "Employees" +select timeseries( + from HeartRates + last 1 day +) +`} + + + + + +{`// Retrieve the first 10 minutes of entries from the beginning of time series "HeartRates" +from "Employees" +select timeseries( + from HeartRates + first 10 min +) +`} + + + +* The range is specified using a whole number of one of the following units. + + * **seconds** ( seconds/ second / s ) + * **minutes** ( minutes / minute / min ) + * **hours** ( hours / hour / h ) + * **days** ( days / day / d ) + * **months** ( months / month / mon / mo ) + * **quarters** ( quarters / quarter / q ) + * **years** ( years / year / y ) + * Note: **milliseconds** are currently not supported by 'first' and 'last' in a time series query. 
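+
+As a closing note, a minimal sketch of the equivalent `first` query from the Node.js client, assuming
+the same `employees` collection and "HeartRates" series as above:
+
+
+{`// Define the time series query part (expressed in RQL):
+const tsQueryText = \`
+    from HeartRates
+    first 10 min\`;
+
+// Define and execute the query:
+const query = session.query({ collection: "employees" })
+    .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult);
+
+const results = await query.all();
+`}
+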
+ + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-php.mdx new file mode 100644 index 0000000000..9af59bc0ce --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-php.mdx @@ -0,0 +1,167 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Queries can retrieve data from the entire time series or from a specific range of entries, + such as those collected in the last 7 days. + +* In this page: + * [Choose range in a query](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#choose-range-in-a-query) + * [Specify range](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#specify-range) + * [Retrieve first or last entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#retrieve-first-or-last-entries) + * [Choose range - RQL syntax](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#choose-range---rql-syntax) + * [`between` and `and`](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#and-) + * [`first` and `last`](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#and--1) + + +## Choose range in a query + +#### Specify range: + +* Provide 'from' & 'to' DateTime values to the time series query to retrieve entries only from that range (inclusive). + Omitting these parameters will retrieve the entire series. + +* The provided DateTime values are handled by the server as UTC. + The client does Not perform any conversion to UTC prior to sending the request to the server. + +* In this example, we specify a 10-minute range from which we retrieve UK employees "HeartRates" entries. + + + +{`from "Employees" as employee +where employee.Address.Country == "UK" +select timeseries( + from employee.HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + offset "03:00" +) +`} + + +#### Retrieve first or last entries: + +* Use `first` to specify the time frame from the start of the time series. + Use `last` to specify the time frame from the end of the time series. + A query function can use either `first` or `last`, but not both. + +* In this example, we select only entries in the last 30 minutes of the "HeartRates" time series. + + + +{`from "Employees" as e +select timeseries( + from e.HeartRates + last 30 min + offset "03:00" +) +`} + + + + + +## Choose range - RQL syntax + +#### `between` and `and`: + +* Use the `between` and `and` keywords to retrieve time series entries from the specified range (inclusive). + Provide the timestamps in UTC format. + E.g.: + + + +{`from "Employees" +where Address.Country == "UK" +select timeseries( + from HeartRates + between "2020-05-17T00:00:00.0000000Z" // start of range + and "2020-05-17T01:00:00.0000000Z" // end of range +) + +// Results will include only time series entries within the specified range for employees from UK. 
+`} + + + + +{`declare timeseries getHeartRates(employee) +{ + from HeartRates + between "2020-05-17T00:00:00.0000000Z" // start of range + and "2020-05-17T01:00:00.0000000Z" // end of range +} + +from "Employees" as e +where e.Address.Country == "UK" +select getHeartRates(e) + +// Results will include only time series entries within the specified range for employees from UK. +`} + + + + +* RQL queries can be executed from Studio's [query view](../../../studio/database/queries/query-view.mdx). + Using Studio, you can apply parameters as follows for a clearer query. + + +{`$from = "2020-05-17T00:00:00.0000000Z" +$to = "2020-05-17T01:00:00.0000000Z" + +from "Employees" +where Address.Country == "UK" +select timeseries( + from HeartRates + between $from and $to // using parameters +) +`} + + +#### `first` and `last`: + +* Use `first` to specify the time frame from the start of the time series. + Use `last` to specify the time frame from the end of the time series. + A query function can use either `first` or `last`, but not both. E.g. - + + + +{`// Retrieve all entries from the last day, starting from the end of time series "HeartRates" +from "Employees" +select timeseries( + from HeartRates + last 1 day +) +`} + + + + + +{`// Retrieve the first 10 minutes of entries from the beginning of time series "HeartRates" +from "Employees" +select timeseries( + from HeartRates + first 10 min +) +`} + + + +* The range is specified using a whole number of one of the following units. + + * **seconds** ( seconds/ second / s ) + * **minutes** ( minutes / minute / min ) + * **hours** ( hours / hour / h ) + * **days** ( days / day / d ) + * **months** ( months / month / mon / mo ) + * **quarters** ( quarters / quarter / q ) + * **years** ( years / year / y ) + * Note: **milliseconds** are currently not supported by 'first' and 'last' in a time series query. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-python.mdx new file mode 100644 index 0000000000..61ec973808 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_choosing-query-range-python.mdx @@ -0,0 +1,169 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Queries can retrieve data from the entire time series or from a specific range of entries, + such as those collected in the last 7 days. + +* For an overview of the available time series queries and their syntax, please refer to [Time series querying](../../../document-extensions/timeseries/client-api/session/querying.mdx). 
+ +* In this page: + * [Choose range in a query](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#choose-range-in-a-query) + * [Specify range](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#specify-range) + * [Retrieve first or last entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#retrieve-first-or-last-entries) + * [Choose range - RQL syntax](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#choose-range---rql-syntax) + * [`between` and `and`](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#and-) + * [`first` and `last`](../../../document-extensions/timeseries/querying/choosing-query-range.mdx#and--1) + + +## Choose range in a query + +#### Specify range: + +* Provide 'from' & 'to' DateTime values to the time series query to retrieve entries only from that range (inclusive). + Omitting these parameters will retrieve the entire series. + +* The provided DateTime values are handled by the server as UTC. + The client does Not perform any conversion to UTC prior to sending the request to the server. + +* In this example, we specify a 10-minute range from which we retrieve UK employees "HeartRates" entries. + + + +{`from "Employees" as employee +where employee.Address.Country == "UK" +select timeseries( + from employee.HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + offset "03:00" +) +`} + + +#### Retrieve first or last entries: + +* Use `first` to specify the time frame from the start of the time series. + Use `last` to specify the time frame from the end of the time series. + A query function can use either `first` or `last`, but not both. + +* In this example, we select only entries in the last 30 minutes of the "HeartRates" time series. + + + +{`from "Employees" as e +select timeseries( + from e.HeartRates + last 30 min + offset "03:00" +) +`} + + + + + +## Choose range - RQL syntax + +#### `between` and `and`: + +* Use the `between` and `and` keywords to retrieve time series entries from the specified range (inclusive). + Provide the timestamps in UTC format. + E.g.: + + + +{`from "Employees" +where Address.Country == "UK" +select timeseries( + from HeartRates + between "2020-05-17T00:00:00.0000000Z" // start of range + and "2020-05-17T01:00:00.0000000Z" // end of range +) + +// Results will include only time series entries within the specified range for employees from UK. +`} + + + + +{`declare timeseries getHeartRates(employee) +{ + from HeartRates + between "2020-05-17T00:00:00.0000000Z" // start of range + and "2020-05-17T01:00:00.0000000Z" // end of range +} + +from "Employees" as e +where e.Address.Country == "UK" +select getHeartRates(e) + +// Results will include only time series entries within the specified range for employees from UK. +`} + + + + +* RQL queries can be executed from Studio's [query view](../../../studio/database/queries/query-view.mdx). + Using Studio, you can apply parameters as follows for a clearer query. + + +{`$from = "2020-05-17T00:00:00.0000000Z" +$to = "2020-05-17T01:00:00.0000000Z" + +from "Employees" +where Address.Country == "UK" +select timeseries( + from HeartRates + between $from and $to // using parameters +) +`} + + +#### `first` and `last`: + +* Use `first` to specify the time frame from the start of the time series. + Use `last` to specify the time frame from the end of the time series. + A query function can use either `first` or `last`, but not both. E.g. 
- + + + +{`// Retrieve all entries from the last day, starting from the end of time series "HeartRates" +from "Employees" +select timeseries( + from HeartRates + last 1 day +) +`} + + + + + +{`// Retrieve the first 10 minutes of entries from the beginning of time series "HeartRates" +from "Employees" +select timeseries( + from HeartRates + first 10 min +) +`} + + + +* The range is specified using a whole number of one of the following units. + + * **seconds** ( seconds/ second / s ) + * **minutes** ( minutes / minute / min ) + * **hours** ( hours / hour / h ) + * **days** ( days / day / d ) + * **months** ( months / month / mon / mo ) + * **quarters** ( quarters / quarter / q ) + * **years** ( years / year / y ) + * Note: **milliseconds** are currently not supported by 'first' and 'last' in a time series query. + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-csharp.mdx new file mode 100644 index 0000000000..e1d0247d44 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-csharp.mdx @@ -0,0 +1,361 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In addition to limiting time series query results by specifying the [range of entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx) to retrieve, + you can filter the time series entries by their **values**, **tag**, or by the contents of a **document referenced in the tag**. + +* For an overview of the available time series queries, please refer to [Time series querying](../../../document-extensions/timeseries/client-api/session/querying.mdx). + +* In this page: + * [Filter by value](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-value) + * [Filter by tag](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-tag) + * [Filter by referenced document](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-referenced-document) + + +## Filter by value + +* A time series entry can have up to 32 [values](../../../document-extensions/timeseries/overview.mdx#values). + +* A time series query can filter entries based on these values. 
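+
+(For context, the examples below assume "HeartRates" entries appended roughly as follows; this is an
+illustrative sketch only - the store variable, document ID, timestamps, values, and tags are
+assumptions, not part of the sample data:)
+
+
+{`using (var session = store.OpenSession()) // assumes an initialized DocumentStore
+{
+    var tsf = session.TimeSeriesFor("employees/1-A", "HeartRates");
+
+    // Each Append call adds one entry: timestamp, value, and an optional tag
+    tsf.Append(new DateTime(2020, 5, 17, 00, 00, 00), 72d, "watches/fitbit");
+    tsf.Append(new DateTime(2020, 5, 17, 00, 01, 00), 78d, "watches/fitbit");
+
+    session.SaveChanges();
+}
+`}
+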
+ + + + +{`// For example, in the "HeartRates" time series, +// retrieve only entries where the value exceeds 75 BPM + +var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var query = session.Query<Employee>() + .Select(employee => RavenQuery + .TimeSeries(employee, "HeartRates", from, to) + // Call 'Where' to filter entries by the value + .Where(ts => ts.Value > 75) + .ToList()); + +var results = query.ToList(); +`} + + + + +{`// For example, in the "HeartRates" time series, +// retrieve only entries where the value exceeds 75 BPM + +var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var query = session.Advanced.DocumentQuery<Employee>() + .SelectTimeSeries(builder => builder.From("HeartRates") + .Between(from, to) + // Call 'Where' to filter entries by the value + .Where(ts => ts.Value > 75) + .ToList()); + +var results = query.ToList(); +`} + + + + +{`// For example, in the "HeartRates" time series, +// retrieve only entries where the value exceeds 75 BPM + +var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var query = session.Advanced.RawQuery<TimeSeriesRawResult>(@" + from Employees + select timeseries ( + from HeartRates + between $from and $to + // Use the 'where Value' clause to filter by the value + where Value > 75 + )") + .AddParameter("from", from) + .AddParameter("to", to); + +var results = query.ToList(); +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Value" clause to filter entries by the value + where Value > 75 +) +`} + + + + + + +## Filter by tag + +* A time series entry can have an optional [tag](../../../document-extensions/timeseries/overview.mdx#tags). + +* A time series query can filter entries based on this tag.
+ + + + +{`// Retrieve only entries where the tag string content is "watches/fitbit" + +var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var query = session.Query<Employee>() + .Select(employee => RavenQuery + .TimeSeries(employee, "HeartRates", from, to) + // Call 'Where' to filter entries by the tag + .Where(ts => ts.Tag == "watches/fitbit") + .ToList()); + +var results = query.ToList(); +`} + + + + +{`// Retrieve only entries where the tag string content is "watches/fitbit" + +var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var query = session.Advanced.DocumentQuery<Employee>() + .SelectTimeSeries(builder => builder.From("HeartRates") + .Between(from, to) + // Call 'Where' to filter entries by the tag + .Where(ts => ts.Tag == "watches/fitbit") + .ToList()); + +var results = query.ToList(); +`} + + + + +{`// Retrieve only entries where the tag string content is "watches/fitbit" + +var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var query = session.Advanced.RawQuery<TimeSeriesRawResult>(@" + from Employees + select timeseries ( + from HeartRates + between $from and $to + // Use the 'where Tag' clause to filter entries by the tag string content + where Tag == 'watches/fitbit' + )") + .AddParameter("from", from) + .AddParameter("to", to); + +var results = query.ToList(); +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Tag" clause to filter entries by the tag string content + where Tag == "watches/fitbit" +) +`} + + + + + + + +{`// Retrieve only entries where the tag string content is one of several options + +var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var optionalTags = new[] {"watches/apple", "watches/samsung", "watches/xiaomi"}; + +var query = session.Query<Employee>() + .Select(employee => RavenQuery + .TimeSeries(employee, "HeartRates", from, to) + // Call 'Where' to filter entries by the tag + .Where(ts => + ts.Tag == "watches/apple" || ts.Tag == "watches/samsung" || ts.Tag == "watches/xiaomi") + .ToList()); + +var results = query.ToList(); +`} + + + + +{`// Retrieve only entries where the tag string content is one of several options + +var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var optionalTags = new[] {"watches/apple", "watches/samsung", "watches/xiaomi"}; + +var query = session.Advanced.DocumentQuery<Employee>() + .SelectTimeSeries(builder => builder.From("HeartRates") + .Between(from, to) + // Call 'Where' to filter entries by the tag + .Where(ts => + ts.Tag == "watches/apple" || ts.Tag == "watches/samsung" || ts.Tag == "watches/xiaomi") + .ToList()); + +var results = query.ToList(); +`} + + + + +{`// Retrieve only entries where the tag string content is one of several options + +var baseTime = new DateTime(2020, 5, 17, 00, 00, 00); +var from = baseTime; +var to = baseTime.AddMinutes(10); + +var optionalTags = new[] {"watches/apple", "watches/samsung", "watches/xiaomi"}; + +var query = session.Advanced.RawQuery<TimeSeriesRawResult>(@" + from Employees + select timeseries ( + from HeartRates + between $from and $to + // Use the 'where Tag in' clause to filter by various tag options + where Tag in ($optionalTags) + )") + .AddParameter("from", from) + .AddParameter("to", to) + .AddParameter("optionalTags",
+        optionalTags);
+
+var results = query.ToList();
+`}
+
+
+
+
+{`from Employees
+select timeseries (
+    from HeartRates
+    between "2020-05-17T00:00:00.0000000"
+    and "2020-05-17T00:10:00.0000000"
+    // Use the "where Tag in" clause to filter by various tag options
+    where Tag in ("watches/apple", "watches/samsung", "watches/xiaomi")
+)
+`}
+
+
+
+
+
+
+## Filter by referenced document
+
+* A time series entry's [tag](../../../document-extensions/timeseries/overview.mdx#tags) can contain the **ID of a document**.
+
+* A time series query can filter entries based on the contents of this referenced document.
+  The referenced document is loaded, and entries are filtered by its properties.
+
+
+
+
+{`// Retrieve entries that reference a document that has "Sales Manager" in its 'Title' property
+
+var query = session.Query<Company>()
+    // Query companies from USA
+    .Where(c => c.Address.Country == "USA")
+    .Select(company => RavenQuery
+        .TimeSeries(company, "StockPrices")
+        // Use 'LoadByTag' to load the employee document referenced in the tag
+        .LoadByTag<Employee>()
+        // Use 'Where' to filter the entries by the 'Title' property of the loaded document
+        .Where((ts, employeeDoc) => employeeDoc.Title == "Sales Manager")
+        .ToList());
+
+var results = query.ToList();
+`}
+
+
+
+
+{`// Retrieve entries that reference a document that has "Sales Manager" in its 'Title' property
+
+var query = session.Advanced.DocumentQuery<Company>()
+    // Query companies from USA
+    .WhereEquals(company => company.Address.Country, "USA")
+    .SelectTimeSeries(builder => builder.From("StockPrices")
+        // Use 'LoadByTag' to load the employee document referenced in the tag
+        .LoadByTag<Employee>()
+        // Use 'Where' to filter the entries by the 'Title' property of the loaded document
+        .Where((ts, employeeDoc) => employeeDoc.Title == "Sales Manager")
+        .ToList());
+
+var results = query.ToList();
+`}
+
+
+
+
+{`// Retrieve entries that reference a document that has "Sales Manager" in its 'Title' property
+
+var query = session.Advanced.RawQuery<TimeSeriesRawResult>(@"
+    from Companies
+    where Address.Country == 'USA'
+    select timeseries (
+        from StockPrices
+        // Use 'load Tag' to load the employee document referenced in the tag
+        load Tag as employeeDoc
+        // Use 'where' to filter entries by the properties of the loaded document
+        where employeeDoc.Title == 'Sales Manager'
+    )"
+);
+
+var results = query.ToList();
+`}
+
+
+
+
+{`from Companies
+where Address.Country == "USA"
+select timeseries (
+    from StockPrices
+    // Use 'load Tag' to load the employee document referenced in the tag
+    load Tag as employeeDoc
+    // Use 'where' to filter entries by the properties of the loaded document
+    where employeeDoc.Title == "Sales Manager"
+)
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-nodejs.mdx
new file mode 100644
index 0000000000..6de58d7c67
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-nodejs.mdx
@@ -0,0 +1,268 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In addition to limiting time series query results by specifying the [range of entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx) to retrieve,
+  you can filter the time series entries by their **values**, **tag**, or by the contents of a **document referenced in the tag**.
+ +* For an overview of the available time series queries, please refer to [Time series querying](../../../document-extensions/timeseries/client-api/session/querying.mdx). + +* In this page: + * [Filter by value](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-value) + * [Filter by tag](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-tag) + * [Filter by referenced document](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-referenced-document) + + +## Filter by value + +* A time series entry can have up to 32 [values](../../../document-extensions/timeseries/overview.mdx#values). + +* A time series query can filter entries based on these values. + + + + +{`// For example, in the "HeartRates" time series, +// retrieve only entries where the value exceeds 75 BPM + +const tsQueryText = \` + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Value" clause to filter entries by the value + where Value > 75\`; + +const query = session.query({ collection: "employees" }) + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +const results = await query.all(); +`} + + + + +{`// For example, in the "HeartRates" time series, +// retrieve only entries where the value exceeds 75 BPM + +const rql = \` + from Employees + select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the 'where Value' clause to filter by the value + where Value > 75 + )\`; + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult); + +const result = await query.all(); +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Value" clause to filter entries by the value + where Value > 75 +) +`} + + + + + + +## Filter by tag + +* A time series entry can have an optional [tag](../../../document-extensions/timeseries/overview.mdx#tags). + +* A time series query can filter entries based on this tag. 
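+
+One note on the value filters above: in RQL, `Value` is shorthand for the first value of an entry (`Values[0]`), and entries holding multiple values can be filtered on any value position by index.
+A hedged sketch (assuming entries that carry at least two values; `Values` is zero-based):
+
+```sql
+from Employees
+select timeseries (
+    from HeartRates
+    // Filter on the second value of each entry (illustrative only)
+    where Values[1] > 75
+)
+```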
+ + + + +{`// Retrieve only entries where the tag string content is "watches/fitbit" + +const tsQueryText = \` + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Tag" clause to filter entries by the tag string content + where Tag == "watches/fitbit"\`; + +const query = session.query({ collection: "employees" }) + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +const results = await query.all(); +`} + + + + +{`// Retrieve only entries where the tag string content is "watches/fitbit" + +const rql = \` + from Employees + select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the 'where Tag' clause to filter entries by the tag string content + where Tag == 'watches/fitbit' + )\`; + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult); + +const result = await query.all(); +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Tag" clause to filter entries by the tag string content + where Tag == "watches/fitbit" +) +`} + + + + + + + +{`// Retrieve only entries where the tag string content is one of several options + +const tsQueryText = \` + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Tag in" clause to filter by various tag options + where Tag in ("watches/apple", "watches/samsung", "watches/xiaomi")\`; + +const query = session.query({ collection: "employees" }) + .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult); + +const results = await query.all(); +`} + + + + +{`// Retrieve only entries where the tag string content is one of several options + +const optionalTags = ["watches/apple", "watches/samsung", "watches/xiaomi"]; + +const rql = \` + from Employees + select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the 'where Tag in' clause to filter by various tag options + where Tag in ($optionalTags) + )\`; + +const query = session.advanced.rawQuery(rql, TimeSeriesRawResult) + .addParameter("optionalTags", optionalTags); + +const result = await query.all(); +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Tag in" clause to filter by various tag options + where Tag in ("watches/apple", "watches/samsung", "watches/xiaomi") +) +`} + + + + + + +## Filter by referenced document + +* A time series entry's [tag](../../../document-extensions/timeseries/overview.mdx#tags) can contain the **ID of a document**. + +* A time series query can filter entries based on the contents of this referenced document. + The referenced document is loaded, and entries are filtered by its properties. 
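+
+Any property of the loaded document can appear in the filter, not just `Title`.
+A hedged sketch with a hypothetical condition on the referenced employee's `FirstName` (the canonical `Title` examples follow below):
+
+```sql
+from Companies
+select timeseries (
+    from StockPrices
+    load Tag as employeeDoc
+    // Hypothetical filter on another property of the loaded document
+    where employeeDoc.FirstName == "Robert"
+)
+```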
+
+
+
+
+{`// Retrieve entries that reference a document that has "Sales Manager" in its 'Title' property
+
+const tsQueryText = \`
+    from StockPrices
+    // Use 'load Tag' to load the employee document referenced in the tag
+    load Tag as employeeDoc
+    // Use 'where' to filter entries by the properties of the loaded document
+    where employeeDoc.Title == "Sales Manager"\`;
+
+const query = session.query({ collection: "companies" })
+    .whereEquals("Address.Country", "USA")
+    .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult);
+
+const results = await query.all();
+`}
+
+
+
+
+{`// Retrieve entries that reference a document that has "Sales Manager" in its 'Title' property
+
+const rql = \`
+    from Companies
+    where Address.Country == 'USA'
+    select timeseries (
+        from StockPrices
+        // Use 'load Tag' to load the employee document referenced in the tag
+        load Tag as employeeDoc
+        // Use 'where' to filter entries by the properties of the loaded document
+        where employeeDoc.Title == 'Sales Manager'
+    )\`;
+
+const query = session.advanced.rawQuery(rql, TimeSeriesRawResult);
+
+const results = await query.all();
+`}
+
+
+
+
+{`from Companies
+where Address.Country == "USA"
+select timeseries (
+    from StockPrices
+    // Use 'load Tag' to load the employee document referenced in the tag
+    load Tag as employeeDoc
+    // Use 'where' to filter entries by the properties of the loaded document
+    where employeeDoc.Title == "Sales Manager"
+)
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-php.mdx
new file mode 100644
index 0000000000..73eb3c9da0
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-php.mdx
@@ -0,0 +1,366 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In addition to limiting time series query results by specifying the [range of entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx) to retrieve,
+  you can filter the time series entries by their **values**, **tag**, or by the contents of a **document referenced in the tag**.
+
+* In this page:
+  * [Filter by value](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-value)
+  * [Filter by tag](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-tag)
+  * [Filter by referenced document](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-referenced-document)
+
+
+## Filter by value
+
+* A time series entry can have up to 32 [values](../../../document-extensions/timeseries/overview.mdx#values).
+
+* A time series query can filter entries based on these values.
+
+
+
+
+{`// For example, in the "HeartRates" time series,
+// retrieve only entries where the value exceeds 75 BPM
+
+$tsQueryText =
+    "from HeartRates " .
+    "between \\"2020-05-17T00:00:00.0000000\\" " .
+    "and \\"2020-05-17T00:10:00.0000000\\" " .
+ // Use the "where Value" clause to filter entries by the value + "where Value > 75"; + +$query = $session->advanced()->query(Employee::class) + ->selectTimeSeries(TimeSeriesRawResult::class, function($builder) use ($tsQueryText) { + return $builder->raw($tsQueryText); + }); + +$results = $query->toList(); +`} + + + + +{`// For example, in the "HeartRates" time series, +// retrieve only entries where the value exceeds 75 BPM + +$tsQueryText = + "from HeartRates" . + "between \\"2020-05-17T00:00:00.0000000\\"" . + "and \\"2020-05-17T00:10:00.0000000\\"" . + // Use the "where Value" clause to filter entries by the value + "where Value > 75"; + +$query = $session->advanced()->documentQuery(Employee::class) + ->selectTimeSeries(TimeSeriesRawResult::class, function($builder) use ($tsQueryText) { + return $builder->raw($tsQueryText); + }); + +$results = $query->toList(); +`} + + + + +{`// For example, in the "HeartRates" time series, +// retrieve only entries where the value exceeds 75 BPM + +$baseTime = new DateTime("2020-05-17"); +$from = $baseTime; +$to = (clone $baseTime)->add(new DateInterval("PT10M")); + +$query = $session->advanced()->rawQuery(TimeSeriesRawResult::class, " + from Employees + select timeseries ( + from HeartRates + between \\$from and \\$to + // Use the 'where Value' clause to filter by the value + where Value > 75 + )") + ->addParameter("from", $from) + ->addParameter("to", $to); + +$results = $query->toList(); +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Value" clause to filter entries by the value + where Value > 75 +) +`} + + + + + + +## Filter by tag + +* A time series entry can have an optional [tag](../../../document-extensions/timeseries/overview.mdx#tags). + +* A time series query can filter entries based on this tag. + + + + +{`// Retrieve only entries where the tag string content is "watches/fitbit" + +$tsQueryText = + "from HeartRates" . + "between \\"2020-05-17T00:00:00.0000000\\"" . + "and \\"2020-05-17T00:10:00.0000000\\"" . + // Use the "where Tag" clause to filter entries by the tag string content" . + "where Tag == \\"watches/fitbit\\";"; + +$query = $session->advanced()->query(Employee::class) + ->selectTimeSeries(TimeSeriesRawResult::class, function($builder) use ($tsQueryText) { + return $builder->raw($tsQueryText); + }); + +$results = $query->toList(); +`} + + + + +{`// Retrieve only entries where the tag string content is "watches/fitbit" + +$tsQueryText = + "from HeartRates" . + "between \\"2020-05-17T00:00:00.0000000\\"" . + "and \\"2020-05-17T00:10:00.0000000\\"" . + // Use the "where Tag" clause to filter entries by the tag string content" . 
+ "where Tag == \\"watches/fitbit\\";"; + +$query = $session->advanced()->documentQuery(Employee::class) + ->selectTimeSeries(TimeSeriesRawResult::class, function($builder) use ($tsQueryText) { + return $builder->raw($tsQueryText); + }); + +$results = $query->toList(); +`} + + + + +{`// Retrieve only entries where the tag string content is "watches/fitbit" + +$baseTime = new DateTime("2020-05-17"); +$from = $baseTime; +$to = (clone $baseTime)->add(new DateInterval("PT10M")); + +$query = $session->advanced()->rawQuery(TimeSeriesRawResult::class, " + from Employees + select timeseries ( + from HeartRates + between \\$from and \\$to + // Use the 'where Tag' clause to filter entries by the tag string content + where Tag == 'watches/fitbit' + )") + ->addParameter("from", $from) + ->addParameter("to", $to); + +$results = $query->toList(); +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Tag" clause to filter entries by the tag string content + where Tag == "watches/fitbit" +) +`} + + + + + + + +{`// Retrieve only entries where the tag string content is one of several options + +$tsQueryText = + "from HeartRates" . + "between \\"2020-05-17T00:00:00.0000000\\"" . + "and \\"2020-05-17T00:10:00.0000000\\"" . + // Use the "where Tag in" clause to filter by various tag options + "where Tag in (\\"watches/apple\\", \\"watches/samsung\\", \\"watches/xiaomi\\")"; + +$query = $session->advanced()->query(Employee::class) + ->selectTimeSeries(TimeSeriesRawResult::class, function($builder) use ($tsQueryText) { + return $builder->raw($tsQueryText); + }); + +$results = $query->toList(); +`} + + + + +{`// Retrieve only entries where the tag string content is one of several options + +$tsQueryText = + "from HeartRates" . + "between \\"2020-05-17T00:00:00.0000000\\"" . + "and \\"2020-05-17T00:10:00.0000000\\"" . + // Use the "where Tag in" clause to filter by various tag options + "where Tag in (\\"watches/apple\\", \\"watches/samsung\\", \\"watches/xiaomi\\")"; + +$query = $session->advanced()->documentQuery(Employee::class) + ->selectTimeSeries(TimeSeriesRawResult::class, function($builder) use ($tsQueryText) { + return $builder->raw($tsQueryText); + }); + +$results = $query->toList(); +`} + + + + +{`// Retrieve only entries where the tag string content is one of several options + +$baseTime = new DateTime("2020-05-17"); +$from = $baseTime; +$to = (clone $baseTime)->add(new DateInterval("PT10M")); + +$optionalTags = [ "watches/apple", "watches/samsung", "watches/xiaomi" ]; + +$query = $session->advanced()->rawQuery(TimeSeriesRawResult::class, " + from Employees + select timeseries ( + from HeartRates + between \\$from and \\$to + // Use the 'where Tag in' clause to filter by various tag options + where Tag in (\\$optionalTags) + )") + ->addParameter("from", $from) + ->addParameter("to", $to) + ->addParameter("optionalTags", $optionalTags); + +$results = $query->toList(); +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Tag in" clause to filter by various tag options + where Tag in ("watches/apple", "watches/samsung", "watches/xiaomi") +) +`} + + + + + + +## Filter by referenced document + +* A time series entry's [tag](../../../document-extensions/timeseries/overview.mdx#tags) can contain the **ID of a document**. 
+
+* A time series query can filter entries based on the contents of this referenced document.
+  The referenced document is loaded, and entries are filtered by its properties.
+
+
+
+
+{`// Retrieve entries that reference a document that has "Sales Manager" in its 'Title' property
+
+$tsQueryText =
+    "from StockPrices " .
+    // Use 'load Tag' to load the employee document referenced in the tag
+    "load Tag as employeeDoc " .
+    // Use 'where' to filter entries by the properties of the loaded document
+    "where employeeDoc.Title == \\"Sales Manager\\"";
+
+$query = $session->advanced()->query(Company::class)
+    // Query companies from USA
+    ->whereEquals("Address.Country", "USA")
+    ->selectTimeSeries(TimeSeriesRawResult::class, function ($builder) use ($tsQueryText) {
+        return $builder->raw($tsQueryText);
+    });
+
+$results = $query->toList();
+`}
+
+
+
+
+{`// Retrieve entries that reference a document that has "Sales Manager" in its 'Title' property
+
+$tsQueryText =
+    "from StockPrices " .
+    // Use 'load Tag' to load the employee document referenced in the tag
+    "load Tag as employeeDoc " .
+    // Use 'where' to filter entries by the properties of the loaded document
+    "where employeeDoc.Title == \\"Sales Manager\\"";
+
+$query = $session->advanced()->documentQuery(Company::class)
+    // Query companies from USA
+    ->whereEquals("Address.Country", "USA")
+    ->selectTimeSeries(TimeSeriesRawResult::class, function ($builder) use ($tsQueryText) {
+        return $builder->raw($tsQueryText);
+    });
+
+$results = $query->toList();
+`}
+
+
+
+
+{`// Retrieve entries that reference a document that has "Sales Manager" in its 'Title' property
+
+$query = $session->advanced()->rawQuery(Company::class, "
+    from Companies
+    where Address.Country == 'USA'
+    select timeseries (
+        from StockPrices
+        // Use 'load Tag' to load the employee document referenced in the tag
+        load Tag as employeeDoc
+        // Use 'where' to filter entries by the properties of the loaded document
+        where employeeDoc.Title == 'Sales Manager'
+    )"
+);
+
+$results = $query->toList();
+`}
+
+
+
+
+{`from Companies
+where Address.Country == "USA"
+select timeseries (
+    from StockPrices
+    // Use 'load Tag' to load the employee document referenced in the tag
+    load Tag as employeeDoc
+    // Use 'where' to filter entries by the properties of the loaded document
+    where employeeDoc.Title == "Sales Manager"
+)
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-python.mdx
new file mode 100644
index 0000000000..765eced40f
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_filtering-python.mdx
@@ -0,0 +1,211 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+In addition to limiting time series query results by specifying the [range of entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx) to retrieve,
+you can filter the time series entries by their **values**, **tag**, or by the contents of a **document referenced in the tag**.
+ +* In this page: + * [Filter by value](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-value) + * [Filter by tag](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-tag) + * [Filter by referenced document](../../../document-extensions/timeseries/querying/filtering.mdx#filter-by-referenced-document) + + +## Filter by value + +* A time series entry can have up to 32 [values](../../../document-extensions/timeseries/overview.mdx#values). + +* A time series query can filter entries based on these values. + + + + +{`# For example, in the "HeartRates" time series, +# retrieve only entries where the value exceeds 75 BPM +base_time = datetime(2020, 5, 17, 0, 0, 0, 0) +from_dt = base_time +to_dt = base_time + timedelta(minutes=10) + +query_string = """ +from Employees +select timeseries ( + from HeartRates + between $from and $to + // Use the 'where Value' clause to filter by the value + where Value > 75 +)""" + +query = ( + session.advanced.raw_query(query_string, TimeSeriesRawResult) + .add_parameter("from", from_dt) + .add_parameter("to", to_dt) +) + +results = list(query) +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Value" clause to filter entries by the value + where Value > 75 +) +`} + + + + + + +## Filter by tag + +* A time series entry can have an optional [tag](../../../document-extensions/timeseries/overview.mdx#tags). + +* A time series query can filter entries based on this tag. + + + + +{`# Retrieve only entries where the tag string content is "watches/fitbit" +base_time = datetime(2020, 5, 17, 0, 0, 0, 0) +from_dt = base_time +to_dt = base_time + timedelta(minutes=10) + +query_string = """ +from Employees +select timeseries ( + from HeartRates + between $from and $to + // Use the 'where Tag' clause to filter entries by the tag string content + where Tag == 'watches/fitbit' +)""" + +query = ( + session.advanced.raw_query(query_string, TimeSeriesRawResult) + .add_parameter("from", from_dt) + .add_parameter("to", to_dt) +) + +results = list(query) +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Tag" clause to filter entries by the tag string content + where Tag == "watches/fitbit" +) +`} + + + + + + + +{`# retrieve only entries where the tag string content is one of several options + +base_time = datetime(2020, 5, 17, 0, 0, 0, 0) +from_dt = base_time +to_dt = base_time + timedelta(minutes=10) + +optional_tags = ["watches/apple", "watches/samsung", "watches/xiaomi"] + +query_string = """ +from Employees +select timeseries ( + from HeartRates + between $from and $to + // Use the 'where Tag in' clause to filter by various tag options + where Tag in ($optionalTags) +)""" + +query = ( + session.advanced.raw_query(query_string, TimeSeriesRawResult) + .add_parameter("from", from_dt) + .add_parameter("to", to_dt) + .add_parameter("optionalTags", optional_tags) +) + +results = list(query) +`} + + + + +{`from Employees +select timeseries ( + from HeartRates + between "2020-05-17T00:00:00.0000000" + and "2020-05-17T00:10:00.0000000" + // Use the "where Tag in" clause to filter by various tag options + where Tag in ("watches/apple", "watches/samsung", "watches/xiaomi") +) +`} + + + + + + +## Filter by referenced document + +* A time series entry's [tag](../../../document-extensions/timeseries/overview.mdx#tags) can 
+  contain the **ID of a document**.
+
+* A time series query can filter entries based on the contents of this referenced document.
+  The referenced document is loaded, and entries are filtered by its properties.
+
+
+
+
+{`# Retrieve entries that reference a document that has "Sales Manager" in its 'Title' property
+
+query_string = """
+from Companies
+where Address.Country == 'USA'
+select timeseries (
+    from StockPrices
+    // Use 'load Tag' to load the employee document referenced in the tag
+    load Tag as employeeDoc
+    // Use 'where' to filter entries by the properties of the loaded document
+    where employeeDoc.Title == 'Sales Manager'
+)"""
+
+query = session.advanced.raw_query(query_string, Company)
+
+results = list(query)
+`}
+
+
+
+
+{`from Companies
+where Address.Country == "USA"
+select timeseries (
+    from StockPrices
+    // Use 'load Tag' to load the employee document referenced in the tag
+    load Tag as employeeDoc
+    // Use 'where' to filter entries by the properties of the loaded document
+    where employeeDoc.Title == "Sales Manager"
+)
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_gap-filling-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_gap-filling-csharp.mdx
new file mode 100644
index 0000000000..981d04861c
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_gap-filling-csharp.mdx
@@ -0,0 +1,88 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Time series queries can add extra data points into the gaps between entries.
+  These data points get values extrapolated from the entries on either side of
+  the gap. This is called _interpolation_.
+
+* There are two interpolation methods available:
+  1. Nearest - add values equal to the value of the nearest entry.
+  2. Linear - place the data points on a straight line between the entries on
+     either side.
+
+* In this page:
+  * [Syntax](../../../document-extensions/timeseries/querying/gap-filling.mdx#syntax)
+  * [Examples](../../../document-extensions/timeseries/querying/gap-filling.mdx#examples)
+
+
+## Syntax
+
+To add interpolation to a time series query, start by grouping the data by some
+unit.
+For example, suppose you have a time series with an entry for every hour,
+but several hours are missing (1 AM, 3 PM, etc.), and you want to fill those
+gaps. You will want to group by 1 hour.
+Or suppose you have a time series with an entry for every hour, and you want to
+fill in the gaps with one data point per minute: you will group by 1 minute.
+See [here](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx)
+to learn about aggregation in queries.
+
+Next, use:
+
+* For RQL queries: `interpolation()`
+* For LINQ queries: the `Interpolation` option in `TimeSeriesAggregationOptions`.
+
+The two interpolation modes are:
+
+1. **Nearest**: add entries with values equal to the closest time series entry
+before or after this data point. If the data point is exactly in the middle
+between two entries, the data point gets the value of the earlier entry.
+
+2. **Linear**: the data point is placed on a projected line between two entries.
+For example, if the entry for 1:00 PM has a value of 100, and the entry for 2:00 PM
+has a value of 130, the interpolated data point for 1:40 PM will have the value 120.
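+
+For instance, a minimal RQL sketch applying nearest-mode gap filling over hourly groups (assuming the "HeartRates" series used elsewhere in these pages; full examples appear below):
+
+```sql
+from Employees
+select timeseries (
+    from HeartRates
+    group by 1 hour
+    with interpolation(nearest)
+)
+```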
+
+One data point is added for each aggregated time unit that does not contain any
+values. When time series entries have multiple values, an interpolation will add
+one data point for each pair of values found on *both* sides of the gap.
+
+
+
+## Examples
+
+
+
+
+{`var query = session.Advanced.RawQuery<TimeSeriesAggregationResult>(@"
+    from People
+    select timeseries(
+        from HeartRates
+        group by 1 second
+        with interpolation(linear)
+    )");
+`}
+
+
+
+
+{`var query = session.Query<Person>()
+    .Select(p => RavenQuery.TimeSeries(p, "HeartRates")
+        .GroupBy(g => g
+            .Hours(1)
+            .WithOptions(new TimeSeriesAggregationOptions
+            {
+                Interpolation = InterpolationType.Linear
+            }))
+        .ToList());
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_gap-filling-java.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_gap-filling-java.mdx
new file mode 100644
index 0000000000..4b1aee9001
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_gap-filling-java.mdx
@@ -0,0 +1,77 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Time series queries can add extra data points into the gaps between entries.
+  These data points get values extrapolated from the entries on either side of
+  the gap. This is called _interpolation_.
+
+* There are two interpolation methods available:
+  1. Nearest - add values equal to the value of the nearest entry.
+  2. Linear - place the data points on a straight line between the entries on
+     either side.
+
+* In this page:
+  * [Syntax](../../../document-extensions/timeseries/querying/gap-filling.mdx#syntax)
+  * [Examples](../../../document-extensions/timeseries/querying/gap-filling.mdx#examples)
+
+
+## Syntax
+
+To add interpolation to a time series query, start by grouping the data by some
+unit.
+For example, suppose you have a time series with an entry for every hour,
+but several hours are missing (1 AM, 3 PM, etc.), and you want to fill those
+gaps. You will want to group by 1 hour.
+Or suppose you have a time series with an entry for every hour, and you want to
+fill in the gaps with one data point per minute: you will group by 1 minute.
+See [here](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx)
+to learn about aggregation in queries.
+
+Next, use:
+
+* For RQL queries: `interpolation()`
+
+The two interpolation modes are:
+
+1. **Nearest**: add entries with values equal to the closest time series entry
+before or after this data point. If the data point is exactly in the middle
+between two entries, the data point gets the value of the earlier entry.
+
+2. **Linear**: the data point is placed on a projected line between two entries.
+For example, if the entry for 1:00 PM has a value of 100, and the entry for 2:00 PM
+has a value of 130, the interpolated data point for 1:40 PM will have the value 120.
+
+One data point is added for each aggregated time unit that does not contain any
+values. When time series entries have multiple values, an interpolation will add
+one data point for each pair of values found on *both* sides of the gap.
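+
+A minimal RQL sketch applying nearest-mode gap filling over hourly groups (assuming a "HeartRates" series; the full example follows below):
+
+```sql
+from Employees
+select timeseries (
+    from HeartRates
+    group by 1 hour
+    with interpolation(nearest)
+)
+```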
+
+
+
+## Examples
+
+
+
+
+{`IRawDocumentQuery<RawQueryResult> query = session.advanced().rawQuery(RawQueryResult.class, "declare timeseries out(p)\\n" +
+    "{\\n" +
+    "    from p.HeartRate between $start and $end\\n" +
+    "    group by 1h\\n" +
+    "    with interpolation(linear)\\n" +
+    "    select min(), max()\\n" +
+    "}\\n" +
+    "from index 'People' as p\\n" +
+    "where p.age > 49\\n" +
+    "select out(p) as heartRate, p.name")
+    .addParameter("start", baseLine)
+    .addParameter("end", DateUtils.addDays(baseLine, 1));
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_gap-filling-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_gap-filling-nodejs.mdx
new file mode 100644
index 0000000000..54bdb651b0
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_gap-filling-nodejs.mdx
@@ -0,0 +1,72 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Time series queries can add extra data points into the gaps between entries.
+  These data points get values extrapolated from the entries on either side of
+  the gap. This is called _interpolation_.
+
+* There are two interpolation methods available:
+  1. Nearest - add values equal to the value of the nearest entry.
+  2. Linear - place the data points on a straight line between the entries on
+     either side.
+
+* In this page:
+  * [Syntax](../../../document-extensions/timeseries/querying/gap-filling.mdx#syntax)
+  * [Examples](../../../document-extensions/timeseries/querying/gap-filling.mdx#examples)
+
+
+## Syntax
+
+To add interpolation to a time series query, start by grouping the data by some
+unit.
+For example, suppose you have a time series with an entry for every hour,
+but several hours are missing (1 AM, 3 PM, etc.), and you want to fill those
+gaps. You will want to group by 1 hour.
+Or suppose you have a time series with an entry for every hour, and you want to
+fill in the gaps with one data point per minute: you will group by 1 minute.
+See [here](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx)
+to learn about aggregation in queries.
+
+Next, use:
+
+* For RQL queries: `interpolation()`
+
+The two interpolation modes are:
+
+1. **Nearest**: add entries with values equal to the closest time series entry
+before or after this data point. If the data point is exactly in the middle
+between two entries, the data point gets the value of the earlier entry.
+
+2. **Linear**: the data point is placed on a projected line between two entries.
+For example, if the entry for 1:00 PM has a value of 100, and the entry for 2:00 PM
+has a value of 130, the interpolated data point for 1:40 PM will have the value 120.
+
+One data point is added for each aggregated time unit that does not contain any
+values. When time series entries have multiple values, an interpolation will add
+one data point for each pair of values found on *both* sides of the gap.
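+
+A minimal RQL sketch applying nearest-mode gap filling over hourly groups (assuming a "HeartRates" series; the full example follows below):
+
+```sql
+from Employees
+select timeseries (
+    from HeartRates
+    group by 1 hour
+    with interpolation(nearest)
+)
+```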
+
+
+
+## Examples
+
+
+
+{`const query = session.advanced.rawQuery(
+    "from People\\n" +
+    "select timeseries(\\n" +
+    "    from HeartRates\\n" +
+    "    group by 1 second\\n" +
+    "    with interpolation(linear)\\n" +
+    ")", TimeSeriesAggregationResult
+);
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-csharp.mdx
new file mode 100644
index 0000000000..18ae9a014f
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-csharp.mdx
@@ -0,0 +1,313 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Querying time series entries lets you understand how a process gradually populates a time series over time, and locate the documents related to chosen time series entries.
+
+* Querying time series data is native to RavenDB's queries.
+  Clients can express time series queries in high-level LINQ expressions or directly in [RQL](../../../client-api/session/querying/what-is-rql.mdx).
+
+* Queries can be executed as dynamic queries or over [time series indexes](../../../document-extensions/timeseries/indexing.mdx).
+
+* In this page:
+  * [Time series query capabilities](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#time-series-query-capabilities)
+  * [Server and client queries](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#server-and-client-queries)
+  * [Dynamic and index queries](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#dynamic-and-index-queries)
+  * [Scaling query results](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#scaling-query-results)
+  * [RQL syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#rql-syntax)
+    * [`select timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section)
+    * [`declare timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section-1)
+  * [Combine time series and custom functions](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#combine-time-series-and-custom-functions)
+  * [Use Studio to experiment](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#use-studio-to-experiment)
+
+
+## Time series query capabilities
+
+A time series query can:
+
+* [Choose a range of time series entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx) to query from.
+* [Filter](../../../document-extensions/timeseries/querying/filtering.mdx) time series entries by their tags, values, and timestamps.
+* [Aggregate](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) time series entries into groups by a chosen time resolution,
+  e.g. gather stock prices collected over the past two months into week-long groups.
+  Entries can also be aggregated by their tags.
+* Select entries by various criteria, e.g. by the min and max values of each aggregated group,
+  and [project](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) them to the client.
+* Calculate [statistical measures](../../../document-extensions/timeseries/querying/statistics.mdx): the percentile, slope, or standard deviation of a time series.
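+
+These capabilities compose within a single query. A representative RQL sketch (assuming the "HeartRates" series used in the examples throughout this article) that combines a range, a tag filter, and a daily aggregation:
+
+```sql
+from Employees
+select timeseries (
+    from HeartRates
+    between "2020-05-17T00:00:00.0000000" and "2020-06-17T00:00:00.0000000"
+    where Tag == "watches/fitbit"
+    group by 1 day
+    select min(), max()
+)
+```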
+
+
+
+## Server and client queries
+
+Time series queries are executed by the server, and their results are projected to the client,
+so they require very little client-side computation.
+
+* The server runs time series queries using RQL.
+* Clients can phrase time series queries in **raw RQL** or using high-level **LINQ expressions**.
+  High-level queries are translated to RQL by the client before being sent to the server for execution.
+
+
+
+## Dynamic and index queries
+
+* **Dynamic queries**:
+  * Time series indexes are not created automatically by the server when making a dynamic query.
+  * Use dynamic queries when the time series you query are not indexed,
+    or when you prefer that RavenDB choose an index automatically. See [queries always use an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index).
+    E.g. -
+
+
+
+{`// Query for time series named "HeartRates" in employees hired after 1994
+from Employees as e
+where HiredAt > "1994-01-01"
+select timeseries(
+    from HeartRates
+)
+`}
+
+
+
+* **Index queries**:
+  * Static time series indexes can be created by clients (or using Studio).
+    To learn how to create such indexes, see [indexing time series](../../../document-extensions/timeseries/indexing.mdx).
+  * Examples of querying a static time series index can be found in [querying time series indexes](../../../document-extensions/timeseries/querying/using-indexes.mdx).
+
+
+
+## Scaling query results
+
+* Time series query results can be **scaled**, i.e. multiplied by some number.
+  This doesn't change the stored values themselves, only the output of the query.
+  Scaling can serve as a stage in a data processing pipeline, or simply display the data in a more understandable format.
+
+* There are several use cases for scaling.
+  For example, suppose your time series records the changing speeds of different vehicles as they travel through a city,
+  with some data measured in miles per hour and some in kilometers per hour. Here, scaling can facilitate unit conversion.
+
+* Another use case involves the compression of time series data.
+  Numbers with very high precision (i.e., many digits after the decimal point) are less compressible than numbers with low precision.
+  Therefore, for efficient storage, you might want to change a value like `0.000018` to `18` when storing the data.
+  Then, when querying the data, you can scale by `0.000001` to restore the original value.
+
+* Scaling is a part of both RQL and LINQ syntax:
+
+  * In **LINQ**, use `.Scale()`.
+  * In **RQL**, use the `scale` keyword in a time series query, passing the scaling factor as a double.
+#### Example:
+
+
+
+
+{`var query = session.Query<User>()
+    .Select(p => RavenQuery.TimeSeries(p, "HeartRates")
+        .Scale(10)
+        .ToList())
+    .ToList();
+
+// The value in the query results is 10 times the value stored on the server
+var scaledValue = query[0].Results[0].Values[0];
+`}
+
+
+
+
+{`from Users
+select timeseries(
+    from HeartRates
+    scale 10
+)
+`}
+
+
+
+
+
+
+## RQL syntax
+
+A typical time series query can start by locating the documents whose time series we want to query.
+For example, we can query for employees above 30:
+
+
+
+{`from Employees as e
+where Birthday < '1994-01-01'
+`}
+
+
+
+Then, you can query their time series entries using either of the following two equivalent syntaxes:
+
+* [`select timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section)
+* [`declare timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section-1)
+### `select timeseries`
+
+This syntax allows you to encapsulate your query's time series functionality in a `select timeseries` section.
+
+
+
+{`// Query for entries from time series "HeartRates" for employees above 30
+// ======================================================================
+
+// This clause locates the documents whose time series we want to query:
+from Employees as e
+where Birthday < '1994-01-01'
+
+// Query the time series that belong to the matching documents:
+select timeseries ( // The \`select\` clause defines the time series query.
+    from HeartRates // The \`from\` keyword is used to specify the time series name to query.
+)
+`}
+
+
+
+### `declare timeseries`
+
+This syntax allows you to declare a time series function (using `declare timeseries`) and call it from your query.
+It introduces greater flexibility to your queries as you can, for example, pass arguments to the time series function.
+
+Here is a query written in both syntaxes.
+It first queries for users above 30. If they possess a time series named "HeartRates", it retrieves a range of its entries.
+
+#### With Time Series Function
+
+```sql
+// declare the time series function:
+declare timeseries ts(jogger) {
+    from jogger.HeartRates
+    between
+    "2020-05-27T00:00:00.0000000Z"
+    and
+    "2020-06-23T00:00:00.0000000Z"
+}
+
+from Users as jogger
+where Age > 30
+// call the time series function
+select ts(jogger)
+```
+
+#### Without Time Series Function
+
+```sql
+from Users as jogger
+where Age > 30
+select timeseries(
+    from HeartRates
+    between
+    "2020-05-27T00:00:00.0000000Z"
+    and
+    "2020-06-23T00:00:00.0000000Z")
+```
+
+## Combine time series and custom functions
+
+* You can declare and use both time series functions and custom functions in a query.
+  The custom functions can call the time series functions, pass them arguments, and use their results.
+
+* In the example below, a custom function (`customFunc`) is called by the query `select` clause to fetch and format a set of time series entries, which are then projected by the query.
+  The time series function (`tsQuery`) is called to retrieve the matching time series entries.
+
+* The custom function returns a flat set of values rather than a nested array, to ease the projection of retrieved values.
+
+* Note the generated RQL in the second tab, where the custom function is translated to a [custom JavaScript function](../../../client-api/session/querying/what-is-rql.mdx#declare).
+
+
+
+
+{`var query = from user in session.Query<User>()
+
+    // The custom function
+    let customFunc = new Func<IEnumerable<TimeSeriesEntry>, IEnumerable<ModifiedTimeSeriesEntry>>(
+        entries =>
+            entries.Select(e => new ModifiedTimeSeriesEntry
+            {
+                Timestamp = e.Timestamp,
+                Value = e.Values.Max(),
+                Tag = e.Tag ?? "none"
+            }))
+
+    // The time series query
+    let tsQuery = RavenQuery.TimeSeries(user, "HeartRates")
+        .Where(entry => entry.Values[0] > 100)
+        .ToList()
+
+    // Project query results
+    select new
+    {
+        Name = user.Name,
+        // Call the custom function
+        TimeSeriesEntries = customFunc(tsQuery.Results)
+    };
+
+var queryResults = query.ToList();
+`}
+
+
+
+
+{`// The time series function:
+// =========================
+declare timeseries tsQuery(user) {
+    from user.HeartRates
+    where (Values[0] > 100)
+}
+
+// The custom JavaScript function:
+// ===============================
+declare function customFunc(user) {
+    var results = [];
+
+    // Call the time series function to retrieve heart rate values for the user
+    var r = tsQuery(user);
+
+    // Prepare the results
+    for(var i = 0 ; i < r.Results.length; i ++) {
+        results.push({
+            Timestamp: r.Results[i].Timestamp,
+            Value: r.Results[i].Values.reduce((a, b) => Raven_Max(a, b)),
+            Tag: r.Results[i].Tag ?? "none"})
+    }
+    return results;
+}
+
+// Query & project results:
+// ========================
+from "Users" as user
+select
+    user.Name,
+    customFunc(user) as timeSeriesEntries // Call the custom JavaScript function
+`}
+
+
+
+
+This is the custom `ModifiedTimeSeriesEntry` class that is used in the above LINQ sample:
+
+
+
+{`private class ModifiedTimeSeriesEntry
+\{
+    public DateTime Timestamp \{ get; set; \}
+    public double Value \{ get; set; \}
+    public string Tag \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## Use Studio to experiment
+
+You can use [Studio](../../../studio/database/document-extensions/time-series.mdx) to try the RQL samples provided in this article and test your own queries.
+
+!["Time Series Query in Studio"](./assets/time-series-query.png)
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-nodejs.mdx
new file mode 100644
index 0000000000..3c635a3470
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-nodejs.mdx
@@ -0,0 +1,333 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Querying time series entries lets you understand how a process gradually populates a time series over time, and locate the documents related to chosen time series entries.
+
+* Time series querying is native to RavenDB's queries.
+  Clients can express time series queries using high-level query methods or directly in [RQL](../../../client-api/session/querying/what-is-rql.mdx).
+
+* Queries can be executed as dynamic queries or over [time series indexes](../../../document-extensions/timeseries/indexing.mdx).
+
+* In this page:
+  * [Time series query capabilities](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#time-series-query-capabilities)
+  * [Server and client queries](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#server-and-client-queries)
+  * [Dynamic and index queries](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#dynamic-and-index-queries)
+  * [Scaling query results](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#scaling-query-results)
+  * [RQL syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#rql-syntax)
+    * [`select timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section)
+    * [`declare timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section-1)
+  * [Combine time series and custom functions](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#combine-time-series-and-custom-functions)
+  * [Use Studio to experiment](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#use-studio-to-experiment)
+
+
+## Time series query capabilities
+
+A time series query can:
+
+* [Choose a range of time series entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx) to query from.
+* [Filter](../../../document-extensions/timeseries/querying/filtering.mdx) time series entries by their tags, values, and timestamps.
+* [Aggregate](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) time series entries into groups by a chosen time resolution,
+  e.g. gather stock prices collected over the past two months into week-long groups.
+  Entries can also be aggregated by their tags.
+* Select entries by various criteria, e.g. by the min and max values of each aggregated group,
+  and [project](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) them to the client.
+* Calculate [statistical measures](../../../document-extensions/timeseries/querying/statistics.mdx): the percentile, slope, or standard deviation of a time series.
+
+
+
+## Server and client queries
+
+Time series queries are executed by the server, and their results are projected to the client,
+so they require very little client-side computation.
+
+* The server runs time series queries using RQL.
+* Clients can phrase time series queries in **raw RQL** or using **high-level queries**.
+  High-level queries are translated to RQL by the client before being sent to the server for execution.
+
+
+
+## Dynamic and index queries
+
+* **Dynamic queries**:
+  * Time series indexes are not created automatically by the server when making a dynamic query.
+  * Use dynamic queries when the time series you query are not indexed,
+    or when you prefer that RavenDB choose an index automatically. See [queries always use an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index).
+    E.g. -
+
+
+
+
+{`// Define the time series query text
+const tsQueryText = "from HeartRates";
+
+// Make a dynamic query over the "employees" collection
+const queryResults = await session.query({ collection: "employees" })
+    // Query for employees hired after 1994
+    .whereGreaterThan("HiredAt", "1994-01-01")
+    // Call 'selectTimeSeries' to project the time series entries in the query results
+    // Pass the defined time series query text
+    .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult)
+    .all();
+
+// Results:
+// ========
+
+// 1. Results will include all entries from time series "HeartRates" for matching employee documents.
+// 2. Since this is a dynamic query that filters documents,
+//    an auto-index (Auto/employees/ByHiredAt) will be created if it doesn't already exist.
+//    However, it is NOT a time series index!
+//    It is a regular documents auto-index that allows querying for documents based on their HiredAt field.
+
+// Access a time series entry value from the results:
+const entryValue = queryResults[0].results[0].values[0];
+`}
+
+
+
+
+{`from "employees" as e
+where HiredAt > "1994-01-01"
+select timeseries (
+    from HeartRates
+)
+`}
+
+
+
+
+* **Index queries**:
+  * Static time series indexes can be created by clients (or using Studio).
+    To learn how to create such indexes, see [indexing time series](../../../document-extensions/timeseries/indexing.mdx).
+  * Examples of querying a static time series index can be found in [querying time series indexes](../../../document-extensions/timeseries/querying/using-indexes.mdx).
+
+
+
+## Scaling query results
+
+* Time series query results can be **scaled**, i.e. multiplied by some number.
+  This doesn't change the stored values themselves, only the output of the query.
+  Scaling can serve as a stage in a data processing pipeline, or simply display the data in a more understandable format.
+
+* There are several use cases for scaling.
+  For example, suppose your time series records the changing speeds of different vehicles as they travel through a city,
+  with some data measured in miles per hour and some in kilometers per hour. Here, scaling can facilitate unit conversion.
+
+* Another use case involves the compression of time series data.
+  Numbers with very high precision (i.e., many digits after the decimal point) are less compressible than numbers with low precision.
+  Therefore, for efficient storage, you might want to change a value like `0.000018` to `18` when storing the data.
+  Then, when querying the data, you can scale by `0.000001` to restore the original value.
+#### Example:
+
+
+
+
+{`// Add 'scale <factor>' to your time series query text
+const tsQueryText = "from HeartRates scale 10";
+
+const queryResults = await session.query({ collection: "users" })
+    .selectTimeSeries(b => b.raw(tsQueryText), TimeSeriesRawResult)
+    .all();
+
+// The value in the query results is 10 times the value stored on the server
+const scaledValue = queryResults[0].results[0].values[0];
+`}
+
+
+
+
+{`from "users"
+select timeseries(
+    from HeartRates
+    scale 10
+)
+`}
+
+
+
+
+
+
+## RQL syntax
+
+A typical time series query can start by locating the documents whose time series we want to query.
+For example, we can query for employees above 30: + + + +{`from Employees as e +where Birthday < '1994-01-01' +`} + + + +Then, you can query their time series entries using either of the following two equivalent syntaxes: + +* [`select timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section) +* [`declare timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section-1) +### `select timeseries` + +This syntax allows you to encapsulate your query's time series functionality in a `select timeseries` section. + + + +{`// Query for entries from time series "HeartRates" for employees above 30 +// ====================================================================== + +// This clause locates the documents whose time series we want to query: +from Employees as e +where Birthday < '1994-01-01' + +// Query the time series that belong to the matching documents: +select timeseries ( // The \`select\` clause defines the time series query. + from HeartRates // The \`from\` keyword is used to specify the time series name to query. +) +`} + + + +### `declare timeseries` + +This syntax allows you to declare a time series function (using `declare timeseries`) and call it from your query. +It introduces greater flexibility to your queries as you can, for example, pass arguments to the time series function. + +Here is a query written in both syntaxes. +It first queries for users above 30. If they possess a time series named "HeartRates", it retrieves a range of its entries. + +#### With Time Series Function + +```sql +// declare the time series function: +declare timeseries ts(jogger) { + from jogger.HeartRates + between + "2020-05-27T00:00:00.0000000Z" + and + "2020-06-23T00:00:00.0000000Z" +} + +from Users as jogger +where Age > 30 +// call the time series function +select ts(jogger) +``` + +#### Without Time Series Function + +```sql +from Users as jogger +where Age > 30 +select timeseries( + from HeartRates + between + "2020-05-27T00:00:00.0000000Z" + and + "2020-06-23T00:00:00.0000000Z") +``` + +## Combine time series and custom functions + +* You can declare and use both time series functions and [custom JavaScript function](../../../client-api/session/querying/what-is-rql.mdx#declare) in a query. + The custom functions can call the time series functions, pass them arguments, and use their results. + +* In the example below, a custom function (`customFunc`) is called by the query `select` clause to fetch and format a set of time series entries, which are then projected by the query. + The time series function (`tsQuery`) is called to retrieve the matching time series entries. + +* The custom function returns a flat set of values rather than a nested array, to ease the projection of retrieved values. + + + + +{`const queryResults = await session.advanced + // Provide RQL to rawQuery + .rawQuery(\` + // The time series function: + // ========================= + declare timeseries tsQuery(user) { + from user.HeartRates + where (Values[0] > 100) + } + + // The custom JavaScript function: + // =============================== + declare function customFunc(user) { + var results = []; + + // Call the time series function to retrieve heart rate values for the user + var r = tsQuery(user); + + // Prepare the results + for(var i = 0 ; i < r.Results.length; i ++) { + results.push({ + timestamp: r.Results[i].Timestamp, + value: r.Results[i].Values.reduce((a, b) => Raven_Max(a, b)), + tag: r.Results[i].Tag ?? 
"none"}) + } + return results; + } + + // Query & project results: + // ======================== + from "users" as user + select + user.name, + customFunc(user) as timeSeriesEntries // Call the custom JavaScript function + \`) + // Execute the query + .all(); +`} + + + + +{`// The time series function: +// ========================= +declare timeseries tsQuery(user) { + from user.HeartRates + where (Values[0] > 100.0) +} + +// The custom JavaScript function: +// =============================== +declare function customFunc(user) { + var results = []; + + // Call the time series function to retrieve heart rate values for the user + var r = tsQuery(user); + + // Prepare the results + for(var i = 0 ; i < r.Results.length; i ++) { + results.push({ + timestamp: r.Results[i].Timestamp, + value: r.Results[i].Values.reduce((a, b) => Raven_Max(a, b)), + tag: r.Results[i].Tag ?? "none"}) + } + return results; +} + +// Query & project results: +// ======================== +from "users" as user +select + user.name, + customFunc(user) as timeSeriesEntries // Call the custom JavaScript function +`} + + + + + + +## Use Studio to experiment + +You can use [Studio](../../../studio/database/document-extensions/time-series.mdx) to try the RQL samples provided in this article and test your own queries. + +!["Time Series Query in Studio"](./assets/time-series-query.png) + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-php.mdx new file mode 100644 index 0000000000..cb160b6ec6 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-php.mdx @@ -0,0 +1,282 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Querying time series entries enables comprehending how a process gradually populates a time series over time and locating documents related to chosen time series entries. + +* Querying time series data is native to RavenDB's queries. + Clients can express time series queries in high-level LINQ expressions or directly in [RQL](../../../client-api/session/querying/what-is-rql.mdx). + +* Queries can be executed as dynamic queries or over [time series indexes](../../../document-extensions/timeseries/indexing.mdx). 
+
+* In this page:
+  * [Time series query capabilities](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#time-series-query-capabilities)
+  * [Server and client queries](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#server-and-client-queries)
+  * [Dynamic and index queries](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#dynamic-and-index-queries)
+  * [Scaling query results](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#scaling-query-results)
+  * [RQL syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#rql-syntax)
+      * [`select timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section)
+      * [`declare timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section-1)
+  * [Combine time series and custom functions](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#combine-time-series-and-custom-functions)
+  * [Use Studio to experiment](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#use-studio-to-experiment)
+
+
+## Time series query capabilities
+
+A time series query can:
+
+* [Choose a range of time series entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx) to query from.
+* [Filter](../../../document-extensions/timeseries/querying/filtering.mdx) time series entries by their tags, values, and timestamps.
+* [Aggregate](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) time series entries into groups by a chosen time resolution,
+  e.g. gather a stock's prices collected over the past two months into week-long groups.
+  Entries can also be aggregated by their tags.
+* Select entries by various criteria, e.g. by the min and max values of each aggregated group,
+  and [project](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) them to the client.
+* Calculate [statistical measures](../../../document-extensions/timeseries/querying/statistics.mdx): the percentile, slope, or standard deviation of a time series.
+
+
+
+## Server and client queries
+
+Time series queries are executed by the server, and their results are projected to the client,
+so they require very little client-side computation.
+
+* The server runs time series queries using RQL.
+* Clients can phrase time series queries in **raw RQL** or using high-level **LINQ expressions**.
+  High-level queries are translated to RQL by the client before being sent to the server for execution.
+
+
+
+## Dynamic and index queries
+
+* **Dynamic queries**:
+    * Time series indexes are **not** created automatically by the server when making a dynamic query.
+    * Use dynamic queries when the time series you query are not indexed,
+      or when you prefer to let RavenDB choose an index automatically. See [queries always use an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index).
+      E.g.:
+
+
+
+{`// Query for time series named "HeartRates" in employees hired after 1994
+from Employees as e
+where HiredAt > "1994-01-01"
+select timeseries(
+    from HeartRates
+)
+`}
+
+
+
+* **Index queries**:
+    * Static time series indexes can be created by clients (or using Studio).
+    * Examples of querying a static time series index can be found in [querying time series indexes](../../../document-extensions/timeseries/querying/using-indexes.mdx).
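+
+For illustration, querying such a static index uses the same `from index` RQL form as any other index query. Below is a minimal sketch; the index name "TsIndex" and its `BPM` index-field are placeholder assumptions for this sketch (see the linked article for complete index definitions and queries):
+
+```sql
+from index "TsIndex"
+where BPM > 85.0
+```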
+ + + +## Scaling query results + +* Time series query results can be **scaled**, multiplied by some number. + This doesn't change the values themselves, only the output of the query. + Scaling can serve as a stage in a data processing pipeline, or just for the purposes of displaying the data in a more understandable format. + +* There are several use cases for scaling. + For example, suppose your time series records the changing speeds of different vehicles as they travel through a city, + with some data measured in miles per hour and others in kilometers per hour. Here, scaling can facilitate unit conversion. + +* Another use case involves the compression of time series data. + Numbers with very high precision (i.e., many digits after the decimal point) are less compressible than numbers with low precision. + Therefore, for efficient storage, you might want to change a value like `0.000018` to `18` when storing the data. + Then, when querying the data, you can scale by `0.000001` to restore the original value. + +* Scaling is a part of both RQL and LINQ syntax: + + * In **LINQ**, use `.Scale()`. + * In **RQL**, use `scale ` in a time series query, and input your scaling factor as a double. +#### Example: + + + + +{`var query = session.Query() + .Select(p => RavenQuery.TimeSeries(p, "HeartRates") + .Scale(10) + .ToList()) + .ToList(); + +// The value in the query results is 10 times the value stored on the server +var scaledValue = query[0].Results[0].Values[0]; +`} + + + + +{`from Users +select timeseries( + from HeartRates + scale 10 +) +`} + + + + + + +## RQL syntax + +A typical time series query can start by locating the documents whose time series we want to query. +For example, we can query for employees above 30: + + + +{`from Employees as e +where Birthday < '1994-01-01' +`} + + + +Then, you can query their time series entries using either of the following two equivalent syntaxes: + +* [`select timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section) +* [`declare timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section-1) +### `select timeseries` + +This syntax allows you to encapsulate your query's time series functionality in a `select timeseries` section. + + + +{`// Query for entries from time series "HeartRates" for employees above 30 +// ====================================================================== + +// This clause locates the documents whose time series we want to query: +from Employees as e +where Birthday < '1994-01-01' + +// Query the time series that belong to the matching documents: +select timeseries ( // The \`select\` clause defines the time series query. + from HeartRates // The \`from\` keyword is used to specify the time series name to query. +) +`} + + + +### `declare timeseries` + +This syntax allows you to declare a time series function (using `declare timeseries`) and call it from your query. +It introduces greater flexibility to your queries as you can, for example, pass arguments to the time series function. + +Here is a query written in both syntaxes. +It first queries for users above 30. If they possess a time series named "HeartRates", it retrieves a range of its entries. 
+ +#### With Time Series Function + +```sql +// declare the time series function: +declare timeseries ts(jogger) { + from jogger.HeartRates + between + "2020-05-27T00:00:00.0000000Z" + and + "2020-06-23T00:00:00.0000000Z" +} + +from Users as jogger +where Age > 30 +// call the time series function +select ts(jogger) +``` + +#### Without Time Series Function + +```sql +from Users as jogger +where Age > 30 +select timeseries( + from HeartRates + between + "2020-05-27T00:00:00.0000000Z" + and + "2020-06-23T00:00:00.0000000Z") +``` + +## Combine time series and custom functions + +* You can declare and use both time series functions and custom functions in a query. + The custom functions can call the time series functions, pass them arguments, and use their results. + +* In the example below, a custom function (`customFunc`) is called by the query `select` clause + to fetch and format a set of time series entries, which are then projected by the query. + The time series function (`tsQuery`) is called to retrieve the matching time series entries. + +* The custom function returns a flat set of values rather than a nested array, to ease the projection of retrieved values. + +* Note the generated RQL, where the custom function is translated to a [custom JavaScript function](../../../client-api/session/querying/what-is-rql.mdx#declare). + + + +{`// The time series function: +// ========================= +declare timeseries tsQuery(user) \{ + from user.HeartRates + where (Values[0] > 100) +\} + +// The custom JavaScript function: +// =============================== +declare function customFunc(user) \{ + var results = []; + + // Call the time series function to retrieve heart rate values for the user + var r = tsQuery(user); + + // Prepare the results + for(var i = 0 ; i < r.Results.length; i ++) \{ + results.push(\{ + Timestamp: r.Results[i].Timestamp, + Value: r.Results[i].Values.reduce((a, b) => Raven_Max(a, b)), + Tag: r.Results[i].Tag ?? "none"\}) + \} + return results; +\} + +// Query & project results: +// ======================== +from "Users" as user +select + user.Name, + customFunc(user) as timeSeriesEntries // Call the custom JavaScript function +`} + + + +This is the custom `ModifiedTimeSeriesEntry` class that is used in the above LINQ sample: + + + +{`class ModifiedTimeSeriesEntry +\{ + private ?DateTime $timestamp = null; + private ?float $value = null; + private ?string $tag = null; + + // ... getters and setters ... +\} +`} + + + + + +## Use Studio to experiment + +You can use [Studio](../../../studio/database/document-extensions/time-series.mdx) to try the RQL samples provided in this article and test your own queries. + +!["Time Series Query in Studio"](./assets/time-series-query.png) + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-python.mdx new file mode 100644 index 0000000000..2025bccf4a --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_overview-and-syntax-python.mdx @@ -0,0 +1,280 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Querying time series entries enables comprehending how a process gradually populates a time series over time and locating documents related to chosen time series entries. 
+
+* Querying time series data is native to RavenDB's queries.
+  Clients can express time series queries in high-level LINQ expressions or directly in [RQL](../../../client-api/session/querying/what-is-rql.mdx).
+
+* Queries can be executed as dynamic queries or over [time series indexes](../../../document-extensions/timeseries/indexing.mdx).
+
+* In this page:
+  * [Time series query capabilities](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#time-series-query-capabilities)
+  * [Server and client queries](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#server-and-client-queries)
+  * [Dynamic and index queries](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#dynamic-and-index-queries)
+  * [Scaling query results](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#scaling-query-results)
+  * [RQL syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#rql-syntax)
+      * [`select timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section)
+      * [`declare timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section-1)
+  * [Combine time series and custom functions](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#combine-time-series-and-custom-functions)
+  * [Use Studio to experiment](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#use-studio-to-experiment)
+
+
+## Time series query capabilities
+
+A time series query can:
+
+* [Choose a range of time series entries](../../../document-extensions/timeseries/querying/choosing-query-range.mdx) to query from.
+* [Filter](../../../document-extensions/timeseries/querying/filtering.mdx) time series entries by their tags, values, and timestamps.
+* [Aggregate](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) time series entries into groups by a chosen time resolution,
+  e.g. gather a stock's prices collected over the past two months into week-long groups.
+  Entries can also be aggregated by their tags.
+* Select entries by various criteria, e.g. by the min and max values of each aggregated group,
+  and [project](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx) them to the client.
+* Calculate [statistical measures](../../../document-extensions/timeseries/querying/statistics.mdx): the percentile, slope, or standard deviation of a time series.
+
+
+
+## Server and client queries
+
+Time series queries are executed by the server, and their results are projected to the client,
+so they require very little client-side computation.
+
+* The server runs time series queries using RQL.
+* Clients can phrase time series queries in **raw RQL** or using high-level **LINQ expressions**.
+  High-level queries are translated to RQL by the client before being sent to the server for execution.
+
+
+
+## Dynamic and index queries
+
+* **Dynamic queries**:
+    * Time series indexes are **not** created automatically by the server when making a dynamic query.
+    * Use dynamic queries when the time series you query are not indexed,
+      or when you prefer to let RavenDB choose an index automatically. See [queries always use an index](../../../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index).
+      E.g.
- + + + +{`// Query for time series named "HeartRates" in employees hired after 1994 +from Employees as e +where HiredAt > "1994-01-01" +select timeseries( + from HeartRates +) +`} + + + +* **Index queries**: + * Static time series indexes can be created by clients (or using Studio). + To learn how to create such indexes, see [indexing time series](document-extensions/timeseries/indexing). + * Examples of querying a static time series index can be found in [querying time series indexes](../../../document-extensions/timeseries/querying/using-indexes.mdx). + + + +## Scaling query results + +* Time series query results can be **scaled**, multiplied by some number. + This doesn't change the values themselves, only the output of the query. + Scaling can serve as a stage in a data processing pipeline, or just for the purposes of displaying the data in a more understandable format. + +* There are several use cases for scaling. + For example, suppose your time series records the changing speeds of different vehicles as they travel through a city, + with some data measured in miles per hour and others in kilometers per hour. Here, scaling can facilitate unit conversion. + +* Another use case involves the compression of time series data. + Numbers with very high precision (i.e., many digits after the decimal point) are less compressible than numbers with low precision. + Therefore, for efficient storage, you might want to change a value like `0.000018` to `18` when storing the data. + Then, when querying the data, you can scale by `0.000001` to restore the original value. + +* Scaling is a part of both RQL and LINQ syntax: + + * In **LINQ**, use `.Scale()`. + * In **RQL**, use `scale ` in a time series query, and input your scaling factor as a double. +#### Example: + + + + +{`var query = session.Query() + .Select(p => RavenQuery.TimeSeries(p, "HeartRates") + .Scale(10) + .ToList()) + .ToList(); + +// The value in the query results is 10 times the value stored on the server +var scaledValue = query[0].Results[0].Values[0]; +`} + + + + +{`from Users +select timeseries( + from HeartRates + scale 10 +) +`} + + + + + + +## RQL syntax + +A typical time series query can start by locating the documents whose time series we want to query. +For example, we can query for employees above 30: + + + +{`from Employees as e +where Birthday < '1994-01-01' +`} + + + +Then, you can query their time series entries using either of the following two equivalent syntaxes: + +* [`select timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section) +* [`declare timeseries` syntax](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#section-1) +### `select timeseries` + +This syntax allows you to encapsulate your query's time series functionality in a `select timeseries` section. + + + +{`// Query for entries from time series "HeartRates" for employees above 30 +// ====================================================================== + +// This clause locates the documents whose time series we want to query: +from Employees as e +where Birthday < '1994-01-01' + +// Query the time series that belong to the matching documents: +select timeseries ( // The \`select\` clause defines the time series query. + from HeartRates // The \`from\` keyword is used to specify the time series name to query. +) +`} + + + +### `declare timeseries` + +This syntax allows you to declare a time series function (using `declare timeseries`) and call it from your query. 
+It introduces greater flexibility to your queries as you can, for example, pass arguments to the time series function. + +Here is a query written in both syntaxes. +It first queries for users above 30. If they possess a time series named "HeartRates", it retrieves a range of its entries. + +#### With Time Series Function + +```sql +// declare the time series function: +declare timeseries ts(jogger) { + from jogger.HeartRates + between + "2020-05-27T00:00:00.0000000Z" + and + "2020-06-23T00:00:00.0000000Z" +} + +from Users as jogger +where Age > 30 +// call the time series function +select ts(jogger) +``` + +#### Without Time Series Function + +```sql +from Users as jogger +where Age > 30 +select timeseries( + from HeartRates + between + "2020-05-27T00:00:00.0000000Z" + and + "2020-06-23T00:00:00.0000000Z") +``` + +## Combine time series and custom functions + +* You can declare and use both time series functions and custom functions in a query. + The custom functions can call the time series functions, pass them arguments, and use their results. + +* In the example below, a custom function (`customFunc`) is called by the query `select` clause + to fetch and format a set of time series entries, which are then projected by the query. + The time series function (`tsQuery`) is called to retrieve the matching time series entries. + +* The custom function returns a flat set of values rather than a nested array, to ease the projection of retrieved values. + +* Note the generated RQL, where the custom function is translated to a [custom JavaScript function](../../../client-api/session/querying/what-is-rql.mdx#declare). + + + +{`// The time series function: +// ========================= +declare timeseries tsQuery(user) \{ + from user.HeartRates + where (Values[0] > 100) +\} + +// The custom JavaScript function: +// =============================== +declare function customFunc(user) \{ + var results = []; + + // Call the time series function to retrieve heart rate values for the user + var r = tsQuery(user); + + // Prepare the results + for(var i = 0 ; i < r.Results.length; i ++) \{ + results.push(\{ + Timestamp: r.Results[i].Timestamp, + Value: r.Results[i].Values.reduce((a, b) => Raven_Max(a, b)), + Tag: r.Results[i].Tag ?? "none"\}) + \} + return results; +\} + +// Query & project results: +// ======================== +from "Users" as user +select + user.Name, + customFunc(user) as timeSeriesEntries // Call the custom JavaScript function +`} + + + +This is the custom `ModifiedTimeSeriesEntry` class that is used in the above LINQ sample: + + + +{`class ModifiedTimeSeriesEntry: + def __init__(self, timestamp: datetime = None, value: float = None, tag: str = None): + self.timestamp = timestamp + self.value = value + self.tag = tag +`} + + + + + +## Use Studio to experiment + +You can use [Studio](../../../studio/database/document-extensions/time-series.mdx) to try the RQL samples provided in this article and test your own queries. 
+ +!["Time Series Query in Studio"](./assets/time-series-query.png) + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_stream-timeseries-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_stream-timeseries-csharp.mdx new file mode 100644 index 0000000000..ac98c2d72e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_stream-timeseries-csharp.mdx @@ -0,0 +1,179 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This page explains how time series data can be streamed: + 1. Stream a time series directly. + 2. Stream time series query results. + +* In this page: + * [Syntax](../../../document-extensions/timeseries/querying/stream-timeseries.mdx#syntax) + * [Examples](../../../document-extensions/timeseries/querying/stream-timeseries.mdx#examples) + + +## Syntax + +### Stream a time series directly: + +Get a time series (e.g. [using `TimeSeriesFor.Get`](../../../document-extensions/timeseries/client-api/session/get/get-entries.mdx)), +and call `Stream()`/`StreamAsync()`. + + + +{`IEnumerator Stream(DateTime? from = null, DateTime? to = null, TimeSpan? offset = null); + +Task> StreamAsync(DateTime? from = null, DateTime? to = null, TimeSpan? offset = null); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **from** | `DateTime?` | Start the stream from a certain time. If null, stream starts from the beginning of the time series. | +| **to** | `DateTime?` | Stop the stream at a certain time. If null, stream stops at the end of the time series. | +| **offset** | `TimeSpan?` | Change the timestamp of the streamed time series entries by adding this amount of time. | + +### Stream results of time series queries: + +This syntax is the same as the syntax for streaming query results in general, +found [here](../../../client-api/session/querying/how-to-stream-query-results.mdx). + + + +{`IEnumerator> Stream( + IQueryable query); + +IEnumerator> Stream( + IQueryable query, + out StreamQueryStatistics streamQueryStats); + +IEnumerator> Stream( + IDocumentQuery query); + +IEnumerator> Stream( + IRawDocumentQuery query); + +IEnumerator> Stream( + IRawDocumentQuery query, + out StreamQueryStatistics streamQueryStats); + +IEnumerator> Stream( + IDocumentQuery query, + out StreamQueryStatistics streamQueryStats); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **query** | [IQueryable](../../../client-api/session/querying/how-to-query.mdx#sessionquery), [IDocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery) or [IRawDocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) | Query to stream results for. | +| **streamQueryStats** | `out` [StreamQueryStatistics](../../../glossary/stream-query-statistics.mdx) | Information about performed query. | + +| Return Value | Description | +| - | - | +| IEnumerator<[StreamResult](../../../glossary/stream-result.mdx)> | Enumerator with entities. 
| + + + +## Examples + +### Example I + +Using `TimeSeriesFor`: + + + + +{`var timeseries = session.TimeSeriesFor("user/1-A"); +var results = new List(); + +using (var TSstream = timeseries.Stream()) +{ + while (TSstream.MoveNext()) + { + results.Add(TSstream.Current); + } +} +`} + + + + +{`var timeseries = session.TimeSeriesFor("user/1-A"); +var results = new List(); +var TSstream = await timeseries.StreamAsync(); + +await using (TSstream) +{ + while (await TSstream.MoveNextAsync()) + { + results.Add(TSstream.Current); + } +} +`} + + + + +### Example II + +Using a `RawQuery`: + + + + +{`var query = session.Advanced.RawQuery(@" + from Users + select timeseries ( + from HeartRate + )"); + +var results = new List(); + +using (var docStream = session.Advanced.Stream(query)) +{ + while (docStream.MoveNext()) + { + var document = docStream.Current.Result; + var timeseries = document.Stream; + while (timeseries.MoveNext()) + { + results.Add(timeseries.Current); + } + } +} +`} + + + + +{`var query = session.Advanced.AsyncRawQuery(@" + from Users + select timeseries ( + from HeartRate + )"); + +var results = new List(); + +await using (var docStream = await session.Advanced.StreamAsync(query)) +{ + while (await docStream.MoveNextAsync()) + { + var document = docStream.Current.Result; + var timeseries = document.StreamAsync; + while (await timeseries.MoveNextAsync()) + { + results.Add(timeseries.Current); + } + } +} +`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_stream-timeseries-java.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_stream-timeseries-java.mdx new file mode 100644 index 0000000000..51aa3216d8 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_stream-timeseries-java.mdx @@ -0,0 +1,99 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This page explains how time series data can be streamed: + 1. Stream a time series directly. + 2. Stream time series query results. + +* In this page: + * [Syntax](../../../document-extensions/timeseries/querying/stream-timeseries.mdx#syntax) + * [Examples](../../../document-extensions/timeseries/querying/stream-timeseries.mdx#examples) + + +## Syntax + +### Stream a time series directly: + +Get a time series (using `timeSeriesFor().get()`, and call `stream()`). + + + +{` CloseableIterator> stream(IRawDocumentQuery query); +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **query** | `IRawDocumentQuery` | Raw data | + + +### Stream results of time series queries: + +This syntax is the same as the syntax for streaming query results in general, +found [here](../../../client-api/session/querying/how-to-stream-query-results.mdx). + + + +{` CloseableIterator> stream(IDocumentQuery query, Reference streamQueryStats); + + CloseableIterator> stream(IRawDocumentQuery query); + + CloseableIterator> stream(IRawDocumentQuery query, Reference streamQueryStats); +`} + + + +| Parameters | | | +| ------------- | ------------- | ----- | +| **query** | [IDocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery) or [IRawDocumentQuery](../../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) | Query to stream results for. | +| `Reference` **streamQueryStats** | StreamQueryStatistics | Information about performed query. 
|
+
+| Return Value | |
+| ------------- | ----- |
+| CloseableIterator<StreamResult> | Iterator with entities. |
+
+
+
+## Examples
+
+> Note: The Java client does not provide a method for streaming a time series retrieved with `timeSeriesFor().get()`,
+> so no direct-streaming sample exists. Stream time series data through a raw query instead, as shown below.
+
+### Example
+
+Using a `rawQuery`:
+
+
+
+{`IRawDocumentQuery<Employee> query = session.advanced()
+    .rawQuery(Employee.class,
+        "from Users\\n" +
+        "select timeseries (\\n" +
+        "    from HeartRate\\n" +
+        ")"
+    );
+
+CloseableIterator<StreamResult<Employee>> results = session.advanced().stream(query);
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-csharp.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-csharp.mdx
new file mode 100644
index 0000000000..5bb1baea4b
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-csharp.mdx
@@ -0,0 +1,446 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Time series index**:
+
+    * STATIC-time-series-indexes can be defined from the [Client API](../../../document-extensions/timeseries/indexing.mdx)
+      or using [Studio](../../../studio/database/indexes/create-map-index.mdx).
+      Such an index can be queried in the same way as a regular index that indexes documents.
+      (See [Querying an index](../../../indexes/querying/query-index.mdx)).
+
+    * AUTO-time-series-indexes are **not** generated automatically by the server when making a time series query.
+
+* **The contents of the query results**:
+
+    * Unlike a document index, where the source data are your JSON documents,
+      the source data for a time series index are the time series entries within the documents.
+
+    * When querying a **document index**:
+      the resulting objects are the document entities (unless results are [projected](../../../indexes/querying/projections.mdx)).
+
+    * When querying a **time series index**:
+      each item in the results is of the type defined by the **index-entry** in the index definition
+      (unless results are [projected](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)).
+      The documents themselves are not returned.
+
+* In this page:
+  * [Sample index](../../../document-extensions/timeseries/querying/using-indexes.mdx#sample-index)
+  * [Querying the index](../../../document-extensions/timeseries/querying/using-indexes.mdx#querying-the-index)
+      * [Query all time series entries](../../../document-extensions/timeseries/querying/using-indexes.mdx#query-all-time-series-entries)
+      * [Filter query results](../../../document-extensions/timeseries/querying/using-indexes.mdx#filter-query-results)
+      * [Order query results](../../../document-extensions/timeseries/querying/using-indexes.mdx#order-query-results)
+      * [Project results](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)
+  * [Syntax](../../../document-extensions/timeseries/querying/using-indexes.mdx#syntax)
+
+
+## Sample Index
+
+* The following is a time series map-index that will be used in the query examples throughout this article.
+
+* Each **index-entry** consists of:
+    * Three index-fields obtained from the "HeartRates" time series entries: `BPM`, `Date`, and `Tag`.
+ * One index-field obtained from the time series [segment](../../../document-extensions/timeseries/indexing.mdx#timeseriessegment-object) header: `EmployeeID`. + * One index-field obtained from the loaded employee document: `EmployeeName`. + +* When querying this time series index: + * The resulting items correspond to the time series entries that match the query predicate. + * Each item in the results will be of type `TsIndex.IndexEntry`, which is the index-entry. + Different result types may be returned when the query [projects the results](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results). + + + +{`public class TsIndex : AbstractTimeSeriesIndexCreationTask +\{ + // The index-entry: + // ================ + public class IndexEntry + \{ + // The index-fields: + // ================= + public double BPM \{ get; set; \} + public DateTime Date \{ get; set; \} + public string Tag \{ get; set; \} + public string EmployeeID \{ get; set; \} + public string EmployeeName \{ get; set; \} + \} + + public TsIndex() + \{ + AddMap("HeartRates", timeSeries => + from segment in timeSeries + from entry in segment.Entries + + let employee = LoadDocument(segment.DocumentId) + + // Define the content of the index-fields: + // ======================================= + select new IndexEntry() + \{ + BPM = entry.Values[0], + Date = entry.Timestamp, + Tag = entry.Tag, + EmployeeID = segment.DocumentId, + EmployeeName = employee.FirstName + " " + employee.LastName + \}); + \} +\} +`} + + + + + +## Querying the index + +#### Query all time series entries: + +No filtering is applied in this query. +Results will include ALL entries from time series "HeartRates". + + + + +{`using (var session = store.OpenSession()) +{ + List results = session + // Query the index + .Query() + // Query for all entries w/o any filtering + .ToList(); + + // Access results: + TsIndex.IndexEntry entryResult = results[0]; + string employeeName = entryResult.EmployeeName; + double BPM = entryResult.BPM; +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + List results = await asyncSession + // Query the index + .Query() + // Query for all entries w/o any filtering + .ToListAsync(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Query the index + .DocumentQuery() + // Query for all entries w/o any filtering + .ToList(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Query the index for all entries w/o any filtering + .RawQuery($@" + from index 'TsIndex' + ") + .ToList(); +} +`} + + + + +{`from index "TsIndex" +`} + + + +#### Filter query results: + +In this example, time series entries are filtered by the query. +The query predicate is applied to the index-fields. 
+ + + + +{`using (var session = store.OpenSession()) +{ + List results = session + .Query() + // Retrieve only time series entries with high BPM values for a specific employee + .Where(x => x.EmployeeName == "Robert King" && x.BPM > 85) + .ToList(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + List results = await asyncSession + .Query() + // Retrieve only time series entries with high BPM values for a specific employee + .Where(x => x.EmployeeName == "Robert King" && x.BPM > 85) + .ToListAsync(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + .DocumentQuery() + // Retrieve only time series entries with high BPM values for a specific employee + .WhereEquals(x => x.EmployeeName, "Robert King") + .AndAlso() + .WhereGreaterThan(x => x.BPM, 85) + .ToList(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Retrieve only time series entries with high BPM values for a specific employee + .RawQuery($@" + from index 'TsIndex' + where EmployeeName == 'Robert King' and BPM > 85.0 + ") + .ToList(); +} +`} + + + + +{`from index "TsIndex" +where EmployeeName == "Robert King" and BPM > 85.0 +`} + + + +#### Order query results: + +Results can be ordered by any of the index-fields. + + + + +{`using (var session = store.OpenSession()) +{ + List results = session + .Query() + // Retrieve time series entries where employees had a low BPM value + .Where(x => x.BPM < 58) + // Order by the 'Date' index-field (descending order) + .OrderByDescending(x => x.Date) + .ToList(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + List results = await asyncSession + .Query() + // Retrieve time series entries where employees had a low BPM value + .Where(x => x.BPM < 58) + // Order by the 'Date' index-field (descending order) + .OrderByDescending(x => x.Date) + .ToListAsync(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + .DocumentQuery() + // Retrieve time series entries where employees had a low BPM value + .WhereLessThan(x => x.BPM, 58) + // Order by the 'Date' index-field (descending order) + .OrderByDescending(x => x.Date) + .ToList(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Retrieve entries with low BPM value and order by 'Date' descending + .RawQuery($@" + from index 'TsIndex' + where BPM < 58.0 + order by Date desc + ") + .ToList(); +} +`} + + + + +{`from index "TsIndex" +where BPM < 58.0 +order by Date desc +`} + + + +#### Project results: + +* Instead of returning the entire `TsIndex.IndexEntry` object for each result item, + you can return only partial fields. + +* Learn more about projecting query results in [Project Index Query Results](../../../indexes/querying/projections.mdx). + +* In this example, we query for time series entries with a very high BPM value. + We retrieve entries with BPM value > 100 but return only the _EmployeeID_ for each entry. 
+ + + + +{`using (var session = store.OpenSession()) +{ + List results = session + .Query() + .Where(x => x.BPM > 100) + // Return only the EmployeeID index-field in the results + .Select(x => x.EmployeeID) + // Optionally: call 'Distinct' to remove duplicates from results + .Distinct() + .ToList(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + List results = await asyncSession + .Query() + .Where(x => x.BPM > 100) + // Return only the EmployeeID index-field in the results + .Select(x => x.EmployeeID) + // Optionally: call 'Distinct' to remove duplicates from results + .Distinct() + .ToListAsync(); +} +`} + + + + +{`var fieldsToProject = new string[] { + "EmployeeID" +}; + +using (var session = store.OpenSession()) +{ + List results = session.Advanced + .DocumentQuery() + .WhereGreaterThan(x => x.BPM, 100) + // Return only the EmployeeID index-field in the results + .SelectFields(fieldsToProject) + // Optionally: call 'Distinct' to remove duplicates from results + .Distinct() + .ToList(); +} +`} + + + + +{`// This class is used when projecting index-fields via DocumentQuery +public class EmployeeDetails +{ + public string EmployeeName { get; set; } + public string EmployeeID { get; set; } +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Return only the EmployeeID index-field in the results + .RawQuery($@" + from index 'TsIndex' + where BPM > 100.0 + select distinct EmployeeID + ") + .ToList(); +} +`} + + + + +{`from index "TsIndex" +where BPM > 100.0 +select distinct EmployeeID +`} + + + + + + +## Syntax + +* `session.Query` + + + +{`IRavenQueryable Query() where TIndexCreator +: AbstractCommonApiForIndexes, new(); +`} + + + +* `DocumentQuery` + + + +{`IDocumentQuery DocumentQuery() where TIndexCreator +: AbstractCommonApiForIndexes, new(); +`} + + + +| Parameter | Description | +|--------------------|--------------------| +| **T** | The results class | +| **TIndexCreator** | Index | + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-nodejs.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-nodejs.mdx new file mode 100644 index 0000000000..722df1741d --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-nodejs.mdx @@ -0,0 +1,248 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Time series index**: + + * STATIC-time-series-indexes can be defined from the [Client API](../../../document-extensions/timeseries/indexing.mdx) + or using [Studio](../../../studio/database/indexes/create-map-index.mdx). + Such an index can be queried in the same way as a regular index that indexes documents. + (See [Querying an index](../../../indexes/querying/query-index.mdx)). + + * AUTO-time-series-indexes are Not generated automatically by the server when making a time series query. + +* **The contents of the query results**: + + * Unlike a document index, where the source data are your JSON documents, + the source data for a time series index are the time series entries within the documents. + + * When querying a **document index**: + the resulting objects are the document entities (unless results are [projected](../../../indexes/querying/projections.mdx)). 
+
+    * When querying a **time series index**:
+      each item in the results is of the type defined by the **index-entry** in the index definition
+      (unless results are [projected](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)).
+      The documents themselves are not returned.
+
+* In this page:
+  * [Sample index](../../../document-extensions/timeseries/querying/using-indexes.mdx#sample-index)
+  * [Querying the index](../../../document-extensions/timeseries/querying/using-indexes.mdx#querying-the-index)
+      * [Query all time series entries](../../../document-extensions/timeseries/querying/using-indexes.mdx#query-all-time-series-entries)
+      * [Filter query results](../../../document-extensions/timeseries/querying/using-indexes.mdx#filter-query-results)
+      * [Order query results](../../../document-extensions/timeseries/querying/using-indexes.mdx#order-query-results)
+      * [Project results](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)
+
+
+## Sample Index
+
+* The following is a time series map-index that will be used in the query examples throughout this article.
+
+* Each **index-entry** consists of:
+    * Three index-fields obtained from the "HeartRates" time series entries: `bpm`, `date`, and `tag`.
+    * One index-field obtained from the time series [segment](../../../document-extensions/timeseries/indexing.mdx#timeseriessegment-object) header: `employeeID`.
+    * One index-field obtained from the loaded employee document: `employeeName`.
+
+* When querying this time series index:
+    * The resulting items correspond to the time series entries that match the query predicate.
+    * Each item in the results will be in the shape of the defined index-entry.
+      Different result types may be returned when the query [projects the results](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results).
+
+
+
+{`class TsIndex extends AbstractRawJavaScriptTimeSeriesIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.maps.add(\`
+            timeSeries.map("Employees", "HeartRates", function (segment) \{
+                let employee = load(segment.DocumentId, "Employees");
+
+                // Return the index-entry:
+                return segment.Entries.map(entry => (\{
+
+                    // Define the index-fields:
+                    bpm: entry.Values[0],
+                    date: new Date(entry.Timestamp),
+                    tag: entry.Tag,
+                    employeeID: segment.DocumentId,
+                    employeeName: employee.FirstName + " " + employee.LastName
+                \}));
+            \})
+        \`);
+    \}
+\}
+`}
+
+
+
+
+## Querying the index
+
+#### Query all time series entries:
+
+No filtering is applied in this query.
+Results will include ALL entries from time series "HeartRates".
+
+
+
+{`const results = await session
+    // Query the index
+    .query({ indexName: "TsIndex" })
+    // Query for all entries w/o any filtering
+    .all();
+
+// Access results:
+const entryResult = results[0];
+const employeeName = entryResult.employeeName;
+const bpm = entryResult.bpm;
+`}
+
+
+
+{`const results = await session
+    // Provide RQL to rawQuery
+    .advanced.rawQuery("from index 'TsIndex'")
+    // Execute the query
+    .all();
+`}
+
+
+
+{`from index "TsIndex"
+`}
+
+
+
+#### Filter query results:
+
+In this example, time series entries are filtered by the query.
+The query predicate is applied to the index-fields.
+
+
+
+{`const results = await session
+    .query({ indexName: "TsIndex" })
+    // Retrieve only time series entries with high BPM values for a specific employee
+    .whereEquals("employeeName", "Robert King")
+    .whereGreaterThan("bpm", 85)
+    .all();
+`}
+
+
+
+{`const results = await session
+    // Retrieve only time series entries with high BPM values for a specific employee
+    .advanced.rawQuery(\`
+        from index "TsIndex"
+        where employeeName == "Robert King" and bpm > 85.0
+    \`)
+    .all();
+`}
+
+
+
+{`from index "TsIndex"
+where employeeName == "Robert King" and bpm > 85.0
+`}
+
+
+
+#### Order query results:
+
+Results can be ordered by any of the index-fields.
+
+
+
+{`const results = await session
+    .query({ indexName: "TsIndex" })
+    // Retrieve time series entries where employees had a low BPM value
+    .whereLessThan("bpm", 58)
+    // Order by the 'date' index-field (descending order)
+    .orderByDescending("date")
+    .all();
+`}
+
+
+
+{`const results = await session
+    // Retrieve entries with low BPM value and order by 'date' descending
+    .advanced.rawQuery(\`
+        from index "TsIndex"
+        where bpm < 58
+        order by date desc
+    \`)
+    .all();
+`}
+
+
+
+{`from index "TsIndex"
+where bpm < 58
+order by date desc
+`}
+
+
+
+#### Project results:
+
+* Instead of returning the entire index-entry object for each result item,
+  you can return only partial fields.
+
+* Learn more about projecting query results in [Project Index Query Results](../../../indexes/querying/projections.mdx).
+
+* In this example, we query for time series entries with a very high BPM value.
+  We retrieve entries with BPM value > 100 but return only the _employeeID_ for each entry.
+
+
+
+{`const results = await session
+    .query({ indexName: "TsIndex" })
+    .whereGreaterThan("bpm", 100)
+    // Return only the employeeID index-field in the results
+    .selectFields(["employeeID"])
+    // Optionally: call 'distinct' to remove duplicates from results
+    .distinct()
+    .all();
+`}
+
+
+
+{`const results = await session
+    // Return only the employeeID index-field in the results
+    .advanced.rawQuery(\`
+        from index "TsIndex"
+        where bpm > 100.0
+        select distinct employeeID
+    \`)
+    .all();
+`}
+
+
+
+{`from index "TsIndex"
+where bpm > 100
+select distinct employeeID
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-php.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-php.mdx
new file mode 100644
index 0000000000..896f7b7c41
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-php.mdx
@@ -0,0 +1,359 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Time series index**:
+
+    * STATIC-time-series-indexes can be defined from the [Client API](../../../document-extensions/timeseries/indexing.mdx)
+      or using [Studio](../../../studio/database/indexes/create-map-index.mdx).
+      Such an index can be queried in the same way as a regular index that indexes documents.
+      (See [Querying an index](../../../indexes/querying/query-index.mdx)).
+
+    * AUTO-time-series-indexes are **not** generated automatically by the server when making a time series query.
+
+* **The contents of the query results**:
+
+    * Unlike a document index, where the source data are your JSON documents,
+      the source data for a time series index are the time series entries within the documents.
+ + * When querying a **document index**: + the resulting objects are the document entities (unless results are [projected](../../../indexes/querying/projections.mdx)). + + * When querying a **time series index**: + each item in the results is of the type defined by the **index-entry** in the index definition, + (unless results are [projected](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)). + The documents themselves are not returned. + +* In this page: + * [Sample index](../../../document-extensions/timeseries/querying/using-indexes.mdx#sample-index) + * [Querying the index](../../../document-extensions/timeseries/querying/using-indexes.mdx#querying-the-index) + * [Query all time series entries](../../../document-extensions/timeseries/querying/using-indexes.mdx#query-all-time-series-entries) + * [Filter query results](../../../document-extensions/timeseries/querying/using-indexes.mdx#filter-query-results) + * [Order query results](../../../document-extensions/timeseries/querying/using-indexes.mdx#order-query-results) + * [Project results](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results) + + +## Sample Index + +* The following is a time series map-index that will be used in the query examples throughout this article. + +* Each **index-entry** consists of: + * Three index-fields obtained from the "HeartRates" time series entries: `BPM`, `Date`, and `Tag`. + * One index-field obtained from the time series [segment](../../../document-extensions/timeseries/indexing.mdx#timeseriessegment-object) header: `EmployeeID`. + * One index-field obtained from the loaded employee document: `EmployeeName`. + +* When querying this time series index: + * The resulting items correspond to the time series entries that match the query predicate. + * Each item in the results will be of type `TsIndex.IndexEntry`, which is the index-entry. + Different result types may be returned when the query [projects the results](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results). + + + +{`public class TsIndex : AbstractTimeSeriesIndexCreationTask +\{ + // The index-entry: + // ================ + public class IndexEntry + \{ + // The index-fields: + // ================= + public double BPM \{ get; set; \} + public DateTime Date \{ get; set; \} + public string Tag \{ get; set; \} + public string EmployeeID \{ get; set; \} + public string EmployeeName \{ get; set; \} + \} + + public TsIndex() + \{ + AddMap("HeartRates", timeSeries => + from segment in timeSeries + from entry in segment.Entries + + let employee = LoadDocument(segment.DocumentId) + + // Define the content of the index-fields: + // ======================================= + select new IndexEntry() + \{ + BPM = entry.Values[0], + Date = entry.Timestamp, + Tag = entry.Tag, + EmployeeID = segment.DocumentId, + EmployeeName = employee.FirstName + " " + employee.LastName + \}); + \} +\} +`} + + + + + +## Querying the index + +#### Query all time series entries: + +No filtering is applied in this query. +Results will include ALL entries from time series "HeartRates". 
+ + + + +{`using (var session = store.OpenSession()) +{ + List results = session + // Query the index + .Query() + // Query for all entries w/o any filtering + .ToList(); + + // Access results: + TsIndex.IndexEntry entryResult = results[0]; + string employeeName = entryResult.EmployeeName; + double BPM = entryResult.BPM; +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Query the index + .DocumentQuery() + // Query for all entries w/o any filtering + .ToList(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Query the index for all entries w/o any filtering + .RawQuery($@" + from index 'TsIndex' + ") + .ToList(); +} +`} + + + + +{`from index "TsIndex" +`} + + + +#### Filter query results: + +In this example, time series entries are filtered by the query. +The query predicate is applied to the index-fields. + + + + +{`using (var session = store.OpenSession()) +{ + List results = session + .Query() + // Retrieve only time series entries with high BPM values for a specific employee + .Where(x => x.EmployeeName == "Robert King" && x.BPM > 85) + .ToList(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + .DocumentQuery() + // Retrieve only time series entries with high BPM values for a specific employee + .WhereEquals(x => x.EmployeeName, "Robert King") + .AndAlso() + .WhereGreaterThan(x => x.BPM, 85) + .ToList(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Retrieve only time series entries with high BPM values for a specific employee + .RawQuery($@" + from index 'TsIndex' + where EmployeeName == 'Robert King' and BPM > 85.0 + ") + .ToList(); +} +`} + + + + +{`from index "TsIndex" +where EmployeeName == "Robert King" and BPM > 85.0 +`} + + + +#### Order query results: + +Results can be ordered by any of the index-fields. + + + + +{`using (var session = store.OpenSession()) +{ + List results = session + .Query() + // Retrieve time series entries where employees had a low BPM value + .Where(x => x.BPM < 58) + // Order by the 'Date' index-field (descending order) + .OrderByDescending(x => x.Date) + .ToList(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + .DocumentQuery() + // Retrieve time series entries where employees had a low BPM value + .WhereLessThan(x => x.BPM, 58) + // Order by the 'Date' index-field (descending order) + .OrderByDescending(x => x.Date) + .ToList(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Retrieve entries with low BPM value and order by 'Date' descending + .RawQuery($@" + from index 'TsIndex' + where BPM < 58.0 + order by Date desc + ") + .ToList(); +} +`} + + + + +{`from index "TsIndex" +where BPM < 58.0 +order by Date desc +`} + + + +#### Project results: + +* Instead of returning the entire `TsIndex.IndexEntry` object for each result item, + you can return only partial fields. + +* Learn more about projecting query results in [Project Index Query Results](../../../indexes/querying/projections.mdx). + +* In this example, we query for time series entries with a very high BPM value. + We retrieve entries with BPM value > 100 but return only the _EmployeeID_ for each entry. 
+ + + + +{`using (var session = store.OpenSession()) +{ + List results = session + .Query() + .Where(x => x.BPM > 100) + // Return only the EmployeeID index-field in the results + .Select(x => x.EmployeeID) + // Optionally: call 'Distinct' to remove duplicates from results + .Distinct() + .ToList(); +} +`} + + + + +{`var fieldsToProject = new string[] { + "EmployeeID" +}; + +using (var session = store.OpenSession()) +{ + List results = session.Advanced + .DocumentQuery() + .WhereGreaterThan(x => x.BPM, 100) + // Return only the EmployeeID index-field in the results + .SelectFields(fieldsToProject) + // Optionally: call 'Distinct' to remove duplicates from results + .Distinct() + .ToList(); +} +`} + + + + +{`// This class is used when projecting index-fields via DocumentQuery +public class EmployeeDetails +{ + public string EmployeeName { get; set; } + public string EmployeeID { get; set; } +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List results = session.Advanced + // Return only the EmployeeID index-field in the results + .RawQuery($@" + from index 'TsIndex' + where BPM > 100.0 + select distinct EmployeeID + ") + .ToList(); +} +`} + + + + +{`from index "TsIndex" +where BPM > 100.0 +select distinct EmployeeID +`} + + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-python.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-python.mdx new file mode 100644 index 0000000000..0a9a50dd4b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/_using-indexes-python.mdx @@ -0,0 +1,291 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Time series index**: + + * STATIC-time-series-indexes can be defined from the [Client API](../../../document-extensions/timeseries/indexing.mdx) + or using [Studio](../../../studio/database/indexes/create-map-index.mdx). + Such an index can be queried in the same way as a regular index that indexes documents. + (See [Querying an index](../../../indexes/querying/query-index.mdx)). + + * AUTO-time-series-indexes are Not generated automatically by the server when making a time series query. + +* **The contents of the query results**: + + * Unlike a document index, where the source data are your JSON documents, + the source data for a time series index are the time series entries within the documents. + + * When querying a **document index**: + the resulting objects are the document entities (unless results are projected). + + * When querying a **time series index**: + each item in the results is of the type defined by the **index-entry** in the index definition, + (unless results are projected). + The documents themselves are not returned. 
+
+* In this page:
+  * [Sample index](../../../document-extensions/timeseries/querying/using-indexes.mdx#sample-index)
+  * [Querying the index](../../../document-extensions/timeseries/querying/using-indexes.mdx#querying-the-index)
+     * [Query all time series entries](../../../document-extensions/timeseries/querying/using-indexes.mdx#query-all-time-series-entries)
+     * [Filter query results](../../../document-extensions/timeseries/querying/using-indexes.mdx#filter-query-results)
+     * [Order query results](../../../document-extensions/timeseries/querying/using-indexes.mdx#order-query-results)
+     * [Project results](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results)
+  * [Syntax](../../../document-extensions/timeseries/querying/using-indexes.mdx#syntax)
+
+
+## Sample Index
+
+* The following is a time series map-index that will be used in the query examples throughout this article.
+
+* Each **index-entry** consists of:
+  * Three index-fields obtained from the "HeartRates" time series entries: `BPM`, `Date`, and `Tag`.
+  * One index-field obtained from the time series [segment](../../../document-extensions/timeseries/indexing.mdx#timeseriessegment-object) header: `EmployeeID`.
+  * One index-field obtained from the loaded employee document: `EmployeeName`.
+
+* When querying this time series index:
+  * The resulting items correspond to the time series entries that match the query predicate.
+  * Each item in the results will be of type `TsIndex.IndexEntry`, which is the index-entry.
+    Different result types may be returned when the query [projects the results](../../../document-extensions/timeseries/querying/using-indexes.mdx#project-results).
+
+
+
+{`class TsIndex(AbstractTimeSeriesIndexCreationTask):
+    # The index-entry:
+    # ===============
+    class IndexEntry:
+        def __init__(
+            self,
+            bpm: float = None,
+            date: datetime = None,
+            tag: str = None,
+            employee_id: str = None,
+            employee_name: str = None,
+        ):
+            # The index-fields:
+            # =================
+            self.bpm = bpm
+            self.date = date
+            self.tag = tag
+            self.employee_id = employee_id
+            self.employee_name = employee_name
+
+    def __init__(self):
+        super().__init__()
+        self.map = """
+        from ts in timeSeries.Employees.HeartRates
+        from entry in ts.Entries
+        let employee = LoadDocument(ts.DocumentId, "Employees")
+        select new
+        \{
+            bpm = entry.Values[0],
+            date = entry.Timestamp.Date,
+            tag = entry.Tag,
+            employee_id = ts.DocumentId,
+            employee_name = employee.FirstName + ' ' + employee.LastName
+        \}
+        """
+`}
+
+
+
+
+
+## Querying the index
+
+#### Query all time series entries:
+
+No filtering is applied in this query.
+Results will include ALL entries from time series "HeartRates".
+
+
+
+
+{`with store.open_session() as session:
+    results = list(session.query_index_type(self.TsIndex, self.TsIndex.IndexEntry))
+
+    # Access results:
+    entry_result = results[0]
+    employee_name = entry_result.employee_name
+    bpm = entry_result.bpm
+`}
+
+
+
+
+{`with store.open_session() as session:
+    results = list(session.advanced.raw_query("from index 'TsIndex'", self.TsIndex.IndexEntry))
+`}
+
+
+
+
+{`from index "TsIndex"
+`}
+
+
+
+#### Filter query results:
+
+In this example, time series entries are filtered by the query.
+The query predicate is applied to the index-fields.
+
+
+
+
+{`with store.open_session() as session:
+    results = list(
+        session.query_index_type(self.TsIndex, self.TsIndex.IndexEntry)
+        .where_equals("employee_name", "Robert King")
+        .and_also()
+        .where_greater_than("bpm", 85)
+    )
+`}
+
+
+
+
+{`with store.open_session() as session:
+    results = list(
+        session.advanced.raw_query(
+            "from index 'TsIndex' where employee_name == 'Robert King' and bpm > 85.0",
+            self.TsIndex.IndexEntry,
+        )
+    )
+`}
+
+
+
+
+{`from index "TsIndex"
+where employee_name == "Robert King" and bpm > 85.0
+`}
+
+
+
+#### Order query results:
+
+Results can be ordered by any of the index-fields.
+
+
+
+
+{`with store.open_session() as session:
+    results = list(
+        session.query_index_type(self.TsIndex, self.TsIndex.IndexEntry)
+        .where_less_than("bpm", 58)
+        .order_by_descending("date")
+    )
+`}
+
+
+
+
+{`with store.open_session() as session:
+    results = list(
+        session.advanced.raw_query(
+            "from index 'TsIndex' where bpm < 58.0 order by date desc",
+            self.TsIndex.IndexEntry,
+        )
+    )
+`}
+
+
+
+
+{`from index "TsIndex"
+where bpm < 58.0
+order by date desc
+`}
+
+
+
+#### Project results:
+
+* Instead of returning the entire `TsIndex.IndexEntry` object for each result item,
+  you can return only partial fields.
+
+* In this example, we query for time series entries with a very high BPM value.
+  We retrieve entries with bpm value > 100 but return only the _employee_id_ for each entry.
+
+
+
+
+{`with store.open_session() as session:
+    results = list(
+        session.query_index_type(self.TsIndex, self.TsIndex.IndexEntry)
+        .where_greater_than("bpm", 100)
+        .select_fields(self.EmployeeDetails, "employee_id")
+        .distinct()
+    )
+`}
+
+
+
+
+{`# This class is used when projecting index-fields via DocumentQuery
+class EmployeeDetails:
+    def __init__(self, employee_name: str = None, employee_id: str = None):
+        self.employee_name = employee_name
+        self.employee_id = employee_id
+`}
+
+
+
+
+{`with store.open_session() as session:
+    results = list(
+        session.advanced.raw_query(
+            "from index 'TsIndex' where bpm > 100.0 select distinct employee_id",
+            self.TsIndex.IndexEntry,
+        )
+    )
+`}
+
+
+
+
+{`from index "TsIndex"
+where bpm > 100.0
+select distinct employee_id
+`}
+
+
+
+
+
+
+## Syntax
+
+* [query](../../../client-api/session/querying/how-to-query.mdx#query-overview)
+
+
+{`def query(
+self, source: Optional[Query] = None, object_type: Optional[Type[_T]] = None
+) -> DocumentQuery[_T]:
+...
+`}
+
+
+
+* [document_query](../../../client-api/session/querying/document-query/query-vs-document-query.mdx)
+
+
+{`def document_query(
+self,
+index_name: str = None,
+collection_name: str = None,
+object_type: Type[_T] = None,
+is_map_reduce: bool = False,
+) -> DocumentQuery[_T]:
+...
+`} + + + + + + diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/aggregation-and-projections.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/aggregation-and-projections.mdx new file mode 100644 index 0000000000..278b86d49a --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/aggregation-and-projections.mdx @@ -0,0 +1,54 @@ +--- +title: "Aggregating Time Series Values" +hide_table_of_contents: true +sidebar_label: Aggregating Values +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import AggregationAndProjectionsCsharp from './_aggregation-and-projections-csharp.mdx'; +import AggregationAndProjectionsPhp from './_aggregation-and-projections-php.mdx'; +import AggregationAndProjectionsNodejs from './_aggregation-and-projections-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "php", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/assets/time-series-query.png b/versioned_docs/version-7.1/document-extensions/timeseries/querying/assets/time-series-query.png new file mode 100644 index 0000000000..eeeedf90eb Binary files /dev/null and b/versioned_docs/version-7.1/document-extensions/timeseries/querying/assets/time-series-query.png differ diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/choosing-query-range.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/choosing-query-range.mdx new file mode 100644 index 0000000000..7fa2eb9f0b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/choosing-query-range.mdx @@ -0,0 +1,56 @@ +--- +title: "Choosing Time Series Range" +hide_table_of_contents: true +sidebar_label: Choosing Query Range +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ChoosingQueryRangeCsharp from './_choosing-query-range-csharp.mdx'; +import ChoosingQueryRangePython from './_choosing-query-range-python.mdx'; +import ChoosingQueryRangePhp from './_choosing-query-range-php.mdx'; +import ChoosingQueryRangeNodejs from './_choosing-query-range-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/filtering.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/filtering.mdx new file mode 100644 index 0000000000..5474721a89 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/filtering.mdx @@ -0,0 +1,56 @@ +--- +title: "Filtering Time Series Queries" +hide_table_of_contents: true +sidebar_label: Filtering +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import FilteringCsharp from './_filtering-csharp.mdx'; +import FilteringPython from './_filtering-python.mdx'; +import FilteringPhp from './_filtering-php.mdx'; +import FilteringNodejs from './_filtering-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of 
file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/gap-filling.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/gap-filling.mdx new file mode 100644 index 0000000000..759e52ac4e --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/gap-filling.mdx @@ -0,0 +1,51 @@ +--- +title: "Data Gap Filling" +hide_table_of_contents: true +sidebar_label: Data Gap Filling +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import GapFillingCsharp from './_gap-filling-csharp.mdx'; +import GapFillingJava from './_gap-filling-java.mdx'; +import GapFillingNodejs from './_gap-filling-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/overview-and-syntax.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/overview-and-syntax.mdx new file mode 100644 index 0000000000..55ea481218 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/overview-and-syntax.mdx @@ -0,0 +1,58 @@ +--- +title: "Querying Time Series: Overview & Syntax" +hide_table_of_contents: true +sidebar_label: Overview and Syntax +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OverviewAndSyntaxCsharp from './_overview-and-syntax-csharp.mdx'; +import OverviewAndSyntaxPython from './_overview-and-syntax-python.mdx'; +import OverviewAndSyntaxPhp from './_overview-and-syntax-php.mdx'; +import OverviewAndSyntaxNodejs from './_overview-and-syntax-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/statistics.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/statistics.mdx new file mode 100644 index 0000000000..f1d75f70b7 --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/statistics.mdx @@ -0,0 +1,210 @@ +--- +title: "Statistical Measures" +hide_table_of_contents: true +sidebar_label: Statistical Measures +sidebar_position: 4 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Statistical Measures + + +* Queries can calculate the **percentile**, **slope**, and **standard deviation** +of a time series, or of a range of entries within a time series. + +* For time series that have more than one value per entry, these methods return +one measure for the first values in each entry, another measure for the +second values in each entry, and so on. 
+
+* In this page:
+  * [Syntax](../../../document-extensions/timeseries/querying/statistics.mdx#syntax)
+  * [Examples](../../../document-extensions/timeseries/querying/statistics.mdx#examples)
+
+
+## Syntax
+
+### Percentile
+
+
+A [percentile](https://en.wikipedia.org/wiki/Percentile) of a time series
+is the value below which a given percentage of the series values fall,
+when they are arranged from smallest to largest.
+
+For example, a 90th percentile is greater than 90% of the values in the
+series, and less than the remaining 10%.
+
+
+* RQL method: `percentile()`
+* LINQ method: `Percentile()`
+
+The percentile method can be used to calculate any percentile in a time
+series or range of time series entries. It takes one `double` value that is greater than
+0 and less than or equal to 100. This represents the percent of the time series values that
+should be smaller than the result.
+
+See examples [below](../../../document-extensions/timeseries/querying/statistics.mdx#examples).
+### Slope
+
+* RQL method: `slope()`
+* LINQ method: `Slope()`
+
+The slope of a time series or range of time series entries is the difference
+between the first and last values of the range (disregarding the values in
+between) divided by the difference in time.
+
+Queries that use this method must also [aggregate](../../../document-extensions/timeseries/querying/aggregation-and-projections.mdx)
+the time series, grouping the entries into whole numbers of time units.
+
+The difference in time is measured in milliseconds. Use [scaling](../../../document-extensions/timeseries/querying/overview-and-syntax.mdx#scaling-query-results)
+to adjust the results to your preferred units.
+
+See examples [below](../../../document-extensions/timeseries/querying/statistics.mdx#examples).
+### Standard Deviation
+
+* RQL method: `stddev()`
+* LINQ method: `StandardDeviation()`
+
+These methods return the [standard deviation](https://en.wikipedia.org/wiki/Standard_deviation)
+of time series values.
+
+See examples [below](../../../document-extensions/timeseries/querying/statistics.mdx#examples).
+### Result Format
+
+Queries with these methods return results with the following format:
+
+
+
+{`\{
+    "From": <range start time>,
+    "To": <range end time>,
+    "Count": [
+        <count of first values>,
+        <count of second values>,
+        ...
+    ],
+    <"Percentile"/"Slope"/"Standard Deviation">: [
+        <measure of first values>,
+        <measure of second values>,
+        ...
+    ]
+\}
+`}
+
+
+
+If the query uses `group by` aggregation, there will be one of
+these results for each of the aggregates.
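+
+As a concrete illustration of the slope definition above, here is a plain-arithmetic
+sketch (illustrative values only - not client API code):
+
+
+{`// slope = (last value - first value) / (time difference in milliseconds)
+double firstValue = 60;  // first BPM value in a 1-hour group
+double lastValue = 72;   // last BPM value in that group
+double elapsedMs = TimeSpan.FromHours(1).TotalMilliseconds;
+
+double slopePerMs = (lastValue - firstValue) / elapsedMs; // ~0.0000033 BPM per ms
+double slopePerHour = slopePerMs * 3_600_000;             // 12 BPM per hour, after scaling
+`}
+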
+
+
+
+## Examples
+
+### Percentile
+
+
+
+
+{`var query = session.Query<Employee>()
+    .Select(p => RavenQuery.TimeSeries(p, "HeartRates")
+        .Select(x => new
+        {
+            P = x.Percentile(90)
+        }
+        )
+);
+`}
+
+
+
+
+{`var query = session.Advanced.RawQuery<TimeSeriesAggregationResult>(@"
+    from Employees as e
+    select timeseries(
+        from e.HeartRates
+        select percentile(90)
+    )
+");
+`}
+
+
+
+
+### Slope
+
+
+
+
+{`var query = session.Query<Employee>()
+    .Select(p => RavenQuery.TimeSeries(p, "HeartRates")
+        .GroupBy(g => g.Hours(1))
+        .Select(x => new
+        {
+            Slope = x.Slope()
+        }
+        )
+);
+`}
+
+
+
+
+{`var query = session.Advanced.RawQuery<TimeSeriesAggregationResult>(@"
+    from Employees as e
+    select timeseries(
+        from e.HeartRates
+        group by 1 hour
+        select slope()
+    )
+");
+`}
+
+
+
+
+### Standard Deviation
+
+
+
+
+{`// Example query with defined range
+var date = DateTime.Today;
+
+var query = session.Query<Employee>()
+    .Select(p => RavenQuery.TimeSeries(p, "HeartRates", date, date.AddDays(1))
+        .Select(x => new
+        {
+            StdDev = x.StandardDeviation()
+        }
+        )
+);
+`}
+
+
+
+
+{`// Example query with defined range
+var date = DateTime.Today;
+
+var query = session.Advanced.RawQuery<TimeSeriesAggregationResult>(@"
+    from Employees as e
+    select timeseries(
+        from e.HeartRates
+        between $start and $end
+        select stddev()
+    )
+")
+.AddParameter("start", date)
+.AddParameter("end", date.AddDays(1));
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/stream-timeseries.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/stream-timeseries.mdx
new file mode 100644
index 0000000000..65abdfd50e
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/stream-timeseries.mdx
@@ -0,0 +1,39 @@
+---
+title: "Stream Time Series Data"
+hide_table_of_contents: true
+sidebar_label: Streaming Time Series
+sidebar_position: 7
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import StreamTimeseriesCsharp from './_stream-timeseries-csharp.mdx';
+import StreamTimeseriesJava from './_stream-timeseries-java.mdx';
+
+export const supportedLanguages = ["csharp", "java"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/querying/using-indexes.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/querying/using-indexes.mdx
new file mode 100644
index 0000000000..1bf3486dc2
--- /dev/null
+++ b/versioned_docs/version-7.1/document-extensions/timeseries/querying/using-indexes.mdx
@@ -0,0 +1,56 @@
+---
+title: "Querying Time Series Indexes"
+hide_table_of_contents: true
+sidebar_label: Querying Time Series Indexes
+sidebar_position: 5
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import UsingIndexesCsharp from './_using-indexes-csharp.mdx';
+import UsingIndexesPython from './_using-indexes-python.mdx';
+import UsingIndexesPhp from './_using-indexes-php.mdx';
+import UsingIndexesNodejs from './_using-indexes-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "python", "php", "nodejs"];
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/rollup-and-retention.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/rollup-and-retention.mdx
new file mode 100644
index 0000000000..7d0caa584a
--- /dev/null
+++
b/versioned_docs/version-7.1/document-extensions/timeseries/rollup-and-retention.mdx @@ -0,0 +1,49 @@ +--- +title: "Time Series Rollups and Retention" +hide_table_of_contents: true +sidebar_label: Rollups and Retention +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import RollupAndRetentionCsharp from './_rollup-and-retention-csharp.mdx'; +import RollupAndRetentionPython from './_rollup-and-retention-python.mdx'; +import RollupAndRetentionPhp from './_rollup-and-retention-php.mdx'; +import RollupAndRetentionNodejs from './_rollup-and-retention-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/document-extensions/timeseries/time-series-and-other-features.mdx b/versioned_docs/version-7.1/document-extensions/timeseries/time-series-and-other-features.mdx new file mode 100644 index 0000000000..d76c445c1b --- /dev/null +++ b/versioned_docs/version-7.1/document-extensions/timeseries/time-series-and-other-features.mdx @@ -0,0 +1,41 @@ +--- +title: "Time Series and Other Features" +hide_table_of_contents: true +sidebar_label: Time Series and Other Features +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import TimeSeriesAndOtherFeaturesCsharp from './_time-series-and-other-features-csharp.mdx'; +import TimeSeriesAndOtherFeaturesNodejs from './_time-series-and-other-features-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/glossary/_blittable-json-reader-object-csharp.mdx b/versioned_docs/version-7.1/glossary/_blittable-json-reader-object-csharp.mdx new file mode 100644 index 0000000000..e9fa5cd846 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/_blittable-json-reader-object-csharp.mdx @@ -0,0 +1,14 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The reader object of a blittable JSON document. + +### Properties + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **Count** | int | The number of properties. | +| **Size** | int | The size of a document in blittable format (in bytes). 
| + diff --git a/versioned_docs/version-7.1/glossary/_category_.json b/versioned_docs/version-7.1/glossary/_category_.json new file mode 100644 index 0000000000..d6a7d964e3 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 11, + "label": "Glossary" +} diff --git a/versioned_docs/version-7.1/glossary/_copy-attachment-command-data-csharp.mdx b/versioned_docs/version-7.1/glossary/_copy-attachment-command-data-csharp.mdx new file mode 100644 index 0000000000..18ead7c262 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/_copy-attachment-command-data-csharp.mdx @@ -0,0 +1,21 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +### Properties + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **sourceDocumentId** | string | Source document id | +| **sourceName** | string | Source attachment name | +| **destinationDocumentId** | string | Destination document id | +| **destinationName** | string | Destination attachment name | +| **changeVector** | string | Document change-vector | + +### Methods + +| Signature | Description | +| ---------- | ----------- | +| **DynamicJsonValue ToJson(DocumentConventions conventions, JsonOperationContext context)** | Translate this instance to Json. | + diff --git a/versioned_docs/version-7.1/glossary/_counters-batch-command-data-csharp.mdx b/versioned_docs/version-7.1/glossary/_counters-batch-command-data-csharp.mdx new file mode 100644 index 0000000000..8e3ba41a8b --- /dev/null +++ b/versioned_docs/version-7.1/glossary/_counters-batch-command-data-csharp.mdx @@ -0,0 +1,20 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +### Properties + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **Id** | string | Document id | +| **Counters** | [DocumentCountersOperation](../client-api/operations/counters/counter-batch.mdx#documentcountersoperation) | Counter operations to perform | +| **ChangeVector** | string | The change-vector of the last-updated counter | + +### Methods + +| Signature | Description | +| ---------- | ----------- | +| **DynamicJsonValue ToJson(DocumentConventions conventions, JsonOperationContext context)** | Translate this instance to a Json. | + + diff --git a/versioned_docs/version-7.1/glossary/_delete-command-data-csharp.mdx b/versioned_docs/version-7.1/glossary/_delete-command-data-csharp.mdx new file mode 100644 index 0000000000..3ba135b610 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/_delete-command-data-csharp.mdx @@ -0,0 +1,20 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +### Properties + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **Id** | string | Document ID | +| **Type** | CommandType | The Command Type (`DELETE`) | +| **ChangeVector** | string | Document change-vector | + +### Methods + +| Signature | Description | +| ---------- | ----------- | +| **DynamicJsonValue ToJson(DocumentConventions conventions, JsonOperationContext context)** | Translate this instance to Json. 
| + + diff --git a/versioned_docs/version-7.1/glossary/_index-query-csharp.mdx b/versioned_docs/version-7.1/glossary/_index-query-csharp.mdx new file mode 100644 index 0000000000..ddbdf6415c --- /dev/null +++ b/versioned_docs/version-7.1/glossary/_index-query-csharp.mdx @@ -0,0 +1,21 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +### Properties + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **Query** | string | The query | +| **QueryParameters** | Parameters (Dictionary<string, object>) | The query parameters | +| **Start** | int | The start of records to read | +| **PageSize** | int | The page size | +| **WaitForNonStaleResults** | bool | If set to true, the server side will wait until the results are non-stale or until a timeout. | +| **WaitForNonStaleResultsTimeout** | TimeSpan? | The timeout for WaitForNonStaleResults | +| **CutoffEtag** | long? | The cutoff Etag is used to check if the index has already processed a document with the given Etag. | +| **DisableCaching** | bool | Indicates if the query results should be read from the cache (if cached previously), or added to the cache (if there were no cached items prior). | +| **SkipDuplicateChecking** | bool | Allows to skip duplicate checking during queries. | +| **ExplainScores** | bool | When a query result should contain an explanation about how docs are scored against a query. | + + diff --git a/versioned_docs/version-7.1/glossary/_move-attachment-command-data-csharp.mdx b/versioned_docs/version-7.1/glossary/_move-attachment-command-data-csharp.mdx new file mode 100644 index 0000000000..b98140f10c --- /dev/null +++ b/versioned_docs/version-7.1/glossary/_move-attachment-command-data-csharp.mdx @@ -0,0 +1,21 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +### Properties + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **documentId** | string | Document id | +| **name** | string | Attachment name | +| **destinationDocumentId** | string | Destination document id | +| **destinationName** | string | Attachment destination name. | +| **changeVector** | string | Document change-vector | + +### Methods + +| Signature | Description | +| ---------- | ----------- | +| **DynamicJsonValue ToJson(DocumentConventions conventions, JsonOperationContext context)** | Translate this instance to a Json. 
|
+
diff --git a/versioned_docs/version-7.1/glossary/_patch-command-data-csharp.mdx b/versioned_docs/version-7.1/glossary/_patch-command-data-csharp.mdx
new file mode 100644
index 0000000000..a8653b3294
--- /dev/null
+++ b/versioned_docs/version-7.1/glossary/_patch-command-data-csharp.mdx
@@ -0,0 +1,20 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+### Properties
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **id** | string | Document ID |
+| **changeVector** | string | Document change-vector |
+| **patch** | [PatchRequest](../client-api/operations/patching/single-document.mdx) | Patch to apply |
+| **patchIfMissing** | [PatchRequest](../client-api/operations/patching/single-document.mdx) | Patch to apply if document is missing |
+
+### Methods
+
+| Signature | Description |
+| ---------- | ----------- |
+| **DynamicJsonValue ToJson(DocumentConventions conventions, JsonOperationContext context)** | Translate this instance to Json. |
+
diff --git a/versioned_docs/version-7.1/glossary/_put-command-data-csharp.mdx b/versioned_docs/version-7.1/glossary/_put-command-data-csharp.mdx
new file mode 100644
index 0000000000..399f2a1290
--- /dev/null
+++ b/versioned_docs/version-7.1/glossary/_put-command-data-csharp.mdx
@@ -0,0 +1,20 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+### Properties
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **Id** | string | Document ID |
+| **ChangeVector** | string | Document change-vector |
+| **Document** | DynamicJsonValue | Document to put |
+| **Type** | CommandType | The Command Type (PUT) |
+
+### Methods
+
+| Signature | Description |
+| ---------- | ----------- |
+| **DynamicJsonValue ToJson(DocumentConventions conventions, JsonOperationContext context)** | Translate this instance to Json. |
+
diff --git a/versioned_docs/version-7.1/glossary/_query-result-csharp.mdx b/versioned_docs/version-7.1/glossary/_query-result-csharp.mdx
new file mode 100644
index 0000000000..b519abfdee
--- /dev/null
+++ b/versioned_docs/version-7.1/glossary/_query-result-csharp.mdx
@@ -0,0 +1,22 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+### Properties
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **Results** | BlittableJsonReaderArray | The documents resulting from this query. |
+| **Includes** | BlittableJsonReaderObject | The documents included in the result. |
+| **IsStale** | bool | The value indicating whether the index is stale. |
+| **IndexTimestamp** | DateTime | The last time the index was updated. |
+| **TotalResults** | int | The total results for this query. |
+| **SkippedResults** | int | The skipped results. |
+| **IndexName** | string | The index used to answer this query. |
+| **ResultEtag** | long | The ETag value for the index's current state, which reflects which documents were indexed, which documents were deleted, etc. |
+| **LastQueryTime** | DateTime | The timestamp of the last time the index was queried. |
+| **DurationInMs** | long | The duration of executing the query on the server side. |
+| **ResultSize** | long | The size of the response which was sent from the server. This value is the _uncompressed_ size.
| +| **NodeTag** | string | Tag of a cluster node which responded to the query. | + diff --git a/versioned_docs/version-7.1/glossary/_stream-query-statistics-csharp.mdx b/versioned_docs/version-7.1/glossary/_stream-query-statistics-csharp.mdx new file mode 100644 index 0000000000..690805b5b7 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/_stream-query-statistics-csharp.mdx @@ -0,0 +1,15 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +### Properties + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **Index** | string | Index name | +| **IsStale** | bool | `true` if index is stale | +| **IndexTimestamp** | DateTime | Time when index was last updated | +| **TotalResults** | int | Total number of results | +| **ResultEtag** | long | Etag number specific for results returned for a given query | + diff --git a/versioned_docs/version-7.1/glossary/_stream-result-csharp.mdx b/versioned_docs/version-7.1/glossary/_stream-result-csharp.mdx new file mode 100644 index 0000000000..883d0025cc --- /dev/null +++ b/versioned_docs/version-7.1/glossary/_stream-result-csharp.mdx @@ -0,0 +1,14 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +### Properties + +| Name | Type | Description | +| ------------- | ------------- | ----- | +| **Id** | string | Document ID | +| **ChangeVector** | string | Document change vector | +| **Metadata** | IMetadataDictionary | Document metadata | +| **Document** | `T` | Document deserialized to a given generic type | + diff --git a/versioned_docs/version-7.1/glossary/blittable-json-reader-object.mdx b/versioned_docs/version-7.1/glossary/blittable-json-reader-object.mdx new file mode 100644 index 0000000000..201a40814e --- /dev/null +++ b/versioned_docs/version-7.1/glossary/blittable-json-reader-object.mdx @@ -0,0 +1,24 @@ +--- +title: "Glossary: BlittableJsonReaderObject" +hide_table_of_contents: true +sidebar_label: BlittableJsonReaderObject +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import BlittableJsonReaderObjectCsharp from './_blittable-json-reader-object-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/glossary/cluster-node.mdx b/versioned_docs/version-7.1/glossary/cluster-node.mdx new file mode 100644 index 0000000000..bdc309c183 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/cluster-node.mdx @@ -0,0 +1,22 @@ +--- +title: "Glossary: RavenDB Cluster Node" +hide_table_of_contents: true +sidebar_label: Cluster Node +sidebar_position: 8 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +#Glossary: RavenDB Cluster Node + +### What is Cluster Node? + +A RavenDB instance node which is a member of [RavenDB Cluster](./ravendb-cluster). As a cluster member, +it executes cluster-wide operations, which are controlled via Raft Commands. + +Configuration changes are propagated automatically through the cluster. 
diff --git a/versioned_docs/version-7.1/glossary/copy-attachment-command-data.mdx b/versioned_docs/version-7.1/glossary/copy-attachment-command-data.mdx new file mode 100644 index 0000000000..8b405d1539 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/copy-attachment-command-data.mdx @@ -0,0 +1,24 @@ +--- +title: "Glossary: CopyAttachmentCommandData" +hide_table_of_contents: true +sidebar_label: CopyAttachmentCommandData +sidebar_position: 18 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CopyAttachmentCommandDataCsharp from './_copy-attachment-command-data-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/glossary/counters-batch-command-data.mdx b/versioned_docs/version-7.1/glossary/counters-batch-command-data.mdx new file mode 100644 index 0000000000..1a4421b658 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/counters-batch-command-data.mdx @@ -0,0 +1,25 @@ +--- +title: "Glossary: CountersBatchCommandData" +hide_table_of_contents: true +sidebar_label: CountersBatchCommandData +sidebar_position: 20 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CountersBatchCommandDataCsharp from './_counters-batch-command-data-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + diff --git a/versioned_docs/version-7.1/glossary/database-group.mdx b/versioned_docs/version-7.1/glossary/database-group.mdx new file mode 100644 index 0000000000..5563275072 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/database-group.mdx @@ -0,0 +1,23 @@ +--- +title: "Glossary: Database Group" +hide_table_of_contents: true +sidebar_label: Database Group +sidebar_position: 10 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +#Glossary: Database Group + +### What is a database group? +When a database is created in a [RavenDB Cluster](./ravendb-cluster), we can choose a subset of nodes it would exist on. +Between the nodes of a database group, there is an active master-master replication. + +For example: +Assuming we have three-node RavenDB Cluster - nodes A, B and C, we can create a database with replication factor 2, and +it can be located on B and C nodes. In this setup, between the database on B and C a master-master replication will be active. diff --git a/versioned_docs/version-7.1/glossary/database-id.mdx b/versioned_docs/version-7.1/glossary/database-id.mdx new file mode 100644 index 0000000000..ef7a7905f0 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/database-id.mdx @@ -0,0 +1,21 @@ +--- +title: "Glossary: Database Id" +hide_table_of_contents: true +sidebar_label: Database Id +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Glossary: Database Id + +### What are Database Ids? 
+Each database in RavenDB has a Guid, which must be unique in the cluster. + +### Generating New Database ID +When restoring a snapshot, a new database Id may be generated on restore. This is needed if the same snapshot needs to be restored on multiple nodes so that the uniqueness of database IDs in the cluster is conserved. diff --git a/versioned_docs/version-7.1/glossary/delete-command-data.mdx b/versioned_docs/version-7.1/glossary/delete-command-data.mdx new file mode 100644 index 0000000000..e5bde8cd41 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/delete-command-data.mdx @@ -0,0 +1,24 @@ +--- +title: "Glossary: DeleteCommandData" +hide_table_of_contents: true +sidebar_label: DeleteCommandData +sidebar_position: 11 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DeleteCommandDataCsharp from './_delete-command-data-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/glossary/etag.mdx b/versioned_docs/version-7.1/glossary/etag.mdx new file mode 100644 index 0000000000..add5e8f89a --- /dev/null +++ b/versioned_docs/version-7.1/glossary/etag.mdx @@ -0,0 +1,48 @@ +--- +title: "Glossary: Etags" +hide_table_of_contents: true +sidebar_label: Etag +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Glossary: Etags + +### What are Etags? + +RavenDB uses etags to track changes of data. Etags are 64-bit numbers which always increase with each change. +If comparing etags for two versions of the same document on a specific RavenDB instance, the version with a higher etag is more up-to-date. + +Etags are local to each node and are meaningless in the context of the cluster. + + +### Etags vs. Change Vectors + +Both etags and change vectors are used to track document changes. Etags have meaning local to cluster nodes, and change vectors have meaning cluster-wide. + +* An etag is only incremented when a document is modified locally +* [Change Vector](../server/clustering/replication/change-vector.mdx) may be changed when a document is modified locally or replicated + +### Etags In-Depth + +In RavenDB, etags track changes on many different types of server entities. + +Etags are used to track changes to the following: + +* Documents +* Indexes +* Cluster Topology +* ETL + +For developing with the RavenDB client API, etags are not needed. Etags are used as a "building block" of a [Change Vector](../server/clustering/replication/change-vector.mdx). + +### Etags and Clustering + +When looking at etags in the context of a RavenDB cluster, etags are only meaningful within a single database on a specific node. +This means etags are _not_ universally unique per different databases on a single node and are not unique across cluster nodes. 
diff --git a/versioned_docs/version-7.1/glossary/index-query.mdx b/versioned_docs/version-7.1/glossary/index-query.mdx new file mode 100644 index 0000000000..b16b226314 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/index-query.mdx @@ -0,0 +1,24 @@ +--- +title: "Glossary: IndexQuery" +hide_table_of_contents: true +sidebar_label: IndexQuery +sidebar_position: 12 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexQueryCsharp from './_index-query-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/glossary/move-attachment-command-data.mdx b/versioned_docs/version-7.1/glossary/move-attachment-command-data.mdx new file mode 100644 index 0000000000..1429fcf618 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/move-attachment-command-data.mdx @@ -0,0 +1,24 @@ +--- +title: "Glossary: MoveAttachmentCommandData" +hide_table_of_contents: true +sidebar_label: MoveAttachmentCommandData +sidebar_position: 19 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import MoveAttachmentCommandDataCsharp from './_move-attachment-command-data-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/glossary/node-tag.mdx b/versioned_docs/version-7.1/glossary/node-tag.mdx new file mode 100644 index 0000000000..e8665f7e09 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/node-tag.mdx @@ -0,0 +1,21 @@ +--- +title: "Glossary: Node Tag" +hide_table_of_contents: true +sidebar_label: Node Tag +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Glossary: Node Tag + +### What is a Node Tag? + +It is a [cluster node](cluster-node) unique identifier, which is a denoted by up to four upper-case letters. + +Once a node is added to [RavenDB cluster](ravendb-cluster), a node tag will be assigned to it. 
diff --git a/versioned_docs/version-7.1/glossary/patch-command-data.mdx b/versioned_docs/version-7.1/glossary/patch-command-data.mdx new file mode 100644 index 0000000000..9e2ff1441c --- /dev/null +++ b/versioned_docs/version-7.1/glossary/patch-command-data.mdx @@ -0,0 +1,24 @@ +--- +title: "Glossary: PatchCommandData" +hide_table_of_contents: true +sidebar_label: PatchCommandData +sidebar_position: 16 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PatchCommandDataCsharp from './_patch-command-data-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/glossary/put-command-data.mdx b/versioned_docs/version-7.1/glossary/put-command-data.mdx new file mode 100644 index 0000000000..4400609d66 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/put-command-data.mdx @@ -0,0 +1,24 @@ +--- +title: "Glossary: PutCommandData" +hide_table_of_contents: true +sidebar_label: PutCommandData +sidebar_position: 17 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PutCommandDataCsharp from './_put-command-data-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/glossary/query-result.mdx b/versioned_docs/version-7.1/glossary/query-result.mdx new file mode 100644 index 0000000000..009a9d250e --- /dev/null +++ b/versioned_docs/version-7.1/glossary/query-result.mdx @@ -0,0 +1,24 @@ +--- +title: "Glossary: QueryResult" +hide_table_of_contents: true +sidebar_label: QueryResult +sidebar_position: 13 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import QueryResultCsharp from './_query-result-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/glossary/raft-algorithm.mdx b/versioned_docs/version-7.1/glossary/raft-algorithm.mdx new file mode 100644 index 0000000000..94aa1c704d --- /dev/null +++ b/versioned_docs/version-7.1/glossary/raft-algorithm.mdx @@ -0,0 +1,31 @@ +--- +title: "Glossary: Raft Consensus Algorithm" +hide_table_of_contents: true +sidebar_label: Raft Consensus Algorithm +sidebar_position: 5 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Glossary: Raft Consensus Algorithm + +### What is Raft? + +Raft is a [distributed consensus](https://en.wikipedia.org/wiki/Consensus_(computer_science)) algorithm designed to be understandable and durable. + +In general, the algorithm is useful when we want to order the events that happen in a distributed system on different nodes. + +In RavenDB, Raft is used to coordinate the execution of cluster-wide operations over the nodes. + +If we want to create a database in a cluster, creating the database on all cluster nodes, the Raft will be used to make sure that the database creation is executed in at least (n/2) + 1 nodes. 
(quorum of nodes) + +### Additional Reading + + * A website with visualization and links to publications -> [https://raft.github.io/](https://raft.github.io/) + * A link to the original PhD dissertation on Raft Algorithm -> [https://github.com/ongardie/dissertation](https://github.com/ongardie/dissertation) + * Visualization and simple tutorial on how Raft works -> [http://thesecretlivesofdata.com/raft/](http://thesecretlivesofdata.com/raft/) diff --git a/versioned_docs/version-7.1/glossary/raft-command.mdx b/versioned_docs/version-7.1/glossary/raft-command.mdx new file mode 100644 index 0000000000..a70af8b5b0 --- /dev/null +++ b/versioned_docs/version-7.1/glossary/raft-command.mdx @@ -0,0 +1,21 @@ +--- +title: "Glossary: Raft Command" +hide_table_of_contents: true +sidebar_label: Raft Command +sidebar_position: 6 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +#Glossary: Raft Command + +In RavenDB 4.x, all cluster-level operations are essentially [Raft](./raft-algorithm) commands, which actually get executed only if they are applied to Raft log (which means that the commands reached the majority of cluster nodes). + +### What is a command? +A Raft implementation is comprised of a state machine and a log. The idea is to ensure the same order of log entries in the state machine (eventually). +A command is an entry in the Raft log. diff --git a/versioned_docs/version-7.1/glossary/ravendb-cluster.mdx b/versioned_docs/version-7.1/glossary/ravendb-cluster.mdx new file mode 100644 index 0000000000..c6d227bf5b --- /dev/null +++ b/versioned_docs/version-7.1/glossary/ravendb-cluster.mdx @@ -0,0 +1,20 @@ +--- +title: "Glossary: RavenDB Cluster" +hide_table_of_contents: true +sidebar_label: RavenDB Cluster +sidebar_position: 9 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Glossary: RavenDB Cluster + +### What is a cluster? +A group of RavenDB servers which may or may not be on the same machine. +This group of servers allows cluster-wide operations which execute on each node, using Raft to coordinate the execution. +If there is a leader, the cluster guarantees that at least (n/2) + 1 nodes would have the operation executed on them. 
(consensus quorum)
diff --git a/versioned_docs/version-7.1/glossary/replication-factor.mdx b/versioned_docs/version-7.1/glossary/replication-factor.mdx
new file mode 100644
index 0000000000..d125076e28
--- /dev/null
+++ b/versioned_docs/version-7.1/glossary/replication-factor.mdx
@@ -0,0 +1,17 @@
+---
+title: "Glossary: Replication Factor"
+hide_table_of_contents: true
+sidebar_label: Replication Factor
+sidebar_position: 7
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Glossary: Replication Factor
+
+Assuming a RavenDB Cluster with two or more nodes, the replication factor is the number of database replicas to maintain in the cluster.
diff --git a/versioned_docs/version-7.1/glossary/stream-query-statistics.mdx b/versioned_docs/version-7.1/glossary/stream-query-statistics.mdx
new file mode 100644
index 0000000000..3f22d4958e
--- /dev/null
+++ b/versioned_docs/version-7.1/glossary/stream-query-statistics.mdx
@@ -0,0 +1,24 @@
+---
+title: "Glossary: StreamQueryStatistics"
+hide_table_of_contents: true
+sidebar_label: StreamQueryStatistics
+sidebar_position: 14
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import StreamQueryStatisticsCsharp from './_stream-query-statistics-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/glossary/stream-result.mdx b/versioned_docs/version-7.1/glossary/stream-result.mdx
new file mode 100644
index 0000000000..99f97b8fdc
--- /dev/null
+++ b/versioned_docs/version-7.1/glossary/stream-result.mdx
@@ -0,0 +1,24 @@
+---
+title: "Glossary: StreamResult"
+hide_table_of_contents: true
+sidebar_label: StreamResult
+sidebar_position: 15
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import StreamResultCsharp from './_stream-result-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/glossary/tombstone.mdx b/versioned_docs/version-7.1/glossary/tombstone.mdx
new file mode 100644
index 0000000000..5213781b67
--- /dev/null
+++ b/versioned_docs/version-7.1/glossary/tombstone.mdx
@@ -0,0 +1,41 @@
+---
+title: "Glossary: Tombstones"
+hide_table_of_contents: true
+sidebar_label: Tombstone
+sidebar_position: 4
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Glossary: Tombstones
+
+When a document is deleted, RavenDB will leave behind a "delete marker" which is called a Tombstone.
+
+### Where are They Used?
+
+* Replication and ETL - this is needed so delete operations can be replicated to other nodes
+* Indexes use tombstones in order to delete no-longer-relevant entries that refer to deleted documents
+* Periodic Backup uses tombstones in order to back up "deletions" of documents
+
+### Tombstone Cleaning
+
+The tombstones are periodically cleaned.
+
+Cleaning will occur only for tombstones that were already processed by the modules where they are used:
+
+* Replication
+* Indexes
+* ETL
+* Periodic Backup
+
+
+
+* The tombstone cleanup interval is configurable via the
+  [Tombstones.CleanupIntervalInMin](../server/configuration/tombstone-configuration.mdx) configuration key.
+* By default, the value is 5 minutes.
+
diff --git a/versioned_docs/version-7.1/home.mdx b/versioned_docs/version-7.1/home.mdx
new file mode 100644
index 0000000000..2df4fc40c5
--- /dev/null
+++ b/versioned_docs/version-7.1/home.mdx
@@ -0,0 +1,18 @@
+---
+slug: /
+pagination_next: null
+pagination_prev: null
+---
+
+import StartingPoints from '@site/src/components/Homepage/StartingPoints/StartingPoints';
+import UseCases from '@site/src/components/Homepage/UseCases/UseCases';
+import Features from '@site/src/components/Homepage/Features/Features';
+
+# RavenDB Documentation
+Everything you need to know about our product, from getting started to advanced features.
+
+
+ + + +
diff --git a/versioned_docs/version-7.1/indexes/_boosting-csharp.mdx b/versioned_docs/version-7.1/indexes/_boosting-csharp.mdx
new file mode 100644
index 0000000000..497d984df4
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_boosting-csharp.mdx
@@ -0,0 +1,298 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When querying with some filtering conditions, a basic **score** is
+  calculated by the underlying engine for each document in the results.
+
+* Providing a **boost value** to selected fields allows prioritization of the resulting documents.
+  The boost value is integrated with the basic score, increasing the document rank.
+
+* The automatic ordering of results by their score is [configurable](../indexes/boosting.mdx#automatic-score-based-ordering).
+
+* Boosting can be achieved in the following ways:
+
+  * **At query time**:
+    By applying a boost factor to searched terms at query time (see [Boost search results](../client-api/session/querying/text-search/boost-search-results.mdx)).
+
+  * **Via index definition**:
+    By applying a boost factor in the index definition, as described in this article.
+
+* In this page:
+  * [Assign a boost factor to an index-field](../indexes/boosting.mdx#assign-a-boost-factor-to-an-index-field)
+  * [Assign a boost factor to the index-entry](../indexes/boosting.mdx#assign-a-boost-factor-to-the-index-entry)
+  * [Automatic score-based ordering](../indexes/boosting.mdx#automatic-score-based-ordering)
+  * [Corax vs Lucene: boosting differences](../indexes/boosting.mdx#corax-vs-lucene-boosting-differences)
+
+
+## Assign a boost factor to an index-field
+
+Applying a boost value to an index-field allows prioritization of matching documents based on an index-field.
+##### The index:
+
+
+
+
+{`public class Orders_ByCountries_BoostByField : AbstractIndexCreationTask<Order>
+{
+    public class IndexEntry
+    {
+        // Index-field 'ShipToCountry' will be boosted in the map definition below
+        public string ShipToCountry { get; set; }
+        public string CompanyCountry { get; set; }
+    }
+
+    public Orders_ByCountries_BoostByField()
+    {
+        Map = orders => from order in orders
+                        let company = LoadDocument<Company>(order.Company)
+
+                        // Note: with current server version,
+                        // use 'select new' instead of 'select new IndexEntry' for compilation
+                        select new
+                        {
+                            // Boost index-field 'ShipToCountry':
+                            // * Use method 'Boost', pass a numeric value to boost by
+                            // * Documents that match the query criteria for this field will rank higher
+                            ShipToCountry = order.ShipTo.Country.Boost(10),
+                            CompanyCountry = company.Address.Country
+                        };
+    }
+}
+`}
+
+
+
+
+{`public class Orders_ByCountries_BoostByField_JS : AbstractJavaScriptIndexCreationTask
+{
+    public Orders_ByCountries_BoostByField_JS()
+    {
+        Maps = new HashSet<string>()
+        {
+            @"map('orders', function(order) {
+                let company = load(order.Company, 'Companies')
+                return {
+                    ShipToCountry: boost(order.ShipTo.Country, 10),
+                    CompanyCountry: company.Address.Country
+                };
+            })"
+        };
+    }
+}
+`}
+
+
+
+
+##### The query:
+
+
+
+
+{`List<Order> orders = session
+    // Query the index
+    .Query<Orders_ByCountries_BoostByField.IndexEntry, Orders_ByCountries_BoostByField>()
+    .Where(x => x.ShipToCountry == "Poland" || x.CompanyCountry == "Portugal")
+    .OfType<Order>()
+    .ToList();
+
+// Because index-field 'ShipToCountry' was boosted (inside the index definition),
+// then documents containing 'Poland' in their 'ShipTo.Country' field will get a higher score than
+// documents containing a company that is located in 'Portugal'.
+`} + + + + +{`List orders = await asyncSession + // Query the index + .Query() + .Where(x => x.ShipToCountry == "Poland" || x.CompanyCountry == "Portugal") + .OfType() + .ToListAsync(); + +// Because index-field 'ShipToCountry' was boosted (inside the index definition), +// then documents containing 'Poland' in their 'ShipTo.Country' field will get a higher score than +// documents containing a company that is located in 'Portugal'. +`} + + + + +{`List orders = session.Advanced + // Query the index + .DocumentQuery() + .WhereEquals(x => x.ShipToCountry, "Poland") + .OrElse() + .WhereEquals(x => x.CompanyCountry, "Portugal") + .OfType() + .ToList(); + +// Because index-field 'ShipToCountry' was boosted (inside the index definition), +// then documents containing 'Poland' in their 'ShipTo.Country' field will get a higher score than +// documents containing a company that is located in 'Portugal'. +`} + + + + +{`from index "Orders/ByCountries/BoostByField" +where ShipToCountry == "poland" or CompanyCountry == "portugal" +`} + + + + + + +## Assign a boost factor to the index-entry + +Applying a boost value to the whole index-entry allows prioritization of matching documents by content from the document. +##### The index: + + + + +{`public class Orders_ByCountries_BoostByIndexEntry : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string ShipToCountry { get; set; } + public string CompanyCountry { get; set; } + } + + public Orders_ByCountries_BoostByIndexEntry() + { + Map = orders => from order in orders + let company = LoadDocument(order.Company) + + select new IndexEntry() + { + ShipToCountry = order.ShipTo.Country, + CompanyCountry = company.Address.Country + } + // Boost the whole index-entry: + // * Use method 'Boost' + // * Pass a document-field that will set the boost level dynamically per document indexed. + // * The boost level will vary from one document to another based on the value of this field. + .Boost((float) order.Freight); + } +} +`} + + + + +{`public class Orders_ByCountries_BoostByIndexEntry_JS : AbstractJavaScriptIndexCreationTask +{ + public Orders_ByCountries_BoostByIndexEntry_JS() + { + Maps = new HashSet() + { + @"map('orders', function(order) { + let company = load(order.Company, 'Companies') + return boost({ + ShipToCountry: order.ShipTo.Country, + CompanyCountry: company.Address.Country + }, order.Freight) + })" + }; + } +} +`} + + + + +##### The query: + + + + +{`List orders = session + // Query the index + .Query() + .Where(x => x.ShipToCountry == "Poland" || x.CompanyCountry == "Portugal") + .OfType() + .ToList(); + +// The resulting score per matching document is affected by the value of the document-field 'Freight'. +// Documents with a higher 'Freight' value will rank higher. +`} + + + + +{`List orders = await asyncSession + // Query the index + .Query() + .Where(x => x.ShipToCountry == "Poland" || x.CompanyCountry == "Portugal") + .OfType() + .ToListAsync(); + +// The resulting score per matching document is affected by the value of the document-field 'Freight'. +// Documents with a higher 'Freight' value will rank higher. +`} + + + + +{`List orders = session.Advanced + // Query the index + .DocumentQuery() + .WhereEquals(x => x.ShipToCountry, "Poland") + .OrElse() + .WhereEquals(x => x.CompanyCountry, "Portugal") + .OfType() + .ToList(); + +// The resulting score per matching document is affected by the value of the document-field 'Freight'. +// Documents with a higher 'Freight' value will rank higher. 
+`}
+
+
+
+{`from index "Orders/ByCountries/BoostByIndexEntry"
+where ShipToCountry == "poland" or CompanyCountry == "portugal"
+`}
+
+
+
+
+
+## Automatic score-based ordering
+
+* By default, whenever boosting is applied, either via dynamic querying or when querying an index
+  that has a boosting factor in its definition, the results will be automatically ordered by the score.
+
+* This behavior can be modified using the [OrderByScoreAutomaticallyWhenBoostingIsInvolved](../server/configuration/indexing-configuration.mdx#indexingorderbyscoreautomaticallywhenboostingisinvolved)
+  configuration key.
+
+* Refer to the [Get resulting score](../client-api/session/querying/sort-query-results.mdx#get-resulting-score)
+  section to learn how to retrieve the calculated score of each result.
+
+
+
+## Corax vs Lucene: boosting differences
+
+* **Boosting features available:**
+
+  * When using **Corax** as the underlying indexing engine, a boost factor can only be assigned
+    to the [index-entry](../indexes/boosting.mdx#assign-a-boost-factor-to-the-index-entry).
+    Applying a boost factor to an _index-field_ is Not supported.
+
+  * When using **Lucene**, a boost factor can be assigned to both the index-field and the whole index-entry.
+
+* **Algorithm used**:
+  Corax ranks search results using the [BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25).
+  Other search engines, e.g. Lucene, may use a different ranking algorithm and return different search results.
+
+
+
+ diff --git a/versioned_docs/version-7.1/indexes/_boosting-java.mdx b/versioned_docs/version-7.1/indexes/_boosting-java.mdx new file mode 100644 index 0000000000..e7f84bfcac --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_boosting-java.mdx @@ -0,0 +1,94 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When querying with some filtering conditions,
+a basic score is calculated for each document in the results by the underlying engine.
+
+Providing a **boost value** to some fields allows you to prioritize the resulting documents.
+The boost value is integrated with the basic score, making the document rank higher.
+Automatic ordering of the results by the score is [configurable](../indexes/boosting.mdx#automatic-score-based-ordering).
+
+From the index perspective, we can associate a boosting factor with an index entry.
+The higher the value, the more relevant the term will be. To do this, we must use the `Boost` method.
+
+Let's jump straight into the example.
+To perform the query that will return employees where either `FirstName` or `LastName` is equal to _Bob_,
+and to promote employees (move them to the top of the results) where the match is on `FirstName`, we must first create an index with a boosted entry.
+
+
+
+
+{`public class Employees_ByFirstAndLastName extends AbstractIndexCreationTask {
+    public Employees_ByFirstAndLastName() {
+        map = "docs.Employees.Select(employee => new {" +
+            "    FirstName = employee.FirstName.Boost(10)," +
+            "    LastName = employee.LastName" +
+            "})";
+    }
+}
+`}
+
+
+
+{`IndexDefinition indexDefinition = new IndexDefinition();
+indexDefinition.setName("Employees/ByFirstAndLastName");
+indexDefinition.setMaps(Collections.singleton(
+    "docs.Employees.Select(employee => new {" +
+    "    FirstName = employee.FirstName.Boost(10)," +
+    "    LastName = employee.LastName" +
+    "})"));
+
+store.maintenance().send(new PutIndexesOperation(indexDefinition));
+`}
+
+
+
+The next step is to perform a query against that index:
+
+
+
+{`// employees with 'firstName' equal to 'Bob'
+// will be higher in results
+// than the ones with a 'lastName' match
+List results = session.query(Employee.class, Employees_ByFirstAndLastName.class)
+    .whereEquals("FirstName", "Bob")
+    .whereEquals("LastName", "Bob")
+    .toList();
+`}
+
+
+
+## Remarks
+
+
+Boosting is also available at the query level.
+
+
+
+
+#### Automatic score-based ordering
+
+* By default, whenever boosting is involved, either via a dynamic query or when querying an index that has a boosting factor in its definition,
+  the results will be automatically ordered by the score.
+
+* This behavior can be modified using the [OrderByScoreAutomaticallyWhenBoostingIsInvolved](../server/configuration/indexing-configuration.mdx#indexingorderbyscoreautomaticallywhenboostingisinvolved)
+  configuration key.
+
+
+
+
+
+When using [Corax](../indexes/search-engine/corax.mdx) as the search engine:
+
+* [indexing-time boosting](../indexes/search-engine/corax.mdx#supported-features)
+  is available for documents, but not for document fields.
+* Corax ranks search results using the [BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25).
+  Other search engines, e.g. Lucene, may use a different ranking algorithm and return different search results.
+
+
+
+
 diff --git a/versioned_docs/version-7.1/indexes/_boosting-nodejs.mdx b/versioned_docs/version-7.1/indexes/_boosting-nodejs.mdx new file mode 100644 index 0000000000..b8ee341c8b --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_boosting-nodejs.mdx @@ -0,0 +1,174 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When querying with some filtering conditions, a basic **score** is
+  calculated by the underlying engine for each document in the results.
+
+* Providing a **boost value** to selected fields allows prioritization of the resulting documents.
+  The boost value is integrated with the basic score, increasing the document's rank.
+
+* The automatic ordering of results by their score is [configurable](../indexes/boosting.mdx#automatic-score-based-ordering).
+
+* Boosting can be achieved in the following ways:
+
+  * **At query time**:
+    By applying a boost factor to searched terms at query time (see [Boost search results](../client-api/session/querying/text-search/boost-search-results.mdx)).
+
+  * **Via index definition**:
+    By applying a boost factor in the index definition, as described in this article.
+
+* In this page:
+  * [Assign a boost factor to an index-field](../indexes/boosting.mdx#assign-a-boost-factor-to-an-index-field)
+  * [Assign a boost factor to the index-entry](../indexes/boosting.mdx#assign-a-boost-factor-to-the-index-entry)
+  * [Automatic score-based ordering](../indexes/boosting.mdx#automatic-score-based-ordering)
+  * [Corax vs Lucene: boosting differences](../indexes/boosting.mdx#corax-vs-lucene-boosting-differences)
+
+
+## Assign a boost factor to an index-field
+
+Applying a boost value to an index-field allows prioritization of matching documents based on an index-field.
+##### The index:
+
+
+
+{`class Orders_ByCountries_BoostByField extends AbstractCsharpIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.map = \`from order in docs.Orders
+            let company = LoadDocument(order.Company, "Companies")
+            select new \{
+
+                // Boost index-field 'ShipToCountry':
+                // * Use method 'Boost', pass a numeric value to boost by
+                // * Documents that match the query criteria for this field will rank higher
+
+                ShipToCountry = order.ShipTo.Country.Boost(10),
+                CompanyCountry = company.Address.Country
+            \}\`;
+    \}
+\}
+`}
+
+
+
+##### The query:
+
+
+
+
+{`const orders = await session
+    .query({ indexName: "Orders/ByCountries/BoostByField" })
+    .whereEquals("ShipToCountry", "Poland")
+    .orElse()
+    .whereEquals("CompanyCountry", "Portugal")
+    .all();
+
+// Because index-field 'ShipToCountry' was boosted (inside the index definition),
+// then documents containing 'Poland' in their 'ShipTo.Country' field will get a higher score than
+// documents containing a company that is located in 'Portugal'.
+`}
+
+
+
+
+{`from index "Orders/ByCountries/BoostByField"
+where ShipToCountry == "poland" or CompanyCountry == "portugal"
+`}
+
+
+
+
+
+
+## Assign a boost factor to the index-entry
+
+Applying a boost value to the whole index-entry allows prioritization of matching documents by content from the document.
+##### The index:
+
+
+
+{`class Orders_ByCountries_BoostByIndexEntry extends AbstractCsharpIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.map = \`from order in docs.Orders
+            let company = LoadDocument(order.Company, "Companies")
+            select new \{
+                ShipToCountry = order.ShipTo.Country,
+                CompanyCountry = company.Address.Country
+            \}
+
+            // Boost the whole index-entry:
+            // * Use method 'Boost'
+            // * Pass a document-field that will set the boost level dynamically per document indexed.
+            // * The boost level will vary from one document to another based on the value of this field.
+
+            .Boost(order.Freight)\`;
+    \}
+\}
+`}
+
+
+
+##### The query:
+
+
+
+
+{`const orders = await session
+    .query({ indexName: "Orders/ByCountries/BoostByIndexEntry" })
+    .whereEquals("ShipToCountry", "Poland")
+    .orElse()
+    .whereEquals("CompanyCountry", "Portugal")
+    .all();
+
+// The resulting score per matching document is affected by the value of the document-field 'Freight'.
+// Documents with a higher 'Freight' value will rank higher.
+`}
+
+
+
+
+{`from index "Orders/ByCountries/BoostByIndexEntry"
+where ShipToCountry == "poland" or CompanyCountry == "portugal"
+`}
+
+
+
+
+
+
+## Automatic score-based ordering
+
+* By default, whenever boosting is applied, either via dynamic querying or when querying an index
+  that has a boosting factor in its definition, the results will be automatically ordered by the score.
+
+* This behavior can be modified using the [OrderByScoreAutomaticallyWhenBoostingIsInvolved](../server/configuration/indexing-configuration.mdx#indexingorderbyscoreautomaticallywhenboostingisinvolved)
+  configuration key.
+
+* Refer to the [Get resulting score](../client-api/session/querying/sort-query-results.mdx#get-resulting-score)
+  section to learn how to retrieve the calculated score of each result.
+
+
+
+## Corax vs Lucene: boosting differences
+
+* **Boosting features available:**
+
+  * When using **Corax** as the underlying indexing engine, you can only [assign a boost factor to the index-entry](../indexes/boosting.mdx#assign-a-boost-factor-to-the-index-entry).
+    Applying a boost factor to an index-field is Not supported.
+
+  * When using **Lucene**, you can assign a boost factor to both the index-field and the whole index-entry.
+
+* **Algorithm used**:
+  Corax ranks search results using the [BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25).
+  Other search engines, e.g. Lucene, may use a different ranking algorithm and return different search results.
+
+
+
+ diff --git a/versioned_docs/version-7.1/indexes/_boosting-php.mdx b/versioned_docs/version-7.1/indexes/_boosting-php.mdx new file mode 100644 index 0000000000..657c099d8a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_boosting-php.mdx @@ -0,0 +1,308 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* When querying with some filtering conditions, a basic **score** is
+  calculated by the underlying engine for each document in the results.
+
+* Providing a **boost value** to selected fields allows prioritization of the resulting documents.
+  The boost value is integrated with the basic score, increasing the document's rank.
+
+* The automatic ordering of results by their score is [configurable](../indexes/boosting.mdx#automatic-score-based-ordering).
+
+* Boosting can be achieved in the following ways:
+
+  * **At query time**:
+    By applying a boost factor to searched terms at query time (see [Boost search results](../client-api/session/querying/text-search/boost-search-results.mdx)).
+
+  * **Via index definition**:
+    By applying a boost factor in the index definition, as described in this article.
+
+* In this page:
+  * [Assign a boost factor to an index-field](../indexes/boosting.mdx#assign-a-boost-factor-to-an-index-field)
+  * [Assign a boost factor to the index-entry](../indexes/boosting.mdx#assign-a-boost-factor-to-the-index-entry)
+  * [Automatic score-based ordering](../indexes/boosting.mdx#automatic-score-based-ordering)
+  * [Corax vs Lucene: boosting differences](../indexes/boosting.mdx#corax-vs-lucene-boosting-differences)
+
+
+## Assign a boost factor to an index-field
+
+Applying a boost value to an index-field allows prioritization of matching documents based on an index-field.
+##### The index: + + + + +{`class Orders_ByCountries_BoostByField_IndexEntry +{ + // Index-field 'ShipToCountry' will be boosted in the map definition below + public ?string $shipToCountry = null; + public ?string $companyCountry = null; + + public function getShipToCountry(): ?string + { + return $this->shipToCountry; + } + + public function setShipToCountry(?string $shipToCountry): void + { + $this->shipToCountry = $shipToCountry; + } + + public function getCompanyCountry(): ?string + { + return $this->companyCountry; + } + + public function setCompanyCountry(?string $companyCountry): void + { + $this->companyCountry = $companyCountry; + } +} + +class Orders_ByCountries_BoostByField extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // Boost index-field 'ShipToCountry': + // * Use method 'Boost', pass a numeric value to boost by + // * Documents that match the query criteria for this field will rank higher + $this->map = + "docs.Orders.Select(order => new { " . + " ShipToCountry = order.ShipTo.Country.Boost(10), " . + " CompanyCountry = this.LoadDocument(order.Company, \\"Companies\\").Address.Country " . + "})"; + } +} +`} + + + + +{`class Orders_ByCountries_BoostByField_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps(["map('Orders', function (order) {\\n" . + " let company = load(order.Company, 'Companies')\\n" . + " return {\\n" . + " ShipToCountry: boost(order.ShipTo.Country, 10),\\n" . + " CompanyCountry: company.Address.Country\\n" . + " }\\n" . + "})"]); + } +} +`} + + + + +##### The query: + + + + +{`$orders = $session + // Query the index + ->query(Orders_ByCountries_BoostByField_IndexEntry::class, Orders_ByCountries_BoostByField::class) + ->whereEquals("ShipToCountry", "Poland") + ->orElse() + ->whereEquals("CompanyCountry", "Portugal") + ->ofType(Order::class) + ->toList(); + +// Because index-field 'ShipToCountry' was boosted (inside the index definition), +// then documents containing 'Poland' in their 'ShipTo.Country' field will get a higher score than +// documents containing a company that is located in 'Portugal'. +`} + + + + +{`$orders = $session->advanced() + // Query the index + ->documentQuery(Orders_ByCountries_BoostByField_IndexEntry::class, Orders_ByCountries_BoostByField::class) + ->whereEquals("ShipToCountry", "Poland") + ->orElse() + ->whereEquals("CompanyCountry", "Portugal") + ->ofType(Order::class) + ->toList(); + +// Because index-field 'ShipToCountry' was boosted (inside the index definition), +// then documents containing 'Poland' in their 'ShipTo.Country' field will get a higher score than +// documents containing a company that is located in 'Portugal'. +`} + + + + +{`from index "Orders/ByCountries/BoostByField" +where ShipToCountry == "poland" or CompanyCountry == "portugal" +`} + + + + + + +## Assign a boost factor to the index-entry + +Applying a boost value to the whole index-entry allows prioritization of matching documents by content from the document. 
+##### The index: + + + + +{`class Orders_ByCountries_BoostByIndexEntry_IndexEntry +{ + public ?string $shipToCountry = null; + public ?string $companyCountry = null; + + public function getShipToCountry(): ?string + { + return $this->shipToCountry; + } + + public function setShipToCountry(?string $shipToCountry): void + { + $this->shipToCountry = $shipToCountry; + } + + public function getCompanyCountry(): ?string + { + return $this->companyCountry; + } + + public function setCompanyCountry(?string $companyCountry): void + { + $this->companyCountry = $companyCountry; + } +} + +class Orders_ByCountries_BoostByIndexEntry extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // Boost the whole index-entry: + // * Use method 'Boost' + // * Pass a document-field that will set the boost level dynamically per document indexed. + // * The boost level will vary from one document to another based on the value of this field. + + $this->map = + "docs.Orders.Select(order => new { " . + " ShipToCountry = order.ShipTo.Country, " . + " CompanyCountry = this.LoadDocument(order.Company, \\"Companies\\").Address.Country " . + "}.Boost((float) order.Freight))"; + } +} +`} + + + + +{`class Orders_ByCountries_BoostByIndexEntry_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + + $this->setMaps(["map('Orders', function (order) {\\n" . + " let company = load(order.Company, 'Companies')\\n" . + " return boost({\\n" . + " ShipToCountry: order.ShipTo.Country,\\n" . + " CompanyCountry: company.Address.Country\\n" . + " }, order.Freight)\\n" . + "})"]); + } +} +`} + + + + +##### The query: + + + + +{`$orders = $session + // Query the index + ->query(Orders_ByCountries_BoostByIndexEntry_IndexEntry::class, Orders_ByCountries_BoostByIndexEntry::class) + ->whereEquals("ShipToCountry", "Poland") + ->orElse() + ->whereEquals("CompanyCountry", "Portugal") + ->ofType(Order::class) + ->toList(); + +// The resulting score per matching document is affected by the value of the document-field 'Freight'. +// Documents with a higher 'Freight' value will rank higher. +`} + + + + +{`$orders = $session->advanced() + // Query the index + ->documentQuery(Orders_ByCountries_BoostByIndexEntry_IndexEntry::class, Orders_ByCountries_BoostByIndexEntry::class) + ->whereEquals("ShipToCountry", "Poland") + ->orElse() + ->whereEquals("CompanyCountry", "Portugal") + ->ofType(Order::class) + ->toList(); + +// The resulting score per matching document is affected by the value of the document-field 'Freight'. +// Documents with a higher 'Freight' value will rank higher. +`} + + + + +{`from index "Orders/ByCountries/BoostByIndexEntry" +where ShipToCountry == "poland" or CompanyCountry == "portugal" +`} + + + + + + +## Automatic score-based ordering + +* By default, whenever boosting is applied, either via dynamic querying or when querying an index + that has a boosting factor in its definition, the results will be automatically ordered by the score. + +* This behavior can be modified using the [OrderByScoreAutomaticallyWhenBoostingIsInvolved](../server/configuration/indexing-configuration.mdx#indexingorderbyscoreautomaticallywhenboostingisinvolved) + configuration key. + +* Refer to the [Get resulting score](../client-api/session/querying/sort-query-results.mdx#get-resulting-score) + section to learn how to retrieve the calculated score of each result. 
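
The section above mentions retrieving each result's calculated score. As a quick illustration, here is a minimal sketch in C# (C# is used for all examples added in this changeset; the PHP client exposes the same metadata). It assumes the Northwind `Order` class and the `@index-score` metadata key under which RavenDB reports the score:

```csharp
using System;
using System.Linq;
using Raven.Client.Documents;
using Raven.Client.Documents.Session;

public static class ScoreInspection
{
    public static void PrintScores(IDocumentStore store)
    {
        using (IDocumentSession session = store.OpenSession())
        {
            // Query the boosted index by its string-based name
            var orders = session.Advanced
                .DocumentQuery<Order>("Orders/ByCountries/BoostByIndexEntry")
                .WhereEquals("ShipToCountry", "Poland")
                .OrElse()
                .WhereEquals("CompanyCountry", "Portugal")
                .ToList();

            foreach (var order in orders)
            {
                // The score the engine calculated for each result is
                // reported in the result's metadata under '@index-score'
                var metadata = session.Advanced.GetMetadataFor(order);
                Console.WriteLine($"{order.Id}: score = {metadata["@index-score"]}");
            }
        }
    }
}
```
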
+ + + +## Corax vs Lucene: boosting differences + +* **Boosting features available:** + + * When using **Corax** as the underlying indexing engine, a boost factor can only be assigned + to the [index-entry](../indexes/boosting.mdx#assign-a-boost-factor-to-the-index-entry). + Applying a boost factor to an _index-field_ is Not supported. + + * When using **Lucene**, a boost factor can be assigned to both the index-field and the whole index-entry. + +* **Algorithm used**: + Corax ranks search results using the [BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25). + Other search engines, e.g. Lucene, may use a different ranking algorithm and return different search results. + + + + diff --git a/versioned_docs/version-7.1/indexes/_category_.json b/versioned_docs/version-7.1/indexes/_category_.json new file mode 100644 index 0000000000..13d6071a5f --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": "Indexes" +} diff --git a/versioned_docs/version-7.1/indexes/_creating-and-deploying-csharp.mdx b/versioned_docs/version-7.1/indexes/_creating-and-deploying-csharp.mdx new file mode 100644 index 0000000000..32e06bf45b --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_creating-and-deploying-csharp.mdx @@ -0,0 +1,506 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article explains how to create indexes in RavenDB. + For a general overview of indexes, see [What are indexes](../indexes/what-are-indexes.mdx). + +* You can either: + * [create a Static-index](../indexes/creating-and-deploying.mdx#define-a-static-index) yourself, which involves **Defining** the index and **Deploying** it to the RavenDB server, or + * let the RavenDB server [create an Auto-index](../indexes/creating-and-deploying.mdx#creating-auto-indexes) for you based on query patterns. + +* Static-indexes can be created: + * using the Client API, as outlined in this article, or + * from the [Indexes list view](../studio/database/indexes/indexes-list-view.mdx) in the Studio. +* In this page: + * [Static-indexes](../indexes/creating-and-deploying.mdx#static-indexes) + * [Define a static-index](../indexes/creating-and-deploying.mdx#define-a-static-index) + * [Deploy a static-index](../indexes/creating-and-deploying.mdx#deploy-a-static-index) + * [Deploy single index](../indexes/creating-and-deploying.mdx#deploy-single-index) + * [Deploy multiple indexes](../indexes/creating-and-deploying.mdx#deploy-multiple-indexes) + * [Deploy syntax](../indexes/creating-and-deploying.mdx#deploy-syntax) + * [Deployment behavior](../indexes/creating-and-deploying.mdx#deployment-behavior) + * [Creating a static-index - Example](../indexes/creating-and-deploying.mdx#create-a-static-index---example) + * [Creating a static-index - using an Operation](../indexes/creating-and-deploying.mdx#create-a-static-index---using-an-operation) + * [Auto-indexes](../indexes/creating-and-deploying.mdx#auto-indexes) + * [Creating auto-indexes](../indexes/creating-and-deploying.mdx#creating-auto-indexes) + * [Disabling auto-indexes](../indexes/creating-and-deploying.mdx#disabling-auto-indexes) + + + + +## Define a static-index + + + +##### Static-indexes +* Indexes that are explicitly **created by the user** are called `static` indexes. +* Static-indexes can perform calculations, data conversions, and other processes behind the scenes. 
+  This reduces the workload at query time by offloading these costly operations to the indexing phase.
+* To query with a static-index, you must explicitly specify the index in the query definition.
+  For more details, see [Querying an index](../indexes/querying/query-index.mdx).
+
+
+
+
+##### Define a static-index using a custom class
+* To define a static-index using a custom class, inherit from `AbstractIndexCreationTask`.
+* This method is recommended over the [Creating an index using an operation](../indexes/creating-and-deploying.mdx#create-a-static-index---using-an-operation) method
+  for its simplicity and the following advantages:
+  * **Strongly-typed syntax**:
+    Provides strong typing when defining the index, making it easier to work with.
+  * **Ease of querying**:
+    Lets you use the index class name in a query, instead of hard-coding the index name.
+
+
+{`// Define a static-index
+// Inherit from 'AbstractIndexCreationTask'
+public class Orders_ByTotal : AbstractIndexCreationTask
+\{
+    // ...
+\}
+`}
+
+
+* A complete example of creating a static-index is provided [below](../indexes/creating-and-deploying.mdx#create-a-static-index---example).
+
+
+
+
+##### Naming convention
+* Static-index class names follow a single naming convention:
+  Each `_` in the class name is translated to `/` in the index name on the server.
+* In the above example, the index class name is `Orders_ByTotal`.
+  The name of the index that will be generated on the server will be: `Orders/ByTotal`.
+
+
+
+
+##### Customizing configuration
+* You can set various [indexing configuration](../server/configuration/indexing-configuration.mdx) values within the index definition.
+* Setting a configuration value within the index will override the matching indexing configuration values set at the server or database level.
+
+
+{`public class Orders_ByTotal : AbstractIndexCreationTask
+\{
+    public Orders_ByTotal()
+    \{
+        // ...
+        // Set an indexing configuration value for this index:
+        Configuration["Indexing.MapTimeoutInSec"] = "30";
+    \}
+\}
+`}
+
+
+
+
+
+
+## Deploy a static-index
+
+* To begin indexing data, the index must be deployed to the server.
+* This section provides options for deploying indexes that inherit from `AbstractIndexCreationTask`.
+* To create and deploy an index using the `IndexDefinition` class via `PutIndexesOperation`,
+  see [Creating a static-index - using an Operation](../indexes/creating-and-deploying.mdx#create-a-static-index---using-an-operation).
+
+
+##### Deploy single index
+* Use `Execute()` or `ExecuteIndex()` to deploy a single index.
+* The following examples deploy index `Orders/ByTotal` to the default database defined in your _DocumentStore_ object.
+  See the [syntax](../indexes/creating-and-deploying.mdx#deploy-syntax) section below for all available overloads.
+
+
+
+
+{`// Call 'Execute' directly on the index instance
+new Orders_ByTotal().Execute(store);
+`}
+
+
+
+
+{`// Call 'ExecuteAsync' directly on the index instance
+await new Orders_ByTotal().ExecuteAsync(store);
+`}
+
+
+
+
+
+
+
+{`// Call 'ExecuteIndex' on your store object
+store.ExecuteIndex(new Orders_ByTotal());
+`}
+
+
+
+
+{`// Call 'ExecuteIndexAsync' on your store object
+await store.ExecuteIndexAsync(new Orders_ByTotal());
+`}
+
+
+
+
+
+
+
+##### Deploy multiple indexes
+* Use `ExecuteIndexes()` or `IndexCreation.CreateIndexes()` to deploy multiple indexes.
+* The `IndexCreation.CreateIndexes` method attempts to create all indexes in a single request.
+  If it fails, it will repeat the execution by calling the `Execute` method for each index, one by one,
+  in separate requests.
+* The following examples deploy indexes `Orders/ByTotal` and `Employees/ByLastName` to the default database defined in your _DocumentStore_ object.
+  See the [syntax](../indexes/creating-and-deploying.mdx#deploy-syntax) section below for all available overloads.
+
+
+
+
+{`var indexesToDeploy = new List
+{
+    new Orders_ByTotal(),
+    new Employees_ByLastName()
+};
+
+// Call 'ExecuteIndexes' on your store object
+store.ExecuteIndexes(indexesToDeploy);
+`}
+
+
+
+
+{`var indexesToDeploy = new List
+{
+    new Orders_ByTotal(),
+    new Employees_ByLastName()
+};
+
+// Call 'ExecuteIndexesAsync' on your store object
+await store.ExecuteIndexesAsync(indexesToDeploy);
+`}
+
+
+
+
+
+
+
+{`var indexesToDeploy = new List
+{
+    new Orders_ByTotal(),
+    new Employees_ByLastName()
+};
+
+// Call the static method 'CreateIndexes' on the IndexCreation class
+IndexCreation.CreateIndexes(indexesToDeploy, store);
+`}
+
+
+
+
+{`var indexesToDeploy = new List
+{
+    new Orders_ByTotal(),
+    new Employees_ByLastName()
+};
+
+// Call the static method 'CreateIndexesAsync' on the IndexCreation class
+await IndexCreation.CreateIndexesAsync(indexesToDeploy, store);
+`}
+
+
+
+###### Deploy ALL indexes from an assembly
+
+* The following overload allows you to deploy ALL indexes from a specified assembly:
+
+
+
+
+{`// Deploy ALL indexes from the assembly containing the \`Orders_ByTotal\` class
+IndexCreation.CreateIndexes(typeof(Orders_ByTotal).Assembly, store);
+`}
+
+
+
+
+{`// Deploy ALL indexes from the assembly containing the \`Orders_ByTotal\` class
+await IndexCreation.CreateIndexesAsync(typeof(Orders_ByTotal).Assembly, store);
+`}
+
+
+
+
+
+
+
+##### Deploy syntax
+
+
+
+{`// Call this method directly on the index instance
+void Execute(IDocumentStore store, DocumentConventions conventions = null,
+    string database = null);
+
+// Call these methods on the store object
+void ExecuteIndex(IAbstractIndexCreationTask index, string database = null);
+void ExecuteIndexes(IEnumerable indexes,
+    string database = null);
+
+// Call these static methods on the IndexCreation class
+void CreateIndexes(IEnumerable indexes, IDocumentStore store,
+    DocumentConventions conventions = null, string database = null);
+void CreateIndexes(Assembly assemblyToScan, IDocumentStore store,
+    DocumentConventions conventions = null, string database = null);
+`}
+
+
+
+
+{`// Call this method directly on the index instance
+Task ExecuteAsync(IDocumentStore store, DocumentConventions conventions = null,
+    string database = null, CancellationToken token = default);
+
+// Call these methods on the store object
+Task ExecuteIndexAsync(IAbstractIndexCreationTask index, string database = null,
+    CancellationToken token = default(CancellationToken));
+Task ExecuteIndexesAsync(IEnumerable indexes,
+    string database = null, CancellationToken token = default(CancellationToken));
+
+// Call these static methods on the IndexCreation class
+Task CreateIndexesAsync(IEnumerable indexes,
+    IDocumentStore store, DocumentConventions conventions = null, string database = null,
+    CancellationToken token = default(CancellationToken));
+Task CreateIndexesAsync(Assembly assemblyToScan, IDocumentStore store,
+    DocumentConventions conventions = null, string database = null,
+    CancellationToken token = default(CancellationToken));
+`}
+
+
+
+
+| Parameter | Type | Description |
+|--------------------|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------| +| **store** | `IDocumentStore` | Your document store object. | +| **conventions** | `DocumentConventions` | The [Conventions](../client-api/configuration/conventions.mdx) used by the document store. | +| **database** | `string` | The target database to deploy the index to. If not specified, the default database set on the store will be used. | +| **index** | `IAbstractIndexCreationTask` | The index object to deploy. | +| **indexes** | `IEnumerable` | A list of index objects to deploy. | +| **assemblyToScan** | `Assembly ` | Deploy all indexes that are contained in this assembly. | + + + + +##### Deployment behavior + + +###### Deployment mode: +* When your database spans multiple nodes, + you can choose between **Rolling** index deployment or **Parallel** index deployment. +* Rolling deployment applies the index to one node at a time, + while Parallel deployment deploys the index on all nodes simultaneously. +* Learn more in [Rolling index deployment](../indexes/rolling-index-deployment.mdx). + + + + + +###### When the index you are deploying already exists on the server: +* **If the index definition is updated**: + * RavenDB uses a side-by-side strategy for all index updates. + * When an existing index definition is modified, RavenDB creates a new index with the updated definition. + The new index will replace the existing index once it becomes non-stale. + * If you want to swap the indexes immediately, you can do so through the Studio. + For more details, see [Side by side indexing](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---side-by-side-indexing). +* **If the index definition is unchanged**: + * If the definition of the index being deployed is identical to the one on the server, + the existing index will not be overwritten. + * The indexed data will remain intact, and the indexing process will not restart. + + + + + +## Create a static-index - Example + + + +{`// Define a static-index: +// ====================== +public class Orders_ByTotal : AbstractIndexCreationTask +\{ + public class IndexEntry + \{ + // The index-fields: + public string Employee \{ get; set; \} + public string Company \{ get; set; \} + public decimal Total \{ get; set; \} + \} + + public Orders_ByTotal() + \{ + Map = orders => from order in orders + select new IndexEntry + \{ + // Set the index-fields: + Employee = order.Employee, + Company = order.Company, + Total = order.Lines.Sum(l => + (l.Quantity * l.PricePerUnit) * (1 - l.Discount)) + \}; + + // Customize the index as needed, for example: + DeploymentMode = IndexDeploymentMode.Rolling; + Configuration["Indexing.MapTimeoutInSec"] = "30"; + Indexes.Add(x => x.Company, FieldIndexing.Search); + // ... 
+ \} +\} + +public static void Main(string[] args) +\{ + using (DocumentStore store = new DocumentStore + \{ + Urls = new[] \{ "http://localhost:8080" \}, + Database = "Northwind" + \}) + \{ + store.Initialize(); + + // Deploy the index: + // ================= + new Orders_ByTotal().Execute(store); + + using (IDocumentSession session = store.OpenSession()) + \{ + // Query the index: + // ================ + IList orders = session + .Query() + // Query for Order documents that have Total > 100 + .Where(x => x.Total > 100) + .OfType() + .ToList(); + \} + \} +\} +`} + + + + + +## Create a static-index - using an Operation + +* An index can also be defined and deployed using the [PutIndexesOperation](../client-api/operations/maintenance/indexes/put-indexes.mdx) maintenance operation. + +* When using this operation: + + * Unlike the [naming convention](../indexes/creating-and-deploying.mdx#naming-convention) used with indexes inheriting from `AbstractIndexCreationTask`, + you can choose any string-based name for the index. + However, when querying, you must use that string-based name rather than the index class type. + + * You can also modify various low-level settings available in the [IndexDefinition](../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinition) + and [IndexDefinitionBuilder](../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinitionbuilder) classes. + +* Consider using this operation only if inheriting from `AbstractIndexCreationTask` is not an option. + +* For a detailed explanation and examples, refer to the dedicated article: [Put Indexes Operation](../client-api/operations/maintenance/indexes/put-indexes.mdx). + + + + +## Creating auto-indexes + + + +##### Auto-indexes creation +* Indexes **created by the server** are called `dynamic` or `auto` indexes. +* Auto-indexes are created when all of the following conditions are met: + * A query is issued without specifying an index (a dynamic query). + * The query includes a filtering condition. + * No suitable auto-index exists that can satisfy the query. + * Creation of auto-indexes has not been disabled. +* For such queries, RavenDB's Query Optimizer searches for an existing auto-index that can satisfy the query. + If no suitable auto-index is found, RavenDB will either create a new auto-index or optimize an existing auto-index. + (Static-indexes are not taken into account when determining which auto-index should handle the query). +* Note: dynamic queries can be issued either when [querying](../studio/database/queries/query-view.mdx#query-view) or when [patching](../studio/database/documents/patch-view.mdx#patch-configuration). +* Over time, RavenDB automatically adjusts and merges auto-indexes to efficiently serve your queries. + For more details, see [Query a collection - with filtering (dynamic query)](../client-api/session/querying/how-to-query.mdx#dynamicQuery). + + + + +##### Naming convention +* Auto-indexes are easily identified by their names, which start with the `Auto/` prefix. +* Their name also includes the name of the queried collection and a list of fields used in the query predicate to filter matching results. 
+* For example, issuing the following query:
+
+
+
+{`List employees = session
+    .Query()
+    .Where(x => x.FirstName == "Robert" && x.LastName == "King")
+    .ToList();
+`}
+
+
+
+
+{`from Employees
+where FirstName = "Robert" and LastName = "King"
+`}
+
+
+
+  will result in the creation of an auto-index named `Auto/Employees/ByFirstNameAndLastName`.
+
+
+
+
+##### Auto-index idle state
+* To reduce server load, an auto-index is marked as `idle` when it hasn't been used for a while.
+  Specifically, if the time difference between the last time the auto-index was queried
+  and the last time a query was made on the database (using any index) exceeds the configured threshold (30 minutes by default),
+  the auto-index will be marked as `idle`.
+* This is done in order to avoid marking indexes as idle for databases that were offline for a long period of time,
+  as well as for databases that were just restored from a snapshot or a backup.
+* To set the time before marking an index as idle, use the
+  [Indexing.TimeToWaitBeforeMarkingAutoIndexAsIdleInMin](../server/configuration/indexing-configuration.mdx#indexingtimetowaitbeforemarkingautoindexasidleinmin) configuration key.
+  Setting this value too high is not recommended, as it may lead to performance degradation by causing unnecessary and redundant work for the indexes.
+* An `idle` auto-index will resume its work and return to `normal` state upon its next query,
+  or when resetting the index.
+* If not resumed, the idle auto-index will be deleted by the server after the time period defined in the
+  [Indexing.TimeToWaitBeforeDeletingAutoIndexMarkedAsIdleInHrs](../server/configuration/indexing-configuration.mdx#indexingtimetowaitbeforedeletingautoindexmarkedasidleinhrs) configuration key
+  (72 hours by default).
+
+
+
+
+## Disabling auto-indexes
+
+**Why disable**:
+
+* Disabling auto-index creation prevents the accidental deployment of resource-consuming auto-indexes that may result from one-time, ad-hoc queries issued from the Studio.
+* In production environments, disabling this feature helps avoid the creation and background execution of expensive indexes.
+
+**How to disable**:
+
+* You can disable auto-indexes by setting the [Indexing.DisableQueryOptimizerGeneratedIndexes](../server/configuration/indexing-configuration.mdx#indexingdisablequeryoptimizergeneratedindexes) configuration key
+  (a configuration sketch follows this section).
+  This will affect all queries made both from the **Client API** and the **Studio**.
+
+* Alternatively, you can disable auto-indexes from the Studio.
+  However, this will affect queries made only from the **Studio**.
+
+  * To disable auto-index creation for a specific query made from the query view, see these [Query settings](../studio/database/queries/query-view.mdx#query-settings).
+  * To disable auto-index creation for a specific query made from the patch view, see these [Patch settings](../studio/database/documents/patch-view.mdx#patch-settings).
+  * Disabling auto-index creation for ALL queries made on a database can be configured in the [Studio configuration view](../studio/database/settings/studio-configuration.mdx#disabling-auto-index-creation-on-studio-queries-or-patches).
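
To make the configuration step above concrete, here is a hedged C# sketch of disabling auto-index creation for a single database from the client. It assumes the `PutDatabaseSettingsOperation` available in recent C# clients; the same key can instead be set server-wide in `settings.json`:

```csharp
using System.Collections.Generic;
using Raven.Client.Documents;
using Raven.Client.Documents.Operations.Configuration;

public static class DisableAutoIndexes
{
    public static void Apply(IDocumentStore store)
    {
        var settings = new Dictionary<string, string>
        {
            // The configuration key discussed above
            ["Indexing.DisableQueryOptimizerGeneratedIndexes"] = "true"
        };

        // Store the per-database configuration override; this operation's
        // availability and namespace are assumptions - verify them against
        // your client version
        store.Maintenance.Send(new PutDatabaseSettingsOperation(store.Database, settings));

        // Note: database-settings changes take effect after the database reloads
    }
}
```
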
+ + + + diff --git a/versioned_docs/version-7.1/indexes/_creating-and-deploying-java.mdx b/versioned_docs/version-7.1/indexes/_creating-and-deploying-java.mdx new file mode 100644 index 0000000000..bcbb0a93d7 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_creating-and-deploying-java.mdx @@ -0,0 +1,324 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Indexes are used by the server to satisfy queries. + They are at the heart of RavenDB's efficiency and should be understood before indexes and queries are defined in production. + +* Static indexes can do a number of operations on the data behind the scenes so that queries that use this already processed data are as fast as possible. + Indexes keep the processed data in a separate storage so that the raw data isn't affected. + +* Whenever a user issues a query _that doesn't specify an index_, RavenDB's Query Optimizer will try to find an + existing auto-index that fulfills the query. + * If one doesn't yet exist, RavenDB will either create an auto-index or optimize an existing one if it almost satisfies the query. + +* Indexes process data assigned to them as the data changes. For example, if changes are made to documents in the collection "Orders", + the indexes that are defined to handle queries on "Orders" will be triggered to update the index with the new data. + * These behind-the-scenes processes remove a lot of burden from queries. Also, indexes need to process entire datasets just once, + after which, they only process new data. + Still, they utilize machine resources and this should be considered when defining indexes and queries. + + + + +* Indexes created by issuing a query are called `dynamic` or `Auto` indexes. + * They can be easily identified. Their name starts with the `Auto/` prefix. + * If no [Auto-Index](../indexes/creating-and-deploying.mdx#auto-indexes) exists to satisfy a query, + a new Auto-Index will be created and maintained automatically. +* Indexes created explicitly by the user are called `static`. + * [To use a Static Index in a query](../indexes/querying/query-index.mdx), + **you must specify the index in the query definition**. If you don't specify the index, + RavenDB will look for an auto-index and potentially create a new one. + * Static Indexes can be defined to do calculations, conversions, and various other processes behind the scenes, to prevent + doing these costly processes at query time (see [Using AbstractIndexCreationTask](../indexes/creating-and-deploying.mdx#using-abstractindexcreationtask), + our [map-indexes](../indexes/map-indexes.mdx) article, [indexing multiple collections](../indexes/multi-map-indexes.mdx), + and [map-reduce indexing](../indexes/map-reduce-indexes.mdx)). 
+
+
+* In this page:
+  * [Static indexes](../indexes/creating-and-deploying.mdx#static-indexes)
+    * [Using AbstractIndexCreationTask](../indexes/creating-and-deploying.mdx#using-abstractindexcreationtask)
+      * [Naming convention](../indexes/creating-and-deploying.mdx#naming-convention)
+      * [Sending to server](../indexes/creating-and-deploying.mdx#sending-to-server)
+      * [Creating an index with custom configuration](../indexes/creating-and-deploying.mdx#creating-an-index-with-custom-configuration)
+      * [Using assembly scanner](../indexes/creating-and-deploying.mdx#using-assembly-scanner)
+      * [Example](../indexes/creating-and-deploying.mdx#example)
+    * [Using maintenance operations](../indexes/creating-and-deploying.mdx#using-maintenance-operations)
+      * [IndexDefinitionBuilder](../indexes/creating-and-deploying.mdx#indexdefinitionbuilder)
+  * [Auto-indexes](../indexes/creating-and-deploying.mdx#auto-indexes)
+    * [Naming convention](../indexes/creating-and-deploying.mdx#naming-convention-1)
+    * [Auto indexes and indexing state](../indexes/creating-and-deploying.mdx#auto-indexes-and-indexing-state)
+  * [If indexes exhaust system resources](../indexes/creating-and-deploying.mdx#if-indexes-exhaust-system-resources)
+
+
+
+
+## Static indexes
+
+There are a couple of ways to create a `static index` and send it to the server. We can use [maintenance operations](../indexes/creating-and-deploying.mdx#using-maintenance-operations) or create a [custom class](../indexes/creating-and-deploying.mdx#using-abstractindexcreationtask).
+
+---
+
+### Using AbstractIndexCreationTask
+
+AbstractIndexCreationTask lets you avoid hard-coding index names in every query.
+
+
+We recommend creating and using indexes in this form due to its simplicity. There are many benefits and few disadvantages.
+
+
+#### Naming convention
+
+There is only one naming convention: each `_` in the class name will be translated to `/` in the index name.
+
+For example:
+
+In the `Northwind` samples, there is an index called `Orders/Totals`. To get such an index name, we need to create a class called `Orders_Totals`.
+
+
+
+{`public class Orders_Totals extends AbstractIndexCreationTask \{
+    /// ...
+\}
+`}
+
+
+
+#### Sending to server
+
+An index is of little use if it is not deployed to the server. To deploy it, we need to create an instance of our class that inherits from `AbstractIndexCreationTask` and use the `execute` method.
+
+
+
+{`// deploy index to database defined in \`DocumentStore.getDatabase\` method
+// using default DocumentStore \`conventions\`
+new Orders_Totals().execute(store);
+`}
+
+
+
+
+
+{`// deploy index to \`Northwind\` database
+// using default DocumentStore \`conventions\`
+new Orders_Totals().execute(store, store.getConventions(), "Northwind");
+`}
+
+
+
+
+If an index exists on the server and the stored definition is the same as the one that was sent, it will not be overwritten. The indexed data will not be deleted and indexing will not start from scratch.
+
+
+#### Creating an index with custom configuration
+
+If you need to create an index with a custom [`index configuration`](../server/configuration/indexing-configuration.mdx), you can set the configuration entries in the index class constructor like so:
+
+
+{`public class Orders_Totals extends AbstractIndexCreationTask \{
+    public Orders_Totals() \{
+        // ...
        configuration.put("MapTimeoutInSec","30");
+        setConfiguration(configuration);
+    \}
+\}
+`}
+
+
+
+#### Example
+
+
+
+{`public static class Orders_Totals extends AbstractIndexCreationTask \{
+    public static class Result \{
+        private String employee;
+        private String company;
+        private double total;
+
+        public String getEmployee() \{
+            return employee;
+        \}
+
+        public void setEmployee(String employee) \{
+            this.employee = employee;
+        \}
+
+        public String getCompany() \{
+            return company;
+        \}
+
+        public void setCompany(String company) \{
+            this.company = company;
+        \}
+
+        public double getTotal() \{
+            return total;
+        \}
+
+        public void setTotal(double total) \{
+            this.total = total;
+        \}
+    \}
+
+    public Orders_Totals() \{
+        map = "docs.Orders.Select(order => new \{ " +
+            "    Employee = order.Employee, " +
+            "    Company = order.Company, " +
+            "    Total = Enumerable.Sum(order.Lines, l => ((decimal)((((decimal) l.Quantity) * l.PricePerUnit) * (1M - l.Discount)))) " +
+            "\})";
+    \}
+
+    public static void main(String[] args) \{
+        try (IDocumentStore store = new DocumentStore(new String[]\{ "http://localhost:8080" \}, "Northwind")) \{
+            store.initialize();
+
+            new Orders_Totals().execute(store);
+
+            try (IDocumentSession session = store.openSession()) \{
+                List orders = session
+                    .query(Result.class, Orders_Totals.class)
+                    .whereGreaterThan("Total", 100)
+                    .ofType(Order.class)
+                    .toList();
+            \}
+        \}
+    \}
+\}
+`}
+
+
+
+---
+
+### Using maintenance operations
+
+The `PutIndexesOperation` maintenance operation (whose API reference can be found [here](../client-api/operations/maintenance/indexes/put-indexes.mdx)) can also be used to send one or more indexes to the server.
+
+The benefit of this approach is that you can choose the name as you see fit, and change various settings available in `IndexDefinition`. You will have to use string-based names of indexes when querying (a sketch of such a query appears at the end of this section).
+
+
+
+{`IndexDefinition indexDefinition = new IndexDefinition();
+indexDefinition.setName("Orders/Totals");
+indexDefinition.setMaps(Collections.singleton(
+    "from order in docs.Orders " +
+    "    select new " +
+    "    \{ " +
+    "        order.employee, " +
+    "        order.company, " +
+    "        total = order.lines.Sum(l => (l.quantity * l.pricePerUnit) * (1 - l.discount)) " +
+    "\}"
+));
+
+store
+    .maintenance()
+    .send(new PutIndexesOperation(indexDefinition));
+`}
+
+
+
+#### IndexDefinitionBuilder
+
+`IndexDefinitionBuilder` is a very useful class that enables you to create `IndexDefinitions` using strongly-typed syntax with access to low-level settings not available when the `AbstractIndexCreationTask` approach is used.
+
+
+
+{`IndexDefinitionBuilder builder = new IndexDefinitionBuilder();
+builder.setMap(
+    "from order in docs.Orders \\n" +
+    "select new \\n" +
+    "    \{\\n" +
+    "        order.employee,\\n" +
+    "        order.company,\\n" +
+    "        total = order.lines.Sum(l => (l.quantity * l.pricePerUnit) * (1 - l.discount))\\n" +
+    "\}");
+
+store.maintenance()
+    .send(new PutIndexesOperation(builder.toIndexDefinition(store.getConventions())));
+`}
+
+
+
+#### Remarks
+
+
+The maintenance operations and `IndexDefinitionBuilder` approaches are not recommended; use them only if inheriting from `AbstractIndexCreationTask` is not an option.
+
+
+
+
+Since RavenDB 4.0, **all** index updates are side-by-side by default. The new index will replace the existing one once it becomes non-stale. If you want to force an index to swap immediately, you can use the Studio for that.
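
As noted above, indexes deployed through `PutIndexesOperation` are addressed by their string-based name when querying. A minimal sketch, shown in C# (the language used for all examples added in this changeset; the java client's `session.query` overloads follow the same idea), assuming the Northwind `Order` class:

```csharp
using System.Linq;
using Raven.Client.Documents;
using Raven.Client.Documents.Session;

public static class QueryByIndexName
{
    public static void Run(IDocumentStore store)
    {
        using (IDocumentSession session = store.OpenSession())
        {
            // An index deployed through PutIndexesOperation is referenced
            // by the string-based name it was given, not by a class type.
            // The field name passed to the filter must match the casing
            // used in the index definition's map.
            var results = session.Advanced
                .DocumentQuery<Order>("Orders/Totals")
                .WhereGreaterThan("Total", 100)
                .ToList();
        }
    }
}
```
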
+
+
+
+
+## **Auto indexes**
+
+Auto-indexes are **created** when queries that do **not specify an index name** are executed and, after in-depth query analysis, **no matching AUTO index is found** on the server side.
+
+
+The query optimizer doesn't take into account the static indexes when it determines what index should be used to handle a query.
+
+
+### Naming convention
+
+Auto-indexes can be recognized by the `Auto/` prefix in their name. Their name also contains the name of the collection that was queried, and a list of fields that were required to find valid query results.
+
+For instance, issuing a query like this
+
+
+
+
+{`List employees = session
+    .query(Employee.class)
+    .whereEquals("firstName", "Robert")
+    .andAlso()
+    .whereEquals("lastName", "King")
+    .toList();
+`}
+
+
+
+
+{`from Employees
+where FirstName = 'Robert' and LastName = 'King'
+`}
+
+
+
+
+will result in the creation of an index named `Auto/Employees/ByFirstNameAndLastName`.
+
+### Auto indexes and indexing state
+
+To reduce the server load, if auto-indexes are not queried for a certain amount of time defined in the `Indexing.TimeToWaitBeforeMarkingAutoIndexAsIdleInMin` setting (30 minutes by default), they will be marked as `Idle`. You can read more about the implications of marking an index as `Idle` [here](../studio/database/indexes/indexes-list-view.mdx#index-state).
+
+Setting this configuration option to a high value is _not_ recommended, as it may result in performance degradation caused by a large amount of redundant, unnecessary indexing work.
+
+
+
+## If indexes exhaust system resources
+
+* The indexing process utilizes machine resources to keep the data up-to-date for queries.
+
+* If indexing drains system resources, it may indicate one or more of the following:
+  * Indexes may have been defined in a way that causes inefficient processing.
+  * The [license](https://ravendb.net/buy) may need to be upgraded.
+  * Your [cloud instance](/cloud/cloud-instances#a-production-cloud-cluster) (if used) may require optimization.
+  * Hardware upgrades may be necessary to better support your workload.
+
+* Refer to the [Indexing Performance View](../studio/database/indexes/indexing-performance.mdx) in the Studio to analyze the indexing process and optimize indexes.
+  This view provides graphical representations and detailed statistics of all index activities at each stage.
+
+* Additionally, refer to the [Common indexing issues](../studio/database/indexes/indexing-performance.mdx#common-indexing-issues) section
+  for troubleshooting and resolving indexing challenges.
+
+
+
+ diff --git a/versioned_docs/version-7.1/indexes/_creating-and-deploying-nodejs.mdx b/versioned_docs/version-7.1/indexes/_creating-and-deploying-nodejs.mdx new file mode 100644 index 0000000000..80c5130a18 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_creating-and-deploying-nodejs.mdx @@ -0,0 +1,380 @@ +import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article explains how to create indexes in RavenDB.
+  For a general overview of indexes, see [What are indexes](../indexes/what-are-indexes.mdx).
+ +* You can either: + * [create a Static-index](../indexes/creating-and-deploying.mdx#define-a-static-index) yourself, which involves **Defining** the index and **Deploying** it to the RavenDB server, or + * let the RavenDB server [create an Auto-index](../indexes/creating-and-deploying.mdx#creating-auto-indexes) for you based on query patterns. + +* Static-indexes can be created: + * using the Client API, as outlined in this article, or + * from the [Indexes list view](../studio/database/indexes/indexes-list-view.mdx) in the Studio. +* In this page: + * [Static-indexes](../indexes/creating-and-deploying.mdx#static-indexes) + * [Define a static-index](../indexes/creating-and-deploying.mdx#define-a-static-index) + * [Deploy a static-index](../indexes/creating-and-deploying.mdx#deploy-a-static-index) + * [Deploy single index](../indexes/creating-and-deploying.mdx#deploy-single-index) + * [Deploy multiple indexes](../indexes/creating-and-deploying.mdx#deploy-multiple-indexes) + * [Deploy syntax](../indexes/creating-and-deploying.mdx#deploy-syntax) + * [Deployment behavior](../indexes/creating-and-deploying.mdx#deployment-behavior) + * [Creating a static-index - Example](../indexes/creating-and-deploying.mdx#create-a-static-index---example) + * [Creating a static-index - using an Operation](../indexes/creating-and-deploying.mdx#create-a-static-index---using-an-operation) + * [Auto-indexes](../indexes/creating-and-deploying.mdx#auto-indexes) + * [Creating auto-indexes](../indexes/creating-and-deploying.mdx#creating-auto-indexes) + * [Disabling auto-indexes](../indexes/creating-and-deploying.mdx#disabling-auto-indexes) + + + + +## Define a static-index + + + +##### Static-indexes +* Indexes that are explicitly **created by the user** are called `static` indexes. +* Static-indexes can perform calculations, data conversions, and other processes behind the scenes. + This reduces the workload at query time by offloading these costly operations to the indexing phase. +* To query with a static-index, you must explicitly specify the index in the query definition. + For more details, see [Querying an index](../indexes/querying/query-index.mdx). + + + + +##### Define a static-index using a custom class +* To define a static-index using a custom class, extend the `AbstractJavaScriptIndexCreationTask` class. +* This method is recommended over the [Creating an index using an operation](../indexes/creating-and-deploying.mdx#create-a-static-index---using-an-operation) method + for its simplified index definition, offering a straightforward way to define the index. + + +{`class Orders_ByTotal extends AbstractJavaScriptIndexCreationTask \{ + /// ... +\} +`} + + +* A complete example of creating a static-index is provided [below](../indexes/creating-and-deploying.mdx#create-a-static-index---example). + + + + +##### Naming convention +* Static-index class names follow a single naming convention: + Each `_` in the class name is translated to `/` in the index name on the server. +* In the above example, the index class name is `Orders_ByTotal`. + The name of the index that will be generated on the server will be: `Orders/ByTotal`. + + + + +##### Customizing configuration +* You can set various [indexing configuration](../server/configuration/indexing-configuration.mdx) values within the index definition. +* Setting a configuration value within the index will override the matching indexing configuration values set at the server or database level. 
+
+
+{`class Orders_ByTotal extends AbstractJavaScriptIndexCreationTask \{
+    constructor() \{
+        super();
+        // ...
+
+        // Set an indexing configuration value for this index:
+        this.configuration = \{
+            "Indexing.MapTimeoutInSec": "30",
+        \}
+    \}
+\}
+`}
+
+
+
+
+
+## Deploy a static-index
+
+* To begin indexing data, the index must be deployed to the server.
+* This section provides options for deploying indexes that inherit from `AbstractJavaScriptIndexCreationTask`.
+* To create and deploy an index using the `IndexDefinition` class via `PutIndexesOperation`,
+  see [Creating a static-index - using an Operation](../indexes/creating-and-deploying.mdx#create-a-static-index---using-an-operation).
+
+
+##### Deploy single index
+* Use `execute()` or `executeIndex()` to deploy a single index.
+* The following examples deploy index `Orders/ByTotal` to the default database defined in your _DocumentStore_ object.
+  See the [syntax](../indexes/creating-and-deploying.mdx#deploy-syntax) section below for all available overloads.
+
+
+
+{`// Call 'execute' directly on the index instance
+await new Orders_ByTotal().execute(documentStore);
+`}
+
+
+
+
+{`// Call 'executeIndex' on your store object
+await documentStore.executeIndex(new Orders_ByTotal());
+`}
+
+
+
+
+
+##### Deploy multiple indexes
+* Use `executeIndexes()` or `IndexCreation.createIndexes()` to deploy multiple indexes.
+* The `IndexCreation.createIndexes` method attempts to create all indexes in a single request.
+  If it fails, it will repeat the execution by calling the `execute` method for each index, one by one,
+  in separate requests.
+* The following examples deploy indexes `Orders/ByTotal` and `Employees/ByLastName` to the default database defined in your _DocumentStore_ object.
+  See the [syntax](../indexes/creating-and-deploying.mdx#deploy-syntax) section below for all available overloads.
+
+
+
+{`const indexesToDeploy = [new Orders_ByTotal(), new Employees_ByLastName()];
+// Call 'executeIndexes' on your store object
+await documentStore.executeIndexes(indexesToDeploy);
+`}
+
+
+
+
+{`const indexesToDeploy = [new Orders_ByTotal(), new Employees_ByLastName()];
+// Call the static method 'createIndexes' on the IndexCreation class
+await IndexCreation.createIndexes(indexesToDeploy, documentStore);
+`}
+
+
+
+
+
+##### Deploy syntax
+
+
+{`// Call this method directly on the index instance
+execute(store);
+execute(store, conventions);
+execute(store, conventions, database);
+
+// Call these methods on the store object
+executeIndex(index);
+executeIndex(index, database);
+executeIndexes(indexes);
+executeIndexes(indexes, database);
+
+// Call these methods on the IndexCreation class
+createIndexes(indexes, store);
+createIndexes(indexes, store, conventions);
+`}
+
+
+
+| Parameter       | Type                  | Description                                                                                                        |
+|-----------------|-----------------------|--------------------------------------------------------------------------------------------------------------------|
+| **store**       | `object`              | Your document store object.                                                                                        |
+| **conventions** | `DocumentConventions` | The [Conventions](../client-api/configuration/conventions.mdx) used by the document store.                         |
+| **database**    | `string`              | The target database to deploy the index to. If not specified, the default database set on the store will be used. |
+| **index**       | `object`              | The index object to deploy.                                                                                        |
+| **indexes**     | `object[]`            | A list of index objects to deploy.
| + + + + +##### Deployment behavior + + +###### Deployment mode: +* When your database spans multiple nodes, + you can choose between **Rolling** index deployment or **Parallel** index deployment. +* Rolling deployment applies the index to one node at a time, + while Parallel deployment deploys the index on all nodes simultaneously. +* Learn more in [Rolling index deployment](../indexes/rolling-index-deployment.mdx). + + + + + +###### When the index you are deploying already exists on the server: +* **If the index definition is updated**: + * RavenDB uses a side-by-side strategy for all index updates. + * When an existing index definition is modified, RavenDB creates a new index with the updated definition. + The new index will replace the existing index once it becomes non-stale. + * If you want to swap the indexes immediately, you can do so through the Studio. + For more details, see [Side by side indexing](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---side-by-side-indexing). +* **If the index definition is unchanged**: + * If the definition of the index being deployed is identical to the one on the server, + the existing index will not be overwritten. + * The indexed data will remain intact, and the indexing process will not restart. + + + + + +## Create a static-index - Example + + + +{`// Define a static-index: +// ====================== +class Orders_ByTotal extends AbstractJavaScriptIndexCreationTask \{ + constructor() \{ + super(); + + this.map("Orders", order => \{ + return \{ + Employee: order.Employee, + Company: order.Company, + Total: order.Lines.reduce((sum, line) => + sum + (line.Quantity * line.PricePerUnit) * (1 - line.Discount), 0) + \} + \}); + + // Customize the index configuration + this.deploymentMode = "Rolling"; + this.configuration["Indexing.MapTimeoutInSec"] = "30"; + this.indexes.add(x => x.Company, "Search"); + // ... + \} +\} + +async function main() \{ + const documentStore = new DocumentStore("http://localhost:8080", "Northwind"); + documentStore.initialize(); + + // Deploy the index: + // ================= + const ordersByTotalIndex = new Orders_ByTotal(); + await ordersByTotalIndex.execute(documentStore); + + const session = documentStore.openSession() + + // Query the index: + // ================ + const myIndexName = ordersByTotalIndex.getIndexName(); + + const orders = await session + .query(\{ indexName: myIndexName \}) + .whereGreaterThan("Total", 100) + .all(); +\} +`} + + + + + +## Create a static-index - using an Operation + +* An index can also be defined and deployed using the [PutIndexesOperation](../client-api/operations/maintenance/indexes/put-indexes.mdx) maintenance operation. + +* When using this operation: + + * Unlike the [naming convention](../indexes/creating-and-deploying.mdx#naming-convention) used with indexes inheriting from `AbstractJavaScriptIndexCreationTask`, + you can choose any string-based name for the index. + However, when querying, you must use that string-based name rather than the index class type. + + * You can also modify various low-level settings available in the [IndexDefinition](../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinition) class. + +* Consider using this operation only if inheriting from `AbstractJavaScriptIndexCreationTask` is not an option. + +* For a detailed explanation and examples, refer to the dedicated article: [Put Indexes Operation](../client-api/operations/maintenance/indexes/put-indexes.mdx). 
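+
+For orientation, below is a minimal sketch of this approach. The index name, the map string, and the `documentStore`/`session` variables are illustrative assumptions, not examples taken from the dedicated article:
+
+
+{`const \{ IndexDefinition, PutIndexesOperation \} = require("ravendb");
+
+// Define the index using the IndexDefinition class.
+// Any string-based name can be chosen:
+const indexDefinition = new IndexDefinition();
+indexDefinition.name = "Orders/ByCompany/ViaOperation";
+indexDefinition.maps = new Set([
+    \`from order in docs.Orders
+     select new
+     \{
+         Company = order.Company
+     \}\`
+]);
+
+// Deploy the index definition to the server:
+await documentStore.maintenance.send(new PutIndexesOperation(indexDefinition));
+
+// When querying, refer to the index by its string-based name (not a class type):
+const orders = await session
+    .query(\{ indexName: "Orders/ByCompany/ViaOperation" \})
+    .all();
+`}
+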
+
+
+
+
+## Creating auto-indexes
+
+
+
+##### Auto-indexes creation
+* Indexes **created by the server** are called `dynamic` or `auto` indexes.
+* Auto-indexes are created when all of the following conditions are met:
+  * A query is issued without specifying an index (a dynamic query).
+  * The query includes a filtering condition.
+  * No suitable auto-index exists that can satisfy the query.
+  * Creation of auto-indexes has not been disabled.
+* For such queries, RavenDB's Query Optimizer searches for an existing auto-index that can satisfy the query.
+  If no suitable auto-index is found, RavenDB will either create a new auto-index or optimize an existing auto-index.
+  (Static-indexes are not taken into account when determining which auto-index should handle the query).
+* Note: dynamic queries can be issued either when [querying](../studio/database/queries/query-view.mdx#query-view) or when [patching](../studio/database/documents/patch-view.mdx#patch-configuration).
+* Over time, RavenDB automatically adjusts and merges auto-indexes to efficiently serve your queries.
+  For more details, see [Query a collection - with filtering (dynamic query)](../client-api/session/querying/how-to-query.mdx#dynamicQuery).
+
+
+
+
+##### Naming convention
+* Auto-indexes are easily identified by their names, which start with the `Auto/` prefix.
+* Their name also includes the name of the queried collection and a list of fields used in the query predicate to filter matching results.
+* For example, issuing the following query:
+
+
+
+{`const employees = await session
+    .query({ collection: 'employees' })
+    .whereEquals("FirstName", "Robert")
+    .whereEquals("LastName", "King")
+    .all();
+`}
+
+
+
+
+{`from Employees
+where FirstName = "Robert" and LastName = "King"
+`}
+
+
+
+  will result in the creation of an auto-index named `Auto/Employees/ByFirstNameAndLastName`.
+
+
+
+
+##### Auto-index idle state
+* To reduce server load, an auto-index is marked as `idle` when it hasn't been used for a while.
+  Specifically, if the time difference between the last time the auto-index was queried
+  and the last time a query was made on the database (using any index) exceeds the configured threshold (30 minutes by default),
+  the auto-index will be marked as `idle`.
+* This is done in order to avoid marking indexes as idle for databases that were offline for a long period of time,
+  as well as for databases that were just restored from a snapshot or a backup.
+* To set the time before an index is marked as idle, use the
+  [Indexing.TimeToWaitBeforeMarkingAutoIndexAsIdleInMin](../server/configuration/indexing-configuration.mdx#indexingtimetowaitbeforemarkingautoindexasidleinmin) configuration key.
+  Setting this value too high is not recommended, as it may lead to performance degradation by causing redundant work for the indexes.
+* An `idle` auto-index will resume its work and return to the `normal` state upon its next query,
+  or when the index is reset.
+* If not resumed, the idle auto-index will be deleted by the server after the time period defined in the
+  [Indexing.TimeToWaitBeforeDeletingAutoIndexMarkedAsIdleInHrs](../server/configuration/indexing-configuration.mdx#indexingtimetowaitbeforedeletingautoindexmarkedasidleinhrs) configuration key
+  (72 hours by default).
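+
+As a brief illustration of resetting, here is a minimal sketch of returning an idle auto-index to the `normal` state from the Client API. It assumes the auto-index name from the naming-convention example above and the Node.js client's `ResetIndexOperation`:
+
+
+{`const \{ ResetIndexOperation \} = require("ravendb");
+
+// Resetting rebuilds the index from scratch and returns it to the 'normal' state:
+await documentStore.maintenance.send(
+    new ResetIndexOperation("Auto/Employees/ByFirstNameAndLastName"));
+`}
+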
+
+
+
+
+## Disabling auto-indexes
+
+**Why disable**:
+
+* Disabling auto-index creation prevents the accidental deployment of resource-consuming auto-indexes that may result from one-time, ad-hoc queries issued from the Studio.
+* In production environments, disabling this feature helps avoid the creation and background execution of expensive indexes.
+
+**How to disable**:
+
+* You can disable auto-indexes by setting the [Indexing.DisableQueryOptimizerGeneratedIndexes](../server/configuration/indexing-configuration.mdx#indexingdisablequeryoptimizergeneratedindexes) configuration key.
+  This will affect all queries made both from the **Client API** and the **Studio**.
+
+* Alternatively, you can disable auto-indexes from the Studio.
+  However, this will affect queries made only from the **Studio**.
+
+  * To disable auto-index creation for a specific query made from the query view, see these [Query settings](../studio/database/queries/query-view.mdx#query-settings).
+  * To disable auto-index creation for a specific query made from the patch view, see these [Patch settings](../studio/database/documents/patch-view.mdx#patch-settings).
+  * Disabling auto-index creation for ALL queries made on a database can be configured in the [Studio configuration view](../studio/database/settings/studio-configuration.mdx#disabling-auto-index-creation-on-studio-queries-or-patches).
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_extending-indexes-csharp.mdx b/versioned_docs/version-7.1/indexes/_extending-indexes-csharp.mdx
new file mode 100644
index 0000000000..f798518161
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_extending-indexes-csharp.mdx
@@ -0,0 +1,82 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* An [index](../indexes/what-are-indexes.mdx) is defined with a mapping function that utilizes a LINQ-like syntax.
+
+* Writing a _complex_ LINQ expression can be a non-trivial task.
+  You can extend your indexing capabilities by adding custom code to the index definition.
+  This enables calling the added custom functions or using external library logic (e.g., NodaTime) in your LINQ expression.
+
+* The indexing process will execute the LINQ code and the invoked additional source code.
+
+* Adding this custom code can be done from the [Studio](../studio/database/indexes/create-map-index.mdx#additional-sources) or from code using the _additional sources_ feature.
+  See the [example](../indexes/extending-indexes.mdx#including-additional-sources-from-client-code) below.
+
+* Advantages:
+  * **Readability**: Index logic is clearer and more readable
+  * **Code reuse**: Code fragments are re-used
+  * **Performance**: Using the additional sources feature can perform better than complex LINQ expressions
+  * **Extendability**: External libraries can be included and used
+
+
+## Including additional sources from client code
+
+* `AdditionalSources` is a property of the `AbstractIndexCreationTask` class.
+* It should be defined in the _constructor_ of your index class, which derives from `AbstractIndexCreationTask`.
+* Example:
+
+
+
+{`public class People_ByEmail : AbstractIndexCreationTask<Person>
+\{
+    public People_ByEmail()
+    \{
+        Map = people => from person in people
+                        select new
+                        \{
+                            // Calling the custom function
+                            Email = CalculatePersonEmail(person.Name, person.Age)
+                        \};
+
+        // Add your custom logic here.
+        AdditionalSources = new Dictionary<string, string>
+        \{
+            \{
+                "PeopleUtil",
+                @"
+                using System;
+                using NodaTime; /* using an external library */
+                using static Raven.Documentation.Samples.Indexes.PeopleUtil;
+
+                namespace Raven.Documentation.Samples.Indexes
+                \{
+                    public static class PeopleUtil
+                    \{
+                        public static string CalculatePersonEmail(string name, uint age)
+                        \{
+                            return $""\{name\}.\{Instant.FromDateTimeUtc(DateTime.Now.ToUniversalTime())
+                                   .ToDateTimeUtc().Year - age\}@ayende.com"";
+                        \}
+                    \}
+                \}"
+            \}
+        \};
+    \}
+\}
+`}
+
+
+
+
+
+
+* External DLLs that are referenced must be _manually deployed_ to the folder containing the Raven.Server executable.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_extending-indexes-java.mdx b/versioned_docs/version-7.1/indexes/_extending-indexes-java.mdx
new file mode 100644
index 0000000000..28ddb05a58
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_extending-indexes-java.mdx
@@ -0,0 +1,69 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* An [index](../indexes/what-are-indexes.mdx) is defined with a mapping function that utilizes a LINQ-like syntax.
+
+* Writing a _complex_ LINQ expression can be a non-trivial task.
+  You can extend your indexing capabilities by adding custom code to the index definition.
+  This enables calling the added custom functions or using external library logic (e.g., NodaTime) in your LINQ expression.
+
+* The indexing process will execute the LINQ code and the invoked additional source code.
+
+* Adding this custom code can be done from the [Studio](../studio/database/indexes/create-map-index.mdx#additional-sources) or from code using the _additional sources_ feature.
+  See the [example](../indexes/extending-indexes.mdx#including-additional-sources-from-client-code) below.
+
+* Advantages:
+  * **Readability**: Index logic is clearer and more readable
+  * **Code reuse**: Code fragments are re-used
+  * **Performance**: Using the additional sources feature can perform better than complex LINQ expressions
+  * **Extendability**: External libraries can be included and used
+
+
+## Including additional sources from client code
+
+* `additionalSources` is a field of the `AbstractIndexCreationTask` class.
+* It should be defined in the _constructor_ of your index class, which derives from `AbstractIndexCreationTask`.
+* Example:
+
+
+
+{`public class People_ByEmail extends AbstractIndexCreationTask \{
+    public People_ByEmail() \{
+        map = "docs.People.Select(person => new \{ " +
+              "    Email = PeopleUtil.CalculatePersonEmail(person.Name, person.Age) " +
+              "\})";
+
+        additionalSources = Collections.singletonMap("PeopleUtil",
+            " using System; " +
+            " using NodaTime; /* using an external library */ " +
+            " using static Raven.Documentation.Samples.Indexes.PeopleUtil; " +
+            " namespace Raven.Documentation.Samples.Indexes " +
+            " \{ " +
+            "     public static class PeopleUtil " +
+            "     \{ " +
+            "         public static string CalculatePersonEmail(string name, uint age) " +
+            "         \{ " +
+            "             return $\\"\{name\}.\{Instant.FromDateTimeUtc(DateTime.Now.ToUniversalTime()) " +
+            "                    .ToDateTimeUtc().Year - age\}@ayende.com\\"; " +
+            "         \} " +
+            "     \} " +
+            " \}");
+    \}
+\}
+`}
+
+
+
+
+
+
+* External DLLs that are referenced must be _manually deployed_ to the folder containing the Raven.Server executable.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_extending-indexes-nodejs.mdx b/versioned_docs/version-7.1/indexes/_extending-indexes-nodejs.mdx
new file mode 100644
index 0000000000..7cb5a0afbe
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_extending-indexes-nodejs.mdx
@@ -0,0 +1,73 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* An [index](../indexes/what-are-indexes.mdx) is defined with a mapping function that utilizes a LINQ-like syntax.
+
+* Writing a _complex_ LINQ expression can be a non-trivial task.
+  You can extend your indexing capabilities by adding custom code to the index definition.
+  This enables calling the added custom functions or using external library logic (e.g., NodaTime) in your LINQ expression.
+
+* The indexing process will execute the LINQ code and the invoked additional source code.
+
+* Adding this custom code can be done from the [Studio](../studio/database/indexes/create-map-index.mdx#additional-sources) or from code using the _additional sources_ feature.
+  See the [example](../indexes/extending-indexes.mdx#including-additional-sources-from-client-code) below.
+
+* Advantages:
+  * **Readability**: Index logic is clearer and more readable
+  * **Code reuse**: Code fragments are re-used
+  * **Performance**: Using the additional sources feature can perform better than complex LINQ expressions
+  * **Extendability**: External libraries can be included and used
+
+
+## Including additional sources from client code
+
+* `additionalSources` is a field of the `AbstractIndexCreationTask` class.
+* It should be defined in the _constructor_ of your index class, which derives from `AbstractIndexCreationTask`.
+* Example:
+
+
+
+{`class People_ByEmail extends AbstractIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.map = "docs.People.Select(person => new \{ " +
+                   "    Email = PeopleUtil.CalculatePersonEmail(person.Name, person.Age) " +
+                   "\})";
+
+        this.additionalSources =
+        \{
+            "PeopleUtil": \`using System;
+                           using NodaTime; /* using an external library */
+                           using static Raven.Documentation.Samples.Indexes.PeopleUtil;
+                           namespace Raven.Documentation.Samples.Indexes
+                           \{
+                               public static class PeopleUtil
+                               \{
+                                   public static string CalculatePersonEmail(string name, uint age)
+                                   \{
+                                       return $"\{name\}.\{Instant.FromDateTimeUtc(DateTime.Now.ToUniversalTime())
+                                              .ToDateTimeUtc().Year - age\}@ayende.com";
+                                   \}
+                               \}
+                           \}\`
+        \};
+    \}
+\}
+`}
+
+
+
+
+
+
+* External DLLs that are referenced must be _manually deployed_ to the folder containing the Raven.Server executable.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-basics-csharp.mdx b/versioned_docs/version-7.1/indexes/_indexing-basics-csharp.mdx
new file mode 100644
index 0000000000..9c193a4456
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-basics-csharp.mdx
@@ -0,0 +1,99 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To achieve very fast response times, RavenDB handles **indexing in the background** whenever data is added or changed. This approach allows the server to respond quickly even when large amounts of data have changed. The only drawback of this choice is that results might be stale (more about staleness in the next section). Underneath, the server uses [Lucene](http://lucene.apache.org/) for indexing and [Raven Query Language](../client-api/session/querying/what-is-rql.mdx) for querying.
+
+## Stale Indexes
+
+The notion of stale indexes comes from an observation deep in RavenDB's design: the user should never suffer because the server was assigned big tasks. As far as RavenDB is concerned, it is better to be stale than offline, and as such it will return results to queries even if it knows they may not be as up-to-date as possible.
+
+RavenDB responds quickly to every client request, even if it involves re-indexing hundreds of thousands of documents. Since the previous request returned so quickly, the next query can be made a millisecond after that; results will be returned, but they will be marked as `Stale`.
+
+
+You can read more about stale indexes [here](../indexes/stale-indexes.mdx).
+
+
+## Querying
+
+RavenDB uses `Raven Query Language (RQL)`, an SQL-like language, for querying. The easiest way for us would be to expose a method in which you could pass your RQL-flavored query as a string (we [did](../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) that) and not bother about anything else.
+
+The fact is that we did not stop at this point. We went much further by exposing LINQ-based querying with strong-type support that hides all the Lucene syntax complexity:
+
+
+
+
+{`List<Employee> employees = session
+    .Query<Employee>("Employees/ByFirstName")
+    .Where(x => x.FirstName == "Robert")
+    .ToList();
+`}
+
+
+
+
+{`IQueryable<Employee> employees =
+    from employee in session.Query<Employee>("Employees/ByFirstName")
+    where employee.FirstName == "Robert"
+    select employee;
+`}
+
+
+
+
+{`from index 'Employees/ByFirstName'
+where FirstName = 'Robert'
+`}
+
+
+
+
+You can also create queries manually by using [DocumentQuery](../client-api/session/querying/document-query/what-is-document-query.mdx) or [RawQuery](../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery), both available as a part of the advanced session operations:
+
+
+
+
+{`List<Employee> employees = session
+    .Advanced
+    .DocumentQuery<Employee>("Employees/ByFirstName")
+    .WhereEquals(x => x.FirstName, "Robert")
+    .ToList();
+`}
+
+
+
+
+{`List<Employee> employees = session
+    .Advanced
+    .RawQuery<Employee>("from index 'Employees/ByFirstName' where FirstName = 'Robert'")
+    .ToList();
+`}
+
+
+
+
+{`from index 'Employees/ByFirstName'
+where FirstName = 'Robert'
+`}
+
+
+
+
+## Types of Indexes
+
+You probably know that indexes can be divided by their origin into `static` and `auto` indexes (if not, read about it [here](../indexes/creating-and-deploying.mdx)), but a more interesting division is by functionality. For this case we have `Map` and `Map-Reduce` indexes.
+
+`Map` indexes (sometimes referred to as simple indexes) contain one (or more) mapping functions that indicate which fields from documents should be indexed. They indicate which documents can be searched by which fields.
+
+`Map-Reduce` indexes allow complex aggregations to be performed in a two-step process: first by selecting appropriate records (using the Map function), then by applying a specified Reduce function to these records to produce a smaller set of results.
+
+
+You can read more about `Map` indexes [here](../indexes/map-indexes.mdx).
+
+
+
+More detailed information about `Map-Reduce` indexes can be found [here](../indexes/map-reduce-indexes.mdx).
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-basics-java.mdx b/versioned_docs/version-7.1/indexes/_indexing-basics-java.mdx
new file mode 100644
index 0000000000..470bb9c3ad
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-basics-java.mdx
@@ -0,0 +1,78 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To achieve very fast response times, RavenDB handles **indexing in the background** whenever data is added or changed. This approach allows the server to respond quickly even when large amounts of data have changed. The only drawback of this choice is that results might be stale (more about staleness in the next section). Underneath, the server uses [Lucene](http://lucene.apache.org/) for indexing and [Raven Query Language](../client-api/session/querying/what-is-rql.mdx) for querying.
+
+## Stale Indexes
+
+The notion of stale indexes comes from an observation deep in RavenDB's design: the user should never suffer because the server was assigned big tasks. As far as RavenDB is concerned, it is better to be stale than offline, and as such it will return results to queries even if it knows they may not be as up-to-date as possible.
+
+RavenDB responds quickly to every client request, even if it involves re-indexing hundreds of thousands of documents. Since the previous request returned so quickly, the next query can be made a millisecond after that; results will be returned, but they will be marked as `Stale`.
+
+
+You can read more about stale indexes [here](../indexes/stale-indexes.mdx).
+
+
+## Querying
+
+RavenDB uses `Raven Query Language (RQL)`, an SQL-like language, for querying. The easiest way for us would be to expose a method in which you could pass your RQL-flavored query as a string (we [did](../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) that) and not bother about anything else.
+
+The fact is that we did not stop at this point. We went much further, by exposing querying methods that hide all the Lucene syntax complexity:
+
+
+
+
+{`List<Employee> employees = session.query(Employee.class, Query.index("Employees/ByFirstName"))
+    .whereEquals("FirstName", "Robert")
+    .toList();
+`}
+
+
+
+
+{`from index 'Employees/ByFirstName'
+where FirstName = 'Robert'
+`}
+
+
+
+
+You can also create queries by using [RawQuery](../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery). It is available as a part of the advanced session operations:
+
+
+
+
+{`List<Employee> employees = session.advanced()
+    .rawQuery(Employee.class, "from index 'Employees/ByFirstName' where FirstName = 'Robert'")
+    .toList();
+`}
+
+
+
+
+{`from index 'Employees/ByFirstName'
+where FirstName = 'Robert'
+`}
+
+
+
+
+## Types of Indexes
+
+You probably know that indexes can be divided by their origin into `static` and `auto` indexes (if not, read about it [here](../indexes/creating-and-deploying.mdx)), but a more interesting division is by functionality. For this case we have `Map` and `Map-Reduce` indexes.
+
+`Map` indexes (sometimes referred to as simple indexes) contain one (or more) mapping functions that indicate which fields from documents should be indexed. They indicate which documents can be searched by which fields.
+
+`Map-Reduce` indexes allow complex aggregations to be performed in a two-step process: first by selecting appropriate records (using the Map function), then by applying a specified Reduce function to these records to produce a smaller set of results.
+
+
+You can read more about `Map` indexes [here](../indexes/map-indexes.mdx).
+
+
+
+More detailed information about `Map-Reduce` indexes can be found [here](../indexes/map-reduce-indexes.mdx).
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-basics-nodejs.mdx b/versioned_docs/version-7.1/indexes/_indexing-basics-nodejs.mdx
new file mode 100644
index 0000000000..bcf08f4c1e
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-basics-nodejs.mdx
@@ -0,0 +1,78 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+To achieve very fast response times, RavenDB handles **indexing in the background** whenever data is added or changed. This approach allows the server to respond quickly even when large amounts of data have changed. The only drawback of this choice is that results might be stale (more about staleness in the next section). Underneath, the server uses [Lucene](http://lucene.apache.org/) for indexing and [Raven Query Language](../client-api/session/querying/what-is-rql.mdx) for querying.
+
+## Stale Indexes
+
+The notion of stale indexes comes from an observation deep in RavenDB's design: the user should never suffer because the server was assigned big tasks. As far as RavenDB is concerned, it is better to be stale than offline, and as such it will return results to queries even if it knows they may not be as up-to-date as possible.
+
+RavenDB responds quickly to every client request, even if it involves re-indexing hundreds of thousands of documents. Since the previous request returned so quickly, the next query can be made a millisecond after that; results will be returned, but they will be marked as `Stale`.
+
+
+You can read more about stale indexes [here](../indexes/stale-indexes.mdx).
+
+
+## Querying
+
+RavenDB uses `Raven Query Language (RQL)`, an SQL-like language, for querying. The easiest way for us would be to expose a method in which you could pass your RQL-flavored query as a string (we [did](../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery) that) and not bother about anything else.
+
+The fact is that we did not stop at this point. We went much further, by exposing querying methods that hide all the Lucene syntax complexity:
+
+
+
+
+{`const employees = await session.query({ indexName: "Employees/ByFirstName" })
+    .whereEquals("FirstName", "Robert")
+    .all();
+`}
+
+
+
+
+{`from index 'Employees/ByFirstName'
+where FirstName = 'Robert'
+`}
+
+
+
+
+You can also create queries by using [RawQuery](../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery). It is available as a part of the advanced session operations:
+
+
+
+
+{`const employees = await session.advanced
+    .rawQuery("from index 'Employees/ByFirstName' where FirstName = 'Robert'")
+    .all();
+`}
+
+
+
+
+{`from index 'Employees/ByFirstName'
+where FirstName = 'Robert'
+`}
+
+
+
+
+## Types of Indexes
+
+You probably know that indexes can be divided by their origin into `static` and `auto` indexes (if not, read about it [here](../indexes/creating-and-deploying.mdx)), but a more interesting division is by functionality. For this case we have `Map` and `Map-Reduce` indexes.
+
+`Map` indexes (sometimes referred to as simple indexes) contain one (or more) mapping functions that indicate which fields from documents should be indexed. They indicate which documents can be searched by which fields.
+
+`Map-Reduce` indexes allow complex aggregations to be performed in a two-step process: first by selecting appropriate records (using the Map function), then by applying a specified Reduce function to these records to produce a smaller set of results.
+
+
+You can read more about `Map` indexes [here](../indexes/map-indexes.mdx).
+
+
+
+More detailed information about `Map-Reduce` indexes can be found [here](../indexes/map-reduce-indexes.mdx).
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-csharp.mdx b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-csharp.mdx
new file mode 100644
index 0000000000..490d55a13e
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-csharp.mdx
@@ -0,0 +1,247 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Use the `Recurse` method to traverse the layers of a hierarchical document and index its fields.
+
+* In this Page:
+  * [Hierarchical data](../indexes/indexing-hierarchical-data.mdx#hierarchical-data)
+  * [Index hierarchical data](../indexes/indexing-hierarchical-data.mdx#index-hierarchical-data)
+  * [Query the index](../indexes/indexing-hierarchical-data.mdx#query-the-index)
+
+
+## Hierarchical data
+
+One significant advantage of document databases is their tendency not to impose limits on data structuring.
+**Hierarchical data structures** exemplify this quality well; for example, consider the commonly used comment thread, implemented using objects such as:
+
+
+
+{`public class BlogPost
+\{
+    public string Author \{ get; set; \}
+    public string Title \{ get; set; \}
+    public string Text \{ get; set; \}
+
+    // Blog post readers can leave comments
+    public List<BlogPostComment> Comments \{ get; set; \}
+\}
+
+public class BlogPostComment
+\{
+    public string Author \{ get; set; \}
+    public string Text \{ get; set; \}
+
+    // Allow nested comments, enabling replies to existing comments
+    public List<BlogPostComment> Comments \{ get; set; \}
+\}
+`}
+
+
+
+Readers of a post created using the above `BlogPost` structure can add `BlogPostComment` entries to the post's _Comments_ field,
+and readers of these comments can reply with comments of their own, creating a recursive hierarchical structure.
+
+For example, the following document, `BlogPosts/1-A`, represents a blog post by John that contains multiple layers of comments from various authors.
+
+`BlogPosts/1-A`:
+
+
+
+{`\{
+    "Author": "John",
+    "Title": "Post title..",
+    "Text": "Post text..",
+    "Comments": [
+        \{
+            "Author": "Moon",
+            "Text": "Comment text..",
+            "Comments": [
+                \{
+                    "Author": "Bob",
+                    "Text": "Comment text.."
+                \},
+                \{
+                    "Author": "Adel",
+                    "Text": "Comment text..",
+                    "Comments": \{
+                        "Author": "Moon",
+                        "Text": "Comment text.."
+                    \}
+                \}
+            ]
+        \}
+    ],
+    "@metadata": \{
+        "@collection": "BlogPosts"
+    \}
+\}
+`}
+
+
+
+
+
+## Index hierarchical data
+
+To index the elements of a hierarchical structure like the one above, use RavenDB's `Recurse` method.
+
+The sample index below shows how to use `Recurse` to traverse the comments in the post thread and index them by their authors.
+We can then [query the index](../indexes/indexing-hierarchical-data.mdx#query-the-index) for all blog posts that contain comments by specific authors. + + + + +{`public class BlogPosts_ByCommentAuthor : + AbstractIndexCreationTask +{ + public class IndexEntry + { + public IEnumerable Authors { get; set; } + } + + public BlogPosts_ByCommentAuthor() + { + Map = blogposts => + from blogpost in blogposts + let authors = Recurse(blogpost, x => x.Comments) + select new IndexEntry + { + Authors = authors.Select(x => x.Author) + }; + } +} +`} + + + + +{`public class BlogPosts_ByCommentAuthor_JS : AbstractJavaScriptIndexCreationTask +{ + public class Result + { + public string[] Authors { get; set; } + } + + public BlogPosts_ByCommentAuthor_JS() + { + Maps = new HashSet + { + @"map('BlogPosts', function (blogpost) { + + var authors = + recurse(blogpost.Comments, function(x) { + return x.Comments; + }) + .filter(function(comment) { + return comment.Author != null; + }) + .map(function(comment) { + return comment.Author; + }); + + return { + Authors: authors + }; + });" + }; + } +} +`} + + + + +{`store.Maintenance.Send(new PutIndexesOperation( + new IndexDefinition + { + Name = "BlogPosts/ByCommentAuthor", + Maps = + { + @"from blogpost in docs.BlogPosts + let authors = Recurse(blogpost, (Func)(x => x.Comments)) + let authorNames = authors.Select(x => x.Author) + select new + { + Authors = authorNames + }" + } + })); +`} + + + + + + +## Query the index + +The index can be queried for all blog posts that contain comments made by specific authors. + +**Query the index using code**: + + + + +{`List results = session + .Query() + // Query for all blog posts that contain comments by 'Moon': + .Where(x => x.Authors.Any(a => a == "Moon")) + .OfType() + .ToList(); +`} + + + + +{`List results = await asyncSession + .Query() + // Query for all blog posts that contain comments by 'Moon': + .Where(x => x.Authors.Any(a => a == "Moon")) + .OfType() + .ToListAsync(); +`} + + + + +{`List results = session + .Advanced + .DocumentQuery() + // Query for all blog posts that contain comments by 'Moon': + .WhereEquals("Authors", "Moon") + .ToList(); +`} + + + + +{`from index "BlogPosts/ByCommentAuthor" +where Authors == "Moon" +`} + + + + +**Query the index using Studio**: + + * Query the index from the Studio's [List of Indexes](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view) view: + + !["List of Indexes view"](./assets/list-of-indexes-view.png) + + * View the query results in the [Query](../studio/database/queries/query-view.mdx) view: + + !["Query View"](./assets/query-view.png) + + * View the list of terms indexed by the `Recurse` method: + + !["Click to View Index Terms"](./assets/click-to-view-terms.png) + + !["Index Terms"](./assets/index-terms.png) + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-java.mdx b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-java.mdx new file mode 100644 index 0000000000..930ab20d7a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-java.mdx @@ -0,0 +1,158 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +One of the greatest advantages of a document database is that we have very few limits on how we structure our data. +One very common scenario is the usage of hierarchical data structures. 
+The most trivial of them is the comment thread: + + + +{`public static class BlogPost \{ + private String author; + private String title; + private String text; + private List comments; + + public String getAuthor() \{ + return author; + \} + + public void setAuthor(String author) \{ + this.author = author; + \} + + public String getTitle() \{ + return title; + \} + + public void setTitle(String title) \{ + this.title = title; + \} + + public String getText() \{ + return text; + \} + + public void setText(String text) \{ + this.text = text; + \} + + public List getComments() \{ + return comments; + \} + + public void setComments(List comments) \{ + this.comments = comments; + \} +\} + +public static class BlogPostComment \{ + private String author; + private String text; + private List comments; + + + public String getAuthor() \{ + return author; + \} + + public void setAuthor(String author) \{ + this.author = author; + \} + + public String getText() \{ + return text; + \} + + public void setText(String text) \{ + this.text = text; + \} + + public List getComments() \{ + return comments; + \} + + public void setComments(List comments) \{ + this.comments = comments; + \} +\} +`} + + + +While it is very easy to work with such a structure in all respects, it does bring up an interesting question, +namely how can we search for all blog posts that were commented by specified author? + +The answer to that is that RavenDB contains built-in support for indexing hierarchies, +and you can take advantage of the `Recurse` method to define an index using the following syntax: + + + + +{`public static class BlogPosts_ByCommentAuthor extends AbstractIndexCreationTask { + public BlogPosts_ByCommentAuthor() { + map = "docs.BlogPosts.Select(post => new { " + + " authors = this.Recurse(post, x => x.comments).Select(x0 => x0.author) " + + "})"; + } +} +`} + + + + +{`IndexDefinition indexDefinition = new IndexDefinition(); +indexDefinition.setName("BlogPosts/ByCommentAuthor"); +indexDefinition.setMaps(Collections.singleton( + "from post in docs.Posts" + + " from comment in Recurse(post, (Func)(x => x.comments)) " + + " select new " + + " { " + + " author = comment.author " + + " }" +)); +store.maintenance().send(new PutIndexesOperation(indexDefinition)); +`} + + + + +{`public static class BlogPosts_ByCommentAuthor extends AbstractJavaScriptIndexCreationTask { + public BlogPosts_ByCommentAuthor() { + setMaps(Sets.newHashSet("map('BlogPosts', function(b){\\n" + + " var names = [];\\n" + + " b.comments.forEach(x => getNames(x, names));\\n" + + " return {\\n" + + " authors : names\\n" + + " };" + + " })")); + + java.util.Map additionalSources = new HashMap<>(); + additionalSources.put("The Script", "function getNames(x, names){\\n" + + " names.push(x.author);\\n" + + " x.comments.forEach(x => getNames(x, names));\\n" + + " }"); + + setAdditionalSources(additionalSources); + } +} +`} + + + + +This will index all the comments in the thread, regardless of their location in the hierarchy. 
+ + + +{`List results = session + .query(BlogPost.class, BlogPosts_ByCommentAuthor.class) + .whereEquals("authors", "Ayende Rahien") + .toList(); +`} + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-nodejs.mdx b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-nodejs.mdx new file mode 100644 index 0000000000..45f87fdf89 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-nodejs.mdx @@ -0,0 +1,180 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `Recurse` method to traverse the layers of a hierarchical document and index its fields. + +* In this Page: + * [Hierarchical data](../indexes/indexing-hierarchical-data.mdx#hierarchical-data) + * [Index hierarchical data](../indexes/indexing-hierarchical-data.mdx#index-hierarchical-data) + * [Query the index](../indexes/indexing-hierarchical-data.mdx#query-the-index) + + +## Hierarchical data + +One significant advantage of document databases is their tendency not to impose limits on data structuring. +**Hierarchical data structures** exemplify this quality well; for example, consider the commonly used comment thread, implemented using objects such as: + + + +{`class BlogPost \{ + constructor(title, author, text, comments) \{ + this.title = title; + this.author = author; + this.text = text; + + // Blog post readers can leave comments + this.comments = comments; + \} +\} + +class BlogPostComment \{ + constructor(author, text, comments) \{ + this.author = author; + this.text = text; + + // Allow nested comments, enabling replies to existing comments + this.comments = comments; + \} +\} +`} + + + +Readers of a post created using the above `BlogPost` structure can add `BlogPostComment` entries to the post's _comments_ field, +and readers of these comments can reply with comments of their own, creating a recursive hierarchical structure. + +For example, the following document, `BlogPosts/1-A`, represents a blog post by John that contains multiple layers of comments from various authors. + +`BlogPosts/1-A`: + + + +{`\{ + "author": "John", + "title": "Post title..", + "text": "Post text..", + "comments": [ + \{ + "author": "Moon", + "text": "Comment text..", + "comments": [ + \{ + "author": "Bob", + "text": "Comment text.." + \}, + \{ + "author": "Adel", + "text": "Comment text..", + "comments": \{ + "author": "Moon", + "text": "Comment text.." + \} + \} + ] + \} + ], + "@metadata": \{ + "@collection": "BlogPosts" + \} +\} +`} + + + + + +## Index hierarchical data + +To index the elements of a hierarchical structure like the one above, use RavenDB's `Recurse` method. + +The sample index below shows how to use `Recurse` to traverse the comments in the post thread and index them by their authors. +We can then [query the index](../indexes/indexing-hierarchical-data.mdx#query-the-index) for all blog posts that contain comments by specific authors. 
+ + + + +{`class BlogPosts_ByCommentAuthor extends AbstractCsharpIndexCreationTask { + constructor() { + super(); + + this.map = \` + docs.BlogPosts.Select(post => new { + authors = this.Recurse(post, x => x.comments).Select(x0 => x0.author) + })\`; + } +} +`} + + + + +{`const indexDefinition = new IndexDefinition(); + +indexDefinition.name = "BlogPosts/ByCommentAuthor"; +indexDefinition.maps = new Set([ + \`from blogpost in docs.BlogPosts + let authors = Recurse(blogpost, (Func)(x => x.comments)) + let authorNames = authors.Select(x => x.author) + select new + { + Authors = authorNames + }\` +]); + +await store.maintenance.send(new PutIndexesOperation(indexDefinition)); +`} + + + + + + +## Query the index + +The index can be queried for all blog posts that contain comments made by specific authors. + +**Query the index using code**: + + + + +{`const results = await session + .query({ indexName: "BlogPosts/ByCommentAuthor" }) + // Query for all blog posts that contain comments by 'Moon': + .whereEquals("authors", "Moon") + .all(); +`} + + + + +{`from index "BlogPosts/ByCommentAuthor" +where authors == "Moon" +`} + + + + +**Query the index using Studio**: + + * Query the index from the Studio's [List of Indexes](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view) view: + + !["List of Indexes view"](./assets/list-of-indexes-view.png) + + * View the query results in the [Query](../studio/database/queries/query-view.mdx) view: + + !["Query View"](./assets/query-view.png) + + * View the list of terms indexed by the `Recurse` method: + + !["Click to View Index Terms"](./assets/click-to-view-terms.png) + + !["Index Terms"](./assets/index-terms.png) + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-php.mdx b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-php.mdx new file mode 100644 index 0000000000..aab5ed3dfa --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-php.mdx @@ -0,0 +1,280 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `Recurse` method to traverse the layers of a hierarchical document and index its fields. + +* In this Page: + * [Hierarchical data](../indexes/indexing-hierarchical-data.mdx#hierarchical-data) + * [Index hierarchical data](../indexes/indexing-hierarchical-data.mdx#index-hierarchical-data) + * [Query the index](../indexes/indexing-hierarchical-data.mdx#query-the-index) + + +## Hierarchical data + +One significant advantage of document databases is their tendency not to impose limits on data structuring. 
+**Hierarchical data structures** exemplify this quality well; for example, consider the commonly used comment thread, implemented using objects such as: + + + +{`class BlogPost +\{ + private ?string $author = null; + private ?string $title = null; + private ?string $text = null; + + // Blog post readers can leave comments + public ?BlogPostCommentList $comments = null; + + public function getAuthor(): ?string + \{ + return $this->author; + \} + + public function setAuthor(?string $author): void + \{ + $this->author = $author; + \} + + public function getTitle(): ?string + \{ + return $this->title; + \} + + public function setTitle(?string $title): void + \{ + $this->title = $title; + \} + + public function getText(): ?string + \{ + return $this->text; + \} + + public function setText(?string $text): void + \{ + $this->text = $text; + \} + + public function getComments(): ?BlogPostCommentList + \{ + return $this->comments; + \} + + public function setComments(?BlogPostCommentList $comments): void + \{ + $this->comments = $comments; + \} +\} + +class BlogPostComment +\{ + private ?string $author = null; + private ?string $text = null; + + // Comments can be left recursively + private ?BlogPostCommentList $comments = null; + + public function getAuthor(): ?string + \{ + return $this->author; + \} + + public function setAuthor(?string $author): void + \{ + $this->author = $author; + \} + + public function getText(): ?string + \{ + return $this->text; + \} + + public function setText(?string $text): void + \{ + $this->text = $text; + \} + + public function getComments(): ?BlogPostCommentList + \{ + return $this->comments; + \} + + public function setComments(?BlogPostCommentList $comments): void + \{ + $this->comments = $comments; + \} +\} + +class BlogPostCommentList extends TypedList +\{ + public function __construct() + \{ + parent::__construct(BlogPost::class); + \} +\} +`} + + + +Readers of a post created using the above `BlogPost` structure can add `BlogPostComment` entries to the post's _comments_ field, +and readers of these comments can reply with comments of their own, creating a recursive hierarchical structure. + +For example, the following document, `BlogPosts/1-A`, represents a blog post by John that contains multiple layers of comments from various authors. + +`BlogPosts/1-A`: + + + +{`\{ + "Author": "John", + "Title": "Post title..", + "Text": "Post text..", + "Comments": [ + \{ + "Author": "Moon", + "Text": "Comment text..", + "Comments": [ + \{ + "Author": "Bob", + "Text": "Comment text.." + \}, + \{ + "Author": "Adel", + "Text": "Comment text..", + "Comments": \{ + "Author": "Moon", + "Text": "Comment text.." + \} + \} + ] + \} + ], + "@metadata": \{ + "@collection": "BlogPosts" + \} +\} +`} + + + + + +## Index hierarchical data + +To index the elements of a hierarchical structure like the one above, use RavenDB's `Recurse` method. + +The sample index below shows how to use `Recurse` to traverse the comments in the post thread and index them by their authors. +We can then [query the index](../indexes/indexing-hierarchical-data.mdx#query-the-index) for all blog posts that contain comments by specific authors. 
+ + + + +{`class BlogPosts_ByCommentAuthor_Result +{ + private ?StringArray $authors = null; + + public function getAuthors(): ?StringArray + { + return $this->authors; + } + + public function setAuthors(?StringArray $authors): void + { + $this->authors = $authors; + } +} + +class BlogPosts_ByCommentAuthor extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "from blogpost in docs.Blogposts let authors = Recurse(blogpost, x => x.comments) select new { authors = authors.Select(x => x.author) }"; + } +} +`} + + + + +{`$indexDefinition = new IndexDefinition(); +$indexDefinition->setName("BlogPosts/ByCommentAuthor"); +$indexDefinition->setMaps([ + "from blogpost in docs.BlogPosts + from comment in Recurse(blogpost, (Func)(x => x.Comments)) + select new + { + Author = comment.Author + }" +]); + +$store->maintenance()->send(new PutIndexesOperation($indexDefinition)); +`} + + + + + + +## Query the index + +The index can be queried for all blog posts that contain comments made by specific authors. + +**Query the index using code**: + + + + +{`/** @var array $results */ +$results = $session + ->query(BlogPosts_ByCommentAuthor_Result::class, BlogPosts_ByCommentAuthor::class) + ->whereEquals("authors", "john") + ->ofType(BlogPost::class) + ->toList(); +`} + + + + +{`/** @var array $results */ +$results = $session + ->advanced() + ->documentQuery(BlogPost::class, BlogPosts_ByCommentAuthor::class) + ->whereEquals("authors", "John") + ->toList(); +`} + + + + +{`from index "BlogPosts/ByCommentAuthor" +where Authors == "Moon" +`} + + + + +**Query the index using Studio**: + + * Query the index from Studio's [List of Indexes](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view) view: + + !["List of Indexes view"](./assets/list-of-indexes-view.png) + + * View the query results in the [Query](../studio/database/queries/query-view.mdx) view: + + !["Query View"](./assets/query-view.png) + + * View the list of terms indexed by the `Recurse` method: + + !["Click to View Index Terms"](./assets/click-to-view-terms.png) + + !["Index Terms"](./assets/index-terms.png) + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-python.mdx b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-python.mdx new file mode 100644 index 0000000000..4c5594a1fb --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-hierarchical-data-python.mdx @@ -0,0 +1,176 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Use the `Recurse` method to traverse the layers of a hierarchical document and index its fields. + +* In this Page: + * [Hierarchical data](../indexes/indexing-hierarchical-data.mdx#hierarchical-data) + * [Index hierarchical data](../indexes/indexing-hierarchical-data.mdx#index-hierarchical-data) + * [Query the index](../indexes/indexing-hierarchical-data.mdx#query-the-index) + + +## Hierarchical data + +One significant advantage of document databases is their tendency not to impose limits on data structuring. 
+**Hierarchical data structures** exemplify this quality well; for example, consider the commonly used comment thread, implemented using objects such as:
+
+
+
+{`class BlogPost:
+    def __init__(self, author: str = None, title: str = None, text: str = None, comments: List[BlogPostComment] = None):
+        self.author = author
+        self.title = title
+        self.text = text
+
+        # Blog post readers can leave comments
+        self.comments = comments
+
+
+class BlogPostComment:
+    def __init__(self, author: str = None, text: str = None, comments: List[BlogPostComment] = None):
+        self.author = author
+        self.text = text
+
+        # Allow nested comments, enabling replies to existing comments
+        self.comments = comments
+`}
+
+
+
+Readers of a post created using the above `BlogPost` structure can add `BlogPostComment` entries to the post's _comments_ field,
+and readers of these comments can reply with comments of their own, creating a recursive hierarchical structure.
+
+For example, the following document, `BlogPosts/1-A`, represents a blog post by John that contains multiple layers of comments from various authors.
+
+`BlogPosts/1-A`:
+
+
+
+{`\{
+    "Author": "John",
+    "Title": "Post title..",
+    "Text": "Post text..",
+    "Comments": [
+        \{
+            "Author": "Moon",
+            "Text": "Comment text..",
+            "Comments": [
+                \{
+                    "Author": "Bob",
+                    "Text": "Comment text.."
+                \},
+                \{
+                    "Author": "Adel",
+                    "Text": "Comment text..",
+                    "Comments": \{
+                        "Author": "Moon",
+                        "Text": "Comment text.."
+                    \}
+                \}
+            ]
+        \}
+    ],
+    "@metadata": \{
+        "@collection": "BlogPosts"
+    \}
+\}
+`}
+
+
+
+
+
+## Index hierarchical data
+
+To index the elements of a hierarchical structure like the one above, use RavenDB's `Recurse` method.
+
+The sample index below shows how to use `Recurse` to traverse the comments in the post thread and index them by their authors.
+We can then [query the index](../indexes/indexing-hierarchical-data.mdx#query-the-index) for all blog posts that contain comments by specific authors.
+
+
+
+
+{`class BlogPosts_ByCommentAuthor(AbstractIndexCreationTask):
+    class Result:
+        def __init__(self, authors: List[str] = None):
+            self.authors = authors
+
+    def __init__(self):
+        super().__init__()
+        self.map = "from blogpost in docs.Blogposts let authors = Recurse(blogpost, x => x.comments) select new { authors = authors.Select(x => x.author) }"
+`}
+
+
+
+
+{`store.maintenance.send(
+    PutIndexesOperation(
+        IndexDefinition(
+            name="BlogPosts/ByCommentAuthor",
+            maps={
+                """from blogpost in docs.BlogPosts
+from comment in Recurse(blogpost, (Func)(x => x.comments))
+select new
+{
+    comment.author
+}"""
+            },
+        )
+    )
+)
+`}
+
+
+
+
+
+
+## Query the index
+
+The index can be queried for all blog posts that contain comments made by specific authors.
+
+**Query the index using code**:
+
+
+
+
+{`results = list(
+    session.query_index_type(BlogPosts_ByCommentAuthor, BlogPosts_ByCommentAuthor.Result).where_equals(
+        "authors", "Moon"
+    )
+)
+`}
+
+
+
+
+{`from index "BlogPosts/ByCommentAuthor"
+where Authors == "Moon"
+`}
+
+
+
+
+**Query the index using Studio**:
+
+  * Query the index from Studio's [List of Indexes](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view) view:
+
+    !["List of Indexes view"](./assets/list-of-indexes-view.png)
+
+  * View the query results in the [Query](../studio/database/queries/query-view.mdx) view:
+
+    !["Query View"](./assets/query-view.png)
+
+  * View the list of terms indexed by the `Recurse` method:
+
+    !["Click to View Index Terms"](./assets/click-to-view-terms.png)
+
+    !["Index Terms"](./assets/index-terms.png)
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-linq-extensions-csharp.mdx b/versioned_docs/version-7.1/indexes/_indexing-linq-extensions-csharp.mdx
new file mode 100644
index 0000000000..9c105762f6
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-linq-extensions-csharp.mdx
@@ -0,0 +1,143 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Various indexing LINQ extensions are available to enhance the usability and reduce the complexity of the indexing functions. The available extensions are:
+
+- [Boost](../indexes/indexing-linq-extensions.mdx#boost)
+- [Reverse](../indexes/indexing-linq-extensions.mdx#reverse)
+- [IfEntityIs](../indexes/indexing-linq-extensions.mdx#ifentityis)
+- [WhereEntityIs](../indexes/indexing-linq-extensions.mdx#whereentityis)
+- [ParseInt, ParseLong, ParseDecimal, ParseDouble](../indexes/indexing-linq-extensions.mdx#parsing-numbers)
+
+## **Boost**
+
+You can read more about boosting [here](../indexes/boosting.mdx).
+
+
+
+## **Reverse**
+
+**Strings** and **enumerables** can be reversed by using the `Reverse` extension.
+
+
+
+
+{`public class Employees_ByReversedFirstName : AbstractIndexCreationTask<Employee>
+{
+    public Employees_ByReversedFirstName()
+    {
+        Map = employees => from employee in employees
+                           select new
+                           {
+                               FirstName = employee.FirstName.Reverse()
+                           };
+    }
+}
+`}
+
+
+
+
+{`IList<Employee> results = session
+    .Query<Employee, Employees_ByReversedFirstName>()
+    .Where(x => x.FirstName == "treboR")
+    .ToList();
+`}
+
+
+
+
+
+
+## **WhereEntityIs**
+
+`WhereEntityIs` can be used to check whether the `Raven-Entity-Name` metadata value of the given document matches any of the given values. This can be useful when indexing polymorphic data. Please visit our dedicated article to get more information (or click [here](../indexes/indexing-polymorphic-data.mdx#other-ways)).
+
+
+
+## **IfEntityIs**
+
+`IfEntityIs` is similar to `WhereEntityIs`, yet it checks only against one value.
+
+
+
+## **Parsing numbers**
+
+String values can be safely parsed to `int`, `long`, `decimal` and `double` using the appropriate methods:
+
+- ParseInt
+- ParseLong
+- ParseDecimal
+- ParseDouble
+
+There are two overloads for each method: the first one returns the type's default value in case of a parsing failure, while the second one accepts the value that should be returned when a failure occurs.
+
+
+
+
+{`public class Item_Parse : AbstractIndexCreationTask<Item, Item_Parse.Result>
+{
+    public class Result
+    {
+        public int MajorWithDefault { get; set; }
+
+        public int MajorWithCustomDefault { get; set; }
+    }
+
+    public Item_Parse()
+    {
+        Map = items => from item in items
+                       let parts = item.Version.Split('.', StringSplitOptions.None)
+                       select new
+                       {
+                           MajorWithDefault = parts[0].ParseInt(), // will return default(int) in case of parsing failure
+                           MajorWithCustomDefault = parts[0].ParseInt(-1) // will return -1 in case of parsing failure
+                       };
+
+        StoreAllFields(FieldStorage.Yes);
+    }
+}
+`}
+
+
+
+
+{`public class Item
+{
+    public string Version { get; set; }
+}
+`}
+
+
+
+
+{`session.Store(new Item { Version = "3.0.1" });
+session.Store(new Item { Version = "Unknown" });
+
+session.SaveChanges();
+
+var results = session
+    .Query<Item_Parse.Result, Item_Parse>()
+    .ToList();
+
+Assert.Equal(2, results.Count);
+Assert.True(results.Any(x => x.MajorWithDefault == 3));
+Assert.True(results.Any(x => x.MajorWithCustomDefault == 3));
+Assert.True(results.Any(x => x.MajorWithDefault == 0));
+Assert.True(results.Any(x => x.MajorWithCustomDefault == -1));
+`}
+
+
+
+
+## Remarks
+
+
+By default, index fields are not stored (`FieldStorage.No`); calling `StoreAllFields(FieldStorage.Yes)` overrides this for all fields. Keep in mind that storing fields will increase disk space usage.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-linq-extensions-java.mdx b/versioned_docs/version-7.1/indexes/_indexing-linq-extensions-java.mdx
new file mode 100644
index 0000000000..cc23be4932
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-linq-extensions-java.mdx
@@ -0,0 +1,165 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Various indexing LINQ extensions are available to enhance the usability and reduce the complexity of the indexing functions. The available extensions are:
+
+- [Boost](../indexes/indexing-linq-extensions.mdx#boost)
+- [Reverse](../indexes/indexing-linq-extensions.mdx#reverse)
+- [IfEntityIs](../indexes/indexing-linq-extensions.mdx#ifentityis)
+- [WhereEntityIs](../indexes/indexing-linq-extensions.mdx#whereentityis)
+- [ParseInt, ParseLong, ParseDecimal, ParseDouble](../indexes/indexing-linq-extensions.mdx#parsing-numbers)
+
+## **Boost**
+
+You can read more about boosting [here](../indexes/boosting.mdx).
+
+
+
+## **Reverse**
+
+**Strings** and **enumerables** can be reversed by using the `Reverse` extension.
+
+
+
+
+{`public static class Employees_ByReversedFirstName extends AbstractIndexCreationTask {
+    public Employees_ByReversedFirstName() {
+        map = "docs.Employees.Select(employee => new { " +
+            "    FirstName = employee.FirstName.Reverse() " +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`List<Employee> results = session
+    .query(Employee.class, Employees_ByReversedFirstName.class)
+    .whereEquals("FirstName", "treboR")
+    .toList();
+`}
+
+
+
+
+
+
+## **WhereEntityIs**
+
+`WhereEntityIs` can be used to check whether the `Raven-Entity-Name` metadata value of a given document matches any of the supplied values. This can be useful when indexing polymorphic data. See [indexing polymorphic data](../indexes/indexing-polymorphic-data.mdx#other-ways) for more information.
+
+
+
+## **IfEntityIs**
+
+`IfEntityIs` is similar to `WhereEntityIs`, but it checks against a single value only.
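+
+For illustration, a map definition using these extensions might look like the following hedged sketch (not taken from the docs' samples): the `Dogs` and `Cats` collection names and the `Name` field are hypothetical, and the map is the usual C# LINQ string that the Java client passes verbatim to the server:
+
+
+{`// Hypothetical polymorphic index covering two collections.
+// WhereEntityIs("Dogs", "Cats") keeps only documents whose
+// Raven-Entity-Name metadata matches one of the given values.
+map = "from animal in docs.WhereEntityIs(\\"Dogs\\", \\"Cats\\") " +
+    "select new { Name = animal.Name }";
+`}
+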
+
+
+
+## **Parsing numbers**
+
+String values can be safely parsed to `int`, `long`, `decimal` and `double` using the appropriate methods:
+
+- ParseInt,
+- ParseLong,
+- ParseDecimal,
+- ParseDouble
+
+There are two overloads for each method: the first returns the type's default value when parsing fails; the second accepts a custom value to return when parsing fails.
+
+
+
+
+{`public static class Item_Parse extends AbstractIndexCreationTask {
+    public static class Result {
+        private int majorWithDefault;
+        private int majorWithCustomDefault;
+
+        public int getMajorWithDefault() {
+            return majorWithDefault;
+        }
+
+        public void setMajorWithDefault(int majorWithDefault) {
+            this.majorWithDefault = majorWithDefault;
+        }
+
+        public int getMajorWithCustomDefault() {
+            return majorWithCustomDefault;
+        }
+
+        public void setMajorWithCustomDefault(int majorWithCustomDefault) {
+            this.majorWithCustomDefault = majorWithCustomDefault;
+        }
+    }
+
+    public Item_Parse() {
+        map = "docs.Items.Select(item => new {" +
+            "    item = item, " +
+            "    parts = item.version.Split('.', System.StringSplitOptions.None) " +
+            "}).Select(this0 => new { " +
+            "    majorWithDefault = this0.parts[0].ParseInt(), " + // will return default(int) in case of parsing failure
+            "    majorWithCustomDefault = this0.parts[0].ParseInt(-1) " + // will return -1 in case of parsing failure
+            "})";
+
+        storeAllFields(FieldStorage.YES);
+    }
+}
+`}
+
+
+
+
+{`public static class Item {
+    private String version;
+
+    public String getVersion() {
+        return version;
+    }
+
+    public void setVersion(String version) {
+        this.version = version;
+    }
+}
+`}
+
+
+
+
+{`Item item1 = new Item();
+item1.setVersion("3.0.1");
+
+Item item2 = new Item();
+item2.setVersion("Unknown");
+
+session.store(item1);
+session.store(item2);
+
+session.saveChanges();
+
+List<Item_Parse.Result> results = session
+    .query(Item_Parse.Result.class, Item_Parse.class)
+    .toList();
+
+Assert.assertEquals(2, results.size());
+Assert.assertTrue(results.stream().anyMatch(x -> x.getMajorWithDefault() == 3));
+Assert.assertTrue(results.stream().anyMatch(x -> x.getMajorWithCustomDefault() == 3));
+Assert.assertTrue(results.stream().anyMatch(x -> x.getMajorWithDefault() == 0));
+Assert.assertTrue(results.stream().anyMatch(x -> x.getMajorWithCustomDefault() == -1));
+`}
+
+
+
+
+
+
+## Remarks
+
+
+By default, index fields are not stored (`FieldStorage.NO`); calling `storeAllFields(FieldStorage.YES)` overrides this for all fields. Keep in mind that storing fields will increase disk space usage.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-linq-extensions-nodejs.mdx b/versioned_docs/version-7.1/indexes/_indexing-linq-extensions-nodejs.mdx
new file mode 100644
index 0000000000..443e39f7fa
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-linq-extensions-nodejs.mdx
@@ -0,0 +1,132 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Various indexing LINQ extensions are available to enhance the usability and reduce the complexity of the indexing functions.
The available extensions are:
+
+- [Boost](../indexes/indexing-linq-extensions.mdx#boost)
+- [Reverse](../indexes/indexing-linq-extensions.mdx#reverse)
+- [IfEntityIs](../indexes/indexing-linq-extensions.mdx#ifentityis)
+- [WhereEntityIs](../indexes/indexing-linq-extensions.mdx#whereentityis)
+- [ParseInt, ParseLong, ParseDecimal, ParseDouble](../indexes/indexing-linq-extensions.mdx#parsing-numbers)
+
+## **Boost**
+
+You can read more about boosting [here](../indexes/boosting.mdx).
+
+
+
+## **Reverse**
+
+**Strings** and **enumerables** can be reversed by using the `Reverse` extension.
+
+
+
+
+{`class Employees_ByReversedFirstName extends AbstractIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map = "docs.Employees.Select(employee => new { " +
+            "    FirstName = employee.FirstName.Reverse() " +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`const results = await session
+    .query({ indexName: "Employees/ByReversedFirstName" })
+    .whereEquals("firstName", "treboR")
+    .all();
+`}
+
+
+
+
+
+
+## **WhereEntityIs**
+
+`WhereEntityIs` can be used to check whether the `Raven-Entity-Name` metadata value of a given document matches any of the supplied values. This can be useful when indexing polymorphic data. See [indexing polymorphic data](../indexes/indexing-polymorphic-data.mdx#other-ways) for more information.
+
+
+
+## **IfEntityIs**
+
+`IfEntityIs` is similar to `WhereEntityIs`, but it checks against a single value only.
+
+
+
+## **Parsing numbers**
+
+String values can be safely parsed to `int`, `long`, `decimal` and `double` using the appropriate methods:
+
+- ParseInt,
+- ParseLong,
+- ParseDecimal,
+- ParseDouble
+
+There are two overloads for each method: the first returns the type's default value when parsing fails; the second accepts a custom value to return when parsing fails.
+ + + + +{`class Item_Parse extends AbstractIndexCreationTask { + constructor() { + super(); + + this.map = "docs.Items.Select(item => new {" + + " item = item, " + + " parts = item.version.Split('.', System.StringSplitOptions.None) " + + "}).Select(this0 => new { " + + " majorWithDefault = this0.parts[0].ParseInt(), " + // will return default(int) in case of parsing failure + " majorWithCustomDefault = this0.parts[0].ParseInt(-1) " + // will return -1 in case of parsing failure + "})"; + + this.storeAllFields("Yes"); + } +} +`} + + + + +{`class Item { + constructor(version) { + this.version = version; + } +} +`} + + + + +{`const item1 = new Item("3.0.1"); +const item2 = new Item("Unknown"); + +await session.store(item1); +await session.store(item2); + +await session.saveChanges(); + +const results = await session + .query({ indexName: "Item/Parse" }) + .all(); + +assert.strictEqual(2, results.length); +assert.ok(results.some(x => x.majorWithDefault === 3)); +assert.ok(results.some(x => x.majorWithCustomDefault === 3)); +assert.ok(results.some(x => x.majorWithDefault === 0)); +assert.ok(results.some(x => x.majorWithCustomDefault === -1)); +`} + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-metadata-csharp.mdx b/versioned_docs/version-7.1/indexes/_indexing-metadata-csharp.mdx new file mode 100644 index 0000000000..120f7fcb75 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-metadata-csharp.mdx @@ -0,0 +1,219 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Each document in the database includes a metadata section, stored in a special JSON object under the `@metadata` property. + +* This metadata is not part of the document's content but holds internal system information (used by RavenDB), + such as the document ID, collection name, change vector, last modified timestamp, and more, + as well as optional user-defined entries. + +* To learn how to access (get and modify) the metadata from your client code, + see [How to get and modify the metadata](../client-api/session/how-to/get-and-modify-entity-metadata.mdx). + +* Content from metadata properties can be extracted and **indexed** within a static index, alongside content from the document fields. + This allows you to query for documents based on values stored in the metadata. + See the examples below. +* In this article: + * [Indexing metadata properties](../indexes/indexing-metadata.mdx#indexing-metadata-properties) + * [Metadata properties that can be indexed](../indexes/indexing-metadata.mdx#metadata-properties-that-can-be-indexed) + + +## Indexing metadata properties + +* Use the `MetadataFor` method to access a document's metadata within the index definition, + as shown in the example below. + +* You can retrieve metadata values using one of two syntaxes: + + * **Generic method syntax** + Use `Value()` to retrieve and cast the metadata value to the expected type. + This is type-safe and preferred when the type is known (e.g., _DateTime_). + * **Indexer syntax** + Use `metadata["key"]` to retrieve the raw object. + You can cast it manually if needed. +* The following index definition indexes content from the `@last-modified` and `@counters` metadata properties. 
+ + + + +{`public class Products_ByMetadata_AccessViaValue : AbstractIndexCreationTask +{ + public class IndexEntry + { + public DateTime LastModified { get; set; } + public bool HasCounters { get; set; } + } + + public Products_ByMetadata_AccessViaValue() + { + Map = products => from product in products + // Use 'MetadataFor' to access the metadata object + let metadata = MetadataFor(product) + + // Define the index fields + select new IndexEntry() + { + // Access metadata properties using generic method + LastModified = metadata.Value( + // Specify the Client API Constant corresponding to '@last-modified' + Raven.Client.Constants.Documents.Metadata.LastModified), + + HasCounters = metadata.Value( + // Specify the Client API Constant corresponding to '@counters' + Raven.Client.Constants.Documents.Metadata.Counters) != null + }; + } +} +`} + + + + +{`public class Products_ByMetadata_AccessViaIndexer : AbstractIndexCreationTask +{ + public class IndexEntry + { + public DateTime LastModified { get; set; } + public bool HasCounters { get; set; } + } + + public Products_ByMetadata_AccessViaIndexer() + { + Map = products => from product in products + // Use 'MetadataFor' to access the metadata object + let metadata = MetadataFor(product) + + // Define the index fields + select new IndexEntry() + { + // Access metadata properties using indexer + LastModified = + // Specify the Client API Constant corresponding to '@last-modified' + (DateTime)metadata[Raven.Client.Constants.Documents.Metadata.LastModified], + + HasCounters = + // Specify the Client API Constant corresponding to '@counters' + metadata[Raven.Client.Constants.Documents.Metadata.Counters] != null + }; + } +} +`} + + + + +{`public class Products_ByMetadata_JS : AbstractJavaScriptIndexCreationTask +{ + public Products_ByMetadata_JS() + { + Maps = new HashSet + { + @"map('Products', function (product) { + var metadata = metadataFor(product); + + return { + LastModified: metadata['@last-modified'], + HasCounters: !!metadata['@counters'] + }; + })" + }; + } +} +`} + + + + +* Query for documents based on metadata values: + Retrieve documents that have counters and order them by their last modified timestamp. + + + + +{`List productsWithCounters = session + .Query() + .Where(x => x.HasCounters == true) + .OrderByDescending(x => x.LastModified) + .OfType() + .ToList(); +`} + + + + +{`List productsWithCounters = await asyncSession + .Query() + .Where(x => x.HasCounters == true) + .OrderByDescending(x => x.LastModified) + .OfType() + .ToListAsync(); +`} + + + + +{`List productsWithCounters = session.Advanced. + DocumentQuery() + .WhereEquals(x => x.HasCounters, true) + .OrderByDescending(x => x.LastModified) + .OfType() + .ToList(); +`} + + + + +{`from index "Products/ByMetadata/AccessViaValue" +where HasCounters == true +order by LastModified desc +`} + + + + + + +## Metadata properties that can be indexed + +* The table below lists **predefined metadata properties that can be indexed**. + +* Each property can be accessed using either a string literal (e.g. `"@last-modified"`) or the corresponding Client API constant (e.g. `Raven.Client.Constants.Documents.Metadata.LastModified`). + Using the Client API constant is recommended for clarity and to avoid typos. + +* You can add custom metadata properties to any document as needed. + These custom properties can be indexed just like the predefined ones. 
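+
+For example, here is a minimal, hedged sketch of indexing a user-defined metadata entry; the `"Priority"` metadata key and the index name are hypothetical, assuming your application writes such an entry to the document metadata:
+
+
+{`public class Products_ByCustomMetadata : AbstractIndexCreationTask<Product>
+{
+    public Products_ByCustomMetadata()
+    {
+        Map = products => from product in products
+                          // Access the metadata object of each document
+                          let metadata = MetadataFor(product)
+                          select new
+                          {
+                              // Index the custom (user-defined) metadata entry.
+                              // Documents without it will simply index a null value.
+                              Priority = metadata["Priority"]
+                          };
+    }
+}
+`}
+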
+ +| String literal | Client API Constant | +|---------------------|--------------------------------------------------------| +| `@archive-at` | Raven.Client.Constants.Documents.Metadata.ArchiveAt | +| `@attachments` | Raven.Client.Constants.Documents.Metadata.Attachments | +| `@change-vector` | Raven.Client.Constants.Documents.Metadata.ChangeVector | +| `@collection` | Raven.Client.Constants.Documents.Metadata.Collection | +| `@counters` | Raven.Client.Constants.Documents.Metadata.Counters | +| `@etag` | Raven.Client.Constants.Documents.Metadata.Etag | +| `@expires` | Raven.Client.Constants.Documents.Metadata.Expires | +| `@id` | Raven.Client.Constants.Documents.Metadata.Id | +| `@last-modified` | Raven.Client.Constants.Documents.Metadata.LastModified | +| `@refresh` | Raven.Client.Constants.Documents.Metadata.Refresh | +| `@timeseries` | Raven.Client.Constants.Documents.Metadata.TimeSeries | +| `Raven-Clr-Type` | Raven.Client.Constants.Documents.Metadata.RavenClrType | + + + +Note: + +* The `@attachments` metadata property can only be indexed using a **Lucene** index. +* The **Corax** search engine does not support indexing complex JSON properties. + Learn more in [Corax: Handling complex JSON objects](../indexes/search-engine/corax.mdx#handling-of-complex-json-objects). + + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-metadata-java.mdx b/versioned_docs/version-7.1/indexes/_indexing-metadata-java.mdx new file mode 100644 index 0000000000..cf89f63a42 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-metadata-java.mdx @@ -0,0 +1,67 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Each document in the database includes a metadata section, stored in a special JSON object under the `@metadata` property. + +* This metadata is not part of the document's content but holds internal system information (used by RavenDB), + such as the document ID, collection name, change vector, last modified timestamp, and more, + as well as optional user-defined entries. + +* To learn how to access (get and modify) the metadata from your client code, + see [How to get and modify the metadata](../client-api/session/how-to/get-and-modify-entity-metadata.mdx). + +* Content from metadata properties can be extracted and **indexed** within a static index, alongside content from the document fields. + This allows you to query for documents based on values stored in the metadata. + See the examples below. 
+* In this article: + * [Indexing metadata properties](../indexes/indexing-metadata.mdx#indexing-metadata-properties) + + +## Indexing metadata properties + + + +{`public static class Products_WithMetadata extends AbstractIndexCreationTask \{ + public static class Result \{ + private Date lastModified; + + public Date getLastModified() \{ + return lastModified; + \} + + public void setLastModified(Date lastModified) \{ + this.lastModified = lastModified; + \} + \} + + public Products_WithMetadata() \{ + map = "docs.Products.Select(product => new \{ " + + " Product = Product, " + + " Metadata = this.MetadataFor(product) " + + "\}).Select(this0 => new \{ " + + " LastModified = this0.metadata.Value(\\"@last-modified\\") " + + "\})"; + \} +\} +`} + + + + + +{`List results = session + .query(Products_WithMetadata.Result.class, Products_WithMetadata.class) + .orderByDescending("LastModified") + .ofType(Product.class) + .toList(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-metadata-nodejs.mdx b/versioned_docs/version-7.1/indexes/_indexing-metadata-nodejs.mdx new file mode 100644 index 0000000000..dcc2aa8e4a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-metadata-nodejs.mdx @@ -0,0 +1,110 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Each document in the database includes a metadata section, stored in a special JSON object under the `@metadata` property. + +* This metadata is not part of the document's content but holds internal system information (used by RavenDB), + such as the document ID, collection name, change vector, last modified timestamp, and more, + as well as optional user-defined entries. + +* To learn how to access (get and modify) the metadata from your client code, + see [How to get and modify the metadata](../client-api/session/how-to/get-and-modify-entity-metadata.mdx). + +* Content from metadata properties can be extracted and **indexed** within a static index, alongside content from the document fields. + This allows you to query for documents based on values stored in the metadata. + See the examples below. +* In this article: + * [Indexing metadata properties](../indexes/indexing-metadata.mdx#indexing-metadata-properties) + * [Metadata properties that can be indexed](../indexes/indexing-metadata.mdx#metadata-properties-that-can-be-indexed) + + +## Indexing metadata properties + +* Use the `getMetadata` method to access a document’s metadata, as shown in the example below. + +* The following index definition indexes content from the `@last-modified` and `@counters` metadata properties. + + + +{`class Products_ByMetadata extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + const \{ getMetadata \} = this.mapUtils(); + + this.map("Products", product => \{ + // Call 'getMetadata' to access the metadata object + const metadata = getMetadata(product); + + return \{ + // Define the index fields + LastModified: metadata['@last-modified'], + HasCounters: !!metadata['@counters'] + \}; + \}); + \} +\} +`} + + + +* Query for documents based on metadata values: + Retrieve documents that have counters and order them by their last modified timestamp. 
+ + + + +{`const productsWithCounters = await session + .query({ indexName: "Products/ByMetadata" }) + .whereEquals("HasCounters", true) + .orderByDescending("LastModified") + .all(); +`} + + + + +{`from index "Products/ByMetadata" +where HasCounters == true +order by LastModified desc +`} + + + + + + +## Metadata properties that can be indexed + +* The following are the **predefined metadata properties that can be indexed**: + * `@archive-at` + * `@attachments` + * `@change-vector` + * `@collection` + * `@counters` + * `@etag` + * `@expires` + * `@id` + * `@last-modified` + * `@refresh` + * `@timeseries` + * `Raven-Clr-Type` + +* You can add custom metadata properties to any document as needed. + These custom properties can be indexed just like the predefined ones. + + +Note: + +* The `@attachments` metadata property can only be indexed using a **Lucene** index. +* The **Corax** search engine does not support indexing complex JSON properties. + Learn more in [Corax: Handling complex JSON objects](../indexes/search-engine/corax.mdx#handling-of-complex-json-objects). + + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-metadata-php.mdx b/versioned_docs/version-7.1/indexes/_indexing-metadata-php.mdx new file mode 100644 index 0000000000..ce235154b6 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-metadata-php.mdx @@ -0,0 +1,185 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Each document in the database includes a metadata section, stored in a special JSON object under the `@metadata` property. + +* This metadata is not part of the document's content but holds internal system information (used by RavenDB), + such as the document ID, collection name, change vector, last modified timestamp, and more, + as well as optional user-defined entries. + +* To learn how to access (get and modify) the metadata from your client code, + see [How to get and modify the metadata](../client-api/session/how-to/get-and-modify-entity-metadata.mdx). + +* Content from metadata properties can be extracted and **indexed** within a static index, alongside content from the document fields. + This allows you to query for documents based on values stored in the metadata. + See the examples below. +* In this article: + * [Indexing metadata properties](../indexes/indexing-metadata.mdx#indexing-metadata-properties) + * [Metadata properties that can be indexed](../indexes/indexing-metadata.mdx#metadata-properties-that-can-be-indexed) + + +## Indexing metadata properties + +* To access a document's metadata, use the `MetadataFor` method, which is available in the **C# LINQ string** + that is assigned to the `map` property in the PHP index class, as shown in the example below. + +* You can retrieve metadata values using one of two C# syntaxes: + + * **Generic method syntax** + Use `Value()` to retrieve and cast the metadata value to the expected type. + This is type-safe and preferred when the type is known (e.g., _DateTime_). + * **Indexer syntax** + Use `metadata["key"]` to retrieve the raw object. + You can cast it manually if needed. +* The following index definition indexes content from the `@last-modified` and `@counters` metadata properties. 
+ + + + +{`class Products_ByMetadata_AccessViaValue_IndexEntry +{ + public ?DateTime $lastModified = null; + public function getLastModified(): ?DateTime + { + return $this->lastModified; + } + public function setLastModified(?DateTime $lastModified): void + { + $this->lastModified = $lastModified; + } + + public ?bool $hasCounters = null; + public function getHasCounters(): ?bool + { + return $this->hasCounters; + } + public function setHasCounters(?bool $hasCounters): void + { + $this->hasCounters = $hasCounters; + } +} + +class Products_ByMetadata_AccessViaValue extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "from product in docs.Products\\n" . + "let metadata = MetadataFor(product)\\n" . + "select new { " . + " lastModified = metadata.Value(\\"@last-modified\\"), " . + " hasCounters = metadata.Value(\\"@counters\\") != null " . + "}"; + } +} +`} + + + + +{`class Products_ByMetadata_AccessViaIndexer_IndexEntry +{ + public ?DateTime $lastModified = null; + public function getLastModified(): ?DateTime + { + return $this->lastModified; + } + public function setLastModified(?DateTime $lastModified): void + { + $this->lastModified = $lastModified; + } + + public ?bool $hasCounters = null; + public function getHasCounters(): ?bool + { + return $this->hasCounters; + } + public function setHasCounters(?bool $hasCounters): void + { + $this->hasCounters = $hasCounters; + } +} + +class Products_ByMetadata_AccessViaIndexer extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "from product in docs.Products " . + "let metadata = MetadataFor(product) " . + "select new {\\n" . + " lastModified = (DateTime)metadata[\\"@last-modified\\"],\\n" . + " hasCounters = metadata[\\"@counters\\"] != null }"; + } +} +`} + + + + +* Query for documents based on metadata values: + Retrieve documents that have counters and order them by their last modified timestamp. + + + + +{`$results = $session + ->query( + Products_ByMetadata_AccessViaValue_IndexEntry::class, + Products_ByMetadata_AccessViaValue::class + ) + ->whereEquals("hasCounters", true) + ->orderByDescending("lastModified") + ->ofType(Product::class) + ->toList(); +`} + + + + +{`from index "Products/ByMetadata/AccessViaValue" +where hasCounters == true +order by lastModified desc +`} + + + + + + +## Metadata properties that can be indexed + +* The following are the **predefined metadata properties that can be indexed**: + * `@archive-at` + * `@attachments` + * `@change-vector` + * `@collection` + * `@counters` + * `@etag` + * `@expires` + * `@id` + * `@last-modified` + * `@refresh` + * `@timeseries` + * `Raven-Clr-Type` + +* You can add custom metadata properties to any document as needed. + These custom properties can be indexed just like the predefined ones. + + +Note: + +* The `@attachments` metadata property can only be indexed using a **Lucene** index. +* The **Corax** search engine does not support indexing complex JSON properties. + Learn more in [Corax: Handling complex JSON objects](../indexes/search-engine/corax.mdx#handling-of-complex-json-objects). 
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-metadata-python.mdx b/versioned_docs/version-7.1/indexes/_indexing-metadata-python.mdx
new file mode 100644
index 0000000000..9488ce3126
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-metadata-python.mdx
@@ -0,0 +1,145 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Each document in the database includes a metadata section, stored in a special JSON object under the `@metadata` property.
+
+* This metadata is not part of the document's content but holds internal system information (used by RavenDB),
+  such as the document ID, collection name, change vector, last modified timestamp, and more,
+  as well as optional user-defined entries.
+
+* To learn how to access (get and modify) the metadata from your client code,
+  see [How to get and modify the metadata](../client-api/session/how-to/get-and-modify-entity-metadata.mdx).
+
+* Content from metadata properties can be extracted and **indexed** within a static index, alongside content from the document fields.
+  This allows you to query for documents based on values stored in the metadata.
+  See the examples below.
+* In this article:
+  * [Indexing metadata properties](../indexes/indexing-metadata.mdx#indexing-metadata-properties)
+  * [Metadata properties that can be indexed](../indexes/indexing-metadata.mdx#metadata-properties-that-can-be-indexed)
+
+
+## Indexing metadata properties
+
+* To access a document's metadata, use the `MetadataFor` method, which is available in the **C# LINQ string**
+  that is assigned to the `map` property in the Python index class, as shown in the example below.
+
+* You can retrieve metadata values using one of two C# syntaxes:
+
+  * **Generic method syntax**
+    Use `Value<T>()` to retrieve and cast the metadata value to the expected type.
+    This is type-safe and preferred when the type is known (e.g., _DateTime_).
+  * **Indexer syntax**
+    Use `metadata["key"]` to retrieve the raw object.
+    You can cast it manually if needed.
+* The following index definition indexes content from the `@last-modified` and `@counters` metadata properties.
+
+
+
+
+{`class Products_ByMetadata_AccessViaValue(AbstractIndexCreationTask):
+    class IndexEntry:
+        def __init__(self, last_modified: datetime.datetime = None, has_counters: bool = None):
+            self.last_modified = last_modified
+            self.has_counters = has_counters
+
+    def __init__(self):
+        super().__init__()
+        self.map = """
+        from product in docs.Products
+        let metadata = MetadataFor(product)
+
+        select new {
+            last_modified = metadata.Value<DateTime>("@last-modified"),
+            has_counters = metadata.Value<object>("@counters") != null
+        }
+        """
+`}
+
+
+
+
+{`class Products_ByMetadata_AccessViaIndexer(AbstractIndexCreationTask):
+    class IndexEntry:
+        def __init__(self, last_modified: datetime.datetime = None, has_counters: bool = None):
+            self.last_modified = last_modified
+            self.has_counters = has_counters
+
+    def __init__(self):
+        super().__init__()
+        self.map = """
+        from product in docs.Products
+        let metadata = MetadataFor(product)
+
+        select new
+        {
+            last_modified = (DateTime)metadata["@last-modified"],
+            has_counters = metadata["@counters"] != null
+        }
+        """
+`}
+
+
+
+
+* Query for documents based on metadata values:
+  Retrieve documents that have counters and order them by their last modified timestamp.
+ + + + +{`productsWithCounters = list( + session.query_index_type(Products_ByMetadata_AccessViaValue, + Products_ByMetadata_AccessViaValue.IndexEntry) + .where_equals("has_counters", True) + .order_by_descending("last_modified") + .of_type(Product) +) +`} + + + + +{`from index "Products/ByMetadata/AccessViaValue" +where has_counters == true +order by last_modified desc +`} + + + + + + +## Metadata properties that can be indexed + +* The following are the **predefined metadata properties that can be indexed**: + * `@archive-at` + * `@attachments` + * `@change-vector` + * `@collection` + * `@counters` + * `@etag` + * `@expires` + * `@id` + * `@last-modified` + * `@refresh` + * `@timeseries` + * `Raven-Clr-Type` + +* You can add custom metadata properties to any document as needed. + These custom properties can be indexed just like the predefined ones. + + +Note: + +* The `@attachments` metadata property can only be indexed using a **Lucene** index. +* The **Corax** search engine does not support indexing complex JSON properties. + Learn more in [Corax: Handling complex JSON objects](../indexes/search-engine/corax.mdx#handling-of-complex-json-objects). + + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-nested-data-csharp.mdx b/versioned_docs/version-7.1/indexes/_indexing-nested-data-csharp.mdx new file mode 100644 index 0000000000..64f96838de --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-nested-data-csharp.mdx @@ -0,0 +1,579 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* JSON documents can have nested structures, where one document contains other objects or arrays of objects. + +* Use a static-index to facilitate querying for documents based on the nested data. 
+ +* In this page: + + * [Sample data](../indexes/indexing-nested-data.mdx#sample-data) + + * [Simple index - SINGLE index-entry per document](../indexes/indexing-nested-data.mdx#simple-index---single-index-entry-per-document) + * [The index](../indexes/indexing-nested-data.mdx#theIndex) + * [The index-entries](../indexes/indexing-nested-data.mdx#theIndexEntries) + * [Querying the index](../indexes/indexing-nested-data.mdx#queryingTheIndex) + * [When to use](../indexes/indexing-nested-data.mdx#whenToUse) + + * [Fanout index - MULTIPLE index-entries per document](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document) + * [What is a fanout index](../indexes/indexing-nested-data.mdx#whatIsFanoutIndex) + * [Fanout index - Map index example](../indexes/indexing-nested-data.mdx#fanoutMapIndex) + * [Fanout index - Map-Reduce index example](../indexes/indexing-nested-data.mdx#fanoutMapReduceIndex) + * [Performance hints](../indexes/indexing-nested-data.mdx#performanceHints) + * [Paging](../indexes/indexing-nested-data.mdx#paging) + + +## Sample data + +* The examples in this article are based on the following **Classes** and **Sample Data**: + + + + +{`public class OnlineShop +{ + public string ShopName { get; set; } + public string Email { get; set; } + public List TShirts { get; set; } // Nested data +} + +public class TShirt +{ + public string Color { get; set; } + public string Size { get; set; } + public string Logo { get; set; } + public decimal Price { get; set; } + public int Sold { get; set; } +} +`} + + + + +{`// Creating sample data for the examples in this article: +// ====================================================== + +var onlineShops = new[] +{ + // Shop1 + new OnlineShop { ShopName = "Shop1", Email = "sales@shop1.com", TShirts = new List { + new TShirt { Color = "Red", Size = "S", Logo = "Bytes and Beyond", Price = 25, Sold = 2 }, + new TShirt { Color = "Red", Size = "M", Logo = "Bytes and Beyond", Price = 25, Sold = 4 }, + new TShirt { Color = "Blue", Size = "M", Logo = "Query Everything", Price = 28, Sold = 5 }, + new TShirt { Color = "Green", Size = "L", Logo = "Data Driver", Price = 30, Sold = 3} + }}, + // Shop2 + new OnlineShop { ShopName = "Shop2", Email = "sales@shop2.com", TShirts = new List { + new TShirt { Color = "Blue", Size = "S", Logo = "Coffee, Code, Repeat", Price = 22, Sold = 12 }, + new TShirt { Color = "Blue", Size = "M", Logo = "Coffee, Code, Repeat", Price = 22, Sold = 7 }, + new TShirt { Color = "Green", Size = "M", Logo = "Big Data Dreamer", Price = 25, Sold = 9 }, + new TShirt { Color = "Black", Size = "L", Logo = "Data Mining Expert", Price = 20, Sold = 11 } + }}, + // Shop3 + new OnlineShop { ShopName = "Shop3", Email = "sales@shop3.com", TShirts = new List { + new TShirt { Color = "Red", Size = "S", Logo = "Bytes of Wisdom", Price = 18, Sold = 2 }, + new TShirt { Color = "Blue", Size = "M", Logo = "Data Geek", Price = 20, Sold = 6 }, + new TShirt { Color = "Black", Size = "L", Logo = "Data Revolution", Price = 15, Sold = 8 }, + new TShirt { Color = "Black", Size = "XL", Logo = "Data Revolution", Price = 15, Sold = 10 } + }} +}; + +using (var session = store.OpenSession()) +{ + foreach (var shop in onlineShops) + { + session.Store(shop); + } + + session.SaveChanges(); +} +`} + + + + + + +## Simple index - Single index-entry per document + +* **The index**: + + +{`public class Shops_ByTShirt_Simple : AbstractIndexCreationTask +\{ + public class IndexEntry + \{ + // The index-fields: + public IEnumerable Colors \{ get; 
set; \} + public IEnumerable Sizes \{ get; set; \} + public IEnumerable Logos \{ get; set; \} + \} + + public Shops_ByTShirt_Simple() + \{ + Map = shops => from shop in shops + // Creating a SINGLE index-entry per document: + select new IndexEntry + \{ + // Each index-field will hold a collection of nested values from the document + Colors = shop.TShirts.Select(x => x.Color), + Sizes = shop.TShirts.Select(x => x.Size), + Logos = shop.TShirts.Select(x => x.Logo) + \}; + \} +\} +`} + + + +* **The index-entries**: + + ![Simple - index-entries](./assets/indexing-nested-data-1.png) + + 1. The index-entries content is visible from the Studio [Query view](../studio/database/queries/query-view.mdx). + + 2. Check option: _Show raw index-entries instead of Matching documents_. + + 3. Each row represents an **index-entry**. + The index has a single index-entry per document (3 entries in this example). + + 4. The index-field contains a collection of ALL nested values from the document. + e.g. The third **index-entry** has the following values in the _Colors_ **index-field**: + `{"black", "blue", "red"}` + +* **Querying the index**: + + + + +{`// Query for all shop documents that have a red TShirt +var shopsThatHaveRedShirts = session + .Query() + // Filter query results by a nested value + .Where(x => x.Colors.Contains("red")) + .OfType() + .ToList(); +`} + + + + +{`// Query for all shop documents that have a red TShirt +var shopsThatHaveRedShirts = await asyncSession + .Query() + // Filter query results by a nested value + .Where(x => x.Colors.Contains("red")) + .OfType() + .ToListAsync(); +`} + + + + +{`// Query for all shop documents that have a red TShirt +var shopsThatHaveRedShirts = session.Advanced + .DocumentQuery() + // Filter query results by a nested value + .ContainsAny(x => x.Colors, new[] { "Red" }) + .OfType() + .ToList(); +`} + + + + +{`from index "Shops/ByTShirt/Simple" +where Colors == "red" +`} + + + + + + +{`// Results will include the following shop documents: +// ================================================== +// * Shop1 +// * Shop3 +`} + + + +* **When to use**: + + * This type of index structure is effective for retrieving documents when filtering the query by any of the inner nested values that were indexed. + + * However, due to the way the index-entries are generated, this index **cannot** provide results for a query searching for documents that contain + specific sub-objects which satisfy some `AND` condition. + For example: + + +{`// You want to query for shops containing "Large Green TShirts", +// aiming to get only "Shop1" as a result since it has such a combination, +// so you attempt this query: +var GreenAndLarge = session + .Query() + .Where(x => x.Colors.Contains("green") && x.Sizes.Contains("L")) + .OfType() + .ToList(); + +// But, the results of this query will include BOTH "Shop1" & "Shop2" +// since the index-entries do not keep the original sub-objects structure. +`} + + + + * To address this, you must use a **Fanout index** - as described below. + + + +## Fanout index - Multiple index-entries per document + +* **What is a Fanout index**: + + * A fanout index is an index that outputs multiple index-entries per document. + A separate index-entry is created for each nested sub-object from the document. + + * The fanout index is useful when you need to retrieve documents matching query criteria + that search for specific sub-objects that comply with some logical conditions. 
+ +* **Fanout index - Map index example**: + + + + +{`// A fanout map-index: +// =================== +public class Shops_ByTShirt_Fanout : AbstractIndexCreationTask +{ + public class IndexEntry + { + // The index-fields: + public string Color { get; set; } + public string Size { get; set; } + public string Logo { get; set; } + } + + public Shops_ByTShirt_Fanout() + { + Map = shops => + from shop in shops + from shirt in shop.TShirts + // Creating MULTIPLE index-entries per document, + // an index-entry for each sub-object in the TShirts list + select new IndexEntry + { + Color = shirt.Color, + Size = shirt.Size, + Logo = shirt.Logo + }; + } +} +`} + + + + +{`public class Shops_ByTShirt_JS : AbstractJavaScriptIndexCreationTask +{ + public Shops_ByTShirt_JS() + { + Maps = new HashSet + { + @"map('OnlineShops', function (shop){ + var res = []; + shop.TShirts.forEach(shirt => { + res.push({ + Color: shirt.Color, + Size: shirt.Size, + Logo: shirt.Logo + }) + }); + return res; + })" + }; + } +} +`} + + + + + + + +{`// Query the fanout index: +// ======================= +var shopsThatHaveMediumRedShirts = session + .Query() + // Query for documents that have a "Medium Red TShirt" + .Where(x => x.Color == "red" && x.Size == "M") + .OfType() + .ToList(); +`} + + + + +{`// Query the fanout index: +// ======================= +var shopsThatHaveMediumRedShirts = await asyncSession + .Query() + // Query for documents that have a "Medium Red TShirt" + .Where(x => x.Color == "red" && x.Size == "M") + .OfType() + .ToListAsync(); +`} + + + + +{`// Query the fanout index: +// ======================= +var shopsThatHaveMediumRedShirts = session.Advanced + .DocumentQuery() + // Query for documents that have a "Medium Red TShirt" + .WhereEquals(x => x.Color, "red") + .AndAlso() + .WhereEquals(x=> x.Size, "M") + .OfType() + .ToList(); +`} + + + + +{`from index "Shops/ByTShirt/Fanout" +where Color == "red" and Size == "M" +`} + + + + + + +{`// Query results: +// ============== + +// Only the 'Shop1' document will be returned, +// since it is the only document that has the requested combination within the TShirt list. +`} + + + +* **The index-entries**: + ![Fanout - index-entries](./assets/indexing-nested-data-2.png) + + 1. The index-entries content is visible from the Studio [Query view](../studio/database/queries/query-view.mdx). + + 2. Check option: _Show raw index-entries instead of Matching documents_. + + 3. Each row represents an **index-entry**. + Each index-entry corresponds to an inner item in the TShirt list. + + 4. In this example, the total number of index-entries is **12**, + which is the total number of inner items in the TShirt list in all **3** documents in the collection. 
+ +* **Fanout index - Map-Reduce index example**: + + * The fanout index concept applies to map-reduce indexes as well: + + + + +{`// A fanout map-reduce index: +// ========================== +public class Sales_ByTShirtColor_Fanout : + AbstractIndexCreationTask +{ + public class IndexEntry + { + // The index-fields: + public string Color { get; set; } + public int ItemsSold { get; set; } + public decimal TotalSales { get; set; } + } + + public Sales_ByTShirtColor_Fanout() + { + Map = shops => + from shop in shops + from shirt in shop.TShirts + // Creating MULTIPLE index-entries per document, + // an index-entry for each sub-object in the TShirts list + select new IndexEntry + { + Color = shirt.Color, + ItemsSold = shirt.Sold, + TotalSales = shirt.Price * shirt.Sold + }; + + Reduce = results => from result in results + group result by result.Color + into g + select new + { + // Calculate sales per color + Color = g.Key, + ItemsSold = g.Sum(x => x.ItemsSold), + TotalSales = g.Sum(x => x.TotalSales) + }; + } +} +`} + + + + +{`public class Product_Sales : AbstractJavaScriptIndexCreationTask +{ + public class Result + { + public string Product { get; set; } + + public int Count { get; set; } + + public decimal Total { get; set; } + } + + public Product_Sales() + { + Maps = new HashSet() + { + @"map('orders', function(order){ + var res = []; + order.Lines.forEach(l => { + res.push({ + Product: l.Product, + Count: 1, + Total: (l.Quantity * l.PricePerUnit) * (1- l.Discount) + }) + }); + return res; + })" + }; + + Reduce = @"groupBy(x => x.Product) + .aggregate(g => { + return { + Product : g.key, + Count: g.values.reduce((sum, x) => x.Count + sum, 0), + Total: g.values.reduce((sum, x) => x.Total + sum, 0) + } + })"; + } +} +`} + + + + + + + +{`// Query the fanout index: +// ======================= +var queryResult = session + .Query() + // Query for index-entries that contain "black" + .Where(x => x.Color == "black") + .FirstOrDefault(); + +// Get total sales for black TShirts +var blackShirtsSales = queryResult?.TotalSales ?? 0; +`} + + + + +{`// Query the fanout index: +// ======================= +var queryResult = await asyncSession + .Query() + // Query for index-entries that contain "black" + .Where(x => x.Color == "black") + .FirstOrDefaultAsync(); + +// Get total sales for black TShirts +var blackShirtsSales = queryResult?.TotalSales ?? 0; +`} + + + + +{`// Query the fanout index: +// ======================= +var queryResult = session.Advanced + .DocumentQuery() + // Query for index-entries that contain "black" + .WhereEquals(x => x.Color, "black") + .FirstOrDefault(); + +// Get total sales for black TShirts +var blackShirtsSales = queryResult?.TotalSales ?? 0; +`} + + + + +{`from index "Sales/ByTShirtColor/Fanout" +where Color == "black" +`} + + + + + + +{`// Query results: +// ============== + +// With the sample data used in this article, +// The total sales revenue from black TShirts sold (in all shops) is 490.0 +`} + + + +* **Fanout index - Performance hints**: + + * Fanout indexes are typically more resource-intensive than other indexes as RavenDB has to index a large number of index-entries. + This increased workload can lead to higher CPU and memory utilization, potentially causing a decline in the overall performance of the index. + + * When the number of index-entries generated from a single document exceeds a configurable limit, + RavenDB will issue a **High indexing fanout ratio** alert in the Studio notification center. 
+ + * You can control when this performance hint is created by setting the + [PerformanceHints.Indexing.MaxIndexOutputsPerDocument](../server/configuration/performance-hints-configuration.mdx#performancehintsindexingmaxindexoutputsperdocument) configuration key + (default is 1024). + + * So, for example, adding another OnlineShop document with a `tShirt` object containing 1025 items + will trigger the following alert: + + ![Figure 1. High indexing fanout ratio notification](./assets/fanout-index-performance-hint-1.png) + + * Clicking the 'Details' button will show the following info: + + ![Figure 2. Fanout index, performance hint details](./assets/fanout-index-performance-hint-2.png) + +* **Fanout index - Paging**: + + * A fanout index has more index-entries than the number of documents in the collection indexed. + Multiple index-entries "point" to the same document from which they originated, + as can be seen in the above [index-entries](../indexes/indexing-nested-data.mdx#fanoutMapIndexIndexEntries) example. + + * When making a fanout index query that should return full documents (without projecting results), + then in this case, the `TotalResults` property (available via the `QueryStatistics` object) will contain + the total number of index-entries and Not the total number of resulting documents. + + * **To overcome this when paging results**, you must take into account the number of "duplicate" + index-entries that are skipped internally by the server when serving the resulting documents. + + * Please refer to [paging through tampered results](../indexes/querying/paging.mdx#paging-through-tampered-results) for further explanation and examples. + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-nested-data-java.mdx b/versioned_docs/version-7.1/indexes/_indexing-nested-data-java.mdx new file mode 100644 index 0000000000..c1acf413a1 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-nested-data-java.mdx @@ -0,0 +1,131 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +The fanout index is the index that outputs multiple index entries per each document. Here is an example of such one: + + + + +{`public static class Orders_ByProduct extends AbstractIndexCreationTask { + public Orders_ByProduct() { + map = "docs.Orders.SelectMany(order => order.Lines, (order, orderLine) => new { " + + " Product = orderLine.Product, " + + " ProductName = orderLine.ProductName " + + "})"; + } +} +`} + + + + +{`public static class Orders_ByProduct extends AbstractJavaScriptIndexCreationTask { + public Orders_ByProduct() { + setMaps(Sets.newHashSet("map('Orders', function (order){\\n" + + " var res = [];\\n" + + " order.Lines.forEach(l => {\\n" + + " res.push({\\n" + + " Product: l.Product,\\n" + + " ProductName: l.ProductName\\n" + + " })\\n" + + " });\\n" + + " return res;\\n" + + "})")); + } +} +`} + + + + +A large order, having a lot of line items, will create an index entry per each `OrderLine` item from the `Lines` collection. A single document can generate hundreds of index entries. + +The fanout index concept is not specific for map-only indexes. 
It also applies to map-reduce indexes: + + + + +{`public static class Product_Sales extends AbstractIndexCreationTask { + public Product_Sales() { + map = "docs.Orders.SelectMany(order => order.Lines, (order, line) => new { " + + " Product = line.Product, " + + " Count = 1, " + + " Total = (((decimal) line.Quantity) * line.PricePerUnit) * (1M - line.Discount) " + + "})"; + + reduce = "results.GroupBy(result => result.Product).Select(g => new {\\n" + + " Product = g.Key,\\n" + + " Count = Enumerable.Sum(g, x => ((int) x.Count)),\\n" + + " Total = Enumerable.Sum(g, x0 => ((decimal) x0.Total))\\n" + + "})"; + } +} +`} + + + + +{`public static class Product_Sales extends AbstractJavaScriptIndexCreationTask { + public Product_Sales() { + setMaps(Sets.newHashSet("map('orders', function(order){\\n" + + " var res = [];\\n" + + " order.Lines.forEach(l => {\\n" + + " res.push({\\n" + + " Product: l.Product,\\n" + + " Count: 1,\\n" + + " Total: (l.Quantity * l.PricePerUnit) * (1- l.Discount)\\n" + + " })\\n" + + " });\\n" + + " return res;\\n" + + " })")); + + setReduce("groupBy(x => x.Product)\\n" + + " .aggregate(g => {\\n" + + " return {\\n" + + " Product : g.key,\\n" + + " Count: g.values.reduce((sum, x) => x.Count + sum, 0),\\n" + + " Total: g.values.reduce((sum, x) => x.Total + sum, 0)\\n" + + " }\\n" + + " })"); + } +} +`} + + + + +The above index definitions are correct. In both cases this is actually what we want. However, you need to be aware that fanout indexes are typically more expensive than regular ones. +RavenDB has to index many more entries than usual. What can result is higher utilization of CPU and memory, and overall declining performance of the index. + + +Starting from version 4.0, the fanout indexes won't error when the number of index entries created from a single document exceeds the configured limit. The configuration options from 3.x: + +- `Raven/MaxSimpleIndexOutputsPerDocument` +- `Raven/MaxMapReduceIndexOutputsPerDocument` + +are no longer valid. + +RavenDB will give you a performance hint regarding high fanout ratio using the Studio's notification center. + + + +## Performance Hints + +Once RavenDB notices that the number of indexing outputs created from a document is high, the notification that will appear in the Studio: + +![Figure 1. High indexing fanout ratio notification](./assets/fanout-index-performance-hint-1.png) + +The details will give you the following info: + +![Figure 2. Fanout index, performance hint details](./assets/fanout-index-performance-hint-2.png) + +You can control when a performance hint should be created using the `PerformanceHints.Indexing.MaxIndexOutputsPerDocument` setting (default: 1024). + +## Paging + +Since the fanout index creates multiple entries for a single document and queries return documents by default (it can change if the query defines the projection) the paging of query results +is a bit more complex. Please read the dedicated article about [paging through tampered results](../indexes/querying/paging.mdx#paging-through-tampered-results). 
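+
+The gist of that pattern, as a hedged C# sketch (the other clients expose equivalent query statistics); the `Orders_ByProductName` index, the `Order` class, and the page size are hypothetical:
+
+
+{`// Page through a fanout index, compensating for the duplicate
+// index-entries the server skips while returning documents.
+QueryStatistics stats;
+long totalSkipped = 0;
+int pageNumber = 0;
+const int pageSize = 10;
+
+List<Order> page;
+do
+{
+    page = session
+        .Query<Order, Orders_ByProductName>()
+        .Statistics(out stats)
+        // Offset by the duplicates skipped so far, on top of the regular paging offset
+        .Skip((pageNumber * pageSize) + (int)totalSkipped)
+        .Take(pageSize)
+        .ToList();
+
+    totalSkipped += stats.SkippedResults;
+    pageNumber++;
+
+    // process 'page' ...
+} while (page.Count == pageSize);
+`}
+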
+ + diff --git a/versioned_docs/version-7.1/indexes/_indexing-nested-data-nodejs.mdx b/versioned_docs/version-7.1/indexes/_indexing-nested-data-nodejs.mdx new file mode 100644 index 0000000000..e9694f16a7 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-nested-data-nodejs.mdx @@ -0,0 +1,399 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* JSON documents can have nested structures, where one document contains other objects or arrays of objects. + +* Use a static-index to facilitate querying for documents based on the nested data. + +* In this page: + + * [Sample data](../indexes/indexing-nested-data.mdx#sample-data) + + * [Simple index - SINGLE index-entry per document](../indexes/indexing-nested-data.mdx#simple-index---single-index-entry-per-document) + * [The index](../indexes/indexing-nested-data.mdx#theIndex) + * [The index-entries](../indexes/indexing-nested-data.mdx#theIndexEntries) + * [Querying the index](../indexes/indexing-nested-data.mdx#queryingTheIndex) + * [When to use](../indexes/indexing-nested-data.mdx#whenToUse) + + * [Fanout index - MULTIPLE index-entries per document](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document) + * [What is a fanout index](../indexes/indexing-nested-data.mdx#whatIsFanoutIndex) + * [Fanout index - Map index example](../indexes/indexing-nested-data.mdx#fanoutMapIndex) + * [Fanout index - Map-Reduce index example](../indexes/indexing-nested-data.mdx#fanoutMapReduceIndex) + * [Performance hints](../indexes/indexing-nested-data.mdx#performanceHints) + * [Paging](../indexes/indexing-nested-data.mdx#paging) + + +## Sample data + +* The examples in this article are based on the following **Classes** and **Sample Data**: + + + + +{`class OnlineShop { + constructor( + shopName = '', + email = '', + tShirts = {} // Will contain the nested data + ) { + Object.assign(this, { shopName, email, tShirts }); + } +} + +class TShirt { + constructor( + color = '', + size = '', + logo = '', + price = 0, + sold = 0 + ) { + Object.assign(this, { color, size, logo, price, sold }); + } +} +`} + + + + +{`// Creating sample data for the examples in this article: +// ====================================================== + +const bulkInsert = store.bulkInsert(); + +const onlineShops = [ + new OnlineShop("Shop1", "sales@shop1.com", [ + new TShirt("Red", "S", "Bytes and Beyond", 25, 2), + new TShirt("Red", "M", "Bytes and Beyond", 25, 4), + new TShirt("Blue", "M", "Query Everything", 28, 5), + new TShirt("Green", "L", "Data Driver", 30, 3) + ]), + new OnlineShop("Shop2", "sales@shop2.com", [ + new TShirt("Blue", "S", "Coffee, Code, Repeat", 22, 12), + new TShirt("Blue", "M", "Coffee, Code, Repeat", 22, 7), + new TShirt("Green", "M", "Big Data Dreamer", 25, 9), + new TShirt("Black", "L", "Data Mining Expert", 20, 11) + ]), + new OnlineShop("Shop3", "sales@shop3.com", [ + new TShirt("Red", "S", "Bytes of Wisdom", 18, 2), + new TShirt("Blue", "M", "Data Geek", 20, 6), + new TShirt("Black", "L", "Data Revolution", 15, 8), + new TShirt("Black", "XL", "Data Revolution", 15, 10) + ]) +]; + +for (const shop of onlineShops ) { + await bulkInsert.store(shop); +} + +await bulkInsert.finish(); +`} + + + + + + +## Simple index - Single index-entry per document + +* **The index**: + + +{`class Shops_ByTShirt_Simple extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + // Creating a SINGLE 
index-entry per document: + this.map("OnlineShops", shop => \{ + return \{ + // Each index-field will hold a collection of nested values from the document + colors: shop.tShirts.map(x => x.color), + sizes: shop.tShirts.map(x => x.size), + logos: shop.tShirts.map(x => x.logo) + \}; + \}); + \} +\} +`} + + +* **The index-entries**: + + ![Simple - index-entries](./assets/indexing-nested-data-1.png) + + 1. The index-entries content is visible from the Studio [Query view](../studio/database/queries/query-view.mdx). + + 2. Check option: _Show raw index-entries instead of Matching documents_. + + 3. Each row represents an **index-entry**. + The index has a single index-entry per document (3 entries in this example). + + 4. The index-field contains a collection of ALL nested values from the document. + e.g. The third **index-entry** has the following values in the _Colors_ **index-field**: + `{"black", "blue", "red"}` + +* **Querying the index**: + + + + +{`// Query for all shop documents that have a red TShirt +const results = await session + .query({ indexName: "Shops/ByTShirt/Simple" }) + // Filter query results by a nested value + .containsAny("colors", ["red"]) + .all(); +`} + + + + +{`from index "Shops/ByTShirt/Simple" +where colors == "red" +`} + + + + + + +{`// Results will include the following shop documents: +// ================================================== +// * Shop1 +// * Shop3 +`} + + + +* **When to use**: + + * This type of index structure is effective for retrieving documents when filtering the query by any of the inner nested values that were indexed. + + * However, due to the way the index-entries are generated, this index **cannot** provide results for a query searching for documents that contain + specific sub-objects which satisfy some `AND` condition. + For example: + + + +{`// You want to query for shops containing "Large Green TShirts", +// aiming to get only "Shop1" as a result since it has such a combination, +// so you attempt this query: +const greenAndLarge = await session + .query(\{ indexName: "Shops/ByTShirt/Simple" \}) + .containsAny("colors", ["green"]) + .andAlso() + .containsAny("sizes", ["L"]) + .all(); + +// But, the results of this query will include BOTH "Shop1" & "Shop2" +// since the index-entries do not keep the original sub-objects structure. +`} + + + + * To address this, you must use a **Fanout index** - as described below. + + + +## Fanout index - Multiple index-entries per document + +* **What is a Fanout index**: + + * A fanout index is an index that outputs multiple index-entries per document. + A separate index-entry is created for each nested sub-object from the document. + + * The fanout index is useful when you need to retrieve documents matching query criteria + that search for specific sub-objects that comply with some logical conditions. 
+ +* **Fanout index - Map index example**: + + + +{`// A fanout map-index: +// =================== +class Shops_ByTShirt_Fanout extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + // Creating MULTIPLE index-entries per document, + // an index-entry for each sub-object in the tShirts list + this.map("OnlineShops", shop => \{ + return shop.tShirts.map(shirt => \{ + return \{ + color: shirt.color, + size: shirt.size, + logo: shirt.logo + \}; + \}); + \}); + \} +\} +`} + + + + + + +{`// Query the fanout index: +// ======================= +const shopsThatHaveMediumRedShirts = await session + .query({ indexName: "Shops/ByTShirt/Fanout" }) + // Query for documents that have a "Medium Red TShirt" + .whereEquals("color", "red") + .andAlso() + .whereEquals("size", "M") + .all(); +`} + + + + +{`from index "Shops/ByTShirt/Fanout" +where color == "red" and size == "M" +`} + + + + + + +{`// Query results: +// ============== + +// Only the 'Shop1' document will be returned, +// since it is the only document that has the requested combination within the tShirt list. +`} + + + +* **The index-entries**: + ![Fanout - index-entries](./assets/indexing-nested-data-2.png) + + 1. The index-entries content is visible from the Studio [Query view](../studio/database/queries/query-view.mdx). + + 2. Check option: _Show raw index-entries instead of Matching documents_. + + 3. Each row represents an **index-entry**. + Each index-entry corresponds to an inner item in the TShirt list. + + 4. In this example, the total number of index-entries is **12**, + which is the total number of inner items in the TShirt list in all **3** documents in the collection. + +* **Fanout index - Map-Reduce index example**: + + * The fanout index concept applies to map-reduce indexes as well: + + +{`// A fanout map-reduce index: +// ========================== +class Sales_ByTShirtColor_Fanout extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + this.map("OnlineShops", shop => \{ + return shop.tShirts.map(shirt => \{ + return \{ + // Define the index-fields: + color: shirt.color, + itemsSold: shirt.sold, + totalSales: shirt.price * shirt.sold + \}; + \}); + \}); + + this.reduce(results => results + .groupBy(shirt => shirt.color) + .aggregate(g => \{ + return \{ + // Calculate sales per color + color: g.key, + itemsSold: g.values.reduce((p, c) => p + c.itemsSold, 0), + totalSales: g.values.reduce((p, c) => p + c.totalSales, 0), + \} + \})); + \} +\} +`} + + + + + + +{`// Query the fanout index: +// ======================= +const queryResult = await session + .query({ indexName: "Sales/ByTShirtColor/Fanout" }) + // Query for index-entries that contain "black" + .whereEquals("color", "black") + .firstOrNull(); + +// Get total sales for black TShirts +const blackShirtsSales = queryResult?.totalSales ?? 0; +`} + + + + +{`from index "Sales/ByTShirtColor/Fanout" +where color == "black" +`} + + + + + + +{`// Query results: +// ============== + +// With the sample data used in this article, +// The total sales revenue from black TShirts sold (in all shops) is 490 +`} + + + +* **Fanout index - Performance hints**: + + * Fanout indexes are typically more resource-intensive than other indexes as RavenDB has to index a large number of index-entries. + This increased workload can lead to higher CPU and memory utilization, potentially causing a decline in the overall performance of the index. 
+ + * When the number of index-entries generated from a single document exceeds a configurable limit, + RavenDB will issue a **High indexing fanout ratio** alert in the Studio notification center. + + * You can control when this performance hint is created by setting the + [PerformanceHints.Indexing.MaxIndexOutputsPerDocument](../server/configuration/performance-hints-configuration.mdx#performancehintsindexingmaxindexoutputsperdocument) configuration key + (default is 1024). + + * So, for example, adding another OnlineShop document with a `tShirt` object containing 1025 items + will trigger the following alert: + + ![Figure 1. High indexing fanout ratio notification](./assets/fanout-index-performance-hint-1.png) + + * Clicking the 'Details' button will show the following info: + + ![Figure 2. Fanout index, performance hint details](./assets/fanout-index-performance-hint-2.png) + +* **Fanout index - Paging**: + + * A fanout index has more index-entries than the number of documents in the collection indexed. + Multiple index-entries "point" to the same document from which they originated, + as can be seen in the above [index-entries](../indexes/indexing-nested-data.mdx#fanoutMapIndexIndexEntries) example. + + * When making a fanout index query that should return full documents (without projecting results), + the `totalResults` property (available when calling the query `statistics()` method) + will contain the total number of index-entries and Not the total number of resulting documents. + + * **To overcome this when paging results**, you must take into account the number of "duplicate" + index-entries that are skipped internally by the server when serving the resulting documents. + + * Please refer to [paging through tampered results](../indexes/querying/paging.mdx#paging-through-tampered-results) for further explanation and examples. + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-nested-data-php.mdx b/versioned_docs/version-7.1/indexes/_indexing-nested-data-php.mdx new file mode 100644 index 0000000000..6b4165db63 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-nested-data-php.mdx @@ -0,0 +1,624 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* JSON documents can have nested structures, where one document contains other objects or arrays of objects. + +* Use a static-index to facilitate querying for documents based on the nested data. 
+ +* In this page: + + * [Sample data](../indexes/indexing-nested-data.mdx#sample-data) + + * [Simple index - SINGLE index-entry per document](../indexes/indexing-nested-data.mdx#simple-index---single-index-entry-per-document) + * [The index](../indexes/indexing-nested-data.mdx#theIndex) + * [The index-entries](../indexes/indexing-nested-data.mdx#theIndexEntries) + * [Querying the index](../indexes/indexing-nested-data.mdx#queryingTheIndex) + * [When to use](../indexes/indexing-nested-data.mdx#whenToUse) + + * [Fanout index - MULTIPLE index-entries per document](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document) + * [What is a fanout index](../indexes/indexing-nested-data.mdx#whatIsFanoutIndex) + * [Fanout index - Map index example](../indexes/indexing-nested-data.mdx#fanoutMapIndex) + * [Fanout index - Map-Reduce index example](../indexes/indexing-nested-data.mdx#fanoutMapReduceIndex) + * [Performance hints](../indexes/indexing-nested-data.mdx#performanceHints) + * [Paging](../indexes/indexing-nested-data.mdx#paging) + + +## Sample data + +* The examples in this article are based on the following **Classes** and **Sample Data**: + + + + +{`class OnlineShop +{ + private ?string $shopName = null; + private ?string $email = null; + public ?TShirtArray $tShirts = null; // Nested data + + public function __construct( + ?string $shopName = null, + ?string $email = null, + ?TShirtArray $tShirts = null + ) { + $this->shopName = $shopName; + $this->email = $email; + $this->tShirts = $tShirts; + } + + public function getShopName(): ?string + { + return $this->shopName; + } + + public function setShopName(?string $shopName): void + { + $this->shopName = $shopName; + } + + public function getEmail(): ?string + { + return $this->email; + } + + public function setEmail(?string $email): void + { + $this->email = $email; + } + + public function getTShirts(): ?TShirtArray + { + return $this->tShirts; + } + + public function setTShirts(?TShirtArray $tShirts): void + { + $this->tShirts = $tShirts; + } +} + +class TShirt +{ + private ?string $color = null; + private ?string $size = null; + private ?string $logo = null; + private ?float $price = null; + private ?int $sold = null; + + public function __construct( + ?string $color = null, + ?string $size = null, + ?string $logo = null, + ?float $price = null, + ?int $sold = null + ) { + $this->color = $color; + $this->size = $size; + $this->logo = $logo; + $this->price = $price; + $this->sold = $sold; + } + + public function getColor(): ?string + { + return $this->color; + } + + public function setColor(?string $color): void + { + $this->color = $color; + } + + public function getSize(): ?string + { + return $this->size; + } + + public function setSize(?string $size): void + { + $this->size = $size; + } + + public function getLogo(): ?string + { + return $this->logo; + } + + public function setLogo(?string $logo): void + { + $this->logo = $logo; + } + + public function getPrice(): ?float + { + return $this->price; + } + + public function setPrice(?float $price): void + { + $this->price = $price; + } + + public function getSold(): ?int + { + return $this->sold; + } + + public function setSold(?int $sold): void + { + $this->sold = $sold; + } +} + +class TShirtArray extends TypedArray +{ + public function __construct() + { + parent::__construct(TShirt::class); + } +} +`} + + + + +{`// Creating sample data for the examples in this article: +// ====================================================== + +$onlineShops = []; + +// 
Shop1
+$onlineShops[] = new OnlineShop(
+    shopName: "Shop1",
+    email: "sales@shop1.com",
+    tShirts: TShirtArray::fromArray([
+        new TShirt(color: "Red", size: "S", logo: "Bytes and Beyond", price: 25, sold: 2),
+        new TShirt(color: "Red", size: "M", logo: "Bytes and Beyond", price: 25, sold: 4),
+        new TShirt(color: "Blue", size: "M", logo: "Query Everything", price: 28, sold: 5),
+        new TShirt(color: "Green", size: "L", logo: "Data Driver", price: 30, sold: 3)
+    ])
+);
+
+// Shop2
+$onlineShops[] = new OnlineShop(
+    shopName: "Shop2",
+    email: "sales@shop2.com",
+    tShirts: TShirtArray::fromArray([
+        new TShirt(color: "Blue", size: "S", logo: "Coffee, Code, Repeat", price: 22, sold: 12),
+        new TShirt(color: "Blue", size: "M", logo: "Coffee, Code, Repeat", price: 22, sold: 7),
+        new TShirt(color: "Green", size: "M", logo: "Big Data Dreamer", price: 25, sold: 9),
+        new TShirt(color: "Black", size: "L", logo: "Data Mining Expert", price: 20, sold: 11)
+    ])
+);
+
+// Shop3
+$onlineShops[] = new OnlineShop(
+    shopName: "Shop3",
+    email: "sales@shop3.com",
+    tShirts: TShirtArray::fromArray([
+        new TShirt(color: "Red", size: "S", logo: "Bytes of Wisdom", price: 18, sold: 2),
+        new TShirt(color: "Blue", size: "M", logo: "Data Geek", price: 20, sold: 6),
+        new TShirt(color: "Black", size: "L", logo: "Data Revolution", price: 15, sold: 8),
+        new TShirt(color: "Black", size: "XL", logo: "Data Revolution", price: 15, sold: 10)
+    ])
+);
+
+$session = $store->openSession();
+try {
+    /** @var OnlineShop $shop */
+    foreach ($onlineShops as $shop) {
+        $session->store($shop);
+    }
+
+    $session->saveChanges();
+} finally {
+    $session->close();
+}
+`}
+
+
+
+
+
+## Simple index - Single index-entry per document
+
+* **The index**:
+
+
+{`class Shops_ByTShirt_Simple_IndexEntry
+{
+    // Each index-field will hold a collection of nested values from the document
+    public ?array $colors = null;
+    public ?array $sizes = null;
+    public ?array $logos = null;
+}
+
+class Shops_ByTShirt_Simple extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        // Creating a SINGLE index-entry per document:
+        $this->map =
+            "from shop in docs.OnlineShops " .
+            "select new {" .
+            "    colors = shop.t_shirts.Select(x => x.color)," .
+            "    sizes = shop.t_shirts.Select(x => x.size)," .
+            "    logos = shop.t_shirts.Select(x => x.logo)" .
+            "}";
+    }
+}
+`}
+
+
+* **The index-entries**:
+
+    ![Simple - index-entries](./assets/indexing-nested-data-1.png)
+
+    1. The index-entries content is visible from the Studio [Query view](../studio/database/queries/query-view.mdx).
+
+    2. Check option: _Show raw index-entries instead of Matching documents_.
+
+    3. Each row represents an **index-entry**.
+       The index has a single index-entry per document (3 entries in this example).
+
+    4. The index-field contains a collection of ALL nested values from the document.
+       e.g. The third **index-entry** has the following values in the _Colors_ **index-field**:
+       `{"black", "blue", "red"}`
+
+* **Querying the index**:
+
+
+
+
+{`// Query for all shop documents that have a red TShirt
+$shopsThatHaveRedShirts = $session
+    ->query(Shops_ByTShirt_Simple_IndexEntry::class, Shops_ByTShirt_Simple::class)
+    // Filter query results by a nested value
+    ->containsAny("colors", [ "red" ])
+    ->ofType(OnlineShop::class)
+    ->toList();
+`}
+
+
+
+
+{`from index "Shops/ByTShirt/Simple"
+where Colors == "red"
+`}
+
+
+
+
+
+
+{`// Results will include the following shop documents:
+// ==================================================
+// * Shop1
+// * Shop3
+`}
+
+
+
+* **When to use**:
+
+    * This type of index structure is effective for retrieving documents when filtering the query by any of the inner nested values that were indexed.
+
+    * However, due to the way the index-entries are generated, this index **cannot** provide results for a query searching for documents that contain
+      specific sub-objects which satisfy some `andAlso` condition.
+      For example:
+
+
+{`// You want to query for shops containing "Large Green TShirts",
+// aiming to get only "Shop1" as a result since it has such a combination,
+// so you attempt this query:
+$greenAndLarge = $session
+    ->query(Shops_ByTShirt_Simple_IndexEntry::class, Shops_ByTShirt_Simple::class)
+    ->containsAny("colors", [ "green" ])
+    ->andAlso()
+    ->containsAny("sizes", [ "L" ])
+    ->ofType(OnlineShop::class)
+    ->toList();
+
+// But, the results of this query will include BOTH "Shop1" & "Shop2"
+// since the index-entries do not keep the original sub-objects structure.
+`}
+
+
+    * To address this, you must use a **Fanout index** - as described below.
+
+
+
+## Fanout index - Multiple index-entries per document
+
+* **What is a Fanout index**:
+
+    * A fanout index is an index that outputs multiple index-entries per document.
+      A separate index-entry is created for each nested sub-object from the document.
+
+    * The fanout index is useful when you need to retrieve documents matching query criteria
+      that search for specific sub-objects that comply with some logical conditions.
+
+* **Fanout index - Map index example**:
+
+
+
+{`// A fanout map-index:
+// ===================
+class Shops_ByTShirt_Fanout_IndexEntry
+{
+    // The index-fields:
+    private ?string $color = null;
+    private ?string $size = null;
+    private ?string $logo = null;
+
+    public function getColor(): ?string
+    {
+        return $this->color;
+    }
+
+    public function setColor(?string $color): void
+    {
+        $this->color = $color;
+    }
+
+    public function getSize(): ?string
+    {
+        return $this->size;
+    }
+
+    public function setSize(?string $size): void
+    {
+        $this->size = $size;
+    }
+
+    public function getLogo(): ?string
+    {
+        return $this->logo;
+    }
+
+    public function setLogo(?string $logo): void
+    {
+        $this->logo = $logo;
+    }
+}
+
+class Shops_ByTShirt_Fanout extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map =
+            "from shop in docs.OnlineShops " .
+            "from shirt in shop.t_shirts " .
+            // Creating MULTIPLE index-entries per document,
+            // an index-entry for each sub-object in the TShirts list
+            "select new {" .
+            "    color = shirt.color," .
+            "    size = shirt.size," .
+            "    logo = shirt.logo" .
+            "}";
+    }
+}
+`}
+
+
+
+{`class Shops_ByTShirt_JS extends AbstractJavaScriptIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->setMaps([
+            "map('OnlineShops', function (shop){
+                var res = [];
+                shop.t_shirts.forEach(shirt => {
+                    res.push({
+                        color: shirt.color,
+                        size: shirt.size,
+                        logo: shirt.logo
+                    })
+                });
+                return res;
+            })
+            "
+        ]);
+    }
+}
+`}
+
+
+
+
+
+
+
+{`// Query the fanout index:
+// =======================
+$shopsThatHaveMediumRedShirts = $session
+    ->query(Shops_ByTShirt_Fanout_IndexEntry::class, Shops_ByTShirt_Fanout::class)
+    // Query for documents that have a "Medium Red TShirt"
+    ->whereEquals("color", "red")
+    ->andAlso()
+    ->whereEquals("size", "M")
+    ->ofType(OnlineShop::class)
+    ->toList();
+`}
+
+
+
+
+{`from index "Shops/ByTShirt/Fanout"
+where Color == "red" and Size == "M"
+`}
+
+
+
+
+
+
+{`// Query results:
+// ==============
+
+// Only the 'Shop1' document will be returned,
+// since it is the only document that has the requested combination within the TShirt list.
+`} + + + +* **The index-entries**: + ![Fanout - index-entries](./assets/indexing-nested-data-2.png) + + 1. The index-entries content is visible from the Studio [Query view](../studio/database/queries/query-view.mdx). + + 2. Check option: _Show raw index-entries instead of Matching documents_. + + 3. Each row represents an **index-entry**. + Each index-entry corresponds to an inner item in the TShirt list. + + 4. In this example, the total number of index-entries is **12**, + which is the total number of inner items in the TShirt list in all **3** documents in the collection. + +* **Fanout index - Map-Reduce index example**: + + * The fanout index concept applies to map-reduce indexes as well: + + + + +{`// A fanout map-reduce index: +// ========================== +class Sales_ByTShirtColor_Fanout_IndexEntry { + // The index-fields: + private ?string $color = null; + private ?int $itemsSold = null; + private ?float $totalSales = null; + + public function getColor(): ?string + { + return $this->color; + } + + public function setColor(?string $color): void + { + $this->color = $color; + } + + public function getItemsSold(): ?int + { + return $this->itemsSold; + } + + public function setItemsSold(?int $itemsSold): void + { + $this->itemsSold = $itemsSold; + } + + public function getTotalSales(): ?float + { + return $this->totalSales; + } + + public function setTotalSales(?float $totalSales): void + { + $this->totalSales = $totalSales; + } +} +class Sales_ByTShirtColor_Fanout extends AbstractIndexCreationTask +{ + + public function __construct() + { + parent::__construct(); + + # Creating MULTIPLE index-entries per document, + # an index-entry for each sub-object in the TShirts list + $this->map = + "from shop in docs.OnlineShops " . + "from shirt in shop.t_shirts " . + // Creating MULTIPLE index-entries per document, + // an index-entry for each sub-object in the TShirts list + "select new {" . + " color = shirt.color, " . + " items_sold = shirt.sold, " . + " total_sales = shirt.price * shirt.sold" . + "}"; + + $this->reduce = + "from result in results " . + "group result by result.color " . + "into g select new {" . + " color = g.Key," . + // Calculate sales per color + " items_sold = g.Sum(x => x.items_sold)," . + " total_sales = g.Sum(x => x.total_sales)" . + "}"; + + } +} +`} + + + + + + + +{`// Query the fanout index: +// ======================= +/** @var Sales_ByTShirtColor_Fanout_IndexEntry $queryResult */ +$queryResult = $session + ->query(Sales_ByTShirtColor_Fanout_IndexEntry::class, Sales_ByTShirtColor_Fanout::class) + // Query for index-entries that contain "black" + ->whereEquals("color", "black") + ->firstOrDefault(); + +// Get total sales for black TShirts +$blackShirtsSales = $queryResult?->getTotalSales() ?? 0; +`} + + + + +{`from index "Sales/ByTShirtColor/Fanout" +where Color == "black" +`} + + + + +* **Fanout index - Performance hints**: + + * Fanout indexes are typically more resource-intensive than other indexes as RavenDB has to index a large number of index-entries. + This increased workload can lead to higher CPU and memory utilization, potentially causing a decline in the overall performance of the index. + + * When the number of index-entries generated from a single document exceeds a configurable limit, + RavenDB will issue a **High indexing fanout ratio** alert in the Studio notification center. 
+ + * You can control when this performance hint is created by setting the + [PerformanceHints.Indexing.MaxIndexOutputsPerDocument](../server/configuration/performance-hints-configuration.mdx#performancehintsindexingmaxindexoutputsperdocument) configuration key + (default is 1024). + + * So, for example, adding another OnlineShop document with a `tShirt` object containing 1025 items + will trigger the following alert: + + ![Figure 1. High indexing fanout ratio notification](./assets/fanout-index-performance-hint-1.png) + + * Clicking the 'Details' button will show the following info: + + ![Figure 2. Fanout index, performance hint details](./assets/fanout-index-performance-hint-2.png) + +* **Fanout index - Paging**: + + * A fanout index has more index-entries than the number of documents in the collection indexed. + Multiple index-entries "point" to the same document from which they originated, + as can be seen in the above [index-entries](../indexes/indexing-nested-data.mdx#fanoutMapIndexIndexEntries) example. + + * When making a fanout index query that should return full documents (without projecting results), + the `TotalResults` property (available via the `QueryStatistics` object) will contain + the total number of index-entries and Not the total number of resulting documents. + + * **To overcome this when paging results**, you must take into account the number of "duplicate" + index-entries that are skipped internally by the server when serving the resulting documents. + + * Please refer to [paging through tampered results](../indexes/querying/paging.mdx#paging-through-tampered-results) for further explanation and examples. + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-nested-data-python.mdx b/versioned_docs/version-7.1/indexes/_indexing-nested-data-python.mdx new file mode 100644 index 0000000000..130d9519bd --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-nested-data-python.mdx @@ -0,0 +1,444 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* JSON documents can have nested structures, where one document contains other objects or arrays of objects. + +* Use a static-index to facilitate querying for documents based on the nested data. 
+ +* In this page: + + * [Sample data](../indexes/indexing-nested-data.mdx#sample-data) + + * [Simple index - SINGLE index-entry per document](../indexes/indexing-nested-data.mdx#simple-index---single-index-entry-per-document) + * [The index](../indexes/indexing-nested-data.mdx#theIndex) + * [The index-entries](../indexes/indexing-nested-data.mdx#theIndexEntries) + * [Querying the index](../indexes/indexing-nested-data.mdx#queryingTheIndex) + * [When to use](../indexes/indexing-nested-data.mdx#whenToUse) + + * [Fanout index - MULTIPLE index-entries per document](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document) + * [What is a fanout index](../indexes/indexing-nested-data.mdx#whatIsFanoutIndex) + * [Fanout index - Map index example](../indexes/indexing-nested-data.mdx#fanoutMapIndex) + * [Fanout index - Map-Reduce index example](../indexes/indexing-nested-data.mdx#fanoutMapReduceIndex) + * [Performance hints](../indexes/indexing-nested-data.mdx#performanceHints) + * [Paging](../indexes/indexing-nested-data.mdx#paging) + + +## Sample data + +* The examples in this article are based on the following **Classes** and **Sample Data**: + + + + +{`class OnlineShop: + def __init__(self, shop_name: str = None, email: str = None, t_shirts: List[TShirt] = None): + self.shop_name = shop_name + self.email = email + self.t_shirts = t_shirts + + @classmethod + def from_json(cls, json_data: Dict[str, Any]) -> OnlineShop: + return cls( + json_data["shop_name"], + json_data["email"], + [TShirt.from_json(shirt_json_dict) for shirt_json_dict in json_data["t_shirts"]], + ) + + def to_json(self) -> Dict[str, Any]: + return { + "shop_name": self.shop_name, + "email": self.email, + "t_shirts": [tshirt.to_json() for tshirt in self.t_shirts], + } + + +class TShirt: + def __init__(self, color: str = None, size: str = None, logo: str = None, price: float = None, sold: int = None): + self.color = color + self.size = size + self.logo = logo + self.price = price + self.sold = sold + + @classmethod + def from_json(cls, json_data: Dict[str, Any]) -> TShirt: + return cls(json_data["color"], json_data["size"], json_data["logo"], json_data["price"], json_data["sold"]) + + def to_json(self) -> Dict[str, Any]: + return {"color": self.color, "size": self.size, "logo": self.logo, "price": self.price, "sold": self.sold} +`} + + + + +{`# Creating sample data for the examples in this article: +# ====================================================== +shop1_tshirts = [ + TShirt(color="Red", size="S", logo="Bytes and Beyond", price=25, sold=2), + TShirt(color="Red", size="M", logo="Bytes and Beyond", price=25, sold=4), + TShirt(color="Blue", size="M", logo="Query Everything", price=28, sold=5), + TShirt(color="Green", size="L", logo="Data Driver", price=30, sold=3), +] + +shop2_tshirts = [ + TShirt(color="Blue", size="S", logo="Coffee, Code, Repeat", price=22, sold=12), + TShirt(color="Blue", size="M", logo="Coffee, Code, Repeat", price=22, sold=7), + TShirt(color="Green", size="M", logo="Big Data Dreamer", price=25, sold=9), + TShirt(color="Black", size="L", logo="Data Mining Expert", price=20, sold=11), +] + +shop3_tshirts = [ + TShirt(color="Red", size="S", logo="Bytes of Wisdom", price=18, sold=2), + TShirt(color="Blue", size="M", logo="Data Geek", price=20, sold=6), + TShirt(color="Black", size="L", logo="Data Revolution", price=15, sold=8), + TShirt(color="Black", size="XL", logo="Data Revolution", price=15, sold=10), +] + +online_shops = [ + OnlineShop(shop_name="Shop1", 
email="sales@shop1.com", t_shirts=shop1_tshirts),
+    OnlineShop(shop_name="Shop2", email="sales@shop2.com", t_shirts=shop2_tshirts),
+    OnlineShop(shop_name="Shop3", email="sales@shop3.com", t_shirts=shop3_tshirts),
+]
+
+Shops_ByTShirt_Simple().execute(store)
+Shops_ByTShirt_Fanout().execute(store)
+Sales_ByTShirtColor_Fanout().execute(store)
+
+with store.open_session() as session:
+    for shop in online_shops:
+        session.store(shop)
+
+    session.save_changes()
+`}
+
+
+
+
+
+## Simple index - Single index-entry per document
+
+* **The index**:
+
+
+{`class Shops_ByTShirt_Simple(AbstractIndexCreationTask):
+    class IndexEntry:
+        def __init__(self, colors: List[str] = None, sizes: List[str] = None, logos: List[str] = None):
+            # The index-fields:
+            self.colors = colors
+            self.sizes = sizes
+            self.logos = logos
+
+    def __init__(self):
+        super().__init__()
+        # Creating a SINGLE index-entry per document:
+        self.map = (
+            "from shop in docs.OnlineShops "
+            "select new \{ "
+            # Each index-field will hold a collection of nested values from the document
+            "    colors = shop.t_shirts.Select(x => x.color),"
+            "    sizes = shop.t_shirts.Select(x => x.size),"
+            "    logos = shop.t_shirts.Select(x => x.logo)"
+            "\}"
+        )
+`}
+
+
+
+* **The index-entries**:
+
+    ![Simple - index-entries](./assets/indexing-nested-data-1.png)
+
+    1. The index-entries content is visible from the Studio [Query view](../studio/database/queries/query-view.mdx).
+
+    2. Check option: _Show raw index-entries instead of Matching documents_.
+
+    3. Each row represents an **index-entry**.
+       The index has a single index-entry per document (3 entries in this example).
+
+    4. The index-field contains a collection of ALL nested values from the document.
+       e.g. The third **index-entry** has the following values in the _Colors_ **index-field**:
+       `{"black", "blue", "red"}`
+
+* **Querying the index**:
+
+
+
+
+{`# Query for all shop documents that have a red TShirt
+shops_that_have_red_shirts = list(
+    session.query_index_type(Shops_ByTShirt_Simple, Shops_ByTShirt_Simple.IndexEntry)
+    .contains_any("colors", ["Red"])
+    .of_type(OnlineShop)
+)
+`}
+
+
+
+
+{`from index "Shops/ByTShirt/Simple"
+where Colors == "red"
+`}
+
+
+
+
+
+
+{`# Results will include the following shop documents:
+# ==================================================
+# * Shop1
+# * Shop3
+`}
+
+
+
+* **When to use**:
+
+    * This type of index structure is effective for retrieving documents when filtering the query by any of the inner nested values that were indexed.
+
+    * However, due to the way the index-entries are generated, this index **cannot** provide results for a query searching for documents that contain
+      specific sub-objects which satisfy some `and_also` condition.
+      For example:
+
+
+{`# You want to query for shops containing "Large Green TShirts",
+# aiming to get only "Shop1" as a result since it has such a combination,
+# so you attempt this query:
+green_and_large = list(
+    session.query_index_type(Shops_ByTShirt_Simple, Shops_ByTShirt_Simple.IndexEntry)
+    .contains_any("colors", ["green"])
+    .and_also()
+    .contains_any("sizes", ["L"])
+    .of_type(OnlineShop)
+)
+
+# But, the results of this query will include BOTH "Shop1" & "Shop2"
+# since the index-entries do not keep the original sub-objects structure.
+`}
+
+
+    * To address this, you must use a **Fanout index** - as described below.
+
+
+
+## Fanout index - Multiple index-entries per document
+
+* **What is a Fanout index**:
+
+    * A fanout index is an index that outputs multiple index-entries per document.
+ A separate index-entry is created for each nested sub-object from the document. + + * The fanout index is useful when you need to retrieve documents matching query criteria + that search for specific sub-objects that comply with some logical conditions. + +* **Fanout index - Map index example**: + + + + +{`# A fanout map-index: +# =================== +class Shops_ByTShirt_Fanout(AbstractIndexCreationTask): + class IndexEntry: + def __init__(self, color: str = None, size: str = None, logo: str = None): + self.color = color + self.size = size + self.logo = logo + + def __init__(self): + super().__init__() + # Creating MULTIPLE index-entries per document, + # an index-entry for each sub-object in the TShirts list + self.map = ( + "from shop in docs.OnlineShops from shirt in shop.t_shirts " + "select new {" + " color = shirt.color," + " size = shirt.size," + " logo = shirt.logo" + "}" + ) +`} + + + + +{`class Shops_ByTShirt_JS(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + """ + map('OnlineShops', function (shop){ + var res = []; + shop.t_shirts.forEach(shirt => { + res.push({ + color: shirt.color, + size: shirt.size, + logo: shirt.logo + }) + }); + return res; + }) + """ + } +`} + + + + + + + +{`# Query the fanout index: +# ======================= +shops_that_have_medium_red_shirts = list( + session.query_index_type(Shops_ByTShirt_Fanout, Shops_ByTShirt_Fanout.IndexEntry) + # Query for documents that have a "Medium Red TShirt" + .where_equals("color", "red") + .and_also() + .where_equals("size", "M") + .of_type(OnlineShop) +) +`} + + + + +{`from index "Shops/ByTShirt/Fanout" +where Color == "red" and Size == "M" +`} + + + + + + +{`# Query results: +# ============== +# +# Only the 'Shop1' document will be returned, +# since it is the only document that has the requested combination within the TShirt list. +`} + + + +* **The index-entries**: + ![Fanout - index-entries](./assets/indexing-nested-data-2.png) + + 1. The index-entries content is visible from the Studio [Query view](../studio/database/queries/query-view.mdx). + + 2. Check option: _Show raw index-entries instead of Matching documents_. + + 3. Each row represents an **index-entry**. + Each index-entry corresponds to an inner item in the TShirt list. + + 4. In this example, the total number of index-entries is **12**, + which is the total number of inner items in the TShirt list in all **3** documents in the collection. 
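+
+  As a quick sanity check, this count follows directly from the sample data above
+  (an illustrative breakdown, not client code):
+
+{`# Shop1: 4 inner TShirt items -> 4 index-entries
+# Shop2: 4 inner TShirt items -> 4 index-entries
+# Shop3: 4 inner TShirt items -> 4 index-entries
+# Total:                         12 index-entries for 3 documents
+`}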
+ +* **Fanout index - Map-Reduce index example**: + + * The fanout index concept applies to map-reduce indexes as well: + + + + +{`class Sales_ByTShirtColor_Fanout(AbstractIndexCreationTask): + class IndexEntry: + def __init__(self, color: str = None, items_sold: int = None, total_sales: float = None): + self.color = color + self.items_sold = items_sold + self.total_sales = total_sales + + def __init__(self): + super().__init__() + # Creating MULTIPLE index-entries per document, + # an index-entry for each sub-object in the TShirts list + self.map = ( + "from shop in docs.OnlineShops from shirt in shop.t_shirts " + "select new {" + " color = shirt.color, " + " items_sold = shirt.sold, " + " total_sales = shirt.price * shirt.sold" + "}" + ) + self.reduce = ( + "from result in results group result by result.color into g select new {" + " color = g.Key," + " items_sold = g.Sum(x => x.items_sold)," + " total_sales = g.Sum(x => x.total_sales)" + "}" + ) +`} + + + + + + + +{`# Query the fanout index: +# ======================= +query_result = ( + session.query_index_type(Sales_ByTShirtColor_Fanout, Sales_ByTShirtColor_Fanout.IndexEntry) + # Query for index-entries that contain "black" + .where_equals("color", "black").first() +) + +# Get total sales for black TShirts +black_shirts_sales = query_result.total_sales or 0 +`} + + + + +{`from index "Sales/ByTShirtColor/Fanout" +where Color == "black" +`} + + + + +* **Fanout index - Performance hints**: + + * Fanout indexes are typically more resource-intensive than other indexes as RavenDB has to index a large number of index-entries. + This increased workload can lead to higher CPU and memory utilization, potentially causing a decline in the overall performance of the index. + + * When the number of index-entries generated from a single document exceeds a configurable limit, + RavenDB will issue a **High indexing fanout ratio** alert in the Studio notification center. + + * You can control when this performance hint is created by setting the + [PerformanceHints.Indexing.MaxIndexOutputsPerDocument](../server/configuration/performance-hints-configuration.mdx#performancehintsindexingmaxindexoutputsperdocument) configuration key + (default is 1024). + + * So, for example, adding another OnlineShop document with a `tShirt` object containing 1025 items + will trigger the following alert: + + ![Figure 1. High indexing fanout ratio notification](./assets/fanout-index-performance-hint-1.png) + + * Clicking the 'Details' button will show the following info: + + ![Figure 2. Fanout index, performance hint details](./assets/fanout-index-performance-hint-2.png) + +* **Fanout index - Paging**: + + * A fanout index has more index-entries than the number of documents in the collection indexed. + Multiple index-entries "point" to the same document from which they originated, + as can be seen in the above [index-entries](../indexes/indexing-nested-data.mdx#fanoutMapIndexIndexEntries) example. + + * When making a fanout index query that should return full documents (without projecting results), + the `TotalResults` property (available via the `QueryStatistics` object) will contain + the total number of index-entries and Not the total number of resulting documents. + + * **To overcome this when paging results**, you must take into account the number of "duplicate" + index-entries that are skipped internally by the server when serving the resulting documents. 
+ + * Please refer to [paging through tampered results](../indexes/querying/paging.mdx#paging-through-tampered-results) for further explanation and examples. + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-csharp.mdx b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-csharp.mdx new file mode 100644 index 0000000000..d38ff436f4 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-csharp.mdx @@ -0,0 +1,167 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, RavenDB indexes are defined on a specific entity type, referred to as a `Collection`, + and do not consider the inheritance hierarchy. + +* In this Page: + * [Polymorphic Data](../indexes/indexing-polymorphic-data.mdx#polymorphic-data) + * [Multi-Map Indexes](../indexes/indexing-polymorphic-data.mdx#multi-map-indexes) + * [Other Options](../indexes/indexing-polymorphic-data.mdx#other-options) + + +## Polymorphic Data + +Let's assume, for example, that we have the following inheritance hierarchy: + +![Figure 1: Polymorphic indexes](./assets/polymorphic_indexes_faq.png) + +When saving a `Cat` document, it will be assigned to the "Cats" collection, +while a `Dog` document will be placed in the "Dogs" collection. + +If we intend to create a simple Map-index for Cat documents based on their names, we would write: + + + +{`from cat in docs.Cats +select new \{ cat.Name \} +`} + + + +And for dogs: + + + +{`from dog in docs.Dogs +select new \{ dog.Name \} +`} + + + + +Querying each index results in documents only from the specific collection the index was defined for. +However, what if we need to query across ALL animal collections? + + +## Multi-Map Indexes + +The easiest way to do this is by writing a multi-map index such as: + + + + +{`public class Animals_ByName : AbstractMultiMapIndexCreationTask +{ + public Animals_ByName() + { + AddMap(cats => from c in cats select new { c.Name }); + + AddMap(dogs => from d in dogs select new { d.Name }); + } +} +`} + + + + +{`public class Animals_ByName : AbstractJavaScriptIndexCreationTask +{ + public Animals_ByName() + { + Maps = new HashSet() + { + @"map('cats', function (c){ return {Name: c.Name}})", + @"map('dogs', function (d){ return {Name: d.Name}})" + }; + } +} +`} + + + + +And query it like this: + + + + +{`IList results = session + .Query() + .Where(x => x.Name == "Mitzy") + .ToList(); +`} + + + + +{`List results = session + .Advanced + .DocumentQuery() + .WhereEquals("Name", "Mitzy") + .ToList(); +`} + + + + +{`from index 'Animals/ByName' +where Name = 'Mitzy' +`} + + + + +## Other Options + +Another option would be to modify the way we generate the Collection for subclasses of `Animal`: + + + +{`DocumentStore store = new DocumentStore() +\{ + Conventions = + \{ + FindCollectionName = type => + \{ + if (typeof(Animal).IsAssignableFrom(type)) + return "Animals"; + return DocumentConventions.DefaultGetCollectionName(type); + \} + \} +\}; +`} + + + +Using this method, we can now index on all animals using: + + + +{`from animal in docs.Animals +select new \{ animal.Name \} +`} + + + +But what happens when you don't want to modify the entity name of an entity itself? + +You can create a polymorphic index using: + + + +{`from animal in docs.WhereEntityIs("Cats", "Dogs") +select new \{ animal.Name \} +`} + + + +It will generate an index that matches both Cats and Dogs. 
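+
+If you prefer to deploy this string-based map from code, one possible approach (a minimal sketch;
+the index name used here is illustrative) is to wrap the map in an `IndexDefinition` and send it
+with a `PutIndexesOperation`:
+
+{`var indexDefinition = new IndexDefinition
+{
+    Name = "Animals/ByName/Polymorphic",
+    Maps =
+    {
+        @"from animal in docs.WhereEntityIs(""Cats"", ""Dogs"")
+          select new { animal.Name }"
+    }
+};
+
+store.Maintenance.Send(new PutIndexesOperation(indexDefinition));
+`}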
+ + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-java.mdx b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-java.mdx new file mode 100644 index 0000000000..981e1ebc2a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-java.mdx @@ -0,0 +1,148 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, RavenDB indexes are defined on a specific entity type, referred to as a `Collection`, + and do not consider the inheritance hierarchy. + +* In this Page: + * [Polymorphic Data](../indexes/indexing-polymorphic-data.mdx#polymorphic-data) + * [Multi-Map Indexes](../indexes/indexing-polymorphic-data.mdx#multi-map-indexes) + * [Other Options](../indexes/indexing-polymorphic-data.mdx#other-options) + + +## Polymorphic Data + +Let's assume, for example, that we have the following inheritance hierarchy: + +![Figure 1: Polymorphic indexes](./assets/polymorphic_indexes_faq.png) + +When saving a `Cat` document, it will be assigned to the "Cats" collection, +while a `Dog` document will be placed in the "Dogs" collection. + +If we intend to create a simple Map-index for Cat documents based on their names, we would write: + + + +{`from cat in docs.Cats +select new \{ cat.name \} +`} + + + +And for dogs: + + + +{`from dog in docs.Dogs +select new \{ dog.name \} +`} + + + + +Querying each index results in documents only from the specific collection the index was defined for. +However, what if we need to query across ALL animal collections? + + +## Multi-Map Indexes + +The easiest way to do this is by writing a multi-map index such as: + + + + +{`IndexDefinition indexDefinition = new IndexDefinition(); +indexDefinition.setName("Animals/ByName"); +HashSet maps = new HashSet<>(); +maps.add("docs.Cats.Select(c => new { name = c.name})"); +maps.add("docs.Dogs.Select(c => new { name = c.name})"); +indexDefinition.setMaps(maps); +`} + + + + +{`public static class Animals_ByName extends AbstractJavaScriptIndexCreationTask { + public Animals_ByName() { + setMaps(Sets.newHashSet( + "map('cats', function (c){ return {name: c.name}})", + "map('dogs', function (d){ return {name: d.name}})" + )); + } +} +`} + + + + +And query it like this: + + + + +{`List results = session + .query(Animal.class, Query.index("Animals/ByName")) + .whereEquals("name", "Mitzy") + .toList(); +`} + + + + +{`from index 'Animals/ByName' +where name = 'Mitzy' +`} + + + + +## Other Options + +Another option would be to modify the way we generate the Collection for subclasses of `Animal`: + + + +{`try (IDocumentStore store = new DocumentStore()) \{ + store.getConventions().setFindCollectionName(clazz -> \{ + if (Animal.class.isAssignableFrom(clazz)) \{ + return "Animals"; + \} + + return DocumentConventions.defaultGetCollectionName(clazz); + \}); +\} +`} + + + +Using this method, we can now index on all animals using: + + + +{`from animal in docs.Animals +select new \{ animal.name \} +`} + + + +But what happens when you don't want to modify the entity name of an entity itself? + +You can create a polymorphic index using: + + + +{`from animal in docs.WhereEntityIs("Cats", "Dogs") +select new \{ animal.Name \} +`} + + + +It will generate an index that matches both Cats and Dogs. 
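+
+To visualize what this polymorphic map matches, consider the following illustrative documents
+(the document IDs are hypothetical):
+
+{`// cats/1 (Cats collection): { "name": "Mitzy" }
+// dogs/1 (Dogs collection): { "name": "Mitzy" }
+// dogs/2 (Dogs collection): { "name": "Rex" }
+
+// The WhereEntityIs("Cats", "Dogs") map indexes all three documents,
+// so the query 'where name = "Mitzy"' returns cats/1 and dogs/1.
+`}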
+ + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-nodejs.mdx b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-nodejs.mdx new file mode 100644 index 0000000000..6bd96b37be --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-nodejs.mdx @@ -0,0 +1,297 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, RavenDB indexes are defined on a specific entity type, referred to as a `Collection`, + and do not consider the inheritance hierarchy. + +* In this page: + * [The challenge](../indexes/indexing-polymorphic-data.mdx#the-challenge) + * [Possible solutions:](../indexes/indexing-polymorphic-data.mdx#possible-solutions) + * [Multi-map index](../indexes/indexing-polymorphic-data.mdx#multi-map-index) + * [Polymorphic index](../indexes/indexing-polymorphic-data.mdx#polymorphic-index) + * [Customize collection](../indexes/indexing-polymorphic-data.mdx#customize-collection) + + +## The challenge + +Let's assume, for example, that we have the following inheritance hierarchy: + +![Figure 1: Polymorphic indexes](./assets/polymorphic_indexes_faq.png) + +
+**By default**: +When saving a `Cat` document, it will be assigned to the "Cats" collection, +while a `Dog` document will be placed in the "Dogs" collection. + +If we intend to create a simple Map-index for Cat documents based on their names, we would write: + + + + +{`class Cats_ByName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + // Index the 'name' field from the CATS collection + this.map('Cats', cat => { + return { + name: cat.name + }; + }); + } +} +`} + + + + +{`class Animal { + constructor(name) { + this.name = name; + } +} + +class Cat extends Animal { } +`} + + + + +And for Dogs: + + + + +{`class Dogs_ByName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + // Index the 'name' field from the DOGS collection + this.map('Dogs', dog => { + return { + name: dog.name + }; + }); + } +} +`} + + + + +{`class Animal { + constructor(name) { + this.name = name; + } +} + +class Dog extends Animal { } +`} + + + + +**The challenge**: +Querying each index results in documents only from the specific collection the index was defined for. +However, what if we need to query across ALL animal collections? + + + +## Possible solutions + + + +
**Multi-Map Index**:
+Writing a [Multi-map index](../indexes/multi-map-indexes.mdx) enables getting results from all collections the index was defined for.
+
+
+
+
+{`class CatsAndDogs_ByName extends AbstractJavaScriptMultiMapIndexCreationTask {
+    constructor() {
+        super();
+
+        // Index documents from the CATS collection
+        this.map('Cats', cat => {
+            return {
+                name: cat.name
+            };
+        });
+
+        // Index documents from the DOGS collection
+        this.map('Dogs', dog => {
+            return {
+                name: dog.name
+            };
+        });
+    }
+}
+`}
+
+
+
+Query the Multi-map index:
+
+
+
+
+{`const catsAndDogs = await session
+    // Query the index
+    .query({ indexName: "CatsAndDogs/ByName" })
+    // Look for all Cats or Dogs that are named 'Mitzy' :))
+    .whereEquals("name", "Mitzy")
+    .all();
+
+// Results will include matching documents from the CATS and DOGS collection
+`}
+
+
+
+
+{`from index "CatsAndDogs/ByName"
+where name == "Mitzy"
+`}
+
+
+
+
+
+
+**Polymorphic index**:
+Another option is to create a polymorphic-index.
+
+Use the `WhereEntityIs` method within your index definition to index documents from all the collections
+listed in the method call.
+
+
+
+
+{`class CatsAndDogs_ByName extends AbstractCsharpIndexCreationTask {
+    constructor() {
+        super();
+
+        // Index documents from both the CATS collection and the DOGS collection
+        this.map = \`from animal in docs.WhereEntityIs("Cats", "Dogs")
+                     select new {
+                         animal.name
+                     }\`;
+    }
+}
+`}
+
+
+
+Query the polymorphic-index:
+
+
+
+
+{`const catsAndDogs = await session
+    // Query the index
+    .query({ indexName: "CatsAndDogs/ByName" })
+    // Look for all Cats or Dogs that are named 'Mitzy' :))
+    .whereEquals("name", "Mitzy")
+    .all();
+
+// Results will include matching documents from the CATS and DOGS collection
+`}
+
+
+
+
+{`from index "CatsAndDogs/ByName"
+where name == "Mitzy"
+`}
+
+
+
+
+
+
+**Customize collection**:
+This option involves customizing the collection name that is assigned to documents created from
+subclasses of the _Animal_ class.
+
+This is done by setting the [findCollectionName](../client-api/configuration/conventions.mdx#findcollectionname) convention on the document store.
+
+
+
+{`const documentStore = new DocumentStore(["serverUrl_1", "serverUrl_2", "..."], "DefaultDB");
+
+// Customize the findCollectionName convention
+documentStore.conventions.findCollectionName = (type) => \{
+    const typeName = type.name;
+
+    // Documents created from a 'Cat' or a 'Dog' entity will be assigned the "Animals" collection
+    if (typeName === "Cat" || typeName === "Dog") \{
+        return "Animals";
+    \}
+
+    // All other documents will be assigned the default collection name
+    return DocumentConventions.defaultGetCollectionName(type);
+\}
+`}
+ +Now you can define a Map-index on the "Animals" collection: + + + + +{`class Animals_ByName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + // Index documents from the ANIMALS collection + this.map('Animals', animal => { + return { + name: animal.name + }; + }); + } +} +`} + + + + +Query the index: + + + + +{`const animals = await session + // Query the index + .query({ indexName: "Animals/ByName" }) + // Look for all Animals that are named 'Mitzy' :)) + .whereEquals("name", "Mitzy") + .all(); + +// Results will include matching documents from the ANIMALS collection +`} + + + + +{`from index "Animals/ByName" +where name == "Mitzy" +`} + + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-php.mdx b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-php.mdx new file mode 100644 index 0000000000..319f416978 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-php.mdx @@ -0,0 +1,158 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, RavenDB indexes are defined on a specific entity type, referred to as a `Collection`, + and do not consider the inheritance hierarchy. + +* In this Page: + * [Polymorphic Data](../indexes/indexing-polymorphic-data.mdx#polymorphic-data) + * [Multi-Map Indexes](../indexes/indexing-polymorphic-data.mdx#multi-map-indexes) + * [Other Options](../indexes/indexing-polymorphic-data.mdx#other-options) + + +## Polymorphic Data + +Let's assume, for example, that we have the following inheritance hierarchy: + +![Figure 1: Polymorphic indexes](./assets/polymorphic_indexes_faq.png) + +When saving a `Cat` document, it will be assigned to the "Cats" collection, +while a `Dog` document will be placed in the "Dogs" collection. + +If we intend to create a simple Map-index for Cat documents based on their names, we would write: + + + +{`from cat in docs.Cats +select new \{ cat.Name \} +`} + + + +And for dogs: + + + +{`from dog in docs.Dogs +select new \{ dog.Name \} +`} + + + + +Querying each index results in documents only from the specific collection the index was defined for. +However, what if we need to query across ALL animal collections? 
+ + +## Multi-Map Indexes + +The easiest way to do this is by writing a multi-map index such as: + + + + +{`class Animals_ByName extends AbstractMultiMapIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->addMap("from c in docs.Cats select new { c.name }"); + $this->addMap("from d in docs.Dogs select new { d.name }"); + } +} +`} + + + + +{`class Animals_ByName extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('cats', function (c){ return {Name: c.Name}})", + "map('dogs', function (d){ return {Name: d.Name}})" + ]); + } +} +`} + + + + +And query it like this: + + + + +{`/** @var array $results */ +$results = $session + ->advanced() + ->documentQuery(Animal::class, Animals_ByName::class) + ->whereEquals("Name", "Mitzy") + ->toList(); +`} + + + + +{`from index 'Animals/ByName' +where Name = 'Mitzy' +`} + + + + +## Other Options + +Another option would be to modify the way we generate the Collection for subclasses of `Animal`: + + + +{`$store = new DocumentStore(); +$store->getConventions()->setFindCollectionName( + function (?string $className): string \{ + if (is_a($className, Animal::class)) \{ + return "Animals"; + \} + return DocumentConventions::defaultGetCollectionName($className); + \} +); +`} + + + +Using this method, we can now index on all animals using: + + + +{`from animal in docs.Animals +select new \{ animal.Name \} +`} + + + +But what happens when you don't want to modify the entity name of an entity itself? + +You can create a polymorphic index using: + + + +{`from animal in docs.WhereEntityIs("Cats", "Dogs") +select new \{ animal.Name \} +`} + + + +It will generate an index that matches both Cats and Dogs. + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-python.mdx b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-python.mdx new file mode 100644 index 0000000000..d55f33b8d3 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-polymorphic-data-python.mdx @@ -0,0 +1,142 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* By default, RavenDB indexes are defined on a specific entity type, referred to as a `Collection`, + and do not consider the inheritance hierarchy. + +* In this Page: + * [Polymorphic Data](../indexes/indexing-polymorphic-data.mdx#polymorphic-data) + * [Multi-Map Indexes](../indexes/indexing-polymorphic-data.mdx#multi-map-indexes) + * [Other Options](../indexes/indexing-polymorphic-data.mdx#other-options) + + +## Polymorphic Data + +Let's assume, for example, that we have the following inheritance hierarchy: + +![Figure 1: Polymorphic indexes](./assets/polymorphic_indexes_faq.png) + +When saving a `Cat` document, it will be assigned to the "Cats" collection, +while a `Dog` document will be placed in the "Dogs" collection. + +If we intend to create a simple Map-index for Cat documents based on their names, we would write: + + + +{`from cat in docs.Cats +select new \{ cat.Name \} +`} + + + +And for dogs: + + + +{`from dog in docs.Dogs +select new \{ dog.Name \} +`} + + + + +Querying each index results in documents only from the specific collection the index was defined for. +However, what if we need to query across ALL animal collections? 
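+
+With only such per-collection indexes, finding every animal named "Mitzy" would require one query
+per index and merging the results on the client (an illustrative sketch; these index names are hypothetical):
+
+{`# from index 'Cats/ByName' where Name = 'Mitzy'
+# from index 'Dogs/ByName' where Name = 'Mitzy'
+
+# The multi-map index shown in the next section answers this in a single query.
+`}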
+ + +## Multi-Map Indexes + +The easiest way to do this is by writing a multi-map index such as: + + + + +{`class Animals_ByName(AbstractMultiMapIndexCreationTask): + def __init__(self): + super().__init__() + self._add_map("from c in docs.Cats select new { c.name }") + self._add_map("from d in docs.Dogs select new { d.name }") +`} + + + + +{`class Animals_ByName(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + "map('cats', function (c){ return {Name: c.Name}})", + "map('dogs', function (d){ return {Name: d.Name}})", + } +`} + + + + +And query it like this: + + + + +{`results = list(session.query_index_type(Animals_ByName, Animal).where_equals("name", "Mitzy")) +`} + + + + +{`from index 'Animals/ByName' +where Name = 'Mitzy' +`} + + + + +## Other Options + +Another option would be to modify the way we generate the Collection for subclasses of `Animal`: + + + +{`store = DocumentStore() + +def _custom_find_collection_name(object_type: Type) -> str: + if issubclass(object_type, Animal): + return "Animals" + return DocumentConventions.default_get_collection_name(object_type) + +store.conventions.find_collection_name = _custom_find_collection_name +`} + + + +Using this method, we can now index on all animals using: + + + +{`from animal in docs.Animals +select new \{ animal.Name \} +`} + + + +But what happens when you don't want to modify the entity name of an entity itself? + +You can create a polymorphic index using: + + + +{`from animal in docs.WhereEntityIs("Cats", "Dogs") +select new \{ animal.Name \} +`} + + + +It will generate an index that matches both Cats and Dogs. + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-related-documents-csharp.mdx b/versioned_docs/version-7.1/indexes/_indexing-related-documents-csharp.mdx new file mode 100644 index 0000000000..c176df9eb1 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-related-documents-csharp.mdx @@ -0,0 +1,446 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* As described in [modeling considerations in RavenDB](https://ravendb.net/learn/inside-ravendb-book/reader/4.0/3-document-modeling#summary), + it is recommended for documents to be: independent, isolated, and coherent. + However, to accommodate varied models, **documents can reference other documents**. + +* The related data from a referenced (related) document can be indexed, + this will allow querying the collection by the indexed related data. + +* The related documents that are loaded in the index definition can be either **Tracked** or **Not-Tracked**. 
+ +* In this page: + + * [What are related documents](../indexes/indexing-related-documents.mdx#what-are-related-documents) + + + * [Index related documents - With tracking](../indexes/indexing-related-documents.mdx#index-related-documents---with-tracking) + * [Example I - basic](../indexes/indexing-related-documents.mdx#example-i---basic) + * [Example II - list](../indexes/indexing-related-documents.mdx#example-ii---list) + * [Tracking implications](../indexes/indexing-related-documents.mdx#tracking-implications) + * [Index related documents - No tracking](../indexes/indexing-related-documents.mdx#index-related-documents---no-tracking) + * [Example III - no tracking](../indexes/indexing-related-documents.mdx#index-related-documents---no-tracking) + * [No-tracking implications](../indexes/indexing-related-documents.mdx#no-tracking-implications) + * [Document changes that cause re-indexing](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing) + * [LoadDocument Syntax](../indexes/indexing-related-documents.mdx#loaddocument-syntax) + + + +## What are related documents + +* Whenever a document references another document, the referenced document is called a **Related Document**. + +* In the image below, document `products/34-A` references documents `categories/1-A` & `suppliers/16-A`, + which are considered Related Documents. + ![Referencing related documents](./assets/index-related-documents.png) + + + +## Index related documents - With tracking + +### Example I - basic + +* **What is tracked**: + Both the documents from the **indexed collection** and the **indexed related documents** are tracked for changes. + Re-indexing will be triggered per any change in either collection. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +* **The index**: + Following the above `Product - Category` relationship from the Northwind sample database, + an index defined on the Products collection can index data from the related Category document. + + + + +{`public class Products_ByCategoryName : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string CategoryName { get; set; } + } + + public Products_ByCategoryName() + { + Map = products => from product in products + + // Call LoadDocument to load the related Category document + // The document ID to load is specified by 'product.Category' + let category = LoadDocument(product.Category) + + select new IndexEntry + { + // Index the Name field from the related Category document + CategoryName = category.Name + }; + + // Since NoTracking was Not specified, + // then any change to either Products or Categories will trigger reindexing + } +} +`} + + + + +{`public class Products_ByCategoryName_JS : AbstractJavaScriptIndexCreationTask +{ + public Products_ByCategoryName_JS() + { + Maps = new HashSet() + { + // Call method 'load' to load the related Category document + // The document ID to load is specified by 'product.Category' + // The Name field from the related Category document will be indexed + + @"map('products', function(product) { + let category = load(product.Category, 'Categories') + return { + CategoryName: category.Name + }; + })" + + // Since noTracking was Not specified, + // then any change to either Products or Categories will trigger reindexing + }; + } +} +`} + + + + +* **The query**: + We can now query the index for Product documents by `CategoryName`, + i.e. 
get all matching Products that reference a Category that has the specified name term. + + + + +{`IList matchingProducts = session + .Query() + .Where(x => x.CategoryName == "Beverages") + .OfType() + .ToList(); +`} + + + + +{`IList matchingProducts = await asyncSession + .Query() + .Where(x => x.CategoryName == "Beverages") + .OfType() + .ToListAsync(); +`} + + + + +{`from index "Products/ByCategoryName" +where CategoryName == "Beverages" +`} + + + +### Example II - list + +* **The documents**: + + +{`// The referencing document +public class Author +\{ + public string Id \{ get; set; \} + public string Name \{ get; set; \} + + // Referencing a list of related document IDs + public List BookIds \{ get; set; \} +\} + +// The related document +public class Book +\{ + public string Id \{ get; set; \} + public string Name \{ get; set; \} +\} +`} + + + +* **The index**: + This index will index all names of the related Book documents. + + + + +{`public class Authors_ByBooks : AbstractIndexCreationTask +{ + public class IndexEntry + { + public IEnumerable BookNames { get; set; } + } + + public Authors_ByBooks() + { + Map = authors => from author in authors + select new IndexEntry + { + // For each Book ID, call LoadDocument and index the book's name + BookNames = author.BookIds.Select(x => LoadDocument(x).Name) + }; + + // Since NoTracking was Not specified, + // then any change to either Authors or Books will trigger reindexing + } +} +`} + + + + +{`public class Authors_ByBooks_JS : AbstractJavaScriptIndexCreationTask +{ + public Authors_ByBooks_JS() + { + Maps = new HashSet() + { + // For each Book ID, call 'load' and index the book's name + @"map('Author', function(author) { + return { + Books: author.BooksIds.map(x => load(x, 'Books').Name) + } + })" + + // Since NoTracking was Not specified, + // then any change to either Authors or Books will trigger reindexing + }; + } +} +`} + + + + +* **The query**: + We can now query the index for Author documents by a book's name, + i.e. get all Authors that have the specified book's name in their list. + + + +{`// Get all authors that have books with title: "The Witcher" +IList matchingAuthors = session + .Query() + .Where(x => x.BookNames.Contains("The Witcher")) + .OfType() + .ToList(); +`} + + + + +{`// Get all authors that have books with title: "The Witcher" +IList matchingAuthors = await asyncSession + .Query() + .Where(x => x.BookNames.Contains("The Witcher")) + .OfType() + .ToListAsync(); +`} + + + + +{`// Get all authors that have books with title: "The Witcher" +from index "Authors/ByBooks" +where BookNames = "The Witcher" +`} + + + + +### Tracking implications + +* Indexing related data with tracking can be a useful way to query documents by their related data. + However, that may come with performance costs. + +* **Re-indexing** will be triggered whenever any document in the collection that is referenced by `LoadDocument` is changed. + Even when indexing just a single field from the related document, any change to any other field will cause re-indexing. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +* Frequent re-indexing will increase CPU usage and reduce performance, + and index results may be stale for prolonged periods. + +* Tracking indexed related data is more useful when the indexed related collection is known not to change much. 
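If tracked related data does change frequently, staleness can also be handled at query time. The following is a minimal sketch (using the C# client and the `Products_ByCategoryName` index from Example I; the 5-second timeout is an arbitrary value for illustration) of optionally waiting for the index to catch up before reading results:



{`// A sketch: optionally wait (up to a timeout) for non-stale results
// when frequent re-indexing makes staleness likely
IList<Product> matchingProducts = session
    .Query<Products_ByCategoryName.IndexEntry, Products_ByCategoryName>()
    .Customize(x => x.WaitForNonStaleResults(TimeSpan.FromSeconds(5)))
    .Where(x => x.CategoryName == "Beverages")
    .OfType<Product>()
    .ToList();
`}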
+ + + + + +## Index related documents - No tracking + +### Example III - no tracking + +* **What is tracked**: + * Only the documents from the **indexed collection** are tracked for changes and can trigger re-indexing. + Any change done to any document in the **indexed related documents** will Not trigger re-indexing. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +* **The index**: + + + +{`public class Products_ByCategoryName_NoTracking : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string CategoryName { get; set; } + } + + public Products_ByCategoryName_NoTracking() + { + Map = products => from product in products + + // Call NoTracking.LoadDocument to load the related Category document w/o tracking + let category = NoTracking.LoadDocument(product.Category) + + select new IndexEntry + { + // Index the Name field from the related Category document + CategoryName = category.Name + }; + + // Since NoTracking is used - + // then only the changes to Products will trigger reindexing + } +} +`} + + + + +{`public class Products_ByCategoryName_NoTracking_JS : AbstractJavaScriptIndexCreationTask +{ + public Products_ByCategoryName_NoTracking_JS() + { + Maps = new HashSet() + { + // Call 'noTracking.load' to load the related Category document w/o tracking + + @"map('products', function(product) { + let category = noTracking.load(product.Category, 'Categories') + return { + CategoryName: category.Name + }; + })" + + // Since noTracking is used - + // then only the changes to Products will trigger reindexing + }; + } +} +`} + + + + +* **The query**: + When querying the index for Product documents by `CategoryName`, + results will be based on the related data that was **first indexed** when the index was deployed. + + + + +{`IList matchingProducts = session + .Query() + .Where(x => x.CategoryName == "Beverages") + .OfType() + .ToList(); +`} + + + + +{`IList matchingProducts = await asyncSession + .Query() + .Where(x => x.CategoryName == "Beverages") + .OfType() + .ToListAsync(); +`} + + + + +{`from index "Products/ByCategoryName/NoTracking" +where CategoryName == "Beverages" +`} + + + + +### No-tracking implications + +* Indexing related data with no-tracking can be a useful way to query documents by their related data. + However, that may come with some data accuracy costs. + +* **Re-indexing** will Not be triggered when documents in the collection that is referenced by `LoadDocument` are changed. + Although this may save system resources, the index entries and the indexed terms may not be updated with the current state of data. + +* Indexing related data without tracking is useful when the indexed related data is fixed and not supposed to change. + + + + + +## Document changes that cause re-indexing + +* The following changes done to a document will trigger re-indexing: + * Any modification to any document field (not just to the indexed fields) + * Adding/Deleting an attachment + * Creating a new Time series (modifying existing will not trigger) + * Creating a new Counter (modifying existing will not trigger) + +* Any such change done on any document in the **indexed collection** will trigger re-indexing. + +* Any such change done on any document in the **indexed related documents** will trigger re-indexing + only if `NoTracking` was Not used in the index definition. 
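To make the tracking rule above concrete, here is a minimal sketch, assuming an initialized document store in `store`, the Northwind sample data, and the `Products_ByCategoryName` index from Example I. Modifying a field that the index does not even use still counts as a change to a tracked related document:



{`// A sketch: any change to a tracked related document triggers re-indexing
using (var session = store.OpenSession())
{
    Category category = session.Load<Category>("categories/1-A");

    // 'Description' is not indexed by Products_ByCategoryName,
    // yet saving this change will still cause the referencing
    // Product documents to be re-indexed
    category.Description = "Soft drinks, coffees, teas, beers, and ales";

    session.SaveChanges();
}
`}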
+ + + +## LoadDocument syntax + +#### Syntax for LINQ-index: + + + +{`T LoadDocument(string relatedDocumentId); + +T LoadDocument(string relatedDocumentId, string relatedCollectionName); + +T[] LoadDocument(IEnumerable relatedDocumentIds); + +T[] LoadDocument(IEnumerable relatedDocumentIds, string relatedCollectionName); +`} + + +#### Syntax for JavaScript-index: + + + +{`object load(relatedDocumentId, relatedCollectionName); +`} + + + +| Parameters | | | +|---------------------------|-----------------------|----------------------------------------| +| **relatedDocumentId** | `string` | ID of the related document to load | +| **relatedCollectionName** | `string` | The related collection name | +| **relatedDocumentIds** | `IEnumerable` | A list of related document IDs to load | + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-related-documents-java.mdx b/versioned_docs/version-7.1/indexes/_indexing-related-documents-java.mdx new file mode 100644 index 0000000000..5142fdf101 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-related-documents-java.mdx @@ -0,0 +1,197 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To extend indexing capabilities and simplify many scenarios, we have introduced the possibility for indexing related documents. + +## Example I + +Let's consider a simple `Product - Category` scenario where you want to look for a `Product` by `Category Name`. + +Without this feature, you would have to create a fairly complex multiple map-reduce index. This is why the `LoadDocument` function was introduced. + + + + +{`public static class Products_ByCategoryName extends AbstractIndexCreationTask { + public Products_ByCategoryName() { + map = "docs.Products.Select(product => new { " + + " CategoryName = (this.LoadDocument(product.Category, \\"Categories\\")).Name " + + "})"; + } +} +`} + + + + +{`IndexDefinition indexDefinition = new IndexDefinition(); +indexDefinition.setName("Products/ByCategoryName"); +indexDefinition.setMaps(Collections.singleton("from product in products " + + " select new " + + " { " + + " CategoryName = LoadDocument(product.Category, \\"\\"Categories\\"\\").Name " + + " }")); + +store.maintenance().send(new PutIndexesOperation(indexDefinition)); +`} + + + + +{`public static class Products_ByCategoryName extends AbstractJavaScriptIndexCreationTask { + public Products_ByCategoryName() { + setMaps(Sets.newHashSet("map('products', function(product ){\\n" + + " return {\\n" + + " CategoryName : load(product .Category, 'Categories').Name,\\n" + + " }\\n" + + " })")); + } +} +`} + + + + +Now we will be able to search for products using the `CategoryName` as a parameter: + + + +{`List results = session + .query(Product.class, Products_ByCategoryName.class) + .whereEquals("CategoryName", "Beverages") + .toList(); +`} + + + +## Example II + +Our next scenario will show us how indexing of more complex relationships is also trivial. 
Let's consider the following case:



{`public static class Book \{
    private String id;
    private String name;

    public String getId() \{
        return id;
    \}

    public void setId(String id) \{
        this.id = id;
    \}

    public String getName() \{
        return name;
    \}

    public void setName(String name) \{
        this.name = name;
    \}
\}

public static class Author \{
    private String id;
    private String name;
    private List<String> bookIds;

    public String getId() \{
        return id;
    \}

    public void setId(String id) \{
        this.id = id;
    \}

    public String getName() \{
        return name;
    \}

    public void setName(String name) \{
        this.name = name;
    \}

    public List<String> getBookIds() \{
        return bookIds;
    \}

    public void setBookIds(List<String> bookIds) \{
        this.bookIds = bookIds;
    \}
\}
`}



To create an index with `Author Name` and a list of `Book Names`, we need to do the following:



{`public static class Authors_ByNameAndBooks extends AbstractIndexCreationTask {
    public Authors_ByNameAndBooks() {
        map = "docs.Authors.Select(author => new { " +
                "    name = author.name, " +
                "    books = author.bookIds.Select(x => (this.LoadDocument(x, \\"Books\\")).name) " +
                "})";
    }
}
`}



{`IndexDefinition indexDefinition = new IndexDefinition();
indexDefinition.setName("Authors/ByNameAndBooks");
indexDefinition.setMaps(Collections.singleton("from author in docs.Authors " +
        "            select new " +
        "            { " +
        "                name = author.name, " +
        "                books = author.bookIds.Select(x => LoadDocument(x, \\"\\"Books\\"\\").name) " +
        "            }"));
store.maintenance().send(new PutIndexesOperation(indexDefinition));
`}



{`public static class Authors_ByNameAndBookNames extends AbstractJavaScriptIndexCreationTask {
    public Authors_ByNameAndBookNames() {
        setMaps(Sets.newHashSet("map('Authors', function(a){\\n" +
                "    return {\\n" +
                "        name: a.name,\\n" +
                "        books: a.bookIds.map(x => load(x, 'Books').name)\\n" +
                "    }\\n" +
                "})"));
    }
}
`}





{`List<Author> results = session
    .query(Author.class, Authors_ByNameAndBooks.class)
    .whereEquals("name", "Andrzej Sapkowski")
    .whereEquals("books", "The Witcher")
    .toList();
`}



## Remarks


Indexes are updated automatically when related documents change.



Using `LoadDocument` adds the loaded document to the tracking list. This may cause very expensive calculations, especially when multiple documents track the same document.



diff --git a/versioned_docs/version-7.1/indexes/_indexing-related-documents-nodejs.mdx b/versioned_docs/version-7.1/indexes/_indexing-related-documents-nodejs.mdx
new file mode 100644 index 0000000000..65e01214eb
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-related-documents-nodejs.mdx
@@ -0,0 +1,398 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';



* As described in [modeling considerations in RavenDB](https://ravendb.net/learn/inside-ravendb-book/reader/4.0/3-document-modeling#summary),
  it is recommended for documents to be: independent, isolated, and coherent.
  However, to accommodate varied models, **documents can reference other documents**.

* The related data from a referenced (related) document can be indexed;
  this allows querying the collection by the indexed related data.

* The related documents that are loaded in the index definition can be either **Tracked** or **Not-Tracked**.
+ +* In this page: + + * [What are related documents](../indexes/indexing-related-documents.mdx#what-are-related-documents) + + + * [Index related documents - With tracking](../indexes/indexing-related-documents.mdx#index-related-documents---with-tracking) + * [Example I - basic](../indexes/indexing-related-documents.mdx#example-i---basic) + * [Example II - list](../indexes/indexing-related-documents.mdx#example-ii---list) + * [Tracking implications](../indexes/indexing-related-documents.mdx#tracking-implications) + * [Index related documents - No tracking](../indexes/indexing-related-documents.mdx#index-related-documents---no-tracking) + * [Example III - no tracking](../indexes/indexing-related-documents.mdx#index-related-documents---no-tracking) + * [No-tracking implications](../indexes/indexing-related-documents.mdx#no-tracking-implications) + * [Document changes that cause re-indexing](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing) + * [LoadDocument Syntax](../indexes/indexing-related-documents.mdx#loaddocument-syntax) + + + +## What are related documents + +* Whenever a document references another document, the referenced document is called a **Related Document**. + +* In the image below, document `products/34-A` references documents `categories/1-A` & `suppliers/16-A`, + which are considered Related Documents. + ![Referencing related documents](./assets/index-related-documents.png) + + + +## Index related documents - With tracking + + +#### Example I - basic +**What is tracked**: + +* Both the documents from the **indexed collection** and the **indexed related documents** are tracked for changes. + Re-indexing will be triggered per any change in either collection. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +**The index**: + +* Following the above `Product - Category` relationship from the Northwind sample database, + an index defined on the Products collection can index data from the related Category document. + + + + +{`class Products_ByCategoryName extends AbstractCsharpIndexCreationTask { + constructor() { + super(); + + // Call LoadDocument to load the related Category document + // The document ID to load is specified by 'product.Category' + // The Name field from the related Category document will be indexed + + this.map = \`docs.Products.Select(product => new { + CategoryName = (this.LoadDocument(product.Category, "Categories")).Name + })\`; + + // Since NoTracking was Not specified, + // then any change to either Products or Categories will trigger reindexing + } +} +`} + + + + +{`class Products_ByCategoryName_JS extends AbstractJavaScriptIndexCreationTask { + constructor () { + super(); + + const { load } = this.mapUtils(); + + this.map("Products", product => { + return { + // Call method 'load' to load the related Category document + // The document ID to load is specified by 'product.Category' + // The Name field from the related Category document will be indexed + categoryName: load(product.Category, "Categories").Name + + // Since NoTracking was Not specified, + // then any change to either Products or Categories will trigger reindexing + }; + }); + } +} +`} + + + + +**The query**: + +* We can now query the index for Product documents by `CategoryName`, + i.e. get all matching Products that reference a Category that has the specified name term. 
+ + + + +{`const matchingProducts = await session + .query({indexName: "Products/ByCategoryName"}) + .whereEquals("CategoryName", "Beverages") + .all(); +`} + + + + +{`from index "Products/ByCategoryName" +where CategoryName == "Beverages" +`} + + + + + + + +#### Example II - list +**The documents**: + + + +{`// The referencing document +class Author \{ + constructor(id, name, bookIds) \{ + this.id = id; + this.name = name; + + // Referencing a list of related document IDs + this.bookIds = bookIds; + \} +\} +// The related document +class Book \{ + constructor(id, name) \{ + this.id = id; + this.name = name; + \} +\} +`} + + + +**The index**: + +* This index will index all names of the related Book documents. + + + + +{`class Authors_ByBooks extends AbstractCsharpIndexCreationTask { + constructor() { + super(); + + // For each Book ID, call LoadDocument and index the book's name + this.map = \`docs.Authors.Select(author => new { + BookNames = author.bookIds.Select(x => (this.LoadDocument(x, "Books")).name) + })\`; + + // Since NoTracking was Not specified, + // then any change to either Authors or Books will trigger reindexing + } +} +`} + + + + +{`class Authors_ByBooks_JS extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + const { load } = this.mapUtils(); + + this.map("Authors", author => { + return { + // For each Book ID, call 'load' and index the book's name + BookNames: author.bookIds.map(x => load(x, "Books").name) + + // Since NoTracking was Not specified, + // then any change to either Products or Categories will trigger reindexing + }; + }); + } +} +`} + + + + +**The query**: + +* We can now query the index for Author documents by a book's name, + i.e. get all Authors that have the specified book's name in their list. + + + + +{`const matchingProducts = await session + .query({indexName: "Authors/ByBooks"}) + .whereEquals("BookNames", "The Witcher") + .all(); +`} + + + + +{`// Get all authors that have books with title: "The Witcher" +from index "Authors/ByBooks" +where BookNames = "The Witcher" +`} + + + + + + + +#### Tracking implications + +* Indexing related data with tracking can be a useful way to query documents by their related data. + However, that may come with performance costs. + +* **Re-indexing** will be triggered whenever any document in the collection that is referenced by `LoadDocument` is changed. + Even when indexing just a single field from the related document, any change to any other field will cause re-indexing. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +* Frequent re-indexing will increase CPU usage and reduce performance, + and index results may be stale for prolonged periods. + +* Tracking indexed related data is more useful when the indexed related collection is known not to change much. + + + + + +## Index related documents - No tracking + + +#### Example III - no tracking +**What is tracked**: + +* Only the documents from the **indexed collection** are tracked for changes and can trigger re-indexing. + Any change done to any document in the **indexed related documents** will Not trigger re-indexing. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). 
+ +**The index**: + + + + +{`class Products_ByCategoryName_NoTracking extends AbstractCsharpIndexCreationTask { + constructor() { + super(); + + // Call NoTracking.LoadDocument to load the related Category document w/o tracking + this.map = \`docs.Products.Select(product => new { + CategoryName = (this.NoTracking.LoadDocument(product.Category, "Categories")).Name + })\`; + + // Since NoTracking is used - + // then only the changes to Products will trigger reindexing + } +} +`} + + + + +{`class Products_ByCategoryName_NoTracking_JS extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + const { noTracking } = this.mapUtils(); + + this.map("Products", product => { + return { + // Call 'noTracking.load' to load the related Category document w/o tracking + categoryName: noTracking.load(product.Category, "Categories").Name + }; + }); + + // Since noTracking is used - + // then only the changes to Products will trigger reindexing + } +} +`} + + + + +**The query**: + +* When querying the index for Product documents by `CategoryName`, + results will be based on the related data that was **first indexed** when the index was deployed. + + + + +{`const matchingProducts = await session + .query({indexName: "Products/ByCategoryName/NoTracking"}) + .whereEquals("CategoryName", "Beverages") + .all(); +`} + + + + +{`from index "Products/ByCategoryName/NoTracking" +where CategoryName == "Beverages" +`} + + + + + + + +#### No-tracking implications + +* Indexing related data with no-tracking can be a useful way to query documents by their related data. + However, that may come with some data accuracy costs. + +* **Re-indexing** will Not be triggered when documents in the collection that is referenced by `LoadDocument` are changed. + Although this may save system resources, the index entries and the indexed terms may not be updated with the current state of data. + +* Indexing related data without tracking is useful when the indexed related data is fixed and not supposed to change. + + + + + +## Document changes that cause re-indexing + +* The following changes done to a document will trigger re-indexing: + + * Any modification to any document field (not just to the indexed fields) + * Adding/Deleting an attachment + * Creating a new Time series (modifying existing will not trigger) + * Creating a new Counter (modifying existing will not trigger) + +* Any such change done on any document in the **indexed collection** will trigger re-indexing. + +* Any such change done on any document in the **indexed related documents** will trigger re-indexing + only if `NoTracking` was Not used in the index definition. 
+ + + +## LoadDocument syntax + +#### Syntax for LINQ-index: + + + +{`T LoadDocument(string relatedDocumentId); + +T LoadDocument(string relatedDocumentId, string relatedCollectionName); + +T[] LoadDocument(IEnumerable relatedDocumentIds); + +T[] LoadDocument(IEnumerable relatedDocumentIds, string relatedCollectionName); +`} + + +#### Syntax for JavaScript-index: + + + +{`object load(relatedDocumentId, relatedCollectionName); +`} + + + +| Parameters | | | +|---------------------------|-----------------------|----------------------------------------| +| **relatedDocumentId** | `string` | ID of the related document to load | +| **relatedCollectionName** | `string` | The related collection name | +| **relatedDocumentIds** | `IEnumerable` | A list of related document IDs to load | + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-related-documents-php.mdx b/versioned_docs/version-7.1/indexes/_indexing-related-documents-php.mdx new file mode 100644 index 0000000000..e9d3a79911 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-related-documents-php.mdx @@ -0,0 +1,491 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* As described in [modeling considerations in RavenDB](https://ravendb.net/learn/inside-ravendb-book/reader/4.0/3-document-modeling#summary), + it is recommended for documents to be: independent, isolated, and coherent. + However, to accommodate varied models, **documents can reference other documents**. + +* The related data from a referenced (related) document can be indexed, + this will allow querying the collection by the indexed related data. + +* The related documents that are loaded in the index definition can be either **Tracked** or **Not-Tracked**. + +* In this page: + + * [What are related documents](../indexes/indexing-related-documents.mdx#what-are-related-documents) + + + * [Index related documents - With tracking](../indexes/indexing-related-documents.mdx#index-related-documents---with-tracking) + * [Example I - basic](../indexes/indexing-related-documents.mdx#example-i---basic) + * [Example II - list](../indexes/indexing-related-documents.mdx#example-ii---list) + * [Tracking implications](../indexes/indexing-related-documents.mdx#tracking-implications) + * [Index related documents - No tracking](../indexes/indexing-related-documents.mdx#index-related-documents---no-tracking) + * [Example III - no tracking](../indexes/indexing-related-documents.mdx#index-related-documents---no-tracking) + * [No-tracking implications](../indexes/indexing-related-documents.mdx#no-tracking-implications) + * [Document changes that cause re-indexing](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing) + * [LoadDocument Syntax](../indexes/indexing-related-documents.mdx#loaddocument-syntax) + + + +## What are related documents + +* Whenever a document references another document, the referenced document is called a **Related Document**. + +* In the image below, document `products/34-A` references documents `categories/1-A` & `suppliers/16-A`, + which are considered Related Documents. + ![Referencing related documents](./assets/index-related-documents.png) + + + +## Index related documents - With tracking + +### Example I - basic + +* **What is tracked**: + Both the documents from the **indexed collection** and the **indexed related documents** are tracked for changes. 
+ Re-indexing will be triggered per any change in either collection. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +* **The index**: + Following the above `Product - Category` relationship from the Northwind sample database, + an index defined on the Products collection can index data from the related Category document. + + + + +{`class Products_ByCategoryName_IndexEntry +{ + private ?string $categoryName = null; + + public function getCategoryName(): ?string + { + return $this->categoryName; + } + + public function setCategoryName(?string $categoryName): void + { + $this->categoryName = $categoryName; + } +} +class Products_ByCategoryName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = + "from product in docs.Products " . + 'let category = this.LoadDocument(product.Category, "Categories") ' . + "select new { CategoryName = category.Name }"; + + // Since NoTracking was Not specified, + // then any change to either Products or Categories will trigger reindexing + } +} +`} + + + + +{`class Products_ByCategoryName_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // Call method 'load' to load the related Category document + // The document ID to load is specified by 'product.Category' + // The Name field from the related Category document will be indexed + $this->setMaps([ + "map('products', function(product) { " . + " let category = load(product.Category, 'Categories') " . + " return { " . + " CategoryName: category.Name " . + " }; " . + "})" + ]); + + // Since noTracking was Not specified, + // then any change to either Products or Categories will trigger reindexing + + } +} +`} + + + + +* **The query**: + We can now query the index for Product documents by `CategoryName`, + i.e. get all matching Products that reference a Category that has the specified name term. + + + + +{`$matchingProducts = $session + ->query(Products_ByCategoryName_IndexEntry::class, Products_ByCategoryName::class) + ->whereEquals("CategoryName", "Beverages") + ->ofType(Product::class) + ->toList(); +`} + + + + +{`from index "Products/ByCategoryName" +where CategoryName == "Beverages" +`} + + + +### Example II - list + +* **The documents**: + + +{`// The referencing document +class Author +\{ + private ?string $id = null; + private ?string $name = null; + + // Referencing a list of related document IDs + private ?StringArray $bookIds = null; + + public function getId(): ?string + \{ + return $this->id; + \} + + public function setId(?string $id): void + \{ + $this->id = $id; + \} + + public function getName(): ?string + \{ + return $this->name; + \} + + public function setName(?string $name): void + \{ + $this->name = $name; + \} + + public function getBookIds(): ?StringArray + \{ + return $this->bookIds; + \} + + public function setBookIds(?StringArray $bookIds): void + \{ + $this->bookIds = $bookIds; + \} +\} + +// The related document +class Book +\{ + private ?string $id = null; + private ?string $name = null; + + public function getId(): ?string + \{ + return $this->id; + \} + + public function setId(?string $id): void + \{ + $this->id = $id; + \} + + public function getName(): ?string + \{ + return $this->name; + \} + + public function setName(?string $name): void + \{ + $this->name = $name; + \} +\} +`} + + + +* **The index**: + This index will index all names of the related Book documents. 
+ + + + +{`class Authors_ByBooks_IndexEntry +{ + private ?StringArray $bookNames = null; + + public function getBookNames(): ?StringArray + { + return $this->bookNames; + } + + public function setBookNames(?StringArray $bookNames): void + { + $this->bookNames = $bookNames; + } +} +class Authors_ByBooks extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = + "from author in docs.Authors " . + "select new " . + "{" . + // For each Book ID, call LoadDocument and index the book's name + ' BookNames = author.BookIds.Select(x => LoadDocument(x, "Books").Name)' . + "}"; + + // Since NoTracking was Not specified, + // then any change to either Authors or Books will trigger reindexing + } +} +`} + + + + +{`class Authors_ByBooks_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + // For each Book ID, call 'load' and index the book's name + "map('Author', function(author) { + return { + Books: author.BooksIds.map(x => load(x, 'Books').Name) + } + })" + ]); + + // Since NoTracking was Not specified, + // then any change to either Authors or Books will trigger reindexing + } +} +`} + + + + +* **The query**: + We can now query the index for Author documents by a book's name, + i.e. get all Authors that have the specified book's name in their list. + + + +{`// Get all authors that have books with title: "The Witcher" +$matchingAuthors = $session + ->query(Authors_ByBooks_IndexEntry::class, Authors_ByBooks::class) + ->containsAny("BookNames", ["The Witcher"]) + ->ofType(Author::class) + ->toList(); +`} + + + + +{`// Get all authors that have books with title: "The Witcher" +from index "Authors/ByBooks" +where BookNames = "The Witcher" +`} + + + + +### Tracking implications + +* Indexing related data with tracking can be a useful way to query documents by their related data. + However, that may come with performance costs. + +* **Re-indexing** will be triggered whenever any document in the collection that is referenced by `LoadDocument` is changed. + Even when indexing just a single field from the related document, any change to any other field will cause re-indexing. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +* Frequent re-indexing will increase CPU usage and reduce performance, + and index results may be stale for prolonged periods. + +* Tracking indexed related data is more useful when the indexed related collection is known not to change much. + + + + + +## Index related documents - No tracking + +### Example III - no tracking + +* **What is tracked**: + * Only the documents from the **indexed collection** are tracked for changes and can trigger re-indexing. + Any change done to any document in the **indexed related documents** will Not trigger re-indexing. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). 
+ +* **The index**: + + + +{`class Products_ByCategoryName_NoTracking_IndexEntry +{ + private ?string $categoryName = null; + + public function getCategoryName(): ?string + { + return $this->categoryName; + } + + public function setCategoryName(?string $categoryName): void + { + $this->categoryName = $categoryName; + } +} + +class Products_ByCategoryName_NoTracking extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = + "from product in docs.Products " . + # Call NoTracking.LoadDocument to load the related Category document w/o tracking + 'let category = NoTracking.LoadDocument(product.Category, "Categories") ' . + "select new {" . + # Index the name field from the related Category document + " CategoryName = category.Name " . + "}"; + + // Since NoTracking is used - + // then only the changes to Products will trigger reindexing + } +} +`} + + + + +{`class Products_ByCategoryName_NoTracking_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + // Call 'noTracking.load' to load the related Category document w/o tracking + "map('products', function(product) { + let category = noTracking.load(product.Category, 'Categories') + return { + CategoryName: category.Name + }; + })" + ]); + + // Since noTracking is used - + // then only the changes to Products will trigger reindexing + } +} +`} + + + + +* **The query**: + When querying the index for Product documents by `CategoryName`, + results will be based on the related data that was **first indexed** when the index was deployed. + + + + +{`$matchingProducts = $session + ->query(Products_ByCategoryName_NoTracking_IndexEntry::class, Products_ByCategoryName_NoTracking::class) + ->whereEquals("CategoryName", "Beverages") + ->ofType(Product::class) + ->toList(); +`} + + + + +{`from index "Products/ByCategoryName/NoTracking" +where CategoryName == "Beverages" +`} + + + + +### No-tracking implications + +* Indexing related data with no-tracking can be a useful way to query documents by their related data. + However, that may come with some data accuracy costs. + +* **Re-indexing** will Not be triggered when documents in the collection that is referenced by `LoadDocument` are changed. + Although this may save system resources, the index entries and the indexed terms may not be updated with the current state of data. + +* Indexing related data without tracking is useful when the indexed related data is fixed and not supposed to change. + + + + + +## Document changes that cause re-indexing + +* The following changes done to a document will trigger re-indexing: + * Any modification to any document field (not just to the indexed fields) + * Adding/Deleting an attachment + * Creating a new Time series (modifying existing will not trigger) + * Creating a new Counter (modifying existing will not trigger) + +* Any such change done on any document in the **indexed collection** will trigger re-indexing. + +* Any such change done on any document in the **indexed related documents** will trigger re-indexing + only if `NoTracking` was Not used in the index definition. 
+ + + +## LoadDocument syntax + + + +{`T LoadDocument(string relatedDocumentId); + +T LoadDocument(string relatedDocumentId, string relatedCollectionName); + +T[] LoadDocument(IEnumerable relatedDocumentIds); + +T[] LoadDocument(IEnumerable relatedDocumentIds, string relatedCollectionName); +`} + + +#### Syntax for JavaScript-index: + + + +{`object load(relatedDocumentId, relatedCollectionName); +`} + + + +| Parameters | | | +|---------------------------|-----------------------|----------------------------------------| +| **relatedDocumentId** | `string` | ID of the related document to load | +| **relatedCollectionName** | `string` | The related collection name | +| **relatedDocumentIds** | `IEnumerable` | A list of related document IDs to load | + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-related-documents-python.mdx b/versioned_docs/version-7.1/indexes/_indexing-related-documents-python.mdx new file mode 100644 index 0000000000..b89d5a63ca --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-related-documents-python.mdx @@ -0,0 +1,381 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* As described in [modeling considerations in RavenDB](https://ravendb.net/learn/inside-ravendb-book/reader/4.0/3-document-modeling#summary), + it is recommended for documents to be: independent, isolated, and coherent. + However, to accommodate varied models, **documents can reference other documents**. + +* The related data from a referenced (related) document can be indexed, + this will allow querying the collection by the indexed related data. + +* The related documents that are loaded in the index definition can be either **Tracked** or **Not-Tracked**. + +* In this page: + + * [What are related documents](../indexes/indexing-related-documents.mdx#what-are-related-documents) + + + * [Index related documents - With tracking](../indexes/indexing-related-documents.mdx#index-related-documents---with-tracking) + * [Example I - basic](../indexes/indexing-related-documents.mdx#example-i---basic) + * [Example II - list](../indexes/indexing-related-documents.mdx#example-ii---list) + * [Tracking implications](../indexes/indexing-related-documents.mdx#tracking-implications) + * [Index related documents - No tracking](../indexes/indexing-related-documents.mdx#index-related-documents---no-tracking) + * [Example III - no tracking](../indexes/indexing-related-documents.mdx#index-related-documents---no-tracking) + * [No-tracking implications](../indexes/indexing-related-documents.mdx#no-tracking-implications) + * [Document changes that cause re-indexing](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing) + * [LoadDocument Syntax](../indexes/indexing-related-documents.mdx#loaddocument-syntax) + + + +## What are related documents + +* Whenever a document references another document, the referenced document is called a **Related Document**. + +* In the image below, document `products/34-A` references documents `categories/1-A` & `suppliers/16-A`, + which are considered Related Documents. + ![Referencing related documents](./assets/index-related-documents.png) + + + +## Index related documents - With tracking + +### Example I - basic + +* **What is tracked**: + Both the documents from the **indexed collection** and the **indexed related documents** are tracked for changes. + Re-indexing will be triggered per any change in either collection. 
+ (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +* **The index**: + Following the above `Product - Category` relationship from the Northwind sample database, + an index defined on the Products collection can index data from the related Category document. + + + + +{`class Products_ByCategoryName(AbstractIndexCreationTask): + class IndexEntry: + def __init__(self, category_name: str = None): + self.category_name = category_name + + def __init__(self): + super().__init__() + self.map = ( + "from product in docs.Products " + 'let category = this.LoadDocument(product.Category, "Categories") ' + "select new { category_name = category.Name }" + ) +`} + + + + +{`class Products_ByCategoryName_JS(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + # Call method 'load' to load the related Category document + # The document ID to load is specified by 'product.Category' + # The Name field from the related Category document will be indexed + """ + map('products', function(product) { + let category = load(product.Category, 'Categories') + return { + category_name: category.Name + }; + }) + """ + # Since no_tracking was not specified, + # then any change to either Products or Categories will trigger reindexing + } +`} + + + + +* **The query**: + We can now query the index for Product documents by `CategoryName`, + i.e. get all matching Products that reference a Category that has the specified name term. + + + + +{`matching_products = list( + session.query_index_type(Products_ByCategoryName, Products_ByCategoryName.IndexEntry) + .where_equals("category_name", "Beverages") + .of_type(Product) +) +`} + + + + +{`from index "Products/ByCategoryName" +where CategoryName == "Beverages" +`} + + + +### Example II - list + +* **The documents**: + + +{`# The referencing document +class Author: + def __init__(self, Id: str = None, name: str = None, book_ids: List[str] = None): + self.Id = Id + self.name = name + + # Referencing a list of related document IDs + self.book_ids = book_ids + + +# The related document +class Book: + def __init__(self, Id: str = None, name: str = None): + self.Id = Id + self.name = name +`} + + + +* **The index**: + This index will index all names of the related Book documents. + + + + +{`class Authors_ByBooks(AbstractIndexCreationTask): + class IndexEntry: + def __init__(self, book_names: List[str] = None): + self.book_names = book_names + + def __init__(self): + super().__init__() + self.map = ( + "from author in docs.Authors " + "select new " + "{" + # For each Book ID, call LoadDocument and index the book's name + ' book_names = author.book_ids.Select(x => LoadDocument(x, "Books").Name)' + "}" + ) + # Since no_tracking was not specified, + # then any change to either Authors or Books will trigger reindexing +`} + + + + +{`class Authors_ByBooks_JS(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + # For each Book ID, call 'load' and index the book's name + """ + map('Author', function(author) { + return { + books: author.BooksIds.map(x => load(x, 'Books').Name) + } + }) + """ + # Since no_tracking was not specified, + # then any change to either Authors or Books will trigger reindexing + } +`} + + + + +* **The query**: + We can now query the index for Author documents by a book's name, + i.e. get all Authors that have the specified book's name in their list. 
+ + + +{`# Get all authors that have books with title: "The Witcher" +matching_authors = list( + session.query_index_type(Authors_ByBooks, Authors_ByBooks.IndexEntry) + .where_in("book_names", ["The Witcher"]) + .of_type(Author) +) +`} + + + + +{`// Get all authors that have books with title: "The Witcher" +from index "Authors/ByBooks" +where BookNames = "The Witcher" +`} + + + + +### Tracking implications + +* Indexing related data with tracking can be a useful way to query documents by their related data. + However, that may come with performance costs. + +* **Re-indexing** will be triggered whenever any document in the collection that is referenced by `LoadDocument` is changed. + Even when indexing just a single field from the related document, any change to any other field will cause re-indexing. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +* Frequent re-indexing will increase CPU usage and reduce performance, + and index results may be stale for prolonged periods. + +* Tracking indexed related data is more useful when the indexed related collection is known not to change much. + + + + + +## Index related documents - No tracking + +### Example III - no tracking + +* **What is tracked**: + * Only the documents from the **indexed collection** are tracked for changes and can trigger re-indexing. + Any change done to any document in the **indexed related documents** will Not trigger re-indexing. + (See changes that cause re-indexing [here](../indexes/indexing-related-documents.mdx#document-changes-that-cause-re-indexing)). + +* **The index**: + + + +{`class Products_ByCategoryName_NoTracking(AbstractIndexCreationTask): + class IndexEntry: + def __init__(self, category_name: str = None): + self.category_name = category_name + + def __init__(self): + super().__init__() + self.map = ( + "from product in docs.Products " + # Call NoTracking.LoadDocument to load the related Category document w/o tracking + 'let category = NoTracking.LoadDocument(product.Category, "Categories") ' + "select new {" + # Index the name field from the related Category document + " category_name = category.Name " + "}" + ) + # Since NoTracking is used - + # then only the changes to Products will trigger reindexing +`} + + + + +{`class Products_ByCategoryName_NoTracking_JS(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + # Call 'noTracking.load' to load the related Category document w/o tracking + """ + map('products', function(product) { + let category = noTracking.load(product.Category, 'Categories') + return { + category_name: category.Name + }; + }) + """ + } + # Since noTracking is used - + # then only the changes to Products will trigger reindexing +`} + + + + +* **The query**: + When querying the index for Product documents by `CategoryName`, + results will be based on the related data that was **first indexed** when the index was deployed. + + + + +{`matching_products = list( + session.query_index_type( + Products_ByCategoryName_NoTracking, Products_ByCategoryName_NoTracking.IndexEntry + ) + .where_equals("category_name", "Beverages") + .of_type(Product) +) +`} + + + + +{`from index "Products/ByCategoryName/NoTracking" +where CategoryName == "Beverages" +`} + + + + +### No-tracking implications + +* Indexing related data with no-tracking can be a useful way to query documents by their related data. + However, that may come with some data accuracy costs. 
+ +* **Re-indexing** will Not be triggered when documents in the collection that is referenced by `LoadDocument` are changed. + Although this may save system resources, the index entries and the indexed terms may not be updated with the current state of data. + +* Indexing related data without tracking is useful when the indexed related data is fixed and not supposed to change. + + + + + +## Document changes that cause re-indexing + +* The following changes done to a document will trigger re-indexing: + * Any modification to any document field (not just to the indexed fields) + * Adding/Deleting an attachment + * Creating a new Time series (modifying existing will not trigger) + * Creating a new Counter (modifying existing will not trigger) + +* Any such change done on any document in the **indexed collection** will trigger re-indexing. + +* Any such change done on any document in the **indexed related documents** will trigger re-indexing + only if `NoTracking` was Not used in the index definition. + + + +## LoadDocument syntax + + + +{`T LoadDocument(string relatedDocumentId); + +T LoadDocument(string relatedDocumentId, string relatedCollectionName); + +T[] LoadDocument(IEnumerable relatedDocumentIds); + +T[] LoadDocument(IEnumerable relatedDocumentIds, string relatedCollectionName); +`} + + +#### Syntax for JavaScript-index: + + + +{`object load(relatedDocumentId, relatedCollectionName); +`} + + + +| Parameters | | | +|---------------------------|-----------------------|----------------------------------------| +| **relatedDocumentId** | `string` | ID of the related document to load | +| **relatedCollectionName** | `string` | The related collection name | +| **relatedDocumentIds** | `IEnumerable` | A list of related document IDs to load | + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-spatial-data-csharp.mdx b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-csharp.mdx new file mode 100644 index 0000000000..d52ac37a3f --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-csharp.mdx @@ -0,0 +1,366 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Documents that contain spatial data can be queried by spatial queries that employ geographical criteria. + There are two options: **dynamic** spatial query, and spatial **index** query. + + * **Dynamic spatial query** + A dynamic spatial query can be made on a collection (see [how to make a spatial query](../client-api/session/querying/how-to-make-a-spatial-query.mdx)). + An auto-index will be created by the server. + + * **Spatial index query** + Documents' spatial data can be indexed in a static index (**described in this article**), + and a spatial query can then be executed over this index (see [query a spatial index](../indexes/querying/spatial.mdx)). + +* In this page: + * [Create index with spatial field](../indexes/indexing-spatial-data.mdx#create-index-with-spatial-field) + * [Customize coordinate system and strategy](../indexes/indexing-spatial-data.mdx#customize-coordinate-system-and-strategy) + * [Spatial indexing strategies](../indexes/indexing-spatial-data.mdx#spatial-indexing-strategies) + + +## Create index with spatial field + +* Use `CreateSpatialField` to index spatial data in a static-index. + +* You can then retrieve documents based on geographical criteria when making a spatial query on this index-field. 
* A spatial index can also be defined from [Studio](../studio/database/indexes/create-map-index.mdx#spatial-field-options).

#### Example:



{`// Define an index with a spatial field
public class Events_ByNameAndCoordinates : AbstractIndexCreationTask<Event>
{
    public Events_ByNameAndCoordinates()
    {
        Map = events => from e in events
                        select new
                        {
                            Name = e.Name,
                            // Call 'CreateSpatialField' to create a spatial index-field
                            // Field 'Coordinates' will be composed of lat & lng supplied from the document
                            Coordinates = CreateSpatialField(e.Latitude, e.Longitude)

                            // Documents can be retrieved
                            // by making a spatial query on the 'Coordinates' index-field
                        };
    }
}

public class Event
{
    public string Id { get; set; }
    public string Name { get; set; }
    public double Latitude { get; set; }
    public double Longitude { get; set; }
}
`}



{`// Define an index with a spatial field
public class EventsWithWKT_ByNameAndWKT : AbstractIndexCreationTask<EventWithWKT>
{
    public EventsWithWKT_ByNameAndWKT()
    {
        Map = events => from e in events
                        select new
                        {
                            Name = e.Name,
                            // Call 'CreateSpatialField' to create a spatial index-field
                            // Field 'WKT' will be composed of the WKT string supplied from the document
                            WKT = CreateSpatialField(e.WKT)

                            // Documents can be retrieved
                            // by making a spatial query on the 'WKT' index-field
                        };
    }
}

public class EventWithWKT
{
    public string Id { get; set; }
    public string Name { get; set; }
    public string WKT { get; set; }
}
`}



{`public class Events_ByNameAndCoordinates_JS : AbstractJavaScriptIndexCreationTask
{
    public Events_ByNameAndCoordinates_JS()
    {
        Maps = new HashSet<string>
        {
            @"map('events', function (e) {
                return {
                    Name: e.Name,
                    Coordinates: createSpatialField(e.Latitude, e.Longitude)
                };
            })"
        };
    }
}
`}



#### Syntax:



{`object CreateSpatialField(double? lat, double? lng); // Latitude/Longitude coordinates
object CreateSpatialField(string shapeWkt);           // Shape in WKT string format
`}



## Customize coordinate system and strategy

* For each spatial index-field, you can specify the **coordinate system** and **strategy** to be used
  during indexing and when processing the data at query time.

* RavenDB supports both the `Geography` and `Cartesian` systems with the following strategies:

  * Geography system:
    * BoundingBox
    * GeoHashPrefixTree
    * QuadPrefixTree

  * Cartesian system:
    * BoundingBox
    * QuadPrefixTree

* **By default**, the `GeoHashPrefixTree` strategy is used with `GeoHashLevel` set to **9**.
  Use the `Spatial` method from `AbstractIndexCreationTask` to modify this setting (see the sketch below).

* The performance cost of spatial indexing is directly related to the tree level chosen.
  Learn more about each strategy [below](../indexes/indexing-spatial-data.mdx#spatial-indexing-strategies).

* Note: Modifying the strategy after the index has been created & deployed will trigger re-indexing.
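For reference, each system/strategy pair listed above corresponds to a `SpatialOptionsFactory` call that can be passed to `Spatial`. The following is a minimal sketch of the available choices (the tree levels shown are the defaults quoted in this article, and the `SpatialBounds` values are arbitrary); the full example below applies one of these calls:



{`// A sketch of the strategy choices; one call per spatial field,
// placed inside the index constructor
Spatial("Coordinates", factory => factory.Geography.BoundingBoxIndex());
Spatial("Coordinates", factory => factory.Geography.GeohashPrefixTreeIndex(maxTreeLevel: 9));
Spatial("Coordinates", factory => factory.Geography.QuadPrefixTreeIndex(maxTreeLevel: 23));
Spatial("Coordinates", factory => factory.Cartesian.BoundingBoxIndex());
Spatial("Coordinates", factory => factory.Cartesian.QuadPrefixTreeIndex(maxTreeLevel: 23,
    bounds: new SpatialBounds { MinX = 0, MaxX = 200, MinY = 0, MaxY = 200 }));
`}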
#### Example:



{`public class Events_ByNameAndCoordinates_Custom : AbstractIndexCreationTask<Event>
{
    public Events_ByNameAndCoordinates_Custom()
    {
        Map = events => from e in events
                        select new
                        {
                            Name = e.Name,
                            // Define a spatial index-field
                            Coordinates = CreateSpatialField(e.Latitude, e.Longitude)
                        };

        // Set the spatial indexing strategy for the spatial field 'Coordinates'
        Spatial("Coordinates", factory => factory.Cartesian.BoundingBoxIndex());
    }
}
`}



{`public class Events_ByNameAndCoordinates_Custom_JS : AbstractJavaScriptIndexCreationTask
{
    public Events_ByNameAndCoordinates_Custom_JS()
    {
        // Define index fields
        Maps = new HashSet<string>
        {
            @"map('events', function (e) {
                return {
                    Name: e.Name,
                    Coordinates: createSpatialField(e.Latitude, e.Longitude)
                };
            })"
        };

        // Customize index fields
        Fields = new Dictionary<string, IndexFieldOptions>
        {
            ["Coordinates"] = new IndexFieldOptions
            {
                Spatial = new SpatialOptions
                {
                    Type = SpatialFieldType.Cartesian,
                    Strategy = SpatialSearchStrategy.BoundingBox
                }
            }
        };
    }
}
`}



#### Syntax:



{`public class SpatialOptionsFactory
\{
    public GeographySpatialOptionsFactory Geography;
    public CartesianSpatialOptionsFactory Cartesian;
\}
`}



{`// Default is GeohashPrefixTree strategy with maxTreeLevel set to 9
SpatialOptions Default(SpatialUnits circleRadiusUnits = SpatialUnits.Kilometers);

SpatialOptions BoundingBoxIndex(SpatialUnits circleRadiusUnits = SpatialUnits.Kilometers);

SpatialOptions GeohashPrefixTreeIndex(int maxTreeLevel,
    SpatialUnits circleRadiusUnits = SpatialUnits.Kilometers);

SpatialOptions QuadPrefixTreeIndex(int maxTreeLevel,
    SpatialUnits circleRadiusUnits = SpatialUnits.Kilometers);
`}



{`SpatialOptions BoundingBoxIndex();
SpatialOptions QuadPrefixTreeIndex(int maxTreeLevel, SpatialBounds bounds);

public class SpatialBounds
{
    public double MinX;
    public double MaxX;
    public double MinY;
    public double MaxY;
}
`}



## Spatial indexing strategies

#### BoundingBox strategy

* The bounding box strategy is the simplest.
  Given a spatial shape, such as a point, circle, or polygon, the shape's bounding box is computed
  and the spatial coordinates (minX, minY, maxX, maxY) that enclose the shape are indexed.

* When making a query,
  RavenDB translates the query criteria to the same bounding box system used for indexing.

* The bounding box strategy is cheaper at indexing time and can produce quick queries,
  but at the expense of the level of accuracy you can get.

* Read more about bounding box [here](https://en.wikipedia.org/wiki/Minimum_bounding_rectangle).
#### GeoHashPrefixTree strategy

* Geohash is a latitude/longitude representation system that describes Earth as a grid with 32 cells, assigning an alphanumeric character to each grid cell.
  Each grid cell is further divided into 32 smaller chunks, and each chunk has an alphanumeric character assigned as well, and so on.

* E.g., the location of 'New York' in the United States is represented by the following geohash: [DR5REGY6R](http://geohash.org/dr5regy6r),
  which represents the `40.7144 -74.0060` coordinates.
  Removing characters from the end of the geohash will decrease the precision level.

* The `maxTreeLevel` determines the length of the geohash used for the indexing, which in turn affects accuracy.
  By default, it is set to **9**, providing a resolution of approximately 2.5 meters.
+
+* More information about geohash uses, decoding algorithm, and limitations can be found [here](https://en.wikipedia.org/wiki/Geohash).
+
+
+
+| Level | E-W Distance at Equator | N-S Distance at Equator |
+|:----- |:------------------------|:------------------------|
+| 12    | ~3.7cm                  | ~1.8cm                  |
+| 11    | ~14.9cm                 | ~14.9cm                 |
+| 10    | ~1.19m                  | ~0.60m                  |
+| **9** | **~4.78m**              | **~4.78m**              |
+| 8     | ~38.2m                  | ~19.1m                  |
+| 7     | ~152.8m                 | ~152.8m                 |
+| 6     | ~1.2km                  | ~0.61km                 |
+| 5     | ~4.9km                  | ~4.9km                  |
+| 4     | ~39km                   | ~19.6km                 |
+| 3     | ~157km                  | ~157km                  |
+| 2     | ~1252km                 | ~626km                  |
+| 1     | ~5018km                 | ~5018km                 |
+
+
+#### QuadPrefixTree strategy
+
+* The QuadTree represents Earth as a grid consisting of four cells (also known as buckets).
+  Similar to GeoHash, each cell is assigned a letter, and is recursively divided into four more cells, creating a hierarchical structure.
+
+* By default, the precision level (`maxTreeLevel`) for QuadPrefixTree is **23**.
+
+* More information about QuadTree can be found [here](https://en.wikipedia.org/wiki/Quadtree).
+
+
+
+| Level  | Distance at Equator |
+|:-------|:--------------------|
+| 30     | ~4cm                |
+| 29     | ~7cm                |
+| 28     | ~15cm               |
+| 27     | ~30cm               |
+| 26     | ~60cm               |
+| 25     | ~1.19m              |
+| 24     | ~2.39m              |
+| **23** | **~4.78m**          |
+| 22     | ~9.56m              |
+| 21     | ~19.11m             |
+| 20     | ~38.23m             |
+| 19     | ~76.23m             |
+| 18     | ~152.92m            |
+| 17     | ~305.84m            |
+| 16     | ~611.67m            |
+| 15     | ~1.22km             |
+| 14     | ~2.45km             |
+| 13     | ~4.89km             |
+| 12     | ~9.79km             |
+| 11     | ~19.57km            |
+| 10     | ~39.15km            |
+| 9      | ~78.29km            |
+| 8      | ~156.58km           |
+| 7      | ~313.12km           |
+| 6      | ~625.85km           |
+| 5      | ~1249km             |
+| 4      | ~2473km             |
+| 3      | ~4755km             |
+| 2      | ~7996km             |
+| 1      | ~15992km            |
+
+
+
+
+
+## Remarks
+
+
+
+Distance is measured by default in **kilometers**.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-spatial-data-java.mdx b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-java.mdx
new file mode 100644
index 0000000000..084b9237b2
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-java.mdx
@@ -0,0 +1,306 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Spatial search was introduced to support retrieving data based on spatial coordinates.
+
+
+This article describes how to set up a spatial field in a static index. If you are interested in an automatic approach, please refer to the spatial querying article [here](../indexes/querying/spatial.mdx).
+
+
+## Creating Indexes
+
+To take advantage of spatial search, we first need to create an index with a spatial field. To mark a field as a spatial field, we use the `CreateSpatialField` method:
+
+
+
+{`object CreateSpatialField(double? lat, double? lng);
+
+object CreateSpatialField(string shapeWkt);
+`}
+
+
+
+Where:
+
+* **lat/lng** are latitude/longitude coordinates
+* **shapeWkt** is a shape in the [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry) format
+
+### Example
+
+
+
+
+{`public static class Event {
+    private String id;
+    private String name;
+    private double latitude;
+    private double longitude;
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public double getLatitude() {
+        return latitude;
+    }
+
+    public void setLatitude(double latitude) {
+        this.latitude = latitude;
+    }
+
+    public double getLongitude() {
+        return longitude;
+    }
+
+    public void setLongitude(double longitude) {
+        this.longitude = longitude;
+    }
+}
+
+public static class Events_ByNameAndCoordinates extends AbstractIndexCreationTask {
+    public Events_ByNameAndCoordinates() {
+        map = "docs.Events.Select(e => new { " +
+            "    name = e.name, " +
+            "    coordinates = this.CreateSpatialField(((double ? ) e.latitude), ((double ? ) e.longitude)) " +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`public static class EventWithWKT {
+    private String id;
+    private String name;
+    private String wkt;
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getWkt() {
+        return wkt;
+    }
+
+    public void setWkt(String wkt) {
+        this.wkt = wkt;
+    }
+}
+
+public static class EventsWithWKT_ByNameAndWKT extends AbstractIndexCreationTask {
+    public EventsWithWKT_ByNameAndWKT() {
+        map = "docs.EventWithWKTs.Select(e => new { " +
+            "    name = e.name, " +
+            "    wkt = this.CreateSpatialField(e.wkt) " +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`public static class Events_ByNameAndCoordinates extends AbstractJavaScriptIndexCreationTask {
+    public Events_ByNameAndCoordinates() {
+        setMaps(Sets.newHashSet("map('events', function (e){\\n" +
+            "    return {\\n" +
+            "        name: e.name,\\n" +
+            "        coordinates: createSpatialField(e.latitude, e.longitude)\\n" +
+            "    };\\n" +
+            "})"));
+    }
+}
+`}
+
+
+
+
+### Options
+
+RavenDB supports both the `Geography` and `Cartesian` systems and multiple strategies for each of them.
+
+
+
+{`public static class SpatialOptionsFactory \{
+    public GeographySpatialOptionsFactory geography()
+
+    public CartesianSpatialOptionsFactory cartesian()
+\}
+`}
+
+
+
+
+
+
+{`public SpatialOptions defaultOptions()
+
+public SpatialOptions defaultOptions(SpatialUnits circleRadiusUnits)
+
+public SpatialOptions boundingBoxIndex()
+
+public SpatialOptions boundingBoxIndex(SpatialUnits circleRadiusUnits)
+
+public SpatialOptions geohashPrefixTreeIndex(int maxTreeLevel)
+
+public SpatialOptions geohashPrefixTreeIndex(int maxTreeLevel, SpatialUnits circleRadiusUnits)
+
+public SpatialOptions quadPrefixTreeIndex(int maxTreeLevel)
+
+public SpatialOptions quadPrefixTreeIndex(int maxTreeLevel, SpatialUnits circleRadiusUnits)
+`}
+
+
+
+
+{`public SpatialOptions boundingBoxIndex()
+
+public SpatialOptions quadPrefixTreeIndex(int maxTreeLevel, SpatialBounds bounds)
+`}
+
+
+
+
+### Changing Default Behavior
+
+By default, if no action is taken, the `GeohashPrefixTree` strategy is used with `GeohashLevel` set to **9**.
+This behavior can be changed by using the `spatial()` method from `AbstractIndexCreationTask`:
+
+
+
+{`public static class Events_ByNameAndCoordinates_Custom extends AbstractIndexCreationTask \{
+    public Events_ByNameAndCoordinates_Custom() \{
+        map = "docs.Events.Select(e => new \{ " +
+            "    name = e.name, " +
+            "    coordinates = this.CreateSpatialField(((double ? ) e.latitude), ((double ? ) e.longitude)) " +
+            "\})";
+
+        spatial("coordinates", f -> f.cartesian().boundingBoxIndex());
+    \}
+\}
+`}
+
+
+
+## Spatial search strategies
+
+## GeohashPrefixTree
+Geohash is a latitude/longitude representation system that describes earth as a grid with 32 cells, assigning an alphanumeric character to each grid cell. Each grid cell is further divided into 32 smaller chunks, and each chunk has an alphanumeric character assigned as well, and so on.
+
+E.g. The location of 'New York' in the United States is represented by the following geohash: [DR5REGY6R](http://geohash.org/dr5regy6r) and it represents the `40.7144 -74.0060` coordinates. Removing characters from the end of the geohash will decrease the precision level.
+
+More information about geohash uses, decoding algorithm, and limitations can be found [here](https://en.wikipedia.org/wiki/Geohash).
+
+
+## QuadPrefixTree
+QuadTree represents the earth as a grid with exactly four cells. Similarly to geohash, each grid cell (sometimes called a bucket) has a letter assigned and is divided further into four more cells, and so on.
+
+More information about QuadTree can be found [here](https://en.wikipedia.org/wiki/Quadtree).
+
+
+## BoundingBox
+More information about BoundingBox can be found [here](https://en.wikipedia.org/wiki/Minimum_bounding_rectangle).
+
+
+
+`GeohashPrefixTree` is the default `SpatialSearchStrategy`. Making any changes to the strategy after an index has been created will trigger re-indexing.
+
+
+### Precision
+
+By default, the precision level (`maxTreeLevel`) for GeohashPrefixTree is set to **9** and for QuadPrefixTree the value is **23**. This means that the coordinates are represented by a 9- or 23-character string, respectively. The difference exists because the `QuadTree` representation would be much less precise if the levels were the same.
+
+## Geohash precision values
+Source: unterbahn.com
+
+| Level | E-W Distance at Equator | N-S Distance at Equator |
+|:----- |:------------------------|:------------------------|
+| 12    | ~3.7cm                  | ~1.8cm                  |
+| 11    | ~14.9cm                 | ~14.9cm                 |
+| 10    | ~1.19m                  | ~0.60m                  |
+| **9** | **~4.78m**              | **~4.78m**              |
+| 8     | ~38.2m                  | ~19.1m                  |
+| 7     | ~152.8m                 | ~152.8m                 |
+| 6     | ~1.2km                  | ~0.61km                 |
+| 5     | ~4.9km                  | ~4.9km                  |
+| 4     | ~39km                   | ~19.6km                 |
+| 3     | ~157km                  | ~157km                  |
+| 2     | ~1252km                 | ~626km                  |
+| 1     | ~5018km                 | ~5018km                 |
+
+
+
+## Quadtree precision values
+
+| Level  | Distance at Equator |
+|:-------|:--------------------|
+| 30     | ~4cm                |
+| 29     | ~7cm                |
+| 28     | ~15cm               |
+| 27     | ~30cm               |
+| 26     | ~60cm               |
+| 25     | ~1.19m              |
+| 24     | ~2.39m              |
+| **23** | **~4.78m**          |
+| 22     | ~9.56m              |
+| 21     | ~19.11m             |
+| 20     | ~38.23m             |
+| 19     | ~76.23m             |
+| 18     | ~152.92m            |
+| 17     | ~305.84m            |
+| 16     | ~611.67m            |
+| 15     | ~1.22km             |
+| 14     | ~2.45km             |
+| 13     | ~4.89km             |
+| 12     | ~9.79km             |
+| 11     | ~19.57km            |
+| 10     | ~39.15km            |
+| 9      | ~78.29km            |
+| 8      | ~156.58km           |
+| 7      | ~313.12km           |
+| 6      | ~625.85km           |
+| 5      | ~1249km             |
+| 4      | ~2473km             |
+| 3      | ~4755km             |
+| 2      | ~7996km             |
+| 1      | ~15992km            |
+
+
+
+## Remarks
+
+
+You can read more about **spatial search** in a **dedicated querying article** available [here](../indexes/querying/spatial.mdx).
+ + + +Distance by default is measured in **kilometers**. + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-spatial-data-nodejs.mdx b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-nodejs.mdx new file mode 100644 index 0000000000..da422dbc03 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-nodejs.mdx @@ -0,0 +1,328 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Documents that contain spatial data can be queried by spatial queries that employ geographical criteria. + There are two options: **dynamic** spatial query, and spatial **index** query. + + * **Dynamic spatial query** + A dynamic spatial query can be made on a collection (see [how to make a spatial query](../client-api/session/querying/how-to-make-a-spatial-query.mdx)). + An auto-index will be created by the server. + + * **Spatial index query** + Documents' spatial data can be indexed in a static index (**described in this article**), + and a spatial query can then be executed over this index (see [query a spatial index](../indexes/querying/spatial.mdx)). + +* In this page: + * [Create index with spatial field](../indexes/indexing-spatial-data.mdx#create-index-with-spatial-field) + * [Customize coordinate system and strategy](../indexes/indexing-spatial-data.mdx#customize-coordinate-system-and-strategy) + * [Spatial indexing strategies](../indexes/indexing-spatial-data.mdx#spatial-indexing-strategies) + + +## Create index with spatial field + +* Use `createSpatialField` to index spatial data in a static-index. + +* You can then retrieve documents based on geographical criteria when making a spatial query on this index-field. + +* A spatial index can also be defined from [Studio](../studio/database/indexes/create-map-index.mdx#spatial-field-options). 
+
+#### Example:
+
+
+
+
+{`// Define an index with a spatial field
+class Events_ByNameAndCoordinates extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+        const { createSpatialField } = this.mapUtils();
+
+        this.map('events', e => {
+            return {
+                name: e.name,
+                // Call 'createSpatialField' to create a spatial index-field
+                // Field 'coordinates' will be composed of lat & lng supplied from the document
+                coordinates: createSpatialField(
+                    e.latitude,
+                    e.longitude
+                )
+
+                // Documents can be retrieved
+                // by making a spatial query on the 'coordinates' index-field
+            };
+        });
+    }
+}
+
+class Event {
+    constructor(id, name, latitude, longitude) {
+        this.id = id;
+        this.name = name;
+        this.latitude = latitude;
+        this.longitude = longitude;
+    }
+}
+`}
+
+
+
+
+{`// Define an index with a spatial field
+class EventsWithWKT_ByNameAndWKT extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+        const { createSpatialField } = this.mapUtils();
+
+        this.map('events', e => {
+            return {
+                name: e.name,
+                // Call 'createSpatialField' to create a spatial index-field
+                // Field 'wkt' will be composed of the WKT string supplied from the document
+                wkt: createSpatialField(e.wkt)
+
+                // Documents can be retrieved by
+                // making a spatial query on the 'wkt' index-field
+            };
+        });
+    }
+}
+
+class EventWithWKT {
+    constructor(id, name, wkt) {
+        this.id = id;
+        this.name = name;
+        this.wkt = wkt;
+    }
+}
+`}
+
+
+
+
+#### Syntax:
+
+
+
+{`createSpatialField(lat, lng);
+createSpatialField(wkt);
+`}
+
+
+
+| Parameters | Type     | Description                |
+|------------|----------|----------------------------|
+| **lat**    | `number` | Latitude coordinate        |
+| **lng**    | `number` | Longitude coordinate       |
+| **wkt**    | `string` | Shape in WKT string format |
+
+
+
+## Customize coordinate system and strategy
+
+* For each spatial index-field, you can specify the **coordinate system** and **strategy** to be used
+  during indexing and when processing the data at query time.
+
+* RavenDB supports both the `Geography` and `Cartesian` systems with the following strategies:
+
+  * Geography system:
+    * BoundingBox
+    * GeoHashPrefixTree
+    * QuadPrefixTree
+
+  * Cartesian system:
+    * BoundingBox
+    * QuadPrefixTree
+
+* **By default**, the `GeoHashPrefixTree` strategy is used with `GeoHashLevel` set to **9**.
+  Use the `spatial` method to modify this setting.
+
+* The performance cost of spatial indexing is directly related to the tree level chosen.
+  Learn more about each strategy [below](../indexes/indexing-spatial-data.mdx#spatial-indexing-strategies).
+
+* Note: Modifying the strategy after the index has been created & deployed will trigger re-indexing.
+
+#### Example:
+
+
+
+{`class Events_ByNameAndCoordinates_Custom extends AbstractJavaScriptIndexCreationTask \{
+    constructor() \{
+        super();
+        const \{ createSpatialField \} = this.mapUtils();
+
+        this.map('events', e => \{
+            return \{
+                name: e.name,
+                // Define a spatial index-field
+                coordinates: createSpatialField(
+                    e.latitude,
+                    e.longitude
+                )
+
+                // Documents can be retrieved
+                // by making a spatial query on the 'coordinates' index-field
+            \};
+        \});
+
+        // Set the spatial indexing strategy for the spatial field 'coordinates'
+        this.spatial("coordinates", factory => factory.cartesian().boundingBoxIndex());
+    \}
+\}
+`}
+
+
+
+#### Syntax:
+
+
+
+{`class SpatialOptionsFactory \{
+    geography(): GeographySpatialOptionsFactory;
+    cartesian(): CartesianSpatialOptionsFactory;
+\}
+`}
+
+
+
+
+
+
+{`defaultOptions(circleRadiusUnits);
+boundingBoxIndex(circleRadiusUnits);
+geohashPrefixTreeIndex(maxTreeLevel, circleRadiusUnits);
+quadPrefixTreeIndex(maxTreeLevel, circleRadiusUnits);
+`}
+
+
+
+
+{`boundingBoxIndex(): SpatialOptions;
+quadPrefixTreeIndex(maxTreeLevel, bounds);
+
+class SpatialBounds {
+    minX; // number
+    maxX; // number
+    minY; // number
+    maxY; // number
+}
+`}
+
+
+
+
+| Parameters            | Type            | Description                                        |
+|-----------------------|-----------------|----------------------------------------------------|
+| **circleRadiusUnits** | `string`        | "Kilometers" or "Miles"                            |
+| **maxTreeLevel**      | `number`        | Controls precision level                           |
+| **bounds**            | `SpatialBounds` | Coordinates for the cartesian quadPrefixTreeIndex  |
+
+
+
+## Spatial indexing strategies
+
+#### BoundingBox strategy
+
+* The bounding box strategy is the simplest.
+  Given a spatial shape, such as a point, circle, or polygon, the shape's bounding box is computed
+  and the spatial coordinates (minX, minY, maxX, maxY) that enclose the shape are indexed.
+
+* When making a query,
+  RavenDB translates the query criteria to the same bounding box system used for indexing.
+
+* The bounding box strategy is cheaper at indexing time and can produce quick queries,
+  but this comes at the expense of accuracy.
+
+* Read more about bounding box [here](https://en.wikipedia.org/wiki/Minimum_bounding_rectangle).
+
+#### GeoHashPrefixTree strategy
+
+* Geohash is a latitude/longitude representation system that describes Earth as a grid with 32 cells, assigning an alphanumeric character to each grid cell.
+  Each grid cell is further divided into 32 smaller chunks, and each chunk has an alphanumeric character assigned as well, and so on.
+
+* E.g. The location of 'New York' in the United States is represented by the following geohash: [DR5REGY6R](http://geohash.org/dr5regy6r)
+  and it represents the `40.7144 -74.0060` coordinates.
+  Removing characters from the end of the geohash will decrease the precision level.
+
+* The `maxTreeLevel` determines the length of the geohash used for the indexing, which in turn affects accuracy.
+  By default, it is set to **9**, providing a resolution of approximately 2.5 meters.
+
+* More information about geohash uses, decoding algorithm, and limitations can be found [here](https://en.wikipedia.org/wiki/Geohash).
+ + + +| Level | E-W Distance at Equator | N-S Distance at Equator | +|:----- |:------------------------|:------------------------| +| 12 | ~3.7cm | ~1.8cm | +| 11 | ~14.9cm | ~14.9cm | +| 10 | ~1.19m | ~0.60m | +| **9** | **~4.78m** | **~4.78m** | +| 8 | ~38.2m | ~19.1m | +| 7 | ~152.8m | ~152.8m | +| 6 | ~1.2km | ~0.61km | +| 5 | ~4.9km | ~4.9km | +| 4 | ~39km | ~19.6km | +| 3 | ~157km | ~157km | +| 2 | ~1252km | ~626km | +| 1 | ~5018km | ~5018km | + + +#### QuadPrefixTree strategy + +* The QuadTree represents Earth as a grid consisting of four cells (also known as buckets). + Similar to GeoHash, each cell is assigned a letter, and is recursively divided into four more cells, creating a hierarchical structure. + +* By default, the precision level (`maxTreeLevel`) for QuadPrefixTree is **23**. + +* More information about QuadTree can be found [here](https://en.wikipedia.org/wiki/Quadtree). + + + +| Level | Distance at Equator | +|:-------|:-------------------| +| 30 | ~4cm | +| 29 | ~7cm | +| 28 | ~15cm | +| 27 | ~30cm | +| 26 | ~60cm | +| 25 | ~1.19m | +| 24 | ~2.39m | +| **23** | **~4.78m** | +| 22 | ~9.56m | +| 21 | ~19.11m | +| 20 | ~38.23m | +| 19 | ~76.23m | +| 18 | ~152.92m | +| 17 | ~305.84m | +| 16 | ~611.67m | +| 15 | ~1.22km | +| 14 | ~2.45km | +| 13 | ~4.89km | +| 12 | ~9.79km | +| 11 | ~19.57km | +| 10 | ~39.15km | +| 9 | ~78.29km | +| 8 | ~156.58km | +| 7 | ~313.12km | +| 6 | ~625.85km | +| 5 | ~1249km | +| 4 | ~2473km | +| 3 | ~4755km | +| 2 | ~7996km | +| 1 | ~15992km | + + + + + +## Remarks + + + +Distance is measured by default in **kilometers**. + + + + diff --git a/versioned_docs/version-7.1/indexes/_indexing-spatial-data-php.mdx b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-php.mdx new file mode 100644 index 0000000000..2b77fd2c6f --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-php.mdx @@ -0,0 +1,437 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Documents that contain spatial data can be queried by spatial queries that employ geographical criteria. + There are two options: **dynamic** spatial query, and spatial **index** query. + + * **Dynamic spatial query** + A dynamic spatial query can be made on a collection (see [how to make a spatial query](../client-api/session/querying/how-to-make-a-spatial-query.mdx)). + An auto-index will be created by the server. + + * **Spatial index query** + Documents' spatial data can be indexed in a static index (**described in this article**), + and a spatial query can then be executed over this index (see [query a spatial index](../indexes/querying/spatial.mdx)). + +* In this page: + * [Create index with spatial field](../indexes/indexing-spatial-data.mdx#create-index-with-spatial-field) + * [Customize coordinate system and strategy](../indexes/indexing-spatial-data.mdx#customize-coordinate-system-and-strategy) + * [Spatial indexing strategies](../indexes/indexing-spatial-data.mdx#spatial-indexing-strategies) + + +## Create index with spatial field + +* Use `CreateSpatialField` to index spatial data in a static-index. + +* You can then retrieve documents based on geographical criteria when making a spatial query on this index-field. + +* A spatial index can also be defined from [Studio](../studio/database/indexes/create-map-index.mdx#spatial-field-options). 
+
+#### Example:
+
+
+
+
+{`class Event
+{
+    private ?string $id = null;
+    private ?string $name = null;
+    private ?float $latitude = null;
+    private ?float $longitude = null;
+
+    public function getId(): ?string
+    {
+        return $this->id;
+    }
+
+    public function setId(?string $id): void
+    {
+        $this->id = $id;
+    }
+
+    public function getName(): ?string
+    {
+        return $this->name;
+    }
+
+    public function setName(?string $name): void
+    {
+        $this->name = $name;
+    }
+
+    public function getLatitude(): ?float
+    {
+        return $this->latitude;
+    }
+
+    public function setLatitude(?float $latitude): void
+    {
+        $this->latitude = $latitude;
+    }
+
+    public function getLongitude(): ?float
+    {
+        return $this->longitude;
+    }
+
+    public function setLongitude(?float $longitude): void
+    {
+        $this->longitude = $longitude;
+    }
+}
+
+class Events_ByNameAndCoordinates extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "docs.Events.Select(e => new { " .
+            "    name = e.name, " .
+            "    coordinates = this.CreateSpatialField(((double ? ) e.latitude), ((double ? ) e.longitude)) " .
+            "})";
+    }
+}
+`}
+
+
+
+
+{`class EventWithWKT {
+    private ?string $id = null;
+    private ?string $name = null;
+    private ?string $wkt = null;
+
+    public function getId(): ?string
+    {
+        return $this->id;
+    }
+
+    public function setId(?string $id): void
+    {
+        $this->id = $id;
+    }
+
+    public function getName(): ?string
+    {
+        return $this->name;
+    }
+
+    public function setName(?string $name): void
+    {
+        $this->name = $name;
+    }
+
+    public function getWkt(): ?string
+    {
+        return $this->wkt;
+    }
+
+    public function setWkt(?string $wkt): void
+    {
+        $this->wkt = $wkt;
+    }
+}
+
+class EventsWithWKT_ByNameAndWKT extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "docs.EventWithWKTs.Select(e => new { " .
+            "    name = e.name, " .
+            "    wkt = this.CreateSpatialField(e.wkt) " .
+            "})";
+    }
+}
+`}
+
+
+
+
+{`class Events_ByNameAndCoordinates_JS extends AbstractJavaScriptIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->setMaps([
+            "map('events', function (e) {
+                return {
+                    Name: e.Name,
+                    Coordinates: createSpatialField(e.Latitude, e.Longitude)
+                };
+            })"
+        ]);
+    }
+}
+`}
+
+
+
+
+#### Syntax:
+
+
+
+{`object CreateSpatialField(double? lat, double? lng); // Latitude/Longitude coordinates
+object CreateSpatialField(string shapeWkt);           // Shape in WKT string format
+`}
+
+
+
+
+
+## Customize coordinate system and strategy
+
+* For each spatial index-field, you can specify the **coordinate system** and **strategy** to be used
+  during indexing and when processing the data at query time.
+
+* RavenDB supports both the **Geography** and **Cartesian** systems with the following strategies:
+
+  * Geography system:
+    * `boundingBoxIndex`
+    * `geohashPrefixTreeIndex`
+    * `quadPrefixTreeIndex`
+
+  * Cartesian system:
+    * `boundingBoxIndex`
+    * `quadPrefixTreeIndex`
+
+* **By default**, the `geohashPrefixTreeIndex` strategy is used with `geohashLevel` set to **9**.
+
+* The performance cost of spatial indexing is directly related to the tree level chosen.
+  Learn more about each strategy [below](../indexes/indexing-spatial-data.mdx#spatial-indexing-strategies).
+
+* Note: Modifying the strategy after the index has been created & deployed will trigger re-indexing.
+
+#### Example:
+
+
+
+
+{`class Events_ByNameAndCoordinates_Custom extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "docs.Events.Select(e => new { " .
+            "    name = e.name, " .
+            "    coordinates = this.CreateSpatialField(((double ? ) e.latitude), ((double ? ) e.longitude)) " .
+            "})";
+
+        $this->spatial("coordinates", function($factory) { return $factory->cartesian()->boundingBoxIndex(); });
+    }
+}
+`}
+
+
+
+
+{`class Events_ByNameAndCoordinates_Custom_JS extends AbstractJavaScriptIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        // Define index fields
+        $this->setMaps([
+            "map('events', function (e) {
+                return {
+                    Name: e.Name,
+                    Coordinates: createSpatialField(e.Latitude, e.Longitude)
+                };
+            })"
+        ]);
+
+
+        // Customize index fields
+        $options = new IndexFieldOptions();
+
+        $spatialOptions = new SpatialOptions();
+        $spatialOptions->setType(SpatialFieldType::cartesian());
+        $spatialOptions->setStrategy(SpatialSearchStrategy::boundingBox());
+        $options->setSpatial($spatialOptions);
+
+        $this->setFields([
+            "Coordinates" => $options
+        ]);
+    }
+}
+`}
+
+
+
+
+#### Syntax:
+
+
+
+{`class SpatialOptionsFactory
+\{
+    public function geography(): GeographySpatialOptionsFactory
+    \{
+        return new GeographySpatialOptionsFactory();
+    \}
+
+    public function cartesian(): CartesianSpatialOptionsFactory
+    \{
+        return new CartesianSpatialOptionsFactory();
+    \}
+\}
+`}
+
+
+
+
+
+
+{`interface GeographySpatialOptionsFactory
+{
+    // if $circleRadiusUnits is not set, SpatialUnits::kilometers() will be used
+
+    // Default is GeohashPrefixTree strategy with maxTreeLevel set to 9
+    public function defaultOptions(?SpatialUnits $circleRadiusUnits = null): SpatialOptions;
+
+    public function boundingBoxIndex(?SpatialUnits $circleRadiusUnits = null): SpatialOptions;
+
+    public function geohashPrefixTreeIndex(int $maxTreeLevel, ?SpatialUnits $circleRadiusUnits = null): SpatialOptions;
+
+    public function quadPrefixTreeIndex(int $maxTreeLevel, ?SpatialUnits $circleRadiusUnits = null): SpatialOptions;
+}
+`}
+
+
+
+
+{`interface CartesianSpatialOptionsFactory
+{
+    public function boundingBoxIndex(): SpatialOptions;
+    public function quadPrefixTreeIndex(int $maxTreeLevel, SpatialBounds $bounds): SpatialOptions;
+}
+
+class SpatialBounds
+{
+    private float $minX;
+    private float $maxX;
+    private float $minY;
+    private float $maxY;
+
+    // ... getters and setters
+}
+`}
+
+
+
+
+
+
+## Spatial indexing strategies
+
+#### BoundingBox strategy
+
+* The bounding box strategy is the simplest.
+  Given a spatial shape, such as a point, circle, or polygon, the shape's bounding box is computed
+  and the spatial coordinates (minX, minY, maxX, maxY) that enclose the shape are indexed.
+
+* When making a query,
+  RavenDB translates the query criteria to the same bounding box system used for indexing.
+
+* The bounding box strategy is cheaper at indexing time and can produce quick queries,
+  but this comes at the expense of accuracy.
+
+* Read more about bounding box [here](https://en.wikipedia.org/wiki/Minimum_bounding_rectangle).
+
+#### GeoHashPrefixTree strategy
+
+* Geohash is a latitude/longitude representation system that describes Earth as a grid with 32 cells, assigning an alphanumeric character to each grid cell.
+  Each grid cell is further divided into 32 smaller chunks, and each chunk has an alphanumeric character assigned as well, and so on.
+
+* E.g.
+  The location of 'New York' in the United States is represented by the following geohash: [DR5REGY6R](http://geohash.org/dr5regy6r)
+  and it represents the `40.7144 -74.0060` coordinates.
+  Removing characters from the end of the geohash will decrease the precision level.
+
+* The `maxTreeLevel` determines the length of the geohash used for the indexing, which in turn affects accuracy.
+  By default, it is set to **9**, providing a resolution of approximately 2.5 meters.
+
+* More information about geohash uses, decoding algorithm, and limitations can be found [here](https://en.wikipedia.org/wiki/Geohash).
+
+
+
+| Level | E-W Distance at Equator | N-S Distance at Equator |
+|:----- |:------------------------|:------------------------|
+| 12    | ~3.7cm                  | ~1.8cm                  |
+| 11    | ~14.9cm                 | ~14.9cm                 |
+| 10    | ~1.19m                  | ~0.60m                  |
+| **9** | **~4.78m**              | **~4.78m**              |
+| 8     | ~38.2m                  | ~19.1m                  |
+| 7     | ~152.8m                 | ~152.8m                 |
+| 6     | ~1.2km                  | ~0.61km                 |
+| 5     | ~4.9km                  | ~4.9km                  |
+| 4     | ~39km                   | ~19.6km                 |
+| 3     | ~157km                  | ~157km                  |
+| 2     | ~1252km                 | ~626km                  |
+| 1     | ~5018km                 | ~5018km                 |
+
+
+#### QuadPrefixTree strategy
+
+* The QuadTree represents Earth as a grid consisting of four cells (also known as buckets).
+  Similar to GeoHash, each cell is assigned a letter, and is recursively divided into four more cells, creating a hierarchical structure.
+
+* By default, the precision level (`maxTreeLevel`) for QuadPrefixTree is **23**.
+
+* More information about QuadTree can be found [here](https://en.wikipedia.org/wiki/Quadtree).
+
+
+
+| Level  | Distance at Equator |
+|:-------|:--------------------|
+| 30     | ~4cm                |
+| 29     | ~7cm                |
+| 28     | ~15cm               |
+| 27     | ~30cm               |
+| 26     | ~60cm               |
+| 25     | ~1.19m              |
+| 24     | ~2.39m              |
+| **23** | **~4.78m**          |
+| 22     | ~9.56m              |
+| 21     | ~19.11m             |
+| 20     | ~38.23m             |
+| 19     | ~76.23m             |
+| 18     | ~152.92m            |
+| 17     | ~305.84m            |
+| 16     | ~611.67m            |
+| 15     | ~1.22km             |
+| 14     | ~2.45km             |
+| 13     | ~4.89km             |
+| 12     | ~9.79km             |
+| 11     | ~19.57km            |
+| 10     | ~39.15km            |
+| 9      | ~78.29km            |
+| 8      | ~156.58km           |
+| 7      | ~313.12km           |
+| 6      | ~625.85km           |
+| 5      | ~1249km             |
+| 4      | ~2473km             |
+| 3      | ~4755km             |
+| 2      | ~7996km             |
+| 1      | ~15992km            |
+
+
+
+
+
+## Remarks
+
+
+
+Distance is measured by default in `KILOMETERS`.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_indexing-spatial-data-python.mdx b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-python.mdx
new file mode 100644
index 0000000000..ad1e66f647
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_indexing-spatial-data-python.mdx
@@ -0,0 +1,332 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Documents that contain spatial data can be queried by spatial queries that employ geographical criteria.
+  There are two options: **dynamic** spatial query, and spatial **index** query.
+
+  * **Dynamic spatial query**
+    A dynamic spatial query can be made on a collection (see [how to make a spatial query](../client-api/session/querying/how-to-make-a-spatial-query.mdx)).
+    An auto-index will be created by the server.
+
+  * **Spatial index query**
+    Documents' spatial data can be indexed in a static index (**described in this article**),
+    and a spatial query can then be executed over this index (see [query a spatial index](../indexes/querying/spatial.mdx)).
+
+* In this page:
+  * [Create index with spatial field](../indexes/indexing-spatial-data.mdx#create-index-with-spatial-field)
+  * [Customize coordinate system and strategy](../indexes/indexing-spatial-data.mdx#customize-coordinate-system-and-strategy)
+  * [Spatial indexing strategies](../indexes/indexing-spatial-data.mdx#spatial-indexing-strategies)
+
+
+## Create index with spatial field
+
+* Use `CreateSpatialField` to index spatial data in a static-index.
+
+* You can then retrieve documents based on geographical criteria when making a spatial query on this index-field.
+
+* A spatial index can also be defined from [Studio](../studio/database/indexes/create-map-index.mdx#spatial-field-options).
+
+#### Example:
+
+
+
+
+{`# Define an index with a spatial field
+class Events_ByNameAndCoordinates(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        # Call 'CreateSpatialField' to create a spatial index-field
+        # Field 'coordinates' will be composed of lat & lng supplied from the document
+        self.map = (
+            "from e in docs.Events select new {"
+            "    name = e.name,"
+            "    coordinates = CreateSpatialField(e.latitude, e.longitude)"
+            "}"
+        )
+        # Documents can be retrieved
+        # by making a spatial query on the 'coordinates' index-field
+
+
+class Event:
+    def __init__(self, Id: str = None, name: str = None, latitude: float = None, longitude: float = None):
+        self.Id = Id
+        self.name = name
+        self.latitude = latitude
+        self.longitude = longitude
+`}
+
+
+
+
+{`# Define an index with a spatial field
+class EventsWithWKT_ByNameAndWKT(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from e in docs.Events select new {" "    name = e.name," "    WKT = CreateSpatialField(e.WKT)" "}"
+
+
+class EventWithWKT:
+    def __init__(self, Id: str = None, name: str = None, WKT: str = None):
+        self.Id = Id
+        self.name = name
+        self.WKT = WKT
+`}
+
+
+
+
+{`class Events_ByNameAndCoordinates_JS(AbstractJavaScriptIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.maps = {
+            """
+            map('events', function (e) {
+                return {
+                    name: e.name,
+                    coordinates: createSpatialField(e.latitude, e.longitude)
+                };
+            })
+            """
+        }
+`}
+
+
+
+
+#### Syntax:
+
+
+
+{`class DynamicSpatialField(ABC):
+    def __init__(self, round_factor: float = 0): ...
+
+
+class PointField(DynamicSpatialField):  # Latitude/Longitude coordinates
+    def __init__(self, latitude: str, longitude: str): ...
+
+
+class WktField(DynamicSpatialField):  # Shape in WKT string format
+    def __init__(self, wkt: str): ...
+`}
+
+
+
+
+
+## Customize coordinate system and strategy
+
+* For each spatial index-field, you can specify the **coordinate system** and **strategy** to be used
+  during indexing and when processing the data at query time.
+
+* RavenDB supports both the **Geography** and **Cartesian** systems with the following strategies:
+
+  * Geography system:
+    * `bounding_box_index`
+    * `geohash_prefix_tree_index`
+    * `quad_prefix_tree_index`
+
+  * Cartesian system:
+    * `bounding_box_index`
+    * `quad_prefix_tree_index`
+
+* **By default**, the `geohash_prefix_tree_index` strategy is used with `geohash_level` set to **9**.
+
+* The performance cost of spatial indexing is directly related to the tree level chosen.
+  Learn more about each strategy [below](../indexes/indexing-spatial-data.mdx#spatial-indexing-strategies).
+
+* Note: Modifying the strategy after the index has been created & deployed will trigger re-indexing.
+
+#### Example:
+
+
+
+
+{`class Events_ByNameAndCoordinates_Custom(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from e in docs.Events select new { name = e.name, coordinates = CreateSpatialField(e.latitude, e.longitude)}"
+
+        # Set the spatial indexing strategy for the spatial field 'coordinates'
+        self._spatial("coordinates", lambda factory: factory.cartesian.bounding_box_index())
+`}
+
+
+
+
+{`class Event_ByNameAndCoordinates_Custom_JS(AbstractJavaScriptIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.maps = """
+            map('events', function (e) {
+                return {
+                    Name: e.Name,
+                    Coordinates: createSpatialField(e.Latitude, e.Longitude)
+                };
+            })
+            """
+
+        # Customize index fields
+        self.fields = {
+            "coordinates": IndexFieldOptions(
+                spatial=SpatialOptions(
+                    field_type=SpatialFieldType.CARTESIAN, strategy=SpatialSearchStrategy.BOUNDING_BOX
+                )
+            )
+        }
+`}
+
+
+
+
+#### Syntax:
+
+
+
+{`class SpatialOptionsFactory:
+    def geography(self) -> GeographySpatialOptionsFactory:
+        return SpatialOptionsFactory.GeographySpatialOptionsFactory()
+
+    def cartesian(self) -> CartesianSpatialOptionsFactory:
+        return SpatialOptionsFactory.CartesianSpatialOptionsFactory()
+`}
+
+
+
+
+
+
+{`# Default is GeohashPrefixTree strategy with max_tree_level set to 9
+def default_option(self, circle_radius_units: SpatialUnits = SpatialUnits.KILOMETERS) -> SpatialOptions: ...
+
+def bounding_box_index(self, circle_radius_units: SpatialUnits = SpatialUnits.KILOMETERS) -> SpatialOptions: ...
+
+def geohash_prefix_tree_index(
+    self, max_tree_level: int, circle_radius_units: SpatialUnits = SpatialUnits.KILOMETERS
+) -> SpatialOptions: ...
+
+def quad_prefix_tree_index(
+    self, max_tree_level: int, circle_radius_units: SpatialUnits = SpatialUnits.KILOMETERS
+) -> SpatialOptions: ...
+`}
+
+
+
+
+{`def bounding_box_index(self) -> SpatialOptions: ...
+
+def quad_prefix_tree_index(
+    self, max_tree_level: int, bounds: SpatialOptionsFactory.SpatialBounds
+) -> SpatialOptions: ...
+
+class SpatialBounds:
+    def __init__(self, min_x: float, min_y: float, max_x: float, max_y: float): ...
+`}
+
+
+
+
+
+
+## Spatial indexing strategies
+
+#### BoundingBox strategy
+
+* The bounding box strategy is the simplest.
+  Given a spatial shape, such as a point, circle, or polygon, the shape's bounding box is computed
+  and the spatial coordinates (minX, minY, maxX, maxY) that enclose the shape are indexed.
+
+* When making a query,
+  RavenDB translates the query criteria to the same bounding box system used for indexing.
+
+* The bounding box strategy is cheaper at indexing time and can produce quick queries,
+  but this comes at the expense of accuracy.
+
+* Read more about bounding box [here](https://en.wikipedia.org/wiki/Minimum_bounding_rectangle).
+
+#### GeoHashPrefixTree strategy
+
+* Geohash is a latitude/longitude representation system that describes Earth as a grid with 32 cells, assigning an alphanumeric character to each grid cell.
+  Each grid cell is further divided into 32 smaller chunks, and each chunk has an alphanumeric character assigned as well, and so on.
+
+* E.g. The location of 'New York' in the United States is represented by the following geohash: [DR5REGY6R](http://geohash.org/dr5regy6r)
+  and it represents the `40.7144 -74.0060` coordinates.
+  Removing characters from the end of the geohash will decrease the precision level.
+
+* The `max_tree_level` determines the length of the geohash used for the indexing, which in turn affects accuracy.
+ By default, it is set to **9**, providing a resolution of approximately 2.5 meters. + +* More information about geohash uses, decoding algorithm, and limitations can be found [here](https://en.wikipedia.org/wiki/Geohash). + + + +| Level | E-W Distance at Equator | N-S Distance at Equator | +|:----- |:------------------------|:------------------------| +| 12 | ~3.7cm | ~1.8cm | +| 11 | ~14.9cm | ~14.9cm | +| 10 | ~1.19m | ~0.60m | +| **9** | **~4.78m** | **~4.78m** | +| 8 | ~38.2m | ~19.1m | +| 7 | ~152.8m | ~152.8m | +| 6 | ~1.2km | ~0.61km | +| 5 | ~4.9km | ~4.9km | +| 4 | ~39km | ~19.6km | +| 3 | ~157km | ~157km | +| 2 | ~1252km | ~626km | +| 1 | ~5018km | ~5018km | + + +#### QuadPrefixTree strategy + +* The QuadTree represents Earth as a grid consisting of four cells (also known as buckets). + Similar to GeoHash, each cell is assigned a letter, and is recursively divided into four more cells, creating a hierarchical structure. + +* By default, the precision level (`max_tree_level`) for QuadPrefixTree is **23**. + +* More information about QuadTree can be found [here](https://en.wikipedia.org/wiki/Quadtree). + + + +| Level | Distance at Equator | +|:-------|:-------------------| +| 30 | ~4cm | +| 29 | ~7cm | +| 28 | ~15cm | +| 27 | ~30cm | +| 26 | ~60cm | +| 25 | ~1.19m | +| 24 | ~2.39m | +| **23** | **~4.78m** | +| 22 | ~9.56m | +| 21 | ~19.11m | +| 20 | ~38.23m | +| 19 | ~76.23m | +| 18 | ~152.92m | +| 17 | ~305.84m | +| 16 | ~611.67m | +| 15 | ~1.22km | +| 14 | ~2.45km | +| 13 | ~4.89km | +| 12 | ~9.79km | +| 11 | ~19.57km | +| 10 | ~39.15km | +| 9 | ~78.29km | +| 8 | ~156.58km | +| 7 | ~313.12km | +| 6 | ~625.85km | +| 5 | ~1249km | +| 4 | ~2473km | +| 3 | ~4755km | +| 2 | ~7996km | +| 1 | ~15992km | + + + + + +## Remarks + + + +Distance is measured by default in `KILOMETERS`. + + + + diff --git a/versioned_docs/version-7.1/indexes/_javascript-indexes-csharp.mdx b/versioned_docs/version-7.1/indexes/_javascript-indexes-csharp.mdx new file mode 100644 index 0000000000..7e7e932145 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_javascript-indexes-csharp.mdx @@ -0,0 +1,517 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* JavaScript indexes are intended for users who prefer to define static indexes using JavaScript instead of C#. + +* The RavenDB JavaScript engine supports ECMAScript 5.1 syntax. + In addition, RavenDB provides a set of predefined JavaScript functions that can be used in JavaScript indexes and in other features such as subscriptions, ETL scripts, set-based patching, and more. + See the full list in [Predefined JavaScript functions](../server/kb/javascript-engine.mdx#predefined-javascript-functions). 
+* In this article:
+  * [Creating and deploying a JavaScript index](../indexes/javascript-indexes.mdx#creating-and-deploying-a-javascript-index)
+  * [Map index](../indexes/javascript-indexes.mdx#map-index)
+    * [Map index - basic](../indexes/javascript-indexes.mdx#example-i---map-index---basic)
+    * [Map index - with additional sources](../indexes/javascript-indexes.mdx#example-ii---map-index---with-additional-sources)
+    * [Map index - with inline string compilation](../indexes/javascript-indexes.mdx#example-iii---map-index---with-inline-string-compilation)
+  * [Multi-Map index](../indexes/javascript-indexes.mdx#multi-map-index)
+  * [Map-Reduce index](../indexes/javascript-indexes.mdx#map-reduce-index)
+
+
+
+## Creating and deploying a JavaScript index
+
+**Creating a JavaScript index**:
+
+* To create a JavaScript index, define a class that inherits from `AbstractJavaScriptIndexCreationTask`.
+* This base class itself inherits from [AbstractIndexCreationTask](../indexes/creating-and-deploying.mdx#define-a-static-index-using-a-custom-class),
+  which is the base class for all C# indexes.
+
+
+```csharp
+// Inherit from 'AbstractJavaScriptIndexCreationTask'
+public class Documents_ByName_JS : AbstractJavaScriptIndexCreationTask
+{
+    public Documents_ByName_JS()
+    {
+        Maps = new HashSet<string>()
+        {
+            // Define a map function:
+            @"map(<collection-name>, function(doc) {
+                return {
+                    Name: doc.Name
+                    // ...
+                }
+            })",
+
+            // ...
+        };
+    }
+}
+```
+
+
+**Deploying a JavaScript index**:
+
+* Deploy a JavaScript index using the standard creation flow as described in [Deploy a static-index](../indexes/creating-and-deploying.mdx#deploy-a-static-index).
+
+* Note that **JavaScript indexes** can be deployed using a [User/Read-Write](../server/security/authorization/security-clearance-and-permissions.mdx#section-1) certificate,
+  while **C# static indexes** require a [User/Admin](../server/security/authorization/security-clearance-and-permissions.mdx#section) certificate or higher.
+
+* To restrict the creation of JavaScript indexes to database admins (and above),
+  set the [Indexing.Static.RequireAdminToDeployJavaScriptIndexes](../server/configuration/indexing-configuration.mdx#indexingstaticrequireadmintodeployjavascriptindexes) configuration to `true`.
+
+* All other capabilities and features of JavaScript indexes are identical to those of [C# indexes](../indexes/indexing-basics.mdx).
+
+
+
+## Map index
+
+* A map index contains a single `map` function.
+  To define an index that uses multiple map functions, see the section on [Multi-Map indexes](../indexes/javascript-indexes.mdx#multi-map-index) below.
+
+* The `map` function is written as a string and specifies what content from the documents will be indexed.
+
+
+
+#### Example I - Map index - basic
+The following index indexes the `FirstName` and `LastName` of employees from the _Employees_ collection.
+
+
+
+{`public class Employees_ByFirstAndLastName_JS : AbstractJavaScriptIndexCreationTask
+\{
+    public class IndexEntry
+    \{
+        public string FirstName \{ get; set; \}
+        public string LastName \{ get; set; \}
+    \}
+
+    public Employees_ByFirstAndLastName_JS()
+    \{
+        Maps = new HashSet<string>
+        \{
+            // Define the 'map' function:
+            // Index content from documents in the 'Employees' collection
+            @"map('Employees', function (employee) \{
+
+                // Provide your JavaScript code here
+                // Return an object that defines the index-entry:
+                // ==============================================
+
+                return \{
+                    // Define the index-fields:
+                    // ========================
+
+                    FirstName: employee.FirstName,
+                    LastName: employee.LastName
+                \};
+            \})",
+        \};
+    \}
+\}
+`}
+
+
+
+**Query the index**:
+Once the index is deployed, you can query for _Employee_ documents based on the indexed name fields.
+
+
+
+
+{`List<Employee> employees = session
+    // Query the map index
+    .Query<Employees_ByFirstAndLastName_JS.IndexEntry, Employees_ByFirstAndLastName_JS>()
+    .Where(x => x.LastName == "King")
+    .OfType<Employee>()
+    .ToList();
+`}
+
+
+
+
+{`from index "Employees/ByFirstAndLastName/JS"
+where LastName == "King"
+`}
+
+
+
+
+
+
+
+#### Example II - Map index - with additional sources
+* The following index indexes the names of all comment authors (including nested replies) for each _BlogPost_ document.
+
+* It uses `getNames`, a recursive helper defined in `AdditionalSources`, to traverse every comment level and accumulate author names.
+
+
+
+
+{`public class BlogPosts_ByCommentAuthor_JS : AbstractJavaScriptIndexCreationTask
+{
+    public class IndexEntry
+    {
+        public string[] Authors { get; set; }
+    }
+
+    public BlogPosts_ByCommentAuthor_JS()
+    {
+        Maps = new HashSet<string>()
+        {
+            @"map('BlogPosts', function(post) {
+                const names = [];
+
+                // Get names of authors from the additional source code:
+                if (post.Comments) {
+                    post.Comments.forEach(x => getNames(x, names));
+                }
+
+                return {
+                    Authors: names
+                };
+            })"
+        };
+
+        AdditionalSources = new Dictionary<string, string>
+        {
+            ["The getNames method"] = @"
+                function getNames(comment, names) {
+                    names.push(comment.Author);
+
+                    if (comment.Comments) {
+                        comment.Comments.forEach(x => getNames(x, names));
+                    }
+                }"
+        };
+    }
+}
+`}
+
+
+
+
+{`public class BlogPost
+{
+    public string Author { get; set; }
+    public string Title { get; set; }
+    public string Text { get; set; }
+    public List<BlogPostComment> Comments { get; set; }
+}
+
+public class BlogPostComment
+{
+    public string Author { get; set; }
+    public string Text { get; set; }
+    public List<BlogPostComment> Comments { get; set; }
+}
+`}
+
+
+
+
+
+
+
+#### Example III - Map index - with inline string compilation
+* To define a JavaScript index using inline string compilation,
+  you must set the [Indexing.AllowStringCompilation](../server/configuration/indexing-configuration.mdx#indexingallowstringcompilation) configuration key to _true_.
+
+* The following indexes use inline string compilation to evaluate whether each product’s `UnitsInStock` is low.
+
+
+
+
+{`public class Products_ByStock1_JS : AbstractJavaScriptIndexCreationTask
+{
+    public class IndexEntry
+    {
+        public bool StockIsLow { get; set; }
+    }
+
+    public Products_ByStock1_JS()
+    {
+        Maps = new HashSet<string>
+        {
+            @"map('Products', function(product) {
+                // Define a string expression to check for low stock.
+                const functionBody = 'return product.UnitsInStock < 10';
+
+                // Create a dynamic function that evaluates the expression at runtime.
+                const dynamicFunc = new Function(""product"", functionBody);
+
+                return {
+                    StockIsLow: dynamicFunc(product)
+                };
+            });",
+        };
+
+        // Enable string‑compilation so this index can execute the inline script
+        Configuration["Indexing.AllowStringCompilation"] = "true";
+    }
+}
+`}
+
+
+
+
+{`public class Products_ByStock2_JS : AbstractJavaScriptIndexCreationTask
+{
+    public class IndexEntry
+    {
+        public bool StockIsLow { get; set; }
+    }
+
+    public Products_ByStock2_JS()
+    {
+        Maps = new HashSet<string>
+        {
+            @"map('Products', function(product) {
+                // Define a string expression with your condition
+                const expression = 'product.UnitsInStock < 10';
+
+                // Evaluate the string expression at runtime using eval.
+                const isLowOnStock = eval(expression);
+
+                return {
+                    StockIsLow: isLowOnStock
+                };
+            });",
+        };
+
+        // Enable string‑compilation so this index can execute the inline script
+        Configuration["Indexing.AllowStringCompilation"] = "true";
+    }
+}
+`}
+
+
+
+
+
+
+Learn more about [Map indexes here](../indexes/map-indexes.mdx).
+
+
+
+## Multi-Map index
+
+* A Multi-Map index allows indexing data from multiple collections.
+
+* For example, the following index processes documents from both the _Cats_ and _Dogs_ collections.
+
+
+
+
+
+{`public class Animals_ByName_JS : AbstractJavaScriptIndexCreationTask
+\{
+    public class IndexEntry
+    \{
+        public string Name \{ get; set; \}
+    \}
+
+    public Animals_ByName_JS()
+    \{
+        Maps = new HashSet<string>()
+        \{
+            // Define a map function on the 'Cats' collection
+            @"map('Cats', function(c) \{ return \{ Name: c.Name \}\})",
+
+            // Define a map function on the 'Dogs' collection
+            @"map('Dogs', function(d) \{ return \{ Name: d.Name \}\})"
+        \};
+    \}
+\}
+`}
+
+
+
+**Query the index**:
+Once the index is deployed, querying it will return matching documents from both the _Cats_ and _Dogs_ collections.
+
+
+
+
+{`var animalsNamedMilo = session
+    // Query the multi-map index
+    .Query<Animals_ByName_JS.IndexEntry, Animals_ByName_JS>()
+    .Where(x => x.Name == "Milo")
+    .ToList();
+`}
+
+
+
+
+{`from index "Animals/ByName/JS"
+where Name == "Milo"
+`}
+
+
+
+
+
+
+Learn more about [Multi-Map indexes here](../indexes/multi-map-indexes.mdx).
+
+
+
+## Map-Reduce index
+
+* A Map-Reduce index allows you to perform complex data aggregations.
+
+* In the **Map** stage, the index processes documents and extracts relevant data using the defined mapping function(s).
+
+* In the **Reduce** stage, the map results are aggregated to produce the final output.
+
+
+
+#### Example I
+The following index counts the number of products per category by grouping on the category name.
+
+
+
+{`public class Products_ByCategory_JS : AbstractJavaScriptIndexCreationTask
+\{
+    public class IndexEntry
+    \{
+        public string Category \{ get; set; \}
+        public int Count \{ get; set; \}
+    \}
+
+    public Products_ByCategory_JS()
+    \{
+        // The Map stage:
+        // For each product document -
+        // * load its related Category document using the 'load' function,
+        // * extract the category name, and return a count of 1.
+        Maps = new HashSet<string>()
+        \{
+            @"map('Products', function(p) \{
+                return \{
+                    Category: load(p.Category, 'Categories').Name,
+                    Count: 1
+                \}
+            \})"
+        \};
+
+        // The Reduce stage:
+        // * group the mapped results by Category
+        // * and count the number of products in each category.
+        Reduce = @"groupBy(x => x.Category).aggregate(g => \{
+            return \{
+                Category: g.key,
+                Count: g.values.reduce((count, val) => val.Count + count, 0)
+            \};
+        \})";
+    \}
+\}
+`}
+
+
+
+**Query the index**:
+Once the index is deployed, you can query for the total number of products per category,
+and optionally, order the results by product count in descending order.
+
+
+
+
+{`var topCategories = session
+    // Query the map-reduce index
+    .Query<Products_ByCategory_JS.IndexEntry, Products_ByCategory_JS>()
+    .OrderByDescending(x => x.Count)
+    .ToList();
+`}
+
+
+
+
+{`from index "Products/ByCategory/JS"
+order by Count as long desc
+`}
+
+
+
+
+
+
+
+#### Example II
+The following index calculates how many items were sold and the total sales amount for each product and month.
+
+
+
+{`public class ProductSales_ByMonth_JS : AbstractJavaScriptIndexCreationTask
+\{
+    public class IndexEntry
+    \{
+        public string Product \{ get; set; \}
+        public DateTime Month \{ get; set; \}
+        public int Count \{ get; set; \}
+        public decimal Total \{ get; set; \}
+    \}
+
+    public ProductSales_ByMonth_JS()
+    \{
+        // The Map stage:
+        // For each order, emit one entry per line with:
+        // * the product,
+        // * the first day of the order’s month,
+        // * a count of 1,
+        // * and the line’s total value.
+        Maps = new HashSet<string>()
+        \{
+            @"map('orders', function(order) \{
+                var res = [];
+                var orderDate = new Date(order.OrderedAt);
+
+                order.Lines.forEach(l => \{
+                    res.push(\{
+                        Product: l.Product,
+                        Month: new Date(orderDate.getFullYear(), orderDate.getMonth(), 1),
+                        Count: 1,
+                        Total: (l.Quantity * l.PricePerUnit) * (1 - l.Discount)
+                    \})
+                \});
+
+                return res;
+            \})"
+        \};
+
+        // The Reduce stage:
+        // Group by product and month, then sum up counts and totals.
+        Reduce = @"
+            groupBy(x => (\{Product: x.Product, Month: x.Month\}))
+            .aggregate(g => \{
+                return \{
+                    Product: g.key.Product,
+                    Month: g.key.Month,
+                    Count: g.values.reduce((sum, x) => x.Count + sum, 0),
+                    Total: g.values.reduce((sum, x) => x.Total + sum, 0)
+                \}
+            \})";
+
+        // Output the reduce results into a dedicated collection
+        OutputReduceToCollection = "MonthlyProductSales";
+        PatternReferencesCollectionName = "MonthlyProductSales/References";
+        PatternForOutputReduceToCollectionReferences = "sales/monthly/\{Month\}";
+    \}
+\}
+`}
+
+
+
+
+
+Learn more about [Map-Reduce indexes here](../indexes/map-reduce-indexes.mdx).
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_javascript-indexes-java.mdx b/versioned_docs/version-7.1/indexes/_javascript-indexes-java.mdx
new file mode 100644
index 0000000000..36e08bceb4
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_javascript-indexes-java.mdx
@@ -0,0 +1,198 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+This feature was created for users who want to create an index and prefer JavaScript over C#.
+JavaScript indexes can be defined by a user with lower permissions than C# indexes (admin not required).
+All other capabilities and features are the same as for C# indexes.
+
+## Creating a JavaScript index
+
+To create a JavaScript index, we need to define a class that inherits
+from `AbstractJavaScriptIndexCreationTask`.
+`AbstractJavaScriptIndexCreationTask` inherits from `AbstractIndexCreationTask`
+(read more about `AbstractIndexCreationTask` [here](../indexes/creating-and-deploying.mdx#Using-AbstractIndexCreationTask)).
+
+
+
+{`public static class Employees_ByFirstAndLastName extends AbstractJavaScriptIndexCreationTask \{
+    // ...
+\}
+`}
+
+
+
+## Map index
+
+`Map` indexes, sometimes referred to as simple indexes, contain one (or more) mapping functions that indicate which fields from the documents should be indexed.
+They indicate which documents can be searched by which fields.
+
+
+
+{`map(<collection-name>, function (document) \{
+    return \{
+        // indexed properties go here e.g:
+        // name: document.name
+    \};
+\})
+`}
+
+
+
+### Example I - Simple map index
+
+
+
+{`public static class Employees_ByFirstAndLastName extends AbstractJavaScriptIndexCreationTask \{
+    public Employees_ByFirstAndLastName() \{
+        setMaps(Sets.newHashSet("map('Employees', function (employee)\{\\n" +
+            "    return \{\\n" +
+            "        FirstName : employee.FirstName,\\n" +
+            "        LastName : employee.LastName\\n" +
+            "    \};\\n" +
+            "\})"));
+    \}
+\}
+`}
+
+
+
+### Example II - Map index with additional sources
+
+
+
+{`public static class BlogPosts_ByCommentAuthor extends AbstractJavaScriptIndexCreationTask \{
+    public BlogPosts_ByCommentAuthor() \{
+        setMaps(Sets.newHashSet("map('BlogPosts', function(b)\{\\n" +
+            "    var names = [];\\n" +
+            "    b.comments.forEach(x => getNames(x, names));\\n" +
+            "    return \{\\n" +
+            "        authors : names\\n" +
+            "    \};" +
+            "\})"));
+
+        java.util.Map<String, String> additionalSources = new HashMap<>();
+        additionalSources.put("The Script", "function getNames(x, names)\{\\n" +
+            "    names.push(x.author);\\n" +
+            "    x.comments.forEach(x => getNames(x, names));\\n" +
+            "\}");
+
+        setAdditionalSources(additionalSources);
+    \}
+\}
+`}
+
+
+
+Read more about map indexes [here](../indexes/map-indexes.mdx).
+
+## Multi-Map index
+
+Multi-Map indexes allow you to index data from multiple collections.
+
+### Example
+
+
+
+{`public static class Animals_ByName extends AbstractJavaScriptIndexCreationTask \{
+    public Animals_ByName() \{
+        setMaps(Sets.newHashSet(
+            "map('cats', function (c)\{ return \{name: c.name\}\})",
+            "map('dogs', function (d)\{ return \{name: d.name\}\})"
+        ));
+    \}
+\}
+`}
+
+
+
+Read more about multi-map indexes [here](../indexes/multi-map-indexes.mdx).
+
+## Map-Reduce index
+Map-Reduce indexes allow you to perform complex aggregations of data.
+The first stage, called the map, runs over documents and extracts portions of data according to the defined mapping function(s).
+Upon completion of the first phase, reduction is applied to the map results and the final outcome is produced.
+
+
+
+{`groupBy(x => \{map properties\})
+    .aggregate(y => \{
+        return \{
+            // indexed properties go here e.g:
+            // name: y.name
+        \};
+    \})
+`}
+
+
+
+### Example I
+
+
+
+{`public static class Products_ByCategory extends AbstractJavaScriptIndexCreationTask \{
+    public Products_ByCategory() \{
+        setMaps(Sets.newHashSet("map('products', function(p)\{\\n" +
+            "    return \{\\n" +
+            "        Category: load(p.Category, 'Categories').Name,\\n" +
+            "        Count: 1\\n" +
+            "    \}\\n" +
+            "\})"));
+
+        setReduce("groupBy(x => x.Category)\\n" +
+            "    .aggregate(g => \{\\n" +
+            "        return \{\\n" +
+            "            Category: g.key,\\n" +
+            "            Count: g.values.reduce((count, val) => val.Count + count, 0)\\n" +
+            "        \};\\n" +
+            "    \})");
+    \}
+\}
+`}
+
+
+
+### Example II
+
+
+
+{`public static class Product_Sales_ByMonth extends AbstractJavaScriptIndexCreationTask \{
+    public Product_Sales_ByMonth() \{
+        setMaps(Sets.newHashSet("map('orders', function(order)\{\\n" +
+            "    var res = [];\\n" +
+            "    order.Lines.forEach(l => \{\\n" +
+            "        res.push(\{\\n" +
+            "            Product: l.Product,\\n" +
+            "            Month: new Date( (new Date(order.OrderedAt)).getFullYear(),(new Date(order.OrderedAt)).getMonth(),1),\\n" +
+            "            Count: 1,\\n" +
+            "            Total: (l.Quantity * l.PricePerUnit) * (1- l.Discount)\\n" +
+            "        \})\\n" +
+            "    \});\\n" +
+            "    return res;\\n" +
+            "\})"));
+
+        setReduce("groupBy(x => (\{Product: x.Product, Month: x.Month\}))\\n" +
+            "    .aggregate(g => \{\\n" +
+            "        return \{\\n" +
+            "            Product: g.key.Product,\\n" +
+            "            Month: g.key.Month,\\n" +
+            "            Count: g.values.reduce((sum, x) => x.Count + sum, 0),\\n" +
+            "            Total: g.values.reduce((sum, x) => x.Total + sum, 0)\\n" +
+            "        \}\\n" +
+            "    \})");
+
+        setOutputReduceToCollection("MonthlyProductSales");
+    \}
+\}
+`}
+
+
+
+Read more about map-reduce indexes [here](../indexes/map-reduce-indexes.mdx).
+
+
+Supported JavaScript version: ECMAScript 5.1
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_map-indexes-csharp.mdx b/versioned_docs/version-7.1/indexes/_map-indexes-csharp.mdx
new file mode 100644
index 0000000000..9a129bc497
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_map-indexes-csharp.mdx
@@ -0,0 +1,850 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* `Map` indexes, sometimes referred to as simple indexes, contain one (or more) mapping functions
+  that indicate which fields from the documents should be indexed.
+* After indexing, documents can be searched by the indexed fields.
+* The mapping functions are **LINQ-based functions** or **JavaScript functions** (when using
+  JavaScript indexes); they can be considered the **core** of indexes.
+
+* In This Page:
+  * [Indexing single fields](../indexes/map-indexes.mdx#indexing-single-fields)
+  * [Combining multiple fields](../indexes/map-indexes.mdx#combining-multiple-fields)
+  * [Indexing partial field data](../indexes/map-indexes.mdx#indexing-partial-field-data)
+  * [Filtering data within fields](../indexes/map-indexes.mdx#filtering-data-within-fields)
+  * [Indexing nested data](../indexes/map-indexes.mdx#indexing-nested-data)
+  * [Indexing Missing Fields](../indexes/map-indexes.mdx#indexing-missing-fields)
+
+
+
+* Indexing fields from [related documents](../indexes/indexing-related-documents.mdx)
+* Aggregating data with [Map-Reduce indexes](../indexes/map-reduce-indexes.mdx)
+* Indexing multiple collections with [Multi-Map indexes](../indexes/indexing-polymorphic-data.mdx#multi-map-indexes)
+* [Running calculations and storing the results in the index to reduce query time](https://demo.ravendb.net/demos/csharp/static-indexes/store-fields-in-index)
+
+
+
+
+## Indexing single fields
+
+Let's create an index that will help us search for `Employees` by their `FirstName`, `LastName`, or both.
+
+* First, let's create an index called `Employees/ByFirstAndLastName`
+
+  Note: The naming separator character "`_`" in your code will become "/" in the index name.
+  In the following sample, "`Employees_ByFirstAndLastName`" will become "Employees/ByFirstAndLastName" in your indexes list.
+
+
+
+
+
+{`public class Employees_ByFirstAndLastName : AbstractIndexCreationTask<Employee>
+{
+    // ...
+}
+`}
+
+
+
+
+{`public class Employees_ByFirstAndLastName : AbstractJavaScriptIndexCreationTask
+{
+    // ...
+}
+`}
+
+
+
+
+You might notice that we're passing `Employee` as a generic parameter to `AbstractIndexCreationTask`. This gives our indexing function a strongly-typed syntax. If you are not familiar with `AbstractIndexCreationTask`, you can read [this](../indexes/creating-and-deploying.mdx) article before proceeding.
+
+- The next step is to create the indexing function itself. This is done by setting the `Map` property with our function in a **parameterless constructor**.
+
+
+
+
+{`public Employees_ByFirstAndLastName()
+{
+    Map = employees => from employee in employees
+                       select new
+                       {
+                           FirstName = employee.FirstName,
+                           LastName = employee.LastName
+                       };
+}
+`}
+
+
+
+
+{`public Employees_ByFirstAndLastName()
+{
+    Map = employees => employees
+        .Select(employee => new
+        {
+            FirstName = employee.FirstName,
+            LastName = employee.LastName
+        });
+}
+`}
+
+
+
+
+{`public Employees_ByFirstAndLastName()
+{
+    Maps = new HashSet<string>
+    {
+        @"map('Employees', function (employee){
+            return {
+                FirstName : employee.FirstName,
+                LastName : employee.LastName
+            };
+        })",
+    };
+}
+`}
+
+
+
+
+- The final step is to [deploy it](../indexes/creating-and-deploying.mdx) to the server
+  and issue a query using the session [Query](../client-api/session/querying/how-to-query.mdx) method.
+  To query an index, the name of the index must be called by the query.
+  If the index isn't called, RavenDB will either use or create an [auto index](../indexes/creating-and-deploying.mdx#auto-indexes).
+ + + + +{`IList employees1 = session + .Query() + .Where(x => x.FirstName == "Robert") + .ToList(); + +IList employees2 = session + .Query("Employees/ByFirstAndLastName") + .Where(x => x.FirstName == "Robert") + .ToList(); +`} + + + + +{`from index 'Employees/ByFirstAndLastName' +where FirstName = 'Robert' +`} + + + + +Our final index looks like: + + + + +{`public class Employees_ByFirstAndLastName : AbstractIndexCreationTask +{ + public Employees_ByFirstAndLastName() + { + Map = employees => from employee in employees + select new + { + FirstName = employee.FirstName, + LastName = employee.LastName + }; + } +} +`} + + + + +{`public class Employees_ByFirstAndLastName : AbstractJavaScriptIndexCreationTask +{ + public Employees_ByFirstAndLastName() + { + Maps = new HashSet + { + @"map('Employees', function (employee){ + return { + FirstName : employee.FirstName, + LastName : employee.LastName + }; + })", + }; + } +} +`} + + + + + + +Please note that indexing capabilities are detected automatically from the returned field type from the indexing function. + +For example, if our `Employee` will have a property called `Age` that is an `integer` then the following indexing function... + + + + +{`from employee in docs.Employees +select new +{ + Age = employee.Age +} +`} + + + + +{`map('Employees', function(employee) +{ + return { + Age : employee.Age + }; +}) +`} + + + + + + +...grants us the capability to issue numeric queries (**return all the Employees whose Age is more than 30**). + +Changing the `Age` type to a `string` will take that capability away from you. The easiest example would be to issue `.ToString()` on the `Age` field... + + + + +{`from employee in docs.Employees +select new +{ + Age = employee.Age.ToString() +} +`} + + + + +{`map('Employees', function(employee) +{ + return { + Age : employee.Age.toString() + }; +}) +`} + + + + + + + + +You will probably notice that in the `Studio`, this function is a bit different from the one defined in the `Employees_ByFirstAndLastName` class: + + + +{`from employee in docs.Employees +select new +\{ + FirstName = employee.FirstName, + LastName = employee.LastName +\} +`} + + + +The part you should pay attention to is `docs.Employees`. This syntax indicates from which collection a server should take the documents for indexing. In our case, documents will be taken from the `Employees` collection. To change the collection, you need to change `Employees` to the desired collection name or remove it and leave only `docs` to index **all documents**. + + + + + +## Combining multiple fields + +Since each index contains a LINQ function, you can combine multiple fields into one. 
+
+#### Example I
+Index definition:
+
+
+
+{`public class Employees_ByFullName : AbstractIndexCreationTask<Employee, Employees_ByFullName.Result>
+{
+    public class Result
+    {
+        public string FullName { get; set; }
+    }
+
+    public Employees_ByFullName()
+    {
+        Map = employees => from employee in employees
+                           select new Result
+                           {
+                               FullName = employee.FirstName + " " + employee.LastName
+                           };
+    }
+}
+`}
+
+
+
+{`public class Employees_ByFullName : AbstractJavaScriptIndexCreationTask
+{
+    public class Result
+    {
+        public string FullName { get; set; }
+    }
+
+    public Employees_ByFullName()
+    {
+        Maps = new HashSet<string>
+        {
+            @"map('Employees', function (employee){
+                return {
+                    FullName : employee.FirstName + ' ' + employee.LastName
+                };
+            })",
+        };
+    }
+}
+`}
+
+
+
+Query the index:
+
+
+
+{`// notice that we're 'cheating' here
+// by marking result type in 'Query' as 'Employees_ByFullName.Result' to get strongly-typed syntax
+// and changing type using 'OfType' before sending query to server
+IList<Employee> employees = session
+    .Query<Employees_ByFullName.Result, Employees_ByFullName>()
+    .Where(x => x.FullName == "Robert King")
+    .OfType<Employee>()
+    .ToList();
+`}
+
+
+
+{`IList<Employee> employees = session
+    .Advanced
+    .DocumentQuery<Employee, Employees_ByFullName>()
+    .WhereEquals("FullName", "Robert King")
+    .ToList();
+`}
+
+
+
+{`from index 'Employees/ByFullName'
+where FullName = 'Robert King'
+`}
+
+
+
+#### Example II
+
+
+
+In this example, the index field `Query` combines all values from various Employee fields into one.
+The default Analyzer on fields is changed to enable `Full-Text Search` operations. The matches no longer need to be exact.
+
+You can read more about analyzers and `Full-Text Search` [here](../indexes/using-analyzers.mdx).
+
+
+
+Index definition:
+
+
+
+{`public class Employees_Query : AbstractIndexCreationTask<Employee, Employees_Query.Result>
+{
+    public class Result
+    {
+        public string[] Query { get; set; }
+    }
+
+    public Employees_Query()
+    {
+        Map = employees => from employee in employees
+                           select new Result
+                           {
+                               Query = new[]
+                               {
+                                   employee.FirstName,
+                                   employee.LastName,
+                                   employee.Title,
+                                   employee.Address.City
+                               }
+                           };
+
+        Index(x => x.Query, FieldIndexing.Search);
+    }
+}
+`}
+
+
+
+{`public class Employees_Query : AbstractJavaScriptIndexCreationTask
+{
+    public class Result
+    {
+        public string[] Query { get; set; }
+    }
+
+    public Employees_Query()
+    {
+        Maps = new HashSet<string>
+        {
+            @"map('Employees', function (employee) {
+                return {
+                    Query : [employee.FirstName,
+                             employee.LastName,
+                             employee.Title,
+                             employee.Address.City]
+                }
+            })"
+        };
+        Fields = new Dictionary<string, IndexFieldOptions>()
+        {
+            {"Query", new IndexFieldOptions(){ Indexing = FieldIndexing.Search} }
+        };
+    }
+}
+`}
+
+
+
+Query the index:
+
+
+
+{`IList<Employee> employees = session
+    .Advanced
+    .DocumentQuery<Employees_Query.Result, Employees_Query>()
+    .Search(x => x.Query, "John Doe")
+    .SelectFields<Employee>()
+    .ToList();
+`}
+
+
+
+{`from index 'Employees/Query'
+where search(Query, 'John Doe')
+`}
+
+
+
+
+
+## Indexing partial field data
+
+Imagine that you would like to return all employees that were born in a specific year.
+You can do it by indexing `Birthday` from `Employee`, then specify the year in `Birthday` as you query the index: + +Index definition: + + + +{`public class Employees_ByBirthday : AbstractIndexCreationTask +{ + public class Result + { + public DateTime Birthday { get; set; } + } + + public Employees_ByBirthday() + { + Map = employees => from employee in employees + select new Result + { + Birthday = employee.Birthday + }; + } +} +`} + + + + +{`public class Employees_ByBirthday : AbstractJavaScriptIndexCreationTask +{ + public class Result + { + public DateTime Birthday { get; set; } + } + + public Employees_ByBirthday() + { + Maps = new HashSet + { + @"map('Employees', function (employee){ + return { + Birthday : employee.Birthday + } + })" + }; + } +} +`} + + + + +Query the index: + + + +{`DateTime startDate = new DateTime(1963, 1, 1); +DateTime endDate = startDate.AddYears(1).AddMilliseconds(-1); +IList employees = session + .Query() + .Where(x => x.Birthday >= startDate && x.Birthday <= endDate) + .OfType() + .ToList(); +`} + + + + +{`DateTime startDate = new DateTime(1963, 1, 1); +DateTime endDate = startDate.AddYears(1).AddMilliseconds(-1); +IList employees = session + .Advanced + .DocumentQuery() + .WhereBetween(x => x.Birthday, startDate, endDate) + .OfType() + .ToList(); +`} + + + + +{`from index 'Employees/ByBirthday ' +where Birthday between '1963-01-01' and '1963-12-31T23:59:59.9990000' +`} + + + + +RavenDB gives you the ability **to extract field data and to index by it**. A different way to achieve our goal will look as follows: + +Index definition: + + + +{`public class Employees_ByYearOfBirth : AbstractIndexCreationTask +{ + public class Result + { + public int YearOfBirth { get; set; } + } + + public Employees_ByYearOfBirth() + { + Map = employees => from employee in employees + select new Result + { + YearOfBirth = employee.Birthday.Year + }; + } +} +`} + + + + +{`public class Employees_ByYearOfBirth : AbstractJavaScriptIndexCreationTask +{ + public class Result + { + public int YearOfBirth { get; set; } + } + + public Employees_ByYearOfBirth() + { + Maps = new HashSet + { + @"map('Employees', function (employee){ + return { + Birthday : employee.Birthday.Year + } + })" + }; + } +} +`} + + + + +Query the index: + + + +{`IList employees = session + .Query() + .Where(x => x.YearOfBirth == 1963) + .OfType() + .ToList(); +`} + + + + +{`IList employees = session + .Advanced + .DocumentQuery() + .WhereEquals(x => x.YearOfBirth, 1963) + .OfType() + .ToList(); +`} + + + + +{`from index 'Employees/ByYearOfBirth' +where YearOfBirth = 1963 +`} + + + + + + +## Filtering data within fields + +In the examples above, `where` is used in the query to filter the results. +If you consistently want to filter with the same filtering conditions, +you can use `where` in the index definition to narrow the index terms that the query must scan. + +This can save query-time but narrows the terms available to query. + +#### Example I + +For logic that has to do with special import rules that only apply to the USA +`where` can be used to filter the Companies collection `Address.Country` field. +Thus, we only index documents `where company.Address.Country == "USA"` . 
+
+Index definition (LINQ Syntax):
+
+
+{`public class Companies_ByAddress_Country : AbstractIndexCreationTask<Company, Companies_ByAddress_Country.Result>
+\{
+    public class Result
+    \{
+        public string City \{ get; set; \}
+        public string Company \{ get; set; \}
+        public string Phone \{ get; set; \}
+    \}
+
+    public Companies_ByAddress_Country()
+    \{
+        Map = companies => from company in companies
+                           where company.Address.Country == "USA"
+                           select new Result
+                           \{
+                               Company = company.Name,
+                               City = company.Address.City,
+                               Phone = company.Phone
+                           \};
+    \}
+\}
+`}
+
+
+
+Query the index:
+
+
+
+{`IList<Company> orders = session
+    .Query<Companies_ByAddress_Country.Result, Companies_ByAddress_Country>()
+    .OfType<Company>()
+    .ToList();
+`}
+
+
+
+
+{`from index 'Companies_ByAddress_Country'
+`}
+
+
+
+#### Example II
+
+Imagine a seed company that needs to categorize its customers by latitude-based growing zones.
+
+They can create a different index for each zone and filter their customers in the index with
+`where (company.Address.Location.Latitude > 20 && company.Address.Location.Latitude < 50)`.
+
+Index definition (LINQ Syntax):
+
+
+{`public class Companies_ByAddress_Latitude : AbstractIndexCreationTask<Company>
+\{
+    public class Result
+    \{
+        public double latitude \{ get; set; \}
+        public double longitude \{ get; set; \}
+        public string companyName \{ get; set; \}
+        public string companyAddress \{ get; set; \}
+        public string companyPhone \{ get; set; \}
+    \}
+
+    public Companies_ByAddress_Latitude()
+    \{
+        Map = companies => from company in companies
+                           where (company.Address.Location.Latitude > 20 && company.Address.Location.Latitude < 50)
+                           select new
+                           \{
+                               latitude = company.Address.Location.Latitude,
+                               longitude = company.Address.Location.Longitude,
+                               companyName = company.Name,
+                               companyAddress = company.Address,
+                               companyPhone = company.Phone
+                           \};
+    \}
+\}
+`}
+
+
+
+Query the index:
+
+
+
+{`IList<Company> orders = session
+    .Query<Companies_ByAddress_Latitude.Result, Companies_ByAddress_Latitude>()
+    .OfType<Company>()
+    .ToList();
+`}
+
+
+
+
+{`from index 'Companies_ByAddress_Latitude'
+`}
+
+
+
+
+
+
+
+## Indexing nested data
+
+If your document contains nested data, e.g. `Employee` contains `Address`, you can index on its fields by accessing them directly in the index. Let's say that we would like to create an index that returns all employees that were born in a specific `Country`:
+
+Index definition:
+
+
+
+{`public class Employees_ByCountry : AbstractIndexCreationTask<Employee, Employees_ByCountry.Result>
+{
+    public class Result
+    {
+        public string Country { get; set; }
+    }
+
+    public Employees_ByCountry()
+    {
+        Map = employees => from employee in employees
+                           select new Result
+                           {
+                               Country = employee.Address.Country
+                           };
+    }
+}
+`}
+
+
+
+{`public class Employees_ByCountry : AbstractJavaScriptIndexCreationTask
+{
+    public class Result
+    {
+        public string Country { get; set; }
+    }
+
+    public Employees_ByCountry()
+    {
+        Maps = new HashSet<string>
+        {
+            @"map('Employees', function (employee){
+                return {
+                    Country : employee.Address.Country
+                }
+            })"
+        };
+    }
+}
+`}
+
+
+
+Query the index:
+
+
+
+{`IList<Employee> employees = session
+    .Query<Employees_ByCountry.Result, Employees_ByCountry>()
+    .Where(x => x.Country == "USA")
+    .OfType<Employee>()
+    .ToList();
+`}
+
+
+
+{`IList<Employee> employees = session
+    .Advanced
+    .DocumentQuery<Employees_ByCountry.Result, Employees_ByCountry>()
+    .WhereEquals(x => x.Country, "USA")
+    .OfType<Employee>()
+    .ToList();
+`}
+
+
+
+{`from index 'Employees/ByCountry'
+where Country = 'USA'
+`}
+
+
+
+
+If a document relationship is represented by the document's ID, you can use the `LoadDocument` method to retrieve such a document.
+Learn more [here](../indexes/indexing-related-documents.mdx).
+
+
+
+## Indexing Missing Fields
+
+By default, indexes will not index a document that contains none of the specified fields.
This behavior can be changed
+using the [Indexing.IndexEmptyEntries](../server/configuration/indexing-configuration.mdx#indexingindexemptyentries)
+configuration option.
+
+The option [Indexing.IndexMissingFieldsAsNull](../server/configuration/indexing-configuration.mdx#indexingindexmissingfieldsasnull)
+determines whether missing fields in documents are indexed with the value `null`, or not indexed at all.
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_map-indexes-java.mdx b/versioned_docs/version-7.1/indexes/_map-indexes-java.mdx new file mode 100644 index 0000000000..89d6c42dae --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_map-indexes-java.mdx @@ -0,0 +1,499 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* `Map` indexes, sometimes referred to as simple indexes, contain one (or more) mapping functions
+  that indicate which fields from the documents should be indexed.
+* After indexing, documents can be searched by the indexed fields.
+* The mapping functions are **LINQ-based functions** or **JavaScript functions** (when using
+  JavaScript indexes); they can be considered the **core** of indexes.
+
+* In this page:
+  * [Indexing single fields](../indexes/map-indexes.mdx#indexing-single-fields)
+  * [Combining multiple fields](../indexes/map-indexes.mdx#combining-multiple-fields)
+  * [Indexing partial field data](../indexes/map-indexes.mdx#indexing-partial-field-data)
+  * [Indexing nested data](../indexes/map-indexes.mdx#indexing-nested-data)
+
+
+
+* Indexing fields from [related documents](../indexes/indexing-related-documents.mdx)
+* Indexing fields from multiple collections using [Multi-Map indexes](../indexes/indexing-polymorphic-data.mdx#multi-map-indexes)
+
+
+
+
+
+## Indexing Single Fields
+
+Let's create an index that will help us search for `Employees` by their `FirstName`, `LastName`, or both.
+
+- First, let's create an index called `Employees/ByFirstAndLastName`
+
+
+
+
+{`public static class Employees_ByFirstAndLastName extends AbstractIndexCreationTask {
+    // ...
+}
+`}
+
+
+
+
+{`public static class Employees_ByFirstAndLastName extends AbstractJavaScriptIndexCreationTask {
+    // ...
+}
+`}
+
+
+
+
+- The next step is to create the indexing function itself. This is done by setting the `map` field with the mapping function in a **parameterless constructor**.
+ + + + +{`public Employees_ByFirstAndLastName() { + map = "docs.Employees.Select(employee => new { " + + " FirstName = employee.FirstName, " + + " LastName = employee.LastName " + + "})"; +} +`} + + + + +{`public Employees_ByFirstAndLastName() { + setMaps(Sets.newHashSet("map('Employees', function (employee){\\n" + + " return {\\n" + + " FirstName : employee.FirstName,\\n" + + " LastName : employee.LastName\\n" + + " };\\n" + + " })")); +} +`} + + + + +- The final step is to [deploy it](../indexes/creating-and-deploying.mdx) to the server and issue a query using the session [Query](../indexes/querying/query-index.mdx) method: + + + + +{`List employees1 = session.query(Employee.class, Employees_ByFirstAndLastName.class) + .whereEquals("FirstName", "Robert") + .toList(); + +List employees2 = session.query(Employee.class, Query.index("Employees/ByFirstAndLastName")) + .whereEquals("FirstName", "Robert") + .toList(); +`} + + + + +{`from index 'Employees/ByFirstAndLastName' +where FirstName = 'Robert' +`} + + + + + + +Please note that indexing capabilities are detected automatically from the returned field type from the indexing function. + +For example, if our `Employee` will have a property called `Age` that is an `integer` then the following indexing function... + + + + +{`from employee in docs.Employees +select new +{ + Age = employee.Age +} +`} + + + + +{`map('Employees', function(employee) +{ + return { + Age : employee.Age + }; +}) +`} + + + + +...grant us the capability to issue numeric queries (**return all the Employees that `Age` is more than 30**). + +Changing the `Age` type to a `string` will take that capability away from you. The easiest example would be to issue `.ToString()` on the `Age` field... + + + + +{`from employee in docs.Employees +select new +{ + Age = employee.Age.ToString() +} +`} + + + + +{`map('Employees', function(employee) +{ + return { + Age : employee.Age.toString() + }; +}) +`} + + + + + + + + +You will probably notice that in the `Studio`, this function is a bit different from the one defined in the `Employees_ByFirstAndLastName` class: + + + +{`from employee in docs.Employees +select new +\{ + FirstName = employee.FirstName, + LastName = employee.LastName +\} +`} + + + +The part you should pay attention to is `docs.Employees`. This syntax indicates from which collection a server should take the documents for indexing. In our case, documents will be taken from the `Employees` collection. To change the collection, you need to change `Employees` to the desired collection name or remove it and leave only `docs` to index **all documents**. + + + + + +## Combining Multiple Fields + +Since each index contains a LINQ function, you can combine multiple fields into one. 
+ +### Example I + +Index definition: + + + +{`public static class Employees_ByFullName extends AbstractIndexCreationTask { + public Employees_ByFullName() { + map = "docs.Employees.Select(employee => new { " + + " FullName = (employee.FirstName + \\" \\") + employee.LastName " + + "})"; + } +} +`} + + + + +{`public static class Employees_ByFullName extends AbstractJavaScriptIndexCreationTask { + public static class Result { + private String fullName; + + public String getFullName() { + return fullName; + } + + public void setFullName(String fullName) { + this.fullName = fullName; + } + } + + public Employees_ByFullName() { + setMaps(Sets.newHashSet("map('Employees', function (employee){\\n" + + " return {\\n" + + " FullName : employee.FirstName + ' ' + employee.LastName\\n" + + " };\\n" + + " })")); + } +} +`} + + + + +Query the index: + + + +{`// notice that we're 'cheating' here +// by marking result type in 'query' as 'Employees_ByFullName.Result' +// and changing type using 'ofType' before sending query to server +List employees = session + .query(Employee.class, Employees_ByFullName.class) + .whereEquals("FullName", "Robert King") + .toList(); +`} + + + + +{`from index 'Employees/ByFullName' +where FullName = 'Robert King' +`} + + + + +### Example II + + + +In this example, the index field `Query` combines all values from various Employee fields into one. The default Analyzer on field is changed to enable `Full Text Search` operations. The matches no longer need to be exact. + +You can read more about analyzers and `Full Text Search` [here](../indexes/using-analyzers.mdx). + + + +Index definition: + + + +{`public static class Employees_Query extends AbstractIndexCreationTask { + public Employees_Query() { + map = "docs.Employees.Select(employee => new { " + + " Query = new [] { employee.FirstName, employee.LastName, employee.Title, employee.Address.City } " + + "})"; + index("query", FieldIndexing.SEARCH); + } +} +`} + + + + +{`public static class Employees_Query extends AbstractJavaScriptIndexCreationTask { + public Employees_Query() { + setMaps(Sets.newHashSet("map('Employees', function (employee) {\\n" + + " return {\\n" + + " Query : [employee.FirstName,\\n" + + " employee.LastName,\\n" + + " employee.Title,\\n" + + " employee.Address.City]\\n" + + " }\\n" + + " })")); + + IndexFieldOptions fieldOptions = new IndexFieldOptions(); + fieldOptions.setIndexing(FieldIndexing.SEARCH); + getFields().put("Query", fieldOptions); + } +} +`} + + + + +Query the index: + + + +{`List employees = session + .query(Employee.class, Employees_Query.class) + .search("Query", "John Doe") + .toList(); +`} + + + + +{`from index 'Employees/Query' +where search(Query, 'John Doe') +`} + + + + + + +## Indexing Partial Field Data + +Imagine that you would like to return all employees that were born in a specific year. 
You can do it by indexing `birthday` from `Employee` in the following way: + +Index definition: + + + +{`public static class Employees_ByBirthday extends AbstractIndexCreationTask { + public Employees_ByBirthday() { + map = "docs.Employees.Select(employee => new { " + + " Birthday = employee.Birthday " + + "})"; + } +} +`} + + + + +{`public static class Employees_ByBirthday extends AbstractJavaScriptIndexCreationTask { + public Employees_ByBirthday() { + setMaps(Sets.newHashSet("map('Employees', function (employee){\\n" + + " return {\\n" + + " Birthday : employee.Birthday\\n" + + " }\\n" + + " })")); + } +} +`} + + + + +Query the index: + + + +{`LocalDate startDate = LocalDate.of(1963, 1, 1); +LocalDate endDate = startDate.plusYears(1).minus(1, ChronoUnit.MILLIS); +List employees = session + .query(Employee.class, Employees_ByBirthday.class) + .whereBetween("Birthday", startDate, endDate) + .toList(); +`} + + + + +{`from index 'Employees/ByBirthday ' +where Birthday between '1963-01-01' and '1963-12-31T23:59:59.9990000' +`} + + + + +RavenDB gives you the ability **to extract field data and to index by it**. A different way to achieve our goal will look as follows: + +Index definition: + + + +{`public static class Employees_ByYearOfBirth extends AbstractIndexCreationTask { + public Employees_ByYearOfBirth() { + map = "docs.Employees.Select(employee => new { " + + " YearOfBirth = employee.Birthday.Year " + + "})"; + } +} +`} + + + + +{`public static class Employees_ByYearOfBirth extends AbstractJavaScriptIndexCreationTask { + public Employees_ByYearOfBirth() { + setMaps(Sets.newHashSet("map('Employees', function (employee){\\n" + + " return {\\n" + + " Birthday : employee.Birthday.Year\\n" + + " }\\n" + + " })")); + } +} +`} + + + + +Query the index: + + + +{`List employees = session + .query(Employee.class, Employees_ByYearOfBirth.class) + .whereEquals("YearOfBirth", 1963) + .toList(); +`} + + + + +{`from index 'Employees/ByYearOfBirth' +where YearOfBirth = 1963 +`} + + + + + + +## Indexing Nested Data + +If your document contains nested data, e.g. `Employee` contains `Address`, you can index on its fields by accessing them directly in the index. Let's say that we would like to create an index that returns all employees that were born in a specific `Country`: + +Index definition: + + + +{`public static class Employees_ByCountry extends AbstractIndexCreationTask { + public Employees_ByCountry() { + map = "docs.Employees.Select(employee => new { " + + " Country = employee.Address.Country " + + "})"; + } +} +`} + + + + +{`public static class Employees_ByCountry extends AbstractJavaScriptIndexCreationTask { + public Employees_ByCountry() { + setMaps(Sets.newHashSet("map('Employees', function (employee){\\n" + + " return {\\n" + + " Country : employee.Address.Country\\n" + + " }\\n" + + " })")); + } +} +`} + + + + +Query the index: + + + +{`List employees = session + .query(Employee.class, Employees_ByCountry.class) + .whereEquals("Country", "USA") + .toList(); +`} + + + + +{`from index 'Employees/ByCountry' +where Country = 'USA' +`} + + + + +If a document relationship is represented by the document's ID, you can use the `LoadDocument` method to retrieve such a document. More about it can be found [here](../indexes/indexing-related-documents.mdx). 
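+
+As a minimal, illustrative sketch (the index, class, and field names below are ours, not part of
+the samples above), a JavaScript index can follow such an ID with the `load(id, collection)` call
+shown in the map-reduce examples earlier:
+
+
+
+{`public static class Products_ByCategoryName extends AbstractJavaScriptIndexCreationTask {
+    public Products_ByCategoryName() {
+        // load() follows the document ID stored in product.Category and
+        // indexes the Name of the related Categories document
+        setMaps(Sets.newHashSet("map('Products', function (product){\\n" +
+            "    return {\\n" +
+            "        CategoryName : load(product.Category, 'Categories').Name\\n" +
+            "    };\\n" +
+            "})"));
+    }
+}
+`}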
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_map-indexes-nodejs.mdx b/versioned_docs/version-7.1/indexes/_map-indexes-nodejs.mdx new file mode 100644 index 0000000000..5a6c047a64 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_map-indexes-nodejs.mdx @@ -0,0 +1,379 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* `Map` indexes, sometimes referred to as simple indexes, contain one (or more) mapping functions
+  that indicate which fields from the documents should be indexed.
+* After indexing, documents can be searched by the indexed fields.
+* The mapping functions are **LINQ-based functions**; they can be considered the **core** of indexes.
+
+* In This Page:
+  * [Indexing single fields](../indexes/map-indexes.mdx#indexing-single-fields)
+  * [Combining multiple fields](../indexes/map-indexes.mdx#combining-multiple-fields)
+  * [Indexing partial field data](../indexes/map-indexes.mdx#indexing-partial-field-data)
+  * [Indexing nested data](../indexes/map-indexes.mdx#indexing-nested-data)
+  * [Indexing Missing Fields](../indexes/map-indexes.mdx#indexing-missing-fields)
+
+
+
+* Indexing fields from [related documents](../indexes/indexing-related-documents.mdx)
+* Aggregating data with [Map-Reduce indexes](../indexes/map-reduce-indexes.mdx)
+* Indexing multiple collections with [Multi-Map indexes](../indexes/indexing-polymorphic-data.mdx#multi-map-indexes)
+* [Running calculations and storing the results in the index to reduce query time](https://demo.ravendb.net/demos/csharp/static-indexes/store-fields-in-index)
+
+
+
+
+## Indexing Single Fields
+
+Let's create an index that will help us search for `Employees` by their `FirstName`, `LastName`, or both.
+
+- First, let's create an index called `Employees/ByFirstAndLastName`
+
+
+
+{`class Employees_ByFirstAndLastName extends AbstractCsharpIndexCreationTask \{
+    // ...
+\}
+`}
+
+
+
+- The next step is to create the indexing function itself. This is done by setting the `map` field with the mapping function in the **constructor**.
+
+
+
+{`constructor() \{
+    super();
+
+    this.map = \`from employee in docs.Employees
+        select new \{
+            FirstName = employee.FirstName,
+            LastName = employee.LastName
+        \}\`;
+\}
+`}
+
+
+
+- The final step is to [deploy it](../indexes/creating-and-deploying.mdx) to the server and issue a query using the session [Query](../client-api/session/querying/how-to-query.mdx) method:
+
+
+
+
+{`const employees = await session.query({ indexName: "Employees/ByFirstAndLastName" })
+    .whereEquals("FirstName", "Robert")
+    .all();
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName'
+where FirstName = 'Robert'
+`}
+
+
+
+
+
+Please note that indexing capabilities are detected automatically from the returned field type from the indexing function.
+
+For example, if our `Employee` has a property called `Age` that is a `number`, then the following indexing function...
+
+
+
+{`\`from employee in docs.Employees
+select new
+\{
+    Age = employee.Age
+\}\`
+`}
+
+
+
+...grants us the capability to issue numeric queries (**return all the Employees whose `Age` is more than 30**).
+
+Changing the `Age` type to a `string` will take that capability away from you. The easiest example would be to issue `.ToString()` on the `Age` field...
+ + + +{`\`from employee in docs.Employees +select new +\{ + Age = employee.Age.ToString() +\}\` +`} + + + + + + + +You will probably notice that in the `Studio`, this function is a bit different from the one defined in the `Employees_ByFirstAndLastName` class: + + + +{`\`from employee in docs.Employees +select new +\{ + FirstName = employee.FirstName, + LastName = employee.LastName +\}\` +`} + + + +The part you should pay attention to is `docs.Employees`. This syntax indicates from which collection a server should take the documents for indexing. In our case, documents will be taken from the `Employees` collection. To change the collection, you need to change `Employees` to the desired collection name or remove it and leave only `docs` to index **all documents**. + + + + + +## Combining Multiple Fields + +Since each index contains a LINQ function, you can combine multiple fields into one. + +#### Example I + + + +{`class Employees_ByFullName extends AbstractCsharpIndexCreationTask \{ + constructor() \{ + super(); + + this.map = \`from employee in docs.Employees + select new \{ + Name = employee.FirstName + ' ' + employee.LastName + \}\`; + \} +\} +`} + + + + + + +{`const employees = await session + .query({ indexName: "Employees/ByFullName" }) + .whereEquals("FullName", "Robert King") + .ofType(Employee) + .all(); +`} + + + + +{`from index 'Employees/ByFullName' +where FullName = 'Robert King' +`} + + + +#### Example II + + + +In this example, the index field `Query` combines all values from various Employee fields into one. The default Analyzer on field is changed to enable `Full Text Search` operations. The matches no longer need to be exact. + +You can read more about analyzers and `Full Text Search` [here](../indexes/using-analyzers.mdx). + + + + + +{`class Employees_Query extends AbstractCsharpIndexCreationTask \{ + constructor() \{ + super(); + + this.map = \`from employee in docs.Employees + select new \{ + Query = new [] \{ + employee.FirstName, + employee.LastName, + employee.Title, + employee.Address.City + \} + \}\`; + + this.index("Query", "Search"); + \} +\} +`} + + + + + + +{`const employees = await session + .query({ indexName: "Employees/Query" }) + .search("Query", "John Doe") + .ofType(Employee) + .all(); +`} + + + + +{`from index 'Employees/Query' +where search(Query, 'John Doe') +`} + + + + + + +## Indexing Partial Field Data + +Imagine that you would like to return all employees that were born in a specific year. You can do it by indexing `Birthday` from `Employee` in the following way: + + + +{`class Employees_ByBirthday extends AbstractCsharpIndexCreationTask \{ + + constructor() \{ + super(); + + this.map = \`from employee in docs.Employees + select new \{ + Birthday = employee.Birthday, + \}\`; + \} + +\} +`} + + + + + + +{`const startDate = new Date(1963, 1, 1); +const endDate = new Date(1963, 12, 31, 23, 59, 59, 999); +const employees = await session + .query({ indexName: "Employees/ByBirthday" }) + .whereBetween("Birthday", startDate, endDate) + .ofType(Employee) + .all(); +`} + + + + +{`from index 'Employees/ByBirthday ' +where Birthday between '1963-01-01' and '1963-12-31T23:59:59.9990000' +`} + + + + +RavenDB gives you the ability to extract field data and to index by it. 
A different way to achieve our goal will look as follows: + + + +{`class Employees_ByYearOfBirth extends AbstractCsharpIndexCreationTask \{ + + constructor() \{ + super(); + + this.map = \`from employee in docs.Employees + select new \{ + YearOfBirth = employee.Birthday.Year, + \}\`; + \} +\} +`} + + + + + + +{`const employees = await session + .query({ indexName: "Employees/ByYearOfBirth" }) + .whereEquals("YearOfBirth", 1963) + .ofType(Employee) + .all(); +`} + + + + +{`from index 'Employees/ByYearOfBirth' +where YearOfBirth = 1963 +`} + + + + + + +## Indexing Nested Data + +If your document contains nested data, e.g. `Employee` contains `Address`, you can index on its fields by accessing them directly in the index. Let's say that we would like to create an index that returns all employees that were born in a specific `Country`: + + + +{`class Employees_ByCountry extends AbstractCsharpIndexCreationTask \{ + + constructor() \{ + super(); + + this.map = \`from employee in docs.Employees + select new \{ + Country = employee.Address.Country + \}\`; + \} +\} +`} + + + + + + +{`const employees = await session + .query({ indexName: "Employees/ByCountry" }) + .whereEquals("Country", "USA") + .ofType(Employee) + .all(); +`} + + + + +{`from index 'Employees/ByCountry' +where Country = 'USA' +`} + + + + +If a document relationship is represented by the document's ID, you can use the `LoadDocument` method to retrieve such a document. More about it can be found [here](../indexes/indexing-related-documents.mdx). + + + +## Indexing Missing Fields + +By default, indexes will not index a document that contains none of the specified fields. This behavior can be changed +using the [Indexing.IndexEmptyEntries](../server/configuration/indexing-configuration.mdx#indexingindexemptyentries) +configuration option. + +The option [Indexing.IndexMissingFieldsAsNull](../server/configuration/indexing-configuration.mdx#indexingindexmissingfieldsasnull) +determines whether missing fields in documents are indexed with the value `null`, or not indexed at all. + + + + diff --git a/versioned_docs/version-7.1/indexes/_map-indexes-php.mdx b/versioned_docs/version-7.1/indexes/_map-indexes-php.mdx new file mode 100644 index 0000000000..fcc79a5b00 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_map-indexes-php.mdx @@ -0,0 +1,789 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* `Map` indexes, sometimes referred to as simple indexes, contain one or more mapping functions + to indicate which document fields should be indexed. +* After indexing, documents can be searched by the indexed fields. +* The mapping functions can be considered the **core** of indexes. 
+ +* In This Page: + * [Indexing single fields](../indexes/map-indexes.mdx#indexing-single-fields) + * [Combining multiple fields](../indexes/map-indexes.mdx#combining-multiple-fields) + * [Indexing partial field data](../indexes/map-indexes.mdx#indexing-partial-field-data) + * [Filtering data within fields](../indexes/map-indexes.mdx#filtering-data-within-fields) + * [Indexing nested data](../indexes/map-indexes.mdx#indexing-nested-data) + * [Indexing Missing Fields](../indexes/map-indexes.mdx#indexing-missing-fields) + + + +* Indexing fields from [related documents](../indexes/indexing-related-documents.mdx) +* Aggregating data with [Map-Reduce indexes](../indexes/map-reduce-indexes.mdx) +* Indexing multiple collections with [Multi-Map indexes](../indexes/indexing-polymorphic-data.mdx#multi-map-indexes) +* [Running calculations and storing the results in the index to reduce query time](https://demo.ravendb.net/demos/python/static-indexes/store-fields-in-index) + + + + +## Indexing single fields + +Let's create an index that will help us search for `Employees` by their `FirstName`, `LastName`, or both. + +* First, let's create an index called `Employees/ByFirstAndLastName` + + Note: The naming separator character "`_`" in your code will become "/" in the index name. + In the following sample, "`Employees_ByFirstAndLastName`" will become "Employees/ByFirstAndLastName" in your indexes list. + + + + + +{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask +{ + // ... +} +`} + + + + +{`class Employees_ByFirstAndLastName extends AbstractJavaScriptIndexCreationTask +{ + // ... +} +`} + + + + +You might notice that we're passing `Employee` as a generic parameter to `AbstractIndexCreationTask`. +This gives our indexing function a strongly-typed syntax. If you are not familiar with `AbstractIndexCreationTask`, +you can read [this](../indexes/creating-and-deploying.mdx) article before proceeding. + +- The next step is to create the indexing function itself. This is done by setting the `map` property with our function in a **parameterless constructor**. + + + + +{`public function __construct() +{ + parent::__construct(); + + $this->map = "docs.Employees.Select(employee => new { " . + " FirstName = employee.FirstName, " . + " LastName = employee.LastName " . + "})"; +} +`} + + + + +{`public function __construct() +{ + parent::__construct(); + + $this->setMaps([ + "map('Employees', function (employee){ + return { + FirstName : employee.FirstName, + LastName : employee.LastName + }; + })" + ]); +} +`} + + + + +- The final step is to [deploy it](../indexes/creating-and-deploying.mdx) to the server + and issue a query using the session [Query](../client-api/session/querying/how-to-query.mdx) method. + To query an index, the name of the index must be called by the query. + If the index isn't called, RavenDB will either use or create an [auto index](../indexes/creating-and-deploying.mdx#auto-indexes). 
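+
+  Deployment itself is a single call. A minimal sketch (not part of the original sample set),
+  assuming an already-initialized `DocumentStore` in `$store` and that the PHP client's
+  `execute()` mirrors the deployment call of the other RavenDB clients:
+
+
+
+{`// Deploy this one index to the server
+$index = new Employees_ByFirstAndLastName();
+$index->execute($store);
+`}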
+
+
+
+{`$employees1 = $session
+    ->query(Employee::class, Employees_ByFirstAndLastName::class)
+    ->whereEquals('FirstName', "Robert")
+    ->toList();
+
+$employees2 = $session
+    ->query("Employees/ByFirstAndLastName")
+    ->whereEquals('FirstName', "Robert")
+    ->toList();
+`}
+
+
+
+{`from index 'Employees/ByFirstAndLastName'
+where FirstName = 'Robert'
+`}
+
+
+
+This is what our final index looks like:
+
+
+
+{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "docs.Employees.Select(employee => new { " .
+            "    FirstName = employee.FirstName, " .
+            "    LastName = employee.LastName " .
+            "})";
+    }
+}
+`}
+
+
+
+{`class Employees_ByFirstAndLastName extends AbstractJavaScriptIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->setMaps(["map('Employees', function (employee){
+            return {
+                FirstName : employee.FirstName,
+                LastName : employee.LastName
+            };
+        })"]);
+    }
+}
+`}
+
+
+
+
+
+Please note that indexing capabilities are detected automatically from the returned field type from the indexing function.
+
+For example, if our `Employee` has a property named `Age` that is an `int`, the following indexing function...
+
+
+
+
+{`from employee in docs.Employees
+select new
+{
+    Age = employee.Age
+}
+`}
+
+
+
+
+{`map('Employees', function(employee)
+{
+    return {
+        Age : employee.Age
+    };
+})
+`}
+
+
+
+
+
+...grants us the capability to issue numeric queries (**return all the Employees whose Age is more than 30**).
+
+Changing the `Age` type to a `string` will take that capability away from you. The easiest example would be to issue `.ToString()` on the `Age` field...
+
+
+
+
+{`from employee in docs.Employees
+select new
+{
+    Age = employee.Age.ToString()
+}
+`}
+
+
+
+
+{`map('Employees', function(employee)
+{
+    return {
+        Age : employee.Age.toString()
+    };
+})
+`}
+
+
+
+
+
+
+
+You will probably notice that in the `Studio`, this function is a bit different from the one defined in the `Employees_ByFirstAndLastName` class:
+
+
+
+{`from employee in docs.Employees
+select new
+\{
+    FirstName = employee.FirstName,
+    LastName = employee.LastName
+\}
+`}
+
+
+
+The part you should pay attention to is `docs.Employees`. This syntax indicates from which collection a server should take the documents for indexing. In our case, documents will be taken from the `Employees` collection. To change the collection, you need to change `Employees` to the desired collection name or remove it and leave only `docs` to index **all documents**.
+
+
+
+
+
+## Combining multiple fields
+
+Since each index contains a function, you can combine multiple fields into one.
+
+#### Example
+Index definition:
+
+
+
+{`class Employees_ByFullName_Result {
+    private ?string $fullName = null;
+
+    public function getFullName(): ?string
+    {
+        return $this->fullName;
+    }
+
+    public function setFullName(?string $fullName): void
+    {
+        $this->fullName = $fullName;
+    }
+}
+
+class Employees_ByFullName extends AbstractIndexCreationTask
+{
+    public function __construct() {
+        parent::__construct();
+
+        $this->map = "docs.Employees.Select(employee => new { " .
+            "    FullName = (employee.FirstName + \\" \\") + employee.LastName " .
+ "})"; + } +} +`} + + + + +{`class Employees_ByFullName_Result +{ + private ?string $fullName = null; + + public function getFullName(): ?string + { + return $this->fullName; + } + + public function setFullName(?string $fullName): void + { + $this->fullName = $fullName; + } +} +class Employees_ByFullName extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps(["map('Employees', function (employee){ + return { + FullName : employee.FirstName + ' ' + employee.LastName + }; + })"]); + } +} +`} + + + + +Query the index: + + + +{`// notice that we're 'cheating' here +// by marking result type in 'Query' as 'Employees_ByFullName.Result' to get strongly-typed syntax +// and changing type using 'OfType' before sending query to server +$employees = $session + ->query(Employees_ByFullName_Result::class, Employees_ByFullName::class) + ->whereEquals('FullName', "Robert King") + ->ofType(Employee::class) + ->toList(); +`} + + + + +{`from index 'Employees/ByFullName' +where FullName = 'Robert King' +`} + + + + + + +## Indexing partial field data + +Imagine that you would like to return all employees that were born in a specific year. +You can do it by indexing `Birthday` from `Employee`, then specify the year in `Birthday` as you query the index: + +Index definition: + + + +{`class Employees_ByBirthday_Result +{ + public ?DateTime $birthday = null; + + public function getBirthday(): ?DateTime + { + return $this->birthday; + } + + public function setBirthday(?DateTime $birthday): void + { + $this->birthday = $birthday; + } +} +class Employees_ByBirthday extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Employees.Select(employee => new { " . + " Birthday = employee.Birthday " . + "})"; + } +} +`} + + + + +{`class Employees_ByBirthday_Result +{ + private ?DateTime $birthday = null; + + public function getBirthday(): ?DateTime + { + return $this->birthday; + } + + public function setBirthday(?DateTime $birthday): void + { + $this->birthday = $birthday; + } +} +class Employees_ByBirthday extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('Employees', function (employee){ + return { + Birthday : employee.Birthday + } + })" + ]); + } +} +`} + + + + +Query the index: + + + +{`$startDate = new DateTime('1963-01-01'); +$endDate = $startDate->modify('+1 year')->sub(new DateInterval('PT0.001S')); +$employees = $session + ->query(Employees_ByBirthday_Result::class, Employees_ByBirthday::class) + ->whereGreaterThanOrEqual("Birthday", $startDate) + ->andAlso() + ->whereLessThanOrEqual("Birthday", $endDate) + ->ofType(Employee::class) + ->toList(); +`} + + + + +{`from index 'Employees/ByBirthday ' +where Birthday between '1963-01-01' and '1963-12-31T23:59:59.9990000' +`} + + + + +RavenDB gives you the ability **to extract field data and to index by it**. A different way to achieve our goal will look as follows: + +Index definition: + + + +{`class Employees_ByYearOfBirth_Result { + public ?int $yearOfBirth = null; + + public function getYearOfBirth(): ?int + { + return $this->yearOfBirth; + } + + public function setYearOfBirth(?int $yearOfBirth): void + { + $this->yearOfBirth = $yearOfBirth; + } +} + +class Employees_ByYearOfBirth extends AbstractIndexCreationTask { + public function __construct() { + parent::__construct(); + + $this->map = "docs.Employees.Select(employee => new { " . 
+            "    YearOfBirth = employee.Birthday.Year " .
+            "})";
+    }
+}
+`}
+
+
+
+{`class Employees_ByYearOfBirth_Result
+{
+    private ?int $yearOfBirth = null;
+
+    public function getYearOfBirth(): ?int
+    {
+        return $this->yearOfBirth;
+    }
+
+    public function setYearOfBirth(?int $yearOfBirth): void
+    {
+        $this->yearOfBirth = $yearOfBirth;
+    }
+}
+class Employees_ByYearOfBirth extends AbstractJavaScriptIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->setMaps([
+            "map('Employees', function (employee){
+                return {
+                    Birthday : employee.Birthday.Year
+                }
+            })"
+        ]);
+    }
+}
+`}
+
+
+
+Query the index:
+
+
+
+{`$employees = $session
+    ->query(Employees_ByYearOfBirth_Result::class, Employees_ByYearOfBirth::class)
+    ->whereEquals("YearOfBirth", 1963)
+    ->ofType(Employee::class)
+    ->toList();
+`}
+
+
+
+{`from index 'Employees/ByYearOfBirth'
+where YearOfBirth = 1963
+`}
+
+
+
+
+
+
+## Filtering data within fields
+
+In the examples above, `whereEquals` is used in the query to filter the results.
+If you consistently want to filter with the same filtering conditions,
+you can use `where` in the index definition to narrow the index terms that the query must scan.
+
+This can save query-time but narrows the terms available to query.
+
+#### Example I
+
+For logic that has to do with special import rules that only apply to the USA,
+`where` can be used to filter the Companies collection `Address.Country` field.
+Thus, we only index documents `where company.Address.Country == "USA"`.
+
+Index definition:
+
+
+{`class Companies_ByAddress_Country_Result \{
+    public ?string $city = null;
+    public ?string $company = null;
+    public ?string $phone = null;
+\}
+
+class Companies_ByAddress_Country extends AbstractIndexCreationTask \{
+    public function __construct() \{
+        parent::__construct();
+
+        $this->map = "from company in docs.Companies " .
+            "where company.Address.Country == \\"USA\\" " .
+            "select new \{ Company = company.Name, City = company.Address.City, Phone = company.Phone \}";
+    \}
+\}
+`}
+
+
+
+Query the index:
+
+
+
+{`$orders = $session
+    ->query(Companies_ByAddress_Country_Result::class, Companies_ByAddress_Country::class)
+    ->ofType(Company::class)
+    ->toList();
+`}
+
+
+
+
+{`from index 'Companies_ByAddress_Country'
+`}
+
+
+
+#### Example II
+
+Imagine a seed company that needs to categorize its customers by latitude-based growing zones.
+
+They can create a different index for each zone and filter their customers in the index with
+`where (company.Address.Location.Latitude > 20 && company.Address.Location.Latitude < 50)`.
+ +Index definition: + + +{`class Companies_ByAddress_Latitude_Result \{ + private ?float $latitude = null; + private ?float $longitude = null; + private ?string $companyName = null; + private ?string $companyAddress = null; + private ?string $companyPhone = null; + + public function getLatitude(): ?float + \{ + return $this->latitude; + \} + + public function setLatitude(?float $latitude): void + \{ + $this->latitude = $latitude; + \} + + public function getLongitude(): ?float + \{ + return $this->longitude; + \} + + public function setLongitude(?float $longitude): void + \{ + $this->longitude = $longitude; + \} + + public function getCompanyName(): ?string + \{ + return $this->companyName; + \} + + public function setCompanyName(?string $companyName): void + \{ + $this->companyName = $companyName; + \} + + public function getCompanyAddress(): ?string + \{ + return $this->companyAddress; + \} + + public function setCompanyAddress(?string $companyAddress): void + \{ + $this->companyAddress = $companyAddress; + \} + + public function getCompanyPhone(): ?string + \{ + return $this->companyPhone; + \} + + public function setCompanyPhone(?string $companyPhone): void + \{ + $this->companyPhone = $companyPhone; + \} +\} +`} + + + +Query the index: + + + +{`$orders = $session + ->query(Companies_ByAddress_Latitude_Result::class, Companies_ByAddress_Latitude::class) + ->ofType(Company::class) + ->toList(); +`} + + + + +{`from index 'Companies_ByAddress_Latitude' +`} + + + + + + + +## Indexing nested data + +If your document contains nested data, e.g. `Employee` contains `Address`, you can index on its fields by accessing them directly in the index. Let's say that we would like to create an index that returns all employees that were born in a specific `Country`: + +Index definition: + + + +{`class Employees_ByCountry_Result +{ + private ?string $country = null; + + public function getCountry(): ?string + { + return $this->country; + } + + public function setCountry(?string $country): void + { + $this->country = $country; + } +} +class Employees_ByCountry extends AbstractIndexCreationTask { + public function __construct() { + parent::__construct(); + + $this->map = "docs.Employees.Select(employee => new { " . + " Country = employee.Address.Country " . + "})"; + } +} +`} + + + + +{`class Employees_ByCountry_Result +{ + private ?string $country = null; +} +class Employees_ByCountry extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('Employees', function (employee){ + return { + Country : employee.Address.Country + } + })" + ]); + } +} +`} + + + + +Query the index: + + + +{`$employees = $session + ->query(Employees_ByCountry_Result::class, Employees_ByCountry::class) + ->whereEquals("Country", "USA") + ->ofType(Employee::class) + ->toList(); +`} + + + + +{`from index 'Employees/ByCountry' +where Country = 'USA' +`} + + + + +If a document relationship is represented by the document's ID, you can use the `LoadDocument` method to retrieve such a document. +Learn more [here](../indexes/indexing-related-documents.mdx). + + + +## Indexing Missing Fields + +By default, indexes will not index a document that contains none of the specified fields. This behavior can be changed +using the [Indexing.IndexEmptyEntries](../server/configuration/indexing-configuration.mdx#indexingindexemptyentries) +configuration option. 
+ +The option [Indexing.IndexMissingFieldsAsNull](../server/configuration/indexing-configuration.mdx#indexingindexmissingfieldsasnull) +determines whether missing fields in documents are indexed with the value `null`, or not indexed at all. + + + + diff --git a/versioned_docs/version-7.1/indexes/_map-indexes-python.mdx b/versioned_docs/version-7.1/indexes/_map-indexes-python.mdx new file mode 100644 index 0000000000..283930ec6e --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_map-indexes-python.mdx @@ -0,0 +1,732 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* `Map` indexes, sometimes referred to as simple indexes, contain one or more mapping functions + to indicate which document fields should be indexed. +* After indexing, documents can be searched by the indexed fields. +* The mapping functions can be considered the **core** of indexes. + +* In This Page: + * [Indexing single fields](../indexes/map-indexes.mdx#indexing-single-fields) + * [Combining multiple fields](../indexes/map-indexes.mdx#combining-multiple-fields) + * [Indexing partial field data](../indexes/map-indexes.mdx#indexing-partial-field-data) + * [Filtering data within fields](../indexes/map-indexes.mdx#filtering-data-within-fields) + * [Indexing nested data](../indexes/map-indexes.mdx#indexing-nested-data) + * [Indexing Missing Fields](../indexes/map-indexes.mdx#indexing-missing-fields) + + + +* Indexing fields from [related documents](../indexes/indexing-related-documents.mdx) +* Aggregating data with [Map-Reduce indexes](../indexes/map-reduce-indexes.mdx) +* Indexing multiple collections with [Multi-Map indexes](../indexes/indexing-polymorphic-data.mdx#multi-map-indexes) +* [Running calculations and storing the results in the index to reduce query time](https://demo.ravendb.net/demos/python/static-indexes/store-fields-in-index) + + + + +## Indexing single fields + +Let's create an index that will help us search for `Employees` by their `FirstName`, `LastName`, or both. + +* First, let's create an index called `Employees/ByFirstAndLastName` + + Note: The naming separator character "`_`" in your code will become "/" in the index name. + In the following sample, "`Employees_ByFirstAndLastName`" will become "Employees/ByFirstAndLastName" in your indexes list. + + + + + +{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + # ... +`} + + + + +{`class Employees_ByFirstAndLastName(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + # ... +`} + + + + +You might notice that we're passing `Employee` as a generic parameter to `AbstractIndexCreationTask`. This gives our indexing function a strongly-typed syntax. If you are not familiar with `AbstractIndexCreationTask`, you can read [this](../indexes/creating-and-deploying.mdx) article before proceeding. + +- The next step is to create the indexing function itself. This is done by setting the `map` property with our function in a **parameterless constructor**. 
+ + + + +{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = "from employee in docs.Employees select new { FirstName = employee.FirstName, LastName = employee.LastName }" +`} + + + + +{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = "from employee in docs.Employees select new { FirstName = employee.FirstName, LastName = employee.LastName }" +`} + + + + +{`class Employees_ByFirstAndLastName(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + """ + map('Employees', function (employee){ + return { + FirstName : employee.FirstName, + LastName : employee.LastName + }; + }) + """ + } +`} + + + + +- The final step is to [deploy it](../indexes/creating-and-deploying.mdx) to the server + and issue a query using the session [Query](../client-api/session/querying/how-to-query.mdx) method. + To query an index, the name of the index must be called by the query. + If the index isn't called, RavenDB will either use or create an [auto index](../indexes/creating-and-deploying.mdx#auto-indexes). + + + + +{`employees_1 = list( + session.query_index_type(Employees_ByFirstAndLastName, Employee).where_equals( + "first_name", "Robert" + ) +) +employees_2 = list( + session.query_index("Employees/ByFirstAndLastName", Employee).where_equals("first_name", "Robert") +) +`} + + + + +{`from index 'Employees/ByFirstAndLastName' +where FirstName = 'Robert' +`} + + + + +This is how our final index looks like: + + + + +{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = "from employee in docs.Employees select new { FirstName = employee.FirstName, LastName = employee.LastName }" +`} + + + + +{`class Employees_ByFirstAndLastName(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + """ + map('Employees', function (employee){ + return { + FirstName : employee.FirstName, + LastName : employee.LastName + }; + }) + + """ + } +`} + + + + + + +Please note that indexing capabilities are detected automatically from the returned field type from the indexing function. + +For example, if our `Employee` has a property named `Age` that is an `int`, the following indexing function... + + + + +{`from employee in docs.Employees +select new +{ + Age = employee.Age +} +`} + + + + +{`map('Employees', function(employee) +{ + return { + Age : employee.Age + }; +}) +`} + + + + + + +...grants us the capability to issue numeric queries (**return all the Employees whose Age is more than 30**). + +Changing the `Age` type to a `string` will take that capability away from you. The easiest example would be to issue `.ToString()` on the `Age` field... + + + + +{`from employee in docs.Employees +select new +{ + Age = employee.Age.ToString() +} +`} + + + + +{`map('Employees', function(employee) +{ + return { + Age : employee.Age.toString() + }; +}) +`} + + + + + + + + +You will probably notice that in the `Studio`, this function is a bit different from the one defined in the `Employees_ByFirstAndLastName` class: + + + +{`from employee in docs.Employees +select new +\{ + FirstName = employee.FirstName, + LastName = employee.LastName +\} +`} + + + +The part you should pay attention to is `docs.Employees`. This syntax indicates from which collection a server should take the documents for indexing. 
In our case, documents will be taken from the `Employees` collection. To change the collection, you need to change `Employees` to the desired collection name or remove it and leave only `docs` to index **all documents**. + + + + + +## Combining multiple fields + +Since each index contains a function, you can combine multiple fields into one. + +#### Example I +Index definition: + + + +{`class Employees_ByFullName(AbstractIndexCreationTask): + class Result: + def __init__(self, full_name: str = None): + self.full_name = full_name + + def __init__(self): + super().__init__() + self.map = ( + 'from employee in docs.Employees select new { full_name = employee.FirstName + " " + employee.LastName }' + ) +`} + + + + +{`class Employees_ByFullName(AbstractJavaScriptIndexCreationTask): + class Result: + def __init__(self, full_name: str = None): + self.full_name = full_name + + def __init__(self): + super().__init__() + self.maps = { + """ + map('Employees', function (employee){ + return { + FullName : employee.FirstName + ' ' + employee.LastName + }; + }) + """ + } +`} + + + + +Query the index: + + + +{`employees = list( + session.query_index_type(Employees_ByFullName, Employee).where_equals("full_name", "Robert King") +) +`} + + + + +{`from index 'Employees/ByFullName' +where FullName = 'Robert King' +`} + + + +#### Example II + + + +In this example, the index field `Query` combines all values from various Employee fields into one. +The default Analyzer on fields is changed to enable `Full-Text Search` operations. The matches no longer need to be exact. + +You can read more about analyzers and `Full-Text Search` [here](../indexes/using-analyzers.mdx). + + + +Index definition: + + + +{`class Companies_ByAddress_Country(AbstractIndexCreationTask): + class Result: + def __init__(self, city: str = None, company: str = None, phone: str = None): + self.city = city + self.company = company + self.phone = phone + + def __init__(self): + super().__init__() + self.map = ( + 'from company in docs.Companies where company.Address.Country == "USA"' + "select new { company = company.Name, city = company.Address.City, phone = company.Phone }" + ) +`} + + + + +{`class Employees_Query(AbstractJavaScriptIndexCreationTask): + class Result: + def __init__(self, query: List[str] = None): + self.query = query + + def __init__(self): + super().__init__() + self.maps = { + """ + map('Employees', function (employee) { + return { + query : [employee.FirstName, + employee.LastName, + employee.Title, + employee.Address.City] + } + }) + """ + } + + self.fields = {"query": IndexFieldOptions(indexing=FieldIndexing.SEARCH)} +`} + + + + +Query the index: + + + +{`employees = list( + session.query_index_type(Employees_Query, Employees_Query.Result) + .search("query", "John Doe") + .of_type(Employee) +) +`} + + + + +{`from index 'Employees/Query' +where search(Query, 'John Doe') +`} + + + + + + +## Indexing partial field data + +Imagine that you would like to return all employees that were born in a specific year. 
+You can do it by indexing `Birthday` from `Employee`, and then specifying the year range of `Birthday` when you query the index:
+
+Index definition:
+
+
+
+{`class Employees_ByBirthday(AbstractIndexCreationTask):
+    class Result:
+        def __init__(self, birthday: datetime.datetime = None):
+            self.birthday = birthday
+
+        @classmethod
+        def from_json(cls, json_dict: Dict[str, Any]) -> "Employees_ByBirthday.Result":
+            # import 'Utils' from 'ravendb.tools.utils' to convert C# datetime strings to Python datetime objects
+            return cls(Utils.string_to_datetime(json_dict["Birthday"]))
+
+    def __init__(self):
+        super().__init__()
+        self.map = "from employee in docs.Employees select new { birthday = employee.Birthday }"
+`}
+
+
+
+
+{`class Employees_ByBirthday(AbstractJavaScriptIndexCreationTask):
+    class Result:
+        def __init__(self, birthday: datetime.datetime = None):
+            self.birthday = birthday
+
+        @classmethod
+        def from_json(cls, json_dict: Dict[str, Any]) -> "Employees_ByBirthday.Result":
+            return cls(json_dict["Birthday"])
+
+    def __init__(self):
+        super().__init__()
+        self.maps = {
+            """
+            map('Employees', function (employee){
+                return {
+                    Birthday : employee.Birthday
+                }
+            })
+            """
+        }
+`}
+
+
+
+
+Query the index:
+
+
+
+{`start_date = datetime.datetime(1963, 1, 1)
+end_date = start_date + datetime.timedelta(days=365) - datetime.timedelta(milliseconds=1)
+employees = list(
+    session.query_index_type(Employees_ByBirthday, Employees_ByBirthday.Result)
+    .where_between("birthday", start_date, end_date)
+    .of_type(Employee)
+)
+`}
+
+
+
+
+{`from index 'Employees/ByBirthday'
+where Birthday between '1963-01-01' and '1963-12-31T23:59:59.9990000'
+`}
+
+
+
+
+RavenDB gives you the ability **to extract field data and to index by it**. A different way to achieve our goal looks as follows:
+
+Index definition:
+
+
+
+{`class Employees_ByYearOfBirth(AbstractIndexCreationTask):
+    class Result:
+        def __init__(self, year_of_birth: int = None):
+            self.year_of_birth = year_of_birth
+
+    def __init__(self):
+        super().__init__()
+        self.map = "from employee in docs.Employees select new { year_of_birth = employee.Birthday.Year }"
+`}
+
+
+
+
+{`class Employees_ByYearOfBirth(AbstractJavaScriptIndexCreationTask):
+    class Result:
+        def __init__(self, year_of_birth: int = None):
+            self.year_of_birth = year_of_birth
+
+        @classmethod
+        def from_json(cls, json_dict: Dict[str, Any]) -> "Employees_ByYearOfBirth.Result":
+            return cls(json_dict["Birthday"])
+
+    def __init__(self):
+        super().__init__()
+        self.maps = {
+            """
+            map('Employees', function (employee){
+                return {
+                    Birthday : employee.Birthday.Year
+                }
+            })
+            """
+        }
+`}
+
+
+
+
+Query the index:
+
+
+
+{`employees = list(
+    session.query_index_type(Employees_ByYearOfBirth, Employees_ByYearOfBirth.Result)
+    .where_equals("year_of_birth", 1963)
+    .of_type(Employee)
+)
+`}
+
+
+
+
+{`from index 'Employees/ByYearOfBirth'
+where YearOfBirth = 1963
+`}
+
+
+
+
+
+
+## Filtering data within fields
+
+In the examples above, `where_equals` is used in the query to filter the results.
+If you always want to filter by the same conditions,
+you can use a `where` clause in the index definition to narrow the index terms that the query must scan.
+
+This can save query time, but it narrows the terms available to the query.
+
+#### Example I
+
+For logic that has to do with special import rules that only apply to the USA,
+`where` can be used to filter the Companies collection by the `Address.Country` field.
+Thus, we only index documents `where company.Address.Country == "USA"`.
+
+Index definition:
+
+
+{`class Companies_ByAddress_Country(AbstractIndexCreationTask):
+    class Result:
+        def __init__(self, city: str = None, company: str = None, phone: str = None):
+            self.city = city
+            self.company = company
+            self.phone = phone
+
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            'from company in docs.Companies where company.Address.Country == "USA" '
+            "select new \{ company = company.Name, city = company.Address.City, phone = company.Phone \}"
+        )
+`}
+
+
+
+Query the index:
+
+
+
+{`orders = list(
+    session.query_index_type(Companies_ByAddress_Country, Companies_ByAddress_Country.Result).of_type(
+        Company
+    )
+)
+`}
+
+
+
+
+{`from index 'Companies/ByAddress/Country'
+`}
+
+
+
+#### Example II
+
+Imagine a seed company that needs to categorize its customers by latitude-based growing zones.
+
+They can create a different index for each zone and filter their customers in the index with
+`where (company.Address.Location.Latitude > 20 && company.Address.Location.Latitude < 50)`.
+
+Index definition:
+
+
+{`class Companies_ByAddress_Latitude(AbstractIndexCreationTask):
+    class Result:
+        def __init__(
+            self,
+            latitude: float = None,
+            longitude: float = None,
+            company_name: str = None,
+            company_address: str = None,
+            company_phone: str = None,
+        ):
+            self.latitude = latitude
+            self.longitude = longitude
+            self.company_name = company_name
+            self.company_address = company_address
+            self.company_phone = company_phone
+
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from company in docs.Companies "
+            "where (company.Address.Location.Latitude > 20 && company.Address.Location.Latitude < 50) "
+            "select new "
+            "\{ "
+            "    latitude = company.Address.Location.Latitude, "
+            "    longitude = company.Address.Location.Longitude, "
+            "    company_name = company.Name, "
+            "    company_address = company.Address, "
+            "    company_phone = company.Phone "
+            "\}"
+        )
+`}
+
+
+
+Query the index:
+
+
+
+{`orders = list(
+    session.query_index_type(Companies_ByAddress_Latitude, Companies_ByAddress_Latitude.Result).of_type(
+        Company
+    )
+)
+`}
+
+
+
+
+{`from index 'Companies/ByAddress/Latitude'
+`}
+
+
+
+
+
+
+
+## Indexing nested data
+
+If your document contains nested data, e.g. `Employee` contains `Address`, you can index on its fields by accessing them directly in the index. Let's say that we would like to create an index that returns all employees that were born in a specific `Country`:
+
+Index definition:
+
+
+
+{`class Employees_ByCountry(AbstractIndexCreationTask):
+    class Result:
+        def __init__(self, country: str = None):
+            self.country = country
+
+    def __init__(self):
+        super().__init__()
+        self.map = "from employee in docs.Employees select new { country = employee.Address.Country }"
+`}
+
+
+
+
+{`class Employees_ByCountry(AbstractJavaScriptIndexCreationTask):
+    class Result:
+        def __init__(self, country: str = None):
+            self.country = country
+
+    def __init__(self):
+        super().__init__()
+        self.maps = {
+            """
+            map('Employees', function (employee){
+                return {
+                    country : employee.Address.Country
+                }
+            })
+            """
+        }
+`}
+
+
+
+
+Query the index:
+
+
+
+{`employees = list(
+    session.query_index_type(Employees_ByCountry, Employees_ByCountry.Result)
+    .where_equals("country", "USA")
+    .of_type(Employee)
+)
+`}
+
+
+
+
+{`from index 'Employees/ByCountry'
+where Country = 'USA'
+`}
+
+
+
+
+If a document relationship is represented by the document's ID, you can use the `LoadDocument` method to retrieve such a document.
+Learn more [here](../indexes/indexing-related-documents.mdx).
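+
+For example, `LoadDocument` can be used inside the map function to index data from a related document.
+The following is a minimal sketch, not part of the original examples: it assumes the Northwind
+`ReportsTo` field holds the manager's document ID, and indexes each employee by the manager's last name.
+
+
+
+{`class Employees_ByManagerLastName(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        # LoadDocument fetches the related Employee document during indexing
+        self.map = (
+            "from employee in docs.Employees "
+            'select new { manager_last_name = LoadDocument(employee.ReportsTo, "Employees").LastName }'
+        )
+`}
+
+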
+ + + +## Indexing Missing Fields + +By default, indexes will not index a document that contains none of the specified fields. This behavior can be changed +using the [Indexing.IndexEmptyEntries](../server/configuration/indexing-configuration.mdx#indexingindexemptyentries) +configuration option. + +The option [Indexing.IndexMissingFieldsAsNull](../server/configuration/indexing-configuration.mdx#indexingindexmissingfieldsasnull) +determines whether missing fields in documents are indexed with the value `null`, or not indexed at all. + + + + diff --git a/versioned_docs/version-7.1/indexes/_map-reduce-indexes-csharp.mdx b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-csharp.mdx new file mode 100644 index 0000000000..e7c48e9698 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-csharp.mdx @@ -0,0 +1,846 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Map-Reduce indexes** allow complex ***data aggregation*** that can be queried on + with very little cost, regardless of the data size. + +* To expedite queries and prevent performance degradation during queries, the aggregation + is done during the indexing phase, _not_ at query time. + +* Once new data enters the database, or existing documents are modified, + the Map-Reduce index will re-calculate the aggregated data so that the + aggregation results are always available and up-to-date. + +* The aggregation computation is done in two separate consecutive actions: + * **The `Map` stage:** + This first stage runs the defined Map function(s) on each document, indexing the specified fields. + * **The `Reduce` stage:** + This second stage groups the specified requested fields that were indexed in the Map stage, + and then runs the Reduce function to get a final aggregation result per field value. + +* In this page: + * [Creating Map Reduce Indexes](../indexes/map-reduce-indexes.mdx#creating-map-reduce-indexes) + * [Creating Multi-Map-Reduce Indexes](../indexes/map-reduce-indexes.mdx#creating-multi-map-reduce-indexes) + * [Reduce Results as Artificial Documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents) + * [Remarks](../indexes/map-reduce-indexes.mdx#remarks) + + + +## Creating Map Reduce Indexes + +When it comes to index creation, the only difference between simple indexes and the map-reduce ones +is an additional reduce function defined in the index definition. +To deploy an index we need to create a definition and deploy it using one of the ways described in the +[creating and deploying](../indexes/creating-and-deploying.mdx) article. +#### Example I - Count + +Let's assume that we want to count the number of products for each category. 
+To do it, we can create the following index using `LoadDocument` inside: + + + +{`public class Products_ByCategory : AbstractIndexCreationTask +{ + public class Result + { + public string Category { get; set; } + + public int Count { get; set; } + } + + public Products_ByCategory() + { + Map = products => from product in products + let categoryName = LoadDocument(product.Category).Name + select new + { + Category = categoryName, + Count = 1 + }; + + Reduce = results => from result in results + group result by result.Category into g + select new + { + Category = g.Key, + Count = g.Sum(x => x.Count) + }; + } +} +`} + + + + +{`public class Products_ByCategory : AbstractJavaScriptIndexCreationTask +{ + public class Result + { + public string Category { get; set; } + + public int Count { get; set; } + } + + public Products_ByCategory() + { + Maps = new HashSet() + { + @"map('products', function(p){ + return { + Category: load(p.Category, 'Categories').Name, + Count: 1 + } + })" + }; + + Reduce = @"groupBy(x => x.Category) + .aggregate(g => { + return { + Category: g.key, + Count: g.values.reduce((count, val) => val.Count + count, 0) + }; + })"; + } +} +`} + + + + +and issue the query: + + + +{`IList results = session + .Query() + .Where(x => x.Category == "Seafood") + .ToList(); +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() + .WhereEquals(x => x.Category, "Seafood") + .ToList(); +`} + + + + +{`from 'Products/ByCategory' +where Category == 'Seafood' +`} + + + + +The above query will return one result for _Seafood_ with the appropriate number of products from that category. +#### Example II - Average + +In this example, we will count an average product price for each category. +The index definition: + + + +{`public class Products_Average_ByCategory : + AbstractIndexCreationTask +{ + public class Result + { + public string Category { get; set; } + + public decimal PriceSum { get; set; } + + public double PriceAverage { get; set; } + + public int ProductCount { get; set; } + } + + public Products_Average_ByCategory() + { + Map = products => from product in products + let categoryName = LoadDocument(product.Category).Name + select new + { + Category = categoryName, + PriceSum = product.PricePerUnit, + PriceAverage = 0, + ProductCount = 1 + }; + + Reduce = results => from result in results + group result by result.Category into g + let productCount = g.Sum(x => x.ProductCount) + let priceSum = g.Sum(x => x.PriceSum) + select new + { + Category = g.Key, + PriceSum = priceSum, + PriceAverage = priceSum / productCount, + ProductCount = productCount + }; + } +} +`} + + + + +{`public class Products_Average_ByCategory : + AbstractJavaScriptIndexCreationTask +{ + public class Result + { + public string Category { get; set; } + + public decimal PriceSum { get; set; } + + public double PriceAverage { get; set; } + + public int ProductCount { get; set; } + } + + public Products_Average_ByCategory() + { + Maps = new HashSet() + { + @"map('products', function(product){ + return { + Category: load(product.Category, 'Categories').Name, + PriceSum: product.PricePerUnit, + PriceAverage: 0, + ProductCount: 1 + } + })" + }; + + Reduce = @"groupBy(x => x.Category) + .aggregate(g => { + var pricesum = g.values.reduce((sum,x) => x.PriceSum + sum,0); + var productcount = g.values.reduce((sum,x) => x.ProductCount + sum,0); + return { + Category: g.key, + PriceSum: pricesum, + ProductCount: productcount, + PriceAverage: pricesum / productcount + } + })"; + } +} +`} + + + + +and the query: + + + 
+{`IList results = session + .Query() + .Where(x => x.Category == "Seafood") + .ToList(); +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() + .WhereEquals(x => x.Category, "Seafood") + .ToList(); +`} + + + + +{`from 'Products/Average/ByCategory' +where Category == 'Seafood' +`} + + + +#### Example III - Calculations + +This example illustrates how we can put some calculations inside an index using +one of the indexes available in the sample database (`Product/Sales`). + +We want to know how many times each product was ordered and how much we earned for it. +To extract that information, we need to define the following index: + + + +{`public class Product_Sales : AbstractIndexCreationTask +{ + public class Result + { + public string Product { get; set; } + + public int Count { get; set; } + + public decimal Total { get; set; } + } + + public Product_Sales() + { + Map = orders => from order in orders + from line in order.Lines + select new + { + Product = line.Product, + Count = 1, + Total = ((line.Quantity * line.PricePerUnit) * (1 - line.Discount)) + }; + + Reduce = results => from result in results + group result by result.Product into g + select new + { + Product = g.Key, + Count = g.Sum(x => x.Count), + Total = g.Sum(x => x.Total) + }; + } +} +`} + + + + +{`public class Product_Sales : AbstractJavaScriptIndexCreationTask +{ + public class Result + { + public string Product { get; set; } + + public int Count { get; set; } + + public decimal Total { get; set; } + } + + public Product_Sales() + { + Maps = new HashSet() + { + @"map('orders', function(order){ + var res = []; + order.Lines.forEach(l => { + res.push({ + Product: l.Product, + Count: 1, + Total: (l.Quantity * l.PricePerUnit) * (1- l.Discount) + }) + }); + return res; + })" + }; + + Reduce = @"groupBy(x => x.Product) + .aggregate(g => { + return { + Product : g.key, + Count: g.values.reduce((sum, x) => x.Count + sum, 0), + Total: g.values.reduce((sum, x) => x.Total + sum, 0) + } + })"; + } +} +`} + + + + +and run the query: + + + +{`IList results = session + .Query() + .ToList(); +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() + .ToList(); +`} + + + + +{`from 'Product/Sales' +`} + + + + + + +## Creating Multi-Map-Reduce Indexes + +A **Multi-Map-Reduce** index allows aggregating (or 'reducing') data from several collections. + +They can be created and edited via [Studio](../studio/database/indexes/create-map-reduce-index.mdx#multi-map-reduce), +or with API as shown below. + +In the following code sample, we want the number of companies, suppliers, and employees per city. +We define the map phase on collections 'Employees', 'Companies', and 'Suppliers'. +We then define the reduce phase. + + + +{`public class Cities_Details : + AbstractMultiMapIndexCreationTask +\{ + public class IndexEntry + \{ + public string City; + public int Companies, Employees, Suppliers; + \} + + public Cities_Details() + \{ + // Map employees collection. + AddMap(employees => + from e in employees + select new IndexEntry + \{ + City = e.Address.City, + Companies = 0, + Suppliers = 0, + Employees = 1 + \} + ); + + // Map companies collection. + AddMap(companies => + from c in companies + select new IndexEntry + \{ + City = c.Address.City, + Companies = 1, + Suppliers = 0, + Employees = 0 + \} + ); + + // Map suppliers collection. 
+ AddMap(suppliers => + from s in suppliers + select new IndexEntry + \{ + City = s.Address.City, + Companies = 0, + Suppliers = 1, + Employees = 0 + \} + ); + + // Apply reduction/aggregation on multi-map results. + Reduce = results => + from result in results + group result by result.City + into g + select new IndexEntry + \{ + City = g.Key, + Companies = g.Sum(x => x.Companies), + Suppliers = g.Sum(x => x.Suppliers), + Employees = g.Sum(x => x.Employees) + \}; + \} +\} +`} + + + +A query on the index: + + +{`// Queries the index "Cities_Details" - filters "Companies" results and orders by "City". +IList commerceDetails = session + .Query() + .Where(doc => doc.Companies > 5) + .OrderBy(x => x.City) + .ToList(); +`} + + + + +You can see this sample described in detail in [Inside RavenDB - Multi-Map-Reduce Indexes](https://ravendb.net/learn/inside-ravendb-book/reader/4.0/11-mapreduce-and-aggregations-in-ravendb#multimapreduce-indexes). + + + + +## Reduce Results as Artificial Documents + +#### Map-Reduce Output Documents + +In addition to storing the aggregation results in the index, the map-reduce index can also output +those reduce results as documents to a specified collection. In order to create these documents, +called _"artificial",_ you need to define the target collection using the `OutputReduceToCollection` +property in the index definition. + +Writing map-reduce outputs into documents allows you to define additional indexes on top of them +that give you the option to create recursive map-reduce operations. This makes it cheap and easy +to, for example, recursively create daily, monthly, and yearly summaries on the same data. + +In addition, you can also apply the usual operations on artificial documents (e.g. data +subscriptions or ETL). + +If the aggregation value for a given reduce key changes, we overwrite the output document. If the +given reduce key no longer has a result, the output document will be removed. + +#### Reference Documents + +To help organize these output documents, the map-reduce index can also create an additional +collection of artificial _reference documents_. These documents aggregate the output documents +and store their document IDs in an array field `ReduceOutputs`. + +The document IDs of reference documents are customized to follow some pattern. The format you +give to their document ID also determines how the output documents are grouped. + +Because reference documents have well known, predictable IDs, they are easier to plug into +indexes and other operations, and can serve as an intermediary for the output documents whose +IDs are less predictable. This allows you to chain map-reduce indexes in a recursive fashion, +see [Example II](../indexes/map-reduce-indexes.mdx#example-ii). + +Learn more about how to configure output and reference documents in the +[Studio: Create Map-Reduce Index](../studio/database/indexes/create-map-reduce-index.mdx). +### Artificial Document Properties + +#### IDs + +The identifiers of **map reduce output documents** have three components in this format: + +`//` + +The index in [Example I](../indexes/map-reduce-indexes.mdx#example-i) might generate an output document +ID like this: + +`DailyProductSales/35/14369232530304891504` + +* "DailyProductSales" is the collection name specified for the output documents. +* The middle part is an incrementing integer assigned by the server. This number grows by some +amount whenever the index definition is modified. 
This can be useful because when an index definition +changes, there is a brief transition phase when the new output documents are being created, but the +old output documents haven't been deleted yet (this phase is called +["side-by-side indexing"](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---side-by-side-indexing)). +During this phase, the output collection contains output documents created both by the old version +and the new version of the index, and they can be distinguished by this value: the new output +documents will always have a higher value (by 1 or more). +* The last part of the document ID (the unique part) is the hash of the reduce key values - in this +case: `hash(Product, Month)`. + +The identifiers of **reference documents** follow some pattern you choose, and this pattern +determines which output documents are held by a given reference document. + +The index in [Example I](../indexes/map-reduce-indexes.mdx#example-i) has this pattern for reference documents: + +`sales/daily/{Date:yyyy-MM-dd}` + +And this produces reference document IDs like this: + +`sales/daily/1998-05-06` + +The pattern is built using the same syntax as +[the `StringBuilder.AppendFormat` method](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder.appendformat). +See [here](https://docs.microsoft.com/en-us/dotnet/standard/base-types/standard-date-and-time-format-strings) +to learn about the date formatting in particular. +#### Metadata + +Artificial documents generated by map-reduce indexes get the following `@flags` in their metadata: + + + +{`"@flags": "Artificial, FromIndex" +`} + + + +These flags are used internally by the database to filter out artificial documents during replication. +### Syntax + +The map-reduce output documents are configured with these properties of +`IndexDefinition`: + + + +{`string OutputReduceToCollection; + +string PatternReferencesCollectionName; + +// Using IndexDefinition +string PatternForOutputReduceToCollectionReferences; + +// Inheriting from AbstractGenericIndexCreationTask +Expression> PatternForOutputReduceToCollectionReferences; +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **OutputReduceToCollection** | `string` | Collection name for the output documents. | +| **PatternReferencesCollectionName** | `string` | Optional collection name for the reference documents - by default it is `/References`. | +| **PatternForOutputReduceToCollectionReferences** | `string` / `Expression>` | Document ID format for reference documents. This ID references the fields of the reduce function output, which determines how the output documents are aggregated. The type of this parameter is different depending on if the index is created using [IndexDefinition](../client-api/operations/maintenance/indexes/put-indexes.mdx) or [AbstractIndexCreationTask](../indexes/creating-and-deploying.mdx#define-a-static-index-using-a-custom-class). | + +To index artificial documents in strongly typed syntax (LINQ), you will need the +type of reference documents: + + + +{`public class OutputReduceToCollectionReference +\{ + public string Id \{ get; set; \} + public List ReduceOutputs \{ get; set; \} +\} +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **Id** | `string` | The reference document's ID | +| **ReduceOutputs** | `List` | List of map-reduce output documents that this reference document aggregates. Determined by the pattern of the reference document ID. 
|
+### Examples
+
+#### Example I
+
+Here is a map-reduce index with output documents and reference documents:
+
+
+
+
+{`public class ProductSales_ByDate : AbstractIndexCreationTask
+{
+    public ProductSales_ByDate()
+    {
+        Map = orders => from order in orders
+                        from line in order.Lines
+                        select new DailyProductSale
+                        {
+                            Product = line.Product,
+                            Date = new DateTime(order.OrderedAt.Year,
+                                order.OrderedAt.Month,
+                                order.OrderedAt.Day),
+                            Count = 1,
+                            Total = ((line.Quantity * line.PricePerUnit) * (1 - line.Discount))
+                        };
+
+        Reduce = results => from result in results
+                            group result by new { result.Product, result.Date } into g
+                            select new DailyProductSale
+                            {
+                                Product = g.Key.Product,
+                                Date = g.Key.Date,
+                                Count = g.Sum(x => x.Count),
+                                Total = g.Sum(x => x.Total)
+                            };
+
+        OutputReduceToCollection = "DailyProductSales";
+        PatternReferencesCollectionName = "DailyProductSales/References";
+        PatternForOutputReduceToCollectionReferences = x => $"sales/daily/{x.Date:yyyy-MM-dd}";
+    }
+}
+
+public class DailyProductSale
+{
+    public string Product { get; set; }
+    public DateTime Date { get; set; }
+    public int Count { get; set; }
+    public decimal Total { get; set; }
+}
+`}
+
+
+
+
+{`public class ProductSales_ByMonth : AbstractJavaScriptIndexCreationTask
+{
+    public class Result
+    {
+        public string Product { get; set; }
+        public DateTime Month { get; set; }
+        public int Count { get; set; }
+        public decimal Total { get; set; }
+    }
+
+    public ProductSales_ByMonth()
+    {
+        Maps = new HashSet()
+        {
+            @"map('orders', function(order) {
+                var res = [];
+
+                order.Lines.forEach(l => {
+                    res.push({
+                        Product: l.Product,
+                        Month: new Date( (new Date(order.OrderedAt)).getFullYear(),(new Date(order.OrderedAt)).getMonth(),1),
+                        Count: 1,
+                        Total: (l.Quantity * l.PricePerUnit) * (1- l.Discount)
+                    })
+                });
+
+                return res;
+            })"
+        };
+
+        Reduce = @"groupBy(x => ({Product: x.Product, Month: x.Month}))
+                    .aggregate(g => {
+                        return {
+                            Product: g.key.Product,
+                            Month: g.key.Month,
+                            Count: g.values.reduce((sum, x) => x.Count + sum, 0),
+                            Total: g.values.reduce((sum, x) => x.Total + sum, 0)
+                        }
+                    })";
+
+        OutputReduceToCollection = "MonthlyProductSales";
+        PatternReferencesCollectionName = "MonthlyProductSales/References";
+        PatternForOutputReduceToCollectionReferences = "sales/monthly/{Month}";
+    }
+}
+`}
+
+
+
+
+In the **LINQ** index example above (which inherits `AbstractIndexCreationTask`),
+the reference document ID pattern is set with a lambda expression:
+
+
+
+{`PatternForOutputReduceToCollectionReferences = x => $"sales/daily/\{x.Date:yyyy-MM-dd\}";
+`}
+
+
+
+This gives the reference documents IDs in this general format: `sales/daily/1998-05-06`.
+The reference document with that ID contains the IDs of all the output documents from
+May 6th, 1998.
+
+
+In the **JavaScript** index example (which inherits `AbstractJavaScriptIndexCreationTask`),
+the reference document ID pattern is set with a `string`:
+
+
+
+{`PatternForOutputReduceToCollectionReferences = "sales/monthly/\{Month\}"
+`}
+
+
+
+This gives the reference documents IDs in this general format: `sales/monthly/1998-05-01`.
+The reference document with that ID contains the IDs of all the output documents from the
+month of May 1998.
+#### Example II
+
+This is an example of a "recursive" map-reduce index - it indexes the output documents
+of the index above, using the reference documents.
+
+
+
+{`public class NumberOfOrders_ByProduct : AbstractIndexCreationTask
+\{
+    public NumberOfOrders_ByProduct()
+    \{
+        Map = dailyProductSales => from sale in dailyProductSales
+            let referenceDocuments = LoadDocument(
+                $"sales/daily/\{sale.Date:yyyy-MM-dd\}",
+                "DailyProductSales/References")
+
+            from refDoc in referenceDocuments.ReduceOutputs
+            let outputDoc = LoadDocument(refDoc)
+            select new OutputDocument
+            \{
+                Product = outputDoc.Product,
+                Count = outputDoc.Count,
+                NumOrders = 1
+            \};
+
+        Reduce = results => from r in results
+            group r by new \{ r.Count, r.Product \} into g
+            select new OutputDocument
+            \{
+                Product = g.Key.Product,
+                Count = g.Key.Count,
+                NumOrders = g.Sum(x => x.NumOrders)
+            \};
+    \}
+\}
+
+public class OutputDocument \{
+    public string Product;
+    public int Count;
+    public int NumOrders;
+\}
+
+public class OutputReduceToCollectionReference
+\{
+    public string Id \{ get; set; \}
+    public List ReduceOutputs \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## Remarks
+
+#### Saving documents
+
+[Artificial documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents)
+are stored immediately after the indexing transaction completes.
+#### Recursive indexing loop
+
+It is **forbidden** to output reduce results to collections such as the following:
+
+- A collection that the current index is already working on.
+  E.g., an index on a `DailyInvoices` collection outputs to `DailyInvoices`.
+- A collection that the current index is loading a document from.
+  E.g., an index with `LoadDocument(id, "Invoices")` outputs to `Invoices`.
+- Two collections, each processed by a map-reduce index,
+  where each index outputs to the other collection.
+  E.g.,
+  An index on the `Invoices` collection outputs to the `DailyInvoices` collection,
+  while an index on `DailyInvoices` outputs to `Invoices`.
+
+When an attempt to create such an infinite indexing loop is
+detected, a detailed error is generated.
+#### Output to an Existing collection
+
+Creating a map-reduce index which defines an output collection that already
+exists and contains documents will result in an error.
+Delete all documents from the target collection before creating the index,
+or output results to a different collection.
+#### Modification of Artificial Documents
+Artificial documents can be loaded and queried just like regular documents
+(see the sketch at the end of this section).
+However, it is **not** recommended to edit artificial documents manually, since
+any update of the index results would overwrite all manual modifications made to them.
+#### Map-Reduce Indexes on a Sharded Database
+
+On a [sharded database](../sharding/overview.mdx), the behavior of map-reduce
+indexes is altered in a few ways that database operators should be aware of.
+
+* Read [here](../sharding/indexing.mdx#map-reduce-indexes-on-a-sharded-database)
+  about map-reduce indexes on a sharded database.
+* Read [here](../sharding/querying.mdx#querying-map-reduce-indexes) about querying
+  map-reduce indexes on a sharded database.
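+
+As noted under "Modification of Artificial Documents" above, the output documents can be
+queried like any other documents. Here is a minimal sketch of reading Example I's output
+collection, shown with the Python client for brevity; the store URL, database name, and
+the `query_collection` call are assumptions, not part of this article:
+
+
+
+{`from ravendb import DocumentStore
+
+store = DocumentStore(urls=["http://localhost:8080"], database="Northwind")
+store.initialize()
+
+with store.open_session() as session:
+    # Read the artificial documents the index wrote to "DailyProductSales"
+    daily_sales = list(session.query_collection("DailyProductSales"))
+`}
+
+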
+ + + + diff --git a/versioned_docs/version-7.1/indexes/_map-reduce-indexes-java.mdx b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-java.mdx new file mode 100644 index 0000000000..b1bb3bbd2b --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-java.mdx @@ -0,0 +1,622 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Map-Reduce indexes** allow complex ***data aggregation*** that can be queried on + with very little cost, regardless of the data size. + +* To expedite queries and prevent performance degradation during queries, the aggregation + is done during the indexing phase, _not_ at query time. + +* Once new data enters the database, or existing documents are modified, + the Map-Reduce index will re-calculate the aggregated data so that the + aggregation results are always available and up-to-date. + +* The aggregation computation is done in two separate consecutive actions: the `Map` and the `Reduce`. + * **The Map stage:** + This first stage runs the defined Map function(s) on each document, indexing the specified fields. + * **The Reduce stage:** + This second stage groups the specified requested fields that were indexed in the Map stage, + and then runs the Reduce function to get a final aggregation result per field value. + +* In this page: + * [Creating Map Reduce Indexes](../indexes/map-reduce-indexes.mdx#creating-map-reduce-indexes) + * [Reduce Results as Artificial Documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents) + * [Important Comments](../indexes/map-reduce-indexes.mdx#important-comments) + + + +## Creating Map Reduce Indexes + +When it comes to index creation, the only difference between simple indexes and the map-reduce ones +is an additional reduce function defined in the index definition. +To deploy an index we need to create a definition and deploy it using one of the ways described in the +[creating and deploying](../indexes/creating-and-deploying.mdx) article. +### Example I - Count + +Let's assume that we want to count the number of products for each category. 
+To do it, we can create the following index using `LoadDocument` inside: + + + +{`public static class Products_ByCategory extends AbstractIndexCreationTask { + public static class Result { + private String category; + private String count; + + public String getCategory() { + return category; + } + + public void setCategory(String category) { + this.category = category; + } + + public String getCount() { + return count; + } + + public void setCount(String count) { + this.count = count; + } + } + + public Products_ByCategory() { + map = "docs.Products.Select(product => new { " + + " Product = Product, " + + " CategoryName = (this.LoadDocument(product.Category, \\"Categories\\")).Name " + + "}).Select(this0 => new { " + + " Category = this0.CategoryName, " + + " Count = 1 " + + "})"; + + reduce = "results.GroupBy(result => result.Category).Select(g => new { " + + " Category = g.Key, " + + " Count = Enumerable.Sum(g, x => ((int) x.Count)) " + + "})"; + } +} +`} + + + + +{`public static class Products_ByCategory extends AbstractJavaScriptIndexCreationTask { + public Products_ByCategory() { + setMaps(Sets.newHashSet("map('products', function(p){\\n" + + " return {\\n" + + " Category: load(p.Category, 'Categories').Name,\\n" + + " Count: 1\\n" + + " }\\n" + + " })")); + + setReduce("groupBy(x => x.Category)\\n" + + " .aggregate(g => {\\n" + + " return {\\n" + + " Category: g.key,\\n" + + " Count: g.values.reduce((count, val) => val.Count + count, 0)\\n" + + " };\\n" + + " })"); + } +} +`} + + + + +and issue the query: + + + +{`List results = session + .query(Products_ByCategory.Result.class, Products_ByCategory.class) + .whereEquals("Category", "Seafood") + .toList(); +`} + + + + +{`from 'Products/ByCategory' +where Category == 'Seafood' +`} + + + + +The above query will return one result for _Seafood_ with the appropriate number of products from that category. + +### Example II - Average + +In this example, we will count an average product price for each category. 
+The index definition: + + + +{`public static class Products_Average_ByCategory extends AbstractIndexCreationTask { + public static class Result { + private String category; + private double priceSum; + private double priceAverage; + private int productCount; + + public String getCategory() { + return category; + } + + public void setCategory(String category) { + this.category = category; + } + + public double getPriceSum() { + return priceSum; + } + + public void setPriceSum(double priceSum) { + this.priceSum = priceSum; + } + + public double getPriceAverage() { + return priceAverage; + } + + public void setPriceAverage(double priceAverage) { + this.priceAverage = priceAverage; + } + + public int getProductCount() { + return productCount; + } + + public void setProductCount(int productCount) { + this.productCount = productCount; + } + } + + public Products_Average_ByCategory() { + map = "docs.Products.Select(product => new { " + + " Product = Product, " + + " CategoryName = (this.LoadDocument(product.Category, \\"Categories\\")).Name " + + "}).Select(this0 => new { " + + " Category = this0.CategoryName, " + + " PriceSum = this0.Product.PricePerUnit, " + + " PriceAverage = 0, " + + " ProductCount = 1 " + + "})"; + + reduce = "results.GroupBy(result => result.Category).Select(g => new { " + + " g = g, " + + " ProductCount = Enumerable.Sum(g, x => ((int) x.ProductCount)) " + + "}).Select(this0 => new { " + + " this0 = this0, " + + " PriceSum = Enumerable.Sum(this0.g, x0 => ((decimal) x0.PriceSum)) " + + "}).Select(this1 => new { " + + " Category = this1.this0.g.Key, " + + " PriceSum = this1.PriceSum, " + + " PriceAverage = this1.PriceSum / ((decimal) this1.this0.ProductCount), " + + " ProductCount = this1.this0.ProductCount " + + "})"; + } +} +`} + + + + +{`public static class Product_Average_ByCategory extends AbstractJavaScriptIndexCreationTask { + public Product_Average_ByCategory() { + setMaps(Sets.newHashSet("map('products', function(product){\\n" + + " return {\\n" + + " Category: load(product.Category, 'Categories').Name,\\n" + + " PriceSum: product.PricePerUnit,\\n" + + " PriceAverage: 0,\\n" + + " ProductCount: 1\\n" + + " }\\n" + + "})")); + + setReduce("groupBy(x => x.Category)\\n" + + " .aggregate(g => {\\n" + + " var pricesum = g.values.reduce((sum,x) => x.PriceSum + sum,0);\\n" + + " var productcount = g.values.reduce((sum,x) => x.ProductCount + sum,0);\\n" + + " return {\\n" + + " Category: g.key,\\n" + + " PriceSum: pricesum,\\n" + + " ProductCount: productcount,\\n" + + " PriceAverage: pricesum / productcount\\n" + + " }\\n" + + " })"); + } +} +`} + + + + +and the query: + + + +{`List results = session + .query(Products_Average_ByCategory.Result.class, Products_Average_ByCategory.class) + .whereEquals("Category", "Seafood") + .toList(); +`} + + + + +{`from 'Products/Average/ByCategory' +where Category == 'Seafood' +`} + + + + +### Example III - Calculations + +This example illustrates how we can put some calculations inside an index using +one of the indexes available in the sample database (`Product/Sales`). + +We want to know how many times each product was ordered and how much we earned for it. 
+In order to extract that information, we need to define the following index: + + + +{`public static class Product_Sales extends AbstractIndexCreationTask { + public static class Result { + private String product; + private int count; + private double total; + + public String getProduct() { + return product; + } + + public void setProduct(String product) { + this.product = product; + } + + public int getCount() { + return count; + } + + public void setCount(int count) { + this.count = count; + } + + public double getTotal() { + return total; + } + + public void setTotal(double total) { + this.total = total; + } + } + + public Product_Sales() { + map = "docs.Orders.SelectMany(order => order.Lines, (order, line) => new { " + + " Product = line.Product, " + + " Count = 1, " + + " Total = (((decimal) line.Quantity) * line.PricePerUnit) * (1M - line.Discount) " + + "})"; + + + reduce = "results.GroupBy(result => result.Product).Select(g => new { " + + " Product = g.Key, " + + " Count = Enumerable.Sum(g, x => ((int) x.Count)), " + + " Total = Enumerable.Sum(g, x0 => ((decimal) x0.Total)) " + + "})"; + } +} +`} + + + + +{`public static class Product_Sales extends AbstractJavaScriptIndexCreationTask { + public Product_Sales() { + setMaps(Sets.newHashSet("map('orders', function(order){\\n" + + " var res = [];\\n" + + " order.Lines.forEach(l => {\\n" + + " res.push({\\n" + + " Product: l.Product,\\n" + + " Count: 1,\\n" + + " Total: (l.Quantity * l.PricePerUnit) * (1- l.Discount)\\n" + + " })\\n" + + " });\\n" + + " return res;\\n" + + " })")); + + setReduce("groupBy(x => x.Product)\\n" + + " .aggregate(g => {\\n" + + " return {\\n" + + " Product : g.key,\\n" + + " Count: g.values.reduce((sum, x) => x.Count + sum, 0),\\n" + + " Total: g.values.reduce((sum, x) => x.Total + sum, 0)\\n" + + " }\\n" + + " })"); + } +} +`} + + + + +and send the query: + + + +{`List results = session + .query(Product_Sales.Result.class, Product_Sales.class) + .toList(); +`} + + + + +{`from 'Product/Sales' +`} + + + + + + +## Reduce Results as Artificial Documents + +#### Map-Reduce Output Documents + +In addition to storing the aggregation results in the index, the map-reduce index can also output +those reduce results as documents to a specified collection. In order to create these documents, +called _"artificial",_ you need to define the target collection using the `outputReduceToCollection` +property in the index definition. + +Writing map-reduce outputs into documents allows you to define additional indexes on top of them +that give you the option to create recursive map-reduce operations. This makes it cheap and easy +to, for example, recursively create daily, monthly, and yearly summaries on the same data. + +In addition, you can also apply the usual operations on artificial documents (e.g. data +subscriptions or ETL). + +If the aggregation value for a given reduce key changes, we overwrite the output document. If the +given reduce key no longer has a result, the output document will be removed. + +#### Reference Documents + +To help organize these output documents, the map-reduce index can also create an additional +collection of artificial _reference documents_. These documents aggregate the output documents +and store their document IDs in an array field `ReduceOutputs`. + +The document IDs of reference documents are customized to follow some pattern. The format you +give to their document ID also determines how the output documents are grouped. 
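+
+To make the grouping concrete: every output document whose reduce key formats to the same
+pattern string ends up listed in the same reference document. A small, language-neutral
+sketch in Python (illustration only; the IDs and dates are made up):
+
+
+
+{`from collections import defaultdict
+from datetime import datetime
+
+# Reduce-key dates of three made-up output documents
+outputs = [
+    ("MonthlyProductSales/35/111", datetime(1998, 5, 1)),
+    ("MonthlyProductSales/35/222", datetime(1998, 5, 1)),
+    ("MonthlyProductSales/35/333", datetime(1998, 6, 1)),
+]
+
+# Apply a date-based ID pattern; equal pattern strings share a reference document
+references = defaultdict(list)
+for output_id, month in outputs:
+    reference_id = "sales/monthly/" + month.strftime("%Y-%m-%d")
+    references[reference_id].append(output_id)
+
+for reference_id, reduce_outputs in references.items():
+    print(reference_id, "->", reduce_outputs)
+`}
+
+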
+ +Because reference documents have well known, predictable IDs, they are easier to plug into +indexes and other operations, and can serve as an intermediary for the output documents whose +IDs are less predictable. This allows you to chain map-reduce indexes in a recursive fashion, +see [Example II](../indexes/map-reduce-indexes.mdx#example-ii). + +Learn more about how to configure output and reference documents in the +[Studio: Create Map-Reduce Index](../studio/database/indexes/create-map-reduce-index.mdx). +### Artificial Document Properties + +#### IDs + +The identifiers of **map reduce output documents** have three components in this format: + +`//` + +The index in [Example I](../indexes/map-reduce-indexes.mdx#example-i) might generate an output document +ID like this: + +`DailyProductSales/35/14369232530304891504` + +* "DailyProductSales" is the collection name specified for the output documents. +* The middle part is an incrementing integer assigned by the server. This number grows by some +amount whenever the index definition is modified. This can be useful because when an index definition +changes, there is a brief transition phase when the new output documents are being created, but the +old output documents haven't been deleted yet (this phase is called +["side-by-side indexing"](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---side-by-side-indexing)). +During this phase, the output collection contains output documents created both by the old version +and the new version of the index, and they can be distinguished by this value: the new output +documents will always have a higher value (by 1 or more). +* The last part of the document ID (the unique part) is the hash of the reduce key values - in this +case: `hash(Product, Month)`. + +The identifiers of **reference documents** follow some pattern you choose, and this pattern +determines which output documents are held by a given reference document. + +The index in [Example I](../indexes/map-reduce-indexes.mdx#example-i) has this pattern for reference documents: + +`sales/daily/{Date:yyyy-MM-dd}` + +And this produces reference document IDs like this: + +`sales/daily/1998-05-06` + +The pattern is built using the same syntax as +[the `StringBuilder.AppendFormat` method](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder.appendformat). +See [here](https://docs.microsoft.com/en-us/dotnet/standard/base-types/standard-date-and-time-format-strings) +to learn about the date formatting in particular. +#### Metadata + +Artificial documents generated by map-reduce indexes get the following `@flags` in their metadata: + + + +{`"@flags": "Artificial, FromIndex" +`} + + + +These flags are used internally by the database to filter out artificial documents during replication. + +### Syntax +The map-reduce output documents are configured with these properties of +`IndexDefinition`: + + + +{`private String outputReduceToCollection; + +private String patternReferencesCollectionName; + +private String patternForOutputReduceToCollectionReferences; +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **outputReduceToCollection** | `String` | Collection name for the output documents. | +| **patternReferencesCollectionName** | `String` | Optional collection name for the reference documents - by default it is `/References`. | +| **patternForOutputReduceToCollectionReferences** | `String` | Document ID format for reference documents. 
This ID references the fields of the reduce function output, which determines how the output documents are aggregated. The type of this parameter is different depending on whether the index is created using [IndexDefinition](../indexes/creating-and-deploying.mdx#using-maintenance-operations) or [AbstractIndexCreationTask](../indexes/creating-and-deploying.mdx#using-abstractindexcreationtask). |
+
+### Example
+
+Here is a map-reduce index with output documents and reference documents:
+
+
+
+
+{`public Product_Sales_ByMonth() {
+    map = "docs.Orders.SelectMany(order => order.Lines, (order, line) => new { " +
+          "    Product = line.Product, " +
+          "    Month = new DateTime(order.OrderedAt.Year, order.OrderedAt.Month, 1), " +
+          "    Count = 1, " +
+          "    Total = (((decimal) line.Quantity) * line.PricePerUnit) * (1M - line.Discount) " +
+          "})";
+
+    reduce = "results.GroupBy(result => new { " +
+             "    Product = result.Product, " +
+             "    Month = result.Month " +
+             "}).Select(g => new { " +
+             "    Product = g.Key.Product, " +
+             "    Month = g.Key.Month, " +
+             "    Count = Enumerable.Sum(g, x => ((int) x.Count)), " +
+             "    Total = Enumerable.Sum(g, x0 => ((decimal) x0.Total)) " +
+             "})";
+
+    outputReduceToCollection = "MonthlyProductSales";
+    patternReferencesCollectionName = "MonthlyProductSales/References";
+    patternForOutputReduceToCollectionReferences = "sales/monthly/{Month}";
+}
+`}
+
+
+
+
+{`public static class Product_Sales_ByMonth extends AbstractJavaScriptIndexCreationTask {
+    public Product_Sales_ByMonth() {
+        setMaps(Sets.newHashSet("map('orders', function(order){\\n" +
+            "    var res = [];\\n" +
+            "    order.Lines.forEach(l => {\\n" +
+            "        res.push({\\n" +
+            "            Product: l.Product,\\n" +
+            "            Month: new Date( (new Date(order.OrderedAt)).getFullYear(),(new Date(order.OrderedAt)).getMonth(),1),\\n" +
+            "            Count: 1,\\n" +
+            "            Total: (l.Quantity * l.PricePerUnit) * (1- l.Discount)\\n" +
+            "        })\\n" +
+            "    });\\n" +
+            "    return res;\\n" +
+            "})"));
+
+        setReduce("groupBy(x => ({Product: x.Product, Month: x.Month}))\\n" +
+            "    .aggregate(g => {\\n" +
+            "        return {\\n" +
+            "            Product: g.key.Product,\\n" +
+            "            Month: g.key.Month,\\n" +
+            "            Count: g.values.reduce((sum, x) => x.Count + sum, 0),\\n" +
+            "            Total: g.values.reduce((sum, x) => x.Total + sum, 0)\\n" +
+            "        }\\n" +
+            "    })");
+
+        setOutputReduceToCollection("MonthlyProductSales");
+        setPatternReferencesCollectionName("MonthlyProductSales/References");
+        setPatternForOutputReduceToCollectionReferences("sales/monthly/{Month}");
+    }
+}
+`}
+
+
+
+
+In the first index example above (which extends `AbstractIndexCreationTask`),
+the reference document ID pattern is set with a `String`:
+
+
+
+{`patternForOutputReduceToCollectionReferences = "sales/monthly/\{Month\}";
+`}
+
+
+
+This gives the reference documents IDs in this general format: `sales/monthly/1998-05-01`.
+The reference document with that ID contains the IDs of all the output documents from the
+month of May 1998.
+
+
+In the **JavaScript** index example (which extends `AbstractJavaScriptIndexCreationTask`),
+the reference document ID pattern is set with the corresponding setter:
+
+
+
+{`setPatternForOutputReduceToCollectionReferences("sales/monthly/\{Month\}");
+`}
+
+
+
+This gives the reference documents IDs in the same general format, so each reference
+document contains the IDs of all the output documents from a single month.
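+
+A second map-reduce index can then chain off these documents: it loads a reference document
+by its predictable ID and follows its `ReduceOutputs` array to the output documents. A plain-data
+illustration (not client API; the second document ID is made up):
+
+
+
+{`# A reference document aggregates the IDs of its output documents
+reference_doc = dict(
+    Id="sales/monthly/1998-05-01",
+    ReduceOutputs=[
+        "MonthlyProductSales/35/14369232530304891504",
+        "MonthlyProductSales/35/9876543210987654321",
+    ],
+)
+
+# A chained (recursive) index would LoadDocument each listed output document
+for output_id in reference_doc["ReduceOutputs"]:
+    print("load and re-index:", output_id)
+`}
+
+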
+
+
+
+## Important Comments
+
+## Saving documents
+[Artificial documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents)
+are stored immediately after the indexing transaction completes.
+
+## Recursive indexing loop
+It is **forbidden** to output reduce results to collections such as the following:
+
+- A collection that the current index is already working on.
+  E.g., an index on a `DailyInvoices` collection outputs to `DailyInvoices`.
+- A collection that the current index is loading a document from.
+  E.g., an index with `LoadDocument(id, "Invoices")` outputs to `Invoices`.
+- Two collections, each processed by a map-reduce index,
+  where each index outputs to the other collection.
+  E.g.,
+  An index on the `Invoices` collection outputs to the `DailyInvoices` collection,
+  while an index on `DailyInvoices` outputs to `Invoices`.
+
+When an attempt to create such an infinite indexing loop is
+detected, a detailed error is generated.
+
+## Output to an Existing collection
+Creating a map-reduce index which defines an output collection that already
+exists and contains documents will result in an error.
+Delete all documents from the target collection before creating the index,
+or output results to a different collection.
+
+## Modification of Artificial Documents
+Artificial documents can be loaded and queried just like regular documents.
+However, it is **not** recommended to edit artificial documents manually, since
+any update of the index results would overwrite all manual modifications made to them.
+
+## Map-Reduce Indexes on a Sharded Database
+On a [sharded database](../sharding/overview.mdx), the behavior of map-reduce
+indexes is altered in a few ways that database operators should be aware of.
+
+* Read [here](../sharding/indexing.mdx#map-reduce-indexes-on-a-sharded-database)
+  about map-reduce indexes on a sharded database.
+* Read [here](../sharding/querying.mdx#querying-map-reduce-indexes) about querying
+  map-reduce indexes on a sharded database.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_map-reduce-indexes-nodejs.mdx b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-nodejs.mdx
new file mode 100644
index 0000000000..b48cbc8ea6
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-nodejs.mdx
@@ -0,0 +1,361 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Map-Reduce indexes** allow complex ***data aggregation*** that can be queried on
+  with very little cost, regardless of the data size.
+
+* To expedite queries and prevent performance degradation during queries, the aggregation
+  is done during the indexing phase, _not_ at query time.
+
+* Once new data enters the database, or existing documents are modified,
+  the Map-Reduce index will re-calculate the aggregated data so that the
+  aggregation results are always available and up-to-date.
+
+* The aggregation computation is done in two separate consecutive actions: the `Map` and the `Reduce`.
+  * **The Map stage:**
+    This first stage runs the defined Map function(s) on each document, indexing the specified fields.
+  * **The Reduce stage:**
+    This second stage groups the specified requested fields that were indexed in the Map stage,
+    and then runs the Reduce function to get a final aggregation result per field value.
+
+* In this page:
+  * [Creating Map Reduce Indexes](../indexes/map-reduce-indexes.mdx#creating-map-reduce-indexes)
+  * [Reduce Results as Artificial Documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents)
+  * [Important Comments](../indexes/map-reduce-indexes.mdx#important-comments)
+
+
+
+## Creating Map Reduce Indexes
+
+When it comes to index creation, the only difference between simple indexes and the map-reduce ones
+is an additional reduce function defined in the index definition.
+To deploy an index we need to create a definition and deploy it using one of the ways described in the
+[creating and deploying](../indexes/creating-and-deploying.mdx) article.
+### Example I - Count
+
+Let's assume that we want to count the number of products for each category.
+To do it, we can create the following index using `LoadDocument` inside:
+
+
+{`class Products_ByCategory extends AbstractCsharpIndexCreationTask \{
+
+    constructor() \{
+        super();
+
+        this.map = "docs.Products.Select(product => new \{ " +
+            "    Product = product, " +
+            "    CategoryName = (this.LoadDocument(product.Category, \\"Categories\\")).Name " +
+            "\}).Select(this0 => new \{ " +
+            "    Category = this0.CategoryName, " +
+            "    Count = 1 " +
+            "\})";
+
+        this.reduce = "results.GroupBy(result => result.Category).Select(g => new \{ " +
+            "    Category = g.Key, " +
+            "    Count = Enumerable.Sum(g, x => ((int) x.Count)) " +
+            "\})";
+    \}
+\}
+`}
+
+
+
+and issue the query:
+
+
+
+{`const results = await session
+    .query({ indexName: "Products/ByCategory" })
+    .whereEquals("Category", "Seafood")
+    .all();
+`}
+
+
+
+
+{`from 'Products/ByCategory'
+where Category == 'Seafood'
+`}
+
+
+
+
+The above query will return one result for _Seafood_ with the appropriate number of products from that category.
+
+### Example II - Average
+
+In this example, we will calculate the average product price for each category.
+The index definition:
+
+
+{`class Products_Average_ByCategory extends AbstractCsharpIndexCreationTask \{
+
+    constructor() \{
+        super();
+
+        this.map = "docs.Products.Select(product => new \{ " +
+            "    Product = product, " +
+            "    CategoryName = (this.LoadDocument(product.Category, \\"Categories\\")).Name " +
+            "\}).Select(this0 => new \{ " +
+            "    Category = this0.CategoryName, " +
+            "    PriceSum = this0.Product.PricePerUnit, " +
+            "    PriceAverage = 0, " +
+            "    ProductCount = 1 " +
+            "\})";
+
+        this.reduce = "results.GroupBy(result => result.Category).Select(g => new \{ " +
+            "    g = g, " +
+            "    ProductCount = Enumerable.Sum(g, x => ((int) x.ProductCount)) " +
+            "\}).Select(this0 => new \{ " +
+            "    this0 = this0, " +
+            "    PriceSum = Enumerable.Sum(this0.g, x0 => ((decimal) x0.PriceSum)) " +
+            "\}).Select(this1 => new \{ " +
+            "    Category = this1.this0.g.Key, " +
+            "    PriceSum = this1.PriceSum, " +
+            "    PriceAverage = this1.PriceSum / ((decimal) this1.this0.ProductCount), " +
+            "    ProductCount = this1.this0.ProductCount " +
+            "\})";
+    \}
+\}
+`}
+
+
+
+and the query:
+
+
+
+{`const results = await session
+    .query({ indexName: "Products/Average/ByCategory" })
+    .whereEquals("Category", "Seafood")
+    .all();
+`}
+
+
+
+
+{`from 'Products/Average/ByCategory'
+where Category == 'Seafood'
+`}
+
+
+
+
+### Example III - Calculations
+
+This example illustrates how we can put some calculations inside an index using
+one of the indexes available in the sample database (`Product/Sales`).
+
+We want to know how many times each product was ordered and how much we earned for it.
+In order to extract that information, we need to define the following index: + + +{`class Product_Sales extends AbstractCsharpIndexCreationTask \{ + constructor() \{ + super(); + + this.map = "docs.Orders.SelectMany(order => order.Lines, (order, line) => new \{ " + + " Product = line.Product, " + + " Count = 1, " + + " Total = (((decimal) line.Quantity) * line.PricePerUnit) * (1M - line.Discount) " + + "\})"; + + + this.reduce = "results.GroupBy(result => result.Product).Select(g => new \{ " + + " Product = g.Key, " + + " Count = Enumerable.Sum(g, x => ((int) x.Count)), " + + " Total = Enumerable.Sum(g, x0 => ((decimal) x0.Total)) " + + "\})"; + \} +\} +`} + + + +and send the query: + + + +{`const results = await session + .query({ indexName: "Product/Sales" }) + .all(); +`} + + + + +{`from 'Product/Sales' +`} + + + + + + +## Reduce Results as Artificial Documents + +#### Map-Reduce Output Documents + +In addition to storing the aggregation results in the index, the map-reduce index can also output +those reduce results as documents to a specified collection. In order to create these documents, +called _"artificial",_ you need to define the target collection using the `outputReduceToCollection` +property in the index definition. + +Writing map-reduce outputs into documents allows you to define additional indexes on top of them +that give you the option to create recursive map-reduce operations. This makes it cheap and easy +to, for example, recursively create daily, monthly, and yearly summaries on the same data. + +In addition, you can also apply the usual operations on artificial documents (e.g. data +subscriptions or ETL). + +If the aggregation value for a given reduce key changes, we overwrite the output document. If the +given reduce key no longer has a result, the output document will be removed. + +#### Reference Documents + +To help organize these output documents, the map-reduce index can also create an additional +collection of artificial _reference documents_. These documents aggregate the output documents +and store their document IDs in an array field `ReduceOutputs`. + +The document IDs of reference documents are customized to follow some pattern. The format you +give to their document ID also determines how the output documents are grouped. + +Because reference documents have well known, predictable IDs, they are easier to plug into +indexes and other operations, and can serve as an intermediary for the output documents whose +IDs are less predictable. This allows you to chain map-reduce indexes in a recursive fashion, +see [Example II](../indexes/map-reduce-indexes.mdx#example-ii). + +Learn more about how to configure output and reference documents in the +[Studio: Create Map-Reduce Index](../studio/database/indexes/create-map-reduce-index.mdx). +### Artificial Document IDs + +The identifiers of artificial documents are generated as: + +- `/` + +For the above sample index, the document ID can be: + +- `MonthlyProductSales/13770576973199715021` + +The numeric part is the hash of the reduce key values, in this case: `hash(Product, Month)`. + +If the aggregation value for a given reduce key changes then we overwrite the artificial document. It will get removed once there is no result for a given reduce key. + +### Artificial Document Flags + +Documents generated by map-reduce indexes get the following `@flags` metadata: + + + +{`"@flags": "Artificial, FromIndex" +`} + + + +Those flags are used internally by the database to filter out artificial documents during replication. 
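+
+Since these flags live in the document metadata, a client can detect artificial documents
+when loading them. A minimal sketch, shown here with the Python client for brevity; the
+document ID and the metadata accessor used are assumptions, not part of this article:
+
+
+
+{`with store.open_session() as session:
+    doc = session.load("MonthlyProductSales/13770576973199715021")
+    metadata = session.advanced.get_metadata_for(doc)
+
+    # Artificial map-reduce outputs carry both flags in "@flags"
+    if "Artificial" in metadata.get("@flags", ""):
+        print("this document was generated by a map-reduce index")
+`}
+
+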
+
+### Usage
+
+The map-reduce output documents are configured with these properties of
+`IndexDefinition`:
+
+
+
+{`const outputReduceToCollection = indexDefinition.outputReduceToCollection;
+
+const patternReferencesCollectionName = indexDefinition.patternReferencesCollectionName;
+
+const patternForOutputReduceToCollectionReferences = indexDefinition.patternForOutputReduceToCollectionReferences;
+`}
+
+
+
+| Parameters | Type | Description |
+| - | - | - |
+| **outputReduceToCollection** | `string` | Collection name for the output documents. |
+| **patternReferencesCollectionName** | `string` | Optional collection name for the reference documents - by default it is `<outputReduceToCollection>/References`. |
+| **patternForOutputReduceToCollectionReferences** | `string` | Document ID format for reference documents. This ID references the fields of the reduce function output, which determines how the output documents are aggregated. The type of this parameter differs depending on whether the index is created using [IndexDefinition](../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation) or [AbstractJavaScriptIndexCreationTask](../indexes/creating-and-deploying.mdx#define-a-static-index-using-a-custom-class). |
+
+### Examples
+
+
+
+{`class Product_Sales_ByMonth extends AbstractCsharpIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.map = "docs.Orders.SelectMany(order => order.Lines, (order, line) => new \{ " +
+            "    Product = line.Product, " +
+            "    Month = new DateTime(order.OrderedAt.Year, order.OrderedAt.Month, 1), " +
+            "    Count = 1, " +
+            "    Total = (((decimal) line.Quantity) * line.PricePerUnit) * (1M - line.Discount) " +
+            "\})";
+
+        this.reduce = "results.GroupBy(result => new \{ " +
+            "    Product = result.Product, " +
+            "    Month = result.Month " +
+            "\}).Select(g => new \{ " +
+            "    Product = g.Key.Product, " +
+            "    Month = g.Key.Month, " +
+            "    Count = Enumerable.Sum(g, x => ((int) x.Count)), " +
+            "    Total = Enumerable.Sum(g, x0 => ((decimal) x0.Total)) " +
+            "\})";
+
+        this.outputReduceToCollection = "MonthlyProductSales";
+    \}
+\}
+`}
+
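+
+The example above only sets the output collection. To also create reference documents, the two
+pattern properties listed in the Usage section can be set alongside it. The following is a
+minimal sketch, assuming the index class exposes the same properties as `IndexDefinition`;
+the pattern value is illustrative, and any format built from the reduce-key fields works:
+
+
+
+{`// Sketch only: the tail of the Product_Sales_ByMonth constructor above,
+// extended with reference-document configuration.
+this.outputReduceToCollection = "MonthlyProductSales";
+
+// Collection that will hold the reference documents:
+this.patternReferencesCollectionName = "MonthlyProductSales/References";
+
+// One reference document per month, e.g. sales/monthly/1998-05, listing the IDs
+// of that month's output documents in its ReduceOutputs array:
+this.patternForOutputReduceToCollectionReferences = "sales/monthly/\{Month:yyyy-MM\}";
+`}
+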
+
+
+
+## Important Comments
+
+#### Saving documents
+
+[Artificial documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents)
+are stored immediately after the indexing transaction completes.
+
+#### Recursive indexing loop
+
+It is **forbidden** to output reduce results to collections such as the following:
+
+- A collection that the current index is already working on.
+  E.g., an index on a `DailyInvoices` collection outputs to `DailyInvoices`.
+- A collection that the current index is loading a document from.
+  E.g., an index with `LoadDocument(id, "Invoices")` outputs to `Invoices`.
+- Two collections, each processed by a map-reduce index,
+  where each index outputs to the collection processed by the other.
+  E.g.,
+  an index on the `Invoices` collection outputs to the `DailyInvoices` collection,
+  while an index on `DailyInvoices` outputs to `Invoices`.
+
+When an attempt to create such an infinite indexing loop is
+detected, a detailed error is generated.
+
+#### Output to an Existing Collection
+
+Creating a map-reduce index that defines an output collection which already
+exists and contains documents will result in an error.
+Delete all documents from the target collection before creating the index,
+or output the results to a different collection.
+
+#### Modification of Artificial Documents
+
+Artificial documents can be loaded and queried just like regular documents.
+However, it is **not** recommended to edit artificial documents manually, since
+any update of the index results would overwrite any manual modifications made to them.
+
+#### Map-Reduce Indexes on a Sharded Database
+
+On a [sharded database](../sharding/overview.mdx), the behavior of map-reduce
+indexes is altered in a few ways that database operators should be aware of.
+
+* Read [here](../sharding/indexing.mdx#map-reduce-indexes-on-a-sharded-database)
+  about map-reduce indexes on a sharded database.
+* Read [here](../sharding/querying.mdx#querying-map-reduce-indexes) about querying
+  map-reduce indexes on a sharded database.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_map-reduce-indexes-php.mdx b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-php.mdx
new file mode 100644
index 0000000000..b1127533c9
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-php.mdx
@@ -0,0 +1,972 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Map-Reduce indexes** allow complex ***data aggregation*** that can be queried
+  with very little cost, regardless of the data size.
+
+* To expedite queries and prevent performance degradation during queries, the aggregation
+  is done during the indexing phase, _not_ at query time.
+
+* Once new data enters the database, or existing documents are modified,
+  the Map-Reduce index will re-calculate the aggregated data so that the
+  aggregation results are always available and up-to-date.
+
+* The aggregation computation is done in two separate consecutive actions:
+  * **The `map` stage:**
+    This first stage runs the defined Map function(s) on each document, indexing the specified fields.
+  * **The `reduce` stage:**
+    This second stage groups the specified requested fields that were indexed in the Map stage,
+    and then runs the Reduce function to get a final aggregation result per field value.
+
+* In this page:
+  * [Creating Map Reduce Indexes](../indexes/map-reduce-indexes.mdx#creating-map-reduce-indexes)
+  * [Creating Multi-Map-Reduce Indexes](../indexes/map-reduce-indexes.mdx#creating-multi-map-reduce-indexes)
+  * [Reduce Results as Artificial Documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents)
+  * [Remarks](../indexes/map-reduce-indexes.mdx#remarks)
+
+
+
+## Creating Map Reduce Indexes
+
+When it comes to index creation, the only difference between simple indexes and map-reduce indexes
+is an additional reduce function defined in the index definition.
+To deploy an index, create a definition and deploy it using one of the methods described in the
+[creating and deploying](../indexes/creating-and-deploying.mdx) article.
+
+#### Example I - Count
+
+Let's assume that we want to count the number of products for each category.
+To do it, we can create the following index using `LoadDocument` inside: + + + +{`class Products_ByCategory_Result +{ + public ?string $category = null; + public ?int $count = null; + + public function getCategory(): ?string + { + return $this->category; + } + + public function setCategory(?string $category): void + { + $this->category = $category; + } + + public function getCount(): ?int + { + return $this->count; + } + + public function setCount(?int $count): void + { + $this->count = $count; + } +} + +class Products_ByCategory extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Products.Select(product => new { " . + " Product = Product, " . + " CategoryName = (this.LoadDocument(product.Category, \\"Categories\\")).Name " . + "}).Select(this0 => new { " . + " Category = this0.CategoryName, " . + " Count = 1 " . + "})"; + + $this->reduce = "results.GroupBy(result => result.Category).Select(g => new { " . + " Category = g.Key, " . + " Count = Enumerable.Sum(g, x => ((int) x.Count)) " . + "})"; + } +} +`} + + + + +{`class Products_ByCategory_Result +{ + private ?string $category = null; + public ?int $count = null; + + public function getCategory(): ?string + { + return $this->category; + } + + public function setCategory(?string $category): void + { + $this->category = $category; + } + + public function getCount(): ?int + { + return $this->count; + } + + public function setCount(?int $count): void + { + $this->count = $count; + } +} +class Products_ByCategory extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('products', function(p){ + return { + Category: load(p.Category, 'Categories').Name, + Count: 1 + } + })" + ]); + + $this->setReduce( + "groupBy(x => x.Category) + .aggregate(g => { + return { + Category: g.key, + Count: g.values.reduce((count, val) => val.Count + count, 0) + }; + })" + ); + } +} +`} + + + + +and issue the query: + + + +{`/** @var array $results */ +$results = $session + ->query(Products_ByCategory_Result::class, Products_ByCategory::class) + ->whereEquals("Category", "Seafood") + ->toList(); +`} + + + + +{`from 'Products/ByCategory' +where Category == 'Seafood' +`} + + + + +The above query will return one result for _Seafood_ with the appropriate number of products from that category. +#### Example II - Average + +In this example, we will count an average product price for each category. 
+The index definition: + + + +{`class Products_Average_ByCategory_Result +{ + private ?string $category = null; + private ?float $priceSum = null; + private ?float $priceAverage = null; + private ?int $productCount = null; + + public function getCategory(): ?string + { + return $this->category; + } + + public function setCategory(?string $category): void + { + $this->category = $category; + } + + public function getPriceSum(): ?float + { + return $this->priceSum; + } + + public function setPriceSum(?float $priceSum): void + { + $this->priceSum = $priceSum; + } + + public function getPriceAverage(): ?float + { + return $this->priceAverage; + } + + public function setPriceAverage(?float $priceAverage): void + { + $this->priceAverage = $priceAverage; + } + + public function getProductCount(): ?int + { + return $this->productCount; + } + + public function setProductCount(?int $productCount): void + { + $this->productCount = $productCount; + } +} + +class Products_Average_ByCategory extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Products.Select(product => new { " . + " Product = Product, " . + " CategoryName = (this.LoadDocument(product.Category, \\"Categories\\")).Name " . + "}).Select(this0 => new { " . + " Category = this0.CategoryName, " . + " PriceSum = this0.Product.PricePerUnit, " . + " PriceAverage = 0, " . + " ProductCount = 1 " . + "})"; + + $this->reduce = "results.GroupBy(result => result.Category).Select(g => new { " . + " g = g, " . + " ProductCount = Enumerable.Sum(g, x => ((int) x.ProductCount)) " . + "}).Select(this0 => new { " . + " this0 = this0, " . + " PriceSum = Enumerable.Sum(this0.g, x0 => ((decimal) x0.PriceSum)) " . + "}).Select(this1 => new { " . + " Category = this1.this0.g.Key, " . + " PriceSum = this1.PriceSum, " . + " PriceAverage = this1.PriceSum / ((decimal) this1.this0.ProductCount), " . + " ProductCount = this1.this0.ProductCount " . 
+ "})"; + } +} +`} + + + + +{`class Products_Average_ByCategory_Result +{ + private ?string $category = null; + private ?float $priceSum = null; + private ?float $priceAverage = null; + private ?int $productCount = null; + + public function getCategory(): ?string + { + return $this->category; + } + + public function setCategory(?string $category): void + { + $this->category = $category; + } + + public function getPriceSum(): ?float + { + return $this->priceSum; + } + + public function setPriceSum(?float $priceSum): void + { + $this->priceSum = $priceSum; + } + + public function getPriceAverage(): ?float + { + return $this->priceAverage; + } + + public function setPriceAverage(?float $priceAverage): void + { + $this->priceAverage = $priceAverage; + } + + public function getProductCount(): ?int + { + return $this->productCount; + } + + public function setProductCount(?int $productCount): void + { + $this->productCount = $productCount; + } +} +class Products_Average_ByCategory extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('products', function(product){ + return { + Category: load(product.Category, 'Categories').Name, + PriceSum: product.PricePerUnit, + PriceAverage: 0, + ProductCount: 1 + } + })" + ]); + + $this->setReduce("groupBy(x => x.Category) + .aggregate(g => { + var pricesum = g.values.reduce((sum,x) => x.PriceSum + sum,0); + var productcount = g.values.reduce((sum,x) => x.ProductCount + sum,0); + return { + Category: g.key, + PriceSum: pricesum, + ProductCount: productcount, + PriceAverage: pricesum / productcount + } + })"); + } +} +`} + + + + +and the query: + + + +{`/** @var array $results */ +$results = $session + ->query(Products_Average_ByCategory_Result::class, Products_Average_ByCategory::class) + ->whereEquals("Category", "Seafood") + ->toList(); +`} + + + + +{`from 'Products/Average/ByCategory' +where Category == 'Seafood' +`} + + + +#### Example III - Calculations + +This example illustrates how we can put some calculations inside an index using +one of the indexes available in the sample database (`Product/Sales`). + +We want to know how many times each product was ordered and how much we earned for it. +To extract that information, we need to define the following index: + + + +{`class Product_Sales_Result +{ + private ?string $product = null; + private ?int $count = null; + private ?float $total = null; + + public function getProduct(): ?string + { + return $this->product; + } + + public function setProduct(?string $product): void + { + $this->product = $product; + } + + public function getCount(): ?int + { + return $this->count; + } + + public function setCount(?int $count): void + { + $this->count = $count; + } + + public function getTotal(): ?float + { + return $this->total; + } + + public function setTotal(?float $total): void + { + $this->total = $total; + } +} + +class Product_Sales extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Orders.SelectMany(order => order.Lines, (order, line) => new { " . + " Product = line.Product, " . + " Count = 1, " . + " Total = (((decimal) line.Quantity) * line.PricePerUnit) * (1M - line.Discount) " . + "})"; + + + $this->reduce = "results.GroupBy(result => result.Product).Select(g => new { " . + " Product = g.Key, " . + " Count = Enumerable.Sum(g, x => ((int) x.Count)), " . + " Total = Enumerable.Sum(g, x0 => ((decimal) x0.Total)) " . 
+            "})";
+    }
+}
+`}
+
+
+
+
+{`class Product_Sales_Result
+{
+    private ?string $product = null;
+    private ?int $count = null;
+    private ?float $total = null;
+
+    public function getProduct(): ?string
+    {
+        return $this->product;
+    }
+
+    public function setProduct(?string $product): void
+    {
+        $this->product = $product;
+    }
+
+    public function getCount(): ?int
+    {
+        return $this->count;
+    }
+
+    public function setCount(?int $count): void
+    {
+        $this->count = $count;
+    }
+
+    public function getTotal(): ?float
+    {
+        return $this->total;
+    }
+
+    public function setTotal(?float $total): void
+    {
+        $this->total = $total;
+    }
+}
+class Product_Sales extends AbstractJavaScriptIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->setMaps([
+            "map('orders', function(order){
+                var res = [];
+                order.Lines.forEach(l => {
+                    res.push({
+                        Product: l.Product,
+                        Count: 1,
+                        Total: (l.Quantity * l.PricePerUnit) * (1 - l.Discount)
+                    })
+                });
+                return res;
+            })"
+        ]);
+
+        $this->setReduce("groupBy(x => x.Product)
+            .aggregate(g => {
+                return {
+                    Product: g.key,
+                    Count: g.values.reduce((sum, x) => x.Count + sum, 0),
+                    Total: g.values.reduce((sum, x) => x.Total + sum, 0)
+                }
+            })");
+    }
+}
+`}
+
+
+
+
+And run the query:
+
+
+
+{`/** @var array $results */
+$results = $session
+    ->query(Product_Sales_Result::class, Product_Sales::class)
+    ->toList();
+`}
+
+
+
+
+{`from 'Product/Sales'
+`}
+
+
+
+
+
+
+## Creating Multi-Map-Reduce Indexes
+
+A **Multi-Map-Reduce** index allows aggregating (or 'reducing') data from several collections.
+
+They can be created and edited via [Studio](../studio/database/indexes/create-map-reduce-index.mdx#multi-map-reduce),
+or with the API as shown below.
+
+In the following code sample, we want the number of companies, suppliers, and employees per city.
+We define the map phase on collections 'Employees', 'Companies', and 'Suppliers'.
+We then define the reduce phase.
+
+
+{`class Cities_Details_IndexEntry
+\{
+    private ?string $city = null;
+    private ?int $companies = null;
+    private ?int $employees = null;
+    private ?int $suppliers = null;
+
+    public function getCity(): ?string
+    \{
+        return $this->city;
+    \}
+
+    public function setCity(?string $city): void
+    \{
+        $this->city = $city;
+    \}
+
+    public function getCompanies(): ?int
+    \{
+        return $this->companies;
+    \}
+
+    public function setCompanies(?int $companies): void
+    \{
+        $this->companies = $companies;
+    \}
+
+    public function getEmployees(): ?int
+    \{
+        return $this->employees;
+    \}
+
+    public function setEmployees(?int $employees): void
+    \{
+        $this->employees = $employees;
+    \}
+
+    public function getSuppliers(): ?int
+    \{
+        return $this->suppliers;
+    \}
+
+    public function setSuppliers(?int $suppliers): void
+    \{
+        $this->suppliers = $suppliers;
+    \}
+\}
+
+class Cities_Details extends AbstractMultiMapIndexCreationTask
+\{
+    public function __construct()
+    \{
+        parent::__construct();
+
+        // Map employees collection.
+        $this->addMap("docs.Employees.Select(e => new \{ " .
+            "    City = e.Address.City, " .
+            "    Companies = 0, " .
+            "    Suppliers = 0, " .
+            "    Employees = 1 " .
+            "\})");
+
+        // Map companies collection.
+        $this->addMap("docs.Companies.Select(c => new \{ " .
+            "    City = c.Address.City, " .
+            "    Companies = 1, " .
+            "    Suppliers = 0, " .
+            "    Employees = 0 " .
+            "\})");
+
+        // Map suppliers collection.
+        $this->addMap("docs.Suppliers.Select(s => new \{ " .
+            "    City = s.Address.City, " .
+            "    Companies = 0, " .
+            "    Suppliers = 1, " .
+            "    Employees = 0 " .
+            "\})");
+
+        // Apply reduction/aggregation on the multi-map results.
+        $this->reduce = "results.GroupBy(result => result.City).Select(g => new \{ " .
+            "    City = g.Key, " .
+            "    Companies = Enumerable.Sum(g, x => ((int) x.Companies)), " .
+            "    Suppliers = Enumerable.Sum(g, x => ((int) x.Suppliers)), " .
+            "    Employees = Enumerable.Sum(g, x => ((int) x.Employees)) " .
+            "\})";
+    \}
+\}
+`}
+
+
+
+A query on the index:
+
+
+{`// Queries the index "Cities_Details" - filters "Companies" results and orders by "City".
+/** @var array $commerceDetails */
+$commerceDetails = $session
+    ->query(Cities_Details_IndexEntry::class, Cities_Details::class)
+    ->whereGreaterThan("Companies", 5)
+    ->orderBy("City")
+    ->toList();
+`}
+
+
+
+
+You can see this sample described in detail in [Inside RavenDB - Multi-Map-Reduce Indexes](https://ravendb.net/learn/inside-ravendb-book/reader/4.0/11-mapreduce-and-aggregations-in-ravendb#multimapreduce-indexes).
+
+
+
+
+## Reduce Results as Artificial Documents
+
+#### Map-Reduce Output Documents
+
+In addition to storing the aggregation results in the index, the map-reduce index can also output
+those reduce results as documents to a specified collection. To create these documents,
+called _artificial documents_, you need to define the target collection using the
+`outputReduceToCollection` property in the index definition.
+
+Writing map-reduce outputs into documents allows you to define additional indexes on top of them,
+which gives you the option to create recursive map-reduce operations. This makes it cheap and easy
+to, for example, recursively create daily, monthly, and yearly summaries on the same data.
+
+In addition, you can also apply the usual operations to artificial documents (e.g. data
+subscriptions or ETL).
+
+If the aggregation value for a given reduce key changes, we overwrite the output document. If the
+given reduce key no longer has a result, the output document is removed.
+
+#### Reference Documents
+
+To help organize these output documents, the map-reduce index can also create an additional
+collection of artificial _reference documents_. These documents aggregate the output documents
+and store their document IDs in an array field, `ReduceOutputs`.
+
+The document IDs of reference documents follow a pattern that you define. The format you
+give to their document IDs also determines how the output documents are grouped.
+
+Because reference documents have well-known, predictable IDs, they are easier to plug into
+indexes and other operations, and can serve as an intermediary for the output documents, whose
+IDs are less predictable. This allows you to chain map-reduce indexes in a recursive fashion.
+
+Learn more about how to configure output and reference documents in the
+[Studio: Create Map-Reduce Index](../studio/database/indexes/create-map-reduce-index.mdx) article.
+
+### Artificial Document Properties
+
+#### IDs
+
+The identifiers of **map reduce output documents** have three components, in this format:
+
+`<collection name>/<incrementing value>/<hash of the reduce key>`
+
+The index in the [example](../indexes/map-reduce-indexes.mdx#example) below may generate
+an output document ID like this:
+
+`DailyProductSales/35/14369232530304891504`
+
+* "DailyProductSales" is the collection name specified for the output documents.
+* The middle part is an incrementing integer assigned by the server. This number grows by some +amount whenever the index definition is modified. This can be useful because when an index definition +changes, there is a brief transition phase when the new output documents are being created, but the +old output documents haven't been deleted yet (this phase is called +["side-by-side indexing"](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---side-by-side-indexing)). +During this phase, the output collection contains output documents created both by the old version +and the new version of the index, and they can be distinguished by this value: the new output +documents will always have a higher value (by 1 or more). +* The last part of the document ID (the unique part) is the hash of the reduce key values - in this +case: `hash(Product, Month)`. + +The identifiers of **reference documents** follow some pattern you choose, and this pattern +determines which output documents are held by a given reference document. + +The index in this [example](../indexes/map-reduce-indexes.mdx#example) has this pattern for +reference documents: + +`sales/daily/{Date:yyyy-MM-dd}` + +And this produces reference document IDs like this: + +`sales/daily/1998-05-06` + +The pattern is built using the same syntax as +[the `StringBuilder.AppendFormat` method](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder.appendformat). +See [here](https://docs.microsoft.com/en-us/dotnet/standard/base-types/standard-date-and-time-format-strings) +to learn about the date formatting in particular. +#### Metadata + +Artificial documents generated by map-reduce indexes get the following `@flags` in their metadata: + + + +{`"@flags": "Artificial, FromIndex" +`} + + + +These flags are used internally by the database to filter out artificial documents during replication. +### Syntax + +The map-reduce output documents are configured with these properties of +`IndexDefinition`: + + + +{`private ?string $outputReduceToCollection = null; + +private ?string $patternReferencesCollectionName = null; + +// Using IndexDefinition +private ?string $patternForOutputReduceToCollectionReferences = null; +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **outputReduceToCollection** | `str` | Collection name for the output documents. | +| **patternReferencesCollectionName** | `str` | Optional collection name for the reference documents - by default it is `OutputReduceToCollection/References` | +| **patternForOutputReduceToCollectionReferences** | `str` | Document ID format for reference documents. This ID references the fields of the reduce function output, which determines how the output documents are aggregated. The type of this parameter is different depending on if the index is created using [IndexDefinition](../indexes/creating-and-deploying.mdx#using-maintenance-operations) or [AbstractIndexCreationTask](../indexes/creating-and-deploying.mdx#using-abstractindexcreationtask). 
|
+#### Example:
+
+Here is a map-reduce index with output documents and reference documents:
+
+
+
+
+{`class DailyProductSale
+{
+    public ?string $product = null;
+    public ?DateTime $date = null;
+    public ?int $count = null;
+    public ?float $total = null;
+
+    public function getProduct(): ?string
+    {
+        return $this->product;
+    }
+
+    public function setProduct(?string $product): void
+    {
+        $this->product = $product;
+    }
+
+    public function getDate(): ?DateTime
+    {
+        return $this->date;
+    }
+
+    public function setDate(?DateTime $date): void
+    {
+        $this->date = $date;
+    }
+
+    public function getCount(): ?int
+    {
+        return $this->count;
+    }
+
+    public function setCount(?int $count): void
+    {
+        $this->count = $count;
+    }
+
+    public function getTotal(): ?float
+    {
+        return $this->total;
+    }
+
+    public function setTotal(?float $total): void
+    {
+        $this->total = $total;
+    }
+}
+
+class ProductSales_ByDate extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "docs.Orders.SelectMany(order => order.Lines, (order, line) => new { " .
+            "    Product = line.Product, " .
+            "    Date = new DateTime(order.OrderedAt.Year, order.OrderedAt.Month, order.OrderedAt.Day), " .
+            "    Count = 1, " .
+            "    Total = (((decimal) line.Quantity) * line.PricePerUnit) * (1M - line.Discount) " .
+            "})";
+
+        $this->reduce = "results.GroupBy(result => new { " .
+            "    Product = result.Product, " .
+            "    Date = result.Date " .
+            "}).Select(g => new { " .
+            "    Product = g.Key.Product, " .
+            "    Date = g.Key.Date, " .
+            "    Count = Enumerable.Sum(g, x => ((int) x.Count)), " .
+            "    Total = Enumerable.Sum(g, x0 => ((decimal) x0.Total)) " .
+            "})";
+
+        $this->outputReduceToCollection = "DailyProductSales";
+        $this->patternReferencesCollectionName = "DailyProductSales/References";
+        $this->patternForOutputReduceToCollectionReferences = "sales/daily/{Date:yyyy-MM-dd}";
+    }
+}
+`}
+
+
+
+{`class Product_Sales_ByDate extends AbstractIndexCreationTask
+{
+    public function createIndexDefinition(): IndexDefinition
+    {
+        $indexDefinition = new IndexDefinition();
+        $indexDefinition->setMaps([
+            "from order in docs.Orders
+            from line in order.Lines
+            select new {
+                line.Product,
+                Date = order.OrderedAt,
+                Profit = line.Quantity * line.PricePerUnit * (1 - line.Discount)
+            };"
+        ]);
+        $indexDefinition->setReduce(
+            "from r in results
+            group r by new { r.Date, r.Product }
+            into g
+            select new {
+                Product = g.Key.Product,
+                Date = g.Key.Date,
+                Profit = g.Sum(r => r.Profit)
+            };"
+        );
+
+        $indexDefinition->setOutputReduceToCollection("DailyProductSales");
+        $indexDefinition->setPatternReferencesCollectionName("DailyProductSales/References");
+        $indexDefinition->setPatternForOutputReduceToCollectionReferences("sales/daily/{Date:yyyy-MM-dd}");
+
+        return $indexDefinition;
+    }
+}
+`}
+
+
+
+
+In the first index example above (the `ProductSales_ByDate` class, which inherits
+`AbstractIndexCreationTask`), the reference document ID pattern is set with the expression:
+
+
+
+{`$this->patternForOutputReduceToCollectionReferences = "sales/daily/\{Date:yyyy-MM-dd\}";
+`}
+
+
+
+This gives the reference documents IDs in this general format: `sales/daily/1998-05-06`.
+The reference document with that ID contains the IDs of all the output documents from
+May 6th, 1998.
+
+
+In the second index example above (which builds an `IndexDefinition` directly),
+the same reference document ID pattern is set with a setter:
+
+
+
+{`$indexDefinition->setPatternForOutputReduceToCollectionReferences("sales/daily/\{Date:yyyy-MM-dd\}");
+`}
+
+
+
+Either way, the reference documents get IDs in this general format: `sales/daily/1998-05-06`.
+The reference document with that ID contains the IDs of all the output documents from
+May 6th, 1998.
+
+
+
+## Remarks
+
+#### Saving documents
+
+[Artificial documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents)
+are stored immediately after the indexing transaction completes.
+
+#### Recursive indexing loop
+
+It is **forbidden** to output reduce results to collections such as the following:
+
+- A collection that the current index is already working on.
+  E.g., an index on a `DailyInvoices` collection outputs to `DailyInvoices`.
+- A collection that the current index is loading a document from.
+  E.g., an index with `LoadDocument(id, "Invoices")` outputs to `Invoices`.
+- Two collections, each processed by a map-reduce index,
+  where each index outputs to the collection processed by the other.
+  E.g.,
+  an index on the `Invoices` collection outputs to the `DailyInvoices` collection,
+  while an index on `DailyInvoices` outputs to `Invoices`.
+
+When an attempt to create such an infinite indexing loop is
+detected, a detailed error is generated.
+
+#### Output to an Existing Collection
+
+Creating a map-reduce index that defines an output collection which already
+exists and contains documents will result in an error.
+Delete all documents from the target collection before creating the index,
+or output the results to a different collection.
+
+#### Modification of Artificial Documents
+
+Artificial documents can be loaded and queried just like regular documents.
+However, it is **not** recommended to edit artificial documents manually, since
+any update of the index results would overwrite any manual modifications made to them.
+
+#### Map-Reduce Indexes on a Sharded Database
+
+On a [sharded database](../sharding/overview.mdx), the behavior of map-reduce
+indexes is altered in a few ways that database operators should be aware of.
+
+* Read [here](../sharding/indexing.mdx#map-reduce-indexes-on-a-sharded-database)
+  about map-reduce indexes on a sharded database.
+* Read [here](../sharding/querying.mdx#querying-map-reduce-indexes) about querying
+  map-reduce indexes on a sharded database.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_map-reduce-indexes-python.mdx b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-python.mdx
new file mode 100644
index 0000000000..7f5915f173
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_map-reduce-indexes-python.mdx
@@ -0,0 +1,688 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Map-Reduce indexes** allow complex ***data aggregation*** that can be queried
+  with very little cost, regardless of the data size.
+
+* To expedite queries and prevent performance degradation during queries, the aggregation
+  is done during the indexing phase, _not_ at query time.
+
+* Once new data enters the database, or existing documents are modified,
+  the Map-Reduce index will re-calculate the aggregated data so that the
+  aggregation results are always available and up-to-date.
+ +* The aggregation computation is done in two separate consecutive actions: + * **The `map` stage:** + This first stage runs the defined Map function(s) on each document, indexing the specified fields. + * **The `reduce` stage:** + This second stage groups the specified requested fields that were indexed in the Map stage, + and then runs the Reduce function to get a final aggregation result per field value. + +* In this page: + * [Creating Map Reduce Indexes](../indexes/map-reduce-indexes.mdx#creating-map-reduce-indexes) + * [Creating Multi-Map-Reduce Indexes](../indexes/map-reduce-indexes.mdx#creating-multi-map-reduce-indexes) + * [Reduce Results as Artificial Documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents) + * [Remarks](../indexes/map-reduce-indexes.mdx#remarks) + + + +## Creating Map Reduce Indexes + +When it comes to index creation, the only difference between simple indexes and the map-reduce ones +is an additional reduce function defined in the index definition. +To deploy an index we need to create a definition and deploy it using one of the ways described in the +[creating and deploying](../indexes/creating-and-deploying.mdx) article. +#### Example I - Count + +Let's assume that we want to count the number of products for each category. +To do it, we can create the following index using `LoadDocument` inside: + + + +{`class Products_ByCategory(AbstractIndexCreationTask): + class Result: + def __init__(self, category: str = None, count: int = None): + self.category = category + self.count = count + + def __init__(self): + super().__init__() + self.map = ( + "docs.Products.Select(product => new { " + " Product = Product," + ' CategoryName = (this.LoadDocument(product.Category, "Categories")).Name ' + "}).Select(this0 => new { " + " Category = this0.CategoryName, " + " Count = 1 " + "})" + ) + self.reduce = ( + "results.GroupBy(result => result.Category).Select(g => new {" + " Category = g.Key, " + " Count = Enumerable.Sum(g, x => ((int) x.Count)) " + "})" + ) +`} + + + + +{`class Products_ByCategory(AbstractJavaScriptIndexCreationTask): + class Result: + def __init__(self, category: str = None, count: int = None): + self.category = category + self.count = count + + def __init__(self): + super().__init__() + self.maps = { + """ + map('products', function(p){ + return { + Category: load(p.Category, 'Categories').Name, + Count: 1 + } + }) + """ + } + + self.reduce = """groupBy(x => x.Category) + .aggregate(g => { + return { + category: g.key, + count: g.values.reduce((count, val) => val.Count + count, 0) + }; + })""" +`} + + + + +and issue the query: + + + +{`results = list( + session.query_index_type(Products_ByCategory, Products_ByCategory.Result).where_equals( + "category", "Seafood" + ) +) +`} + + + + +{`from 'Products/ByCategory' +where Category == 'Seafood' +`} + + + + +The above query will return one result for _Seafood_ with the appropriate number of products from that category. +#### Example II - Average + +In this example, we will count an average product price for each category. 
+The index definition: + + + +{`class Products_Average_ByCategory(AbstractIndexCreationTask): + class Result: + def __init__( + self, category: str = None, price_sum: float = None, price_average: float = None, product_count: int = None + ): + self.category = category + self.price_sum = price_sum + self.price_average = price_average + self.product_count = product_count + + def __init__(self): + super().__init__() + self.map = """ + docs.Products.Select(product => new { + Product = Product, + CategoryName = (this.LoadDocument(product.Category, "Categories")).Name + }).Select(this0 => new { + category = this0.CategoryName, + price_sum = this0.Product.PricePerUnit, + price_average = 0, + product_count = 1 + }) + """ + self.reduce = """ + results.GroupBy(result => result.Category).Select(g => new { + g = g, + ProductCount = Enumerable.Sum(g, x => ((int) x.ProductCount)) + }).Select(this0 => new { + this0 = this0, + PriceSum = Enumerable.Sum(this0.g, x0 => ((decimal) x0.PriceSum)) + }).Select(this1 => new { + category = this1.this0.g.Key, + price_sum = this1.PriceSum, + price_average = this1.PriceSum / ((decimal) this1.this0.ProductCount), + product_count = this1.this0.ProductCount + }) + """ +`} + + + + +{`class Products_Average_ByCategory(AbstractJavaScriptIndexCreationTask): + class Result: + def __init__( + self, category: str = None, price_sum: float = None, price_average: float = None, product_count: int = None + ): + self.category = category + self.price_sum = price_sum + self.price_average = price_average + self.product_count = product_count + + def __init__(self): + super().__init__() + self.maps = { + """ + map('products', function(product){ + return { + Category: load(product.Category, 'Categories').Name, + PriceSum: product.PricePerUnit, + PriceAverage: 0, + ProductCount: 1 + } + }) + """ + } + + self.reduce = """ + groupBy(x => x.Category) + .aggregate(g => { + var pricesum = g.values.reduce((sum,x) => x.PriceSum + sum,0); + var productcount = g.values.reduce((sum,x) => x.ProductCount + sum,0); + return { + category: g.key, + price_sum: pricesum, + product_count: productcount, + price_average: pricesum / productcount + } + }) + """ +`} + + + + +and the query: + + + +{`results = list( + session.query_index_type( + Products_Average_ByCategory, Products_Average_ByCategory.Result + ).where_equals("category", "Seafood") +) +`} + + + + +{`from 'Products/Average/ByCategory' +where Category == 'Seafood' +`} + + + +#### Example III - Calculations + +This example illustrates how we can put some calculations inside an index using +one of the indexes available in the sample database (`Product/Sales`). + +We want to know how many times each product was ordered and how much we earned for it. 
+To extract that information, we need to define the following index: + + + +{`class Product_Sales(AbstractIndexCreationTask): + class Result: + def __init__(self, product: str = None, count: int = None, total: float = None): + self.product = product + self.count = count + self.total = total + + def __init__(self): + super().__init__() + self.map = """ + docs.Orders.SelectMany(order => order.Lines, (order, line) => new { + Product = line.Product, + Count = 1, + Total = (((decimal) line.Quantity) * line.PricePerUnit) * (1M - line.Discount) + }) + """ + self.reduce = """ + results.GroupBy(result => result.Product).Select(g => new { + product = g.Key, + count = Enumerable.Sum(g, x => ((int) x.Count)), + total = Enumerable.Sum(g, x0 => ((decimal) x0.Total)) + }) + """ +`} + + + + +{`class Product_Sales(AbstractJavaScriptIndexCreationTask): + class Result: + def __init__(self, product: str = None, count: int = None, total: float = None): + self.product = product + self.count = count + self.total = total + + def __init__(self): + super().__init__() + self.maps = { + """ + map('orders', function(order){ + var res = []; + order.Lines.forEach(l => { + res.push({ + Product: l.Product, + Count: 1, + Total: (l.Quantity * l.PricePerUnit) * (1- l.Discount) + }) + }); + return res; + }) + """ + } + self.reduce = """ + groupBy(x => x.Product) + .aggregate(g => { + return { + Product : g.key, + Count: g.values.reduce((sum, x) => x.Count + sum, 0), + Total: g.values.reduce((sum, x) => x.Total + sum, 0) + } + }) + """ +`} + + + + +And run the query: + + + +{`results = list(session.query_index_type(Product_Sales, Product_Sales.Result)) +`} + + + + +{`from 'Product/Sales' +`} + + + + + + +## Creating Multi-Map-Reduce Indexes + +A **Multi-Map-Reduce** index allows aggregating (or 'reducing') data from several collections. + +They can be created and edited via [Studio](../studio/database/indexes/create-map-reduce-index.mdx#multi-map-reduce), +or with API as shown below. + +In the following code sample, we want the number of companies, suppliers, and employees per city. +We define the map phase on collections 'Employees', 'Companies', and 'Suppliers'. +We then define the reduce phase. + + +{`class Cities_Details(AbstractMultiMapIndexCreationTask): + class IndexEntry: + def __init__(self, city: str = None, companies: int = None, employees: int = None, suppliers: int = None): + self.city = city + self.companies = companies + self.employees = employees + self.suppliers = suppliers + + def __init__(self): + super().__init__() + self._add_map( + """ + from e in docs.Employees + select new \{ + city = e.Address.City, + companies = 0, + supplier = 0, + employees = 1 + \} + """ + ) + + self._add_map( + """ + from c in docs.Companies + select new \{ + city = e.Address.City, + companies = 1, + supplier = 0, + employees = 0 + \} + """ + ) + + self._add_map( + """ + from s in docs.Suppliers + select new \{ + city = e.Address.City, + companies = 0, + supplier = 1, + employees = 0 + \} + """ + ) + + self.reduce = """ + from result in results + group result by result.city + into g + select new \{ + city = g.Key, + companies = g.Sum(x => x.companies), + suppliers = g.Sum(x => x.suppliers), + employees = g.Sum(x => x.employees) + \} + """ +`} + + + +A query on the index: + + +{`# Queries the index "Cities_Details" - filters "Companies" results and orders by "City". 
+commerce_details = list( + session.query_index_type(Cities_Details, Cities_Details.IndexEntry) + .where_greater_than("companies", 5) + .order_by("city") +) +`} + + + + +You can see this sample described in detail in [Inside RavenDB - Multi-Map-Reduce Indexes](https://ravendb.net/learn/inside-ravendb-book/reader/4.0/11-mapreduce-and-aggregations-in-ravendb#multimapreduce-indexes). + + + + +## Reduce Results as Artificial Documents + +#### Map-Reduce Output Documents + +In addition to storing the aggregation results in the index, the map-reduce index can also output +those reduce results as documents to a specified collection. In order to create these documents, +called _"artificial",_ you need to define the target collection using the `output_reduce_to_collection` +property in the index definition. + +Writing map-reduce outputs into documents allows you to define additional indexes on top of them +that give you the option to create recursive map-reduce operations. This makes it cheap and easy +to, for example, recursively create daily, monthly, and yearly summaries on the same data. + +In addition, you can also apply the usual operations on artificial documents (e.g. data +subscriptions or ETL). + +If the aggregation value for a given reduce key changes, we overwrite the output document. If the +given reduce key no longer has a result, the output document will be removed. + +#### Reference Documents + +To help organize these output documents, the map-reduce index can also create an additional +collection of artificial _reference documents_. These documents aggregate the output documents +and store their document IDs in an array field `ReduceOutputs`. + +The document IDs of reference documents are customized to follow some pattern. The format you +give to their document ID also determines how the output documents are grouped. + +Because reference documents have well known, predictable IDs, they are easier to plug into +indexes and other operations, and can serve as an intermediary for the output documents whose +IDs are less predictable. This allows you to chain map-reduce indexes in a recursive fashion, +see [Example II](../indexes/map-reduce-indexes.mdx#example-ii). + +Learn more about how to configure output and reference documents in the +[Studio: Create Map-Reduce Index](../studio/database/indexes/create-map-reduce-index.mdx). +### Artificial Document Properties + +#### IDs + +The identifiers of **map reduce output documents** have three components in this format: + +`//` + +The index in [Example I](../indexes/map-reduce-indexes.mdx#example-i) might generate an output document +ID like this: + +`DailyProductSales/35/14369232530304891504` + +* "DailyProductSales" is the collection name specified for the output documents. +* The middle part is an incrementing integer assigned by the server. This number grows by some +amount whenever the index definition is modified. This can be useful because when an index definition +changes, there is a brief transition phase when the new output documents are being created, but the +old output documents haven't been deleted yet (this phase is called +["side-by-side indexing"](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---side-by-side-indexing)). +During this phase, the output collection contains output documents created both by the old version +and the new version of the index, and they can be distinguished by this value: the new output +documents will always have a higher value (by 1 or more). 
+* The last part of the document ID (the unique part) is the hash of the reduce key values - in this +case: `hash(Product, Month)`. + +The identifiers of **reference documents** follow some pattern you choose, and this pattern +determines which output documents are held by a given reference document. + +The index in [Example I](../indexes/map-reduce-indexes.mdx#example-i) has this pattern for reference documents: + +`sales/daily/{Date:yyyy-MM-dd}` + +And this produces reference document IDs like this: + +`sales/daily/1998-05-06` + +The pattern is built using the same syntax as +[the `StringBuilder.AppendFormat` method](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder.appendformat). +See [here](https://docs.microsoft.com/en-us/dotnet/standard/base-types/standard-date-and-time-format-strings) +to learn about the date formatting in particular. +#### Metadata + +Artificial documents generated by map-reduce indexes get the following `@flags` in their metadata: + + + +{`"@flags": "Artificial, FromIndex" +`} + + + +These flags are used internally by the database to filter out artificial documents during replication. +### Syntax + +The map-reduce output documents are configured with these properties of +`IndexDefinition`: + + + +{`self._output_reduce_to_collection = output_reduce_to_collection +self._pattern_references_collection_name = pattern_references_collection_name +self._pattern_for_output_reduce_to_collection_references = pattern_for_output_reduce_to_collection_references +`} + + + +| Parameters | Type | Description | +| - | - | - | +| **\_output_reduce_to_collection** | `str` | Collection name for the output documents. | +| **\_pattern_references_collection_name** | `str` | Optional collection name for the reference documents - by default it is `OutputReduceToCollection/References` | +| **\_pattern_for_output_reduce_to_collection_references** | `str` | Document ID format for reference documents. This ID references the fields of the reduce function output, which determines how the output documents are aggregated. The type of this parameter is different depending on if the index is created using [IndexDefinition](../indexes/creating-and-deploying.mdx#using-maintenance-operations) or [AbstractIndexCreationTask](../indexes/creating-and-deploying.mdx#using-abstractindexcreationtask). 
| +#### Example: + +Here is a map-reduce index with output documents and reference documents: + + + + +{`class Product_Sales_ByMonth(AbstractIndexCreationTask): + class Result: + def __init__( + self, product: str = None, month: datetime.datetime = None, count: int = None, total: float = None + ): + self.product = product + self.month = month + self.count = count + self.total = total + + def __init__(self): + super().__init__() + self.map = """ + docs.Orders.SelectMany(order => order.Lines, (order, line) => new { + Product = line.Product, + Month = new DateTime(order.OrderedAt.Year, order.OrderedAt.Month, 1), + Count = 1, + Total = (((decimal) line.Quantity) * line.PricePerUnit) * (1M - line.Discount) + }) + """ + + self.reduce = """ + results.GroupBy(result => new { + Product = result.Product, + Month = result.Month + }).Select(g => new { + product = g.Key.Product, + month = g.Key.Month, + count = Enumerable.Sum(g, x => ((int) x.Count)), + total = Enumerable.Sum(g, x0 => ((decimal) x0.Total)) + }) + """ + self._output_reduce_to_collection = "MonthlyProductSales" + self._pattern_references_collection_name = "DailyProductSales/References" + self._pattern_for_output_reduce_to_collection_references = "sales/daily/{Date:yyyy-MM-dd}" +`} + + + + +{`class Product_Sales_ByDate(AbstractIndexCreationTask): + def create_index_definition(self) -> IndexDefinition: + return IndexDefinition( + maps={ + """ + from order in docs.Orders + from line in order.Lines + select new { + line.Product, + Date = order.OrderedAt, + Profit = line.Quantity * line.PricePerUnit * (1 - line.Discount) + }; + """ + }, + reduce=""" + from r in results + group r by new { r.OrderedAt, r.Product } + into g + select new { + Product = g.Key.Product, + Date = g.Key.Date, + Profit = g.Sum(r => r.Profit) + }; + """, + output_reduce_to_collection="DailyProductSales", + pattern_references_collection_name="DailyProductSales/References", + pattern_for_output_reduce_to_collection_references="sales/daily/{Date:yyyy-MM-dd}", + ) +`} + + + + +In the index example above (which inherits `AbstractIndexCreationTask`), +the reference document ID pattern is set with the expression: + + + +{`self._pattern_for_output_reduce_to_collection_references = "sales/daily/\{Date:yyyy-MM-dd\}" +`} + + + +This gives the reference documents IDs in this general format: `sales/monthly/1998-05-01`. +The reference document with that ID contains the IDs of all the output documents from the +month of May 1998. + + +In the **JavaScript** index example (which uses `IndexDefinition`), +the reference document ID pattern is set with a `string`: + + + +{`pattern_for_output_reduce_to_collection_references="sales/daily/\{Date:yyyy-MM-dd\}" +`} + + + +This gives the reference documents IDs in this general format: `sales/daily/1998-05-06`. +The reference document with that ID contains the IDs of all the output documents from +May 6th 1998. + + + +## Remarks + +#### Saving documents + +[Artificial documents](../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents) +are stored immediately after the indexing transaction completes. +#### Recursive indexing loop + +It is **forbidden** to output reduce results to collections such as the following: + +- A collection that the current index is already working on. + E.g., an index on a `DailyInvoices` collection outputs to `DailyInvoices`. +- A collection that the current index is loading a document from. + E.g., an index with `LoadDocument(id, "Invoices")` outputs to `Invoices`. 
+- Two collections, each processed by a map-reduce index,
+  where each index outputs to the collection processed by the other.
+  E.g.,
+  an index on the `Invoices` collection outputs to the `DailyInvoices` collection,
+  while an index on `DailyInvoices` outputs to `Invoices`.
+
+When an attempt to create such an infinite indexing loop is
+detected, a detailed error is generated.
+
+#### Output to an Existing Collection
+
+Creating a map-reduce index that defines an output collection which already
+exists and contains documents will result in an error.
+Delete all documents from the target collection before creating the index,
+or output the results to a different collection.
+
+#### Modification of Artificial Documents
+
+Artificial documents can be loaded and queried just like regular documents.
+However, it is **not** recommended to edit artificial documents manually, since
+any update of the index results would overwrite any manual modifications made to them.
+
+#### Map-Reduce Indexes on a Sharded Database
+
+On a [sharded database](../sharding/overview.mdx), the behavior of map-reduce
+indexes is altered in a few ways that database operators should be aware of.
+
+* Read [here](../sharding/indexing.mdx#map-reduce-indexes-on-a-sharded-database)
+  about map-reduce indexes on a sharded database.
+* Read [here](../sharding/querying.mdx#querying-map-reduce-indexes) about querying
+  map-reduce indexes on a sharded database.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_multi-map-indexes-csharp.mdx b/versioned_docs/version-7.1/indexes/_multi-map-indexes-csharp.mdx
new file mode 100644
index 0000000000..d4784d3bbe
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_multi-map-indexes-csharp.mdx
@@ -0,0 +1,259 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Multi-Map indexes allow you to index data from multiple collections,
+  like polymorphic data or any data common to different types.
+
+* Learn how to [index polymorphic data](../indexes/indexing-polymorphic-data.mdx)
+  Learn how to [create Multi-Map-Reduce indexes](../indexes/map-reduce-indexes.mdx#creating-multi-map-reduce-indexes)
+
+* In this page:
+  * [AddMap & AddMapForAll](../indexes/multi-map-indexes.mdx#addmap--addmapforall)
+  * [Searching across multiple collections](../indexes/multi-map-indexes.mdx#searching-across-multiple-collections)
+  * [Remarks](../indexes/multi-map-indexes.mdx#remarks)
+
+
+
+## AddMap & AddMapForAll
+
+The `AddMap` method is used to map fields from a single collection, e.g. `Dogs`.
+`AddMapForAll` gives you the ability to specify what fields will be indexed from a base class.
+ +Let's assume that we have `Dog` and `Cat` classes, both inheriting from the class `Animal`: + + + + +{`public class Dog : Animal +{ + +} +`} + + + + +{`public class Cat : Animal +{ + +} +`} + + + + +{`public abstract class Animal : IAnimal +{ + public string Name { get; set; } +} +`} + + + + +{`public interface IAnimal +{ + string Name { get; set; } +} +`} + + + + +We can define our index using `AddMap` or `AddMapForAll` and query it as follows: + + + + +{`public class Animals_ByName : AbstractMultiMapIndexCreationTask +{ + public Animals_ByName() + { + AddMap(cats => from c in cats select new { c.Name }); + + AddMap(dogs => from d in dogs select new { d.Name }); + } +} +`} + + + + +{`public class Animals_ByName_ForAll : AbstractMultiMapIndexCreationTask +{ + public Animals_ByName_ForAll() + { + AddMapForAll(parents => from p in parents select new { p.Name }); + } +} +`} + + + + +{`public class Animals_ByName : AbstractJavaScriptIndexCreationTask +{ + public Animals_ByName() + { + Maps = new HashSet() + { + @"map('cats', function (c){ return {Name: c.Name}})", + @"map('dogs', function (d){ return {Name: d.Name}})" + }; + } +} +`} + + + + + + + +{`IList results = session + .Query() + .Where(x => x.Name == "Mitzy") + .ToList(); +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() + .WhereEquals(x => x.Name, "Mitzy") + .ToList(); +`} + + + + +{`from index 'Animals/ByName' +where Name = 'Mitzy' +`} + + + + + + +## Searching across multiple collections + +Another great usage of Multi-Map indexes is smart-search. + +To search for products, companies, or employees by their name, you need to define the following index: + + +{`public class Smart_Search : AbstractMultiMapIndexCreationTask +\{ + public class Result + \{ + public string Id \{ get; set; \} + + public string DisplayName \{ get; set; \} + + public object Collection \{ get; set; \} + + public string[] Content \{ get; set; \} + \} + + public class Projection + \{ + public string Id \{ get; set; \} + + public string DisplayName \{ get; set; \} + + public string Collection \{ get; set; \} + \} + + public Smart_Search() + \{ + AddMap(companies => from c in companies + select new Result + \{ + Id = c.Id, + Content = new[] + \{ + c.Name + \}, + DisplayName = c.Name, + Collection = MetadataFor(c)["@collection"] + \}); + + AddMap(products => from p in products + select new Result + \{ + Id = p.Id, + Content = new[] + \{ + p.Name + \}, + DisplayName = p.Name, + Collection = MetadataFor(p)["@collection"] + \}); + + AddMap(employees => from e in employees + select new Result + \{ + Id = e.Id, + Content = new[] + \{ + e.FirstName, + e.LastName + \}, + DisplayName = e.FirstName + " " + e.LastName, + Collection = MetadataFor(e)["@collection"] + \}); + + // mark 'Content' field as analyzed which enables full text search operations + Index(x => x.Content, FieldIndexing.Search); + + // storing fields so when projection (e.g. 
ProjectInto) + // requests only those fields + // then data will come from index only, not from storage + Store(x => x.Id, FieldStorage.Yes); + Store(x => x.DisplayName, FieldStorage.Yes); + Store(x => x.Collection, FieldStorage.Yes); + \} +\} +`} + + + +and query it using: + + +{`IList results = session + .Query() + .Search(x => x.Content, "Lau*") + .ProjectInto() + .ToList(); + +foreach (Smart_Search.Projection result in results) +\{ + Console.WriteLine(result.Collection + ": " + result.DisplayName); + // Companies: Laughing Bacchus Wine Cellars + // Products: Laughing Lumberjack Lager + // Employees: Laura Callahan +\} +`} + + + + + +## Remarks + + +Remember that all map functions **must** output objects +with an **identical** shape (the field names have to match). + + + + + diff --git a/versioned_docs/version-7.1/indexes/_multi-map-indexes-java.mdx b/versioned_docs/version-7.1/indexes/_multi-map-indexes-java.mdx new file mode 100644 index 0000000000..2ba3b91466 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_multi-map-indexes-java.mdx @@ -0,0 +1,279 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Multi-Map indexes allow you to index data from multiple collections, + like polymorphic data or any data common to different types. + +* Learn how to [index polymorphic data](../indexes/indexing-polymorphic-data.mdx) + Learn how to [create Multi-Map-Reduce indexes](../indexes/map-reduce-indexes.mdx#creating-multi-map-reduce-indexes) + +* In this page: + * [AddMap](../indexes/multi-map-indexes.mdx#addmap) + * [Searching across multiple collections](../indexes/multi-map-indexes.mdx#searching-across-multiple-collections) + * [Remarks](../indexes/multi-map-indexes.mdx#remarks) + + + +## AddMap + +The `AddMap` method is used to map fields from a single collection, e.g. `Dogs`. +Let's assume that we have `Dog` and `Cat` classes, both inheriting from the class `Animal`: + + + + +{`public static class Dog extends Animal { + +} +`} + + + + +{`public static class Cate extends Animal { + +} +`} + + + + +{`public abstract static class Animal implements IAnimal { + private String name; + + @Override + public String getName() { + return name; + } + + @Override + public void setName(String name) { + this.name = name; + } +} +`} + + + + +{`public interface IAnimal { + String getName(); + void setName(String name); +} +`} + + + + +Now we can define our index using `addMap` and query it as follows: + + + + +{`public static class Animals_ByName extends AbstractMultiMapIndexCreationTask { + public Animals_ByName() { + addMap( "docs.Cats.Select(c => new { " + + " Name = c.Name " + + "})"); + + addMap( "docs.Dogs.Select(d => new { " + + " Name = d.Name " + + "})"); + } +} +`} + + + + +{`public static class Animals_ByName extends AbstractJavaScriptIndexCreationTask { + public Animals_ByName() { + setMaps(Sets.newHashSet( + "map('cats', function (c){ return {name: c.name}})", + "map('dogs', function (d){ return {name: d.name}})" + )); + } +} +`} + + + + + + + +{`List results = session + .query(IAnimal.class, Animals_ByName.class) + .whereEquals("Name", "Mitzy") + .toList(); +`} + + + + +{`from index 'Animals/ByName' +where Name = 'Mitzy' +`} + + + + + + +## Searching across multiple collections + +Another great usage of Multi-Map indexes is smart-search. 
+ +To search for products, companies, or employees by their name, you need to define the following index: + + +{`public static class Smart_Search extends AbstractMultiMapIndexCreationTask \{ + public static class Result \{ + private String id; + private String displayName; + private String collection; + private String content; + + public String getId() \{ + return id; + \} + + public void setId(String id) \{ + this.id = id; + \} + + public String getDisplayName() \{ + return displayName; + \} + + public void setDisplayName(String displayName) \{ + this.displayName = displayName; + \} + + public String getCollection() \{ + return collection; + \} + + public void setCollection(String collection) \{ + this.collection = collection; + \} + + public String getContent() \{ + return content; + \} + + public void setContent(String content) \{ + this.content = content; + \} + \} + + public static class Projection \{ + private String id; + private String displayName; + private String collection; + + public String getId() \{ + return id; + \} + + public void setId(String id) \{ + this.id = id; + \} + + public String getDisplayName() \{ + return displayName; + \} + + public void setDisplayName(String displayName) \{ + this.displayName = displayName; + \} + + public String getCollection() \{ + return collection; + \} + + public void setCollection(String collection) \{ + this.collection = collection; + \} + \} + + public Smart_Search() \{ + + addMap("docs.Companies.Select(c => new \{ " + + " Id = Id(c), " + + " Content = new string[] \{ " + + " c.Name " + + " \}, " + + " DisplayName = c.Name, " + + " Collection = this.MetadataFor(c)[\\"@collection\\"] " + + "\})"); + + addMap("docs.Products.Select(p => new \{ " + + " Id = Id(p), " + + " Content = new string[] \{ " + + " p.Name " + + " \}, " + + " DisplayName = p.Name, " + + " Collection = this.MetadataFor(p)[\\"@collection\\"] " + + "\})"); + + addMap("docs.Employees.Select(e => new \{ " + + " Id = Id(e), " + + " Content = new string[] \{ " + + " e.FirstName, " + + " e.LastName " + + " \}, " + + " DisplayName = (e.FirstName + \\" \\") + e.LastName, " + + " Collection = this.MetadataFor(e)[\\"@collection\\"] " + + "\})"); + + // mark 'content' field as analyzed which enables full text search operations + index("Content", FieldIndexing.SEARCH); + + // storing fields so when projection (e.g. ProjectInto) + // requests only those fields + // then data will come from index only, not from storage + store("Id", FieldStorage.YES); + store("DisplayName", FieldStorage.YES); + store("Collection", FieldStorage.YES); + \} +\} +`} + + + +and query it using: + + +{`List results = session + .query(Smart_Search.Result.class, Smart_Search.class) + .search("Content", "Lau*") + .selectFields(Smart_Search.Projection.class) + .toList(); + +for (Smart_Search.Projection result : results) \{ + System.out.println(result.getCollection() + ": " + result.getDisplayName()); + // Companies: Laughing Bacchus Wine Cellars + // Products: Laughing Lumberjack Lager + // Employees: Laura Callahan +\} +`} + + + + + +## Remarks + + +Remember that all map functions **must** output objects +with an **identical** shape (the field names have to match). 
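
For example, a Multi-Map definition whose maps emit differently named fields will fail on the server (a deliberately broken sketch, mirroring the `Animals_ByName` index above):

{`// INVALID: the two maps do not produce the same shape ('Name' vs 'name'),
// so the server will refuse to create this Multi-Map index.
addMap("docs.Cats.Select(c => new { Name = c.Name })");
addMap("docs.Dogs.Select(d => new { name = d.Name })");
`}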
+ + + + + diff --git a/versioned_docs/version-7.1/indexes/_multi-map-indexes-nodejs.mdx b/versioned_docs/version-7.1/indexes/_multi-map-indexes-nodejs.mdx new file mode 100644 index 0000000000..722db1036c --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_multi-map-indexes-nodejs.mdx @@ -0,0 +1,198 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Multi-Map indexes allow you to index data from multiple collections, + like polymorphic data or any data common to different types. + +* Learn how to [index polymorphic data](../indexes/indexing-polymorphic-data.mdx) + Learn how to [create Multi-Map-Reduce indexes](../indexes/map-reduce-indexes.mdx#creating-multi-map-reduce-indexes) + +* In this page: + * [Indexing multiple collections](../indexes/multi-map-indexes.mdx#indexing-multiple-collections) + * [Searching across multiple collections](../indexes/multi-map-indexes.mdx#searching-across-multiple-collections) + * [Remarks](../indexes/multi-map-indexes.mdx#remarks) + + + +## Indexing multiple collections + +Let's assume that we have `Dog` and `Cat` classes, both inheriting from the class `Animal`: + + + + +{`class Dog extends Animal { } +`} + + + + +{`class Cat extends Animal { } +`} + + + + +{`class Animal { + constructor(name) { + this.name = name; + } +} +`} + + + + +Now we can define and query our index as follows: + + + +{`class Animals_ByName extends AbstractJavaScriptMultiMapIndexCreationTask \{ + constructor() \{ + super(); + + // Index field 'name' from the Cats collection + this.map('Cats', cat => \{ + return \{ + name: cat.name + \}; + \}); + + // Index field 'name' from the Dogs collection + this.map('Dogs', dog => \{ + return \{ + name: dog.name + \}; + \}); + \} +\} +`} + + + + + + +{`const results = await session + // Query the index + .query({ indexName: "Animals/ByName" }) + // Look for all animals (either a cat or a dog) that are named 'Mitzy' :) + .whereEquals("name", "Mitzy") + .all(); +`} + + + + +{`from index "Animals/ByName" +where Name == "Mitzy" +`} + + + + + + +## Searching across multiple collections + +Another great usage of Multi-Map indexes is smart-search. + +To search for products, companies, or employees by their name, you need to define the following index: + + +{`class Smart_Search extends AbstractJavaScriptMultiMapIndexCreationTask \{ + constructor() \{ + super(); + + this.map('Companies', company => \{ + return \{ + id: id(company), + content: company.Name, + displayName: company.Name, + collection: this.getMetadata(company)["@collection"] + \}; + \}); + + this.map('Products', product => \{ + return \{ + id: id(product), + content: product.Name, + displayName: product.Name, + collection: this.getMetadata(product)["@collection"] + \}; + \}); + + this.map('Employees', employee => \{ + return \{ + id: id(employee), + content: [employee.FirstName, employee.LastName], + displayName: employee.FirstName + " " + employee.LastName, + collection: this.getMetadata(employee)["@collection"] + \}; + \}); + + // Mark the 'content' field with 'Search' to enable full-text search queries + this.index("content", "Search"); + + // Store fields in index so that when projecting these fields from the query + // the data will come from the index, and not from the storage. 
+ this.store("id", "Yes"); + this.store("collection", "Yes"); + this.store("displayName", "Yes"); + \} +\} +`} + + + +and query it using: + + + +{`const results = await session + .query({ indexName: "Smart/Search" }) + // Search across all indexed collections + .search("content", "Lau*") + // Project results + .selectFields([ "id", "displayName", "collection" ]) + .all(); + +// Results: +// ======== + +for (const result of results) { + console.log(result.collection + ": " + result.displayName); + + // Companies: Laughing Bacchus Wine Cellars + // Products: Laughing Lumberjack Lager + // Employees: Laura Callahan +} +`} + + + + +{`from index "Smart/Search" +where search(content, "Lau*") +select id() as id, displayName, collection +`} + + + + + + +## Remarks + + +Remember that all map functions **must** output objects +with an **identical** shape (the field names have to match). + + + + + diff --git a/versioned_docs/version-7.1/indexes/_multi-map-indexes-php.mdx b/versioned_docs/version-7.1/indexes/_multi-map-indexes-php.mdx new file mode 100644 index 0000000000..ac0eafe248 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_multi-map-indexes-php.mdx @@ -0,0 +1,318 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Multi-Map indexes allow you to index data from multiple collections, + like polymorphic data or any data common to different types. + +* Learn how to [index polymorphic data](../indexes/indexing-polymorphic-data.mdx) + Learn how to [create Multi-Map-Reduce indexes](../indexes/map-reduce-indexes.mdx#creating-multi-map-reduce-indexes) + +* In this page: + * [`addMap`](../indexes/multi-map-indexes.mdx#addmap) + * [Searching across multiple collections](../indexes/multi-map-indexes.mdx#searching-across-multiple-collections) + * [Remarks](../indexes/multi-map-indexes.mdx#remarks) + + + +## `addMap` + +The `addMap` method is used to map fields from a single collection, e.g. `Dogs`. + +Let's assume that we have `Dog` and `Cat` classes, both inheriting from the class `Animal`: + + + + +{`_1 + /** @var array $results */ + $results = $session + ->documentQuery(Smart_Search_Result::class, Smart_Search::class) + ->search("Content", "Lau*") + ->selectFields(Smart_Search_Projection::class) + ->toList(); + + /** @var Smart_Search_Projection $result */ + foreach ($results as $result) + { + echo $result->getCollection() . ": " . $result->getDisplayName(); + // Companies: Laughing Bacchus Wine Cellars + // Products: Laughing Lumberjack Lager + // Employees: Laura Callahan + } +`} + + + + +{`class Cat extends Animal +{ + +} +`} + + + + +{`abstract class Animal implements AnimalInterface +{ + public ?string $name = null; + + public function getName(): ?string + { + return $this->name; + } + + public function setName(?string $name): void + { + $this->name = $name; + } +} +`} + + + + +We can define our index using `addMap` and query it as follows: + + + + +{`class Animals_ByName extends AbstractMultiMapIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->addMap( "docs.Cats.Select(c => new { " . + " Name = c.Name " . + "})"); + + $this->addMap( "docs.Dogs.Select(d => new { " . + " Name = d.Name " . 
+ "})"); + } +} +`} + + + + +{`class Animals_ByName extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('cats', function (c){ return {Name: c.Name}})", + "map('dogs', function (d){ return {Name: d.Name}})" + ]); + } +} +`} + + + + + + + +{`/** @var array $results */ +$results = $session + ->query(AnimalInterface::class, Animals_ByName::class) + ->whereEquals("Name", "Mitzy") + ->toList(); +`} + + + + +{`from index 'Animals/ByName' +where Name = 'Mitzy' +`} + + + + + + +## Searching across multiple collections + +Another great usage of Multi-Map indexes is smart-search. + +To search for products, companies, or employees by their name, you need to define the following index: + + +{`class Smart_Search_Result +\{ + private ?string $id = null; + private ?string $displayName = null; + private ?string $collection = null; + private ?string $content = null; + + public function getId(): ?string + \{ + return $this->id; + \} + + public function setId(?string $id): void + \{ + $this->id = $id; + \} + + public function getDisplayName(): ?string + \{ + return $this->displayName; + \} + + public function setDisplayName(?string $displayName): void + \{ + $this->displayName = $displayName; + \} + + public function getCollection(): ?string + \{ + return $this->collection; + \} + + public function setCollection(?string $collection): void + \{ + $this->collection = $collection; + \} + + public function getContent(): ?string + \{ + return $this->content; + \} + + public function setContent(?string $content): void + \{ + $this->content = $content; + \} +\} + +class Smart_Search_Projection +\{ + private ?string $id = null; + private ?string $displayName = null; + private ?string $collection = null; + + public function getId(): ?string + \{ + return $this->id; + \} + + public function setId(?string $id): void + \{ + $this->id = $id; + \} + + public function getDisplayName(): ?string + \{ + return $this->displayName; + \} + + public function setDisplayName(?string $displayName): void + \{ + $this->displayName = $displayName; + \} + + public function getCollection(): ?string + \{ + return $this->collection; + \} + + public function setCollection(?string $collection): void + \{ + $this->collection = $collection; + \} +\} + +class Smart_Search extends AbstractMultiMapIndexCreationTask +\{ + public function __construct() + \{ + parent::__construct(); + + $this->addMap("docs.Companies.Select(c => new \{ " . + " Id = Id(c), " . + " Content = new string[] \{ " . + " c.Name " . + " \}, " . + " DisplayName = c.Name, " . + " Collection = this.MetadataFor(c)[\\"@collection\\"] " . + "\})"); + + $this->addMap("docs.Products.Select(p => new \{ " . + " Id = Id(p), " . + " Content = new string[] \{ " . + " p.Name " . + " \}, " . + " DisplayName = p.Name, " . + " Collection = this.MetadataFor(p)[\\"@collection\\"] " . + "\})"); + + $this->addMap("docs.Employees.Select(e => new \{ " . + " Id = Id(e), " . + " Content = new string[] \{ " . + " e.FirstName, " . + " e.LastName " . + " \}, " . + " DisplayName = (e.FirstName + \\" \\") + e.LastName, " . + " Collection = this.MetadataFor(e)[\\"@collection\\"] " . + "\})"); + + // mark 'content' field as analyzed which enables full text search operations + $this->index("Content", FieldIndexing::search()); + + // storing fields so when projection (e.g. 
ProjectInto) + // requests only those fields + // then data will come from index only, not from storage + $this->store("Id", FieldStorage::yes()); + $this->store("DisplayName", FieldStorage::yes()); + $this->store("Collection", FieldStorage::yes()); + + \} +\} +`} + + + +and query it using: + + +{`/** @var array $results */ +$results = $session + ->documentQuery(Smart_Search_Result::class, Smart_Search::class) + ->search("Content", "Lau*") + ->selectFields(Smart_Search_Projection::class) + ->toList(); + +/** @var Smart_Search_Projection $result */ +foreach ($results as $result) +\{ + echo $result->getCollection() . ": " . $result->getDisplayName(); + // Companies: Laughing Bacchus Wine Cellars + // Products: Laughing Lumberjack Lager + // Employees: Laura Callahan +\} +`} + + + + + +## Remarks + + +Remember that all map functions **must** output objects +with an **identical** shape (the field names have to match). + + + + + diff --git a/versioned_docs/version-7.1/indexes/_multi-map-indexes-python.mdx b/versioned_docs/version-7.1/indexes/_multi-map-indexes-python.mdx new file mode 100644 index 0000000000..98ce38d81c --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_multi-map-indexes-python.mdx @@ -0,0 +1,198 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Multi-Map indexes allow you to index data from multiple collections, + like polymorphic data or any data common to different types. + +* Learn how to [index polymorphic data](../indexes/indexing-polymorphic-data.mdx) + Learn how to [create Multi-Map-Reduce indexes](../indexes/map-reduce-indexes.mdx#creating-multi-map-reduce-indexes) + +* In this page: + * [`_add_map`](../indexes/multi-map-indexes.mdx#_add_map) + * [Searching across multiple collections](../indexes/multi-map-indexes.mdx#searching-across-multiple-collections) + * [Remarks](../indexes/multi-map-indexes.mdx#remarks) + + + +## `_add_map` + +The `_add_map` method is used to map fields from a single collection, e.g. `Dogs`. + +Let's assume that we have `Dog` and `Cat` classes, both inheriting from the class `Animal`: + + + + +{`class Dog(Animal): ... +`} + + + + +{`class Cat(Animal): ... +`} + + + + +{`class Animal(ABC): + def __init__(self, name: str = None): + self.name = name +`} + + + + +We can define our index using `_add_map` and query it as follows: + + + + +{`class Animals_ByName(AbstractMultiMapIndexCreationTask): + def __init__(self): + super().__init__() + self._add_map("from c in docs.Cats select new { c.name }") + self._add_map("from d in docs.Dogs select new { d.name }") +`} + + + + +{`class Animals_ByName(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + "map('cats', function (c){ return {Name: c.Name}})", + "map('dogs', function (d){ return {Name: d.Name}})", + } +`} + + + + + + + +{`results = list(session.query_index_type(Animals_ByName, Animal).where_equals("name", "Mitzy")) +`} + + + + +{`from index 'Animals/ByName' +where Name = 'Mitzy' +`} + + + + + + +## Searching across multiple collections + +Another great usage of Multi-Map indexes is smart-search. 
+ +To search for products, companies, or employees by their name, you need to define the following index: + + +{`class Smart_Search(AbstractMultiMapIndexCreationTask): + class Result: + def __init__( + self, Id: str = None, display_name: str = None, collection: object = None, content: List[str] = None + ): + self.Id = Id + self.display_name = display_name + self.collection = collection + self.content = content + + class Projection: + def __init__(self, Id: str = None, display_name: str = None, collection: str = None): + self.Id = Id + self.display_name = display_name + self.collection = collection + + def __init__(self): + super().__init__() + self._add_map( + "from c in docs.Companies select new \{" + "Id = c.Id," + "content = new[]" + "\{" + " c.name" + "\}," + "display_name= c.name, " + 'collection = MetadataFor(c)["@collection"]' + "\}" + ) + + self._add_map( + "from p in docs.Products select new \{" + "Id = p.Id," + "content = new[]" + "\{" + " p.name" + "\}," + "display_name = p.name," + 'collection = MetadataFor(p)["@collection"]' + "\}" + ) + + self._add_map( + "from e in docs.Employees select new \{" + "Id = e.Id," + "content = new[]" + "\{" + " e.first_name," + " e.last_name" + "\}," + 'display_name = e.first_name + " " + e.last_name,' + 'collection = MetadataFor(e)["@collection"]' + "\}" + ) + + # mark 'content' field as analyzed which enables full text search operations + self._index("content", FieldIndexing.SEARCH) + + # storing fields so when projection (e.g. ProjectInto) requests only those fields, + # data will come from index only, not from storage + self._store("Id", FieldStorage.YES) + self._store("display_name", FieldStorage.YES) + self._store("collection", FieldStorage.YES) +`} + + + +and query it using: + + +{`results = list( + session.query_index_type(Smart_Search, Smart_Search.Result) + .search("content", "Lau*") + .select_fields(Smart_Search.Projection) +) + +for result in results: + print(f"\{result.collection\}: \{result.display_name\}") + # Companies: Laughing Bacchus Wine Cellars + # Products: Laughing Lumberjack Lager + # Employees: Laura Callahan +`} + + + + + +## Remarks + + +Remember that all map functions **must** output objects +with an **identical** shape (the field names have to match). + + + + + diff --git a/versioned_docs/version-7.1/indexes/_number-type-conversion-csharp.mdx b/versioned_docs/version-7.1/indexes/_number-type-conversion-csharp.mdx new file mode 100644 index 0000000000..447ee94585 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_number-type-conversion-csharp.mdx @@ -0,0 +1,164 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The `TryConvert()` method can be used to safely convert values to numerical types. + +* Learn more about how numbers are stored in RavenDB [here](../server/kb/numbers-in-ravendb.mdx). + +* In this page: + * [Syntax](..\indexes\number-type-conversion.mdx#syntax) + * [Examples](..\indexes\number-type-conversion.mdx#examples) + + +## Syntax + +The following methods are used to convert values into one of the common primitive numerical +types - `int`, `long`, `float`, or `double`. They are called from within the +[Map](../indexes/map-indexes.mdx) or [Reduce](../indexes/map-reduce-indexes.mdx) functions of the +index. If the submitted value cannot be converted to the specified type, these methods return +`null`. + +In **LINQ syntax**, use `TryConvert()`: + + + +{`protected T? 
TryConvert(object value) +`} + + + +| Parameter | Type | Description | +|-----------|------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **T** | Generic type parameter | The numerical type to which you want to convert your value. Possible values:
<br/> - `int` <br/> - `long` <br/> - `float` <br/>
- `double` | +| **value** | `object` | The value you want to convert, such as a document field. If you pass a `string` or `object`, the method will attempt to parse it for a numerical value. | + +In **JavaScript syntax**, use `tryConvertToNumber()`. + + + +{`tryConvertToNumber(value) +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **value** | `object` | The value you want to convert, such as a document field. If you pass a `string` or `object`, the method will attempt to parse it for a numerical value. | + + + +## Examples + +The class `Item` has fields of type `int`, `long`, `float`, `double`, `string`, and an object +field of type [Company](../start/about-examples.mdx). The following indexes take an `Item` +entity and attempt to convert each of its fields into the corresponding type. In case of +failure, the field is indexed with value `-1` instead. + + + + +{`public class TryConvertIndexLINQ : AbstractIndexCreationTask +{ + public TryConvertIndexLINQ() + { + Map = items => from item in items + select new + { + DoubleValue = TryConvert(item.DoubleValue) ?? -1, + FloatValue = TryConvert(item.FloatValue) ?? -1, + LongValue = TryConvert(item.LongValue) ?? -1, + IntValue = TryConvert(item.IntValue) ?? -1, + StringValue = TryConvert(item.StringValue) ?? -1, + ObjectValue = TryConvert(item.ObjectValue) ?? -1 + }; + } +} +`} + + + + +{`public class TryConvertIndexJS : AbstractJavaScriptIndexCreationTask +{ + public TryConvertIndexJS() + { + Maps = new HashSet + { + @"map('Items', function (item) { + return { + DoubleValue: tryConvertToNumber(item.DoubleValue) || -1, + FloatValue: tryConvertToNumber(item.FloatValue) || -1, + LongValue: tryConvertToNumber(item.LongValue) || -1, + IntValue: tryConvertToNumber(item.IntValue) || -1, + StringValue: tryConvertToNumber(item.StringValue) || -1, + ObjectValue: tryConvertToNumber(item.ObjectValue) || -1 + }; + })" + }; + } +} +`} + + + + +{`public class Item +{ + public double DoubleValue { get; set; } + public float FloatValue { get; set; } + public long LongValue { get; set; } + public int IntValue { get; set; } + public string StringValue { get; set; } + public Company ObjectValue { get; set; } +} +`} + + + +This next index takes the [`string` field `Employee.Address.PostalCode`](../start/about-examples.mdx) +and attempts to convert it to `long`. + +The query below it finds all employees that do not have a valid `PostalCode` field - whether +because the employee does not have a postal code or because the value could not be converted to +a valid `long`. + + + +{`public class Employees_ByPostalCode : AbstractIndexCreationTask +\{ + public class Result \{ + public long PostalCode \{ get; set; \} + \} + + public Employees_ByPostalCode() + \{ + Map = employees => from employee in employees + select new Result + \{ + PostalCode = TryConvert(employee.Address.PostalCode) ?? 
-1 + \}; + \} +\} +`} + + +Query: + + + +{`List employeesWithoutPostalCode = session + .Query() + .Where(x => x.PostalCode == -1) + .OfType() + .ToList(); +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/_sorting-and-collation-csharp.mdx b/versioned_docs/version-7.1/indexes/_sorting-and-collation-csharp.mdx new file mode 100644 index 0000000000..f541b8aa47 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_sorting-and-collation-csharp.mdx @@ -0,0 +1,79 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Starting from version 4.0, RavenDB automatically determines sorting based on an indexed value. All values will have a capability to be sorted `lexicographically`. Numerical values will also be sortable by their `double` and `long` value. + +## Date types + +Dates are written to the index in a form which preserves lexicography order, and is readable by both human and machine (like so: `2011-04-04T11:28:46.0404749+03:00`). This requires no user intervention. + +## Example + +Please read our dedicated article describing `sorting` capabilities when queries are executed. It can be found [here](../indexes/querying/sorting.mdx). + +## Collation + +RavenDB supports using collations for documents sorting and indexing. You can setup a specific collation for an index field, so you can sort based of culture specific rules. + +The following is an example of an index definition which allows sorting based on the Swedish lexical sorting rules: + + + + +{`public class Products_ByName : AbstractIndexCreationTask +{ + public Products_ByName() + { + Map = products => from product in products + select new + { + product.Name + }; + + Analyzers.Add(x => x.Name, "Raven.Server.Documents.Indexes.Persistence.Lucene.Analyzers.Collation.Cultures.SvCollationAnalyzer, Raven.Server"); + } +} +`} + + + + +{`private class Products_ByName : AbstractJavaScriptIndexCreationTask +{ + public Products_ByName() + { + Maps = new HashSet + { + @"map('products', function (u){ + return { + Name: u.Name, + _: {$value: u.Name, $name:'AnalyzedName'} + }; + })", + }; + Fields = new Dictionary + { + { + "AnalyzedName", new IndexFieldOptions() + { + Indexing = FieldIndexing.Search, + Analyzer = "StandardAnalyzer" + } + } + }; + } + public class Result + { + public string AnalyzedName { get; set; } + } +} +`} + + + + +In general, you can sort using `Raven.Database.Indexing.Collation.Cultures.CollationAnalyzer`, and _all_ the cultures supported by the .NET framework are supported. + + diff --git a/versioned_docs/version-7.1/indexes/_sorting-and-collation-java.mdx b/versioned_docs/version-7.1/indexes/_sorting-and-collation-java.mdx new file mode 100644 index 0000000000..7b700a96f6 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_sorting-and-collation-java.mdx @@ -0,0 +1,65 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Starting from version 4.0, RavenDB automatically determines sorting based on an indexed value. All values will have a capability to be sorted `lexicographically`. Numerical values will also be sortable by their `double` and `long` value. + +## Date types + +Dates are written to the index in a form which preserves lexicography order, and is readable by both human and machine (like so: `2011-04-04T11:28:46.0404749+03:00`). This requires no user intervention. 
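
For instance, a plain string comparison of two such timestamps already agrees with chronological order (a minimal, framework-free illustration):

{`String earlier = "2011-04-04T11:28:46.0404749+03:00";
String later = "2011-04-05T09:15:00.0000000+03:00";

// Lexicographic comparison of the indexed form matches chronological order:
System.out.println(earlier.compareTo(later) < 0); // true
`}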
+ +## Example + +Please read our dedicated article describing `sorting` capabilities when queries are executed. It can be found [here](../indexes/querying/sorting.mdx). + +## Collation + +RavenDB supports using collations for documents sorting and indexing. You can setup a specific collation for an index field, so you can sort based of culture specific rules. + +The following is an example of an index definition which allows sorting based on the Swedish lexical sorting rules: + + + + +{`public static class Products_ByName extends AbstractIndexCreationTask { + public Products_ByName() { + map = "docs.Products.Select(product => new { " + + " Name = product.Name " + + "})"; + + analyze("Name", "Raven.Server.Documents.Indexes.Persistence.Lucene.Analyzers.Collation.Cultures.SvCollationAnalyzer, Raven.Server"); + } +} +`} + + + + +{`private static class Products_ByName extends AbstractJavaScriptIndexCreationTask { + public Products_ByName() { + setMaps(Sets.newHashSet("map('products', function (u){\\n" + + " return {\\n" + + " Name: u.Name,\\n" + + " _: {$value: u.Name, $name:'AnalyzedName'}\\n" + + " };\\n" + + " })")); + + IndexFieldOptions indexFieldOptions = new IndexFieldOptions(); + indexFieldOptions.setIndexing(FieldIndexing.SEARCH); + indexFieldOptions.setAnalyzer("StandardAnalyzer"); + + HashMap fields = new HashMap<>(); + fields.put("AnalyzedName", indexFieldOptions); + + setFields(fields); + } +} +`} + + + + +In general, you can sort using `Raven.Database.Indexing.Collation.Cultures.CollationAnalyzer`, and _all_ the cultures supported by the .NET framework are supported. + + diff --git a/versioned_docs/version-7.1/indexes/_stale-indexes-csharp.mdx b/versioned_docs/version-7.1/indexes/_stale-indexes-csharp.mdx new file mode 100644 index 0000000000..9d9113ff52 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_stale-indexes-csharp.mdx @@ -0,0 +1,123 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +RavenDB performs indexing in the background threads. The indexes start processing whenever the new data comes in, and the existing documents are updated or deleted. + +Running them in the background allows the server to return query results immediately even if a large number of documents has been just changed. + +However, in that case the index is stale until it processes them. + +The notion of stale indexes comes from the close observation of the way RavenDB is designed, and the assumption that the user should never suffer from assigning big tasks to +the server. As far as RavenDB is concerned, it is better to be stale than offline, and it will return query results even if it knows they may not be up-to-date. + +## Checking for Stale Results + +The querying response has an `IsStale` property indicating whether the results are stale: that is, whether there were any outstanding tasks against the index at the time of querying. + +It can be retrieved using the [Statistics](../client-api/session/querying/how-to-get-query-statistics.mdx) method: + + + +{`QueryStatistics stats; +List results = session.Query() + .Statistics(out stats) + .Where(x => x.PricePerUnit > 10) + .ToList(); + +if (stats.IsStale) +\{ + // Results are known to be stale +\} +`} + + + +When `IsStale` is true, some `Product` has been just added or changed, and the index didn't have enough time to fully update before the query. + +Typically, the index updates the records within milliseconds. 
But there are scenarios where you cannot work with possibly stale data. + +## Explicitly Waiting for Non-Stale Results + +In order to assure that a query won't return stale results, you can use a few approaches. For each of them the default wait timeout will be 15 seconds unless it was specified +differently by the user. It is handled by the server, the client won't send any additional requests meanwhile. If it exceeds the timeout then a `TimeoutException` will be thrown. + +### Customizing Single Query + +You have an option to instruct that a particular query should wait until the index is up-to-date: + + + +{`List results = session.Query() + .Customize(x => x.WaitForNonStaleResults(TimeSpan.FromSeconds(5))) + .Where(x => x.PricePerUnit > 10) + .ToList(); +`} + + + +### Customizing All Queries + +You can also apply such customization at the document store level so all queries will wait for non-stale results: + + + +{`store.OnBeforeQuery += (sender, beforeQueryExecutedArgs) => +\{ + beforeQueryExecutedArgs.QueryCustomization.WaitForNonStaleResults(); +\}; +`} + + + +### Waiting for Documents Stored in Session + +If you need to ensure the indexes process, the documents stored in the current session before `SaveChanges` returns, you can use: + + + +{`session.Advanced.WaitForIndexesAfterSaveChanges(); +`} + + + +It will wait for the indexes to catch up with the just saved changes. You can control the behavior and specify indexes you want to wait for using the optional +parameters in the following example: + + + +{`session.Advanced.WaitForIndexesAfterSaveChanges( + timeout: TimeSpan.FromSeconds(5), + throwOnTimeout: false, + indexes: new[] \{ "Products/ByName" \}); +`} + + + +The default parameters are: + + - timeout - null (will wait 15 seconds), + - throwOnTimeout - false, + - indexes - null (will wait for all indexes impacted by the changes made in the session) + + + +The indexing mechanism in RavenDB is built on [a BASE model](../client-api/faq/transaction-support.mdx#base-for-query-operations). +In order to avoid querying consistency pitfalls, you need to consider this at the data modeling phase. + +The usage of `WaitForNonStaleResults` at the query level is usually reasonable on only rare occasions. +Taking advantage of `WaitForNonStaleResults` customization applied to the all queries is very often a symptom of deeper issues in an application model and +misunderstanding of the querying concepts in RavenDB. + + + +## Cutoff Point + +If a query sent to the server specifies that it needs to wait for non-stale results then RavenDB sets the cutoff Etag for the staleness check. +It is the Etag of the last document (or document tombstone), from the collection(s) processed by the index, as of the query arrived to the server. +This way the server won't be waiting forever for the non-stale results even though documents are constantly updated meanwhile. + +If the last Etag processed by the index is greater than the cutoff, then the results are considered as non-stale. + + diff --git a/versioned_docs/version-7.1/indexes/_stale-indexes-java.mdx b/versioned_docs/version-7.1/indexes/_stale-indexes-java.mdx new file mode 100644 index 0000000000..e1872b081c --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_stale-indexes-java.mdx @@ -0,0 +1,126 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +RavenDB performs indexing in the background threads. 
The indexes start processing whenever the new data comes in, and the existing documents are updated or deleted. + +Running them in the background allows the server to return query results immediately even if a large number of documents has been just changed. + +However, in that case the index is stale until it processes them. + +The notion of stale indexes comes from the close observation of the way RavenDB is designed, and the assumption that the user should never suffer from assigning big tasks to +the server. As far as RavenDB is concerned, it is better to be stale than offline, and it will return query results even if it knows they may not be up-to-date. + +## Checking for Stale Results + +The querying response has an `isStale` method indicating whether the results are stale: that is, whether there were any outstanding tasks against the index at the time of querying. + +It can be retrieved using the [Statistics](../client-api/session/querying/how-to-get-query-statistics.mdx) method: + + + +{`Reference stats = new Reference<>(); + +List results = session.query(Product.class) + .statistics(stats) + .whereGreaterThan("PricePerUnit", 10) + .toList(); + +if (stats.value.isStale()) \{ + // results are known to be stale +\} +`} + + + +When `isStale` is true, some `Product` has been just added or changed, and the index didn't have enough time to fully update before the query. + +Typically, the index updates the records within milliseconds. But there are scenarios where you cannot work with possibly stale data. + +## Explicitly Waiting for Non-Stale Results + +In order to assure that a query won't return stale results, you can use a few approaches. For each of them the default wait timeout will be 15 seconds unless it was specified +differently by the user. It is handled by the server, the client won't send any additional requests meanwhile. If it exceeds the timeout then a `TimeoutException` will be thrown. + +### Customizing Single Query + +You have an option to instruct that a particular query should wait until the index is up-to-date: + + + +{`List results = session + .query(Product.class) + .waitForNonStaleResults(Duration.ofSeconds(5)) + .whereGreaterThan("PricePerUnit", 10) + .toList(); +`} + + + +### Customizing All Queries + +You can also apply such customization at the document store level so all queries will wait for non-stale results: + + + +{`store.addBeforeQueryListener(((sender, event) -> \{ + event.getQueryCustomization().waitForNonStaleResults(); +\})); +`} + + + +### Waiting for Documents Stored in Session + +If you need to ensure the indexes process, the documents stored in the current session before `saveChanges` returns, you can use: + + + +{`session.advanced().waitForIndexesAfterSaveChanges(); +`} + + + +It will wait for the indexes to catch up with the just saved changes. You can control the behavior and specify indexes you want to wait for using the optional +parameters in the following example: + + + +{`session + .advanced() + .waitForIndexesAfterSaveChanges(builder -> \{ + builder.withTimeout(Duration.ofSeconds(5)) + .throwOnTimeout(false) + .waitForIndexes("Products/ByName"); + \}); +`} + + + +The default parameters are: + + - timeout - null (will wait 15 seconds), + - throwOnTimeout - false, + - indexes - null (will wait for all indexes impacted by the changes made in the session) + + + +The indexing mechanism in RavenDB is built on [a BASE model](../client-api/faq/transaction-support.mdx#base-for-query-operations). 
+In order to avoid querying consistency pitfalls, you need to consider this at the data modeling phase. + +The usage of `WaitForNonStaleResults` at the query level is usually reasonable on only rare occasions. +Taking advantage of `WaitForNonStaleResults` customization applied to the all queries is very often a symptom of deeper issues in an application model and +misunderstanding of the querying concepts in RavenDB. + + + +## Cutoff Point + +If a query sent to the server specifies that it needs to wait for non-stale results then RavenDB sets the cutoff Etag for the staleness check. +It is the Etag of the last document (or document tombstone), from the collection(s) processed by the index, as of the query arrived to the server. +This way the server won't be waiting forever for the non-stale results even though documents are constantly updated meanwhile. + +If the last Etag processed by the index is greater than the cutoff, then the results are considered as non-stale. + + diff --git a/versioned_docs/version-7.1/indexes/_stale-indexes-nodejs.mdx b/versioned_docs/version-7.1/indexes/_stale-indexes-nodejs.mdx new file mode 100644 index 0000000000..85b9539801 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_stale-indexes-nodejs.mdx @@ -0,0 +1,124 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +RavenDB performs indexing in the background threads. The indexes start processing whenever the new data comes in, and the existing documents are updated or deleted. + +Running them in the background allows the server to return query results immediately even if a large number of documents has been just changed. + +However, in that case the index is stale until it processes them. + +The notion of stale indexes comes from the close observation of the way RavenDB is designed, and the assumption that the user should never suffer from assigning big tasks to +the server. As far as RavenDB is concerned, it is better to be stale than offline, and it will return query results even if it knows they may not be up-to-date. + +## Checking for Stale Results + +The querying response has an `isStale` method indicating whether the results are stale: that is, whether there were any outstanding tasks against the index at the time of querying. + +It can be retrieved using the [Statistics](../client-api/session/querying/how-to-get-query-statistics.mdx) method: + + + +{`let stats; + +const results = await session.query(Product) + .statistics($ => stats = $) + .whereGreaterThan("PricePerUnit", 10) + .all(); + +if (stats.stale) \{ + // results are known to be stale +\} +`} + + + +When `isStale` is true, some `Product` has been just added or changed, and the index didn't have enough time to fully update before the query. + +Typically, the index updates the records within milliseconds. But there are scenarios where you cannot work with possibly stale data. + +## Explicitly Waiting for Non-Stale Results + +In order to assure that a query won't return stale results, you can use a few approaches. For each of them the default wait timeout will be 15 seconds unless it was specified +differently by the user. It is handled by the server, the client won't send any additional requests meanwhile. If it exceeds the timeout then a `TimeoutException` will be thrown. 
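
For example, a query that opts into waiting (see the sections below) can guard against this timeout. A minimal sketch; the exact error type surfaced by the client is an assumption:

{`try {
    const results = await session.query(Product)
        .waitForNonStaleResults(5000) // wait up to 5 seconds
        .whereGreaterThan("PricePerUnit", 10)
        .all();
} catch (err) {
    // The server could not return non-stale results within the timeout
    console.error("Timed out waiting for a non-stale index:", err.message);
}
`}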
+ +### Customizing Single Query + +You have an option to instruct that a particular query should wait until the index is up-to-date: + + + +{`const results = session + .query(Product) + .waitForNonStaleResults(5000) + .whereGreaterThan("PricePerUnit", 10) + .all(); +`} + + + +### Customizing All Queries + +You can also apply such customization at the document store level so all queries will wait for non-stale results: + + + +{`store.addSessionListener("beforeQuery", event => \{ + event.queryCustomization.waitForNonStaleResults(); +\}); +`} + + + +### Waiting for Documents Stored in Session + +If you need to ensure the indexes process, the documents stored in the current session before `saveChanges` returns, you can use: + + + +{`session.advanced.waitForIndexesAfterSaveChanges(); +`} + + + +It will wait for the indexes to catch up with the just saved changes. You can control the behavior and specify indexes you want to wait for using the optional +parameters in the following example: + + + +{`session.advanced.waitForIndexesAfterSaveChanges(\{ + withTimeout: 5000, + throwOnTimeout: false, + waitForIndexes: "Products/ByName" +\}); +`} + + + +The default parameters are: + + - timeout - null (will wait 15 seconds), + - throwOnTimeout - false, + - indexes - null (will wait for all indexes impacted by the changes made in the session) + + + +The indexing mechanism in RavenDB is built on [a BASE model](../client-api/faq/transaction-support.mdx#base-for-query-operations). +In order to avoid querying consistency pitfalls, you need to consider this at the data modeling phase. + +The usage of `WaitForNonStaleResults` at the query level is usually reasonable on only rare occasions. +Taking advantage of `WaitForNonStaleResults` customization applied to the all queries is very often a symptom of deeper issues in an application model and +misunderstanding of the querying concepts in RavenDB. + + + +## Cutoff Point + +If a query sent to the server specifies that it needs to wait for non-stale results then RavenDB sets the cutoff Etag for the staleness check. +It is the Etag of the last document (or document tombstone), from the collection(s) processed by the index, as of the query arrived to the server. +This way the server won't be waiting forever for the non-stale results even though documents are constantly updated meanwhile. + +If the last Etag processed by the index is greater than the cutoff, then the results are considered as non-stale. + + diff --git a/versioned_docs/version-7.1/indexes/_storing-data-in-index-csharp.mdx b/versioned_docs/version-7.1/indexes/_storing-data-in-index-csharp.mdx new file mode 100644 index 0000000000..d588e9caf4 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_storing-data-in-index-csharp.mdx @@ -0,0 +1,409 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB allows you to store data in a static index. + +* When data is stored in the index, it can be retrieved directly from the index when querying the index and [projecting selected fields](../indexes/querying/projections.mdx), + without requiring the server to load the original document from storage. + This behavior can be configured at the query level. See [Projection behavior with a static-index](../indexes/querying/projections.mdx#projection-behavior-with-a-static-index) for details. 
+ +* In this article: + * [What content is stored in the index](../indexes/storing-data-in-index.mdx#what-content-is-stored-in-the-index) + * [When and why to store data in an index](../indexes/storing-data-in-index.mdx#when-and-why-to-store-data-in-an-index) + * [Storing data in index - from the Client API](../indexes/storing-data-in-index.mdx#storing-data-in-index---from-the-client-api) + * [Storing data in index - from the Studio](../indexes/storing-data-in-index.mdx#storing-data-in-index---from-the-studio) + + +## What content is stored in the index + +* A static index is defined by its map function which determines the content of each **index-entry**. + Typically, a single index-entry is created for each document from the indexed source collection - + unless using a [Fanout index](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document), which produces multiple entries per document. + +* Each index-entry consists of a set of **index-fields**, populated with values as defined in the map function. + The content of an index-field can be a direct value from the source document field, + or a computed value based on the source document's content. + +* You can configure an [Analyzer](../indexes/using-analyzers.mdx) (either a custom one or one of RavenDB’s built-in analyzers) to tokenize the content of an index-field for [Full-text search](../indexes/querying/searching.mdx). + The tokens (terms) created by the analyzer form the searchable content of the index. When querying the index, you can filter documents based on these terms. + +* **RavenDB allows you to store the original index-field value in the index**. + **This stored value is the raw content produced by the map function, BEFORE it is tokenized by the analyzer**. + * The tokens (terms) generated by the analyzer are searchable but not stored. + * The index-field values, if explicitly marked as stored, are retrievable when [Projecting index query results](../indexes/querying/projections.mdx) + (by default they are not stored). + +* This behavior is supported by both Lucene and Corax search engines. + + + +## When and why to store data in an index + +* **Store a field in the index if**: + + * **You want to project that field without loading the full document.** + Storing data in a static index allows RavenDB to retrieve that data directly from the index when projecting fields in a query, instead of loading the original document from storage. + If all projected fields are stored, the document will not be loaded - values are fetched directly from the index, resulting in faster projections and better performance. + * **The index-field is a computed value that you want to return in the query results.** + Normally, querying an index returns matching documents. + But if you're projecting a computed index-field that is Not stored, + you'll need to re-calculate the computed value manually from the documents returned by the query. + Storing the computed field avoids this extra step. + +* **You do not need to store a field in the index in order to**: + + * Filter by the field in a query. + * Perform full-text search on the field. + +* **Disadvantage of storing data in the index**: + + * Increased disk space usage - stored fields take up additional space and increase index size. + + + +## Storing data in index - from the Client API + +To store an index-field in a static index, add it to the `Stores` dictionary with `FieldStorage.Yes` in the index definition (this syntax applies to LINQ indexes). 
+The default is `FieldStorage.No`. +**Index example:** + + + + +{`public class QuantityOrdered_ByCompany : + AbstractIndexCreationTask +{ + // The index-entry: + public class IndexEntry + { + // The index-fields: + public string Company { get; set; } + public string CompanyName { get; set; } + public int TotalItemsOrdered { get; set; } + } + + public QuantityOrdered_ByCompany() + { + Map = orders => from order in orders + select new IndexEntry + { + // 'Company' is a SIMPLE index-field, + // its value is taken directly from the Order document: + Company = order.Company, + + // 'CompanyName' is also considered a simple index-field, + // its value is taken from the related Company document: + CompanyName = LoadDocument(order.Company).Name, + + // 'TotalItemsOrdered' is a COMPUTED index-field: + // (the total quantity of items ordered in an Order document) + TotalItemsOrdered = order.Lines.Sum(orderLine => orderLine.Quantity) + }; + + // Store the calculated 'TotalItemsOrdered' index-field in the index: + // ================================================================== + Stores.Add(x => x.TotalItemsOrdered, FieldStorage.Yes); + + // You can use an analyzer to tokenize the 'CompanyName' index-field for full-text search: + // ======================================================================================= + Analyzers.Add(x => x.CompanyName, "SimpleAnalyzer"); + + // Store the original value of \`CompanyName\` in the index (BEFORE tokenization): + // ============================================================================= + Stores.Add(x => x.CompanyName, FieldStorage.Yes); + } +} +`} + + + + +{`public class QuantityOrdered_ByCompany_JS : AbstractJavaScriptIndexCreationTask +{ + public QuantityOrdered_ByCompany_JS() + { + Maps = new HashSet() + { + @"map('orders', function(order) { + let company = load(order.Company, 'Companies') + return { + Company: order.Company, + CompanyName: company.Name, + TotalItemsOrdered: order.Lines.reduce(function(total, line) { + return total + line.Quantity; + }, 0) + }; + })" + }; + + Fields = new Dictionary + { + { + "TotalItemsOrdered", new IndexFieldOptions + { + Storage = FieldStorage.Yes + } + }, + { + "CompanyName", new IndexFieldOptions + { + Storage = FieldStorage.Yes, + Analyzer = "SimpleAnalyzer" + } + } + }; + } +} +`} + + + + +{`var indexDefinition = new IndexDefinition +{ + Name = "QuantityOrdered/ByCompany", + + Maps = + { + @"from order in docs.Orders + select new + { + Company = order.Company, + CompanyName = LoadDocument(order.Company, ""Companies"").Name, + TotalItemsOrdered = order.Lines.Sum(orderLine => orderLine.Quantity) + }" + }, + + Fields = new Dictionary + { + { + "TotalItemsOrdered", new IndexFieldOptions + { + Storage = FieldStorage.Yes + } + }, + { + "CompanyName", new IndexFieldOptions + { + Storage = FieldStorage.Yes, + Analyzer = "SimpleAnalyzer" + } + } + } +}; + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +`} + + + +
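
Before it can be queried, the index must be deployed to the server. A minimal sketch, assuming `store` is an initialized `IDocumentStore`:

{`// Deploy the strongly-typed index defined above:
new QuantityOrdered_ByCompany().Execute(store);

// Alternatively, deploy all index-creation tasks found in an assembly at once:
// IndexCreation.CreateIndexes(typeof(QuantityOrdered_ByCompany).Assembly, store);
`}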
+**Querying the index and projecting results:** + + + +* In this query, the projected results are defined by the custom class `NumberOfItemsOrdered`. + +* By default, the results will be retrieved from the index, because this class contains a single field `TotalItemsOrdered `, which is stored in the index. + The server does Not need to load the original document from storage. + This behavior can be configured at the query level. See [Projection behavior with a static-index](../indexes/querying/projections.mdx#projection-behavior-with-a-static-index) for details. + + + + +{`using (var session = store.OpenSession()) +{ + List itemsOrdered = session + .Query() + .Where(order => order.Company == "companies/90-A") + // Project results into a custom class: + .ProjectInto() + .ToList(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + List itemsOrdered = await asyncSession + .Query() + .Where(order => order.Company == "companies/90-A") + .ProjectInto() + .ToListAsync(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List itemsOrdered = session.Advanced + .DocumentQuery() + .WhereEquals(order => order.Company, "companies/90-A") + .SelectFields() + .ToList(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + List itemsOrdered = await asyncSession.Advanced + .AsyncDocumentQuery() + .WhereEquals(order => order.Company, "companies/90-A") + .SelectFields() + .ToListAsync(); +} +`} + + + + +{`public class NumberOfItemsOrdered +{ + // This field was stored in the index definition + public int TotalItemsOrdered { get; set; } +} +`} + + + + +{`from index "QuantityOrdered/ByCompany" +where Company = "companies/90-A" +select TotalItemsOrdered +`} + + + + + + + +* In this query, the projected results are defined by the custom class `ProjectedDetails`. + +* In this case, some of the fields in this class are Not stored in the index, so by default, + the server does need to load the original document from storage to complete the projection. + This behavior can be configured at the query level. See [Projection behavior with a static-index](../indexes/querying/projections.mdx#projection-behavior-with-a-static-index) for details. 
+ + + + +{`using (var session = store.OpenSession()) +{ + List orders = session + .Query() + .Where(order => order.Company == "companies/90-A") + // Project results into a custom class: + .ProjectInto() + .ToList(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + List orders = await asyncSession + .Query() + .Where(order => order.Company == "companies/90-A") + .ProjectInto() + .ToListAsync(); +} +`} + + + + +{`using (var session = store.OpenSession()) +{ + List orders = session.Advanced + .DocumentQuery() + .WhereEquals(order => order.Company, "companies/90-A") + .SelectFields() + .ToList(); +} +`} + + + + +{`using (var asyncSession = store.OpenAsyncSession()) +{ + List orders = await asyncSession.Advanced + .AsyncDocumentQuery() + .WhereEquals(order => order.Company, "companies/90-A") + .SelectFields() + .ToListAsync(); +} +`} + + + + +{`public class ProjectedDetails +{ + // This field was Not stored in the index definition + public string Company { get; set; } + // This field was Not stored in the index definition + public DateTime OrderedAt { get; set; } + // This field was stored in the index definition + public int TotalItemsOrdered { get; set; } +} +`} + + + + +{`from index "QuantityOrdered/ByCompany" +where Company = "companies/90-A" +select Company, OrderedAt, TotalItemsOrdered +`} + + + + + + + +## Storing data in index - from the Studio + +To configure index-fields from the Studio, open the _Edit Index_ view: + +![The index](./assets/store-field-in-index-1.png) + +1. This is the index from the [example above](../indexes/storing-data-in-index.mdx#storing-data-in-index---from-the-client-api). +2. These are the index-fields defined in the index map function. +Scroll down to configure each index-field: + +![Configure index fields](./assets/store-field-in-index-2.png) + +1. Open the _Fields_ tab. +2. Enter the name of the index-field. Here we configure index-field `TotalItemsOrdered`. +3. Select _Yes_ from the dropdown to store the field in the index. +4. Here we configure index-field `CompanyName`. +5. This index-field is stored in the index and also configured for full-text search. +When querying the index from the Studio, +you can choose to display the stored index fields in the Results view: + +![Display the stored fields](./assets/store-field-in-index-3.png) + +1. This is the query from the [example above](../indexes/storing-data-in-index.mdx#query-the-index). +2. Open the _Settings_ options. +3. Toggle ON _Show stored index fields only_. +4. When executing the query, + the results will display the stored index-fields for each object returned by the query. + + + + diff --git a/versioned_docs/version-7.1/indexes/_storing-data-in-index-java.mdx b/versioned_docs/version-7.1/indexes/_storing-data-in-index-java.mdx new file mode 100644 index 0000000000..e50ba911c0 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_storing-data-in-index-java.mdx @@ -0,0 +1,86 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Once the [tokenization and analysis](../indexes/using-analyzers.mdx) process is completed, +the resulting tokens created by the used analyzer are stored in the index. +By default, tokens saved in the index are available for searching, but their original +field values are not stored. + +Lucene allows you to store the original field text (before it is analyzed) as well. 
+ +* In this page: + * [Storing Data in Index](../indexes/storing-data-in-index.mdx#storing-data-in-index) + + + +## Storing Data in Index + +Lucene's original field text storage feature is exposed in the index definition object as +the `Storage` property of the `IndexFieldOptions`. + +When the original values are stored in the index, they become available for retrieval via +[projections](../indexes/querying/projections.mdx). + + + + +{`public static class Employees_ByFirstAndLastName extends AbstractIndexCreationTask { + public Employees_ByFirstAndLastName() { + map = "docs.Employees.Select(employee => new {" + + " FirstName = employee.FirstName," + + " LastName = employee.LastName" + + "})"; + + store("FirstName", FieldStorage.YES); + store("LastName", FieldStorage.YES); + } +} +`} + + + + +{`IndexDefinition indexDefinition = new IndexDefinition(); +indexDefinition.setName("Employees_ByFirstAndLastName"); +indexDefinition.setMaps(Collections.singleton("docs.Employees.Select(employee => new {" + + " FirstName = employee.FirstName," + + " LastName = employee.LastName" + + "})")); + +java.util.Map fields = new HashMap<>(); +indexDefinition.setFields(fields); + +IndexFieldOptions firstNameOptions = new IndexFieldOptions(); +firstNameOptions.setStorage(FieldStorage.YES); +fields.put("FirstName", firstNameOptions); + +IndexFieldOptions lastNameOptions = new IndexFieldOptions(); +lastNameOptions.setStorage(FieldStorage.YES); +fields.put("LastName", lastNameOptions); + +store + .maintenance() + .send(new PutIndexesOperation(indexDefinition)); +`} + + + + + +The default `Storage` value for each field is `FieldStorage.NO`. +Keep in mind that storing fields will increase disk space usage. + + + +If **the projection requires only the fields that are stored**, the document will +not be loaded from the storage and the query results will be retrieved directly from the index. +This can increase query performance at the cost of disk space used. + + + + + diff --git a/versioned_docs/version-7.1/indexes/_storing-data-in-index-nodejs.mdx b/versioned_docs/version-7.1/indexes/_storing-data-in-index-nodejs.mdx new file mode 100644 index 0000000000..b4f67e3e1e --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_storing-data-in-index-nodejs.mdx @@ -0,0 +1,81 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Once the [tokenization and analysis](../indexes/using-analyzers.mdx) process is completed, +the resulting tokens created by the used analyzer are stored in the index. +By default, tokens saved in the index are available for searching, but their original +field values are not stored. + +Lucene allows you to store the original field text (before it is analyzed) as well. + +* In this page: + * [Storing Data in Index](../indexes/storing-data-in-index.mdx#storing-data-in-index) + + + +## Storing Data in Index + +Lucene's original field text storage feature is exposed in the index definition object as +the `Storage` property of the `IndexFieldOptions`. + +When the original values are stored in the index, they become available for retrieval via +[projections](../indexes/querying/projections.mdx). 
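
For example, after deploying the `Employees/ByFirstAndLastName` index defined below, a projection of the stored fields is served straight from the index (a minimal sketch):

{`// Project the stored fields; the values come from the index itself,
// without loading the Employee documents from storage:
const names = await session
    .query({ indexName: "Employees/ByFirstAndLastName" })
    .selectFields(["FirstName", "LastName"])
    .all();
`}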
+ + + + +{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask { + constructor() { + super(); + + this.map = \`docs.Employees.Select(employee => new { + FirstName = employee.FirstName, + LastName = employee.LastName + })\`; + + this.store("FirstName", "Yes"); + this.store("LastName", "Yes"); + } +} +`} + + + + +{`const indexDefinition = new IndexDefinition(); +indexDefinition.name = "Employees_ByFirstAndLastName"; +indexDefinition.maps = new Set([ + "docs.Employees.Select(employee => new {" + + " FirstName = employee.FirstName," + + " LastName = employee.LastName" + + "})" +]); +indexDefinition.fields = { + "FirstName": { storage: "Yes" }, + "LastName": { storage: "Yes" } +}; + +await store.maintenance.send(new PutIndexesOperation(indexDefinition)); +`} + + + + + +The default `Storage` value for each field is `"No"`. +Keep in mind that storing fields will increase disk space usage. + + + +If **the projection requires only the fields that are stored**, the document will +not be loaded from the storage and the query results will be retrieved directly from the index. +This can increase query performance at the cost of disk space used. + + + + + diff --git a/versioned_docs/version-7.1/indexes/_storing-data-in-index-php.mdx b/versioned_docs/version-7.1/indexes/_storing-data-in-index-php.mdx new file mode 100644 index 0000000000..07873499c9 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_storing-data-in-index-php.mdx @@ -0,0 +1,91 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Once the [tokenization and analysis](../indexes/using-analyzers.mdx) process is completed, +the resulting tokens created by the used analyzer are stored in the index. +By default, tokens saved in the index are available for searching, but their original +field values are not stored. + +Lucene allows you to store the original field text (before it is analyzed) as well. + +* In this page: + * [Storing Data in Index](../indexes/storing-data-in-index.mdx#storing-data-in-index) + + + +## Storing Data in Index + +Lucene's original field text storage feature is exposed in the index definition object as +the `storage` property of the `IndexFieldOptions`. + +When the original values are stored in the index, they become available for retrieval via +[projections](../indexes/querying/projections.mdx). + + + + +{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Employees.Select(employee => new {" . + " FirstName = employee.FirstName," . + " LastName = employee.LastName" . + "})"; + + $this->store('FirstName', FieldStorage::yes()); + $this->store('LastName', FieldStorage::yes()); + } +} +`} + + + + +{`$indexDefinition = new IndexDefinition(); +$indexDefinition->setName("Employees_ByFirstAndLastName"); +$indexDefinition->setMaps([ + "docs.Employees.Select(employee => new {" . + " FirstName = employee.FirstName," . + " LastName = employee.LastName" . 
+ "})" +]); + +$fields = []; + +$firstNameOptions = new IndexFieldOptions(); +$firstNameOptions->setStorage(FieldStorage::yes()); +$fields['FirstName'] = $firstNameOptions; + +$lastNameOptions = new IndexFieldOptions(); +$lastNameOptions->setStorage(FieldStorage::yes()); +$fields['LastName'] = $lastNameOptions; + +$indexDefinition->setFields($fields); + +$store->maintenance()->send(new PutIndexesOperation($indexDefinition)); +`} + + + + + +The default `storage` value for each field is `FieldStorage.NO`. +Keep in mind that storing fields will increase disk space usage. + + + +If **the projection requires only the fields that are stored**, the document will +not be loaded from the storage and the query results will be retrieved directly from the index. +This can increase query performance at the cost of disk space used. + + + + + diff --git a/versioned_docs/version-7.1/indexes/_storing-data-in-index-python.mdx b/versioned_docs/version-7.1/indexes/_storing-data-in-index-python.mdx new file mode 100644 index 0000000000..c38dc2f24c --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_storing-data-in-index-python.mdx @@ -0,0 +1,81 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +Once the [tokenization and analysis](../indexes/using-analyzers.mdx) process is completed, +the resulting tokens created by the used analyzer are stored in the index. +By default, tokens saved in the index are available for searching, but their original +field values are not stored. + +Lucene allows you to store the original field text (before it is analyzed) as well. + +* In this page: + * [Storing Data in Index](../indexes/storing-data-in-index.mdx#storing-data-in-index) + + + +## Storing Data in Index + +Lucene's original field text storage feature is exposed in the index definition object as +the `storage` property of the `IndexFieldOptions`. + +When the original values are stored in the index, they become available for retrieval via +[projections](../indexes/querying/projections.mdx). + + + + +{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = "from employee in docs.Employees select new { employee.FirstName, employee.LastName }" + self._store("FirstName", FieldStorage.YES) + self._store("LastName", FieldStorage.YES) +`} + + + + +{`store.maintenance.send( + PutIndexesOperation( + IndexDefinition( + name="Employees_ByFirstAndLastName", + maps={ + """ + from employee in docs.Employees + select new + { + employee.FirstName, + employee.LastName + } + """ + }, + fields={ + "FirstName": IndexFieldOptions(storage=FieldStorage.YES), + "LastName": IndexFieldOptions(storage=FieldStorage.YES), + }, + ) + ) +) +`} + + + + + +The default `storage` value for each field is `FieldStorage.NO`. +Keep in mind that storing fields will increase disk space usage. + + + +If **the projection requires only the fields that are stored**, the document will +not be loaded from the storage and the query results will be retrieved directly from the index. +This can increase query performance at the cost of disk space used. 
+ + + + + diff --git a/versioned_docs/version-7.1/indexes/_using-analyzers-csharp.mdx b/versioned_docs/version-7.1/indexes/_using-analyzers-csharp.mdx new file mode 100644 index 0000000000..0c33a1d29f --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_using-analyzers-csharp.mdx @@ -0,0 +1,691 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* RavenDB supports fast and efficient querying through indexes, + which are powered by either [Lucene](http://lucene.apache.org/) or [Corax](../indexes/search-engine/corax.mdx), + a high-performance search engine developed specifically for RavenDB. + (You can choose which search engine to use for each index). + +* **Analyzers** are components used in the indexing and querying processes of the search engines, + controlling how data is indexed and how search queries interact with the indexed data. + +* **The Corax search engine fully respects and supports all Lucene analyzers**, + ensuring that existing configurations work seamlessly, + while also leveraging Corax's optimized performance for faster query execution. + +* This means you can use any analyzer with either search engine, + giving you full flexibility in configuring your indexes. + +* In this page: + * [Understanding the role of analyzers](../indexes/using-analyzers.mdx#understanding-the-role-of-analyzers) + * [Analyzers available in RavenDB](../indexes/using-analyzers.mdx#analyzers-available-in-ravendb) + * [Setting analyzer for index-field](../indexes/using-analyzers.mdx#setting-analyzer-for-index-field) + * [RavenDB's default analyzers](../indexes/using-analyzers.mdx#ravendb) + * [Disabling indexing for index-field](../indexes/using-analyzers.mdx#disabling-indexing-for-index-field) + * [Creating custom analyzers](../indexes/using-analyzers.mdx#creating-custom-analyzers) + * [Viewing the indexed terms](../indexes/using-analyzers.mdx#viewing-the-indexed-terms) + + +## Understanding the role of analyzers + + + +###### Analyzers in the index definition: +The [index definition](../studio/database/indexes/indexes-overview.mdx#index-definition) determines what content from the documents will be indexed for each index-field. +For each index-field you can specify a particular analyzer to process the content of that field. + + + + +###### Analyzers at indexing time: +During the [indexing process](../studio/database/indexes/indexes-overview.mdx#indexing-process), +the content to be indexed is processed and broken down into smaller components called tokens (or terms) through a process known as **tokenization**. +This is done by the **Analyzers**, which are objects that determine how text is split into tokens. + +Different analyzers vary in how they split the text stream ("tokenize"), and how they process those tokens after tokenization. +Analyzers can apply additional transformations, such as converting text to lowercase, removing stop words +(e.g., "the," "and"), or applying stemming (reducing words to their base forms, e.g., "running" → "run"). + +The resulting tokens are then stored in the index for each index-field and can later be searched by queries, +enabling [Full-text search](../indexes/querying/searching.mdx). 
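+
+For illustration, here is roughly how you can inspect the tokens an analyzer produces.
+This is a minimal standalone sketch, assuming the Lucene.Net 3.x API (`StandardAnalyzer`,
+`TokenStream`, `ITermAttribute`); it is not a RavenDB Client API call:
+
+
+
+{`using System;
+using System.IO;
+using Lucene.Net.Analysis;
+using Lucene.Net.Analysis.Standard;
+using Lucene.Net.Analysis.Tokenattributes;
+
+// Run an analyzer directly over a sample text:
+var analyzer = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30);
+TokenStream stream = analyzer.TokenStream("Content",
+    new StringReader("The quick brown fox jumped over the lazy dogs"));
+
+// Iterate over the resulting tokens (terms):
+var termAttribute = stream.AddAttribute<ITermAttribute>();
+while (stream.IncrementToken())
+\{
+    // Prints: quick, brown, fox, jumped, over, lazy, dogs
+    Console.WriteLine(termAttribute.Term);
+\}
+`}
+
+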
+ + + + +###### Analyzers at query time: +When running a [Full-text search with a dynamic query](../client-api/session/querying/text-search/full-text-search.mdx), +the auto-index created by the server breaks down the text of the searched document field using the [default search analyzer](../indexes/using-analyzers.mdx#using-the-default-search-analyzer). + +When running a [Full-text search on a static-index](../indexes/querying/query-index.mdx), +the **same analyzer** used to tokenize field content at indexing time is typically applied +to process the terms provided in the full-text search query before they are sent to the search engine to retrieve matching documents. + +There are two exceptions to this rule: + + 1. When setting the [NGramAnalyzer](../indexes/using-analyzers.mdx#analyzers-that-tokenize-according-to-the-defined-number-of-characters) in the index definition, + it tokenizes the index field at indexing time. + However, at query time, when performing a full-text search on that field, + the default [RavenStandardAnalyzer](../indexes/using-analyzers.mdx#using-the-default-search-analyzer) is used to tokenize the search term from the query predicate. + + Currently, for query time, you cannot specify a different analyzer than the one defined in the index definition, + so to address this issue, you have two options: + * Increase the [MaxGram](../server/configuration/indexing-configuration.mdx#indexingluceneanalyzersngrammaxgram) value to generate larger tokens during indexing (when using Lucene). + * Use a different analyzer other than _NGramAnalyzer_ that better matches your requirements. + + 2. Behavior is also different when making a full-text search with wildcards in the search terms. + This is explained in detail in [Searching with wildcards](../indexes/querying/searching.mdx#searching-with-wildcards). + + + + +###### Full-text search: +In most cases, Lucene's [StandardAnalyzer](../indexes/using-analyzers.mdx#analyzers-that-remove-common-stop-words) is sufficient for full-text searches. +For languages other than English, or when a custom analysis process is needed, you can provide your own [Custom analyzer](../indexes/using-analyzers.mdx#creating-custom-analyzers). +It is straightforward and may already be available as a contrib package for Lucene. + +You can also configure a specific collation for an index field to sort based on culture specific rules. +Learn more in [Sorting and Collation](../indexes/sorting-and-collation.mdx#collation). + + + + +## Analyzers available in RavenDB + +* RavenDB offers the following Lucene analyzers 'out of the box' (their details are listed below): + + * **StandardAnalyzer** + * **StopAnalyzer** + * **SimpleAnalyzer** + * **WhitespaceAnalyzer** + * **LowerCaseWhitespaceAnalyzer** + * **KeywordAnalyzer** + * **NGramAnalyzer** + +* If needed, you can create your own [Customized Analyzers](../indexes/using-analyzers.mdx#creating-custom-analyzers). + +* To assign the analyzer of your choice to a specific index-field, + see: [Setting analyzer for index-field](../indexes/using-analyzers.mdx#setting-analyzer-for-index-field). + +* When no analyzer is explicitly assigned to an index-field in the index definition, + RavenDB will use its [Default Analyzers](../indexes/using-analyzers.mdx#ravendb) to process and tokenize the content of a field. 
+ +All examples below use the following text: +`The quick brown fox jumped over the lazy dogs, Bob@hotmail.com 123432.` + + + +##### Analyzers that remove common "stop words": + + +* **StandardAnalyzer**, which is Lucene's default, will produce the following tokens: + + `[quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob@hotmail.com] [123432]` + + This analyzer: + + * Removes common "stop words". + * Converts text to lowercase, ensuring searches are case-insensitive. + * Separates on whitespace and punctuation that is followed by whitespace - a dot that is not followed by whitespace is considered part of the token. + * Email addresses and internet hostnames are treated as a single token. + * Splits words at hyphens, unless there's a number in the token, in which case the whole token is interpreted as a product number and is not split. + + + + +* **StopAnalyzer**, which works similarly, will produce the following tokens: + + `[quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob] [hotmail] [com]` + + This analyzer: + + * Removes common "stop words". + * Converts text to lowercase, ensuring searches are case-insensitive. + * Separates and tokenizes text based on whitespace without performing light stemming. + * Removes numbers and symbols, separating tokens at those positions. + This means email and web addresses are split into separate tokens. + + + + + +* **Stop words**: + + * [Stop words](https://en.wikipedia.org/wiki/Stop_word) (e.g. the, it, a, is, this, who, that...) + are often removed to narrow search results by focusing on less frequently used words. + * If you want to include words such as IT (Information Technology), + be aware that analyzers removing common stop words may treat IT as a stop word and exclude it from the resulting terms. + This can also affect acronyms such as WHO (World Health Organization) or names such as "The Who" or "The IT Crowd". + * To avoid excluding acronyms, you can either spell out the full title instead of abbreviating it + or use an [Analyzer that doesn't remove stop words](../indexes/using-analyzers.mdx#analyzers-that-do-not-remove-common-stop-words). + + + + + + +##### Analyzers that do not remove common "stop words" + + +* **SimpleAnalyzer** will produce the following tokens: + + `[the] [quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob] [hotmail] [com]` + + This analyzer: + + * Includes common "stop words". + * Converts text to lowercase, ensuring searches are case-insensitive. + * Separates on white spaces. + * Will tokenize on all non-alpha characters. + * Removes numbers and symbols, separating tokens at those positions. + This means email and web addresses are split into separate tokens. + + + + +* **WhitespaceAnalyzer** will produce the following tokens: + + `[The] [quick] [brown] [fox] [jumped] [over] [the] [lazy] [dogs,] [Bob@hotmail.com] [123432.]` + + This analyzer: + + * Includes common "stop words". + * Tokenizes text by separating it on whitespaces. + * Preserves upper/lower case in text, which means that searches will be case-sensitive. + * Keeps forms like email addresses, phone numbers, and web addresses whole. + + + + +* **LowerCaseWhitespaceAnalyzer** will produce the following tokens: + + `[the] [quick] [brown] [fox] [jumped] [over] [lazy] [dogs,] [bob@hotmail.com] [123432.]` + + This analyzer: + + * Includes common "stop words". + * Tokenizes text by separating it on whitespaces. + * Converts text to lowercase, ensuring searches are case-insensitive. 
+ * Keeps forms like email addresses, phone numbers, and web addresses whole. + + + + +* **KeywordAnalyzer** will produce the following single token: + + `[The quick brown fox jumped over the lazy dogs, bob@hotmail.com 123432.]` + + This analyzer: + + * Will perform no tokenization, and will consider the whole text as one token. + * Preserves upper/lower case in text, which means that searches will be case-sensitive. + * Useful in situations like IDs and codes where you do not want to separate into multiple tokens. + + + + + +##### Analyzers that tokenize according to the defined number of characters + + +* **NGramAnalyzer** tokenizes based on predefined token lengths. + + By default, the minimum token length is **2** characters, and the maximum is **6** characters. + Using these defaults, the following tokens will be generated: + + `[.c] [.co] [.com] [12] [123] [1234] [12343] [123432] [23] [234] [2343] [23432] + [32] [34] [343] [3432] [43] [432] [@h] [@ho] [@hot] [@hotm] [@hotma] + [ai] [ail] [ail.] [ail.c] [ail.co] [az] [azy] [b@] [b@h] [b@ho] [b@hot] [b@hotm] + [bo] [bob] [bob@] [bob@h] [bob@ho] [br] [bro] [brow] [brown] [ck] [co] [com] + [do] [dog] [dogs] [ed] [er] [fo] [fox] [gs] [ho] [hot] [hotm] [hotma] [hotmai] + [ic] [ick] [il] [il.] [il.c] [il.co] [il.com] [ju] [jum] [jump] [jumpe] [jumped] + [l.] [l.c] [l.co] [l.com] [la] [laz] [lazy] [ma] [mai] [mail] [mail.] [mail.c] + [mp] [mpe] [mped] [ob] [ob@] [ob@h] [ob@ho] [ob@hot] [og] [ogs] [om] [ot] [otm] + [otma] [otmai] [otmail] [ov] [ove] [over] [ow] [own] [ox] [pe] [ped] [qu] [qui] + [quic] [quick] [ro] [row] [rown] [tm] [tma] [tmai] [tmail] [tmail.] + [ui] [uic] [uick] [um] [ump] [umpe] [umped] [ve] [ver] [wn] [zy]` + +* **Overriding default token length**: (only when using Lucene as the search engine) + + You can override the default token lengths of the NGram analyzer by setting the following configuration keys: + [Indexing.Lucene.Analyzers.NGram.MinGram](../server/configuration/indexing-configuration.mdx#indexingluceneanalyzersngrammingram) + and [Indexing.Lucene.Analyzers.NGram.MaxGram](../server/configuration/indexing-configuration.mdx#indexingluceneanalyzersngrammaxgram). + + For example, setting them to 3 and 4, respectively, will generate the following tokens: + + `[.co] [.com] [123] [1234] [234] [2343] [343] [3432] [432] [@ho] [@hot] + [ail] [ail.] [azy] [b@h] [b@ho] [bob] [bob@] [bro] [brow] [com] [dog] [dogs] [fox] + [hot] [hotm] [ick] [il.] [il.c] [jum] [jump] [l.c] [l.co] [laz] [lazy] [mai] [mail] + [mpe] [mped] [ob@] [ob@h] [ogs] [otm] [otma] [ove] [over] [own] [ped] [qui] [quic] + [row] [rown] [tma] [tmai] [uic] [uick] [ump] [umpe] [ver]` + +* **Querying with NGram analyzer**: + + In RavenDB, the analyzer configured in the index definition is typically used both at indexing time and query time (the same analyzer). + However, the `NGramAnalyzer` is an exception to this rule. + + Refer to section [Analyzers at query time](../indexes/using-analyzers.mdx#analyzers-at-query-time) to learn about the different behaviors. + + + + + +## Setting analyzer for index-field + +* To explicitly set an analyzer that will process/tokenize the content of a specific index-field, + use the `Analyzers.Add()` method within the index definition for that field. + +* Either: + * Specify an analyzer from the [Analyzers available in RavenDB](../indexes/using-analyzers.mdx#analyzers-available-in-ravendb), + * Or specify your own custom analyzer (see [Creating custom analyzers](../indexes/using-analyzers.mdx#creating-custom-analyzers)). 
+
+* If you want RavenDB to use the default analyzers, see [RavenDB's default analyzers](../indexes/using-analyzers.mdx#ravendb).
+
+* An analyzer may also be set from the Edit Index view in the Studio, see [Index field options](../studio/database/indexes/create-map-index.mdx#index-field-options).
+
+
+
+
+{`// The index definition
+public class BlogPosts_ByTagsAndContent : AbstractIndexCreationTask<BlogPost, BlogPosts_ByTagsAndContent.IndexEntry>
+{
+    public class IndexEntry
+    {
+        public string[] Tags { get; set; }
+        public string Content { get; set; }
+    }
+
+    public BlogPosts_ByTagsAndContent()
+    {
+        Map = posts => from post in posts
+                       select new IndexEntry()
+                       {
+                           Tags = post.Tags,
+                           Content = post.Content
+                       };
+
+        // Field 'Tags' will be tokenized by Lucene's SimpleAnalyzer
+        Analyzers.Add(x => x.Tags, "SimpleAnalyzer");
+
+        // Field 'Content' will be tokenized by the Custom analyzer SnowballAnalyzer
+        Analyzers.Add(x => x.Content,
+            typeof(SnowballAnalyzer).AssemblyQualifiedName);
+    }
+}
+`}
+
+
+
+
+{`// The index definition
+var indexDefinition = new IndexDefinitionBuilder<BlogPost>("BlogPosts/ByTagsAndContent")
+{
+    Map = posts => from post in posts
+                   select new {post.Tags, post.Content},
+
+    Analyzers =
+    {
+        // Field 'Tags' will be tokenized by Lucene's SimpleAnalyzer
+        {x => x.Tags, "SimpleAnalyzer"},
+
+        // Field 'Content' will be tokenized by the Custom analyzer SnowballAnalyzer
+        {x => x.Content, typeof(SnowballAnalyzer).AssemblyQualifiedName}
+    }
+}.ToIndexDefinition(store.Conventions);
+
+store.Maintenance.Send(new PutIndexesOperation(indexDefinition));
+`}
+
+
+
+
+
+
+## RavenDB's default analyzers
+
+* When no specific analyzer is explicitly assigned to an index-field in the index definition,
+  RavenDB will use the Default Analyzers to process and tokenize the content of the field,
+  depending on the specified Indexing Behavior.
+
+* The **Default Analyzers** are:
+  * `RavenStandardAnalyzer` - Serves as the [Default Search Analyzer](../indexes/using-analyzers.mdx#using-the-default-search-analyzer).
+  * `KeywordAnalyzer` - Serves as the [Default Exact Analyzer](../indexes/using-analyzers.mdx#using-the-default-exact-analyzer).
+  * `LowerCaseKeywordAnalyzer` - Serves as the [Default Analyzer](../indexes/using-analyzers.mdx#using-the-default-analyzer).
+
+* The available **Indexing Behavior** values are:
+  * `FieldIndexing.Exact`
+  * `FieldIndexing.Search`
+  * `FieldIndexing.No` - This behavior [disables field indexing](../indexes/using-analyzers.mdx#disabling-indexing-for-index-field).
+
+* See the detailed explanation for each scenario below:
+
+
+##### Using the Default Search Analyzer
+* When the indexing behavior is set to `FieldIndexing.Search` and no analyzer is specified for the index-field,
+  RavenDB will use the Default Search Analyzer.
+  By default, this analyzer is the `RavenStandardAnalyzer` (inherits from Lucene's _StandardAnalyzer_).
+
+* To customize a different analyzer that will serve as your Default Search Analyzer,
+  set the [Indexing.Analyzers.Search.Default](../server/configuration/indexing-configuration.mdx#indexinganalyzerssearchdefault) configuration key.
+
+* Using a search analyzer enables full-text search queries against the field.
+  Given the same sample text from above, _RavenStandardAnalyzer_ will produce the following tokens:
+  `[quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob@hotmail.com] [123432]`
+
+
+
+{`public class BlogPosts_ByContent : AbstractIndexCreationTask<BlogPost>
+\{
+    public BlogPosts_ByContent()
+    \{
+        Map = posts => from post in posts
+                       select new
+                       \{
+                           Title = post.Title,
+                           Content = post.Content
+                       \};
+
+        // Set the Indexing Behavior:
+        // ==========================
+
+        // Set 'FieldIndexing.Search' on index-field 'Content'
+        Indexes.Add(x => x.Content, FieldIndexing.Search);
+
+        // => Index-field 'Content' will be processed by the "Default Search Analyzer"
+        //    since no other analyzer is set.
+    \}
+\}
+`}
+
+
+
+
+
+
+##### Using the Default Exact Analyzer
+* When the indexing behavior is set to `FieldIndexing.Exact`,
+  RavenDB will use the Default Exact Analyzer.
+  By default, this analyzer is the `KeywordAnalyzer`.
+
+* _KeywordAnalyzer_ treats the entire content of the index-field as a single token,
+  preserving the original text's case and ensuring no transformations, such as case normalization or stemming, are applied.
+  The field's value is indexed exactly as provided, enabling precise, case-sensitive matching at query time.
+
+* To customize a different analyzer that will serve as your Default Exact Analyzer,
+  set the [Indexing.Analyzers.Exact.Default](../server/configuration/indexing-configuration.mdx#indexinganalyzersexactdefault) configuration key.
+
+* Given the same sample text from above, _KeywordAnalyzer_ will produce a single token:
+  `[The quick brown fox jumped over the lazy dogs, Bob@hotmail.com 123432.]`
+
+
+
+{`public class Employees_ByFirstAndLastName : AbstractIndexCreationTask<Employee>
+\{
+    public Employees_ByFirstAndLastName()
+    \{
+        Map = employees => from employee in employees
+                           select new
+                           \{
+                               LastName = employee.LastName,
+                               FirstName = employee.FirstName
+                           \};
+
+        // Set the Indexing Behavior:
+        // ==========================
+
+        // Set 'FieldIndexing.Exact' on index-field 'FirstName'
+        Indexes.Add(x => x.FirstName, FieldIndexing.Exact);
+
+        // => Index-field 'FirstName' will be processed by the "Default Exact Analyzer"
+    \}
+\}
+`}
+
+
+
+
+
+
+##### Using the Default Analyzer
+* When no indexing behavior is set and no analyzer is specified for the index-field,
+  RavenDB will use the Default Analyzer.
+  By default, this analyzer is the `LowerCaseKeywordAnalyzer`.
+
+* _LowerCaseKeywordAnalyzer_ behaves like Lucene's _KeywordAnalyzer_, but additionally performs case normalization, converting all characters to lowercase.
+  The entire content of the field is processed into a single, lowercased token.
+
+* To customize a different analyzer that will serve as your Default Analyzer,
+  set the [Indexing.Analyzers.Default](../server/configuration/indexing-configuration.mdx#indexinganalyzersdefault) configuration key.
+
+* Given the same sample text from above, _LowerCaseKeywordAnalyzer_ will produce a single token:
+  `[the quick brown fox jumped over the lazy dogs, bob@hotmail.com 123432.]`
+
+
+
+{`public class Employees_ByFirstName : AbstractIndexCreationTask<Employee>
+\{
+    public Employees_ByFirstName()
+    \{
+        Map = employees => from employee in employees
+                           select new
+                           \{
+                               LastName = employee.LastName
+                           \};
+
+        // Index-field 'LastName' will be processed by the "Default Analyzer"
+        // since:
+        // * No analyzer was specified
+        // * No Indexing Behavior was specified (neither Exact nor Search)
+    \}
+\}
+`}
+
+
+
+
+
+## Disabling indexing for index-field
+
+* Use the `FieldIndexing.No` indexing behavior option to disable indexing of a particular index-field.
+  In this case:
+  * No analyzer will process the field, and no terms will be generated from its content.
+  * The field will not be available for querying.
+  * The field will still be accessible for extraction when [projecting query results](../indexes/querying/projections.mdx).
+
+* This is useful when you need to [store the field data in the index](../indexes/storing-data-in-index.mdx) and only intend to use it for query projections.
+
+
+
+{`public class BlogPosts_ByTitle : AbstractIndexCreationTask<BlogPost>
+\{
+    public BlogPosts_ByTitle()
+    \{
+        Map = posts => from post in posts
+                       select new
+                       \{
+                           Title = post.Title,
+                           Content = post.Content
+                       \};
+
+        // Set the Indexing Behavior:
+        // ==========================
+
+        // Set 'FieldIndexing.No' on index-field 'Content'
+        Indexes.Add(x => x.Content, FieldIndexing.No);
+
+        // Set 'FieldStorage.Yes' to store the original content of field 'Content'
+        // in the index
+        Stores.Add(x => x.Content, FieldStorage.Yes);
+
+        // => No analyzer will process field 'Content',
+        //    it will only be stored in the index.
+    \}
+\}
+`}
+
+
+
+
+
+## Creating custom analyzers
+
+* **Availability & file type**:
+  The custom analyzer you are referencing must be available to the RavenDB server instance.
+  You can create and add custom analyzers to RavenDB as `.cs` files.
+
+* **Scope**:
+  Custom analyzers can be defined as:
+
+  * **Database Custom Analyzers** - can only be used by indexes of the database where they are defined.
+  * **Server-Wide Custom Analyzers** - can be used by indexes on all databases across all servers in the cluster.
+
+  A database analyzer may have the same name as a server-wide analyzer.
+  In this case, the indexes of that database will use the database version of the analyzer.
+  You can think of database analyzers as overriding server-wide analyzers with the same names.
+
+* **Ways to create**:
+  There are three ways to create a custom analyzer and add it to your server:
+
+  1. [Add custom analyzer via Studio](../indexes/using-analyzers.mdx#add-custom-analyzer-via-studio)
+  2. [Add custom analyzer via Client API](../indexes/using-analyzers.mdx#add-custom-analyzer-via-client-api)
+  3. [Add custom analyzer directly to RavenDB's binaries](../indexes/using-analyzers.mdx#add-custom-analyzer-directly-to-ravendbs-binaries)
+
+
+##### Add custom analyzer via Studio
+
+Custom analyzers can be added from the Custom Analyzers view in the Studio.
+Learn more in this [Custom analyzers](../studio/database/settings/custom-analyzers.mdx) article.
+
+
+
+
+##### Add custom analyzer via Client API
+
+First, create a class that inherits from the abstract `Lucene.Net.Analysis.Analyzer` class
+(you need to reference `Lucene.Net.dll`, which is included with the RavenDB Server package).
+For example:
+
+
+
+{`public class MyAnalyzer : Lucene.Net.Analysis.Analyzer
+\{
+    public override TokenStream TokenStream(string fieldName, TextReader reader)
+    \{
+        // Implement your analyzer's logic
+        throw new CodeOmitted();
+    \}
+\}
+`}
+
+
+
+Next, use `PutAnalyzersOperation` to deploy the analyzer to a specific database.
+By default, `PutAnalyzersOperation` will apply to the [default database](../client-api/setting-up-default-database.mdx) of the document store you're using.
+To target a different database, use the [ForDatabase()](../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) method.
+
+To make it a server-wide analyzer, use the `PutServerWideAnalyzersOperation` operation.
+
+
+
+{`public PutAnalyzersOperation(params AnalyzerDefinition[] analyzersToAdd)
+`}
+
+
+
+
+{`public PutServerWideAnalyzersOperation(params AnalyzerDefinition[] analyzersToAdd)
+`}
+
+
+
+
+{`public class AnalyzerDefinition
+\{
+    // Name of the analyzer
+    public string Name \{ get; set; \}
+
+    // C# source-code of the analyzer
+    public string Code \{ get; set; \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| **Name** | `string` | The class name of your custom analyzer, as defined in your code. |
+| **Code** | `string` | Compilable C# code: <br/> a class that inherits from `Lucene.Net.Analysis.Analyzer`, <br/> including the containing namespace and the necessary `using` statements. |
+
+**Client API example**:
+
+
+
+{`// Define the put analyzer operation:
+var putAnalyzerOp = new PutAnalyzersOperation(new AnalyzerDefinition
+\{
+    // The name must be the same as the analyzer's class name
+    Name = "MyAnalyzer",
+
+    Code = @"
+        using System.IO;
+        using Lucene.Net.Analysis;
+        using Lucene.Net.Analysis.Standard;
+
+        namespace MyAnalyzer
+        \{
+            public class MyAnalyzer : Lucene.Net.Analysis.Analyzer
+            \{
+                public override TokenStream TokenStream(string fieldName, TextReader reader)
+                \{
+                    throw new CodeOmitted();
+                \}
+            \}
+        \}"
+\});
+
+// Deploy the analyzer:
+store.Maintenance.ForDatabase("MyDatabase").Send(putAnalyzerOp);
+`}
+
+
+
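+The snippets above deliberately omit the analyzer body (`throw new CodeOmitted();`).
+As a concrete illustration, a minimal working analyzer can often be composed from existing
+Lucene building blocks. This is a sketch, assuming the Lucene.Net 3.x `WhitespaceTokenizer`
+and `LowerCaseFilter` types are available; the class and namespace names are hypothetical:
+
+
+
+{`using System.IO;
+using Lucene.Net.Analysis;
+
+namespace MyAnalyzers
+\{
+    // Splits the text on whitespace only, then lowercases each token -
+    // roughly the behavior of the built-in LowerCaseWhitespaceAnalyzer
+    public class MyLowerCaseWhitespaceAnalyzer : Analyzer
+    \{
+        public override TokenStream TokenStream(string fieldName, TextReader reader)
+        \{
+            return new LowerCaseFilter(new WhitespaceTokenizer(reader));
+        \}
+    \}
+\}
+`}
+
+Passing this class's source code as the `Code` of an `AnalyzerDefinition`, as shown above,
+would deploy it like any other custom analyzer.
+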
+
+
+##### Add custom analyzer directly to RavenDB's binaries
+
+Another way to add custom analyzers to RavenDB is by placing them next to RavenDB's binaries.
+
+The fully qualified name must be specified for any index-field that will be tokenized by the analyzer.
+
+Note that the analyzer must be compatible with .NET Core 2.0 (e.g., a .NET Standard 2.0 assembly).
+
+This is the only method for adding custom analyzers in RavenDB versions older than 5.2.
+
+
+
+
+## Viewing the indexed terms
+
+The terms generated for each index-field can be viewed in the Studio.
+
+![The index terms](./assets/index-terms-1.png)
+
+1. These are the index-fields.
+2. Click the "Terms" button to view the generated terms for each field.
+
+----
+
+![The index terms](./assets/index-terms-2.png)
+
+1. This is the "index-field name".
+2. These are the terms generated for the index-field.
+   In this example the `StopAnalyzer` was used to tokenize the text.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_using-analyzers-java.mdx b/versioned_docs/version-7.1/indexes/_using-analyzers-java.mdx
new file mode 100644
index 0000000000..0e985efbe8
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_using-analyzers-java.mdx
@@ -0,0 +1,193 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+RavenDB uses indexes to facilitate fast queries powered by [**Lucene**](http://lucene.apache.org/), the full-text search engine.
+
+The indexing of a single document starts with creating Lucene's **Document** according to the index definition. Lucene processes it by breaking it into **fields** and splitting all the text
+from each **Field** into tokens (**Terms**) in a process called **Tokenization**. Those tokens will be stored in the index, and will later be searched upon.
+The **Tokenization** process uses an object called an Analyzer underneath.
+
+The indexing process and its results can be controlled by various field options and Analyzers.
+
+## Understanding Analyzers
+
+Lucene offers several out-of-the-box Analyzers, and new ones can be created easily. Various analyzers differ in the way they split the text stream ("tokenize"), and in the way they process those tokens after tokenization.
+
+For example, given this sample text:
+
+`The quick brown fox jumped over the lazy dogs, Bob@hotmail.com 123432.`
+
+* **StandardAnalyzer**, which is Lucene's default, will produce the following tokens:
+
+   `[quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob@hotmail.com] [123432]`
+
+* **StopAnalyzer** will work similarly, but will not perform light stemming and will only tokenize on white space:
+
+   `[quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob] [hotmail] [com]`
+
+* **SimpleAnalyzer** will tokenize on all non-alpha characters and will make all the tokens lowercase:
+
+   `[the] [quick] [brown] [fox] [jumped] [over] [the] [lazy] [dogs] [bob] [hotmail] [com]`
+
+* **WhitespaceAnalyzer** will just tokenize on white spaces:
+
+   `[The] [quick] [brown] [fox] [jumped] [over] [the] [lazy] [dogs,] [Bob@hotmail.com] [123432.]`
+
+* **KeywordAnalyzer** will perform no tokenization, and will consider the whole text stream as one token:
+
+   `[The quick brown fox jumped over the lazy dogs, bob@hotmail.com 123432.]`
+
+* **NGramAnalyzer** will tokenize on predefined token lengths, 2-6 characters long, which are defined by the `Indexing.Analyzers.NGram.MinGram` and `Indexing.Analyzers.NGram.MaxGram` configuration options:
+
+   `[.c] [.co] [.com] [12] [123] [1234] [12343] [123432] [23] [234] [2343] [23432] [32] [34] [343] [3432] [43] [432] [@h] [@ho] [@hot] [@hotm] [@hotma] [ai] [ail] [ail.] [ail.c] [ail.co] [az] [azy] [b@] [b@h] [b@ho] [b@hot] [b@hotm] [bo] [bob] [bob@] [bob@h] [bob@ho] [br] [bro] [brow] [brown] [ck] [co] [com] [do] [dog] [dogs] [ed] [er] [fo] [fox] [gs] [ho] [hot] [hotm] [hotma] [hotmai] [ic] [ick] [il] [il.] [il.c] [il.co] [il.com] [ju] [jum] [jump] [jumpe] [jumped] [l.] [l.c] [l.co] [l.com] [la] [laz] [lazy] [ma] [mai] [mail] [mail.] [mail.c] [mp] [mpe] [mped] [ob] [ob@] [ob@h] [ob@ho] [ob@hot] [og] [ogs] [om] [ot] [otm] [otma] [otmai] [otmail] [ov] [ove] [over] [ow] [own] [ox] [pe] [ped] [qu] [qui] [quic] [quick] [ro] [row] [rown] [tm] [tma] [tmai] [tmail] [tmail.] [ui] [uic] [uick] [um] [ump] [umpe] [umped] [ve] [ver] [wn] [zy]`
+
+   You can override the NGram analyzer's default token lengths by configuring `Indexing.Analyzers.NGram.MinGram` and `Indexing.Analyzers.NGram.MaxGram` per index, e.g. setting them to 3 and 4 accordingly will generate:
+
+   `[.co] [.com] [123] [1234] [234] [2343] [343] [3432] [432] [@ho] [@hot] [ail] [ail.] [azy] [b@h] [b@ho] [bob] [bob@] [bro] [brow] [com] [dog] [dogs] [fox] [hot] [hotm] [ick] [il.] [il.c] [jum] [jump] [l.c] [l.co] [laz] [lazy] [mai] [mail] [mpe] [mped] [ob@] [ob@h] [ogs] [otm] [otma] [ove] [over] [own] [ped] [qui] [quic] [row] [rown] [tma] [tmai] [uic] [uick] [ump] [umpe] [ver]`
+
+## RavenDB Default Analyzer
+
+By default, RavenDB uses the custom analyzer called `LowerCaseKeywordAnalyzer` for all indexed content. Its implementation behaves like Lucene's KeywordAnalyzer, but it also performs case normalization by converting all characters to lower case.
+
+RavenDB stores the entire term as a single token, in a lower cased form. Given the same sample text above, `LowerCaseKeywordAnalyzer` will produce a single token:
+
+`[the quick brown fox jumped over the lazy dogs, bob@hotmail.com 123432.]`
+
+This default analyzer allows you to perform exact searches, which is exactly what you would expect. However, it doesn't allow you to perform full-text searches. For that purpose, a different analyzer should be used.
+
+## Full-Text Search
+
+To allow full-text search on the text fields, you can use the analyzers provided out of the box with Lucene.
+These are available as part of the Lucene library which ships with RavenDB.
+
+For most cases, Lucene's `StandardAnalyzer` would be your analyzer of choice. As shown above, this analyzer is aware of e-mail and network addresses when tokenizing. It normalizes cases, filters out common English words, and does some basic English stemming as well.
+
+For languages other than English, or if you need a custom analysis process, you can roll your own `Analyzer`. It is quite simple and may already be available as a contrib package for Lucene.
+There are also `Collation analyzers` available (you can read more about them [here](../indexes/sorting-and-collation.mdx#collation)).
+
+## Using Non-Default Analyzer
+
+To make a document property indexed using a specific Analyzer, all you need to do is match the analyzer with the name of the property:
+
+
+
+
+{`public static class BlogPosts_ByTagsAndContent extends AbstractIndexCreationTask {
+    public BlogPosts_ByTagsAndContent() {
+        map = "docs.Posts.Select(post => new { " +
+            "    tags = post.tags, " +
+            "    content = post.content " +
+            "})";
+        analyze("tags", "SimpleAnalyzer");
+        analyze("content", "Raven.Sample.SnowballAnalyzer");
+    }
+}
+`}
+
+
+
+
+{`IndexDefinitionBuilder builder = new IndexDefinitionBuilder("BlogPosts/ByTagsAndContent");
+builder.setMap("docs.Posts.Select(post => new { " +
+    "    tags = post.tags, " +
+    "    content = post.content " +
+    "})");
+builder.getAnalyzersStrings().put("tags", "SimpleAnalyzer");
+builder.getAnalyzersStrings().put("content", "Raven.Sample.SnowballAnalyzer");
+
+store.maintenance()
+    .send(new PutIndexesOperation(builder.toIndexDefinition(store.getConventions())));
+`}
+
+
+
+
+The analyzer you are referencing has to be available to the RavenDB server instance. When using analyzers that do not come with the default Lucene.NET distribution, you need to drop all the necessary DLLs into the RavenDB working directory (where the `Raven.Server` executable is located), and use their fully qualified type name (including the assembly name).
+
+
+## Creating Own Analyzer
+
+You can create a custom analyzer on your own and deploy it to the RavenDB server. To do that, perform the following steps:
+
+- create a class that inherits from the abstract `Lucene.Net.Analysis.Analyzer` (you need to reference `Lucene.Net.dll`, supplied with the RavenDB Server package),
+- your DLL needs to be placed next to the RavenDB binaries (note that it needs to be compatible with .NET Core 2.0, e.g. a .NET Standard 2.0 assembly),
+- the fully qualified name needs to be specified for an indexing field that is going to be tokenized by the analyzer
+
+
+
+{`public class MyAnalyzer : Lucene.Net.Analysis.Analyzer
+\{
+    public override TokenStream TokenStream(string fieldName, TextReader reader)
+    \{
+        // Implement your analyzer's logic
+        throw new CodeOmitted();
+    \}
+\}
+`}
+
+
+
+## Manipulating Field Indexing Behavior
+
+By default, each indexed field is analyzed using the `LowerCaseKeywordAnalyzer` which indexes a field as a single, lower cased term.
+
+This behavior can be changed by turning off the field analysis (setting the `FieldIndexing` option for this field to `Exact`). This causes all the properties to be treated as a single token and the matches must be exact (case sensitive), similarly to using the `KeywordAnalyzer` on this field.
+
+
+
+{`public static class Employees_ByFirstAndLastName extends AbstractIndexCreationTask \{
+    public Employees_ByFirstAndLastName() \{
+        map = "docs.Employees.Select(employee => new \{ " +
+            "    LastName = employee.LastName, " +
+            "    FirstName = employee.FirstName " +
+            "\})";
+
+        index("FirstName", FieldIndexing.EXACT);
+    \}
+\}
+`}
+
+
+
+`FieldIndexing.SEARCH` allows performing full text search operations against the field:
+
+
+
+{`public static class BlogPosts_ByContent extends AbstractIndexCreationTask \{
+    public BlogPosts_ByContent() \{
+        map = "docs.Posts.Select(post => new \{ " +
+            "    tags = post.tags, " +
+            "    content = post.content " +
+            "\})";
+
+        index("content", FieldIndexing.SEARCH);
+    \}
+\}
+`}
+
+
+
+If you want to disable indexing on a particular field, use the `FieldIndexing.NO` option. This can be useful when you want to [store](../indexes/storing-data-in-index.mdx) field data in the index, but don't want to make it available for querying; however, it will still be available for extraction by projections:
+
+
+
+{`public static class BlogPosts_ByTitle extends AbstractIndexCreationTask \{
+    public BlogPosts_ByTitle() \{
+        map = "docs.Posts.Select(post => new \{ " +
+            "    tags = post.tags, " +
+            "    content = post.content " +
+            "\})";
+
+        index("content", FieldIndexing.NO);
+        store("content", FieldStorage.YES);
+    \}
+\}
+`}
+
+
+
+## Ordering When Field is Searchable
+
+When a field is marked as `SEARCH`, sorting must be done using an additional field. Read more [here](../indexes/querying/sorting.mdx#ordering-when-a-field-is-searchable).
+
+
diff --git a/versioned_docs/version-7.1/indexes/_using-analyzers-nodejs.mdx b/versioned_docs/version-7.1/indexes/_using-analyzers-nodejs.mdx
new file mode 100644
index 0000000000..b120b39ea1
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_using-analyzers-nodejs.mdx
@@ -0,0 +1,655 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* RavenDB supports fast and efficient querying through indexes,
+  which are powered by either [Lucene](http://lucene.apache.org/) or [Corax](../indexes/search-engine/corax.mdx),
+  a high-performance search engine developed specifically for RavenDB.
+  (You can choose which search engine to use for each index).
+
+* **Analyzers** are components used in the indexing and querying processes of the search engines,
+  controlling how data is indexed and how search queries interact with the indexed data.
+
+* **The Corax search engine fully respects and supports all Lucene analyzers**,
+  ensuring that existing configurations work seamlessly,
+  while also leveraging Corax's optimized performance for faster query execution.
+
+* This means you can use any analyzer with either search engine,
+  giving you full flexibility in configuring your indexes.
+ +* In this page: + * [Understanding the role of analyzers](../indexes/using-analyzers.mdx#understanding-the-role-of-analyzers) + * [Analyzers available in RavenDB](../indexes/using-analyzers.mdx#analyzers-available-in-ravendb) + * [Setting analyzer for index-field](../indexes/using-analyzers.mdx#setting-analyzer-for-index-field) + * [RavenDB's default analyzers](../indexes/using-analyzers.mdx#ravendb) + * [Disabling indexing for index-field](../indexes/using-analyzers.mdx#disabling-indexing-for-index-field) + * [Creating custom analyzers](../indexes/using-analyzers.mdx#creating-custom-analyzers) + * [Viewing the indexed terms](../indexes/using-analyzers.mdx#viewing-the-indexed-terms) + + +## Understanding the role of analyzers + + + +###### Analyzers in the index definition: +The [index definition](../studio/database/indexes/indexes-overview.mdx#index-definition) determines what content from the documents will be indexed for each index-field. +For each index-field you can specify a particular analyzer to process the content of that field. + + + + +###### Analyzers at indexing time: +During the [indexing process](../studio/database/indexes/indexes-overview.mdx#indexing-process), +the content to be indexed is processed and broken down into smaller components called tokens (or terms) through a process known as **tokenization**. +This is done by the **Analyzers**, which are objects that determine how text is split into tokens. + +Different analyzers vary in how they split the text stream ("tokenize"), and how they process those tokens after tokenization. +Analyzers can apply additional transformations, such as converting text to lowercase, removing stop words +(e.g., "the," "and"), or applying stemming (reducing words to their base forms, e.g., "running" → "run"). + +The resulting tokens are then stored in the index for each index-field and can later be searched by queries, +enabling [Full-text search](../indexes/querying/searching.mdx). + + + + +###### Analyzers at query time: +When running a [Full-text search with a dynamic query](../client-api/session/querying/text-search/full-text-search.mdx), +the auto-index created by the server breaks down the text of the searched document field using the [default search analyzer](../indexes/using-analyzers.mdx#using-the-default-search-analyzer). + +When running a [Full-text search on a static-index](../indexes/querying/query-index.mdx), +the **same analyzer** used to tokenize field content at indexing time is typically applied +to process the terms provided in the full-text search query before they are sent to the search engine to retrieve matching documents. + +There are two exceptions to this rule: + +1. When setting the [NGramAnalyzer](../indexes/using-analyzers.mdx#analyzers-that-tokenize-according-to-the-defined-number-of-characters) in the index definition, + it tokenizes the index field at indexing time. + However, at query time, when performing a full-text search on that field, + the default [RavenStandardAnalyzer](../indexes/using-analyzers.mdx#using-the-default-search-analyzer) is used to tokenize the search term from the query predicate. + + Currently, for query time, you cannot specify a different analyzer than the one defined in the index definition, + so to address this issue, you have two options: + * Increase the [MaxGram](../server/configuration/indexing-configuration.mdx#indexingluceneanalyzersngrammaxgram) value to generate larger tokens during indexing (when using Lucene). 
+ * Use a different analyzer other than _NGramAnalyzer_ that better matches your requirements. + +2. Behavior is also different when making a full-text search with wildcards in the search terms. + This is explained in detail in [Searching with wildcards](../indexes/querying/searching.mdx#searching-with-wildcards). + + + + +###### Full-text search: +In most cases, Lucene's [StandardAnalyzer](../indexes/using-analyzers.mdx#analyzers-that-remove-common-stop-words) is sufficient for full-text searches. +For languages other than English, or when a custom analysis process is needed, you can provide your own [Custom analyzer](../indexes/using-analyzers.mdx#creating-custom-analyzers). +It is straightforward and may already be available as a contrib package for Lucene. + +You can also configure a specific collation for an index field to sort based on culture specific rules. +Learn more in [Sorting and Collation](../indexes/sorting-and-collation.mdx#collation). + + + + +## Analyzers available in RavenDB + +* RavenDB offers the following Lucene analyzers 'out of the box' (their details are listed below): + + * **StandardAnalyzer** + * **StopAnalyzer** + * **SimpleAnalyzer** + * **WhitespaceAnalyzer** + * **LowerCaseWhitespaceAnalyzer** + * **KeywordAnalyzer** + * **NGramAnalyzer** + +* If needed, you can create your own [Customized Analyzers](../indexes/using-analyzers.mdx#creating-custom-analyzers). + +* To assign the analyzer of your choice to a specific index-field, + see: [Setting analyzer for index-field](../indexes/using-analyzers.mdx#setting-analyzer-for-index-field). + +* When no analyzer is explicitly assigned to an index-field in the index definition, + RavenDB will use its [Default Analyzers](../indexes/using-analyzers.mdx#ravendb) to process and tokenize the content of a field. + +All examples below use the following text: +`The quick brown fox jumped over the lazy dogs, Bob@hotmail.com 123432.` + + + +##### Analyzers that remove common "stop words": + + +* **StandardAnalyzer**, which is Lucene's default, will produce the following tokens: + + `[quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob@hotmail.com] [123432]` + + This analyzer: + + * Removes common "stop words". + * Converts text to lowercase, ensuring searches are case-insensitive. + * Separates on whitespace and punctuation that is followed by whitespace - a dot that is not followed by whitespace is considered part of the token. + * Email addresses and internet hostnames are treated as a single token. + * Splits words at hyphens, unless there's a number in the token, in which case the whole token is interpreted as a product number and is not split. + + + + +* **StopAnalyzer**, which works similarly, will produce the following tokens: + + `[quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob] [hotmail] [com]` + + This analyzer: + + * Removes common "stop words". + * Converts text to lowercase, ensuring searches are case-insensitive. + * Separates and tokenizes text based on whitespace without performing light stemming. + * Removes numbers and symbols, separating tokens at those positions. + This means email and web addresses are split into separate tokens. + + + + + +* **Stop words**: + + * [Stop words](https://en.wikipedia.org/wiki/Stop_word) (e.g. the, it, a, is, this, who, that...) + are often removed to narrow search results by focusing on less frequently used words. 
+ * If you want to include words such as IT (Information Technology), + be aware that analyzers removing common stop words may treat IT as a stop word and exclude it from the resulting terms. + This can also affect acronyms such as WHO (World Health Organization) or names such as "The Who" or "The IT Crowd". + * To avoid excluding acronyms, you can either spell out the full title instead of abbreviating it + or use an [Analyzer that doesn't remove stop words](../indexes/using-analyzers.mdx#analyzers-that-do-not-remove-common-stop-words). + + + + + + +##### Analyzers that do not remove common "stop words" + + +* **SimpleAnalyzer** will produce the following tokens: + + `[the] [quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob] [hotmail] [com]` + + This analyzer: + + * Includes common "stop words". + * Converts text to lowercase, ensuring searches are case-insensitive. + * Separates on white spaces. + * Will tokenize on all non-alpha characters. + * Removes numbers and symbols, separating tokens at those positions. + This means email and web addresses are split into separate tokens. + + + + +* **WhitespaceAnalyzer** will produce the following tokens: + + `[The] [quick] [brown] [fox] [jumped] [over] [the] [lazy] [dogs,] [Bob@hotmail.com] [123432.]` + + This analyzer: + + * Includes common "stop words". + * Tokenizes text by separating it on whitespaces. + * Preserves upper/lower case in text, which means that searches will be case-sensitive. + * Keeps forms like email addresses, phone numbers, and web addresses whole. + + + + +* **LowerCaseWhitespaceAnalyzer** will produce the following tokens: + + `[the] [quick] [brown] [fox] [jumped] [over] [lazy] [dogs,] [bob@hotmail.com] [123432.]` + + This analyzer: + + * Includes common "stop words". + * Tokenizes text by separating it on whitespaces. + * Converts text to lowercase, ensuring searches are case-insensitive. + * Keeps forms like email addresses, phone numbers, and web addresses whole. + + + + +* **KeywordAnalyzer** will produce the following single token: + + `[The quick brown fox jumped over the lazy dogs, bob@hotmail.com 123432.]` + + This analyzer: + + * Will perform no tokenization, and will consider the whole text as one token. + * Preserves upper/lower case in text, which means that searches will be case-sensitive. + * Useful in situations like IDs and codes where you do not want to separate into multiple tokens. + + + + + +##### Analyzers that tokenize according to the defined number of characters + + +* **NGramAnalyzer** tokenizes based on predefined token lengths. + + By default, the minimum token length is **2** characters, and the maximum is **6** characters. + Using these defaults, the following tokens will be generated: + + `[.c] [.co] [.com] [12] [123] [1234] [12343] [123432] [23] [234] [2343] [23432] + [32] [34] [343] [3432] [43] [432] [@h] [@ho] [@hot] [@hotm] [@hotma] + [ai] [ail] [ail.] [ail.c] [ail.co] [az] [azy] [b@] [b@h] [b@ho] [b@hot] [b@hotm] + [bo] [bob] [bob@] [bob@h] [bob@ho] [br] [bro] [brow] [brown] [ck] [co] [com] + [do] [dog] [dogs] [ed] [er] [fo] [fox] [gs] [ho] [hot] [hotm] [hotma] [hotmai] + [ic] [ick] [il] [il.] [il.c] [il.co] [il.com] [ju] [jum] [jump] [jumpe] [jumped] + [l.] [l.c] [l.co] [l.com] [la] [laz] [lazy] [ma] [mai] [mail] [mail.] [mail.c] + [mp] [mpe] [mped] [ob] [ob@] [ob@h] [ob@ho] [ob@hot] [og] [ogs] [om] [ot] [otm] + [otma] [otmai] [otmail] [ov] [ove] [over] [ow] [own] [ox] [pe] [ped] [qu] [qui] + [quic] [quick] [ro] [row] [rown] [tm] [tma] [tmai] [tmail] [tmail.] 
+  [ui] [uic] [uick] [um] [ump] [umpe] [umped] [ve] [ver] [wn] [zy]`
+
+* **Overriding default token length**: (only when using Lucene as the search engine)
+
+  You can override the default token lengths of the NGram analyzer by setting the following configuration keys:
+  [Indexing.Lucene.Analyzers.NGram.MinGram](../server/configuration/indexing-configuration.mdx#indexingluceneanalyzersngrammingram)
+  and [Indexing.Lucene.Analyzers.NGram.MaxGram](../server/configuration/indexing-configuration.mdx#indexingluceneanalyzersngrammaxgram).
+
+  For example, setting them to 3 and 4, respectively, will generate the following tokens:
+
+  `[.co] [.com] [123] [1234] [234] [2343] [343] [3432] [432] [@ho] [@hot]
+  [ail] [ail.] [azy] [b@h] [b@ho] [bob] [bob@] [bro] [brow] [com] [dog] [dogs] [fox]
+  [hot] [hotm] [ick] [il.] [il.c] [jum] [jump] [l.c] [l.co] [laz] [lazy] [mai] [mail]
+  [mpe] [mped] [ob@] [ob@h] [ogs] [otm] [otma] [ove] [over] [own] [ped] [qui] [quic]
+  [row] [rown] [tma] [tmai] [uic] [uick] [ump] [umpe] [ver]`
+
+* **Querying with NGram analyzer**:
+
+  In RavenDB, the analyzer configured in the index definition is typically used both at indexing time and query time (the same analyzer).
+  However, the `NGramAnalyzer` is an exception to this rule.
+
+  Refer to section [Analyzers at query time](../indexes/using-analyzers.mdx#analyzers-at-query-time) to learn about the different behaviors.
+
+
+
+
+
+## Setting analyzer for index-field
+
+* To explicitly set an analyzer that will process/tokenize the content of a specific index-field,
+  use the `analyze()` method within the index definition for that field.
+
+* Either:
+  * Specify an analyzer from the [Analyzers available in RavenDB](../indexes/using-analyzers.mdx#analyzers-available-in-ravendb),
+  * Or specify your own custom analyzer (see [Creating custom analyzers](../indexes/using-analyzers.mdx#creating-custom-analyzers)).
+
+* If you want RavenDB to use the default analyzers, see [RavenDB's default analyzers](../indexes/using-analyzers.mdx#ravendb).
+
+* An analyzer may also be set from the Edit Index view in the Studio, see [Index field options](../studio/database/indexes/create-map-index.mdx#index-field-options).
+
+
+
+
+{`class BlogPosts_ByTagsAndContent extends AbstractIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map = \`docs.Posts.Select(post => new {
+            tags = post.tags,
+            content = post.content
+        })\`;
+
+        // Field 'tags' will be tokenized by Lucene's SimpleAnalyzer
+        this.analyze("tags", "SimpleAnalyzer");
+
+        // Field 'content' will be tokenized by the Custom analyzer SnowballAnalyzer
+        this.analyze("content", "Raven.Sample.SnowballAnalyzer");
+    }
+}
+`}
+
+
+
+
+{`const builder = new IndexDefinitionBuilder("BlogPosts/ByTagsAndContent");
+builder.map = \`docs.Posts.Select(post => new {
+    tags = post.tags,
+    content = post.content
+})\`;
+builder.analyzersStrings["tags"] = "SimpleAnalyzer";
+builder.analyzersStrings["content"] = "Raven.Sample.SnowballAnalyzer";
+
+await store.maintenance
+    .send(new PutIndexesOperation(
+        builder.toIndexDefinition(store.conventions)));
+`}
+
+
+
+
+
+
+## RavenDB's default analyzers
+
+* When no specific analyzer is explicitly assigned to an index-field in the index definition,
+  RavenDB will use the Default Analyzers to process and tokenize the content of the field,
+  depending on the specified Indexing Behavior.
+
+* The **Default Analyzers** are:
+  * `RavenStandardAnalyzer` - Serves as the [Default Search Analyzer](../indexes/using-analyzers.mdx#using-the-default-search-analyzer).
+ * `KeywordAnalyzer` - Serves as the [Default Exact Analyzer](../indexes/using-analyzers.mdx#using-the-default-exact-analyzer).
+ * `LowerCaseKeywordAnalyzer` - Serves as the [Default Analyzer](../indexes/using-analyzers.mdx#using-the-default-analyzer).
+
+* The available **Indexing Behavior** values are:
+ * `Exact`
+ * `Search`
+ * `No` - This behavior [disables field indexing](../indexes/using-analyzers.mdx#disabling-indexing-for-index-field).
+
+* See the detailed explanation for each scenario below:
+
+
+##### Using the Default Search Analyzer
+* When the indexing behavior is set to `Search` and no analyzer is specified for the index-field,
+ RavenDB will use the Default Search Analyzer.
+ By default, this analyzer is the `RavenStandardAnalyzer` (inherits from Lucene's _StandardAnalyzer_).
+
+* To use a different analyzer as your Default Search Analyzer,
+ set the [Indexing.Analyzers.Search.Default](../server/configuration/indexing-configuration.mdx#indexinganalyzerssearchdefault) configuration key.
+
+* Using a search analyzer enables full-text search queries against the field.
+ Given the same sample text from above, _RavenStandardAnalyzer_ will produce the following tokens:
+ `[quick] [brown] [fox] [jumped] [over] [lazy] [dogs] [bob@hotmail.com] [123432]`
+
+
+
+{`class BlogPosts_ByContent extends AbstractIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.map = "docs.Posts.Select(post => new \{ " +
+                   "    tags = post.tags, " +
+                   "    content = post.content " +
+                   "\})";
+
+        // Set the Indexing Behavior:
+        // ==========================
+
+        // Set 'Search' on index-field 'content'
+        this.index("content", "Search");
+
+        // => Index-field 'content' will be processed by the "Default Search Analyzer"
+        //    since no other analyzer is set.
+    \}
+\}
+`}
+
+
+
+
+
+##### Using the Default Exact Analyzer
+* When the indexing behavior is set to `Exact`, RavenDB will use the Default Exact Analyzer.
+ By default, this analyzer is the `KeywordAnalyzer`.
+
+* _KeywordAnalyzer_ treats the entire content of the index-field as a single token,
+ preserving the original text's case and ensuring no transformations, such as case normalization or stemming, are applied.
+ The field's value is indexed exactly as provided, enabling precise, case-sensitive matching at query time.
+
+* To use a different analyzer as your Default Exact Analyzer,
+ set the [Indexing.Analyzers.Exact.Default](../server/configuration/indexing-configuration.mdx#indexinganalyzersexactdefault) configuration key.
+
+* Given the same sample text from above, _KeywordAnalyzer_ will produce a single token:
+ `[The quick brown fox jumped over the lazy dogs, Bob@hotmail.com 123432.]`
+
+
+
+{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.map = "docs.Employees.Select(employee => new \{ " +
+                   "    LastName = employee.LastName, " +
+                   "    FirstName = employee.FirstName " +
+                   "\})";
+
+        // Set the Indexing Behavior:
+        // ==========================
+
+        // Set 'Exact' on index-field 'FirstName'
+        this.index("FirstName", "Exact");
+
+        // => Index-field 'FirstName' will be processed by the "Default Exact Analyzer"
+    \}
+\}
+`}
+
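+For illustration, querying an `Exact` field matches the raw, case-sensitive term. A minimal sketch, assuming the `Employees_ByFirstAndLastName` index above (the value `"Robert"` is hypothetical):
+
+{`// Matches only documents whose FirstName is exactly "Robert" (case-sensitive),
+// since the Default Exact Analyzer (KeywordAnalyzer) indexed the raw value
+const employees = await session
+    .query(\{ indexName: "Employees/ByFirstAndLastName" \})
+    .whereEquals("FirstName", "Robert")
+    .all();
+`}
+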
+##### Using the Default Analyzer
+* When no indexing behavior is set and no analyzer is specified for the index-field,
+ RavenDB will use the Default Analyzer.
+ By default, this analyzer is the `LowerCaseKeywordAnalyzer`.
+
+* _LowerCaseKeywordAnalyzer_ behaves like Lucene's _KeywordAnalyzer_, but additionally performs case normalization, converting all characters to lowercase.
+ The entire content of the field is processed into a single, lowercased token.
+
+* To use a different analyzer as your Default Analyzer,
+ set the [Indexing.Analyzers.Default](../server/configuration/indexing-configuration.mdx#indexinganalyzersdefault) configuration key.
+
+* Given the same sample text from above, _LowerCaseKeywordAnalyzer_ will produce a single token:
+ `[the quick brown fox jumped over the lazy dogs, bob@hotmail.com 123432.]`
+
+
+
+{`class Employees_ByFirstName extends AbstractIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.map = "docs.Employees.Select(employee => new \{ " +
+                   "    FirstName = employee.FirstName" +
+                   "\})";
+
+        // Index-field 'FirstName' will be processed by the "Default Analyzer"
+        // since:
+        // * No analyzer was specified
+        // * No Indexing Behavior was specified (neither Exact nor Search)
+    \}
+\}
+`}
+
+
+
+
+
+## Disabling indexing for index-field
+
+* Use the `No` indexing behavior option to disable indexing of a particular index-field.
+ In this case:
+ * No analyzer will process the field, and no terms will be generated from its content.
+ * The field will not be available for querying.
+ * The field will still be accessible for extraction when [projecting query results](../indexes/querying/projections.mdx).
+
+* This is useful when you need to [store the field data in the index](../indexes/storing-data-in-index.mdx) and only intend to use it for query projections.
+
+
+
+{`class BlogPosts_ByTitle extends AbstractIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.map = "docs.Posts.Select(post => new \{ " +
+                   "    tags = post.tags, " +
+                   "    content = post.content " +
+                   "\})";
+
+        // Set the Indexing Behavior:
+        // ==========================
+
+        // Set 'No' on index-field 'content'
+        this.index("content", "No");
+
+        // Set 'Yes' to store the original content of field 'content' in the index
+        this.store("content", "Yes");
+
+        // => No analyzer will process field 'content',
+        //    it will only be stored in the index.
+    \}
+\}
+`}
+
+
+
+
+
+## Creating custom analyzers
+
+* **Availability & file type**:
+ The custom analyzer you are referencing must be available to the RavenDB server instance.
+ You can create and add custom analyzers to RavenDB as `.cs` files.
+
+* **Scope**:
+ Custom analyzers can be defined as:
+
+ * **Database Custom Analyzers** - can only be used by indexes of the database where they are defined.
+ * **Server-Wide Custom Analyzers** - can be used by indexes on all databases across all servers in the cluster.
+
+ A database analyzer may have the same name as a server-wide analyzer.
+ In this case, the indexes of that database will use the database version of the analyzer.
+ You can think of database analyzers as overriding server-wide analyzers with the same names.
+
+* **Ways to create**:
+ There are three ways to create a custom analyzer and add it to your server:
+
+ 1. [Add custom analyzer via Studio](../indexes/using-analyzers.mdx#add-custom-analyzer-via-studio)
+ 2. [Add custom analyzer via Client API](../indexes/using-analyzers.mdx#add-custom-analyzer-via-client-api)
+ 3. [Add custom analyzer directly to RavenDB's binaries](../indexes/using-analyzers.mdx#add-custom-analyzer-directly-to-ravendbs-binaries)
+
+
+##### Add custom analyzer via Studio
+
+Custom analyzers can be added from the Custom Analyzers view in the Studio.
+Learn more in this [Custom analyzers](../studio/database/settings/custom-analyzers.mdx) article.
+
+
+
+
+##### Add custom analyzer via Client API
+
+First, create a class that inherits from the abstract `Lucene.Net.Analysis.Analyzer` class.
+(You need to reference `Lucene.Net.dll`, which is included with the RavenDB Server package.)
+For example:
+
+
+
+{`public class MyAnalyzer : Lucene.Net.Analysis.Analyzer
+\{
+    public override TokenStream TokenStream(string fieldName, TextReader reader)
+    \{
+        // Implement your analyzer's logic
+        throw new CodeOmitted();
+    \}
+\}
+`}
+
+
+
+Next, use `PutAnalyzersOperation` to deploy the analyzer to a specific database.
+By default, `PutAnalyzersOperation` will apply to the [default database](../client-api/setting-up-default-database.mdx) of the document store you're using.
+To target a different database, use the [forDatabase()](../client-api/operations/how-to/switch-operations-to-a-different-database.mdx) method.
+
+To make it a server-wide analyzer, use the `PutServerWideAnalyzersOperation` operation.
+
+
+
+{`await store.maintenance.send(new PutAnalyzersOperation(analyzerDefinition));
+`}
+
+
+
+
+{`await store.maintenance.send(new PutServerWideAnalyzersOperation(analyzerDefinition));
+`}
+
+
+
+
+{`const analyzerDefinition = \{
+    name: "analyzerName",
+    code: "code"
+\};
+`}
+
+
+
+| Parameter | Type | Description |
+|------------|----------|----------------------------------------------------------------------|
+| **name** | `string` | The class name of your custom analyzer, as defined in your code. |
+| **code** | `string` | Compilable C# code:
A class that inherits from `Lucene.Net.Analysis.Analyzer`,
including the containing namespace and the necessary `using` statements. |
+
+**Client API example**:
+
+
+
+{`const analyzerDefinition = \{
+    name: "MyAnalyzer",
+    code: "using System.IO;\\n" +
+          "using Lucene.Net.Analysis;\\n" +
+          "using Lucene.Net.Analysis.Standard;\\n" +
+          "\\n" +
+          "namespace MyAnalyzer\\n" +
+          "\{\\n" +
+          "    public class MyAnalyzer : Lucene.Net.Analysis.Analyzer\\n" +
+          "    \{\\n" +
+          "        public override TokenStream TokenStream(string fieldName, TextReader reader)\\n" +
+          "        \{\\n" +
+          "            throw new CodeOmitted();\\n" +
+          "        \}\\n" +
+          "    \}\\n" +
+          "\}\\n"
+\};
+
+await store.maintenance.send(new PutAnalyzersOperation(analyzerDefinition));
+`}
+
+
+
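+Once deployed, the custom analyzer can be referenced from an index definition by its class name, just like the built-in analyzers. A minimal sketch, reusing the hypothetical `MyAnalyzer` class from the example above (fully qualified, per the `MyAnalyzer` namespace it was declared in; the index class name is also hypothetical):
+
+{`class BlogPosts_ByContent_Custom extends AbstractIndexCreationTask \{
+    constructor() \{
+        super();
+
+        this.map = "docs.Posts.Select(post => new \{ " +
+                   "    content = post.content " +
+                   "\})";
+
+        // Reference the deployed custom analyzer by its fully qualified name
+        this.analyze("content", "MyAnalyzer.MyAnalyzer");
+    \}
+\}
+`}
+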
+
+
+##### Add custom analyzer directly to RavenDB's binaries
+
+Another way to add custom analyzers to RavenDB is by placing them next to RavenDB's binaries.
+
+The fully qualified name must be specified for any index-field that will be tokenized by the analyzer.
+
+Note that the analyzer must be compatible with .NET Core 2.0 (e.g., a .NET Standard 2.0 assembly).
+
+This is the only method for adding custom analyzers in RavenDB versions older than 5.2.
+
+
+
+
+## Viewing the indexed terms
+
+The terms generated for each index-field can be viewed in the Studio.
+
+![The index terms](./assets/index-terms-1.png)
+
+1. These are the index-fields.
+2. Click the "Terms" button to view the generated terms for each field.
+
+----
+
+![The index terms](./assets/index-terms-2.png)
+
+1. This is the "index-field name".
+2. These are the terms generated for the index-field.
+ In this example, the `StopAnalyzer` was used to tokenize the text.
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/_using-dynamic-fields-csharp.mdx b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-csharp.mdx
new file mode 100644
index 0000000000..30631bba09
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-csharp.mdx
@@ -0,0 +1,550 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In RavenDB, different documents can have different shapes.
+ Documents are schemaless - new fields can be added or removed as needed.
+
+* For such dynamic data, you can define indexes with **dynamic-index-fields**.
+
+* This allows querying the index on fields that aren't yet known at index creation time,
+ which is very useful when working on highly dynamic systems.
+
+* Any value type can be indexed: string, number, date, etc.
+
+* An index definition can contain both dynamic-index-fields and regular-index-fields.
+
+* In this page:
+
+ * [Indexing documents fields KEYS](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-keys)
+ * [Example - index any field under object](../indexes/using-dynamic-fields.mdx#example---index-any-field-under-object)
+ * [Example - index any field](../indexes/using-dynamic-fields.mdx#example---index-any-field)
+ * [Indexing documents fields VALUES](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-values)
+ * [Example - basic](../indexes/using-dynamic-fields.mdx#example---basic)
+ * [Example - list](../indexes/using-dynamic-fields.mdx#example---list)
+ * [CreateField syntax](../indexes/using-dynamic-fields.mdx#createfield-syntax)
+ * [Indexed fields & terms view](../indexes/using-dynamic-fields.mdx#indexed-fields--terms-view)
+
+
+
+## Indexing documents fields KEYS
+
+## Example - index any field under object
+
+
+
+* Index any field that is under some object in the document.
+* After the index is deployed, any new field added to this object will be indexed as well.
+
+
+* **The document**:
+
+
+{`public class Product
+\{
+    public string Id \{ get; set; \}
+
+    // The KEYS under the Attributes object will be dynamically indexed
+    // Fields added to this object after index creation time will also get indexed
+    public Dictionary<string, object> Attributes \{ get; set; \}
+\}
+`}
+
+
+
+
+{`// Sample document content
+\{
+    "Attributes": \{
+        "Color": "Red",
+        "Size": 42
+    \}
+\}
+`}
+
+
+
+* **The index**:
+ The below index will index any field under the `Attributes` object from the document;
+ a dynamic-index-field will be created for each such field.
+ New fields added to the object after index creation time will be dynamically indexed as well.
+
+ The actual dynamic-index-field name on which you can query will be the attribute field **key**.
+ E.g., Keys `Color` & `Size` will become the actual dynamic-index-fields.
+
+
+
+
+{`public class Products_ByAttributeKey : AbstractIndexCreationTask<Product>
+{
+    public Products_ByAttributeKey()
+    {
+        Map = products => from p in products
+            select new
+            {
+                // Call 'CreateField' to generate dynamic-index-fields from the Attributes object keys
+                // Using '_' is just a convention. Any other string can be used instead of '_'
+
+                // The actual field name will be item.Key
+                // The actual field terms will be derived from item.Value
+                _ = p.Attributes.Select(item => CreateField(item.Key, item.Value))
+            };
+    }
+}
+`}
+
+
+
+
+{`public class Products_ByAttributeKey_JS : AbstractJavaScriptIndexCreationTask
+{
+    public Products_ByAttributeKey_JS()
+    {
+        Maps = new HashSet<string>
+        {
+            @"map('Products', function (p) {
+                return {
+                    _: Object.keys(p.Attributes).map(key => createField(key, p.Attributes[key],
+                        { indexing: 'Search', storage: true, termVector: null }))
+                };
+            })"
+        };
+    }
+}
+`}
+
+
+
+
+
+* **The query**:
+ * You can now query the generated dynamic-index-fields.
+   The `_` property is not queryable; it is used only in the index definition syntax.
+ * To get all documents with some 'Size' use:
+
+
+
+{`IList<Product> matchingDocuments = session
+    .Advanced
+    .DocumentQuery<Product, Products_ByAttributeKey>()
+    // 'Size' is a dynamic-index-field that was indexed from the Attributes object
+    .WhereEquals("Size", 42)
+    .ToList();
+`}
+
+
+
+
+{`// 'Size' is a dynamic-index-field that was indexed from the Attributes object
+from index 'Products/ByAttributeKey' where Size = 42
+`}
+
+
+
+
+## Example - index any field
+
+
+
+ * Define an index on a collection **without** needing any common structure between the indexed documents.
+ * After the index is deployed, any new field added to the document will be indexed as well.
+
+
+
+
+Consider whether this is really necessary, as indexing every single field can end up costing time and disk space.
+
+
+* **The document**:
+
+
+{`public class Product
+\{
+    public string Id \{ get; set; \}
+
+    // All KEYS in the document will be dynamically indexed
+    // Fields added to the document after index creation time will also get indexed
+    public string FirstName \{ get; set; \}
+    public string LastName \{ get; set; \}
+    public string Title \{ get; set; \}
+    // ...
+\}
+`}
+
+
+
+
+
+{`// Sample document content
+\{
+    "FirstName": "John",
+    "LastName": "Doe",
+    "Title": "Engineer",
+    // ...
+\}
+`}
+
+
+
+* **The index**:
+ The below index will index any field from the document;
+ a dynamic-index-field will be created for each field.
+ New fields added to the document after index creation time will be dynamically indexed as well.
+
+ The actual dynamic-index-field name on which you can query will be the field **key**.
+ E.g., Keys `FirstName` & `LastName` will become the actual dynamic-index-fields.
+
+
+
+
+{`public class Products_ByAnyField_JS : AbstractJavaScriptIndexCreationTask
+{
+    public Products_ByAnyField_JS()
+    {
+        // This will index EVERY FIELD under the top level of the document
+        Maps = new HashSet<string>
+        {
+            @"map('Products', function (p) {
+                return {
+                    _: Object.keys(p).map(key => createField(key, p[key],
+                        { indexing: 'Search', storage: true, termVector: null }))
+                }
+            })"
+        };
+    }
+}
+`}
+
+
+
+
+* **The query**:
+ * To get all documents with some 'LastName' use:
+
+
+
+{`IList<Product> matchingDocuments = session
+    .Advanced
+    .DocumentQuery<Product, Products_ByAnyField_JS>()
+    // 'LastName' is a dynamic-index-field that was indexed from the document
+    .WhereEquals("LastName", "Doe")
+    .ToList();
+`}
+
+
+
+
+{`// 'LastName' is a dynamic-index-field that was indexed from the document
+from index 'Products/ByAnyField/JS' where LastName = "Doe"
+`}
+
+
+
+
+
+
+## Indexing documents fields VALUES
+
+## Example - basic
+
+
+
+* Only the **basic concept** of creating a dynamic-index-field from the **value** of a document field.
+* Documents can then be queried based on those indexed values.
+* For a more practical usage, see the [Example - list](../indexes/using-dynamic-fields.mdx#example---list) below.
+
+
+
+* **The document**:
+
+
+{`public class Product
+\{
+    public string Id \{ get; set; \}
+
+    // The VALUE of ProductType will be dynamically indexed
+    public string ProductType \{ get; set; \}
+    public int PricePerUnit \{ get; set; \}
+\}
+`}
+
+
+
+
+
+{`// Sample document content
+\{
+    "ProductType": "Electronics",
+    "PricePerUnit": 23
+\}
+`}
+
+
+
+* **The index**:
+ The below index will index the **value** of document field 'ProductType'.
+
+ This value will be the dynamic-index-field name on which you can query.
+ E.g., Field value `Electronics` will be the dynamic-index-field.
+
+
+
+
+{`public class Products_ByProductType : AbstractIndexCreationTask<Product>
+{
+    public Products_ByProductType()
+    {
+        Map = products => from p in products
+            select new
+            {
+                // Call 'CreateField' to generate the dynamic-index-fields
+                // The field name will be the value of document field 'ProductType'
+                // The field terms will be derived from document field 'PricePerUnit'
+                _ = CreateField(p.ProductType, p.PricePerUnit)
+            };
+    }
+}
+`}
+
+
+
+
+{`public class Products_ByProductType_JS : AbstractJavaScriptIndexCreationTask
+{
+    public Products_ByProductType_JS()
+    {
+        Maps = new HashSet<string>
+        {
+            @"map('Products', function (p) {
+                return {
+                    _: createField(p.ProductType, p.PricePerUnit,
+                        { indexing: 'Search', storage: true, termVector: null })
+                };
+            })"
+        };
+    }
+}
+`}
+
+
+
+
+* **The query**:
+ * To get all documents of some product type having a specific price per unit use:
+
+
+
+{`IList<Product> matchingDocuments = session
+    .Advanced
+    .DocumentQuery<Product, Products_ByProductType>()
+    // 'Electronics' is the dynamic-index-field that was indexed from document field 'ProductType'
+    .WhereEquals("Electronics", 23)
+    .ToList();
+`}
+
+
+
+
+{`// 'Electronics' is the dynamic-index-field that was indexed from document field 'ProductType'
+from index 'Products/ByProductType' where Electronics = 23
+`}
+
+
+
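+Other query predicates can be applied to the generated dynamic-index-field in the same way. For example, a hedged RQL sketch of a range query against the same index (the threshold value `20` is hypothetical):
+
+{`// 'Electronics' holds the numeric 'PricePerUnit' values, so range predicates apply
+from index 'Products/ByProductType' where Electronics > 20
+`}
+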
+## Example - list
+
+
+
+* Index **values** from items in a list.
+* After the index is deployed, any item added to this list in the document will be dynamically indexed as well.
+
+
+
+* **The document**:
+
+
+{`public class Product
+\{
+    public string Id \{ get; set; \}
+    public string Name \{ get; set; \}
+
+    // For each element in this list, the VALUE of property 'PropName' will be dynamically indexed
+    // e.g. Color, Width, Length (in ex. below) will become dynamic-index-fields
+    public List<Attribute> Attributes \{ get; set; \}
+\}
+
+public class Attribute
+\{
+    public string PropName \{ get; set; \}
+    public string PropValue \{ get; set; \}
+\}
+`}
+
+
+
+
+
+{`// Sample document content
+\{
+    "Name": "SomeName",
+    "Attributes": [
+        \{
+            "PropName": "Color",
+            "PropValue": "Blue"
+        \},
+        \{
+            "PropName": "Width",
+            "PropValue": "10"
+        \},
+        \{
+            "PropName": "Length",
+            "PropValue": "20"
+        \},
+        ...
+    ]
+\}
+`}
+
+
+
+* **The index**:
+ The below index will create a dynamic-index-field per item in the document's `Attributes` list.
+ New items added to the Attributes list after index creation time will be dynamically indexed as well.
+
+ The actual dynamic-index-field name on which you can query will be the item's PropName **value**.
+ E.g., 'PropName' value `Width` will be a dynamic-index-field.
+
+
+
+
+{`public class Attributes_ByName : AbstractIndexCreationTask<Product>
+{
+    public Attributes_ByName()
+    {
+        Map = products => from a in products
+            select new
+            {
+                // Define the dynamic-index-fields by calling CreateField
+                // A dynamic-index-field will be generated for each item in the Attributes list
+
+                // For each item, the field name will be the value of field 'PropName'
+                // The field terms will be derived from field 'PropValue'
+                _ = a.Attributes.Select(item => CreateField(item.PropName, item.PropValue)),
+
+                // A regular index field can be defined as well:
+                Name = a.Name
+            };
+    }
+}
+`}
+
+
+
+
+{`public class Attributes_ByName_JS : AbstractJavaScriptIndexCreationTask
+{
+    public Attributes_ByName_JS()
+    {
+        Maps = new HashSet<string>
+        {
+            @"map('Products', function (p) {
+                return {
+                    _: p.Attributes.map(item => createField(item.PropName, item.PropValue,
+                        { indexing: 'Search', storage: true, termVector: null })),
+                    Name: p.Name
+                };
+            })"
+        };
+    }
+}
+`}
+
+
+
+
+* **The query**:
+ * To get all documents matching a specific attribute property use:
+
+
+
+{`IList<Product> matchingDocuments = session
+    .Advanced
+    .DocumentQuery<Product, Attributes_ByName>()
+    // 'Width' is a dynamic-index-field that was indexed from the Attributes list
+    .WhereEquals("Width", 10)
+    .ToList();
+`}
+
+
+
+
+{`// 'Width' is a dynamic-index-field that was indexed from the Attributes list
+from index 'Attributes/ByName' where Width = 10
+`}
+
+
+
+
+
+
+## CreateField syntax
+
+#### Syntax for LINQ-index:
+
+
+
+{`object CreateField(string name, object value);
+
+object CreateField(string name, object value, bool stored, bool analyzed);
+
+object CreateField(string name, object value, CreateFieldOptions options);
+`}
+
+
+
+#### Syntax for JavaScript-index:
+
+
+
+{`createField(fieldName, fieldValue, options); // returns object
+`}
+
+
+
+| Parameters | Type | Description |
+|----------------|----------------------|------------------------------------------------|
+| **fieldName** | `string` | Name of the dynamic-index-field |
+| **fieldValue** | `object` | Value of the dynamic-index-field
The field Terms are derived from this value. | +| **stored** | `bool` | Sets [FieldStorage](../indexes/storing-data-in-index.mdx)

`false` - will set `FieldStorage.No` (default value)
`true` - will set `FieldStorage.Yes` |
+| **analyzed** | `bool` | Sets [FieldIndexing](../indexes/using-analyzers.mdx)

`null` - `FieldIndexing.Default` (default value)
`false` - `FieldIndexing.Exact`
`true` - `FieldIndexing.Search` | +| **options** | `CreateFieldOptions` | Dynamic-index-field options | + +| CreateFieldOptions | | | +|--------------------|--------------------|----------------------------------------------------------------------------| +| **Storage** | `FieldStorage?` | Learn about [storing data](../indexes/storing-data-in-index.mdx) in the index. | +| **Indexing** | `FieldIndexing?` | Learn about [using analyzers](../indexes/using-analyzers.mdx) in the index. | +| **TermVector** | `FieldTermVector?` | Learn about [term vectors](../indexes/using-term-vectors.mdx) in the index. | + + + +* All above examples have used the character `_` in the dynamic-index-field definition. + However, using `_` is just a convention. Any other string can be used instead. + +* This property is Not queryable, it is only used in the index definition syntax. + The actual dynamic-index-fields that are generated are defined by the `CreateField` method. + + + +## Indexed fields & terms view + +The generated dynamic-index-fields and their indexed terms can be viewed in the **Terms View**. +Below are sample index fields & their terms generated from the last example. + +![Figure 1. Go to terms view](./assets/dynamic-index-fields-1.png) + +![Figure 2. Indexed fields & terms](./assets/dynamic-index-fields-2.png) diff --git a/versioned_docs/version-7.1/indexes/_using-dynamic-fields-java.mdx b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-java.mdx new file mode 100644 index 0000000000..2866006674 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-java.mdx @@ -0,0 +1,480 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In RavenDB different documents can have different shapes. + Documents are schemaless - new fields can be added or removed as needed. + +* For such dynamic data, you can define indexes with **dynamic-index-fields**. + +* This allows querying the index on fields that aren't yet known at index creation time, + which is very useful when working on highly dynamic systems. + +* Any value type can be indexed, string, number, date, etc. + +* An index definition can contain both dynamic-index-fields and regular-index-fields. + +* In this page: + + * [Indexing documents fields KEYS](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-keys) + * [Example - index any field under object](../indexes/using-dynamic-fields.mdx#example---index-any-field-under-object) + * [Example - index any field](../indexes/using-dynamic-fields.mdx#example---index-any-field) + * [Indexing documents fields VALUES](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-values) + * [Example - basic](../indexes/using-dynamic-fields.mdx#example---basic) + * [Example - list](../indexes/using-dynamic-fields.mdx#example---list) + * [CreateField syntax](../indexes/using-dynamic-fields.mdx#createfield-syntax) + * [Indexed fields & terms view](../indexes/using-dynamic-fields.mdx#indexed-fields--terms-view) + + + +## Indexing documents fields KEYS + +## Example - index any field under object + + + +* Index any field that is under the some object from the document. +* After index is deployed, any new field added to the this object will be indexed as well. 
+ + + +* **The document**: + + +{`public class Product \{ + private String id; + + // The KEYS under the attributes object will be dynamically indexed + // Fields added to this object after index creation time will also get indexed + private Dictionary attributes; + + // get + set implementation ... +\} +`} + + + + +{`// Sample document content +\{ + "attributes": \{ + "color": "Red", + "size": 42 + \} +\} +`} + + + +* **The index**: + The following index will index any field under the `attributes` object from the document, + a dynamic-index-field will be created for each such field. + New fields added to the object after index creation time will be dynamically indexed as well. + + The actual dynamic-index-field name on which you can query will be the attribute field **key**. + e.g. Keys `color` & `size` will become the actual dynamic-index-fields. + + + + +{`public class Products_ByAttributeKey_JS extends AbstractJavaScriptIndexCreationTask { + public Products_ByAttributeKey_JS() { + // Call 'createField' to generate dynamic-index-fields from the attributes object keys + // Using '_' is just a convention. Any other string can be used instead of '_' + + // The actual field name will be the key + // The actual field terms will be derived from p.attributes[key] + setMaps(Sets.newHashSet( + "map('Products', function (p) { " + + " return { " + + " _: Object.keys(p.attributes).map(key => createField(key, p.attributes[key], " + + " { indexing: 'Search', storage: false, termVector: null })) " + + " }; " + + "}) " + )); + } +} +`} + + + + +* **The query**: + * You can now query the generated dynamic-index fields. + Property `_` is Not queryable, it is only used in the index definition syntax. + * To get all documents with some 'size' use: + + + +{`List matchingDocuments = session + .query(Product.class, Products_ByAttributeKey_JS.class) + .whereEquals("size", 42) + .toList(); +`} + + + + +{`// 'size' is a dynamic-index-field that was indexed from the Attributes object +from index 'Products/ByAttributeKey/JS' where size = 42 +`} + + + + +## Example - index any field + + + +* Define an index on a collection **without** needing any common structure between the indexed documents. +* After index is deployed, any new field added to the document will be indexed as well. + + + + +Consider if that is a true necessity, as indexing every single field can end up costing time and disk space. + + +* **The document**: + + +{`public class Product \{ + private String id; + + // All KEYS in the document will be dynamically indexed + // Fields added to the document after index creation time will also get indexed + private String firstName; + private String lastName; + private String title; + // ... + + // get + set implementation ... +\} +`} + + + + + +{`// Sample document content +\{ + "firstName": "John", + "lastName": "Doe", + "title": "Engineer", + // ... +\} +`} + + + +* **The index**: + The following index will index any field from the document, + a dynamic-index-field will be created for each field. + New fields added to the document after index creation time will be dynamically indexed as well. + + The actual dynamic-index-field name on which you can query will be the field **key**. + E.g., Keys `firstName` & `lastName` will become the actual dynamic-index-fields. 
+ + + + +{`public class Products_ByAnyField_JS extends AbstractJavaScriptIndexCreationTask { + public Products_ByAnyField_JS() { + + // This will index EVERY FIELD under the top level of the document + setMaps(Sets.newHashSet( + "map('Products', function (p) { " + + " return { " + + " _: Object.keys(p).map(key => createField(key, p[key], " + + " { indexing: 'Search', storage: true, termVector: null })) " + + " }; " + + "}) " + )); + } +} +`} + + + + +* **The query**: + * To get all documents with some 'lastName' use: + + + +{`List matchingDocuments = session + .query(Product.class, Products_ByAnyField_JS.class) + .whereEquals("lastName", "Doe") + .toList(); +`} + + + + +{`// 'lastName' is a dynamic-index-field that was indexed from the document +from index 'Products/ByAnyField/JS' where lastName = "Doe" +`} + + + + + + +## Indexing documents fields VALUES + +## Example - basic + + + +* Only the **basic concept** of creating a dynamic-index-field from the **value** of a document field. +* Documents can then be queried based on those indexed values. +* For a more practical usage see the [Example](../indexes/using-dynamic-fields.mdx#example---index-a-list-of-properties) below. + + + +* **The document**: + + +{`public class Product \{ + private String id; + + // The VALUE of productType will be dynamically indexed + private String productType; + private int pricePerUnit; + + // get + set implementation ... +\} +`} + + + + + +{`// Sample document content +\{ + "productType": "Electronics", + "pricePerUnit": 23 +\} +`} + + + +* **The index**: + The following index will index the **value** of document field 'productType'. + + This value will be the dynamic-index-field name on which you can query. + e.g. Field value `Electronics` will be the dynamic-index-field. + + + + +{`public class Products_ByProductType extends AbstractIndexCreationTask { + public Products_ByProductType() { + + // The field name will be the value of document field 'productType' + // The field terms will be derived from document field 'pricePerUnit' + map = "docs.Products.Select(p => new { " + + " _ = this.CreateField(p.productType, p.pricePerUnit) " + + "})"; + } +} +`} + + + + +* **The query**: + * To get all documents of some product type having a specific price per unit use: + + + +{`List matchingDocuments = session + .query(Product.class, Products_ByProductType.class) + .whereEquals("Electronics", 23) + .toList(); +`} + + + + +{`// 'Electronics' is the dynamic-index-field that was indexed from document field 'productType' +from index 'Products/ByProductType' where Electronics = 23 +`} + + + + +## Example - list + + + +* Index **values** from items in a list +* After index is deployed, any item added this list in the document will be dynamically indexed as well. + + + +* **The document**: + + +{`public class Product \{ + private String id; + private String name; + + // For each element in this list, the VALUE of property 'propName' will be dynamically indexed + // e.g. Color, Width, Length (in ex. below) will become dynamic-index-fields + private List attributes; + + // get + set implementation ... +\} + +public class Attribute \{ + private String propName; + private String propValue; + + // get + set implementation ... +\} +`} + + + + + +{`// Sample document content +\{ +name": "SomeName", +attributes": [ + \{ + "propName": "Color", + "propValue": "Blue" + \}, + \{ + "propName": "Width", + "propValue": "10" + \}, + \{ + "propName": "Length", + "propValue": "20" + \}, + ... 
+    ]
+\}
+`}
+
+
+
+* **The index**:
+ The following index will create a dynamic-index-field per item in the document's `attributes` list.
+ New items added to the attributes list after index creation time will be dynamically indexed as well.
+
+ The actual dynamic-index-field name on which you can query will be the item's propName **value**.
+ E.g., 'propName' value `Width` will be a dynamic-index-field.
+
+
+
+
+{`public class Attributes_ByName extends AbstractIndexCreationTask {
+    public Attributes_ByName() {
+
+        // For each attribute item, the field name will be the value of field 'propName'
+        // The field terms will be derived from field 'propValue'
+        // A regular-index-field (Name) is defined as well
+        map =
+            "docs.Products.Select(p => new { " +
+            "    _ = p.attributes.Select(item => this.CreateField(item.propName, item.propValue)), " +
+            "    Name = p.name " +
+            "})";
+    }
+}
+`}
+
+
+
+
+* **The query**:
+ * To get all documents matching a specific attribute property use:
+
+
+
+{`List<Product> matchingDocuments = session
+    .query(Product.class, Attributes_ByName.class)
+    .whereEquals("Width", 10)
+    .toList();
+`}
+
+
+
+
+{`// 'Width' is a dynamic-index-field that was indexed from the attributes list
+from index 'Attributes/ByName' where Width = 10
+`}
+
+
+
+
+
+
+## CreateField syntax
+
+#### Syntax for LINQ-index:
+
+
+
+{`object CreateField(string name, object value);
+
+object CreateField(string name, object value, bool stored, bool analyzed);
+
+object CreateField(string name, object value, CreateFieldOptions options);
+`}
+
+
+
+#### Syntax for JavaScript-index:
+
+
+
+{`createField(fieldName, fieldValue, options); // returns object
+`}
+
+
+
+| Parameters | Type | Description |
+|----------------|----------------------|------------------------------------------------|
+| **fieldName** | `string` | Name of the dynamic-index-field |
+| **fieldValue** | `object` | Value of the dynamic-index-field
The field Terms are derived from this value. | +| **stored** | `bool` | Sets [FieldStorage](../indexes/storing-data-in-index.mdx)

`false` - will set `FieldStorage.No` (default value)
`true` - will set `FieldStorage.Yes` |
+| **analyzed** | `bool` | Sets [FieldIndexing](../indexes/using-analyzers.mdx)

`null` - `FieldIndexing.Default` (default value)
`false` - `FieldIndexing.Exact`
`true` - `FieldIndexing.Search` | +| **options** | `CreateFieldOptions` | Dynamic-index-field options | + +| CreateFieldOptions | | | +|--------------------|--------------------|----------------------------------------------------------------------------| +| **Storage** | `FieldStorage?` | Learn about [storing data](../indexes/storing-data-in-index.mdx) in the index. | +| **Indexing** | `FieldIndexing?` | Learn about [using analyzers](../indexes/using-analyzers.mdx) in the index. | +| **TermVector** | `FieldTermVector?` | Learn about [term vectors](../indexes/using-term-vectors.mdx) in the index. | + + + +* All above examples have used the character `_` in the dynamic-index-field definition. + However, using `_` is just a convention. Any other string can be used instead. + +* This property is Not queryable, it is only used in the index definition syntax. + The actual dynamic-index-fields that are generated are defined by the `CreateField` method. + + + + + +## Indexed fields & terms view + +The generated dynamic-index-fields and their indexed terms can be viewed in the **Terms View**. +Below are sample index fields & their terms generated from the last example. + +![Figure 1. Go to terms view](./assets/dynamic-index-fields-1.png) + +![Figure 2. Indexed fields & terms](./assets/dynamic-index-fields-2.png) + + + + diff --git a/versioned_docs/version-7.1/indexes/_using-dynamic-fields-nodejs.mdx b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-nodejs.mdx new file mode 100644 index 0000000000..9bda34e83c --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-nodejs.mdx @@ -0,0 +1,556 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In RavenDB different documents can have different shapes. + Documents are schemaless - new fields can be added or removed as needed. + +* For such dynamic data, you can define indexes with **dynamic-index-fields**. + +* This allows querying the index on fields that aren't yet known at index creation time, + which is very useful when working on highly dynamic systems. + +* Any value type can be indexed, string, number, date, etc. + +* An index definition can contain both dynamic-index-fields and regular-index-fields. + +* In this page: + + * [Indexing documents fields KEYS](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-keys) + * [Example - index any field under object](../indexes/using-dynamic-fields.mdx#example---index-any-field-under-object) + * [Example - index any field](../indexes/using-dynamic-fields.mdx#example---index-any-field) + * [Indexing documents fields VALUES](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-values) + * [Example - basic](../indexes/using-dynamic-fields.mdx#example---basic) + * [Example - list](../indexes/using-dynamic-fields.mdx#example---list) + * [CreateField syntax](../indexes/using-dynamic-fields.mdx#createfield-syntax) + * [Indexed fields & terms view](../indexes/using-dynamic-fields.mdx#indexed-fields--terms-view) + + + +## Indexing documents fields KEYS + + +#### Example - index any field under object +The following allows you to: + +* Index any field that is under the some object from the document. +* After index is deployed, any new field added to the this object will be indexed as well. 
+**The document**: + + +{`class Product \{ + constructor(id, attributes) \{ + this.id = id; + + // The KEYS under the attributes object will be dynamically indexed + // Fields added to this object after index creation time will also get indexed + this.attributes = attributes; + \} +\} +`} + + + + + +{`// Sample document content +\{ + "attributes": \{ + "color": "Red", + "size": 42 + \} +\} +`} + + + +**The index**: + +* The following index will index any field under the `attributes` object from the document, + a dynamic-index-field will be created for each such field. + New fields added to the object after index creation time will be dynamically indexed as well. + +* The actual dynamic-index-field name on which you can query will be the attribute field **key**. + e.g. Keys `color` & `size` will become the actual dynamic-index-fields. + + + + +{`class Products_ByAttributeKey_JS extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + const { createField } = this.mapUtils(); + + this.map("Products", p => { + return { + // Call 'createField' to generate dynamic-index-fields from the attributes object keys + // Using '_' is just a convention. Any other string can be used instead of '_' + + // The actual field name will be the key + // The actual field terms will be derived from p.attributes[key] + _: Object.keys(p.attributes).map(key => createField(key, p.attributes[key], { + indexing: "Search", + storage: false, + termVector: null + })) + }; + }); + } +} +`} + + + + +**The query**: + +* You can now query the generated dynamic-index fields. + Property `_` is Not queryable, it is only used in the index definition syntax. + +* To get all documents with some 'size' use: + + + + +{`const matchingDocuments = session.query({indexName: 'Products_ByAttributeKey'}) + // 'size' is a dynamic-index-field that was indexed from the attributes object + .whereEquals('size', 42) + .all(); +`} + + + + +{`// 'size' is a dynamic-index-field that was indexed from the attributes object +from index 'Products/ByAttributeKey' where size = 42 +`} + + + + + + + +#### Example - index any field +The following allows you to: + +* Define an index on a collection **without** needing any common structure between the indexed documents. +* After index is deployed, any new field added to the document will be indexed as well. + + + +Consider if that is a true necessity, as indexing every single field can end up costing time and disk space. + + +**The document**: + + +{`class Product \{ + constructor(id, firstName, lastName, title) \{ + this.id = id; + + // All KEYS in the document will be dynamically indexed + // Fields added to the document after index creation time will also get indexed + this.firstName = firstName; + this.lastName = lastName; + this.title = title; + // ... + \} +\} +`} + + + + + +{`// Sample document content +\{ + "firstName": "John", + "lastName": "Doe", + "title": "Engineer", + // ... +\} +`} + + + +**The index**: + +* The following index will index any field from the document, + a dynamic-index-field will be created for each field. + New fields added to the document after index creation time will be dynamically indexed as well. + +* The actual dynamic-index-field name on which you can query will be the field **key**. + e.g. Keys `firstName` & `lastName` will become the actual dynamic-index-fields. 
+ + + + +{`class Products_ByAnyField_JS extends AbstractJavaScriptIndexCreationTask { + constructor () { + super(); + + const { createField } = this.mapUtils(); + + this.map("Products", p => { + return { + // This will index EVERY FIELD under the top level of the document + _: Object.keys(p).map(key => createField(key, p[key], { + indexing: "Search", + storage: true, + termVector: null + })) + }; + }); + } +} +`} + + + + +**The query**: + +* To get all documents with some 'lastName' use: + + + + +{`const matchingDocuments = session.query({ indexName: 'Products_ByAnyField_JS' }) + // 'lastName' is a dynamic-index-field that was indexed from the document + .whereEquals('lastName', 'Doe') + .all(); +`} + + + + +{`// 'lastName' is a dynamic-index-field that was indexed from the document +from index 'Products/ByAnyField/JS' where lastName = "Doe" +`} + + + + + + + + +## Indexing documents fields VALUES + + +#### Example - basic +This example shows: + +* Only the **basic concept** of creating a dynamic-index-field from the **value** of a document field. +* Documents can then be queried based on those indexed values. +* For a more practical usage see the [Example](../indexes/using-dynamic-fields.mdx#example---index-a-list-of-properties) below. +**The document**: + + +{`class Product \{ + constructor(id, productType, pricePerUnit) \{ + this.id = id; + + // The VALUE of productType will be dynamically indexed + this.productType = productType; + this.pricePerUnit = pricePerUnit; + \} +\} +`} + + + + + +{`// Sample document content +\{ + "productType": "Electronics", + "pricePerUnit": 23 +\} +`} + + + +**The index**: + +* The following index will index the **value** of document field 'productType'. + +* This value will be the dynamic-index-field name on which you can query. + e.g. Field value `Electronics` will be the dynamic-index-field. + + + + +{`class Products_ByProductType extends AbstractCsharpIndexCreationTask { + constructor () { + super(); + + // The field name will be the value of document field 'productType' + // The field terms will be derived from document field 'pricePerUnit' + this.map = "docs.Products.Select(p => new { " + + " _ = this.CreateField(p.productType, p.pricePerUnit) " + + "})"; + } +} +`} + + + + +{`class Products_ByProductType_JS extends AbstractJavaScriptIndexCreationTask { + constructor () { + super(); + + const { createField } = this.mapUtils(); + + this.map("Products", p => { + return { + _: [ + // The field name will be the value of document field 'productType' + // The field terms will be derived from document field 'pricePerUnit' + createField(p.productType, p.pricePerUnit, { + indexing: "Search", + storage: false, + termVector: null + }) + ] + }; + }); + } +} +`} + + + + +**The query**: + +* To get all documents of some product type having a specific price per unit use: + + + + +{`const matchingDocuments = session.query({ indexName: 'Products_ByProductType' }) + // 'Electronics' is the dynamic-index-field that was indexed from document field 'productType' + .whereEquals('Electronics', 23) + .all(); +`} + + + + +{`// 'Electronics' is the dynamic-index-field that was indexed from document field 'productType' +from index 'Products/ByProductType' where Electronics = 23 +`} + + + + + + + +#### Example - list +The following allows you to: + +* Index **values** from items in a list +* After index is deployed, any item added this list in the document will be dynamically indexed as well. 
+**The document**: + + +{`class Product \{ + constructor(id, name, attributes) \{ + this.id = id; + this.name = name; + + // For each element in this list, the VALUE of property 'propName' will be dynamically indexed + // e.g. Color, Width, Length (in ex. below) will become dynamic-index-fields + this.attributes = attributes; + \} +\} + +class Attribute \{ + constructor(propName, propValue) \{ + this.propName = propName; + this.propValue = propValue; + \} +\} +`} + + + + + +{`// Sample document content +\{ + "name": "SomeName", + "attributes": [ + \{ + "propName": "Color", + "propValue": "Blue" + \}, + \{ + "propName": "Width", + "propValue": "10" + \}, + \{ + "propName": "Length", + "propValue": "20" + \}, + ... + ] +\} +`} + + + +**The index**: + +* The following index will create a dynamic-index-field per item in the document's `attributes` list. + New items added to the attributes list after index creation time will be dynamically indexed as well. + +* The actual dynamic-index-field name on which you can query will be the item's propName **value**. + e.g. 'propName' value `Width` will be a dynamic-index-field. + + + + +{`class Attributes_ByName extends AbstractCsharpIndexCreationTask +{ + constructor () { + super(); + + // For each attribute item, the field name will be the value of field 'propName' + // The field terms will be derived from field 'propValue' + // A regular-index-field (Name) is defined as well + this.map = + "docs.Products.Select(p => new { " + + " _ = p.attributes.Select(item => this.CreateField(item.propName, item.propValue)), " + + " Name = p.name " + + "})"; + } +} +`} + + + + +{`class Attributes_ByName_JS extends AbstractJavaScriptIndexCreationTask { + constructor () { + super(); + + const { createField } = this.mapUtils(); + + this.map("Products", p => { + return { + // For each item, the field name will be the value of field 'propName' + // The field terms will be derived from field 'propValue' + _: p.attributes.map(item => createField(item.propName, item.propValue, { + indexing: "Search", + storage: true, + termVector: null + })), + + // A regular-index-field can be defined as well: + Name: p.name + }; + }); + } +} +`} + + + + +**The query**: + +* To get all documents matching a specific attribute property use: + + + + +{`const matchingDocuments = session.query({ indexName: 'Attributes/ByName' }) + // 'Width' is a dynamic-index-field that was indexed from the attributes list + .whereEquals('Width', 10) + .all(); +`} + + + + +{`// 'Width' is a dynamic-index-field that was indexed from the attributes list +from index 'Attributes/ByName' where Width = 10 +`} + + + + + + + +## CreateField syntax + +#### Syntax for LINQ-index: + + + +{`object CreateField(string name, object value); + +object CreateField(string name, object value, bool stored, bool analyzed); + +object CreateField(string name, object value, CreateFieldOptions options); +`} + + + +#### Syntax for JavaScript-index: + + + +{`createField(fieldName, fieldValue, options); // returns object +`} + + + +| Parameters | Type | Description | +|----------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **fieldName** | `string` | Name of the dynamic-index-field | +| **fieldValue** | `object` | Value of the dynamic-index-field
The field Terms are derived from this value. | +| **stored** | `bool` | Sets [FieldStorage](../indexes/storing-data-in-index.mdx)

`false` - will set `FieldStorage.No` (default value)
`true` - will set `FieldStorage.Yes` |
+| **analyzed** | `bool` | Sets [FieldIndexing](../indexes/using-analyzers.mdx)

`null` - `FieldIndexing.Default` (default value)
`false` - `FieldIndexing.Exact`
`true` - `FieldIndexing.Search` | +| **options** | `CreateFieldOptions` | Dynamic-index-field options | + +| CreateFieldOptions | | | +|--------------------|--------------------|----------------------------------------------------------------------------| +| **Storage** | `FieldStorage?` | Learn about [storing data](../indexes/storing-data-in-index.mdx) in the index. | +| **Indexing** | `FieldIndexing?` | Learn about [using analyzers](../indexes/using-analyzers.mdx) in the index. | +| **TermVector** | `FieldTermVector?` | Learn about [term vectors](../indexes/using-term-vectors.mdx) in the index. | + + + +* All above examples have used the character `_` in the dynamic-index-field definition. + However, using `_` is just a convention. Any other string can be used instead. + +* This property is Not queryable, it is only used in the index definition syntax. + The actual dynamic-index-fields that are generated are defined by the `CreateField` method. + + + + + +## Indexed fields & terms view + +The generated dynamic-index-fields and their indexed terms can be viewed in the **Terms View**. +Below are sample index fields & their terms generated from the last example. + +![Figure 1. Go to terms view](./assets/dynamic-index-fields-1.png) + +![Figure 2. Indexed fields & terms](./assets/dynamic-index-fields-2.png) + + + + diff --git a/versioned_docs/version-7.1/indexes/_using-dynamic-fields-php.mdx b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-php.mdx new file mode 100644 index 0000000000..d57827b1a7 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-php.mdx @@ -0,0 +1,560 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* In RavenDB different documents can have different shapes. + Documents are schemaless - new fields can be added or removed as needed. + +* For such dynamic data, you can define indexes with **dynamic-index-fields**. + +* This allows querying the index on fields that aren't yet known at index creation time, + which is very useful when working on highly dynamic systems. + +* Any value type can be indexed, string, number, date, etc. + +* An index definition can contain both dynamic-index-fields and regular-index-fields. + +* In this page: + + * [Indexing documents fields KEYS](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-keys) + * [Example - index any field under object](../indexes/using-dynamic-fields.mdx#example---index-any-field-under-object) + * [Example - index any field](../indexes/using-dynamic-fields.mdx#example---index-any-field) + * [Indexing documents fields VALUES](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-values) + * [Example - basic](../indexes/using-dynamic-fields.mdx#example---basic) + * [Example - list](../indexes/using-dynamic-fields.mdx#example---list) + * [CreateField syntax](../indexes/using-dynamic-fields.mdx#createfield-syntax) + * [Indexed fields & terms view](../indexes/using-dynamic-fields.mdx#indexed-fields--terms-view) + + + +## Indexing documents fields KEYS + +## Example - index any field under object + + + +* Index any field that is under the some object from the document. +* After index is deployed, any new field added to the this object will be indexed as well. 
+ + + +* **The document**: + + +{`use Ds\\Map as DSMap; + +class Product +\{ + private ?string $id = null; + + // The KEYS under the Attributes object will be dynamically indexed + // Fields added to this object after index creation time will also get indexed + public ?DSMap $attributes = null; +\} +`} + + + + +{`// Sample document content +\{ + "Attributes": \{ + "Color": "Red", + "Size": 42 + \} +\} +`} + + + +* **The index**: + The below index will index any field under the `Attributes` object from the document, + a dynamic-index-field will be created for each such field. + New fields added to the object after index creation time will be dynamically indexed as well. + + The actual dynamic-index-field name on which you can query will be the attribute field **key**. + E.g., Keys `Color` & `Size` will become the actual dynamic-index-fields. + + + + +{`class Products_ByAttributeKey extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "from p in docs.Products select new {" . + "_ = p.attributes.Select(item => CreateField(item.Key, item.Value))" . + "}"; + } +} +`} + + + + +{`class Products_ByAttributeKey_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('Products', function (p) { " . + " return { " . + " _: Object.keys(p.attributes).map(key => createField(key, p.attributes[key], " . + " { indexing: 'Search', storage: false, termVector: null })) " . + " }; " . + "}) " + ]); + } +} +`} + + + + + +* **The query**: + * You can now query the generated dynamic-index fields. + * To get all documents with some 'size' use: + + + +{`$matchingDocuments = $session + ->advanced() + ->documentQuery(Product::class, Products_ByAttributeKey::class) + // 'Size' is a dynamic-index-field that was indexed from the Attributes object + ->whereEquals("Size", 42) + ->toList(); +`} + + + + +{`// 'Size' is a dynamic-index-field that was indexed from the Attributes object +from index 'Products/ByAttributeKey' where Size = 42 +`} + + + + +## Example - index any field + + + + * Define an index on a collection **without** needing any common structure between the indexed documents. + * After index is deployed, any new field added to the document will be indexed as well. + + + + +Consider whether this is really necessary, as indexing every single field can end up costing time and disk space. + + +* **The document**: + + +{`class Product +\{ + private ?string $id = null; + + // All KEYS in the document will be dynamically indexed + // Fields added to the document after index creation time will also get indexed + public ?string $firstName = null; + public ?string $lastName = null; + public ?string $title = null; + + // ... getters and setters +\} +`} + + + + + +{`// Sample document content + \{ + "FirstName": "John", + "LastName": "Doe", + "Title": "Engineer", + // ... +\} +`} + + + +* **The index**: + The below index will index any field from the document, + a dynamic-index-field will be created for each field. + New fields added to the document after index creation time will be dynamically indexed as well. + + The actual dynamic-index-field name on which you can query will be the field **key**. + E.g., Keys `FirstName` & `LastName` will become the actual dynamic-index-fields. 
+ + + + +{`class Products_ByAnyField_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // This will index EVERY FIELD under the top level of the document + $this->setMaps([ + "map('Products', function (p) { + return { + _: Object.keys(p).map(key => createField(key, p[key], + { indexing: 'Search', storage: true, termVector: null })) + } + })" + ]); + } +} +`} + + + + +* **The query**: + * To get all documents with some 'LastName' use: + + + +{`$matchingDocuments = $session + ->advanced() + ->documentQuery(Product::class, Products_ByAnyField_JS::class) + // 'LastName' is a dynamic-index-field that was indexed from the document + ->whereEquals("LastName", "Doe") + ->toList(); +`} + + + + +{`// 'LastName' is a dynamic-index-field that was indexed from the document +from index 'Products/ByAnyField/JS' where LastName = "Doe" +`} + + + + + + +## Indexing documents fields VALUES + +## Example - basic + + + +* Only the **basic concept** of creating a dynamic-index-field from the **value** of a document field. +* Documents can then be queried based on those indexed values. + + + +* **The document**: + + +{`class Product +\{ + public ?string $id = null; + + // The VALUE of ProductType will be dynamically indexed + public ?string $productType = null; + public ?int $pricePerUnit = null; + + // ... getters and setters +\} +`} + + + + + +{`// Sample document content +\{ + "ProductType": "Electronics", + "PricePerUnit": 23 +\} +`} + + + +* **The index**: + The below index will index the **value** of document field 'ProductType'. + + This value will be the dynamic-index-field name on which you can query. + E.g., Field value `Electronics` will be the dynamic-index-field. + + + + +{`class Products_ByProductType extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // Call 'CreateField' to generate the dynamic-index-fields + // The field name will be the value of document field 'ProductType' + // The field terms will be derived from document field 'PricePerUnit' + $this->map = "docs.Products.Select(p => new { " . + " _ = this.CreateField(p.productType, p.pricePerUnit) " . + "})"; + } +} +`} + + + + +{`class Products_ByProductType_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('Products', function (p) { + return { + _: createField(p.ProductType, p.PricePerUnit, + { indexing: 'Search', storage: true, termVector: null }) + }; + })" + ]); + } +} +`} + + + + +* **The query**: + * To get all documents of some product type having a specific price per unit use: + + + +{`$matchingDocuments = $session + ->advanced() + ->documentQuery(Product::class, Products_ByProductType::class) +// 'Electronics' is the dynamic-index-field that was indexed from document field 'ProductType' +->whereEquals("Electronics", 23) +->toList(); +`} + + + + +{`// 'Electronics' is the dynamic-index-field that was indexed from document field 'ProductType' +from index 'Products/ByProductType' where Electronics = 23 +`} + + + + +## Example - list + + + +* Index **values** from items in a list +* After index is deployed, any item added this list in the document will be dynamically indexed as well. + + + +* **The document**: + + +{`class Product +\{ + public ?string $id = null; + public ?string $name = null; + + // For each element in this list, the VALUE of property 'PropName' will be dynamically indexed + // e.g. Color, Width, Length (in ex. 
below) will become dynamic-index-fields + public ?AttributeList $attributes = null; + + // ... getters and setters +\} + +class Attribute +\{ + public ?string $propName = null; + public ?string $propValue = null; + + // ... getters and setters +\} + +class AttributeList extends TypedList +\{ + protected function __construct() + \{ + parent::__construct(Attribute::class); + \} +\} +`} + + + + + +{`// Sample document content +\{ +Name": "SomeName", +Attributes": [ + \{ + "PropName": "Color", + "PropValue": "Blue" + \}, + \{ + "PropName": "Width", + "PropValue": "10" + \}, + \{ + "PropName": "Length", + "PropValue": "20" + \}, + ... + +\} +`} + + + +* **The index**: + The below index will create a dynamic-index-field per item in the document's `Attributes` list. + New items added to the Attributes list after index creation time will be dynamically indexed as well. + + The actual dynamic-index-field name on which you can query will be the item's PropName **value**. + E.g., 'PropName' value `width` will be a dynamic-index-field. + + + + +{`class Attributes_ByName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // Define the dynamic-index-fields by calling CreateField + // A dynamic-index-field will be generated for each item in the Attributes list + + // For each item, the field name will be the value of field 'PropName' + // The field terms will be derived from field 'PropValue' + + $this->map = + "docs.Products.Select(p => new { " . + " _ = p.attributes.Select(item => this.CreateField(item.propName, item.propValue)), " . + " Name = p.name " . + "})"; + } +} +`} + + + + +{`class Attributes_ByName_JS extends AbstractJavaScriptIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->setMaps([ + "map('Products', function (p) { + return { + _: p.Attributes.map(item => createField(item.PropName, item.PropValue, + { indexing: 'Search', storage: true, termVector: null })), + Name: p.Name + }; + })" + ]); + } +} +`} + + + + +* **The query**: + To get all documents matching a specific attribute property use: + + + +{`/** @var array $matchingDocuments */ +$matchingDocuments = $session + ->advanced() + ->documentQuery(Product::class, Attributes_ByName::class) + // 'Width' is a dynamic-index-field that was indexed from the Attributes list + ->whereEquals("Width", 10) + ->toList(); +`} + + + + +{`// 'Width' is a dynamic-index-field that was indexed from the Attributes list +from index 'Attributes/ByName' where Width = 10 +`} + + + + + + +## CreateField syntax + + + +{`object CreateField(string name, object value); + +object CreateField(string name, object value, bool stored, bool analyzed); + +object CreateField(string name, object value, CreateFieldOptions options); +`} + + + +| Parameters | Type | Description | +|------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **name** | `string` | Name of the dynamic-index-field | +| **value** | `object` | Value of the dynamic-index-field
The field terms are derived from this value. |
| **stored** | `bool` | Sets [FieldStorage](../indexes/storing-data-in-index.mdx) <br/> `False` - will set `FieldStorage.NO` (default value) <br/> `True` - will set `FieldStorage.YES` |
| **analyzed** | `bool` | Sets [FieldIndexing](../indexes/using-analyzers.mdx) <br/> `None` - `FieldIndexing.Default` (default value) <br/> `False` - `FieldIndexing.Exact` <br/> `True` - `FieldIndexing.Search` |
| **options** | `CreateFieldOptions` | Dynamic-index-field options |

| CreateFieldOptions | | |
|--------------------|--------------------|----------------------------------------------------------------------------|
| **Storage** | `FieldStorage` | Learn about [storing data](../indexes/storing-data-in-index.mdx) in the index. |
| **Indexing** | `FieldIndexing` | Learn about [using analyzers](../indexes/using-analyzers.mdx) in the index. |
| **TermVector** | `FieldTermVector` | Learn about [term vectors](../indexes/using-term-vectors.mdx) in the index. |



* All above examples have used the character `_` in the dynamic-index-field definition.
  However, using `_` is just a convention. Any other string can be used instead.

* This property is not queryable; it is only used in the index definition syntax.
  The actual dynamic-index-fields that are generated are defined by the `CreateField` method.





## Indexed fields & terms view

The generated dynamic-index-fields and their indexed terms can be viewed in the **Terms View**.
Below are sample index fields & their terms generated from the last example.

![Figure 1. Go to terms view](./assets/dynamic-index-fields-1.png)

![Figure 2. Indexed fields & terms](./assets/dynamic-index-fields-2.png)




diff --git a/versioned_docs/version-7.1/indexes/_using-dynamic-fields-python.mdx b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-python.mdx
new file mode 100644
index 0000000000..ffbfebfff6
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_using-dynamic-fields-python.mdx
@@ -0,0 +1,512 @@
import Admonition from '@theme/Admonition';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import CodeBlock from '@theme/CodeBlock';



* In RavenDB, different documents can have different shapes.
  Documents are schemaless - new fields can be added or removed as needed.

* For such dynamic data, you can define indexes with **dynamic-index-fields**.

* This allows querying the index on fields that aren't yet known at index creation time,
  which is very useful when working on highly dynamic systems.

* Any value type can be indexed: string, number, date, etc.

* An index definition can contain both dynamic-index-fields and regular-index-fields.

* In this page:

  * [Indexing documents fields KEYS](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-keys)
      * [Example - index any field under object](../indexes/using-dynamic-fields.mdx#example---index-any-field-under-object)
      * [Example - index any field](../indexes/using-dynamic-fields.mdx#example---index-any-field)
  * [Indexing documents fields VALUES](../indexes/using-dynamic-fields.mdx#indexing-documents-fields-values)
      * [Example - basic](../indexes/using-dynamic-fields.mdx#example---basic)
      * [Example - list](../indexes/using-dynamic-fields.mdx#example---list)
  * [CreateField syntax](../indexes/using-dynamic-fields.mdx#createfield-syntax)
  * [Indexed fields & terms view](../indexes/using-dynamic-fields.mdx#indexed-fields--terms-view)



## Indexing documents fields KEYS

## Example - index any field under object



* Index any field that is under some object in the document.
* After the index is deployed, any new field added to this object will be indexed as well.
+ + + +* **The document**: + + +{`class Product: + def __init__(self, Id: str = None, attributes: Dict[str, object] = None): + self.Id = Id + + # The KEYS under the Attributes object will be dynamically indexed + # Fields added to this object after index creation time will also get indexed + self.attributes = attributes +`} + + + + +{`// Sample document content +\{ + "Attributes": \{ + "Color": "Red", + "Size": 42 + \} +\} +`} + + + +* **The index**: + The below index will index any field under the `Attributes` object from the document, + a dynamic-index-field will be created for each such field. + New fields added to the object after index creation time will be dynamically indexed as well. + + The actual dynamic-index-field name on which you can query will be the attribute field **key**. + E.g., Keys `Color` & `Size` will become the actual dynamic-index-fields. + + + + +{`class Products_ByAttributeKey(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = ( + "from p in docs.Products select new {" + "_ = p.attributes.Select(item => CreateField(item.Key, item.Value))" + "}" + ) +`} + + + + +{`class Products_ByAttributeKey_JS(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + """ + map('Products', function (p) { + return { + _: Object.keys(p.attributes).map(key => createField(key, p.attributes[key], + { indexing: 'Search', storage: true, termVector: null })) + }; + }) + """ + } +`} + + + + + +* **The query**: + * You can now query the generated dynamic-index fields. + * To get all documents with some 'size' use: + + + +{`matching_documents = list( + session.query_index_type(Products_ByAttributeKey, Product) + # 'size' is a dynamic-index-field that was indexed from the attributes object + .where_equals("size", 42) +) +`} + + + + +{`// 'Size' is a dynamic-index-field that was indexed from the Attributes object +from index 'Products/ByAttributeKey' where Size = 42 +`} + + + + +## Example - index any field + + + + * Define an index on a collection **without** needing any common structure between the indexed documents. + * After index is deployed, any new field added to the document will be indexed as well. + + + + +Consider whether this is really necessary, as indexing every single field can end up costing time and disk space. + + +* **The document**: + + +{`class Product: + def __init__(self, Id: str = None, first_name: str = None, last_name: str = None, title: str = None): + self.Id = Id + + # All KEYS in the document will be dynamically indexes + # Fields added to the document after index creation time wil also get indexed + self.first_name = first_name + self.last_name = last_name + self.title = title + # ... +`} + + + + + +{`// Sample document content + \{ + "FirstName": "John", + "LastName": "Doe", + "Title": "Engineer", + // ... +\} +`} + + + +* **The index**: + The below index will index any field from the document, + a dynamic-index-field will be created for each field. + New fields added to the document after index creation time will be dynamically indexed as well. + + The actual dynamic-index-field name on which you can query will be the field **key**. + E.g., Keys `FirstName` & `LastName` will become the actual dynamic-index-fields. 
+ + + + +{`class Products_ByAnyField_JS(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + # This will index EVERY FIELD under the top level of the document + self.maps = { + """ + map('Products', function (p) { + return { + _: Object.keys(p).map(key => createField(key, p[key], + { indexing: 'Search', storage: true, termVector: null })) + } + }) + """ + } +`} + + + + +* **The query**: + * To get all documents with some 'LastName' use: + + + +{`# 'last_name' is a dynamic-index-field that was indexed from the document +matching_documents = list( + session.query_index_type(Products_ByAnyField_JS, Product).where_equals("last_name", "Doe") +) +`} + + + + +{`// 'LastName' is a dynamic-index-field that was indexed from the document +from index 'Products/ByAnyField/JS' where LastName = "Doe" +`} + + + + + + +## Indexing documents fields VALUES + +## Example - basic + + + +* Only the **basic concept** of creating a dynamic-index-field from the **value** of a document field. +* Documents can then be queried based on those indexed values. + + + +* **The document**: + + +{`class Product: + def __init__(self, Id: str = None, product_type: str = None, price_per_unit: float = None): + self.Id = Id + + # The VALUE of ProductType will be dynamically indexed + self.product_type = product_type + self.price_per_unit = price_per_unit +`} + + + + + +{`// Sample document content +\{ + "ProductType": "Electronics", + "PricePerUnit": 23 +\} +`} + + + +* **The index**: + The below index will index the **value** of document field 'ProductType'. + + This value will be the dynamic-index-field name on which you can query. + E.g., Field value `Electronics` will be the dynamic-index-field. + + + + +{`class Products_ByProductType(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + + # Call 'CreateField' to generate the dynamic-index-fields + # The field name will be the value of document field 'product_type' + # The field terms will be derived from document field 'price_per_unit' + self.map = "from p in docs.Products select new { _ = CreateField(p.product_type, p.price_per_unit)}" +`} + + + + +{`class Products_ByProductType_JS(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + """ + map('Products', function (p) { + return { + _: createField(p.product_type, p.price_per_unit, + { indexing: 'Search', storage: true, termVector: null }) + }; + }) + """ + } +`} + + + + +* **The query**: + * To get all documents of some product type having a specific price per unit use: + + + +{`# 'electronics' is the dynamic-index-field that was indexed from the document 'product_type' +matching_documents = list( + session.advanced.document_query_from_index_type(Products_ByProductType, Product).where_equals( + "electronics", 23 + ) +) +`} + + + + +{`// 'Electronics' is the dynamic-index-field that was indexed from document field 'ProductType' +from index 'Products/ByProductType' where Electronics = 23 +`} + + + + +## Example - list + + + +* Index **values** from items in a list +* After index is deployed, any item added this list in the document will be dynamically indexed as well. 
+ + + +* **The document**: + + +{`class Attribute: + def __init__(self, prop_name: str = None, prop_value: str = None): + self.prop_name = prop_name + self.prop_value = prop_value + + +class Product: + def __init__(self, Id: str = None, name: str = None, attributes: List[Attribute] = None): + self.Id = Id + self.name = name + # For each element in this list, the VALUE of property 'prop_name' will be dynamically indexed + # e.g. color, width, length (in ex. below) will become dynamic-index-field + self.attributes = attributes +`} + + + + + +{`// Sample document content +\{ +Name": "SomeName", +Attributes": [ + \{ + "PropName": "Color", + "PropValue": "Blue" + \}, + \{ + "PropName": "Width", + "PropValue": "10" + \}, + \{ + "PropName": "Length", + "PropValue": "20" + \}, + ... + +\} +`} + + + +* **The index**: + The below index will create a dynamic-index-field per item in the document's `Attributes` list. + New items added to the Attributes list after index creation time will be dynamically indexed as well. + + The actual dynamic-index-field name on which you can query will be the item's PropName **value**. + E.g., 'PropName' value `width` will be a dynamic-index-field. + + + + +{`class Attributes_ByName(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = ( + "from a in docs.Products select new " + "{ _ = a.attributes.Select( item => CreateField(item.prop_name, item.prop_value)), name = a.name " + "}" + ) +`} + + + + +{`class Attributes_ByName_JS(AbstractJavaScriptIndexCreationTask): + def __init__(self): + super().__init__() + self.maps = { + """ + map('Products', function (p) { + return { + _: p.Attributes.map(item => createField(item.PropName, item.PropValue, + { indexing: 'Search', storage: true, termVector: null })), + Name: p.Name + }; + }) + """ + } +`} + + + + +* **The query**: + To get all documents matching a specific attribute property use: + + + +{`matching_documents = list( + session.advanced.document_query_from_index_type(Attributes_ByName, Product).where_equals( + "width", 10 + ) +) +`} + + + + +{`// 'Width' is a dynamic-index-field that was indexed from the Attributes list +from index 'Attributes/ByName' where Width = 10 +`} + + + + + + +## CreateField syntax + +#### Syntax for Index: + + + +{`object CreateField(string name, object value); + +object CreateField(string name, object value, bool stored, bool analyzed); + +object CreateField(string name, object value, CreateFieldOptions options); +`} + + + +#### Syntax for JavaScript-index: + + + +{`createField(fieldName, fieldValue, options); // returns object +`} + + + +| Parameters | Type | Description | +|------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **name** | `string` | Name of the dynamic-index-field | +| **value** | `object` | Value of the dynamic-index-field
The field terms are derived from this value. |
| **stored** | `bool` | Sets [FieldStorage](../indexes/storing-data-in-index.mdx) <br/> `False` - will set `FieldStorage.NO` (default value) <br/> `True` - will set `FieldStorage.YES` |
| **analyzed** | `bool` | Sets [FieldIndexing](../indexes/using-analyzers.mdx) <br/> `None` - `FieldIndexing.Default` (default value) <br/> `False` - `FieldIndexing.Exact` <br/> `True` - `FieldIndexing.Search` |
| **options** | `CreateFieldOptions` | Dynamic-index-field options |

| CreateFieldOptions | | |
|--------------------|--------------------|----------------------------------------------------------------------------|
| **Storage** | `FieldStorage?` | Learn about [storing data](../indexes/storing-data-in-index.mdx) in the index. |
| **Indexing** | `FieldIndexing?` | Learn about [using analyzers](../indexes/using-analyzers.mdx) in the index. |
| **TermVector** | `FieldTermVector?` | Learn about [term vectors](../indexes/using-term-vectors.mdx) in the index. |



* All above examples have used the character `_` in the dynamic-index-field definition.
  However, using `_` is just a convention. Any other string can be used instead.

* This property is not queryable; it is only used in the index definition syntax.
  The actual dynamic-index-fields that are generated are defined by the `CreateField` method.





## Indexed fields & terms view

The generated dynamic-index-fields and their indexed terms can be viewed in the **Terms View**.
Below are sample index fields & their terms generated from the last example.

![Figure 1. Go to terms view](./assets/dynamic-index-fields-1.png)

![Figure 2. Indexed fields & terms](./assets/dynamic-index-fields-2.png)




diff --git a/versioned_docs/version-7.1/indexes/_using-term-vectors-csharp.mdx b/versioned_docs/version-7.1/indexes/_using-term-vectors-csharp.mdx
new file mode 100644
index 0000000000..cbeea5addd
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/_using-term-vectors-csharp.mdx
@@ -0,0 +1,145 @@
import Admonition from '@theme/Admonition';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import CodeBlock from '@theme/CodeBlock';



* A [Term Vector](https://en.wikipedia.org/wiki/Vector_space_model) is a representation of a text document
  as a vector of identifiers.
  Lucene indexes can contain term vectors for documents they index.
* Term vectors can be used for various purposes, including similarity searches, information filtering
  and retrieval, and indexing.
  A book's index, for example, may have term vectors enabled on the book's **subject** field,
  making it possible to use this field to search for books with similar subjects.
* RavenDB features like [MoreLikeThis](../client-api/session/querying/how-to-use-morelikethis.mdx) leverage
  stored term vectors to accomplish their goals.

* In this page:
  * [Creating an index and enabling Term Vectors on a field](../indexes/using-term-vectors.mdx#creating-an-index-and-enabling-term-vectors-on-a-field)
  * [Using the API](../indexes/using-term-vectors.mdx#using-the-api)
  * [Using Studio](../indexes/using-term-vectors.mdx#using-studio)


## Creating an index and enabling Term Vectors on a field

Indexes that include term vectors can be created and configured using the API
or Studio.

## Using the API

To create an index and enable Term Vectors on a specific field, we can:

A. Create an index using the `AbstractIndexCreationTask`, and specify the term vectors there.
B. Or, define our term vectors in the `IndexDefinition` (directly or using the `IndexDefinitionBuilder`).
+ + + + +{`public class BlogPosts_ByTagsAndContent : AbstractIndexCreationTask +{ + public BlogPosts_ByTagsAndContent() + { + Map = users => from doc in users + select new + { + doc.Tags, + doc.Content + }; + + Indexes.Add(x => x.Content, FieldIndexing.Search); + TermVectors.Add(x => x.Content, FieldTermVector.WithPositionsAndOffsets); + } +} +`} + + + + +{`IndexDefinitionBuilder indexDefinitionBuilder = + new IndexDefinitionBuilder("BlogPosts/ByTagsAndContent") + { + Map = users => from doc in users + select new + { + doc.Tags, + doc.Content + }, + Indexes = + { + { x => x.Content, FieldIndexing.Search } + }, + TermVectors = + { + { x => x.Content, FieldTermVector.WithPositionsAndOffsets } + } + }; + +IndexDefinition indexDefinition = indexDefinitionBuilder + .ToIndexDefinition(store.Conventions); + +store.Maintenance.Send(new PutIndexesOperation(indexDefinition)); +`} + + + + +Available Term Vector options include: + + + +{`public enum FieldTermVector +\{ + /// + /// Do not store term vectors + /// + No, + + /// + /// Store the term vectors of each document. A term vector is a list of the document's + /// terms and their number of occurrences in that document. + /// + Yes, + + /// + /// Store the term vector + token position information + /// + WithPositions, + + /// + /// Store the term vector + Token offset information + /// + WithOffsets, + + /// + /// Store the term vector + Token position and offset information + /// + WithPositionsAndOffsets +\} +`} + + + +Learn which Lucene API methods and constants are available [here](https://lucene.apache.org/core/3_6_2/api/all/org/apache/lucene/document/Field.TermVector.html). + +## Using Studio + +Let's use as an example one of Studio's sample indexes, `Product/Search`, that has term vector +enabled on its `Name` field so a feature like [MoreLikeThis](../client-api/session/querying/how-to-use-morelikethis.mdx) +can use this fiels to select a product and find products similar to it. + +![Term vector enabled on index field](./assets/term-vector-enabled.png) + +We can now use a query like: + + + +{`from index 'Product/Search' +where morelikethis(id() = 'products/7-A') +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/_using-term-vectors-java.mdx b/versioned_docs/version-7.1/indexes/_using-term-vectors-java.mdx new file mode 100644 index 0000000000..0ba966f12b --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_using-term-vectors-java.mdx @@ -0,0 +1,79 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +[Term Vector](https://en.wikipedia.org/wiki/Vector_space_model) is a representation of a text document as a vector of identifiers that can be used for similarity searches, information filtering, information retrieval, and indexing. In RavenDB the feature like [MoreLikeThis](../client-api/session/querying/how-to-use-morelikethis.mdx) is leveraging the term vectors to accomplish its purposes. + +To create an index and enable Term Vectors on a specific field we can create an index using the `AbstractIndexCreationTask`, then specify the term vectors there, or define our term vectors in the `IndexDefinition` (directly or using the `IndexDefinitionBuilder`). 
+ + + + +{`public static class BlogPosts_ByTagsAndContent extends AbstractIndexCreationTask { + public BlogPosts_ByTagsAndContent() { + map = "docs.Posts.Select(post => new { " + + " Tags = post.Tags, " + + " Content = post.Content " + + "})"; + + index("Content", FieldIndexing.SEARCH); + termVector("Content", FieldTermVector.WITH_POSITIONS_AND_OFFSETS); + } +} +`} + + + + +{`IndexDefinitionBuilder builder = new IndexDefinitionBuilder("BlogPosts/ByTagsAndContent"); +builder.setMap("docs.Posts.Select(post => new { " + + " Tags = post.Tags, " + + " Content = post.Content " + + "})"); + +builder.getIndexesStrings().put("Content", FieldIndexing.SEARCH); +builder.getTermVectorsStrings().put("Content", FieldTermVector.WITH_POSITIONS_AND_OFFSETS); + +IndexDefinition indexDefinition = builder.toIndexDefinition(store.getConventions()); + +store.maintenance().send(new PutIndexesOperation(indexDefinition)); +`} + + + + +The available Term Vector options are: + + + +{`public enum FieldTermVector \{ + /** + * Do not store term vectors + */ + NO, + + /** + * Store the term vectors of each document. A term vector is a list of the document's + * terms and their number of occurrences in that document. + */ + YES, + /** + * Store the term vector + token position information + */ + WITH_POSITIONS, + /** + * Store the term vector + Token offset information + */ + WITH_OFFSETS, + + /** + * Store the term vector + Token position and offset information + */ + WITH_POSITIONS_AND_OFFSETS +\} +`} + + + + diff --git a/versioned_docs/version-7.1/indexes/_using-term-vectors-nodejs.mdx b/versioned_docs/version-7.1/indexes/_using-term-vectors-nodejs.mdx new file mode 100644 index 0000000000..71778b913a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_using-term-vectors-nodejs.mdx @@ -0,0 +1,58 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +[Term Vector](https://en.wikipedia.org/wiki/Vector_space_model) is a representation of a text document as a vector of identifiers that can be used for similarity searches, information filtering, information retrieval, and indexing. In RavenDB the feature like [MoreLikeThis](../client-api/session/querying/how-to-use-morelikethis.mdx) is leveraging the term vectors to accomplish its purposes. + +To create an index and enable Term Vectors on a specific field we can create an index using the `AbstractIndexCreationTask`, then specify the term vectors there, or define our term vectors in the `IndexDefinition` (directly or using the `IndexDefinitionBuilder`). 
+ + + + +{`class BlogPosts_ByTagsAndContent extends AbstractIndexCreationTask { + constructor() { + super(); + + this.map = \`docs.Posts.Select(post => new { + tags = post.tags, + content = post.content + })\`; + + this.index("content", "Search"); + this.termVector("content", "WithPositionsAndOffsets"); + } +} +`} + + + + +{`const builder = new IndexDefinitionBuilder("BlogPosts/ByTagsAndContent"); +builder.map = \`docs.Posts.Select(post => new { + tags = post.tags, + content = post.content +})\`; + +builder.indexesStrings["content"] = "Search"; +builder.termVectorsStrings["content"] = "WithPositionsAndOffsets"; + +const indexDefinition = builder.toIndexDefinition(store.conventions); + +await store.maintenance.send(new PutIndexesOperation(indexDefinition)); +`} + + + + +The available Term Vector options are: + +| Term Vector | | +| ----------- | - | +| `"No"` | Do not store term vectors | +| `"Yes"` | Store the term vectors of each document. A term vector is a list of the document's terms and their number of occurrences in that document. | +| `"WithPositions"` | Store the term vector + token position information | +| `"WithOffsets"` | Store the term vector + token offset information | +| `"WithPositionsAndOffsets"` | Store the term vector + token position and offset information | + + diff --git a/versioned_docs/version-7.1/indexes/_what-are-indexes-csharp.mdx b/versioned_docs/version-7.1/indexes/_what-are-indexes-csharp.mdx new file mode 100644 index 0000000000..f784c64ebd --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_what-are-indexes-csharp.mdx @@ -0,0 +1,235 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Indexes in RavenDB enable efficient querying by processing raw data and providing fast query results without scanning the entire dataset each and every time. + Learn more in the [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) below. + +* This page provides a [Basic example](../indexes/what-are-indexes.mdx#basic-example) of creating, deploying, and querying an index. + Additional examples can be found in [Creating and deploying indexes](../indexes/creating-and-deploying.mdx), [Map Indexes](../indexes/map-indexes.mdx), + and many other articles under the "Indexes" menu. + +* In this page: + * [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) + * [Types of indexes](../indexes/what-are-indexes.mdx#types-of-indexes) + * [Basic example](../indexes/what-are-indexes.mdx#basic-example) + * [Understanding index query results](../indexes/what-are-indexes.mdx#understanding-index-query-results) + * [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources) + + + +## Indexes overview + +**Indexes are fundamental**: + +* Indexes are fundamental to RavenDB’s query execution, enabling efficient query performance by processing the underlying data and delivering faster results. +* ALL queries in RavenDB use an index to deliver results and ensure optimal performance. + Learn more in [Queries always provide results using an index](../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). 
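For example, assuming an `Employees` collection with no matching index deployed yet, even the following simple RQL query is served by an index - the server creates an auto-index for it on the fly (the auto-index name shown in the comment is indicative):



{`// No index is specified in this query.
// If no suitable index exists, the server creates an auto-index,
// e.g. 'Auto/Employees/ByLastName', and serves the results from it.
from 'Employees' where LastName = 'King'
`}

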
+**Main concepts**: + +* When discussing indexes in RavenDB, three key components come into play: + * The index definition + * The indexing process + * The indexed data + +* Each of these components is described in detail in [Indexes - The moving parts](../studio/database/indexes/indexes-overview.mdx#indexes---the-moving-parts). +**The indexing process**: + +* The indexing process iterates over the raw documents, creating an **index-entry** for each document that is processed. + (Usually a single index-entry is created per raw document, unless working with a [fanout index](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document)). +* Each index-entry contains **index-fields**, and each index-field contains content (**index-terms**) that was generated from the raw documents, + as defined by the index definition and depending on the [analyzer](../indexes/using-analyzers.mdx) used. +* A map is built between the indexed-terms and the documents they originated from, + enabling you to query for documents based on the indexed data. +**Automatic data processing**: + +* Once defined and deployed, an index will initially process the entire dataset. + After that, the index will only process documents that were modified, added or deleted. + This happens automatically without requiring direct user intervention. +* For example, if changes are made to documents in the "Orders" collection, + all indexes defined for the "Orders" collection will be triggered to update the index with the new data. +* This approach helps avoid costly table scans, allows the server to respond quickly, + and reduces the load on queries while optimizing machine resource usage. +**Background operation**: + +* RavenDB indexes are designed to run asynchronously in the background. +* The indexing process does not block or interrupt database operations, such as writing data or running queries, + though queries may temporarily return [stale results](../indexes/stale-indexes.mdx) until the index is fully updated. +**Separate storage**: + +* Indexes store their processed data separately, ensuring that the raw data remains unaffected. + This separation helps maintain the integrity of the raw data while allowing the index to optimize query performance. + +* If system resources become strained due to indexing, it may require adjustments to the index design, hardware, or other factors. + Learn more in [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources). + + + +## Types of indexes + +* Indexes in RavenDB are categorized along the following axes: + * **Auto** indexes -vs- **Static** indexes + * **Map** indexes -vs- **Map-Reduce** indexes + * **Single-Collection** (Single-Map) indexes -vs- **Multi-Collection** (Multi-Map) indexes + +* For a detailed description of each type, refer to section [Index types](../studio/database/indexes/indexes-overview.mdx#index-types). + + + +## Basic example + +In this example we create a static-index that indexes content from documents in the `Employees` [collection](../client-api/faq/what-is-a-collection.mdx). +This allows querying for _Employee_ documents by any of the index-fields (`FullName`, `LastName`, `Country`). + + + +#### Define the index + +* The first step is to define the index. + One way to do this is by inheriting from `AbstractIndexCreationTask`. + Learn more in [Define a static-index using a custom class](../indexes/creating-and-deploying.mdx#define-a-static-index-using-a-custom-class). 
+* Other methods to create a static-index are: + * [Creating a static-index using an Operation](../client-api/operations/maintenance/indexes/put-indexes.mdx) + * [Creating a static-index from the Studio](../studio/database/indexes/indexes-list-view.mdx) + + + +{`// Define the index: +// ================= + +public class Employees_ByNameAndCountry : AbstractIndexCreationTask +\{ + public class IndexEntry + \{ + // The index-fields + public string LastName \{ get; set; \} + public string FullName \{ get; set; \} + public string Country \{ get; set; \} + \} + + public Employees_ByNameAndCountry() + \{ + Map = employees => from employee in employees + select new IndexEntry() + \{ + // Define the content for each index-field + LastName = employee.LastName, + FullName = employee.FirstName + " " + employee.LastName, + Country = employee.Address.Country + \}; + \} +\} +`} + + + + + + +#### Deploy the index + +* The next step is to deploy the index to the RavenDB server. + One way to do this is by calling `Execute()` on the index instance. +* Additional methods for deploying static-indexes are described in [Deploy a static index](../indexes/creating-and-deploying.mdx#deploy-a-static-index). +* Once deployed, the indexing process will start indexing documents. + + + +{`// Deploy the index to the server: +// =============================== + +new Employees_ByNameAndCountry().Execute(store); +`} + + + + + + +#### Query the index + +* Now you can query the _Employees_ collection using the index. + In this example we query for _Employee_ documents, **filtering results based on index-fields** `LastName` and `Country`. + The results will include only the _Employee_ documents that match the query predicate. +* For detailed guidance on querying with an index, refer to the [Querying an index](../indexes/querying/query-index.mdx). + + + +{`// Query the database using the index: +// =================================== + +IList employeesFromUK = session + .Query() + // Here we query for all Employee documents that are from the UK + // and have 'King' in their LastName field: + .Where(x => x.LastName == "King" && x.Country == "UK") + .OfType() + .ToList(); +`} + + + + + + +## Understanding index query results + + + +A common mistake is treating indexes like SQL Views, but they are not analogous. +The results of a query for a given index are the **full raw documents** that match the query predicate, +and not just the indexed fields. + +This behavior can be changed by applying [Projections](../indexes/querying/projections.mdx), +which let you project the query results into selected fields instead of returning the entire document. + + +#### Viewing the resulting documents: + +For example, the results shown in the following image are the **documents** that match the query predicate. + +![Index query results - documents](./assets/index-query-results-1.png) + +1. This is the index query. + The query predicate filters the resulting documents based on the content of the index-fields. +2. Each row in the results represents a **matching document**. +3. In this example, the `LastName`, `FirstName`, `Title`, etc., are the raw **document-fields**. +#### Viewing the index-entries: + +If you wish to **view the index-entries** that compose the index itself, +you can enable the option to show "raw index entries" instead of the matching documents. + +![Index query results - index entries](./assets/index-query-results-2.png) + +1. Query the index (no filtering is applied in this example). +2. 
Click the "Settings" button and toggle on "Show raw index-entries instead of matching documents". +3. Each row in the results represents an **index-entry**. +4. In this example, the `Country`, `FullName`, and `LastName` columns are the **index-fields**, + which were defined in the index definition. +5. This a **term**. + In this example, `usa` is a term generated by the analyzer for index-field `Country` from document `employees/4-a`. + + + +## If indexes exhaust system resources + +* The indexing process utilizes machine resources to keep the data up-to-date for queries. + +* If indexing drains system resources, it may indicate one or more of the following: + * Indexes may have been defined in a way that causes inefficient processing. + * The [license](https://ravendb.net/buy) may need to be upgraded, + * Your [cloud instance](/cloud/cloud-instances#a-production-cloud-cluster) (if used) may require optimization. + * Hardware upgrades may be necessary to better support your workload. + +* Refer to the [Indexing Performance View](../studio/database/indexes/indexing-performance.mdx) in the Studio to monitor and analyze the indexing process. + This view provides graphical representations and detailed statistics of all index activities at each stage. + +* Additionally, refer to the [Common indexing issues](../studio/database/indexes/indexing-performance.mdx#common-indexing-issues) section + for troubleshooting and resolving indexing challenges. + + + + diff --git a/versioned_docs/version-7.1/indexes/_what-are-indexes-java.mdx b/versioned_docs/version-7.1/indexes/_what-are-indexes-java.mdx new file mode 100644 index 0000000000..bc0dfd5fef --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_what-are-indexes-java.mdx @@ -0,0 +1,222 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Indexes in RavenDB enable efficient querying by processing raw data and providing fast query results without scanning the entire dataset each and every time. + Learn more in the [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) below. + +* This page provides a [Basic example](../indexes/what-are-indexes.mdx#basic-example) of creating, deploying, and querying an index. + Additional examples can be found in [Creating and deploying indexes](../indexes/creating-and-deploying.mdx), [Map Indexes](../indexes/map-indexes.mdx), + and many other articles under the "Indexes" menu. + +* In this page: + * [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) + * [Types of indexes](../indexes/what-are-indexes.mdx#types-of-indexes) + * [Basic example](../indexes/what-are-indexes.mdx#basic-example) + * [Understanding index query results](../indexes/what-are-indexes.mdx#understanding-index-query-results) + * [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources) + + + +## Indexes overview + +**Indexes are fundamental**: + +* Indexes are fundamental to RavenDB’s query execution, enabling efficient query performance by processing the underlying data and delivering faster results. +* ALL queries in RavenDB use an index to deliver results and ensure optimal performance. + Learn more in [Queries always provide results using an index](../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). 
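For example, the query from the [Basic example](../indexes/what-are-indexes.mdx#basic-example) below corresponds to the following RQL, where the index name follows from the `Employees_ByNameAndCountry` class defined later on this page:



{`// Query the static-index that is defined in the basic example below
from index 'Employees/ByNameAndCountry'
where LastName = 'King' and Country = 'UK'
`}

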
+**Main concepts**: + +* When discussing indexes in RavenDB, three key components come into play: + * The index definition + * The indexing process + * The indexed data + +* Each of these components is described in detail in [Indexes - The moving parts](../studio/database/indexes/indexes-overview.mdx#indexes---the-moving-parts). +**The indexing process**: + +* The indexing process iterates over the raw documents, creating an **index-entry** for each document that is processed. + (Usually a single index-entry is created per raw document, unless working with a [fanout index](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document)). +* Each index-entry contains **index-fields**, and each index-field contains content (**index-terms**) that was generated from the raw documents, + as defined by the index definition and depending on the [analyzer](../indexes/using-analyzers.mdx) used. +* A map is built between the indexed-terms and the documents they originated from, + enabling you to query for documents based on the indexed data. +**Automatic data processing**: + +* Once defined and deployed, an index will initially process the entire dataset. + After that, the index will only process documents that were modified, added or deleted. + This happens automatically without requiring direct user intervention. +* For example, if changes are made to documents in the "Orders" collection, + all indexes defined for the "Orders" collection will be triggered to update the index with the new data. +* This approach helps avoid costly table scans, allows the server to respond quickly, + and reduces the load on queries while optimizing machine resource usage. +**Background operation**: + +* RavenDB indexes are designed to run asynchronously in the background. +* The indexing process does not block or interrupt database operations, such as writing data or running queries, + though queries may temporarily return [stale results](../indexes/stale-indexes.mdx) until the index is fully updated. +**Separate storage**: + +* Indexes store their processed data separately, ensuring that the raw data remains unaffected. + This separation helps maintain the integrity of the raw data while allowing the index to optimize query performance. + +* If system resources become strained due to indexing, it may require adjustments to the index design, hardware, or other factors. + Learn more in [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources). + + + +## Types of indexes + +* Indexes in RavenDB are categorized along the following axes: + * **Auto** indexes -vs- **Static** indexes + * **Map** indexes -vs- **Map-Reduce** indexes + * **Single-Collection** (Single-Map) indexes -vs- **Multi-Collection** (Multi-Map) indexes + +* For a detailed description of each type, refer to section [Index types](../studio/database/indexes/indexes-overview.mdx#index-types). + + + +## Basic example + +In this example we create a static-index that indexes content from documents in the `Employees` [collection](../client-api/faq/what-is-a-collection.mdx). +This allows querying for _Employee_ documents by any of the index-fields (`FullName`, `LastName`, `Country`). + + + +#### Define the index + +* The first step is to define the index. + One way to do this is by extending the `AbstractIndexCreationTask` class. + Learn more in [Define a static-index using a custom class](../indexes/creating-and-deploying.mdx#using-abstractindexcreationtask). 
+* Other methods to create a static-index are: + * [Creating a static-index using an Operation](../client-api/operations/maintenance/indexes/put-indexes.mdx) + * [Creating a static-index from the Studio](../studio/database/indexes/indexes-list-view.mdx) + + + +{`// Define the index: +// ================= + +public static class Employees_ByNameAndCountry extends AbstractIndexCreationTask \{ + public Employees_ByNameAndCountry() \{ + map = "docs.Employees.Select(employee => new \{ " + + " LastName = employee.LastName, " + + " FullName = (employee.FirstName + \\" \\") + employee.LastName, " + + " Country = employee.Address.Country " + + "\})"; + \} +\} +`} + + + + + + +#### Deploy the index + +* The next step is to deploy the index to the RavenDB server. + One way to do this is by calling `execute()` on the index instance. +* Additional methods for deploying static-indexes are described in [Deploy a static index](../indexes/creating-and-deploying.mdx#deploy-a-static-index). +* Once deployed, the indexing process will start indexing documents. + + + +{`// Deploy the index to the server: +// =============================== + +new Employees_ByNameAndCountry().execute(store); +`} + + + + + + +#### Query the index + +* Now you can query the _Employees_ collection using the index. + In this example we query for _Employee_ documents, **filtering results based on index-fields** `LastName` and `Country`. + The results will include only the _Employee_ documents that match the query predicate. +* For detailed guidance on querying with an index, refer to the [Querying an index](../indexes/querying/query-index.mdx). + + + +{`// Query the database using the index: +// =================================== + +List employeesFromUK = session + .query(Employee.class, Employees_ByNameAndCountry.class) + // Here we query for all Employee documents that are from the UK + // and have 'King' in their LastName field: + .whereEquals("LastName", "King") + .whereEquals("Country", "UK") + .toList(); +`} + + + + + + +## Understanding index query results + + + +A common mistake is treating indexes like SQL Views, but they are not analogous. +The results of a query for a given index are the **full raw documents** that match the query predicate, +and not just the indexed fields. + +This behavior can be changed by applying [Projections](../indexes/querying/projections.mdx), +which let you project the query results into selected fields instead of returning the entire document. + + +#### Viewing the resulting documents: + +For example, the results shown in the following image are the **documents** that match the query predicate. + +![Index query results - documents](./assets/index-query-results-1.png) + +1. This is the index query. + The query predicate filters the resulting documents based on the content of the index-fields. +2. Each row in the results represents a **matching document**. +3. In this example, the `LastName`, `FirstName`, `Title`, etc., are the raw **document-fields**. +#### Viewing the index-entries: + +If you wish to **view the index-entries** that compose the index itself, +you can enable the option to show "raw index entries" instead of the matching documents. + +![Index query results - index entries](./assets/index-query-results-2.png) + +1. Query the index (no filtering is applied in this example). +2. Click the "Settings" button and toggle on "Show raw index-entries instead of matching documents". +3. Each row in the results represents an **index-entry**. +4. 
In this example, the `Country`, `FullName`, and `LastName` columns are the **index-fields**, + which were defined in the index definition. +5. This a **term**. + In this example, `usa` is a term generated by the analyzer for index-field `Country` from document `employees/4-a`. + + + +## If indexes exhaust system resources + +* The indexing process utilizes machine resources to keep the data up-to-date for queries. + +* If indexing drains system resources, it may indicate one or more of the following: + * Indexes may have been defined in a way that causes inefficient processing. + * The [license](https://ravendb.net/buy) may need to be upgraded, + * Your [cloud instance](/cloud/cloud-instances#a-production-cloud-cluster) (if used) may require optimization. + * Hardware upgrades may be necessary to better support your workload. + +* Refer to the [Indexing Performance View](../studio/database/indexes/indexing-performance.mdx) in the Studio to monitor and analyze the indexing process. + This view provides graphical representations and detailed statistics of all index activities at each stage. + +* Additionally, refer to the [Common indexing issues](../studio/database/indexes/indexing-performance.mdx#common-indexing-issues) section + for troubleshooting and resolving indexing challenges. + + + + diff --git a/versioned_docs/version-7.1/indexes/_what-are-indexes-nodejs.mdx b/versioned_docs/version-7.1/indexes/_what-are-indexes-nodejs.mdx new file mode 100644 index 0000000000..cdcce58869 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_what-are-indexes-nodejs.mdx @@ -0,0 +1,229 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Indexes in RavenDB enable efficient querying by processing raw data and providing fast query results without scanning the entire dataset each and every time. + Learn more in the [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) below. + +* This page provides a [Basic example](../indexes/what-are-indexes.mdx#basic-example) of creating, deploying, and querying an index. + Additional examples can be found in [Creating and deploying indexes](../indexes/creating-and-deploying.mdx), [Map Indexes](../indexes/map-indexes.mdx), + and many other articles under the "Indexes" menu. + +* In this page: + * [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) + * [Types of indexes](../indexes/what-are-indexes.mdx#types-of-indexes) + * [Basic example](../indexes/what-are-indexes.mdx#basic-example) + * [Understanding index query results](../indexes/what-are-indexes.mdx#understanding-index-query-results) + * [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources) + + + +## Indexes overview + +**Indexes are fundamental**: + +* Indexes are fundamental to RavenDB’s query execution, enabling efficient query performance by processing the underlying data and delivering faster results. +* ALL queries in RavenDB use an index to deliver results and ensure optimal performance. + Learn more in [Queries always provide results using an index](../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). 
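For example, both of the following RQL queries are served by an index - the first by the static-index defined in the [Basic example](../indexes/what-are-indexes.mdx#basic-example) below, the second by an auto-index that the server creates on demand (the auto-index name is indicative):



{`// Served by the static-index from the basic example below:
from index 'Employees/ByNameAndCountry' where Country = 'UK'

// Served by an auto-index, e.g. 'Auto/Employees/ByFirstName',
// which the server creates if no suitable index exists:
from 'Employees' where FirstName = 'Anne'
`}

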
+**Main concepts**: + +* When discussing indexes in RavenDB, three key components come into play: + * The index definition + * The indexing process + * The indexed data + +* Each of these components is described in detail in [Indexes - The moving parts](../studio/database/indexes/indexes-overview.mdx#indexes---the-moving-parts). +**The indexing process**: + +* The indexing process iterates over the raw documents, creating an **index-entry** for each document that is processed. + (Usually a single index-entry is created per raw document, unless working with a [fanout index](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document)). +* Each index-entry contains **index-fields**, and each index-field contains content (**index-terms**) that was generated from the raw documents, + as defined by the index definition and depending on the [analyzer](../indexes/using-analyzers.mdx) used. +* A map is built between the indexed-terms and the documents they originated from, + enabling you to query for documents based on the indexed data. +**Automatic data processing**: + +* Once defined and deployed, an index will initially process the entire dataset. + After that, the index will only process documents that were modified, added or deleted. + This happens automatically without requiring direct user intervention. +* For example, if changes are made to documents in the "Orders" collection, + all indexes defined for the "Orders" collection will be triggered to update the index with the new data. +* This approach helps avoid costly table scans, allows the server to respond quickly, + and reduces the load on queries while optimizing machine resource usage. +**Background operation**: + +* RavenDB indexes are designed to run asynchronously in the background. +* The indexing process does not block or interrupt database operations, such as writing data or running queries, + though queries may temporarily return [stale results](../indexes/stale-indexes.mdx) until the index is fully updated. +**Separate storage**: + +* Indexes store their processed data separately, ensuring that the raw data remains unaffected. + This separation helps maintain the integrity of the raw data while allowing the index to optimize query performance. + +* If system resources become strained due to indexing, it may require adjustments to the index design, hardware, or other factors. + Learn more in [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources). + + + +## Types of indexes + +* Indexes in RavenDB are categorized along the following axes: + * **Auto** indexes -vs- **Static** indexes + * **Map** indexes -vs- **Map-Reduce** indexes + * **Single-Collection** (Single-Map) indexes -vs- **Multi-Collection** (Multi-Map) indexes + +* For a detailed description of each type, refer to section [Index types](../studio/database/indexes/indexes-overview.mdx#index-types). + + + +## Basic example + +In this example we create a static-index that indexes content from documents in the `Employees` [collection](../client-api/faq/what-is-a-collection.mdx). +This allows querying for _Employee_ documents by any of the index-fields (`FullName`, `LastName`, `Country`). + + + +#### Define the index + +* The first step is to define the index. + One way to do this is by extending `AbstractJavaScriptIndexCreationTask`. + Learn more in [Define a static-index using a custom class](../indexes/creating-and-deploying.mdx#define-a-static-index-using-a-custom-class). 
+* Other methods to create a static-index are: + * [Creating a static-index using an Operation](../client-api/operations/maintenance/indexes/put-indexes.mdx) + * [Creating a static-index from the Studio](../studio/database/indexes/indexes-list-view.mdx) + + + +{`// Define the index: +// ================= + +class Employees_ByNameAndCountry extends AbstractJavaScriptIndexCreationTask \{ + constructor() \{ + super(); + + this.map("Employees", employee => \{ + return \{ + // Define the content for each index-field: + // ======================================== + LastName: employee.LastName, + FullName: employee.FirstName + " " + employee.LastName, + Country: employee.Address.Country + \}; + \}); + \} +\} +`} + + + + + + +#### Deploy the index + +* The next step is to deploy the index to the RavenDB server. + One way to do this is by calling `execute()` on the index instance. +* Additional methods for deploying static-indexes are described in [Deploy a static index](../indexes/creating-and-deploying.mdx#deploy-a-static-index). +* Once deployed, the indexing process will start indexing documents. + + + +{`// Deploy the index to the server: +// =============================== + +const employeesIndex = new Employees_ByNameAndCountry(); +await employeesIndex.execute(store); +`} + + + + + + +#### Query the index + +* Now you can query the _Employees_ collection using the index. + In this example we query for _Employee_ documents, **filtering results based on index-fields** `LastName` and `Country`. + The results will include only the _Employee_ documents that match the query predicate. +* For detailed guidance on querying with an index, refer to the [Querying an index](../indexes/querying/query-index.mdx). + + + +{`// Query the database using the index: +// =================================== + +const employeesFromUK = await session + .query(\{ indexName: employeesIndex.getIndexName() \}) + // Here we query for all Employee documents that are from the UK + // and have 'King' in their LastName field: + .whereEquals("LastName", "King") + .whereEquals("Country", "UK") + .all(); +`} + + + + + + +## Understanding index query results + + + +A common mistake is treating indexes like SQL Views, but they are not analogous. +The results of a query for a given index are the **full raw documents** that match the query predicate, +and not just the indexed fields. + +This behavior can be changed by applying [Projections](../indexes/querying/projections.mdx), +which let you project the query results into selected fields instead of returning the entire document. + + +#### Viewing the resulting documents: + +For example, the results shown in the following image are the **documents** that match the query predicate. + +![Index query results - documents](./assets/index-query-results-1.png) + +1. This is the index query. + The query predicate filters the resulting documents based on the content of the index-fields. +2. Each row in the results represents a **matching document**. +3. In this example, the `LastName`, `FirstName`, `Title`, etc., are the raw **document-fields**. +#### Viewing the index-entries: + +If you wish to **view the index-entries** that compose the index itself, +you can enable the option to show "raw index entries" instead of the matching documents. + +![Index query results - index entries](./assets/index-query-results-2.png) + +1. Query the index (no filtering is applied in this example). +2. Click the "Settings" button and toggle on "Show raw index-entries instead of matching documents". +3. 
Each row in the results represents an **index-entry**. +4. In this example, the `Country`, `FullName`, and `LastName` columns are the **index-fields**, + which were defined in the index definition. +5. This is a **term**. + In this example, `usa` is a term generated by the analyzer for index-field `Country` from document `employees/4-a`. + + + +## If indexes exhaust system resources + +* The indexing process utilizes machine resources to keep the data up-to-date for queries. + +* If indexing drains system resources, it may indicate one or more of the following: + * Indexes may have been defined in a way that causes inefficient processing. + * The [license](https://ravendb.net/buy) may need to be upgraded. + * Your [cloud instance](/cloud/cloud-instances#a-production-cloud-cluster) (if used) may require optimization. + * Hardware upgrades may be necessary to better support your workload. + +* Refer to the [Indexing Performance View](../studio/database/indexes/indexing-performance.mdx) in the Studio to monitor and analyze the indexing process. + This view provides graphical representations and detailed statistics of all index activities at each stage. + +* Additionally, refer to the [Common indexing issues](../studio/database/indexes/indexing-performance.mdx#common-indexing-issues) section + for troubleshooting and resolving indexing challenges. + + + + diff --git a/versioned_docs/version-7.1/indexes/_what-are-indexes-php.mdx b/versioned_docs/version-7.1/indexes/_what-are-indexes-php.mdx new file mode 100644 index 0000000000..560e0d448d --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_what-are-indexes-php.mdx @@ -0,0 +1,214 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Indexes in RavenDB enable efficient querying by processing raw data and providing fast query results without scanning the entire dataset each and every time. + Learn more in the [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) below. + +* This page provides a [Basic example](../indexes/what-are-indexes.mdx#basic-example) of creating, deploying, and querying an index. + Additional examples can be found in [Creating and deploying indexes](../indexes/creating-and-deploying.mdx), [Map Indexes](../indexes/map-indexes.mdx), + and many other articles under the "Indexes" menu. + +* In this page: + * [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) + * [Types of indexes](../indexes/what-are-indexes.mdx#types-of-indexes) + * [Basic example](../indexes/what-are-indexes.mdx#basic-example) + * [Understanding index query results](../indexes/what-are-indexes.mdx#understanding-index-query-results) + * [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources) + + + +## Indexes overview + +**Indexes are fundamental**: + +* Indexes are fundamental to RavenDB’s query execution, enabling efficient query performance by processing the underlying data and delivering faster results. +* ALL queries in RavenDB use an index to deliver results and ensure optimal performance. + Learn more in [Queries always provide results using an index](../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). + See the sketch below.
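+ +For example, even a query that does not specify an index is still answered from an index: + the server creates (or reuses) an **auto-index** that matches the query. + The following is a minimal sketch, assuming the PHP client's dynamic-query API; + the auto-index name in the comment is illustrative. + +{`// A dynamic query - no index is specified: +$results = $session->query(Employee::class) + ->whereEquals('FirstName', "Robert") + ->toList(); + +// The server answers this query from an auto-index, +// e.g. 'Auto/Employees/ByFirstName' (illustrative name). +`} +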
+**Main concepts**: + +* When discussing indexes in RavenDB, three key components come into play: + * The index definition + * The indexing process + * The indexed data + +* Each of these components is described in detail in [Indexes - The moving parts](../studio/database/indexes/indexes-overview.mdx#indexes---the-moving-parts). +**The indexing process**: + +* The indexing process iterates over the raw documents, creating an **index-entry** for each document that is processed. + (Usually a single index-entry is created per raw document, unless working with a [fanout index](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document)). +* Each index-entry contains **index-fields**, and each index-field contains content (**index-terms**) that was generated from the raw documents, + as defined by the index definition and depending on the [analyzer](../indexes/using-analyzers.mdx) used. +* A map is built between the indexed-terms and the documents they originated from, + enabling you to query for documents based on the indexed data. +**Automatic data processing**: + +* Once defined and deployed, an index will initially process the entire dataset. + After that, the index will only process documents that were modified, added or deleted. + This happens automatically without requiring direct user intervention. +* For example, if changes are made to documents in the "Orders" collection, + all indexes defined for the "Orders" collection will be triggered to update the index with the new data. +* This approach helps avoid costly table scans, allows the server to respond quickly, + and reduces the load on queries while optimizing machine resource usage. +**Background operation**: + +* RavenDB indexes are designed to run asynchronously in the background. +* The indexing process does not block or interrupt database operations, such as writing data or running queries, + though queries may temporarily return [stale results](../indexes/stale-indexes.mdx) until the index is fully updated. +**Separate storage**: + +* Indexes store their processed data separately, ensuring that the raw data remains unaffected. + This separation helps maintain the integrity of the raw data while allowing the index to optimize query performance. + +* If system resources become strained due to indexing, it may require adjustments to the index design, hardware, or other factors. + Learn more in [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources). + + + +## Types of indexes + +* Indexes in RavenDB are categorized along the following axes: + * **Auto** indexes -vs- **Static** indexes + * **Map** indexes -vs- **Map-Reduce** indexes + * **Single-Collection** (Single-Map) indexes -vs- **Multi-Collection** (Multi-Map) indexes + +* For a detailed description of each type, refer to section [Index types](../studio/database/indexes/indexes-overview.mdx#index-types). + + + +## Basic example + +In this example we create a static-index that indexes content from documents in the `Employees` [collection](../client-api/faq/what-is-a-collection.mdx). +This allows querying for _Employee_ documents by any of the index-fields (`FirstName`, `LastName`). + + + +#### Define the index + +* The first step is to define the index. + One way to do this is by inheriting from `AbstractIndexCreationTask`. + Learn more in [Define a static-index using a custom class](../indexes/creating-and-deploying.mdx#define-a-static-index-using-a-custom-class).
+* Other methods to create a static-index are: + * [Creating a static-index using an Operation](../client-api/operations/maintenance/indexes/put-indexes.mdx) + * [Creating a static-index from the Studio](../studio/database/indexes/indexes-list-view.mdx) + + + +{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask +\{ + public function __construct() + \{ + parent::__construct(); + + $this->map = "docs.Employees.Select(employee => new \{" . + " FirstName = employee.FirstName," . + " LastName = employee.LastName" . + "\})"; + \} +\} +`} + + + + + + +#### Deploy the index + +* The next step is to deploy the index to the RavenDB server. + One way to do this is by calling `execute()` on the index instance. +* Additional methods for deploying static-indexes are described in [Deploy a static index](../indexes/creating-and-deploying.mdx#deploy-a-static-index). +* Once deployed, the indexing process will start indexing documents. + + + +{`// save index on server +(new Employees_ByFirstAndLastName())->execute($store); +`} + + + + + + +#### Query the index + +* Now you can query the _Employees_ collection using the index. + In this example we query for _Employee_ documents, **filtering results based on index-field** `FirstName`. + The results will include only the _Employee_ documents that match the query predicate. +* For detailed guidance on querying with an index, refer to [Querying an index](../indexes/querying/query-index.mdx). + + + +{`/** @var array $results */ +$results = $session->query(Employee::class, Employees_ByFirstAndLastName::class) + ->whereEquals('FirstName', "Robert") + ->toList(); +`} + + + + + + +## Understanding index query results + + + +A common mistake is treating indexes like SQL Views, but they are not analogous. +The results of a query for a given index are the **full raw documents** that match the query predicate, +and not just the indexed fields. + +This behavior can be changed by applying [Projections](../indexes/querying/projections.mdx), +which let you project the query results into selected fields instead of returning the entire document. +A sketch follows at the end of this section. + + +#### Viewing the resulting documents: + +For example, the results shown in the following image are the **documents** that match the query predicate. + +![Index query results - documents](./assets/index-query-results-1.png) + +1. This is the index query. + The query predicate filters the resulting documents based on the content of the index-fields. +2. Each row in the results represents a **matching document**. +3. In this example, the `LastName`, `FirstName`, `Title`, etc., are the raw **document-fields**. +#### Viewing the index-entries: + +If you wish to **view the index-entries** that compose the index itself, +you can enable the option to show "raw index entries" instead of the matching documents. + +![Index query results - index entries](./assets/index-query-results-2.png) + +1. Query the index (no filtering is applied in this example). +2. Click the "Settings" button and toggle on "Show raw index-entries instead of matching documents". +3. Each row in the results represents an **index-entry**. +4. In this example, the `Country`, `FullName`, and `LastName` columns are the **index-fields**, + which were defined in the index definition. +5. This is a **term**. + In this example, `usa` is a term generated by the analyzer for index-field `Country` from document `employees/4-a`.
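+ +As a minimal sketch of the projection behavior described above - assuming the PHP client's `selectFields()` API, and using `NameProjection` as a hypothetical projection class whose properties (`FirstName`, `LastName`) name the fields to return: + +{`// Project the query results into selected fields +// instead of returning the full Employee documents. +// (NameProjection is a hypothetical projection class.) +$projectedResults = $session->query(Employee::class, Employees_ByFirstAndLastName::class) + ->whereEquals('FirstName', "Robert") + ->selectFields(NameProjection::class) + ->toList(); +`} + +With such a projection, the server returns only the selected fields rather than the full documents. +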
+ + + +## If indexes exhaust system resources + +* The indexing process utilizes machine resources to keep the data up-to-date for queries. + +* If indexing drains system resources, it may indicate one or more of the following: + * Indexes may have been defined in a way that causes inefficient processing. + * The [license](https://ravendb.net/buy) may need to be upgraded. + * Your [cloud instance](/cloud/cloud-instances#a-production-cloud-cluster) (if used) may require optimization. + * Hardware upgrades may be necessary to better support your workload. + +* Refer to the [Indexing Performance View](../studio/database/indexes/indexing-performance.mdx) in the Studio to monitor and analyze the indexing process. + This view provides graphical representations and detailed statistics of all index activities at each stage. + +* Additionally, refer to the [Common indexing issues](../studio/database/indexes/indexing-performance.mdx#common-indexing-issues) section + for troubleshooting and resolving indexing challenges. + + + + diff --git a/versioned_docs/version-7.1/indexes/_what-are-indexes-python.mdx b/versioned_docs/version-7.1/indexes/_what-are-indexes-python.mdx new file mode 100644 index 0000000000..5a3924c991 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/_what-are-indexes-python.mdx @@ -0,0 +1,225 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Indexes in RavenDB enable efficient querying by processing raw data and providing fast query results without scanning the entire dataset each and every time. + Learn more in the [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) below. + +* This page provides a [Basic example](../indexes/what-are-indexes.mdx#basic-example) of creating, deploying, and querying an index. + Additional examples can be found in [Creating and deploying indexes](../indexes/creating-and-deploying.mdx), [Map Indexes](../indexes/map-indexes.mdx), + and many other articles under the "Indexes" menu. + +* In this page: + * [Indexes overview](../indexes/what-are-indexes.mdx#indexes-overview) + * [Types of indexes](../indexes/what-are-indexes.mdx#types-of-indexes) + * [Basic example](../indexes/what-are-indexes.mdx#basic-example) + * [Understanding index query results](../indexes/what-are-indexes.mdx#understanding-index-query-results) + * [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources) + + + +## Indexes overview + +**Indexes are fundamental**: + +* Indexes are fundamental to RavenDB’s query execution, enabling efficient query performance by processing the underlying data and delivering faster results. +* ALL queries in RavenDB use an index to deliver results and ensure optimal performance. + Learn more in [Queries always provide results using an index](../client-api/session/querying/how-to-query.mdx#queries-always-provide-results-using-an-index). +**Main concepts**: + +* When discussing indexes in RavenDB, three key components come into play: + * The index definition + * The indexing process + * The indexed data + +* Each of these components is described in detail in [Indexes - The moving parts](../studio/database/indexes/indexes-overview.mdx#indexes---the-moving-parts). +**The indexing process**: + +* The indexing process iterates over the raw documents, creating an **index-entry** for each document that is processed.
+ (Usually a single index-entry is created per raw document, unless working with a [fanout index](../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document)). +* Each index-entry contains **index-fields**, and each index-field contains content (**index-terms**) that was generated from the raw documents, + as defined by the index definition and depending on the [analyzer](../indexes/using-analyzers.mdx) used. +* A map is built between the indexed-terms and the documents they originated from, + enabling you to query for documents based on the indexed data. +**Automatic data processing**: + +* Once defined and deployed, an index will initially process the entire dataset. + After that, the index will only process documents that were modified, added or deleted. + This happens automatically without requiring direct user intervention. +* For example, if changes are made to documents in the "Orders" collection, + all indexes defined for the "Orders" collection will be triggered to update the index with the new data. +* This approach helps avoid costly table scans, allows the server to respond quickly, + and reduces the load on queries while optimizing machine resource usage. +**Background operation**: + +* RavenDB indexes are designed to run asynchronously in the background. +* The indexing process does not block or interrupt database operations, such as writing data or running queries, + though queries may temporarily return [stale results](../indexes/stale-indexes.mdx) until the index is fully updated. +**Separate storage**: + +* Indexes store their processed data separately, ensuring that the raw data remains unaffected. + This separation helps maintain the integrity of the raw data while allowing the index to optimize query performance. + +* If system resources become strained due to indexing, it may require adjustments to the index design, hardware, or other factors. + Learn more in [If indexes exhaust system resources](../indexes/what-are-indexes.mdx#if-indexes-exhaust-system-resources). + + + +## Types of indexes + +* Indexes in RavenDB are categorized along the following axes: + * **Auto** indexes -vs- **Static** indexes + * **Map** indexes -vs- **Map-Reduce** indexes + * **Single-Collection** (Single-Map) indexes -vs- **Multi-Collection** (Multi-Map) indexes + +* For a detailed description of each type, refer to section [Index types](../studio/database/indexes/indexes-overview.mdx#index-types). + + + +## Basic example + +In this example we create a static-index that indexes content from documents in the `Employees` [collection](../client-api/faq/what-is-a-collection.mdx). +This allows querying for _Employee_ documents by any of the index-fields (`FullName`, `LastName`, `Country`). + + + +#### Define the index + +* The first step is to define the index. + One way to do this is by inheriting from `AbstractIndexCreationTask`. + Learn more in [Define a static-index using a custom class](../indexes/creating-and-deploying.mdx#define-a-static-index-using-a-custom-class). 
+* Other methods to create a static-index are: + * [Creating a static-index using an Operation](../client-api/operations/maintenance/indexes/put-indexes.mdx) + * [Creating a static-index from the Studio](../studio/database/indexes/indexes-list-view.mdx) + + + +{`# Define the index: +# ================= + +class Employees_ByNameAndCountry(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + + self.map = """ + from employee in docs.Employees + select new \{ + LastName = employee.LastName, + FullName = employee.FirstName + " " + employee.LastName, + Country = employee.Address.Country + \} + """ +`} + + + + + + +#### Deploy the index + +* The next step is to deploy the index to the RavenDB server. + One way to do this is by calling `execute()` on the index instance. +* Additional methods for deploying static-indexes are described in [Deploy a static index](../indexes/creating-and-deploying.mdx#deploy-a-static-index). +* Once deployed, the indexing process will start indexing documents. + + + +{`# Deploy the index to the server: +# =============================== + +Employees_ByNameAndCountry().execute(store) +`} + + + + + + +#### Query the index + +* Now you can query the _Employees_ collection using the index. + In this example we query for _Employee_ documents, **filtering results based on index-fields** `LastName` and `Country`. + The results will include only the _Employee_ documents that match the query predicate. +* For detailed guidance on querying with an index, refer to [Querying an index](../indexes/querying/query-index.mdx). + + + +{`# Query the database using the index: +# =================================== + +employeesFromUK = list( + session.query_index_type(Employees_ByNameAndCountry, Employee) + # Here we query for all Employee documents that are from the UK + # and have 'King' in their LastName field: + .where_equals("Country", "UK") + .where_equals("LastName", "King") +) +`} + + + + + + +## Understanding index query results + + + +A common mistake is treating indexes like SQL Views, but they are not analogous. +The results of a query for a given index are the **full raw documents** that match the query predicate, +and not just the indexed fields. + +This behavior can be changed by applying [Projections](../indexes/querying/projections.mdx), +which let you project the query results into selected fields instead of returning the entire document. + + +#### Viewing the resulting documents: + +For example, the results shown in the following image are the **documents** that match the query predicate. + +![Index query results - documents](./assets/index-query-results-1.png) + +1. This is the index query. + The query predicate filters the resulting documents based on the content of the index-fields. +2. Each row in the results represents a **matching document**. +3. In this example, the `LastName`, `FirstName`, `Title`, etc., are the raw **document-fields**. +#### Viewing the index-entries: + +If you wish to **view the index-entries** that compose the index itself, +you can enable the option to show "raw index entries" instead of the matching documents. + +![Index query results - index entries](./assets/index-query-results-2.png) + +1. Query the index (no filtering is applied in this example). +2. Click the "Settings" button and toggle on "Show raw index-entries instead of matching documents". +3. Each row in the results represents an **index-entry**. +4.
In this example, the `Country`, `FullName`, and `LastName` columns are the **index-fields**, + which were defined in the index definition. +5. This is a **term**. + In this example, `usa` is a term generated by the analyzer for index-field `Country` from document `employees/4-a`. + + + +## If indexes exhaust system resources + +* The indexing process utilizes machine resources to keep the data up-to-date for queries. + +* If indexing drains system resources, it may indicate one or more of the following: + * Indexes may have been defined in a way that causes inefficient processing. + * The [license](https://ravendb.net/buy) may need to be upgraded. + * Your [cloud instance](/cloud/cloud-instances#a-production-cloud-cluster) (if used) may require optimization. + * Hardware upgrades may be necessary to better support your workload. + +* Refer to the [Indexing Performance View](../studio/database/indexes/indexing-performance.mdx) in the Studio to monitor and analyze the indexing process. + This view provides graphical representations and detailed statistics of all index activities at each stage. + +* Additionally, refer to the [Common indexing issues](../studio/database/indexes/indexing-performance.mdx#common-indexing-issues) section + for troubleshooting and resolving indexing challenges. + + + + diff --git a/versioned_docs/version-7.1/indexes/additional-assemblies.mdx b/versioned_docs/version-7.1/indexes/additional-assemblies.mdx new file mode 100644 index 0000000000..519bd01e3f --- /dev/null +++ b/versioned_docs/version-7.1/indexes/additional-assemblies.mdx @@ -0,0 +1,180 @@ +--- +title: "Indexes: Additional Assemblies" +hide_table_of_contents: true +sidebar_label: Additional Assemblies +sidebar_position: 29 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Indexes: Additional Assemblies + + +* Index capabilities can be expanded by importing whole libraries + with useful classes and methods that can then be used in the index syntax. + **Additional Assemblies** make it very easy to import assemblies from: + * **NuGet** + * **Runtime** + * **Local file** + +* Indexes can be enhanced with capabilities like [machine learning image recognition](https://ayende.com/blog/192001-B/using-machine-learning-with-ravendb) + or [full text searching in Office files](https://ayende.com/blog/192385-A/ravendb-5-1-features-searching-in-office-documents). + +* This feature is similar to the [Additional Sources](../indexes/extending-indexes.mdx) feature, + which can be used to add methods and classes to an index in the form of a file or pure text. + These two features can be used together: an index's Additional Sources code has access to + all of the index's Additional Assemblies. + +* In this page: + * [Syntax](../indexes/additional-assemblies.mdx#syntax) + * [Examples](../indexes/additional-assemblies.mdx#examples) + * [Basic Example](../indexes/additional-assemblies.mdx#basic-example) + * [Complex Example](../indexes/additional-assemblies.mdx#complex-example) + * [Pre-Release Packages](../indexes/additional-assemblies.mdx#pre-release-packages) + + +## Syntax + +Additional assemblies are defined using the `AdditionalAssembly` object.
+ + + +{`public class AdditionalAssembly +\{ + public static AdditionalAssembly FromRuntime(string assemblyName, + HashSet<string> usings = null); + + public static AdditionalAssembly FromPath(string assemblyPath, + HashSet<string> usings = null); + + public static AdditionalAssembly FromNuGet(string packageName, + string packageVersion, + string packageSourceUrl = null, + HashSet<string> usings = null); +\} +`} + + + +| Parameter | Type | Description | +| - | - | - | +| **assemblyName** | `string` | The name of an assembly to import from runtime | +| **assemblyPath** | `string` | Local path to an assembly | +| **packageName** | `string` | Name of a NuGet package to import | +| **packageVersion** | `string` | The version number of the NuGet package - optional | +| **packageSourceUrl** | `string` | The URL of the package's original source - optional | +| **usings** | `HashSet<string>` | A set of namespaces to attach to the compiled index with `using` - optional | +The `AdditionalAssembly`s are collected in `AdditionalAssemblies`, a property of +`IndexDefinition`: + + + +{`public HashSet<AdditionalAssembly> AdditionalAssemblies; +`} + + + + + +## Examples + +#### Basic Example + +This index is able to use the method `GetFileName()` from the class `Path` +because the namespace `System.IO` has been imported as an additional assembly. +It takes a `string` file path and retrieves just the file name and extension. + + + +{`var runtimeindex = new IndexDefinition +\{ + Name = "Dog_Pictures", + Maps = \{ @" + from user in docs.Users + let fileName = Path.GetFileName(user.ImagePath) + where fileName == ""My_Dogs.jpeg"" + select new \{ + user.Name, + fileName + \}" + \}, + AdditionalAssemblies = \{ + AdditionalAssembly.FromRuntime("System.IO") + \} +\}; +`} + + + +#### Complex Example + +This index uses a machine learning algorithm imported from NuGet that can +recognize the contents of images and classify them with an appropriate tag. +These tags are then stored in the index just like any other term. + + + +{`store.Maintenance.Send(new PutIndexesOperation(new IndexDefinition +\{ + Name = "Photographs/Tags", + Maps = + \{ + @" + from p in docs.Photographs + let photo = LoadAttachment(p, ""photo.png"") + where photo != null + let classified = ImageClassifier.Classify(photo.GetContentAsStream()) + select new \{ + p.Name, + Tag = classified.Where(x => x.Value > 0.75f).Select(x => x.Key), + _ = classified.Select(x => CreateField(x.Key, x.Value)) + \}" + \}, + AdditionalSources = new System.Collections.Generic.Dictionary<string, string> + \{ + \{ + "ImageClassifier", + @" + public static class ImageClassifier + \{ + public static IDictionary<string, float> Classify(Stream s) + \{ + // returns a list of descriptors with a + // value between 0 and 1 of how well the + // image matches that descriptor. + \} + \}" + \} + + \}, + AdditionalAssemblies = + \{ + AdditionalAssembly.FromRuntime("System.Memory"), + AdditionalAssembly.FromNuGet("System.Drawing.Common", "4.7.0"), + AdditionalAssembly.FromNuGet("Microsoft.ML", "1.5.2") + \} +\})); +`} + + + +#### Pre-Release Packages + +Additional assemblies are allowed to include pre-release packages.
+ + + +{`AdditionalAssemblies = \{ + AdditionalAssembly.FromNuGet("FlexiMvvm.Common.PreRelease", "0.10.8-prerelease") +\} +`} + + + + + diff --git a/versioned_docs/version-7.1/indexes/assets/click-to-view-terms.png b/versioned_docs/version-7.1/indexes/assets/click-to-view-terms.png new file mode 100644 index 0000000000..5bb4a4ca45 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/click-to-view-terms.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/dynamic-index-fields-1.png b/versioned_docs/version-7.1/indexes/assets/dynamic-index-fields-1.png new file mode 100644 index 0000000000..b13d9076f8 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/dynamic-index-fields-1.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/dynamic-index-fields-2.png b/versioned_docs/version-7.1/indexes/assets/dynamic-index-fields-2.png new file mode 100644 index 0000000000..dd2b4082ea Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/dynamic-index-fields-2.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/fanout-index-performance-hint-1.png b/versioned_docs/version-7.1/indexes/assets/fanout-index-performance-hint-1.png new file mode 100644 index 0000000000..1ccb5d8437 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/fanout-index-performance-hint-1.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/fanout-index-performance-hint-2.png b/versioned_docs/version-7.1/indexes/assets/fanout-index-performance-hint-2.png new file mode 100644 index 0000000000..00d4bac1d4 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/fanout-index-performance-hint-2.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/index-query-results-1.png b/versioned_docs/version-7.1/indexes/assets/index-query-results-1.png new file mode 100644 index 0000000000..0b9acecd0d Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-query-results-1.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/index-query-results-2.png b/versioned_docs/version-7.1/indexes/assets/index-query-results-2.png new file mode 100644 index 0000000000..3044311080 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-query-results-2.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/index-related-documents.png b/versioned_docs/version-7.1/indexes/assets/index-related-documents.png new file mode 100644 index 0000000000..b40f6af80b Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-related-documents.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/index-terms-1.png b/versioned_docs/version-7.1/indexes/assets/index-terms-1.png new file mode 100644 index 0000000000..a9def8b49b Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-terms-1.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/index-terms-2.png b/versioned_docs/version-7.1/indexes/assets/index-terms-2.png new file mode 100644 index 0000000000..61d032a841 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-terms-2.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/index-terms.png b/versioned_docs/version-7.1/indexes/assets/index-terms.png new file mode 100644 index 0000000000..5e556c37f0 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-terms.png differ diff --git 
a/versioned_docs/version-7.1/indexes/assets/index-throttling-01.png b/versioned_docs/version-7.1/indexes/assets/index-throttling-01.png new file mode 100644 index 0000000000..f8b58bc72e Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-throttling-01.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/index-throttling-02.png b/versioned_docs/version-7.1/indexes/assets/index-throttling-02.png new file mode 100644 index 0000000000..eec08141de Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-throttling-02.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/index-throttling-03.png b/versioned_docs/version-7.1/indexes/assets/index-throttling-03.png new file mode 100644 index 0000000000..13fbe4a63c Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-throttling-03.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/index-throttling-04.png b/versioned_docs/version-7.1/indexes/assets/index-throttling-04.png new file mode 100644 index 0000000000..e83accbfb3 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/index-throttling-04.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/indexing-nested-data-1.png b/versioned_docs/version-7.1/indexes/assets/indexing-nested-data-1.png new file mode 100644 index 0000000000..c43a09f3d8 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/indexing-nested-data-1.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/indexing-nested-data-2.png b/versioned_docs/version-7.1/indexes/assets/indexing-nested-data-2.png new file mode 100644 index 0000000000..22a3f12e24 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/indexing-nested-data-2.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/list-of-indexes-view.png b/versioned_docs/version-7.1/indexes/assets/list-of-indexes-view.png new file mode 100644 index 0000000000..ce872b0298 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/list-of-indexes-view.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/polymorphic_indexes_faq.png b/versioned_docs/version-7.1/indexes/assets/polymorphic_indexes_faq.png new file mode 100644 index 0000000000..51ea7d8821 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/polymorphic_indexes_faq.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/query-view.png b/versioned_docs/version-7.1/indexes/assets/query-view.png new file mode 100644 index 0000000000..57fa6641cb Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/query-view.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/rolling-index-deployment-01.png b/versioned_docs/version-7.1/indexes/assets/rolling-index-deployment-01.png new file mode 100644 index 0000000000..007a21ab15 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/rolling-index-deployment-01.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/rolling-index-deployment-02.png b/versioned_docs/version-7.1/indexes/assets/rolling-index-deployment-02.png new file mode 100644 index 0000000000..84f9a0c4fb Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/rolling-index-deployment-02.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/snagit/store-field-in-index-1.snagx b/versioned_docs/version-7.1/indexes/assets/snagit/store-field-in-index-1.snagx new file mode 100644 index 0000000000..1887cc046e 
Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/snagit/store-field-in-index-1.snagx differ diff --git a/versioned_docs/version-7.1/indexes/assets/snagit/store-field-in-index-2.snagx b/versioned_docs/version-7.1/indexes/assets/snagit/store-field-in-index-2.snagx new file mode 100644 index 0000000000..69682580c8 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/snagit/store-field-in-index-2.snagx differ diff --git a/versioned_docs/version-7.1/indexes/assets/snagit/store-field-in-index-3.snagx b/versioned_docs/version-7.1/indexes/assets/snagit/store-field-in-index-3.snagx new file mode 100644 index 0000000000..1ed8da6ddc Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/snagit/store-field-in-index-3.snagx differ diff --git a/versioned_docs/version-7.1/indexes/assets/store-field-in-index-1.png b/versioned_docs/version-7.1/indexes/assets/store-field-in-index-1.png new file mode 100644 index 0000000000..be04172b12 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/store-field-in-index-1.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/store-field-in-index-2.png b/versioned_docs/version-7.1/indexes/assets/store-field-in-index-2.png new file mode 100644 index 0000000000..2816b03a26 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/store-field-in-index-2.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/store-field-in-index-3.png b/versioned_docs/version-7.1/indexes/assets/store-field-in-index-3.png new file mode 100644 index 0000000000..92b0f74a93 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/store-field-in-index-3.png differ diff --git a/versioned_docs/version-7.1/indexes/assets/term-vector-enabled.png b/versioned_docs/version-7.1/indexes/assets/term-vector-enabled.png new file mode 100644 index 0000000000..546ec5bb36 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/assets/term-vector-enabled.png differ diff --git a/versioned_docs/version-7.1/indexes/boosting.mdx b/versioned_docs/version-7.1/indexes/boosting.mdx new file mode 100644 index 0000000000..385a2cce56 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/boosting.mdx @@ -0,0 +1,49 @@ +--- +title: "Indexes: Boosting" +hide_table_of_contents: true +sidebar_label: Boosting +sidebar_position: 23 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import BoostingCsharp from './_boosting-csharp.mdx'; +import BoostingJava from './_boosting-java.mdx'; +import BoostingPhp from './_boosting-php.mdx'; +import BoostingNodejs from './_boosting-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/creating-and-deploying.mdx b/versioned_docs/version-7.1/indexes/creating-and-deploying.mdx new file mode 100644 index 0000000000..338fe99570 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/creating-and-deploying.mdx @@ -0,0 +1,55 @@ +--- +title: "Creating and Deploying Indexes" +hide_table_of_contents: true +sidebar_label: Creating and Deploying Indexes +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import CreatingAndDeployingCsharp from './_creating-and-deploying-csharp.mdx'; +import CreatingAndDeployingJava from 
'./_creating-and-deploying-java.mdx'; +import CreatingAndDeployingNodejs from './_creating-and-deploying-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/extending-indexes.mdx b/versioned_docs/version-7.1/indexes/extending-indexes.mdx new file mode 100644 index 0000000000..7091bda617 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/extending-indexes.mdx @@ -0,0 +1,37 @@ +--- +title: "Indexes: Extending Indexes" +hide_table_of_contents: true +sidebar_label: Extending Indexes +sidebar_position: 28 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ExtendingIndexesCsharp from './_extending-indexes-csharp.mdx'; +import ExtendingIndexesJava from './_extending-indexes-java.mdx'; +import ExtendingIndexesNodejs from './_extending-indexes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/index-administration.mdx b/versioned_docs/version-7.1/indexes/index-administration.mdx new file mode 100644 index 0000000000..095985afaf --- /dev/null +++ b/versioned_docs/version-7.1/indexes/index-administration.mdx @@ -0,0 +1,208 @@ +--- +title: "Index Administration" +hide_table_of_contents: true +sidebar_label: Index Administration +sidebar_position: 4 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Index Administration + + +* Indexes can be easily managed from the [Studio](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view) + or via [Maintenance operations](../client-api/operations/what-are-operations.mdx#maintenance-operations) in the Client API. + +* In this article: + * [Index management operations](../indexes/index-administration.mdx#index-management-operations) + * [Pause & resume index](../indexes/index-administration.mdx#pause--resume-index) + * [Pause & resume indexing](../indexes/index-administration.mdx#pause--resume-indexing) + * [Disable & enable index](../indexes/index-administration.mdx#disable--enable-index) + * [Disable & enable indexing](../indexes/index-administration.mdx#disable--enable-indexing) + * [Reset index](../indexes/index-administration.mdx#reset-index) + * [Delete index](../indexes/index-administration.mdx#delete-index) + * [Set index lock mode](../indexes/index-administration.mdx#set-index-lock-mode) + * [Set index priority](../indexes/index-administration.mdx#set-index-priority) + * [Index states](../indexes/index-administration.mdx#index-states) + * [Customize indexing configuration](../indexes/index-administration.mdx#customize-indexing-configuration) + + +## Index management operations + + + +##### Pause & resume index +* An index can be paused (and resumed). + +* See [pause index](../client-api/operations/maintenance/indexes/stop-index.mdx) & [resume index](../client-api/operations/maintenance/indexes/start-index.mdx) for detailed information. + +* Operation scope: Single node. + + + + +##### Pause & resume indexing +* You can pause (and resume) indexing of ALL indexes. 
+ +* See [pause indexing](../client-api/operations/maintenance/indexes/stop-indexing.mdx) & [resume indexing](../client-api/operations/maintenance/indexes/start-indexing.mdx) for detailed information. + +* Operation scope: Single node. + + + + +##### Disable & enable index +* An index can be disabled (and enabled); this is a persistent operation. + +* See [disable index](../client-api/operations/maintenance/indexes/disable-index.mdx) & [enable index](../client-api/operations/maintenance/indexes/enable-index.mdx) for detailed information. + +* Operation scope: Single node, or all database-group nodes. + + + + +##### Disable & enable indexing +* Indexing can be disabled (and enabled) for ALL indexes; this is a persistent operation. + +* This is done from the [database list view](../studio/database/databases-list-view.mdx#more-actions) in the Studio. + +* Operation scope: All database-group nodes. + + + + +##### Reset index +* Resetting an index will force re-indexing of all documents that match the index definition. + An index usually needs to be reset when it reaches its error quota and enters the _Error_ state. + +* See [reset index](../client-api/operations/maintenance/indexes/reset-index.mdx) for detailed information. + +* Operation scope: Single node. + + + + +##### Delete index +* An index can be deleted from the database. + +* See [delete index](../client-api/operations/maintenance/indexes/delete-index.mdx) for detailed information. + +* Operation scope: All database-group nodes. + + + + +##### Set index lock mode +* The lock mode controls whether modifications to the index definition are applied (static indexes only). + +* See [set index lock](../client-api/operations/maintenance/indexes/set-index-lock.mdx) for detailed information. + +* Operation scope: All database-group nodes. + + + + +##### Set index priority +* Each index has a dedicated thread that handles all the work for the index. + Setting the index priority will affect the thread priority at the operating system level. + +* See [set index priority](../client-api/operations/maintenance/indexes/set-index-priority.mdx) for detailed information. + +* Operation scope: All database-group nodes. + + + + +## Index states + +An index can be in one of the following states: + +* `Normal` + * The index is active; any new data is indexed. + +* `Paused` + * New data is not being indexed. + * Queries will be stale as new data is not indexed. + * The indexing process will resume upon any of the following actions: + * Setting _'Resume indexing'_ from the [Studio](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions). + * Resume indexing from the Client API. See [Resume index operation](../client-api/operations/maintenance/indexes/start-index.mdx). + * Restarting the server. + * Reloading the database. See [How to reload the database](../studio/database/settings/database-settings.mdx#how-to-reload-the-database). + +* `Disabled` + * New data is not being indexed. + * Queries will be stale as new data is not indexed. + * The indexing process will resume upon either of the following: + * Setting _'Enable indexing'_ from the [Studio](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---actions). + * Resume indexing from the Client API. See [Enable index operation](../client-api/operations/maintenance/indexes/enable-index.mdx).
+ * The index will NOT automatically resume upon restarting the server or reloading the database. + +* `Idle` (auto-indexes only) + + * An auto-index is marked as _'Idle'_ when it has not been queried for a configurable period of time. + This state indicates that the index may later be deleted, as detailed in the following points: + + * Specifically, an **auto-index** is marked as _'Idle'_ when the time difference between its last-query-time + and the most recent time the database was queried (using any other index) exceeds a configurable threshold. This threshold is set by the + [Indexing.TimeToWaitBeforeMarkingAutoIndexAsIdleInMin](../server/configuration/indexing-configuration.mdx#indexingtimetowaitbeforemarkingautoindexasidleinmin) configuration key (30 minutes by default). + + * This mechanism is designed to prevent auto-indexes from being marked as idle in databases that were offline for a long period, + had no new data to index, were not queried, or were recently restored from a snapshot or backup. + + * While an auto-index is _'Idle'_, it is NOT considered _'Disabled'_. + **It continues to index data** from any documents relevant to its definition as they are created or modified. + + * An idle auto-index returns to the _'Normal'_ state in the following cases: + * When it is queried again. + * When the auto-index is reset. + * When [the database is reloaded](../studio/database/settings/database-settings.mdx#how-to-reload-the-database). + + * If the idle auto-index is not returned to the _'Normal'_ state, the server will **delete** it after a configurable time period, + set by the [Indexing.TimeToWaitBeforeDeletingAutoIndexMarkedAsIdleInHrs](../server/configuration/indexing-configuration.mdx#indexingtimetowaitbeforedeletingautoindexmarkedasidleinhrs) configuration key (72 hours by default). + + * Note: + The server evaluates whether an auto-index should be marked as idle, or whether an idle auto-index should be deleted, + at intervals defined by the [Indexing.CleanupIntervalInMin](../server/configuration/indexing-configuration.mdx#indexingcleanupintervalinmin) configuration key (10 minutes by default). + If _TimeToWaitBeforeMarkingAutoIndexAsIdleInMin_ or _TimeToWaitBeforeDeletingAutoIndexMarkedAsIdleInHrs_ + are set to values smaller than the cleanup interval, the index will be marked as idle or deleted only after the cleanup interval elapses. + +* `Error` + * An indexing error can occur when the indexing-function is malformed (e.g., incorrectly written) + or when the document data is corrupted/missing. + * Once the index error rate exceeds a certain threshold (as described in [Marking index as errored](../indexes/troubleshooting/debugging-index-errors.mdx#marking-index-as-errored)), + the index state is marked as _'Error'_. + * An errored index cannot be queried - all queries against it will result in an exception. + * Learn more in [Debugging index errors](../indexes/troubleshooting/debugging-index-errors.mdx). + +* `Faulty` + * When an index is successfully defined but the server fails to open its index data file from disk, or if this file is corrupted, + the server marks the index as _'Faulty'_, indicating that something is wrong with its index data files. + * Learn more in [Faulty index](../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---errors). + + + +## Customize indexing configuration + +* There are many [indexing configuration](../server/configuration/indexing-configuration.mdx) options available.
+ +* A configuration key with a **"per-index" scope** can be customized for a specific index, + overriding the server-wide and the database configuration values. + +* The "per-index" configuration key can be set from: + * The [configuration tab](../studio/database/indexes/create-map-index.mdx#configuration) in the Edit Index view in the Studio. + * The [index class constructor](../indexes/creating-and-deploying.mdx#creating-an-index-with-custom-configuration) when defining an index. + * The [index definition](../client-api/operations/maintenance/indexes/put-indexes.mdx#put-indexes-operation-with-indexdefinition) when sending a [putIndexesOperation](../client-api/operations/maintenance/indexes/put-indexes.mdx). + +**Expert configuration options**: + +* [Server.IndexingAffinityMask](../server/configuration/server-configuration.mdx#serverindexingaffinitymask) - Control the affinity mask of indexing threads +* [Server.NumberOfUnusedCoresByIndexes](../server/configuration/server-configuration.mdx#servernumberofunusedcoresbyindexes) - Set the number of cores that _won't_ be used by indexes + + + diff --git a/versioned_docs/version-7.1/indexes/index-throttling.mdx b/versioned_docs/version-7.1/indexes/index-throttling.mdx new file mode 100644 index 0000000000..852f480168 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/index-throttling.mdx @@ -0,0 +1,156 @@ +--- +title: "Indexes: Index Throttling" +hide_table_of_contents: true +sidebar_label: Index Throttling +sidebar_position: 22 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Indexes: Index Throttling + + +* **Index Throttling** delays indexing operations by a time interval of your choice. + Indexing is triggered normally when items are added or modified, but RavenDB suspends + index processing for the set interval. +* The gaps in indexing activity and the processing of + [larger batches](../indexes/index-throttling.mdx#throttling-and-batches) + can reduce overall indexing CPU utilization. +* Indexing can be throttled server-wide, per-database, and per-index. +* Index throttling applies to the processing of **existing** indexes. + **New** indexes are processed sequentially without delay. + +* In this page: + * [Why Use Index Throttling?](../indexes/index-throttling.mdx#why-use-index-throttling) + * [Index Throttling and Batches](../indexes/index-throttling.mdx#throttling-and-batches) + * [Setting Index Throttling](../indexes/index-throttling.mdx#setting-index-throttling) + * [Server-Wide](../indexes/index-throttling.mdx#server-wide-index-throttling) + * [Per-Database](../indexes/index-throttling.mdx#index-throttling-per-database) + * [Per-Index](../indexes/index-throttling.mdx#index-throttling-in-an-index-definition) + + +## Why Use Index Throttling? + +Indexing a rapidly modified group of items (e.g. a time series, a group of counters, +or a patched collection) can preoccupy RavenDB with lengthy consecutive indexing of +small batches of items. + +![Throttled Index Performance View](./assets/index-throttling-01.png) + +The Indexing Performance view shown above demonstrates the processing of a throttled index +along the timeline, with a 1500-millisecond gap between consecutive batches.
+Throttling creates indexing-free time gaps that the database can use for other +tasks, and allows the processing of larger batches when indexing does take place. + + +Throttled indexes are kept [stale](../indexes/stale-indexes.mdx#indexes-stale-indexes) +as long as their processing is incomplete. +They are therefore expected to remain stale longer than indexes that are processed sequentially. + + + + +## Throttling and Batches + +The delay in index processing increases the size of item [batches](../server/configuration/indexing-configuration.mdx#indexingmapbatchsize) +that are processed during indexing intervals. + +In special cases, throttled indexes' batches are processed without delay. + +* The batch following a batch that cannot be processed for known reasons will be processed + without delay. +* Accumulated data (collected due to batch size configuration, for example) whose processing + requires multiple batches will be processed with no delay between batches. + + + +## Setting Index Throttling +### Server-Wide Index Throttling + + Index Throttling can be set server-wide using a designated [configuration option](../server/configuration/configuration-options.mdx#settingsjson). + Setting the server-wide configuration option will apply to all databases on a given node. + +* Set the index throttling time interval in Milliseconds using the `Indexing.Throttling.TimeIntervalInMs` configuration option, e.g.: + `"Indexing.Throttling.TimeIntervalInMs": "5000"` +### Index Throttling Per-Database + +Enable or disable index throttling for a specific database using the designated database configuration key. +Setting this property overrides the +[Server-Wide](../indexes/index-throttling.mdx#server-wide-index-throttling) default. + +* **From Studio**: + + ![Database Configuration Keys](./assets/index-throttling-02.png) + + 1. Open **Settings** > **Database Settings** view. + 2. **Filter Keys** - Enter a search string to locate the configuration key. + 3. **Edit** - Click to edit values (see next image for details). + 4. **Configuration Key** - + `Indexing.Throttling.TimeIntervalInMs` - Index Throttling configuration key. + 5. **Effective Value** - The current configuration. + 6. **Origin** - The origin of the current configuration. + Can be - Default | Database + + ![Edit Values](./assets/index-throttling-03.png) + + 1. **Override** - Toggle to override the server-wide configuration. + 2. **Edit Value** - Enter a new time in Milliseconds, or leave empty for null (no throttling). + 3. **Set Default** - Click 'Set Default' to select the server-wide default value. + 4. **Save** - Apply changes. + + An edited configuration key's value will become effective only after the database is reloaded. + +### Index Throttling in an Index Definition + +Setting throttling in an index definition overrides the [server-wide](../indexes/index-throttling.mdx#server-wide-index-throttling) +and [per-database](../indexes/index-throttling.mdx#index-throttling-per-database) settings. + + +* **From Code**: + Set throttling for a specific index using the `Indexing.Throttling.TimeIntervalInMs` property.
+ + +{`var index = new IndexDefinition +\{ + Name = "ByOrderedAt", + Maps = \{"from o in docs.Orders select new \{o.OrderedAt\}"\}, + Configuration = new IndexConfiguration + \{ + \{ "Indexing.Throttling.TimeIntervalInMs", "2000" \} + \}, +\}; +`} + + + + The batch size can be left for RavenDB to decide, or you can set it yourself using + the [Indexing.MapBatchSize](../server/configuration/indexing-configuration.mdx#indexingmapbatchsize) property. + + +{`Configuration = new IndexConfiguration +\{ + \{ "Indexing.Throttling.TimeIntervalInMs", "2000" \}, + \{ "Indexing.MapBatchSize", "50" \} +\}, +`} + + + +* **From Studio**: + Use the [Configuration tab](../studio/database/indexes/create-map-index.mdx#configuration) of an index definition + to set the index's `Indexing.Throttling.TimeIntervalInMs` configuration key value in Milliseconds. + ![Configuration Key](./assets/index-throttling-04.png) + 1. **Configuration Tab** - Open to set index configuration keys. + 2. **Add Customized Indexing Configuration** - Click to add a configuration key. + 3. **Indexing Configuration Key** - + Type `Indexing.Throttling.TimeIntervalInMs` or select this key from the droplist to set index throttling. + 4. **Value** - Type the throttling time interval in Milliseconds. + + + + diff --git a/versioned_docs/version-7.1/indexes/indexing-attachments.mdx b/versioned_docs/version-7.1/indexes/indexing-attachments.mdx new file mode 100644 index 0000000000..8f42c5faf2 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-attachments.mdx @@ -0,0 +1,21 @@ +--- +title: "Indexing Attachments" +hide_table_of_contents: true +sidebar_label: Indexing Attachments +sidebar_position: 16 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Indexing Attachments + + +Learn about **Indexing Attachments** here: [Document Extensions > Attachments > Indexing Attachments](../document-extensions/attachments/indexing.mdx).
+ + + diff --git a/versioned_docs/version-7.1/indexes/indexing-basics.mdx b/versioned_docs/version-7.1/indexes/indexing-basics.mdx new file mode 100644 index 0000000000..4af6d10b69 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-basics.mdx @@ -0,0 +1,49 @@ +--- +title: "Indexes: Indexing Basics" +hide_table_of_contents: true +sidebar_label: Indexing Basics +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingBasicsCsharp from './_indexing-basics-csharp.mdx'; +import IndexingBasicsJava from './_indexing-basics-java.mdx'; +import IndexingBasicsNodejs from './_indexing-basics-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/indexing-counters.mdx b/versioned_docs/version-7.1/indexes/indexing-counters.mdx new file mode 100644 index 0000000000..58b32713fc --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-counters.mdx @@ -0,0 +1,21 @@ +--- +title: "Indexing Counters" +hide_table_of_contents: true +sidebar_label: Indexing Counters +sidebar_position: 17 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Indexing Counters + + +Learn about **Indexing Counters** here: [Document Extensions > Counters > Indexing Counters](../document-extensions/counters/indexing.mdx). + + + diff --git a/versioned_docs/version-7.1/indexes/indexing-hierarchical-data.mdx b/versioned_docs/version-7.1/indexes/indexing-hierarchical-data.mdx new file mode 100644 index 0000000000..43d85a2881 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-hierarchical-data.mdx @@ -0,0 +1,54 @@ +--- +title: "Indexing Hierarchical Data" +hide_table_of_contents: true +sidebar_label: Indexing Hierarchical Data +sidebar_position: 13 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingHierarchicalDataJava from './_indexing-hierarchical-data-java.mdx'; +import IndexingHierarchicalDataCsharp from './_indexing-hierarchical-data-csharp.mdx'; +import IndexingHierarchicalDataPython from './_indexing-hierarchical-data-python.mdx'; +import IndexingHierarchicalDataPhp from './_indexing-hierarchical-data-php.mdx'; +import IndexingHierarchicalDataNodejs from './_indexing-hierarchical-data-nodejs.mdx'; + +export const supportedLanguages = ["java", "csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/indexing-linq-extensions.mdx b/versioned_docs/version-7.1/indexes/indexing-linq-extensions.mdx new file mode 100644 index 0000000000..2c08dc68f7 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-linq-extensions.mdx @@ -0,0 +1,38 @@ +--- +title: "Indexes: Indexing LINQ Extensions" +hide_table_of_contents: true +sidebar_label: Indexing LINQ Extensions +sidebar_position: 19 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingLinqExtensionsCsharp from 
'./_indexing-linq-extensions-csharp.mdx'; +import IndexingLinqExtensionsJava from './_indexing-linq-extensions-java.mdx'; +import IndexingLinqExtensionsNodejs from './_indexing-linq-extensions-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/indexing-metadata.mdx b/versioned_docs/version-7.1/indexes/indexing-metadata.mdx new file mode 100644 index 0000000000..d9cbf3b6f8 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-metadata.mdx @@ -0,0 +1,50 @@ +--- +title: "Indexing Metadata" +hide_table_of_contents: true +sidebar_label: Indexing Metadata +sidebar_position: 20 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingMetadataCsharp from './_indexing-metadata-csharp.mdx'; +import IndexingMetadataJava from './_indexing-metadata-java.mdx'; +import IndexingMetadataPython from './_indexing-metadata-python.mdx'; +import IndexingMetadataPhp from './_indexing-metadata-php.mdx'; +import IndexingMetadataNodejs from './_indexing-metadata-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/indexing-nested-data.mdx b/versioned_docs/version-7.1/indexes/indexing-nested-data.mdx new file mode 100644 index 0000000000..dfaa98f5b5 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-nested-data.mdx @@ -0,0 +1,50 @@ +--- +title: "Indexing Nested data" +hide_table_of_contents: true +sidebar_label: Indexing Nested Data +sidebar_position: 12 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingNestedDataCsharp from './_indexing-nested-data-csharp.mdx'; +import IndexingNestedDataJava from './_indexing-nested-data-java.mdx'; +import IndexingNestedDataPython from './_indexing-nested-data-python.mdx'; +import IndexingNestedDataPhp from './_indexing-nested-data-php.mdx'; +import IndexingNestedDataNodejs from './_indexing-nested-data-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/indexing-polymorphic-data.mdx b/versioned_docs/version-7.1/indexes/indexing-polymorphic-data.mdx new file mode 100644 index 0000000000..c3fb00682b --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-polymorphic-data.mdx @@ -0,0 +1,54 @@ +--- +title: "Indexing Polymorphic Data" +hide_table_of_contents: true +sidebar_label: Indexing Polymorphic Data +sidebar_position: 14 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingPolymorphicDataCsharp from './_indexing-polymorphic-data-csharp.mdx'; +import IndexingPolymorphicDataJava from './_indexing-polymorphic-data-java.mdx'; +import IndexingPolymorphicDataPython from './_indexing-polymorphic-data-python.mdx'; +import IndexingPolymorphicDataPhp from './_indexing-polymorphic-data-php.mdx'; +import IndexingPolymorphicDataNodejs from './_indexing-polymorphic-data-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + 
+ + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/indexing-related-documents.mdx b/versioned_docs/version-7.1/indexes/indexing-related-documents.mdx new file mode 100644 index 0000000000..e0b7a785f4 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-related-documents.mdx @@ -0,0 +1,54 @@ +--- +title: "Indexes: Indexing Related Documents" +hide_table_of_contents: true +sidebar_label: Indexing Related Documents +sidebar_position: 11 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingRelatedDocumentsJava from './_indexing-related-documents-java.mdx'; +import IndexingRelatedDocumentsCsharp from './_indexing-related-documents-csharp.mdx'; +import IndexingRelatedDocumentsPython from './_indexing-related-documents-python.mdx'; +import IndexingRelatedDocumentsPhp from './_indexing-related-documents-php.mdx'; +import IndexingRelatedDocumentsNodejs from './_indexing-related-documents-nodejs.mdx'; + +export const supportedLanguages = ["java", "csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/indexing-spatial-data.mdx b/versioned_docs/version-7.1/indexes/indexing-spatial-data.mdx new file mode 100644 index 0000000000..7c6d3bad56 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-spatial-data.mdx @@ -0,0 +1,56 @@ +--- +title: "Indexing Spatial Data" +hide_table_of_contents: true +sidebar_label: Indexing Spatial Data +sidebar_position: 15 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IndexingSpatialDataCsharp from './_indexing-spatial-data-csharp.mdx'; +import IndexingSpatialDataJava from './_indexing-spatial-data-java.mdx'; +import IndexingSpatialDataPython from './_indexing-spatial-data-python.mdx'; +import IndexingSpatialDataPhp from './_indexing-spatial-data-php.mdx'; +import IndexingSpatialDataNodejs from './_indexing-spatial-data-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/indexing-time-series.mdx b/versioned_docs/version-7.1/indexes/indexing-time-series.mdx new file mode 100644 index 0000000000..bfd2d8f76f --- /dev/null +++ b/versioned_docs/version-7.1/indexes/indexing-time-series.mdx @@ -0,0 +1,21 @@ +--- +title: "Indexing Time Series" +hide_table_of_contents: true +sidebar_label: Indexing Time Series +sidebar_position: 18 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Indexing Time Series + + +Learn about **Indexing Time Series** here: [Document Extensions > Time Series > Indexing Time Series](../document-extensions/timeseries/indexing.mdx). 
+ + + diff --git a/versioned_docs/version-7.1/indexes/javascript-indexes.mdx b/versioned_docs/version-7.1/indexes/javascript-indexes.mdx new file mode 100644 index 0000000000..b6e380f7c8 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/javascript-indexes.mdx @@ -0,0 +1,40 @@ +--- +title: "JavaScript Indexes" +hide_table_of_contents: true +sidebar_label: JavaScript Indexes +sidebar_position: 8 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import JavascriptIndexesCsharp from './_javascript-indexes-csharp.mdx'; +import JavascriptIndexesJava from './_javascript-indexes-java.mdx'; + +export const supportedLanguages = ["csharp", "java"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/map-indexes.mdx b/versioned_docs/version-7.1/indexes/map-indexes.mdx new file mode 100644 index 0000000000..e0c4872daf --- /dev/null +++ b/versioned_docs/version-7.1/indexes/map-indexes.mdx @@ -0,0 +1,68 @@ +--- +title: "Indexes: Map Indexes" +hide_table_of_contents: true +sidebar_label: Map Indexes +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import MapIndexesCsharp from './_map-indexes-csharp.mdx'; +import MapIndexesJava from './_map-indexes-java.mdx'; +import MapIndexesPython from './_map-indexes-python.mdx'; +import MapIndexesPhp from './_map-indexes-php.mdx'; +import MapIndexesNodejs from './_map-indexes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/map-reduce-indexes.mdx b/versioned_docs/version-7.1/indexes/map-reduce-indexes.mdx new file mode 100644 index 0000000000..0fae0b8116 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/map-reduce-indexes.mdx @@ -0,0 +1,75 @@ +--- +title: "Indexes: Map-Reduce Indexes" +hide_table_of_contents: true +sidebar_label: Map-Reduce Indexes +sidebar_position: 7 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import MapReduceIndexesCsharp from './_map-reduce-indexes-csharp.mdx'; +import MapReduceIndexesJava from './_map-reduce-indexes-java.mdx'; +import MapReduceIndexesPython from './_map-reduce-indexes-python.mdx'; +import MapReduceIndexesPhp from './_map-reduce-indexes-php.mdx'; +import MapReduceIndexesNodejs from './_map-reduce-indexes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/multi-map-indexes.mdx b/versioned_docs/version-7.1/indexes/multi-map-indexes.mdx new file mode 100644 index 0000000000..fbe0e7b5ed --- /dev/null +++ b/versioned_docs/version-7.1/indexes/multi-map-indexes.mdx @@ -0,0 +1,52 @@ +--- +title: "Multi-Map Indexes" +hide_table_of_contents: true +sidebar_label: Multi-Map Indexes +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import MultiMapIndexesCsharp from './_multi-map-indexes-csharp.mdx'; +import MultiMapIndexesJava from './_multi-map-indexes-java.mdx'; +import MultiMapIndexesPython from 
'./_multi-map-indexes-python.mdx';
+import MultiMapIndexesPhp from './_multi-map-indexes-php.mdx';
+import MultiMapIndexesNodejs from './_multi-map-indexes-nodejs.mdx';
+
+export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"];
+ + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file
 diff --git a/versioned_docs/version-7.1/indexes/number-type-conversion.mdx b/versioned_docs/version-7.1/indexes/number-type-conversion.mdx new file mode 100644 index 0000000000..7429433e0a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/number-type-conversion.mdx @@ -0,0 +1,33 @@
+---
+title: "Indexing: Numerical Type Conversion"
+hide_table_of_contents: true
+sidebar_label: Numerical Type Conversion
+sidebar_position: 30
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import NumberTypeConversionCsharp from './_number-type-conversion-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+ + + + + + + +
 diff --git a/versioned_docs/version-7.1/indexes/querying/_category_.json b/versioned_docs/version-7.1/indexes/querying/_category_.json new file mode 100644 index 0000000000..d55dbf2aec --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_category_.json @@ -0,0 +1,4 @@
+{
+  "position": 0,
+  "label": "Querying"
+} \ No newline at end of file
 diff --git a/versioned_docs/version-7.1/indexes/querying/_distinct-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_distinct-csharp.mdx new file mode 100644 index 0000000000..05bc663fb8 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_distinct-csharp.mdx @@ -0,0 +1,275 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The `Distinct` method allows you to remove duplicates from query results.
+  Items are compared based on the fields listed in the `select` section of the query.
+
+* In this page:
+  * [Sample query with Distinct](../../indexes/querying/distinct.mdx#sample-query-with-distinct)
+  * [Paging with Distinct](../../indexes/querying/distinct.mdx#paging-with-distinct)
+  * [Count with Distinct](../../indexes/querying/distinct.mdx#count-with-distinct)
+  * [Performance cost and alternative approaches](../../indexes/querying/distinct.mdx#performance-cost-and-alternative-approaches)
+
+
+## Sample query with Distinct
+
+
+
+
+{`// Get a sorted list without duplicates:
+// =====================================
+
+List<string> countries = session
+    .Query<Order>()
+    .OrderBy(x => x.ShipTo.Country)
+    .Select(x => x.ShipTo.Country)
+    // Call 'Distinct' to remove duplicates from results
+    // Items will be compared based on field 'Country' that is specified in the above 'Select'
+    .Distinct()
+    .ToList();
+
+// Running this on the Northwind sample data
+// will result in a sorted list of 21 countries w/o duplicates.
+`}
+
+
+
+
+{`// Get a sorted list without duplicates:
+// =====================================
+
+List<string> countries = await asyncSession
+    .Query<Order>()
+    .OrderBy(x => x.ShipTo.Country)
+    .Select(x => x.ShipTo.Country)
+    // Call 'Distinct' to remove duplicates from results
+    // Items will be compared based on field 'Country' that is specified in the above 'Select'
+    .Distinct()
+    .ToListAsync();
+
+// Running this on the Northwind sample data
+// will result in a sorted list of 21 countries w/o duplicates.
+`}
+
+
+
+
+{`// Get a sorted list without duplicates:
+// =====================================
+
+IList<string> countries = session
+    .Advanced
+    .DocumentQuery<Order>()
+    .OrderBy(x => x.ShipTo.Country)
+    .SelectFields<string>("ShipTo.Country")
+    // Call 'Distinct' to remove duplicates from results
+    // Items will be compared based on field 'Country' that is specified in the above 'SelectFields'
+    .Distinct()
+    .ToList();
+
+// Running this on the Northwind sample data
+// will result in a sorted list of 21 countries w/o duplicates.
+`}
+
+
+
+
+{`from "Orders"
+order by ShipTo.Country
+select distinct ShipTo.Country
+`}
+
+
+
+
+
+
+## Paging with Distinct
+
+A special approach must be used when calling `Distinct()` while paging.
+Please read the dedicated article about [paging through tampered results](../../indexes/querying/paging.mdx#paging-through-tampered-results).
+
+
+
+## Count with Distinct
+
+Use `Count()` in combination with `Distinct()` to get the number of unique items.
+Similar to _ToList()_, _Count()_ triggers query execution on the server side.
+
+
+
+
+{`// Count the number of unique countries:
+// =====================================
+
+var numberOfCountries = session
+    .Query<Order>()
+    .Select(x => x.ShipTo.Country)
+    .Distinct()
+    .Count();
+
+// Running this on the Northwind sample data,
+// will result in 21, which is the number of unique countries.
+`}
+
+
+
+
+{`// Count the number of unique countries:
+// =====================================
+
+var numberOfCountries = await asyncSession
+    .Query<Order>()
+    .Select(x => x.ShipTo.Country)
+    .Distinct()
+    .CountAsync();
+
+// Running this on the Northwind sample data,
+// will result in 21, which is the number of unique countries.
+`}
+
+
+
+
+{`// Count the number of unique countries:
+// =====================================
+
+var numberOfCountries = session
+    .Advanced
+    .DocumentQuery<Order>()
+    .SelectFields<string>("ShipTo.Country")
+    .Distinct()
+    .Count();
+
+// Running this on the Northwind sample data,
+// will result in 21, which is the number of unique countries.
+`}
+
+
+
+
+{`// This RQL is intended for use when issuing a Raw Query from the client API.
+// Running directly from the Studio will not display the number of results.
+// ========================================================================
+
+from "Orders"
+select distinct ShipTo.Country
+limit 0, 0
+`}
+
+
+
+
+### Performance cost and alternative approaches
+
+* Please keep in mind that using `Count()` with `Distinct()` might not be efficient for large sets of data
+  as it requires scanning all index results to find unique values.
+
+* Getting the distinct items' count can also be achieved by creating a [Map-Reduce](../../indexes/map-reduce-indexes.mdx) index
+  that will aggregate data by the field for which distinct count results are needed.
+
+* Using a Map-Reduce index is more efficient since computations are done during indexing time and not at query time.
+  The entire dataset is [indexed](../../indexes/creating-and-deploying.mdx) once,
+  after which the aggregated value is always kept up to date, as indexing will occur only for new/modified data.
+
+#### Map-Reduce index example:
+
+Index definition:
+
+
+
+{`public class Orders_ByShipToCountry : AbstractIndexCreationTask<Order, Orders_ByShipToCountry.IndexEntry>
+\{
+    public class IndexEntry
+    \{
+        public string Country \{ get; set; \}
+        public int CountryCount \{ get; set; \}
+    \}
+
+    public Orders_ByShipToCountry()
+    \{
+        // The Map phase indexes the country listed in each order document
+        // CountryCount is assigned with 1, which will be aggregated in the Reduce phase
+        Map = orders => from order in orders
+                        select new IndexEntry
+                        \{
+                            Country = order.ShipTo.Country,
+                            CountryCount = 1
+                        \};
+
+        // The Reduce phase will group the country results and aggregate the CountryCount
+        Reduce = results => from result in results
+                            group result by result.Country
+                            into g
+                            select new IndexEntry
+                            \{
+                                Country = g.Key,
+                                CountryCount = g.Sum(x => x.CountryCount)
+                            \};
+    \}
+\}
+`}
+
+
+
+Query the index:
+
+
+
+
+{`// Query the map-reduce index defined above
+var queryResult = session
+    .Query<Orders_ByShipToCountry.IndexEntry, Orders_ByShipToCountry>()
+    .ToList();
+
+// The resulting list contains all index-entry items where each entry represents a country.
+// The size of the list corresponds to the number of unique countries.
+var numberOfUniqueCountries = queryResult.Count;
+`}
+
+
+
+
+{`// Query the map-reduce index defined above
+var queryResult = await asyncSession
+    .Query<Orders_ByShipToCountry.IndexEntry, Orders_ByShipToCountry>()
+    .ToListAsync();
+
+// The resulting list contains all index-entry items where each entry represents a country.
+// The size of the list corresponds to the number of unique countries.
+var numberOfUniqueCountries = queryResult.Count;
+`}
+
+
+
+
+{`// Query the map-reduce index defined above
+var queryResult = session.Advanced
+    .DocumentQuery<Orders_ByShipToCountry.IndexEntry, Orders_ByShipToCountry>()
+    .ToList();
+
+// The resulting list contains all index-entry items where each entry represents a country.
+// The size of the list corresponds to the number of unique countries.
+var numberOfUniqueCountries = queryResult.Count;
+`}
+
+
+
+
+{`from index "Orders/ByShipToCountry"
+`}
+
+
+
+#### Combining faceted queries with Map-Reduce:
+
+Faceted queries can be used together with a map-reduce index as another alternative approach.
+See the article "[Implementing a count(distinct) query in RavenDB](https://ravendb.net/articles/implementing-a-countdistinct-query-in-ravendb)" for an example.
+
+
+
 diff --git a/versioned_docs/version-7.1/indexes/querying/_distinct-java.mdx b/versioned_docs/version-7.1/indexes/querying/_distinct-java.mdx new file mode 100644 index 0000000000..ec6134f620 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_distinct-java.mdx @@ -0,0 +1,188 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The `distinct` method allows you to remove duplicates from query results.
+  Items are compared based on the fields listed in the `select` section of the query.
+
+* In this page:
+  * [Sample query with distinct](../../indexes/querying/distinct.mdx#sample-query-with-distinct)
+  * [Paging with distinct](../../indexes/querying/distinct.mdx#paging-with-distinct)
+  * [Count with distinct](../../indexes/querying/distinct.mdx#count-with-distinct)
+  * [Performance cost and an alternative approach](../../indexes/querying/distinct.mdx#performance-cost-and-an-alternative-approach)
+
+
+## Sample query with distinct
+
+
+
+
+{`// Get a sorted list without duplicates:
+// =====================================
+
+List<String> countries = session
+    .query(Order.class)
+    .orderBy("ShipTo.Country")
+    .selectFields(String.class, "ShipTo.Country")
+    // Call 'distinct' to remove duplicates from results
+    // Items will be compared based on field 'Country' that is specified in the above 'selectFields'
+    .distinct()
+    .toList();
+
+// Running this on the Northwind sample data
+// will result in a sorted list of 21 countries w/o duplicates.
+`}
+
+
+
+
+{`from "Orders"
+order by ShipTo.Country
+select distinct ShipTo.Country
+`}
+
+
+
+
+
+
+## Paging with distinct
+
+A special approach must be used when calling `distinct()` while paging.
+Please read the dedicated article about [paging through tampered results](../../indexes/querying/paging.mdx#paging-through-tampered-results).
+
+
+
+## Count with distinct
+
+Use `count()` in combination with `distinct()` to get the number of unique items.
+Similar to _toList()_, _count()_ triggers query execution on the server side.
+
+
+
+
+{`// Count the number of unique countries:
+// =====================================
+
+int numberOfCountries = session
+    .query(Order.class)
+    .selectFields(String.class, "ShipTo.Country")
+    .distinct()
+    .count();
+
+// Running this on the Northwind sample data,
+// will result in 21, which is the number of unique countries.
+`}
+
+
+
+
+{`// This RQL is intended for use when issuing a Raw Query from the client API.
+// Running directly from the Studio will not display the number of results.
+// ========================================================================
+
+from "Orders"
+select distinct ShipTo.Country
+limit 0, 0
+`}
+
+
+
+
+### Performance cost and an alternative approach
+
+* Please keep in mind that using `count()` with `distinct()` might not be efficient for large sets of data
+  as it requires scanning all index results to find unique values.
+
+* Getting the distinct items' count can also be achieved by creating a [Map-Reduce](../../indexes/map-reduce-indexes.mdx) index
+  that will aggregate data by the field for which distinct count results are needed.
+
+* Using a Map-Reduce index is more efficient since computations are done during indexing time and not at query time.
+  The entire dataset is [indexed](../../indexes/creating-and-deploying.mdx) once,
+  after which the aggregated value is always kept up to date, as indexing will occur only for new/modified data.
+
+#### Map-Reduce index example:
+
+Index definition:
+
+
+
+{`public static class Orders_ByShipToCountry extends AbstractIndexCreationTask \{
+
+    public Orders_ByShipToCountry() \{
+
+        // The map phase indexes the country listed in each order document
+        // CountryCount is assigned with 1, which will be aggregated in the reduce phase
+        map = "docs.Orders.Select(order => new \{ " +
+              "    Country = order.ShipTo.Country, " +
+              "    CountryCount = 1 " +
+              "\})";
+
+        // The reduce phase will group the Country results and aggregate the CountryCount
+        reduce = "results.GroupBy(result => result.Country).Select(g => new \{ " +
+                 "    Country = g.Key, " +
+                 "    CountryCount = Enumerable.Sum(g, x => x.CountryCount) " +
+                 "\})";
+    \}
+
+    public static class Result \{
+        private String country;
+        private int countryCount;
+
+        public String getCountry() \{
+            return country;
+        \}
+
+        public void setCountry(String country) \{
+            this.country = country;
+        \}
+
+        public int getCountryCount() \{
+            return countryCount;
+        \}
+
+        public void setCountryCount(int countryCount) \{
+            this.countryCount = countryCount;
+        \}
+    \}
+\}
+`}
+
+
+
+Query the index:
+
+
+
+
+{`// Query the map-reduce index defined above
+try (IDocumentSession session = DocumentStoreHolder.store.openSession()) {
+    List<Orders_ByShipToCountry.Result> queryResult = session
+        .query(Orders_ByShipToCountry.Result.class, Orders_ByShipToCountry.class)
+        .toList();
+
+    // The resulting list contains all index-entry items where each entry represents a country.
+    // The size of the list corresponds to the number of unique countries.
+    int numberOfUniqueCountries = queryResult.size();
+}
+`}
+
+
+
+
+{`from index "Orders/ByShipToCountry"
+`}
+
+
+
+#### Combining faceted queries with Map-Reduce:
+
+Faceted queries can be used together with a map-reduce index as another alternative approach.
+See a C# example for [Implementing a count(distinct) query in RavenDB](https://ravendb.net/articles/implementing-a-countdistinct-query-in-ravendb).
+
+
+
 diff --git a/versioned_docs/version-7.1/indexes/querying/_distinct-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_distinct-nodejs.mdx new file mode 100644 index 0000000000..0ac00c0a5b --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_distinct-nodejs.mdx @@ -0,0 +1,171 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The `distinct` method allows you to remove duplicates from query results.
+  Items are compared based on the fields listed in the `select` section of the query.
+
+* In this page:
+  * [Sample query with distinct](../../indexes/querying/distinct.mdx#sample-query-with-distinct)
+  * [Paging with distinct](../../indexes/querying/distinct.mdx#paging-with-distinct)
+  * [Count with distinct](../../indexes/querying/distinct.mdx#count-with-distinct)
+  * [Performance cost and an alternative approach](../../indexes/querying/distinct.mdx#performance-cost-and-an-alternative-approach)
+
+
+## Sample query with distinct
+
+
+
+
+{`// Get a sorted list without duplicates:
+// =====================================
+
+const countries = await session
+    .query(Order)
+    .orderBy("ShipTo.Country")
+    .selectFields("ShipTo.Country")
+    // Call 'distinct' to remove duplicates from results
+    // Items will be compared based on field 'Country' that is specified in the above 'selectFields'
+    .distinct()
+    .all();
+
+// Running this on the Northwind sample data
+// will result in a sorted list of 21 countries w/o duplicates.
+`}
+
+
+
+
+{`from "Orders"
+order by ShipTo.Country
+select distinct ShipTo.Country
+`}
+
+
+
+
+
+
+## Paging with distinct
+
+A special approach must be used when calling `distinct()` while paging.
+Please read the dedicated article about [paging through tampered results](../../indexes/querying/paging.mdx#paging-through-tampered-results).
+
+
+
+## Count with distinct
+
+Use `count()` in combination with `distinct()` to get the number of unique items.
+Similar to _toList()_, _count()_ triggers query execution on the server side.
+
+
+
+
+{`// Count the number of unique countries:
+// =====================================
+
+const numberOfCountries = await session
+    .query(Order)
+    .selectFields("ShipTo.Country")
+    .distinct()
+    .count();
+
+// Running this on the Northwind sample data,
+// will result in 21, which is the number of unique countries.
+`}
+
+
+
+
+{`// This RQL is intended for use when issuing a Raw Query from the client API.
+// Running directly from the Studio will not display the number of results.
+// ========================================================================
+
+from "Orders"
+select distinct ShipTo.Country
+limit 0, 0
+`}
+
+
+
+
+### Performance cost and an alternative approach
+
+* Please keep in mind that using `count()` with `distinct()` might not be efficient for large sets of data
+  as it requires scanning all index results to find unique values.
+
+* Getting the distinct items' count can also be achieved by creating a [Map-Reduce](../../indexes/map-reduce-indexes.mdx) index
+  that will aggregate data by the field for which distinct count results are needed.
+
+* Using a Map-Reduce index is more efficient since computations are done during indexing time and not at query time.
+  The entire dataset is [indexed](../../indexes/creating-and-deploying.mdx) once,
+  after which the aggregated value is always kept up to date, as indexing will occur only for new/modified data.
+
+#### Map-Reduce index example:
+
+Index definition:
+
+
+
+{`class Orders_ByShipToCountry extends AbstractJavaScriptIndexCreationTask \{
+
+    constructor() \{
+        super();
+
+        // The map phase indexes the country listed in each order document
+        // countryCount is assigned with 1, which will be aggregated in the reduce phase
+        this.map("Orders", order => \{
+            return \{
+                country: order.ShipTo.Country,
+                countryCount: 1
+            \}
+        \});
+
+        // The reduce phase will group the country results and aggregate the countryCount
+        this.reduce(results => results.groupBy(x => x.country).aggregate(g => \{
+            return \{
+                country: g.key,
+                countryCount: g.values.reduce((p, c) => p + c.countryCount, 0)
+            \}
+        \}));
+    \}
+\}
+`}
+
+
+
+Query the index:
+
+
+
+
+{`// Query the map-reduce index defined above
+const session = documentStore.openSession();
+const queryResult = await session
+    .query({ indexName: "Orders/ByShipToCountry" })
+    .all();
+
+// The resulting list contains all index-entry items where each entry represents a country.
+// The size of the list corresponds to the number of unique countries.
+const numberOfUniqueCountries = queryResult.length;
+`}
+
+
+
+
+{`from index "Orders/ByShipToCountry"
+`}
+
+
+
+#### Combining faceted queries with Map-Reduce:
+
+Faceted queries can be used together with a map-reduce index as another alternative approach.
+See a C# example for [Implementing a count(distinct) query in RavenDB](https://ravendb.net/articles/implementing-a-countdistinct-query-in-ravendb).
+
+
+
 diff --git a/versioned_docs/version-7.1/indexes/querying/_exploration-queries-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_exploration-queries-csharp.mdx new file mode 100644 index 0000000000..9130291285 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_exploration-queries-csharp.mdx @@ -0,0 +1,327 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Exploration Queries** form an additional layer of filtering that can be applied
+  to a dataset after its retrieval by [Query](../../client-api/session/querying/how-to-query.mdx#sessionquery),
+  [DocumentQuery](../../client-api/session/querying/how-to-query.mdx#sessionadvanceddocumentquery),
+  or [Raw RQL](../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery),
+  while the dataset is still held by the server.
+
+* The **retrieved dataset** is scanned and filtered **without requiring or creating an
+  index**, providing a way to conduct one-time explorations without creating an index that would
+  have to be maintained by the cluster.
+
+* You can filter the datasets retrieved by both **Index queries** and **Collection queries**.
+
+* Exploration queries need to be used
+  [with caution](../../indexes/querying/exploration-queries.mdx#when-should-exploration-queries-be-used)
+  since scanning and filtering all the data retrieved by a query cost substantial
+  [server resources and user waiting time](../../indexes/querying/exploration-queries.mdx#limit-the-query-and-prefer--for-recurring-queries)
+  when large datasets are handled.
+
+
+  We recommend that you -
+
+  * **Limit** the number of records that an exploration query filters.
+  * Use [where](../../indexes/querying/filtering.mdx) in recurring queries,
+    so the query would [use an index](../../indexes/querying/exploration-queries.mdx#limit-the-query-and-prefer--for-recurring-queries).
+
+
+
+* In this page:
+  * [`filter`](../../indexes/querying/exploration-queries.mdx#filter)
+  * [When should exploration queries be used](../../indexes/querying/exploration-queries.mdx#when-should-exploration-queries-be-used)
+  * [Syntax](../../indexes/querying/exploration-queries.mdx#syntax)
+  * [Usage examples](../../indexes/querying/exploration-queries.mdx#usage-examples)
+     * [With collection queries](../../indexes/querying/exploration-queries.mdx#with-collection-queries)
+     * [With queries that use an index](../../indexes/querying/exploration-queries.mdx#with-queries-that-use-an-index)
+     * [With projections](../../indexes/querying/exploration-queries.mdx#with-projections)
+     * [With user-defined JavaScript functions (`declare`)](../../indexes/querying/exploration-queries.mdx#with-user-defined-javascript-functions-)
+
+
+
+## `filter`
+
+Exploration queries can be applied using -
+
+* `Query.Filter`
+* `DocumentQuery.Filter`
+* RQL's `filter` keyword
+
+The added filtering is parsed and executed by RavenDB's Javascript engine.
+
+The provided filtering operations resemble those implemented by
+[where](../../indexes/querying/filtering.mdx) and can be further enhanced
+by Javascript functions of your own.
+Read [here](../../indexes/querying/exploration-queries.mdx#with-user-defined-javascript-functions-)
+about creating and using your own Javascript function in your filters.
+
+
+
+## When should exploration queries be used
+
+`filter` can be applied to a Collection query, for example:
+
+
+{`from Employees as e
+filter e.Address.Country = 'USA'
+`}
+
+
+
+It can also be applied to queries handled by an index, e.g.:
+
+
+
+{`// in a dynamic query via an auto-index
+from Employees as e
+where e.Title = 'Sales Representative'
+filter e.Address.Country = 'USA'
+`}
+
+
+
+
+
+{`// in a query that uses an index explicitly
+from index 'Orders/ByCompany'
+filter Count > 10
+`}
+
+
+
+Both in a collection query and in a query handled by an index, the entire retrieved
+dataset is scanned and filtered.
+This helps clarify when exploration queries should be used, why a limit
+should be set for the number of filtered records, and when `where` should
+be preferred:
+
+
+#### When to use
+Use `filter` for an ad-hoc exploration of the retrieved dataset that matches
+no existing index and is not expected to be repeated often.
+
+* You gain the ability to filter post-query results on the server side, for
+  both collection queries and when an index was used.
+* The dataset will be filtered without creating an unneeded index that the cluster
+  would continue updating from now on.
+
+
+#### Limit the query, and prefer `where` for recurring queries
+Be aware that when a large dataset is retrieved, like the whole collection in
+the case of a collection query, exploring it all using `filter` would tax the server
+in memory and CPU usage while it checks the filter condition for each query result,
+and cost the user a substantial waiting time. Therefore -
+
+* **Limit** the number of records that an exploration query filters, e.g.:
+
+
+{`from Employees as e
+filter e.Address.Country = 'USA'
+filter_limit 500 // limit the number of filtered records
+`}
+
+
+* Use [where](../../indexes/querying/filtering.mdx) rather than `filter` for recurring filtering.
+  `where` will use an index, creating it if necessary, to accelerate the filtering
+  in subsequent queries.
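+
+To make the tradeoff concrete, here is a minimal sketch, assuming the Northwind sample data - `Filter` scans the retrieved results on the server without requiring any index, while `Where` creates or reuses an auto-index:
+
+
+{`// Ad-hoc exploration - the retrieved dataset is scanned on the server, no index is created
+var adHocResults = session.Query<Employee>()
+    .Filter(e => e.Address.Country == "USA", limit: 500)
+    .ToList();
+
+// Recurring query - an auto-index on 'Address.Country' is created (if missing) and reused
+var recurringResults = session.Query<Employee>()
+    .Where(e => e.Address.Country == "USA")
+    .ToList();
+`}
+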
+
+
+
+
+## Syntax
+
+* `Query.Filter`
+
+
+{`IRavenQueryable<T> Filter<T>(this IRavenQueryable<T> source,
+    Expression<Func<T, bool>> predicate,
+    int limit = int.MaxValue);
+`}
+
+
+
+  | Parameters | Type | Description |
+  | ---------- | ---- | ----------- |
+  | **source** | `IRavenQueryable<T>` | `Filter`, defined as an `IRavenQueryable<T>` extension method |
+  | **predicate** | `Expression<Func<T, bool>>` | The condition by which retrieved records are filtered |
+  | **limit** | `int` | Limits the number of filtered records (Recommended)<br/>Default: all retrieved records |
+
+* `DocumentQuery.Filter`
+
+
+{`IDocumentQuery<T> Filter(Action<IFilterFactory<T>> builder,
+    int limit = int.MaxValue);
+`}
+
+
+
+  | Parameters | Type | Description |
+  | ---------- | ---- | ----------- |
+  | **builder** | `Action<IFilterFactory<T>>` | Your filter |
+  | **limit** | `int` | Limits the number of filtered records (Recommended)<br/>Default: all retrieved records |
+
+* **RQL**
+  * In an RQL query, use:
+    * The `filter` keyword, followed by the filtering condition.
+    * The `filter_limit` option, followed by the max number of records to filter.
+  * E.g. -
+
+
+{`from Employees as e
+where e.Title = 'Sales Representative'
+filter e.Address.Country = 'USA' // filter the retrieved dataset
+filter_limit 500 // limit the number of filtered records
+`}
+
+
+
+
+## Usage examples
+
+#### With collection queries
+
+Use `filter` with a collection query to scan and filter the entire collection.
+
+
+
+{`var result = session.Query<Employee>()
+    .Filter(f => f.Address.Country == "USA", limit: 500)
+    .SingleOrDefault();
+`}
+
+
+
+
+{`result = session.Advanced.DocumentQuery<Employee>()
+    .Filter(p => p.Equals(a => a.Address.Country, "USA"), limit: 500)
+    .SingleOrDefault();
+`}
+
+
+
+
+{`result = session.Advanced.RawQuery<Employee>
+    ("from Employees as e " +
+     "filter e.Address.Country = 'USA' " +
+     "filter_limit 500").SingleOrDefault();
+`}
+
+
+
+
+
+Filtering a sizable collection will burden the server and prolong user waiting time.
+Set a `filter_limit` to restrict the number of filtered records.
+
+#### With queries that use an index
+
+Use `filter` after a `where` clause to filter the results retrieved by an index query.
+
+
+
+{`var emp = session.Query<Employee>()
+    .Where(w => w.Title == "Sales Representative")
+    .Filter(f => f.Address.Country == "USA", limit: 500)
+    .SingleOrDefault();
+`}
+
+
+
+
+{`emp = session.Advanced.DocumentQuery<Employee>()
+    .WhereEquals(w => w.Title, "Sales Representative")
+    .Filter(p => p.Equals(a => a.Address.Country, "USA"), limit: 500)
+    .SingleOrDefault();
+`}
+
+
+
+
+{`emp = session.Advanced.RawQuery<Employee>
+    ("from Employees as e " +
+     "where e.Title = $title " +
+     "filter e.Address.Country = $country " +
+     "filter_limit $limit")
+    .AddParameter("title", "Sales Representative")
+    .AddParameter("country", "USA")
+    .AddParameter("limit", 500)
+    .SingleOrDefault();
+`}
+
+
+
+#### With projections
+
+The filtered results can be projected using `select`, like those of any other query.
+
+
+
+{`var emp1 = session
+    .Query<Employee>()
+    .Filter(f => f.Address.Country == "USA", limit: 500)
+    .Select(x => new
+    {
+        FullName = x.FirstName + " " + x.LastName
+    })
+    .ToList();
+`}
+
+
+
+
+{`var fullName = new string[]{
+    "FirstName",
+    "LastName"
+};
+
+var emp2 = session.Advanced.DocumentQuery<Employee>()
+    .Filter(p => p.Equals(a => a.Address.Country, "USA"), limit: 500)
+    .SelectFields<Employee>(fullName)
+    .ToList();
+`}
+
+
+
+
+{`var emp3 = session.Advanced.RawQuery<Employee>
+    ("from Employees as e " +
+     "filter startsWith(e.FirstName, 'A') " +
+     "select { FullName: e.FirstName + ' ' + e.LastName }");
+`}
+
+
+
+#### With user-defined JavaScript functions (`declare`)
+
+You can define a Javascript function as part of your query using the
+[declare](../../client-api/session/querying/what-is-rql.mdx#declare) keyword, and
+use it as part of your `filter` condition to freely adapt the filtering
+to your needs.
+ +Here is a simple example: + + +{`// declare a Javascript function +declare function titlePrefix(r, prefix) +\{ + // Add whatever filtering capabilities you like + return r.Title.startsWith(prefix) +\} + +from Employees as e + +// Filter using the function you've declared +filter titlePrefix(e, $prefix) +filter_limit 100 +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_exploration-queries-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_exploration-queries-nodejs.mdx new file mode 100644 index 0000000000..d2230199ed --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_exploration-queries-nodejs.mdx @@ -0,0 +1,345 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Exploration Queries** form an additional layer of filtering that can be applied + to a dataset after its retrieval by [query](../../client-api/session/querying/how-to-query.mdx#sessionquery), + or [rawQuery](../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery), + while the dataset is still held by the server. + +* The **retrieved dataset** is scanned and filtered **without requiring or creating an index**, + providing a way to conduct one-time explorations without creating an index that would have to be maintained by the cluster. + +* You can filter the datasets retrieved by both **Index queries** and **Collection queries**. + +* Exploration queries need to be used with caution when large datasets are handled + since scanning and filtering all the data retrieved by a query costs substantial server resources and user waiting time. + + + + We recommend that you - + + * **Limit** the number of records that an exploration query filters. + * Use [where](../../indexes/querying/filtering.mdx) in recurring queries, + so the query would [use an index](../../indexes/querying/exploration-queries.mdx#limit-the-query-and-prefer--for-recurring-queries). + + + +* In this page: + + * [`filter`](../../indexes/querying/exploration-queries.mdx#filter) + * [When should exploration queries be used](../../indexes/querying/exploration-queries.mdx#when-should-exploration-queries-be-used) + * [Usage examples](../../indexes/querying/exploration-queries.mdx#usage-examples) + * [With collection queries](../../indexes/querying/exploration-queries.mdx#with-collection-queries) + * [With queries that use an index](../../indexes/querying/exploration-queries.mdx#with-queries-that-use-an-index) + * [With projections](../../indexes/querying/exploration-queries.mdx#with-projections) + * [With user-defined JavaScript functions (`declare`)](../../indexes/querying/exploration-queries.mdx#with-user-defined-javascript-functions-) + * [Syntax](../../indexes/querying/exploration-queries.mdx#syntax) + + +## `filter` + +Exploration queries can be applied using - + +* `query.filter` +* RQL's `filter` keyword + +The added filtering is parsed and executed by RavenDB's Javascript engine. + +The provided filtering operations resemble those implemented by [where](../../indexes/querying/filtering.mdx) +and can be further enhanced by Javascript functions of your own. +Read [here](../../indexes/querying/exploration-queries.mdx#with-user-defined-javascript-functions-) about creating and using your own Javascript function in your filters. 
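+
+Before diving into the details, here is a minimal sketch of applying `filter` from the Node.js client, assuming the Northwind sample data (full examples follow below):
+
+
+{`// Scan (at most 100 of) the results retrieved from the 'Employees' collection - no index is created or used
+const employees = await session
+    .query({ collection: "employees" })
+    .filter(x => x.equals("Address.Country", "USA"), 100)
+    .all();
+`}
+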
+
+
+
+## When should exploration queries be used
+
+`filter` can be applied to a full-collection query, for example:
+
+
+
+{`// A full-collection query:
+// ========================
+
+from Employees as e
+filter e.Address.Country == "USA"
+
+// Results include only employees from USA
+// No auto-index is created
+`}
+
+
+
+It can also be applied to queries handled by an index:
+
+
+
+{`// A dynamic query:
+// ================
+
+from Employees as e
+where e.Title == "Sales Representative" // This triggers auto-index creation
+filter e.Address.Country == "USA"
+
+// Results include only employees from USA having the specified title
+// The auto-index created only indexes the 'Title' field
+`}
+
+
+
+
+
+{`// A static-index query:
+// =====================
+
+from index "Orders/ByCompany"
+filter Count > 10
+
+// Results include orders with Count > 10 using the specified static-index
+`}
+
+
+
+Both in a collection query and in a query handled by an index, all the results that are retrieved by the query are scanned and filtered by `filter`.
+
+
+
+#### When to use
+
+Use `filter` for an ad-hoc exploration of the retrieved dataset that matches no existing index and is not expected to be repeated often.
+
+* You gain the ability to filter post-query results on the server side, for both collection queries and when an index was used.
+* The dataset will be filtered without creating an unneeded index that the cluster would continue updating from now on.
+
+
+
+
+
+#### Limit the query, and prefer `where` for recurring queries
+
+Be aware that when a large dataset is retrieved, like the whole collection in the case of a collection query,
+exploring it using `filter` would tax the server in memory and CPU usage while it checks the filter condition for each query result,
+and cost the user a substantial waiting time. Therefore:
+
+* **Limit** the number of records that an exploration query filters, e.g.:
+
+
+{`from Orders
+// Apply filter
+filter ShipTo.Country == "UK"
+// Limit the number of records that will be scanned by the filter operation
+filter_limit 100
+
+// While there are 830 records in the Orders collection,
+// only the first 100 records that are retrieved by the query are scanned by 'filter'
+// Running this RQL on the sample data returns 4 matching results out of the 100 scanned.
+`}
+
+
+
+* Use [where](../../indexes/querying/filtering.mdx) rather than `filter` for recurring filtering.
+  `where` will use an index, creating it if necessary, to accelerate the filtering in subsequent queries.
+
+
+
+
+
+## Usage examples
+
+#### With collection queries:
+
+Use `filter` with a full-collection query to scan and filter the entire collection.
+
+
+
+
+{`const filteredCompanies = await session
+    // Make a full-collection query
+    .query({ collection: "companies" })
+    // Apply a filter, scan only first 50 records from query results
+    .filter(x => x.equals("Address.Country", "USA"), 50)
+    .all();
+
+// Results:
+// ========
+
+// * While a full-collection query on the 'companies' collection yields 91 results
+//   only the first 50 records are scanned by the filter operation -
+//   resulting in 5 matching documents.
+//
+// * No auto-index is created.
+`}
+
+
+
+
+{`const filteredCompanies = await session
+    .advanced
+    .rawQuery("from Companies filter Address.Country == 'USA' filter_limit 50")
+    .all();
+`}
+
+
+
+
+{`from "Companies"
+filter Address.Country == "USA"
+filter_limit 50
+`}
+
+
+
+
+
+
+Filtering a sizable collection will burden the server and prolong user waiting time.
+It is recommended to set a `filter_limit` to restrict the number of filtered records.
+
+
+#### With queries that use an index:
+
+Use `filter` after a `whereEquals` clause to filter the results retrieved by the query.
+
+
+
+
+{`const filteredCompanies = await session
+    // Make a dynamic query on a collection
+    .query({ collection: "companies" })
+    // Apply some condition - this will trigger auto-index creation
+    .whereEquals("Contact.Title", "Sales Representative")
+    // Apply a filter
+    .filter(x => x.equals("Address.Country", "Germany"))
+    .all();
+
+// Results:
+// ========
+
+// * The dynamic query results (before applying the filter) contain 17 results.
+//   Applying the filter results in 4 matching documents.
+//
+// * Since a query predicate was applied (using 'whereEquals')
+//   an auto-index that is indexing field 'Contact.Title' is created.
+//
+// * Field 'Address.Country' is Not included in the auto-index
+//   since it is part of the filter operation.
+`}
+
+
+
+
+{`const filteredCompanies = await session
+    .advanced
+    .rawQuery(\`from Companies
+               where Contact.Title == 'Sales Representative'
+               filter Address.Country == 'Germany'\`)
+    .all();
+`}
+
+
+
+
+{`from "Companies"
+where Contact.Title == "Sales Representative"
+filter Address.Country == "Germany"
+`}
+
+
+
+#### With projections:
+
+The filtered results can be projected using `selectFields`, like those of any other query.
+
+
+
+
+{`const filteredCompanies = await session
+    // Make a collection query
+    .query({ collection: "companies" })
+    // Apply a filter
+    .filter(x => x.equals("Address.Country", "Germany"))
+    // Any fields can be projected in the results
+    .selectFields([ "Name", "Address.City", "Address.Country"])
+    .all();
+
+    // Results:
+    // ========
+
+    // * Results include all companies with country = 'Germany'
+    //   Each resulting object contains only the selected fields.
+    //
+    // * No auto-index is created.
+`}
+
+
+
+
+{`const filteredCompanies = await session
+    .advanced
+    .rawQuery(\`from Companies
+               filter Address.Country == 'Germany'
+               select Name, Address.City, Address.Country\`)
+    .all();
+`}
+
+
+
+
+{`from "Companies"
+filter Address.Country == "Germany"
+select Name, Address.City, Address.Country
+`}
+
+
+
+#### With user-defined JavaScript functions (`declare`):
+
+When using RQL, you can define a JavaScript function using the [declare](../../client-api/session/querying/what-is-rql.mdx#declare) keyword.
+This function can then be used as part of your `filter` condition to further customize the results.
+For example:
+
+
+
+{`// Declare a Javascript function:
+// ==============================
+
+declare function filterByTitlePrefix(employee, prefix)
+\{
+    // Include any filtering logic that suits your needs
+    return employee.Title.startsWith(prefix)
+\}
+
+// Use the function in your RQL:
+// =============================
+
+from Employees as employee
+// Filter using the declared function
+filter filterByTitlePrefix(employee, "Sales")
+filter_limit 10
+`}
+
+
+
+
+
+## Syntax
+
+
+
+{`filter(builder);
+filter(builder, limit);
+`}
+
+
+
+| Parameter | Type | Description |
+|---------------|---------------------|---------------------------------------------------------------------------------------------------------------------------|
+| **builder** | `(factory) => void` | The filtering method |
+| **limit** | `number` | The number of records from the query results that `filter` should scan.<br/>Default: all retrieved records. |
+
+
+
 diff --git a/versioned_docs/version-7.1/indexes/querying/_exploration-queries-php.mdx b/versioned_docs/version-7.1/indexes/querying/_exploration-queries-php.mdx new file mode 100644 index 0000000000..4160c906a8 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_exploration-queries-php.mdx @@ -0,0 +1,288 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Exploration Queries** form an additional layer of filtering that can be applied
+  to a dataset after its retrieval by [Raw RQL](../../client-api/session/querying/how-to-query.mdx#sessionadvancedrawquery),
+  while the dataset is still held by the server.
+
+* The **retrieved dataset** is scanned and filtered **without requiring or creating an
+  index**, providing a way to conduct one-time explorations without creating an index that would
+  have to be maintained by the cluster.
+
+* You can filter the datasets retrieved by both **Index queries** and **Collection queries**.
+
+* Exploration queries need to be used
+  [with caution](../../indexes/querying/exploration-queries.mdx#when-should-exploration-queries-be-used)
+  since scanning and filtering all the data retrieved by a query cost substantial
+  [server resources and user waiting time](../../indexes/querying/exploration-queries.mdx#limit-the-query-and-prefer--for-recurring-queries)
+  when large datasets are handled.
+
+
+  We recommend that you -
+
+  * [Limit](../../indexes/querying/exploration-queries.mdx#limit-the-query-and-prefer--for-recurring-queries)
+    the number of records that an exploration query filters.
+  * Use [where](../../indexes/querying/filtering.mdx) in recurring queries,
+    so the query would [use an index](../../indexes/querying/exploration-queries.mdx#limit-the-query-and-prefer--for-recurring-queries).
+
+
+
+* In this page:
+  * [`filter`](../../indexes/querying/exploration-queries.mdx#filter)
+  * [When should exploration queries be used](../../indexes/querying/exploration-queries.mdx#when-should-exploration-queries-be-used)
+  * [Syntax](../../indexes/querying/exploration-queries.mdx#syntax)
+  * [Usage examples](../../indexes/querying/exploration-queries.mdx#usage-examples)
+     * [With collection queries](../../indexes/querying/exploration-queries.mdx#with-collection-queries)
+     * [With queries that use an index](../../indexes/querying/exploration-queries.mdx#with-queries-that-use-an-index)
+     * [With projections](../../indexes/querying/exploration-queries.mdx#with-projections)
+     * [With user-defined JavaScript functions (`declare`)](../../indexes/querying/exploration-queries.mdx#with-user-defined-javascript-functions-)
+
+
+
+## `filter`
+
+Exploration queries can be applied via RQL using the `filter` keyword.
+
+The added filtering is parsed and executed by RavenDB's Javascript engine.
+
+The provided filtering operations resemble those implemented by
+[where](../../indexes/querying/filtering.mdx) and can be further enhanced
+by Javascript functions of your own.
+Read [here](../../indexes/querying/exploration-queries.mdx#with-user-defined-javascript-functions-)
+about creating and using your own Javascript function in your filters.
+
+
+
+## When should exploration queries be used
+
+`filter` can be applied to a Collection query, for example:
+
+
+{`from Employees as e
+filter e.Address.Country = 'USA'
+`}
+
+
+
+It can also be applied to queries handled by an index, e.g.:
+
+
+{`// in a dynamic query via an auto-index
+from Employees as e
+where e.Title = 'Sales Representative'
+filter e.Address.Country = 'USA'
+`}
+
+
+
+
+
+{`// in a query that uses an index explicitly
+from index 'Orders/ByCompany'
+filter Count > 10
+`}
+
+
+
+Both in a collection query and in a query handled by an index, the entire retrieved
+dataset is scanned and filtered.
+This helps clarify when exploration queries should be used, why a limit
+should be set for the number of filtered records, and when `where` should
+be preferred:
+
+
+#### When to use
+Use `filter` for an ad-hoc exploration of the retrieved dataset that matches
+no existing index and is not expected to be repeated often.
+
+* You gain the ability to filter post-query results on the server side, for
+  both collection queries and when an index was used.
+* The dataset will be filtered without creating an unneeded index that the cluster
+  would continue updating from now on.
+
+
+#### Limit the query, and prefer `where` for recurring queries
+Be aware that when a large dataset is retrieved, like the whole collection in
+the case of a collection query, exploring it all using `filter` would tax the server
+in memory and CPU usage while it checks the filter condition for each query result,
+and cost the user a substantial waiting time. Therefore -
+
+* **Limit** the number of records that an exploration query filters, e.g.:
+
+
+{`from Employees as e
+filter e.Address.Country = 'USA'
+filter_limit 500 // limit the number of filtered records
+`}
+
+
+* Use [where](../../indexes/querying/filtering.mdx) rather than `filter` for recurring filtering.
+  `where` will use an index, creating it if necessary, to accelerate the filtering
+  in subsequent queries.
+
+
+
+
+## Syntax
+
+In an RQL query, use:
+* The `filter` keyword, followed by the filtering condition.
+* The `filter_limit` option, followed by the max number of records to filter.
+
+E.g. -
+
+
+{`from Employees as e
+where e.Title = 'Sales Representative'
+filter e.Address.Country = 'USA' // filter the retrieved dataset
+filter_limit 500 // limit the number of filtered records
+`}
+
+
+
+
+## Usage examples
+
+#### With collection queries
+
+Use `filter` with a collection query to scan and filter the entire collection.
+
+
+
+{`$result = $session->query(Employee::class)
+    ->filter(function($f) { return $f->equals("Address.Country", "USA"); }, 500)
+    ->singleOrDefault();
+`}
+
+
+
+
+{`$result = $session->advanced()->documentQuery(Employee::class)
+    ->filter(function($p) { return $p->equals("Address.Country", "USA"); }, limit: 500)
+    ->singleOrDefault();
+`}
+
+
+
+
+{`$result = $session->advanced()
+    ->rawQuery(
+        Employee::class,
+        "from Employees as e " .
+        "filter e.Address.Country = 'USA' " .
+        "filter_limit 500")
+->singleOrDefault();
+`}
+
+
+
+
+
+Filtering a sizable collection will burden the server and prolong user waiting time.
+Set a `filter_limit` to restrict the number of filtered records.
+
+#### With queries that use an index
+
+Use `filter` after a `where` clause to filter the results retrieved by an index query.
+
+
+
+{`$emp = $session->query(Employee::class)
+    ->whereEquals("Title", "Sales Representative")
+    ->filter(function($f) { return $f->equals("Address.Country", "USA"); }, 500)
+    ->singleOrDefault();
+`}
+
+
+
+
+{`$emp = $session->advanced()->documentQuery(Employee::class)
+    ->whereEquals("Title", "Sales Representative")
+    ->filter(function($p) { return $p->equals("Address.Country", "USA"); }, limit: 500)
+    ->singleOrDefault();
+`}
+
+
+
+
+{`$emp = $session->advanced()->rawQuery(Employee::class,
+    "from Employees as e " .
+    "where e.Title = \$title " .
+    "filter e.Address.Country = \$country " .
+    "filter_limit \$limit")
+    ->addParameter("title", "Sales Representative")
+    ->addParameter("country", "USA")
+    ->addParameter("limit", 500)
+    ->singleOrDefault();
+`}
+
+
+
+#### With projections
+
+The filtered results can be projected using `select`, like those of any other query.
+
+
+
+{`$emp1 = $session
+    ->query(Employee::class)
+    ->filter(function($f) { return $f->equals("Address.Country", "USA"); }, 500)
+    ->selectFields(null, "Name", "Address.City", "Address.Country")
+    ->toList();
+
+    // Results:
+    // ========
+
+    // * Results include all employees with country = 'USA'
+    //   Each resulting object contains only the selected fields.
+    //
+    // * No auto-index is created.
+`}
+
+
+
+
+{`$emp3 = $session->advanced()->rawQuery(
+    Employee::class,
+    "from Companies " .
+    "filter Address.Country == 'USA' " .
+    "select Name, Address.City, Address.Country"
+);
+`}
+
+
+
+#### With user-defined JavaScript functions (`declare`)
+
+You can define a Javascript function as part of your query using the
+[declare](../../client-api/session/querying/what-is-rql.mdx#declare) keyword, and
+use it as part of your `filter` condition to freely adapt the filtering
+to your needs.
+
+Here is a simple example:
+
+
+{`// declare a Javascript function
+declare function titlePrefix(r, prefix)
+\{
+    // Add whatever filtering capabilities you like
+    return r.Title.startsWith(prefix)
+\}
+
+from Employees as e
+
+// Filter using the function you've declared
+filter titlePrefix(e, $prefix)
+filter_limit 100
+`}
+
+
+
+
+
 diff --git a/versioned_docs/version-7.1/indexes/querying/_exploration-queries-python.mdx b/versioned_docs/version-7.1/indexes/querying/_exploration-queries-python.mdx new file mode 100644 index 0000000000..1ae78b2477 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_exploration-queries-python.mdx @@ -0,0 +1,233 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Exploration Queries** form an additional layer of filtering that can be applied
+  to a dataset after its retrieval by [raw_rql](../../client-api/session/querying/how-to-query.mdx#sessionadvancedraw_query),
+  while the dataset is still held by the server.
+
+* The **retrieved dataset** is scanned and filtered **without requiring or creating an
+  index**, providing a way to conduct one-time explorations without creating an index that would
+  have to be maintained by the cluster.
+
+* You can filter the datasets retrieved by both **Index queries** and **Collection queries**.
+
+* Exploration queries need to be used
+  [with caution](../../indexes/querying/exploration-queries.mdx#when-should-exploration-queries-be-used)
+  since scanning and filtering all the data retrieved by a query costs substantial
+  [server resources and user waiting time](../../indexes/querying/exploration-queries.mdx#limit-the-query-and-prefer--for-recurring-queries)
+  when large datasets are handled.
+
+
+  We recommend that you -
+
+  * [Limit](../../indexes/querying/exploration-queries.mdx#limit-the-query-and-prefer--for-recurring-queries)
+    the number of records that an exploration query filters.
+  * Use [where](../../indexes/querying/filtering.mdx) in recurring queries,
+    so that the query [uses an index](../../indexes/querying/exploration-queries.mdx#limit-the-query-and-prefer--for-recurring-queries).
+
+
+
+* In this page:
+  * [`filter`](../../indexes/querying/exploration-queries.mdx#filter)
+  * [When should exploration queries be used](../../indexes/querying/exploration-queries.mdx#when-should-exploration-queries-be-used)
+  * [Syntax](../../indexes/querying/exploration-queries.mdx#syntax)
+  * [Usage examples](../../indexes/querying/exploration-queries.mdx#usage-examples)
+    * [With collection queries](../../indexes/querying/exploration-queries.mdx#with-collection-queries)
+    * [With queries that use an index](../../indexes/querying/exploration-queries.mdx#with-queries-that-use-an-index)
+    * [With projections](../../indexes/querying/exploration-queries.mdx#with-projections)
+    * [With user-defined JavaScript functions (`declare`)](../../indexes/querying/exploration-queries.mdx#with-user-defined-javascript-functions-)
+
+
+
+## `filter`
+
+In Python, exploration queries can be applied via RQL using the `filter` keyword.
+
+The added filtering is parsed and executed by RavenDB's Javascript engine.
+
+The provided filtering operations resemble those implemented by
+[where](../../indexes/querying/filtering.mdx) and can be further enhanced
+by Javascript functions of your own.
+Read [here](../../indexes/querying/exploration-queries.mdx#with-user-defined-javascript-functions-)
+about creating and using your own Javascript function in your filters.
+
+
+
+## When should exploration queries be used
+
+`filter` can be applied to a Collection query, like in:
+
+
+{`from Employees as e
+filter e.Address.Country = 'USA'
+`}
+
+
+
+It can also be applied to queries handled by an index, e.g. -
+
+
+
+{`// in a dynamic query via an auto-index
+from Employees as e
+where e.Title = 'Sales Representative'
+filter e.Address.Country = 'USA'
+`}
+
+
+
+
+
+{`// in a query that uses an index explicitly
+from index 'Orders/ByCompany'
+filter Count > 10
+`}
+
+
+
+Both in a collection query and in a query handled by an index, the entire retrieved
+dataset is scanned and filtered.
+This clarifies when exploration queries should be used, why a limit should
+be set on the number of filtered records, and when `where` should be preferred:
+
+
+#### When to use
+Use `filter` for ad-hoc exploration of a retrieved dataset, when the query matches
+no existing index and is not expected to be repeated often.
+
+* You gain the ability to filter the query results on the server side, both for
+  collection queries and for queries that used an index.
+* The dataset is filtered without creating an unneeded index that the cluster
+  would have to keep updating from then on (see the sketch below).
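+
+For example, here is the same condition as a `where` query and as a `filter` query -
+a minimal sketch; the auto-index name is illustrative:
+
+
+{`// 'where' is served by an index - if none exists, an auto-index
+// (e.g. 'Auto/Employees/ByAddress.Country') is created and kept up to date from then on
+from Employees
+where Address.Country = 'USA'
+
+// 'filter' scans the retrieved dataset; no index is required or created
+from Employees
+filter Address.Country = 'USA'
+`}
+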
+
+
+#### Limit the query, and prefer `where` for recurring queries
+Be aware that when a large dataset is retrieved, such as the whole collection in
+the case of a collection query, exploring all of it using `filter` taxes the server's
+memory and CPU while the filter condition is checked for each query result,
+and costs the user substantial waiting time. Therefore -
+
+* **Limit** the number of records that an exploration query filters, e.g.:
+
+
+{`from Employees as e
+filter e.Address.Country = 'USA'
+filter_limit 500 // limit the number of filtered records
+`}
+
+
+* Use [where](../../indexes/querying/filtering.mdx) rather than `filter` for recurring filtering.
+  `where` will use an index, creating it if necessary, to accelerate the filtering
+  in subsequent queries.
+
+
+
+
+## Syntax
+
+* In C#, for example, `filter` can be applied using code from the `Query` or `DocumentQuery` API.
+  There is no such API implementation in the Python client, leaving RQL as the only way to perform exploration queries.
+
+* **RQL**
+  * In an RQL query, use:
+    * The `filter` keyword, followed by the filtering condition.
+    * The `filter_limit` option, followed by the maximum number of records to filter.
+  * E.g. -
+
+
+{`from Employees as e
+where e.Title = 'Sales Representative'
+filter e.Address.Country = 'USA' // filter the retrieved dataset
+filter_limit 500 // limit the number of filtered records
+`}
+
+
+
+
+## Usage examples
+
+#### With collection queries
+
+Use `filter` with a collection query to scan and filter the entire collection.
+
+
+{`result = session.advanced.raw_query(
+    "from Employees as e " "filter e.Address.Country = 'USA' " "filter_limit 500", Employee
+).single()
+`}
+
+
+
+
+Filtering a sizable collection will burden the server and prolong user waiting time.
+Set a `filter_limit` to restrict the number of filtered records.
+
+#### With queries that use an index
+
+Use `filter` after a `where` clause to filter the results retrieved by an index query.
+
+
+{`emp = (
+    session.advanced.raw_query(
+        "from Employees as e "
+        "where e.Title = $title "
+        "filter e.Address.Country = $country "
+        "filter_limit $limit",
+        Employee,
+    )
+    .add_parameter("title", "Sales Representative")
+    .add_parameter("country", "USA")
+    .add_parameter("limit", 500)
+    .single()
+)
+`}
+
+
+#### With projections
+
+The filtered results can be projected using `select`, like those of any other query.
+
+
+{`emp3 = session.advanced.raw_query(
+    "from Employees as e "
+    "filter startsWith(e.FirstName, 'A') "
+    "select \{ FullName: e.FirstName + ' ' + e.LastName \}",
+    Employee,
+)
+`}
+
+
+#### With user-defined JavaScript functions (`declare`)
+
+You can define a Javascript function as part of your query using the
+[declare](../../client-api/session/querying/what-is-rql.mdx#declare) keyword, and
+use it as part of your `filter` condition to freely adapt the filtering
+to your needs.
+ +Here is a simple example: + + +{`// declare a Javascript function +declare function titlePrefix(r, prefix) +\{ + // Add whatever filtering capabilities you like + return r.Title.startsWith(prefix) +\} + +from Employees as e + +// Filter using the function you've declared +filter titlePrefix(e, $prefix) +filter_limit 100 +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_faceted-search-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_faceted-search-csharp.mdx new file mode 100644 index 0000000000..21157296fd --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_faceted-search-csharp.mdx @@ -0,0 +1,1052 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A **Faceted Search** provides an efficient way to explore and navigate through large datasets or search results. + +* Multiple filters (facets) are applied to narrow down the search results according to different attributes or categories. + +![Facets](./assets/CNET_faceted_search.jpg) +* In this page + * [Define an index](../../indexes/querying/faceted-search.mdx#define-an-index) + * [Facets - Basics](../../indexes/querying/faceted-search.mdx#facets---basics) + * [Facets - Options](../../indexes/querying/faceted-search.mdx#facets---options) + * [Facets - Aggregations](../../indexes/querying/faceted-search.mdx#facets---aggregations) + * [Storing facets definition in a document](../../indexes/querying/faceted-search.mdx#storing-facets-definition-in-a-document) + * [Syntax](../../indexes/querying/faceted-search.mdx#syntax) + + +## Define an index + +* To make a faceted search, **a static-index must be defined** for the fields you want to query and apply facets on. 
+ +* The examples in this article will be based on the following Class, Index, and Sample Data: + + + + +{`public class Camera +{ + public string Manufacturer { get; set; } + public double Cost { get; set; } + public double MegaPixels { get; set; } + public int MaxFocalLength { get; set; } + public int UnitsInStock { get; set; } +} +`} + + + + +{`public class Cameras_ByFeatures : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string Brand { get; set; } + public double Price { get; set; } + public double MegaPixels { get; set; } + public int MaxFocalLength { get; set; } + public int UnitsInStock { get; set; } + } + + public Cameras_ByFeatures() + { + Map = cameras => from camera in cameras + select new + { + Brand = camera.Manufacturer, + Price = camera.Cost, + MegaPixels = camera.MegaPixels, + MaxFocalLength = camera.MaxFocalLength, + UnitsInStock = camera.UnitsInStock + }; + } +} +`} + + + + +{`// Creating sample data for the examples in this article: +// ====================================================== + +var cameras = new[] +{ + new Camera { Manufacturer = "Sony", Cost = 100, MegaPixels = 20.1, MaxFocalLength = 200, UnitsInStock = 10 }, + new Camera { Manufacturer = "Sony", Cost = 200, MegaPixels = 29, MaxFocalLength = 250, UnitsInStock = 15 }, + new Camera { Manufacturer = "Nikon", Cost = 120, MegaPixels = 22.3, MaxFocalLength = 300, UnitsInStock = 2 }, + new Camera { Manufacturer = "Nikon", Cost = 180, MegaPixels = 32, MaxFocalLength = 300, UnitsInStock = 5 }, + new Camera { Manufacturer = "Nikon", Cost = 220, MegaPixels = 40, MaxFocalLength = 300, UnitsInStock = 20 }, + new Camera { Manufacturer = "Canon", Cost = 200, MegaPixels = 30.4, MaxFocalLength = 400, UnitsInStock = 30 }, + new Camera { Manufacturer = "Olympus", Cost = 250, MegaPixels = 32.5, MaxFocalLength = 600, UnitsInStock = 4 }, + new Camera { Manufacturer = "Olympus", Cost = 390, MegaPixels = 40, MaxFocalLength = 600, UnitsInStock = 6 }, + new Camera { Manufacturer = "Fuji", Cost = 410, MegaPixels = 45, MaxFocalLength = 700, UnitsInStock = 1 }, + new Camera { Manufacturer = "Fuji", Cost = 590, MegaPixels = 45, MaxFocalLength = 700, UnitsInStock = 5 }, + new Camera { Manufacturer = "Fuji", Cost = 650, MegaPixels = 61, MaxFocalLength = 800, UnitsInStock = 17 }, + new Camera { Manufacturer = "Fuji", Cost = 850, MegaPixels = 102, MaxFocalLength = 800, UnitsInStock = 19 } +}; + +using (var session = store.OpenSession()) +{ + foreach (var camera in cameras) + { + session.Store(camera); + } + + session.SaveChanges(); +} +`} + + + + + + +## Facets - Basics + +#### Facets definition: + +* Define a list of facets by which to aggregate the data. + +* There are two **Facet types**: + * `Facet` - returns a count for each unique term found in the specified index-field. + * `RangeFacet` - returns a count per range within the specified index-field. + + + +{`// Define a list of facets to query by: +// ==================================== +List facets = new List +\{ + // Define a Facet: + // =============== + new Facet + \{ + // Specify the index-field for which to get count of documents per unique ITEM + // e.g. get the number of Camera documents for each unique Brand + FieldName = "Brand", + // Set a display name for this field in the results (optional) + DisplayFieldName = "Camera Brand" + \}, + + // Define a RangeFacet: + // ==================== + new RangeFacet + \{ + Ranges = + \{ + // Specify ranges within an index-field in order to get count per RANGE + // e.g. 
get the number of Camera documents that cost below 200, between 200 & 400, etc... + x => x.Price < 200, + x => x.Price >= 200 && x.Price < 400, + x => x.Price >= 400 && x.Price < 600, + x => x.Price >= 600 && x.Price < 800, + x => x.Price >= 800 + \}, + // Set a display name for this field in the results (optional) + DisplayFieldName = "Camera Price" + \} +\}; +`} + + +#### Query the index for facets results: + +* Query the index to get the aggregated facets information. + +* Either: + + * Pass the facets definition from above directly to the query + + * Or - construct a facet using a builder with the Fluent API option, as shown below. + + + + +{`Dictionary results = session + // Query the index + .Query() + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .AggregateBy(facets) + .Execute(); +`} + + + + +{`Dictionary results = await asyncSession + // Query the index + .Query() + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .AggregateBy(facets) + .ExecuteAsync(); +`} + + + + +{`Dictionary results = session.Advanced + // Query the index + .DocumentQuery() + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .AggregateBy(facets) + .Execute(); +`} + + + + +{`Dictionary results = session + // Query the index + .Query() + // Call 'AggregateBy' to aggregate the data by facets + // Use a builder as follows: + .AggregateBy(builder => builder + // Specify the index-field (e.g. 'Brand') for which to get count per unique ITEM + .ByField(x => x.Brand) + // Set a display name for the field in the results (optional) + .WithDisplayName("Camera Brand")) + .AndAggregateBy(builder => builder + // Specify ranges within an index field (e.g. 'Price') in order to get count per RANGE + .ByRanges( + x => x.Price < 200, + x => x.Price >= 200 && x.Price < 400, + x => x.Price >= 400 && x.Price < 600, + x => x.Price >= 600 && x.Price < 800, + x => x.Price >= 800) + // Set a display name for the field in the results (optional) + .WithDisplayName("Camera Price")) + .Execute(); +`} + + + + +{`Dictionary results = session.Advanced + // Query the index + // Provide the RQL string to the RawQuery method + .RawQuery(@"from index 'Cameras/ByFeatures' + select + facet(Brand) as 'Camera Brand', + facet(Price < 200.0, + Price >= 200.0 and Price < 400.0, + Price >= 400.0 and Price < 600.0, + Price >= 600.0 and Price < 800.0, + Price >= 800.0) as 'Camera Price'") + // Execute the query + .ExecuteAggregation(); +`} + + + + +{`from index "Cameras/ByFeatures" +select + facet(Brand) as "Camera Brand", + facet(Price < 200.0, + Price >= 200.0 and Price < 400.0, + Price >= 400.0 and Price < 600.0, + Price >= 600.0 and Price < 800.0, + Price >= 800.0) as "Camera Price" +`} + + + +#### Query results: + +* **Query results** are Not the collection documents, they are of type: + `Dictionary` which is the facets results per index-field specified. 
+ +* Using the sample data from this article, the resulting aggregations will be: + + + +{`// The resulting aggregations per display name will contain: +// ========================================================= + +// For the "Camera Brand" Facet: +// "canon" - Count: 1 +// "fuji" - Count: 4 +// "nikon" - Count: 3 +// "olympus" - Count: 2 +// "sony" - Count: 2 + +// For the "Camera Price" Ranges: +// "Price < 200" - Count: 3 +// "Price >= 200.0 and Price < 400.0" - Count: 5 +// "Price >= 400.0 and Price < 600.0" - Count: 2 +// "Price >= 600.0 and Price < 800.0" - Count: 1 +// "Price >= 800.0" - Count: 1 +`} + + + + +{`// Get facets results for index-field 'Brand' using the display name specified: +// ============================================================================ +var brandFacets = results["Camera Brand"]; +var numberOfBrands = brandFacets.Values.Count; // 5 unique brands + +// Get the aggregated facet value for a specific Brand: +var facetValue = brandFacets.Values[0]; +// The brand name is available in the 'Range' property +// Note: value is lower-case since the default RavenDB analyzer was used by the index +Assert.Equal("canon", facetValue.Range); +// Number of documents for 'Canon' is available in the 'Count' property +Assert.Equal(1, facetValue.Count); + +// Get facets results for index-field 'Price' using the display name specified: +// ============================================================================ +var priceFacets = results["Camera Price"]; +var numberOfRanges = priceFacets.Values.Count; // 5 different ranges + +// Get the aggregated facet value for a specific Range: +facetValue = priceFacets.Values[0]; +Assert.Equal("Price < 200", facetValue.Range); // The range string +Assert.Equal(3, facetValue.Count); // Number of documents in this range +`} + + + + + +**Query further**: + +* Typically, after presenting users with the initial facets results which show the available options, + users can select specific categories to explore further. + +* For example, if the user selects Fuji and Nikon, + then your next query can include a filter to focus only on those selected brands. + + + +{`Dictionary filteredResults = session + .Query() + // Limit query results to the selected brands: + .Where(x => x.Brand.In("Fuji", "Nikon")) + .AggregateBy(facets) + .Execute(); +`} + + + + + + + +## Facets - Options + +#### Facets definition: + +* **Options** are available only for the `Facet` type. + +* Available options: + + * `Start` - The position from which to send items (how many to skip). + * `PageSize` - Number of items to return. + * `IncludeRemainingTerms` - Show summary of items that didn't make it into the requested PageSize. + * `TermSortMode` - Set the sort order on the resulting items. 
+ + + +{`// Define the list of facets to query by: +// ====================================== +List facetsWithOptions = new List +\{ + // Define a Facet: + new Facet + \{ + // Specify the index-field for which to get count of documents per unique ITEM + FieldName = "Brand", + // Set some facets options + Options = new FacetOptions + \{ + // Return the top 3 brands with most items count: + PageSize = 3, + TermSortMode = FacetTermSortMode.CountDesc + \} + \} +\}; +`} + + +#### Query the index for facets results: + + + + +{`Dictionary results = session + // Query the index + .Query() + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .AggregateBy(facetsWithOptions) + .Execute(); +`} + + + + +{`Dictionary results = await asyncSession + // Query the index + .Query() + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .AggregateBy(facetsWithOptions) + .ExecuteAsync(); +`} + + + + +{`Dictionary results = session.Advanced + // Query the index + .DocumentQuery() + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .AggregateBy(facetsWithOptions) + .Execute(); +`} + + + + +{`Dictionary results = session + // Query the index + .Query() + // Call 'AggregateBy' to aggregate the data by facets + // Use a builder as follows: + .AggregateBy(builder => builder + // Specify an index-field (e.g. 'Brand') for which to get count per unique ITEM + .ByField(x => x.Brand) + // Specify the facets options + .WithOptions(new FacetOptions + { + // Return the top 3 brands with most items count: + PageSize = 3, + TermSortMode = FacetTermSortMode.CountDesc + })) + .Execute(); +`} + + + + +{`Dictionary results = session.Advanced + // Query the index + // Provide the RQL string to the RawQuery method + .RawQuery(@"from index 'Cameras/ByFeatures' + select facet(Brand, $p0)") + // Add the facet options to the "p0" parameter + .AddParameter("p0", new { PageSize = 3, TermSortMode = FacetTermSortMode.CountDesc }) + // Execute the query + .ExecuteAggregation(); +`} + + + + +{`from index "Cameras/ByFeatures" +select facet(Brand, $p0) +{"p0": { "TermSortMode": "CountDesc", "PageSize": 3 }} +`} + + + +#### Query results: + + + +{`// The resulting items will contain: +// ================================= + +// For the "Brand" Facet: +// "fuji" - Count: 4 +// "nikon" - Count: 3 +// "olympus" - Count: 2 + +// As requested, only 3 unique items are returned, ordered by documents count descending: +`} + + + + +{`// Get facets results for index-field 'Brand': +// =========================================== +var brandFacets = results["Brand"]; +var numberOfBrands = brandFacets.Values.Count; // 3 brands + +// Get the aggregated facet value for a specific Brand: +var facetValue = brandFacets.Values[0]; +// The brand name is available in the 'Range' property +// Note: value is lower-case since the default RavenDB analyzer was used by the index +Assert.Equal("fuji", facetValue.Range); +// Number of documents for 'Fuji' is available in the 'Count' property +Assert.Equal(4, facetValue.Count); +`} + + + + + +## Facets - Aggregations + +#### Facets definition: + +* Aggregation of data is available for an index-field per unique Facet or Range item. 
+ For example: + * Get the total number of UnitsInStock per Brand + * Get the highest MegaPixels value for documents that cost between 200 & 400 + +* The following aggregation operations are available: + * Sum + * Average + * Min + * Max + +* Multiple operations can be added on each facet, for multiple fields. + + + +{`// Define the list of facets to query by: +// ====================================== +List facetsWithAggregations = new List +\{ + // Define a Facet: + // =============== + new Facet + \{ + FieldName = "Brand", + Aggregations = + \{ + \{ + // Set the aggregation operation: + FacetAggregation.Sum, + // Create a HasSet specifying the index-fields for which to perform the aggregation + new HashSet + \{ + // Get total number of UnitsInStock per Brand + new FacetAggregationField \{Name = "UnitsInStock"\} + \} + \}, + \{ + FacetAggregation.Average, new HashSet + \{ + // Get average Price per Brand + new FacetAggregationField \{Name = "Price"\} + \} + \}, + \{ + FacetAggregation.Min, new HashSet + \{ + // Get min Price per Brand + new FacetAggregationField \{Name = "Price"\} + \} + \}, + \{ + FacetAggregation.Max, new HashSet + \{ + // Get max MegaPixels per Brand + new FacetAggregationField \{Name = "MegaPixels"\}, + // Get max MaxFocalLength per Brand + new FacetAggregationField \{Name = "MaxFocalLength"\} + \} + \} + \} + \}, + + // Define a RangeFacet: + // ==================== + new RangeFacet + \{ + Ranges = + \{ + x => x.Price < 200, + x => x.Price >= 200 && x.Price < 400, + x => x.Price >= 400 && x.Price < 600, + x => x.Price >= 600 && x.Price < 800, + x => x.Price >= 800 + \}, + Aggregations = + \{ + \{ + FacetAggregation.Sum, new HashSet + \{ + // Get total number of UnitsInStock for each group of documents per range specified + new FacetAggregationField \{Name = "UnitsInStock"\} + \} + \}, + \{ + FacetAggregation.Average, new HashSet + \{ + // Get average Price of each group of documents per range specified + new FacetAggregationField \{Name = "Price"\} + \} + \}, + \{ + FacetAggregation.Min, new HashSet + \{ + // Get min Price of each group of documents per range specified + new FacetAggregationField \{Name = "Price"\} + \} + \}, + \{ + FacetAggregation.Max, new HashSet + \{ + // Get max MegaPixels for each group of documents per range specified + new FacetAggregationField \{Name = "MegaPixels"\}, + // Get max MaxFocalLength for each group of documents per range specified + new FacetAggregationField \{Name = "MaxFocalLength"\} + \} + \} + \} + \} +\}; +`} + + +#### Query the index for facets results: + + + + +{`Dictionary results = session + // Query the index + .Query() + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .AggregateBy(facetsWithAggregations) + .Execute(); +`} + + + + +{`Dictionary results = await asyncSession + // Query the index + .Query() + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .AggregateBy(facetsWithAggregations) + .ExecuteAsync(); +`} + + + + +{`Dictionary results = session.Advanced + // Query the index + .DocumentQuery() + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .AggregateBy(facetsWithAggregations) + .Execute(); +`} + + + + +{`Dictionary results = session + // Query the index + .Query() + // Call 'AggregateBy' to aggregate the data by facets + // Use a builder as follows: + .AggregateBy(builder => builder + // Specify an index-field (e.g. 
'Brand') for which to get count per unique ITEM + .ByField(x => x.Brand) + // Specify the aggregations per the Brand facet: + .SumOn(x => x.UnitsInStock) + .AverageOn(x => x.Price) + .MinOn(x => x.Price) + .MaxOn(x => x.MegaPixels) + .MaxOn(x => x.MaxFocalLength)) + .AndAggregateBy(builder => builder + // Specify ranges within an index field (e.g. 'Price') in order to get count per RANGE + .ByRanges( + x => x.Price < 200, + x => x.Price >= 200 && x.Price < 400, + x => x.Price >= 400 && x.Price < 600, + x => x.Price >= 600 && x.Price < 800, + x => x.Price >= 800) + // Specify the aggregations per the Price range: + .SumOn(x => x.UnitsInStock) + .AverageOn(x => x.Price) + .MinOn(x => x.Price) + .MaxOn(x => x.MegaPixels) + .MaxOn(x => x.MaxFocalLength)) + .Execute(); +`} + + + + +{`Dictionary results = session.Advanced + // Query the index + // Provide the RQL string to the RawQuery method + .RawQuery(@"from index 'Cameras/ByFeatures' + select + facet(Brand, + sum(UnitsInStock), + avg(Price), + min(Price), + max(MegaPixels), + max(MaxFocalLength)), + facet(Price < $p0, + Price >= $p1 and Price < $p2, + Price >= $p3 and Price < $p4, + Price >= $p5 and Price < $p6, + Price >= $p7, + sum(UnitsInStock), + avg(Price), + min(Price), + max(MegaPixels), + max(MaxFocalLength))") + // Add the parameters' values + .AddParameter("p0", 200.0) + .AddParameter("p1", 200.0) + .AddParameter("p2", 400.0) + .AddParameter("p3", 400.0) + .AddParameter("p4", 600.0) + .AddParameter("p5", 600.0) + .AddParameter("p6", 800.0) + .AddParameter("p7", 800.0) + // Execute the query + .ExecuteAggregation(); +`} + + + + +{`from index "Cameras/ByFeatures" +select + facet(Brand, + sum(UnitsInStock), + avg(Price), + min(Price), + max(MegaPixels), + max(MaxFocalLength)), + facet(Price < $p0, + Price >= $p1 and Price < $p2, + Price >= $p3 and Price < $p4, + Price >= $p5 and Price < $p6, + Price >= $p7, + sum(UnitsInStock), + avg(Price), + min(Price), + max(MegaPixels), + max(MaxFocalLength)) +{"p0":200.0,"p1":200.0,"p2":400.0,"p3":400.0,"p4":600.0,"p5":600.0,"p6":800.0,"p7":800.0} +`} + + + +#### Query results: + + + +{`// The resulting items will contain (Showing partial results): +// =========================================================== + +// For the "Brand" Facet: +// "canon" Count:1, Sum: 30, Name: UnitsInStock +// "canon" Count:1, Min: 200, Average: 200, Name: Price +// "canon" Count:1, Max: 30.4, Name: MegaPixels +// "canon" Count:1, Max: 400, Name: MaxFocalLength +// +// "fuji" Count:4, Sum: 42, Name: UnitsInStock +// "fuji" Count:4, Min: 410, Name: Price +// "fuji" Count:4, Max: 102, Name: MegaPixels +// "fuji" Count:4, Max: 800, Name: MaxFocalLength +// +// etc..... + +// For the "Price" Ranges: +// "Price < 200.0" Count:3, Sum: 17, Name: UnitsInStock +// "Price < 200.0" Count:3, Min: 100, Average: 133.33, Name: Price +// "Price < 200.0" Count:3, Max: 32, Name: MegaPixels +// "Price < 200.0" Count:3, Max: 300, Name: MaxFocalLength +// +// "Price < 200.0 and Price > 400.0" Count:5, Sum: 75, Name: UnitsInStock +// "Price < 200.0 and Price > 400.0" Count:5, Min: 200, Average: 252, Name: Price +// "Price < 200.0 and Price > 400.0" Count:5, Max: 40, Name: MegaPixels +// "Price < 200.0 and Price > 400.0" Count:5, Max: 600, Name: MaxFocalLength +// +// etc..... 
+`} + + + + +{`// Get results for the 'Brand' Facets: +// ========================================== +var brandFacets = results["Brand"]; + +// Get the aggregated facet value for a specific Brand: +var facetValue = brandFacets.Values[0]; +// The brand name is available in the 'Range' property: +Assert.Equal("canon", facetValue.Range); +// The index-field on which aggregation was done is in the 'Name' property: +Assert.Equal("UnitsInStock", facetValue.Name); +// The requested aggregation result: +Assert.Equal(30, facetValue.Sum); + +// Get results for the 'Price' RangeFacets: +// ======================================= +var priceRangeFacets = results["Price"]; + +// Get the aggregated facet value for a specific Brand: +facetValue = priceRangeFacets.Values[0]; +// The range string is available in the 'Range' property: +Assert.Equal("Price < 200.0", facetValue.Range); +// The index-field on which aggregation was done is in the 'Name' property: +Assert.Equal("UnitsInStock", facetValue.Name); +// The requested aggregation result: +Assert.Equal(17, facetValue.Sum); +`} + + + + + +## Storing facets definition in a document + +#### Define and store facets in a document: + +* The facets definitions can be stored in a document. + +* That document can then be used by a faceted search query. + + + +{`// Create a FacetSetup object: +// =========================== +FacetSetup facetSetup = new FacetSetup +\{ + // Provide the ID of the document in which the facet setup will be stored. + // This is optional - + // if not provided then the session will assign an ID for the stored document. + Id = "customDocumentID", + + // Define Facets and RangeFacets to query by: + Facets = new List \{ + new Facet() + \{ + FieldName = "Brand" + \}\}, + + RangeFacets = new List + \{ + new RangeFacet + \{ + Ranges = + \{ + x => x.MegaPixels < 20, + x => x.MegaPixels >= 20 && x.MegaPixels < 30, + x => x.MegaPixels >= 30 && x.MegaPixels < 50, + x => x.MegaPixels >= 50 + \} + \} + \} +\}; + +// Store the facet setup document and save changes: +// ================================================ +session.Store(facetSetup); +session.SaveChanges(); + +// The document will be stored under the 'FacetSetups' collection +`} + + +#### Query using facets from document: + + + + +{`Dictionary results = session + // Query the index + .Query() + // Call 'AggregateUsing' + // Pass the ID of the document that contains your facets setup + .AggregateUsing("customDocumentID") + .Execute(); +`} + + + + +{`Dictionary results = await asyncSession + // Query the index + .Query() + // Call 'AggregateUsing' + // Pass the ID of the document that contains your facets setup + .AggregateUsing("customDocumentID") + .ExecuteAsync(); +`} + + + + +{`Dictionary results = session.Advanced + // Query the index + .DocumentQuery() + // Call 'AggregateUsing' + // Pass the ID of the document that contains your facets setup + .AggregateUsing("customDocumentID") + .Execute(); +`} + + + + +{`Dictionary results = session.Advanced + // Query the index + // Provide the RQL string to the RawQuery method + .RawQuery(@"from index 'Cameras/ByFeatures' + select facet(id('customDocumentID'))") + // Execute the query + .ExecuteAggregation(); +`} + + + + +{`from index "Cameras/ByFeatures" +select facet(id("customDocumentID")) +`} + + + + + + +## Syntax + + + +{`IAggregationQuery AggregateBy(FacetBase facet); +IAggregationQuery AggregateBy(IEnumerable facets); +IAggregationQuery AggregateBy(Action> builder); +IAggregationQuery AggregateUsing(string facetSetupDocumentKey); +`} + + 
+
+| Parameter | Type | Description |
+|---------------------------|----------------------------|-------------------------------------------------------------------------------------------------|
+| **facet** | `FacetBase` | `FacetBase` implementation defining the facet and its options. Either `Facet` or `RangeFacet`. |
+| **facets** | `IEnumerable<FacetBase>` | Enumerable containing `FacetBase` implementations. |
+| **builder** | `Action<IFacetBuilder<T>>` | Builder with a fluent API that constructs a `FacetBase` instance. |
+| **facetSetupDocumentKey** | `string` | ID of a document containing `FacetSetup`. |
+
+
+
+
+{`public class Facet
+{
+    public string FieldName { get; set; }
+    public FacetOptions Options { get; set; }
+}
+
+public class Facet<T>
+{
+    public Expression<Func<T, object>> FieldName { get; set; }
+    public FacetOptions Options { get; set; }
+}
+`}
+
+
+
+{`public class RangeFacet
+{
+    public List<string> Ranges { get; set; }
+}
+
+public class RangeFacet<T>
+{
+    public List<Expression<Func<T, bool>>> Ranges { get; set; }
+}
+`}
+
+
+
+{`public class FacetBase
+{
+    public Dictionary<FacetAggregation, HashSet<FacetAggregationField>> Aggregations { get; set; }
+    public string DisplayFieldName { get; set; }
+}
+`}
+
+
+
+{`public enum FacetAggregation
+{
+    None,
+    Max,
+    Min,
+    Average,
+    Sum
+}
+`}
+
+
+
+**Fluent API builder methods**:
+
+
+
+{`IFacetOperations<T> ByField(string fieldName);
+IFacetOperations<T> ByField(Expression<Func<T, object>> path);
+IFacetOperations<T> ByRanges(Expression<Func<T, bool>> path, params Expression<Func<T, bool>>[] paths);
+IFacetOperations<T> WithDisplayName(string displayName);
+IFacetOperations<T> WithOptions(FacetOptions options);
+IFacetOperations<T> SumOn(Expression<Func<T, object>> path);
+IFacetOperations<T> MinOn(Expression<Func<T, object>> path);
+IFacetOperations<T> MaxOn(Expression<Func<T, object>> path);
+IFacetOperations<T> AverageOn(Expression<Func<T, object>> path);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|
+| **fieldName** | `string` | The index-field to use for the facet |
+| **path** | `Expression<Func<T, object>>` | Points to the index-field to use for the facet (`ByRanges`, `ByField`) or for the aggregation (`SumOn`, `MinOn`, `MaxOn`, `AverageOn`) |
+| **displayName** | `string` | If set, results of a facet will be returned under this name |
+| **options** | `FacetOptions` | Non-default options to use in the facet definition |
+
+**Options**:
+
+
+
+{`public class FacetOptions
+\{
+    public FacetTermSortMode TermSortMode \{ get; set; \} = FacetTermSortMode.ValueAsc;
+    public bool IncludeRemainingTerms \{ get; set; \}
+    public int Start \{ get; set; \}
+    public int PageSize \{ get; set; \} = int.MaxValue;
+\}
+`}
+
+
+
+| Option | Type | Description |
|---------------------------|---------------------|---------------------------------------------------------------------------------------------------------------|
+| **TermSortMode** | `FacetTermSortMode` | Set the sort order on the resulting items (`ValueAsc` (default), `ValueDesc`, `CountAsc`, `CountDesc`) |
+| **Start** | `int` | The position from which to send items (how many to skip) |
+| **PageSize** | `int` | Number of items to return |
+| **IncludeRemainingTerms** | `bool` | Indicates if remaining terms that didn't make it into the requested PageSize should be included in results |
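+
+The examples above set only `PageSize` and `TermSortMode`; `Start` and
+`IncludeRemainingTerms` can be passed the same way. A minimal RQL sketch,
+assuming the options object is supplied as a query parameter (the values are illustrative):
+
+
+{`from index "Cameras/ByFeatures"
+select facet(Brand, $p0)
+{"p0": { "TermSortMode": "CountDesc", "Start": 1, "PageSize": 2, "IncludeRemainingTerms": true }}
+`}
+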
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_faceted-search-java.mdx b/versioned_docs/version-7.1/indexes/querying/_faceted-search-java.mdx
new file mode 100644
index 0000000000..23d8f77340
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_faceted-search-java.mdx
@@ -0,0 +1,362 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When displaying a large amount of data, paging is often used to make viewing the data manageable.
+It's also useful to give some context of the entire dataset and an easy way to drill down into
+particular categories. The common approach to doing this is a "faceted search", as shown in the
+image below. **Note** how the count of each category within the current search is shown across the top.
+
+![Facets](./assets/CNET_faceted_search.jpg)
+
+<br />
+Let's start with defining a document like this:
+
+
+
+{`public class Camera \{
+    private Date dateOfListing;
+    private String model;
+    private double cost;
+    private int zoom;
+    private double megapixels;
+    private boolean imageStabilizer;
+    private String manufacturer;
+
+    public Date getDateOfListing() \{
+        return dateOfListing;
+    \}
+
+    public void setDateOfListing(Date dateOfListing) \{
+        this.dateOfListing = dateOfListing;
+    \}
+
+    public String getModel() \{
+        return model;
+    \}
+
+    public void setModel(String model) \{
+        this.model = model;
+    \}
+
+    public double getCost() \{
+        return cost;
+    \}
+
+    public void setCost(double cost) \{
+        this.cost = cost;
+    \}
+
+    public int getZoom() \{
+        return zoom;
+    \}
+
+    public void setZoom(int zoom) \{
+        this.zoom = zoom;
+    \}
+
+    public double getMegapixels() \{
+        return megapixels;
+    \}
+
+    public void setMegapixels(double megapixels) \{
+        this.megapixels = megapixels;
+    \}
+
+    public boolean isImageStabilizer() \{
+        return imageStabilizer;
+    \}
+
+    public void setImageStabilizer(boolean imageStabilizer) \{
+        this.imageStabilizer = imageStabilizer;
+    \}
+
+    public String getManufacturer() \{
+        return manufacturer;
+    \}
+
+    public void setManufacturer(String manufacturer) \{
+        this.manufacturer = manufacturer;
+    \}
+\}
+`}
+
+
+
+## Step 1
+
+Create an index to work against.
+ + + +{`public class Cameras_ByManufacturerModelCostDateOfListingAndMegapixels extends AbstractIndexCreationTask \{ + public Cameras_ByManufacturerModelCostDateOfListingAndMegapixels() \{ + map = "from camera in docs.Cameras " + + "select new \{" + + " camera.manufacturer," + + " camera.model," + + " camera.cost," + + " camera.dateOfListing," + + " camera.megapixels" + + "\} "; + \} +\} +`} + + + +## Step 2 + +Setup your facet definitions: + + + +{`Facet facet1 = new Facet(); +facet1.setFieldName("manufacturer"); + +RangeFacet facet2 = new RangeFacet(); +facet2.setRanges(Arrays.asList( + "cost <= 200", + "cost between 200 and 400", + "cost between 400 and 600", + "cost between 600 and 800", + "cost >= 800" +)); + +RangeFacet facet3 = new RangeFacet(); +facet3.setRanges(Arrays.asList( + "megapixels < 3", + "megapixels between 3 and 7", + "megapixels between 7 and 10", + "megapixels >= 10" +)); + +List facets = Arrays.asList(facet1); +List rangeFacets = Arrays.asList(facet2, facet3); +`} + + + +This tells RavenDB that you would like to get the following facets: + +* For the **manufacturer** field, look at the documents and return a count for each unique Term found. + +* For the **cost** field, return the count of the following ranges: + + * cost < 200.0 + * 200.0 <= cost < 400.0 + * 400.0 <= cost < 600.0 + * 600.0 <= cost < 800.0 + * cost >= 800.0 +* For the **megapixels** field, return the count of the following ranges: + * megapixels <= 3.0 + * 3.0 <= megapixels < 7.0 + * 7.0 <= megapixels < 10.0 + * megapixels >= 10.0 + +## Step 3 + +You can write the following code to get back the data below: + + + + +{`Map facetResults = session + .query(Camera.class, Cameras_ByManufacturerModelCostDateOfListingAndMegapixels.class) + .whereBetween("cost", 100, 300) + .aggregateBy(facets) + .execute(); +`} + + + + +{`Facet facet1 = new Facet(); +facet1.setFieldName("manufacturer"); + +RangeFacet facet2 = new RangeFacet(); +facet2.setRanges(Arrays.asList( + "cost <= 200", + "cost between 200 and 400", + "cost between 400 and 600", + "cost between 600 and 800", + "cost >= 800" +)); + +RangeFacet facet3 = new RangeFacet(); +facet3.setRanges(Arrays.asList( + "megapixels < 3", + "megapixels between 3 and 7", + "megapixels between 7 and 10", + "megapixels >= 10" +)); + +List facets = Arrays.asList(facet1); +List rangeFacets = Arrays.asList(facet2, facet3); +`} + + + + +{`from index 'Cameras/ByManufacturerModelCostDateOfListingAndMegapixels' +where cost between 100 and 300 +select facet(manufacturer), facet(cost <= 200, cost between 200 and 400, cost between 400 and 600, cost between 600 and 800, cost >= 800), facet(megapixels <= 3, megapixels between 3 and 7, megapixels between 7 and 10, megapixels >= 10) +`} + + + + +This data represents the sample faceted data that satisfies the above query: + + + +{`[ + \{ + "Name": "manufacturer", + "Values": [ + \{ + "Count": 1, + "Range": "canon" + \}, + \{ + "Count": 2, + "Range": "jessops" + \}, + \{ + "Count": 1, + "Range": "nikon" + \}, + \{ + "Count": 1, + "Range": "phillips" + \}, + \{ + "Count": 3, + "Range": "sony" + \} + ] + \}, + \{ + "Name": "cost", + "Values": [ + \{ + "Count": 6, + "Range": "cost <= 200" + \}, + \{ + "Count": 2, + "Range": "cost between 200 and 400" + \}, + \{ + "Count": 0, + "Range": "cost between 400 and 600" + \}, + \{ + "Count": 0, + "Range": "cost between 600 and 800" + \}, + \{ + "Count": 0, + "Range": "cost >= 800" + \} + ] + \}, + \{ + "Name": "megapixels", + "Values": [ + \{ + "Count": 0, + "Range": "megapixels <= 3" + \}, + \{ + 
"Count": 6, + "Range": "megapixels between 3 and 7" + \}, + \{ + "Count": 1, + "Range": "megapixels between 7 and 10" + \}, + \{ + "Count": 1, + "Range": "megapixels >= 10" + \} + ] + \} +] +`} + + + +### Storing Facets + +If you do not have to change your facets dynamically, you can store your facets as a `FacetSetup` document and pass the document ID instead of the list each time: + + + +{`FacetSetup facetSetup = new FacetSetup(); +facetSetup.setFacets(facets); +facetSetup.setRangeFacets(rangeFacets); + +session.store(facetSetup, "facets/CameraFacets"); +`} + + + + + + +{`Map facetResults = session + .query(Camera.class, Cameras_ByManufacturerModelCostDateOfListingAndMegapixels.class) + .whereBetween("cost", 100, 300) + .aggregateUsing("facets/CameraFacets") + .execute(); +`} + + + + +{`Facet facet1 = new Facet(); +facet1.setFieldName("manufacturer"); + +RangeFacet facet2 = new RangeFacet(); +facet2.setRanges(Arrays.asList( + "cost <= 200", + "cost between 200 and 400", + "cost between 400 and 600", + "cost between 600 and 800", + "cost >= 800" +)); + +RangeFacet facet3 = new RangeFacet(); +facet3.setRanges(Arrays.asList( + "megapixels < 3", + "megapixels between 3 and 7", + "megapixels between 7 and 10", + "megapixels >= 10" +)); + +List facets = Arrays.asList(facet1); +List rangeFacets = Arrays.asList(facet2, facet3); +`} + + + + +{`from index 'Cameras/ByManufacturerModelCostDateOfListingAndMegapixels' +where cost between 100 and 300 +select facet(id('facets/CameraFacets')) +`} + + + + +### Stale Results + +The faceted search does not take into account a staleness of an index. You can wait for non stale results by customizing your query with the `waitForNonStaleResults` method. + +### Fluent API + +As an alternative for creating a list of facets and passing it to the `aggregateBy` method, RavenDB also exposes a dynamic API where you can create your facets using a builder. You can read more about those methods in our dedicated Client API article [here](../../client-api/session/querying/how-to-perform-a-faceted-search.mdx). + + diff --git a/versioned_docs/version-7.1/indexes/querying/_faceted-search-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_faceted-search-nodejs.mdx new file mode 100644 index 0000000000..3522cec64d --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_faceted-search-nodejs.mdx @@ -0,0 +1,918 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* A **Faceted Search** provides an efficient way to explore and navigate through large datasets or search results. + +* Multiple filters (facets) are applied to narrow down the search results according to different attributes or categories. + +![Facets](./assets/CNET_faceted_search.jpg) +* In this page + * [Define an index](../../indexes/querying/faceted-search.mdx#define-an-index) + * [Facets - Basics](../../indexes/querying/faceted-search.mdx#facets---basics) + * [Facets - Options](../../indexes/querying/faceted-search.mdx#facets---options) + * [Facets - Aggregations](../../indexes/querying/faceted-search.mdx#facets---aggregations) + * [Storing facets definition in a document](../../indexes/querying/faceted-search.mdx#storing-facets-definition-in-a-document) + * [Syntax](../../indexes/querying/faceted-search.mdx#syntax) + + +## Define an index + +* To make a faceted search, **a static-index must be defined** for the fields you want to query and apply facets on. 
+ +* The examples in this article will be based on the following Class, Index, and Sample Data: + + + + +{`class Camera { + constructor( + manufacturer = '', + cost = 0, + megaPixels = 0, + maxFocalLength = 0, + unitsInStock = 0 + ) { + Object.assign(this, { + manufacturer, + cost, + megaPixels, + maxFocalLength, + unitsInStock + }); + } +} +`} + + + + +{`class Cameras_ByFeatures extends AbstractJavaScriptIndexCreationTask { + constructor () { + super(); + + this.map("Cameras", camera => { + return { + brand: camera.manufacturer, + price: camera.cost, + megaPixels: camera.megaPixels, + maxFocalLength: camera.maxFocalLength, + unitsInStock: camera.unitsInStock + }; + }); + } +} +`} + + + + +{`// Creating sample data for the examples in this article: +// ====================================================== +const bulkInsert = store.bulkInsert(); + +const cameras = [ + new Camera("Sony", 100, 20.1, 200, 10), + new Camera("Sony", 200, 29, 250, 15), + new Camera("Nikon", 120, 22.3, 300, 2), + new Camera("Nikon", 180, 32, 300, 5), + new Camera("Nikon", 220, 40, 300, 20), + new Camera("Canon", 200, 30.4, 400, 30), + new Camera("Olympus", 250, 32.5, 600, 4), + new Camera("Olympus", 390, 40, 600, 6), + new Camera("Fuji", 410, 45, 700, 1), + new Camera("Fuji", 590, 45, 700, 5), + new Camera("Fuji", 650, 61, 800, 17), + new Camera("Fuji", 850, 102, 800, 19) +]; + +for (const camera of cameras) { + await bulkInsert.store(camera); +} + +await bulkInsert.finish(); +`} + + + + + + +## Facets - Basics + + + +**Facets definition**: +* Define a list of facets by which to aggregate the data. + +* There are two **Facet types**: + * `Facet` - returns a count for each unique term found in the specified index-field. + * `RangeFacet` - returns a count per range within the specified index-field. + + + +{`// Define a Facet: +// =============== +const brandFacet = new Facet(); +// Specify the index-field for which to get count of documents per unique ITEM +// e.g. get the number of Camera documents for each unique brand +brandFacet.fieldName = "brand"; +// Set a display name for this field in the results (optional) +brandFacet.displayFieldName = "Camera Brand"; + +// Define a RangeFacet: +// ==================== +const priceRangeFacet = new RangeFacet(); +// Specify ranges within an index-field in order to get count per RANGE +// e.g. get the number of Camera documents that cost below 200, between 200 & 400, etc... +priceRangeFacet.ranges = [ + "price < 200", + "price >= 200 and price < 400", + "price >= 400 and price < 600", + "price >= 600 and price < 800", + "price >= 800" +]; +// Set a display name for this field in the results (optional) +priceRangeFacet.displayFieldName = "Camera Price"; + +const facets = [brandFacet, priceRangeFacet]; +`} + + + + + + + +**Query the index for facets results**: +* Query the index to get the aggregated facets information. + +* Either: + + * Pass the facets definition from above directly to the query + + * Or - construct a facet using a builder with the Fluent API option, as shown below. + + + + +{`const results = await session + // Query the index + .query({ indexName: "Cameras/ByFeatures" }) + // Call 'aggregateBy' to aggregate the data by facets + // Pass the defined facets from above + .aggregateBy(...facets) + .execute(); +`} + + + + +{`// Define the index-field (e.g. 
'price') that will be used by the range-facet in the query below
+const range = RangeBuilder.forPath("price");
+
+const results = await session
+    .query({ indexName: "Cameras/ByFeatures" })
+    // Call 'aggregateBy' to aggregate the data by facets
+    // Use a builder as follows:
+    .aggregateBy(builder => builder
+        // Specify the index-field (e.g. 'brand') for which to get count per unique ITEM
+        .byField("brand")
+        // Set a display name for the field in the results (optional)
+        .withDisplayName("Camera Brand"))
+    // Call 'andAggregateBy' to also aggregate the data by range-facets
+    // Use a builder as follows:
+    .andAggregateBy(builder => builder
+        .byRanges(
+            // Specify the ranges within index field 'price' in order to get count per RANGE
+            range.isLessThan(200),
+            range.isGreaterThanOrEqualTo(200).isLessThan(400),
+            range.isGreaterThanOrEqualTo(400).isLessThan(600),
+            range.isGreaterThanOrEqualTo(600).isLessThan(800),
+            range.isGreaterThanOrEqualTo(800))
+        // Set a display name for the field in the results (optional)
+        .withDisplayName("Camera Price"))
+    .execute();
+`}
+
+
+
+
+{`const results = await session.advanced
+    // Query the index
+    // Provide the RQL string to the rawQuery method
+    .rawQuery(\`from index "Cameras/ByFeatures"
+        select
+            facet(brand) as "Camera Brand",
+            facet(price < 200,
+                  price >= 200 and price < 400,
+                  price >= 400 and price < 600,
+                  price >= 600 and price < 800,
+                  price >= 800) as "Camera Price"\`)
+    // Execute the query
+    .executeAggregation();
+`}
+
+
+
+
+{`from index "Cameras/ByFeatures"
+select
+    facet(brand) as "Camera Brand",
+    facet(price < 200,
+          price >= 200 and price < 400,
+          price >= 400 and price < 600,
+          price >= 600 and price < 800,
+          price >= 800) as "Camera Price"
+`}
+
+
+
+
+
+
+
+**Query results**:
+* **Query results** are not the collection documents; they are of type `FacetResultObject`,
+  which holds the facet results per index-field specified.
+
+* Using the sample data from this article, the resulting aggregations will be:
+
+
+
+{`// The resulting aggregations per display name will contain:
+// =========================================================
+
+// For the "Camera Brand" Facet:
+// "canon" - Count: 1
+// "fuji" - Count: 4
+// "nikon" - Count: 3
+// "olympus" - Count: 2
+// "sony" - Count: 2
+
+// For the "Camera Price" Ranges:
+// "price < 200" - Count: 3
+// "price >= 200 and price < 400" - Count: 5
+// "price >= 400 and price < 600" - Count: 2
+// "price >= 600 and price < 800" - Count: 1
+// "price >= 800" - Count: 1
+`}
+
+
+
+
+{`// Get facets results for index-field 'brand' using the display name specified:
+// ============================================================================
+const brandFacets = results["Camera Brand"];
+const numberOfBrands = brandFacets.values.length; // 5 unique brands
+
+// Get the aggregated facet value for a specific Brand:
+let facetValue = brandFacets.values[0];
+// The brand name is available in the 'range' property
+// Note: value is lower-case since the default RavenDB analyzer was used by the index
+assert.equal(facetValue.range, "canon");
+// Number of documents for 'Canon' is available in the 'count' property
+assert.equal(facetValue.count, 1);
+
+// Get facets results for index-field 'price' using the display name specified:
+// ============================================================================
+const priceFacets = results["Camera Price"];
+const numberOfRanges = priceFacets.values.length; // 5 different ranges
+
+// Get the aggregated facet value for a specific Range:
+facetValue = priceFacets.values[0];
+assert.equal(facetValue.range, "price < 200"); // The range string
+assert.equal(facetValue.count, 3); // Number of documents in this range
+`}
+
+
+
+
+
+
+**Query further**:
+* Typically, after presenting users with the initial facets results which show the available options,
+  users can select specific categories to explore further.
+
+* For example, if the user selects Fuji and Nikon,
+  then your next query can include a filter to focus only on those selected brands.
+
+
+
+{`const filteredResults = await session
+    .query(\{ indexName: "Cameras/ByFeatures" \})
+    // Limit query results to the selected brands:
+    .whereIn("brand", ["Fuji", "Nikon"])
+    .aggregateBy(...facets)
+    .execute();
+`}
+
+
+
+
+
+
+
+## Facets - Options
+
+
+
+**Facets definition**:
+* **Options** are available only for the `Facet` type.
+
+* Available options:
+
+  * `start` - The position from which to send items (how many to skip).
+  * `pageSize` - Number of items to return.
+  * `includeRemainingTerms` - Show a summary of items that didn't make it into the requested pageSize.
+  * `termSortMode` - Set the sort order on the resulting items.
+ + + +{`// Define a Facet: +// =============== +const facet = new Facet(); + +// Specify the index-field for which to get count of documents per unique ITEM +facet.fieldName = "brand"; + +// Set some facet options +// E.g.: Return top 3 brands with most items count +const facetOptions = new FacetOptions(); +facetOptions.pageSize = 3; +facetOptions.termSortMode = "CountDesc"; + +facet.options = facetOptions; +`} + + + + + + + +**Query the index for facets results**: + + + +{`const results = await session + // Query the index + .query({ indexName: "Cameras/ByFeatures" }) + // Call 'aggregateBy' to aggregate the data by facets + // Pass the defined facet from above + .aggregateBy(facet) + .execute(); +`} + + + + +{`// Set facet options to use in the following query +// E.g.: Return top 3 brands with most items count +const facetOptions = new FacetOptions(); +facetOptions.pageSize = 3; +facetOptions.termSortMode = "CountDesc"; + +const results = await session + //Query the index + .query({ indexName: "Cameras/ByFeatures" }) + // Call 'aggregateBy' to aggregate the data by facets + // Use a builder as follows: + .aggregateBy(builder => builder + // Specify the index-field (e.g. 'brand') for which to get count per unique ITEM + .byField("brand") + // Call 'withOptions', pass the facets options + .withOptions(facetOptions)) + .execute(); +`} + + + + +{`const results = await session.advanced + // Query the index + // Provide the RQL string to the rawQuery method + .rawQuery(\`from index "Cameras/ByFeatures" + select facet(brand, $p0)\`) + // Add the facet options to the "p0" parameter + .addParameter("p0", { "termSortMode": "CountDesc", "pageSize": 3 }) + // Execute the query + .executeAggregation(); +`} + + + + +{`from index "Cameras/ByFeatures" +select facet(brand, $p0) +{"p0": { "termSortMode": "CountDesc", "pageSize": 3 }} +`} + + + + + + + + +**Query results**: + + +{`// The resulting items will contain: +// ================================= + +// For the "brand" Facet: +// "fuji" - Count: 4 +// "nikon" - Count: 3 +// "olympus" - Count: 2 + +// As requested, only 3 unique items are returned, ordered by documents count descending: +`} + + + + +{`// Get facets results for index-field 'brand': +// =========================================== +const brandFacets = results["brand"]; +const numberOfBrands = brandFacets.values.length; // 3 brands + +// Get the aggregated facet value for a specific Brand: +const facetValue = brandFacets.values[0]; +// The brand name is available in the 'range' property +// Note: value is lower-case since the default RavenDB analyzer was used by the index +assert.equal(facetValue.range, "fuji"); +// Number of documents for 'Fuji' is available in the 'count' property +assert.equal(facetValue.count, 4); +`} + + + + + + + +## Facets - Aggregations + + + +**Facets definition**: +* Aggregation of data is available for an index-field per unique Facet or Range item. + For example: + * Get the total number of unitsInStock per Brand + * Get the highest megaPixels value for documents that cost between 200 & 400 + +* The following aggregation operations are available: + * Sum + * Average + * Min + * Max + +* Multiple operations can be added on each facet, for multiple fields. 
+ + + +{`// Define a Facet: +// =============== +const facet = new Facet(); +facet.fieldName = "brand"; + +// Define the index-fields to aggregate: +const unitsInStockAggregationField = new FacetAggregationField(); +unitsInStockAggregationField.name = "unitsInStock"; + +const priceAggregationField = new FacetAggregationField(); +priceAggregationField.name = "price"; + +const megaPixelsAggregationField = new FacetAggregationField(); +megaPixelsAggregationField.name = "megaPixels"; + +const maxFocalLengthAggregationField = new FacetAggregationField(); +maxFocalLengthAggregationField.name = "maxFocalLength"; + +// Define the aggregation operations: +facet.aggregations.set("Sum", [unitsInStockAggregationField]); +facet.aggregations.set("Average", [priceAggregationField]); +facet.aggregations.set("Min", [priceAggregationField]); +facet.aggregations.set("Max", [megaPixelsAggregationField, maxFocalLengthAggregationField]); + +// Define a RangeFacet: +// ==================== +const rangeFacet = new RangeFacet(); +rangeFacet.ranges = [ + "price < 200", + "price >= 200 and price < 400", + "price >= 400 and price < 600", + "price >= 600 and price < 800", + "price >= 800" +]; + +// Define the aggregation operations: +rangeFacet.aggregations.set("Sum", [unitsInStockAggregationField]); +rangeFacet.aggregations.set("Average", [priceAggregationField]); +rangeFacet.aggregations.set("Min", [priceAggregationField]); +rangeFacet.aggregations.set("Max", [megaPixelsAggregationField, maxFocalLengthAggregationField]); + +const facetsWithAggregations = [facet, rangeFacet]; +`} + + + + + + + +**Query the index for facets results**: + + + +{`const results = await session + // Query the index + .query({ indexName: "Cameras/ByFeatures" }) + // Call 'aggregateBy' to aggregate the data by facets + // Pass the defined facet from above + .aggregateBy(...facetsWithAggregations) + .execute(); +`} + + + + +{`// Define the index-field (e.g. 'price') that will be used by the range-facet in the query below +const range = RangeBuilder.forPath("price"); + +const results = await session + .query({ indexName: "Cameras/ByFeatures" }) + // Call 'aggregateBy' to aggregate the data by facets + // Use a builder as follows: + .aggregateBy(builder => builder + // Specify the index-field (e.g. 
+        'brand') for which to get count per unique ITEM
+        .byField("brand")
+        // Specify the aggregations per the brand facet:
+        .sumOn("unitsInStock")
+        .averageOn("price")
+        .minOn("price")
+        .maxOn("megaPixels")
+        .maxOn("maxFocalLength"))
+    // Call 'andAggregateBy' to also aggregate the data by range-facets
+    // Use a builder as follows:
+    .andAggregateBy(builder => builder
+        .byRanges(
+            // Specify the ranges within index field 'price' in order to get count per RANGE
+            range.isLessThan(200),
+            range.isGreaterThanOrEqualTo(200).isLessThan(400),
+            range.isGreaterThanOrEqualTo(400).isLessThan(600),
+            range.isGreaterThanOrEqualTo(600).isLessThan(800),
+            range.isGreaterThanOrEqualTo(800))
+        // Specify the aggregations per the price range:
+        .sumOn("unitsInStock")
+        .averageOn("price")
+        .minOn("price")
+        .maxOn("megaPixels")
+        .maxOn("maxFocalLength"))
+    .execute();
+`}
+
+
+
+
+{`const results = await session.advanced
+    // Query the index
+    // Provide the RQL string to the rawQuery method
+    .rawQuery(\`from index "Cameras/ByFeatures"
+               select
+                   facet(brand,
+                         sum(unitsInStock),
+                         avg(price),
+                         min(price),
+                         max(megaPixels),
+                         max(maxFocalLength)),
+                   facet(price < $p0,
+                         price >= $p1 and price < $p2,
+                         price >= $p3 and price < $p4,
+                         price >= $p5 and price < $p6,
+                         price >= $p7,
+                         sum(unitsInStock),
+                         avg(price),
+                         min(price),
+                         max(megaPixels),
+                         max(maxFocalLength))\`)
+    // Add the parameters' values
+    .addParameter("p0", 200)
+    .addParameter("p1", 200)
+    .addParameter("p2", 400)
+    .addParameter("p3", 400)
+    .addParameter("p4", 600)
+    .addParameter("p5", 600)
+    .addParameter("p6", 800)
+    .addParameter("p7", 800)
+    // Execute the query
+    .executeAggregation();
+`}
+
+
+
+
+{`from index "Cameras/ByFeatures"
+select
+    facet(brand,
+          sum(unitsInStock),
+          avg(price),
+          min(price),
+          max(megaPixels),
+          max(maxFocalLength)),
+    facet(price < 200,
+          price >= 200 and price < 400,
+          price >= 400 and price < 600,
+          price >= 600 and price < 800,
+          price >= 800,
+          sum(unitsInStock),
+          avg(price),
+          min(price),
+          max(megaPixels),
+          max(maxFocalLength))
+`}
+
+
+
+
+
+
+
+
+**Query results**:
+
+
+{`// The resulting items will contain (Showing partial results):
+// ===========================================================
+
+// For the "brand" Facet:
+// "canon" Count:1, Sum: 30, Name: unitsInStock
+// "canon" Count:1, Min: 200, Average: 200, Name: price
+// "canon" Count:1, Max: 30.4, Name: megaPixels
+// "canon" Count:1, Max: 400, Name: maxFocalLength
+//
+// "fuji" Count:4, Sum: 42, Name: unitsInStock
+// "fuji" Count:4, Min: 410, Name: price
+// "fuji" Count:4, Max: 102, Name: megaPixels
+// "fuji" Count:4, Max: 800, Name: maxFocalLength
+//
+// etc.....
+
+// For the "price" Ranges:
+// "price < 200" Count:3, Sum: 17, Name: unitsInStock
+// "price < 200" Count:3, Min: 100, Average: 133.33, Name: price
+// "price < 200" Count:3, Max: 32, Name: megaPixels
+// "price < 200" Count:3, Max: 300, Name: maxFocalLength
+//
+// "price >= 200 and price < 400" Count:5, Sum: 75, Name: unitsInStock
+// "price >= 200 and price < 400" Count:5, Min: 200, Average: 252, Name: price
+// "price >= 200 and price < 400" Count:5, Max: 40, Name: megaPixels
+// "price >= 200 and price < 400" Count:5, Max: 600, Name: maxFocalLength
+//
+// etc.....
+`}
+
+
+
+
+{`// Get results for the 'brand' Facets:
+// ===================================
+const brandFacets = results["brand"];
+
+// Get the aggregated facet value for a specific Brand:
+let facetValue = brandFacets.values[0];
+// The brand name is available in the 'range' property:
+assert.equal(facetValue.range, "canon");
+// The index-field on which aggregation was done is in the 'name' property:
+assert.equal(facetValue.name, "unitsInStock");
+// The requested aggregation result:
+assert.equal(facetValue.sum, 30);
+
+// Get results for the 'price' RangeFacets:
+// ========================================
+const priceRangeFacets = results["price"];
+
+// Get the aggregated facet value for a specific range:
+facetValue = priceRangeFacets.values[0];
+// The range string is available in the 'range' property:
+assert.equal(facetValue.range, "price < 200");
+// The index-field on which aggregation was done is in the 'name' property:
+assert.equal(facetValue.name, "unitsInStock");
+// The requested aggregation result:
+assert.equal(facetValue.sum, 17);
+`}
+
+
+
+
+
+
+
+## Storing facets definition in a document
+
+
+
+
+
+**Define and store facets in a document**:
+* The facet definitions can be stored in a document.
+
+* That document can then be used by a faceted search query.
+
+
+
+{`// Create a FacetSetup object:
+// ===========================
+const facetSetup = new FacetSetup();
+
+// Provide the ID of the document in which the facet setup will be stored.
+// This is optional -
+// if not provided then the session will assign an ID for the stored document.
+facetSetup.id = "customDocumentID";
+
+// Define Facets and RangeFacets to query by:
+const facet = new Facet();
+facet.fieldName = 'brand';
+
+facetSetup.facets = [facet];
+
+const rangeFacet = new RangeFacet();
+rangeFacet.ranges = [
+    "megaPixels < 20",
+    "megaPixels >= 20 and megaPixels < 30",
+    "megaPixels >= 30 and megaPixels < 50",
+    "megaPixels >= 50"
+];
+
+facetSetup.rangeFacets = [rangeFacet];
+
+// Store the facet setup document and save changes:
+// ================================================
+await session.store(facetSetup);
+await session.saveChanges();
+
+// The document will be stored under the 'FacetSetups' collection
+`}
+
+
+
+
+
+
+
+**Query using facets from document**:
+
+
+
+{`const results = await session
+    // Query the index
+    .query({ indexName: "Cameras/ByFeatures" })
+    // Call 'aggregateUsing'
+    // Pass the ID of the document that contains your facets setup
+    .aggregateUsing("customDocumentID")
+    .execute();
+`}
+
+
+
+
+{`const results = await session.advanced
+    // Query the index
+    // Provide the RQL string to the rawQuery method
+    .rawQuery(\`from index "Cameras/ByFeatures"
+               select facet(id("customDocumentID"))\`)
+    // Execute the query
+    .executeAggregation();
+`}
+
+
+
+
+{`from index "Cameras/ByFeatures"
+select facet(id("customDocumentID"))
+`}
+
+
+
+
+
+
+
+
+## Syntax
+
+
+
+{`// Aggregate data by Facets:
+aggregateBy(facet);
+aggregateBy(...facet);
+aggregateBy(action);
+
+// Additional aggregation for another Facet/RangeFacet (use with fluent API)
+andAggregateBy(facet);
+andAggregateBy(builder);
+
+// Aggregate data by Facets stored in a document
+aggregateUsing(facetSetupDocumentId);
+`}
+
+
+
+| Parameter | Type | Description |
+|--------------------------|----------------------------|---------------------------------------------------------------------------------------------------|
+| **facet** | `FacetBase` | `FacetBase` implementation defining the facet and its options.<br/>Either `Facet` or `RangeFacet`. |
+| **...facet** | `FacetBase[]` | List containing `FacetBase` implementations. |
+| **action** / **builder** | `(builder) => void` | Builder with a fluent API that constructs a `FacetBase` instance. |
+| **facetSetupDocumentId** | `string` | ID of a document containing `FacetSetup`. |
+
+
+
+
+{`class Facet {
+    fieldName;
+}
+`}
+
+
+
+
+{`class RangeFacet {
+    ranges;
+}
+`}
+
+
+
+
+{`class FacetBase {
+    displayFieldName;
+    options;
+    aggregations; // "None" | "Max" | "Min" | "Average" | "Sum"
+}
+`}
+
+
+
+
+**builder methods**:
+
+
+
+{`byField(fieldName);
+byRanges(range, ...ranges);
+
+withDisplayName(displayName);
+withOptions(options);
+
+sumOn(path);
+sumOn(path, displayName);
+
+minOn(path);
+minOn(path, displayName);
+
+maxOn(path);
+maxOn(path, displayName);
+
+averageOn(path);
+averageOn(path, displayName);
+`}
+
+
+
+| Parameter | Type | Description |
+|-----------------|----------------|------------------------------------------------------------------------------------------------------------------------------|
+| **fieldName** | `string` | The index-field to use for the facet |
+| **path** | `string` | The index-field to use for the facet (`byRanges`, `byField`) or for the aggregation (`sumOn`, `minOn`, `maxOn`, `averageOn`) |
+| **displayName** | `string` | If set, results of a facet will be returned under this name |
+| **options** | `FacetOptions` | Non-default options to use in the facet definition |
+
+**Options**:
+
+
+
+{`class FacetOptions \{
+    termSortMode;
+    includeRemainingTerms;
+    start;
+    pageSize;
+\}
+`}
+
+
+
+| Option | Type | Description |
+|---------------------------|---------------------|-------------------------------------------------------------------------------------------------------------|
+| **termSortMode** | `FacetTermSortMode` | Set the sort order on the resulting items<br/>(`ValueAsc` (Default), `ValueDesc`, `CountAsc`, `CountDesc`) |
+| **start** | `number` | The position from which to send items (how many to skip) |
+| **pageSize** | `number` | Number of items to return |
+| **includeRemainingTerms** | `boolean` | Indicates if remaining terms that didn't make it into the requested PageSize should be included in results |
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_faceted-search-php.mdx b/versioned_docs/version-7.1/indexes/querying/_faceted-search-php.mdx
new file mode 100644
index 0000000000..0fa8ffec23
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_faceted-search-php.mdx
@@ -0,0 +1,1094 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A **Faceted Search** provides an efficient way to explore and navigate through large datasets or search results.
+
+* Multiple filters (facets) are applied to narrow down the search results according to different attributes or categories.
+
+![Facets](./assets/CNET_faceted_search.jpg)
+* In this page
+  * [Define an index](../../indexes/querying/faceted-search.mdx#define-an-index)
+  * [Facets - Basics](../../indexes/querying/faceted-search.mdx#facets---basics)
+  * [Facets - Options](../../indexes/querying/faceted-search.mdx#facets---options)
+  * [Facets - Aggregations](../../indexes/querying/faceted-search.mdx#facets---aggregations)
+  * [Storing facets definition in a document](../../indexes/querying/faceted-search.mdx#storing-facets-definition-in-a-document)
+  * [Syntax](../../indexes/querying/faceted-search.mdx#syntax)
+
+
+## Define an index
+
+* To make a faceted search, **a static-index must be defined** for the fields you want to query and apply facets on.
+ +* The examples in this article will be based on the following Class, Index, and Sample Data: + + + + +{`class Camera +{ + private ?string $manufacturer = null; + private ?float $cost = null; + private ?float $megaPixels = null; + private ?int $maxFocalLength = null; + private ?int $unitsInStock = null; + + public function __construct( + ?string $manufacturer = null, + ?float $cost = null, + ?float $megaPixels = null, + ?int $maxFocalLength = null, + ?int $unitsInStock = null, + ) + { + $this->manufacturer = $manufacturer; + $this->cost = $cost; + $this->megaPixels = $megaPixels; + $this->maxFocalLength = $maxFocalLength; + $this->unitsInStock = $unitsInStock; + } + + public function getManufacturer(): ?string + { + return $this->manufacturer; + } + + public function setManufacturer(?string $manufacturer): void + { + $this->manufacturer = $manufacturer; + } + + public function getCost(): ?float + { + return $this->cost; + } + + public function setCost(?float $cost): void + { + $this->cost = $cost; + } + + public function getMegaPixels(): ?float + { + return $this->megaPixels; + } + + public function setMegaPixels(?float $megaPixels): void + { + $this->megaPixels = $megaPixels; + } + + public function getMaxFocalLength(): ?int + { + return $this->maxFocalLength; + } + + public function setMaxFocalLength(?int $maxFocalLength): void + { + $this->maxFocalLength = $maxFocalLength; + } + + public function getUnitsInStock(): ?int + { + return $this->unitsInStock; + } + + public function setUnitsInStock(?int $unitsInStock): void + { + $this->unitsInStock = $unitsInStock; + } +} +`} + + + + +{`class Cameras_ByFeatures_IndexEntry +{ + private ?string $brand = null; + private ?float $price = null; + private ?float $megaPixels = null; + private ?int $maxFocalLength = null; + private ?int $unitsInStock = null; + + public function getBrand(): ?string + { + return $this->brand; + } + + public function setBrand(?string $brand): void + { + $this->brand = $brand; + } + + public function getPrice(): ?float + { + return $this->price; + } + + public function setPrice(?float $price): void + { + $this->price = $price; + } + + public function getMegaPixels(): ?float + { + return $this->megaPixels; + } + + public function setMegaPixels(?float $megaPixels): void + { + $this->megaPixels = $megaPixels; + } + + public function getMaxFocalLength(): ?int + { + return $this->maxFocalLength; + } + + public function setMaxFocalLength(?int $maxFocalLength): void + { + $this->maxFocalLength = $maxFocalLength; + } + + public function getUnitsInStock(): ?int + { + return $this->unitsInStock; + } + + public function setUnitsInStock(?int $unitsInStock): void + { + $this->unitsInStock = $unitsInStock; + } +} + +class Cameras_ByFeatures extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = + "from camera in docs.Cameras " . + "select new " . + "{ " . + " brand = camera.manufacturer," . + " price = camera.cost," . + " megaPixels = camera.megaPixels," . + " maxFocalLength = camera.maxFocalLength," . + " unitsInStock = camera.unitsInStock" . 
+ "}"; + } +} +`} + + + + +{`// Creating sample data for the examples in this article: +// ====================================================== + +$cameras = []; + +$cameras[] = new Camera ( $manufacturer = "Sony", $cost = 100, $megaPixels = 20.1, $maxFocalLength = 200, $unitsInStock = 10 ); +$cameras[] = new Camera ( $manufacturer = "Sony", $cost = 200, $megaPixels = 29, $maxFocalLength = 250, $unitsInStock = 15 ); +$cameras[] = new Camera ( $manufacturer = "Nikon", $cost = 120, $megaPixels = 22.3, $maxFocalLength = 300, $unitsInStock = 2 ); +$cameras[] = new Camera ( $manufacturer = "Nikon", $cost = 180, $megaPixels = 32, $maxFocalLength = 300, $unitsInStock = 5 ); +$cameras[] = new Camera ( $manufacturer = "Nikon", $cost = 220, $megaPixels = 40, $maxFocalLength = 300, $unitsInStock = 20 ); +$cameras[] = new Camera ( $manufacturer = "Canon", $cost = 200, $megaPixels = 30.4, $maxFocalLength = 400, $unitsInStock = 30 ); +$cameras[] = new Camera ( $manufacturer = "Olympus", $cost = 250, $megaPixels = 32.5, $maxFocalLength = 600, $unitsInStock = 4 ); +$cameras[] = new Camera ( $manufacturer = "Olympus", $cost = 390, $megaPixels = 40, $maxFocalLength = 600, $unitsInStock = 6 ); +$cameras[] = new Camera ( $manufacturer = "Fuji", $cost = 410, $megaPixels = 45, $maxFocalLength = 700, $unitsInStock = 1 ); +$cameras[] = new Camera ( $manufacturer = "Fuji", $cost = 590, $megaPixels = 45, $maxFocalLength = 700, $unitsInStock = 5 ); +$cameras[] = new Camera ( $manufacturer = "Fuji", $cost = 650, $megaPixels = 61, $maxFocalLength = 800, $unitsInStock = 17 ); +$cameras[] = new Camera ( $manufacturer = "Fuji", $cost = 850, $megaPixels = 102, $maxFocalLength = 800, $unitsInStock = 19 ); + +$session = $store->openSession(); +try { + foreach ($cameras as $camera) + { + $session->store($camera); + } + + $session->saveChanges(); +} finally { + $session->close(); +} +`} + + + + + + +## Facets - Basics + +#### Facets definition: + +* Define a list of facets to aggregate the data by. + +* There are two **Facet types**: + * `Facet` - returns a count for each unique term found in the specified index-field. + * `RangeFacet` - returns a count per range within the specified index-field. + + + +{`// Define a list of facets to query by: +// ==================================== +$facets = []; + +// Define a Facet: +// =============== +$facet = new Facet(); +// Specify the index-field for which to get count of documents per unique ITEM +// e.g. get the number of Camera documents for each unique Brand +$facet->setFieldName("Brand"); +// Set a display name for this field in the results (optional) +$facet->setDisplayFieldName("Camera Brand"); + +$facets[] = $facet; + +// Define a RangeFacet: for Cameras_ByFeatures_IndexEntry +// ==================== +$rangeFacet = new RangeFacet(); + +// Specify ranges within an index-field in order to get count per RANGE +// e.g. get the number of Camera documents that cost below 200, between 200 & 400, etc... +$rangeFacet->setRanges([ + "price < 200", + "price >= 200 and price <= 400", + "price >= 400 and price <= 600", + "price >= 600 and price <= 800", + "price >= 800" +]); + +// Set a display name for this field in the results (optional) +$rangeFacet->setDisplayFieldName("Camera Price"); + +$facets[] = $rangeFacet; +`} + + +#### Query the index for facets results: + +* Query the index to get the aggregated facets information. 
+
+* Either:
+
+  * Pass the facets definition from above directly to the query
+
+  * Or - construct a facet using a builder with the Fluent API option, as shown below.
+
+
+
+
+{`$results = $session
+    // Query the index
+    ->query(Cameras_ByFeatures_IndexEntry::class, Cameras_ByFeatures::class)
+    // Call 'AggregateBy' to aggregate the data by facets
+    // Pass the defined facets from above
+    ->aggregateBy($facets)
+    ->execute();
+`}
+
+
+
+
+{`$results = $session
+    // Query the index
+    ->query(Cameras_ByFeatures_IndexEntry::class, Cameras_ByFeatures::class)
+    // Call 'AggregateBy' to aggregate the data by facets
+    // Use a builder as follows:
+    ->aggregateBy(function($builder) {
+        return $builder
+            // Specify the index-field (e.g. 'Brand') for which to get count per unique ITEM
+            ->byField("Brand")
+            // Set a display name for the field in the results (optional)
+            ->withDisplayName("Camera Brand");
+    })
+    ->andAggregateBy(function($builder) {
+        return $builder
+            // Specify ranges within an index field (e.g. 'Price') in order to get count per RANGE
+            ->byRanges([
+                "Price < 200",
+                "Price >= 200 && Price < 400",
+                "Price >= 400 && Price < 600",
+                "Price >= 600 && Price < 800",
+                "Price >= 800"
+            ])
+            // Set a display name for the field in the results (optional)
+            ->withDisplayName("Camera Price");
+    })
+    ->execute();
+`}
+
+
+
+
+{`$results = $session->advanced()
+    // Query the index
+    // Provide the RQL string to the RawQuery method
+    ->rawQuery(Camera::class,
+        "from index 'Cameras/ByFeatures'
+         select
+             facet(Brand) as 'Camera Brand',
+             facet(Price < 200.0,
+                   Price >= 200.0 and Price < 400.0,
+                   Price >= 400.0 and Price < 600.0,
+                   Price >= 600.0 and Price < 800.0,
+                   Price >= 800.0) as 'Camera Price'"
+    )
+    // Execute the query
+    ->executeAggregation();
+`}
+
+
+
+
+{`from index "Cameras/ByFeatures"
+select
+    facet(Brand) as "Camera Brand",
+    facet(Price < 200.0,
+          Price >= 200.0 and Price < 400.0,
+          Price >= 400.0 and Price < 600.0,
+          Price >= 600.0 and Price < 800.0,
+          Price >= 800.0) as "Camera Price"
+`}
+
+
+
+#### Query results:
+
+* **Query results** are not the collection documents; they are of type
+  `array<string, FacetResult>` - the facets results per index-field specified.
+ +* Using the sample data from this article, the resulting aggregations will be: + + + +{`// The resulting aggregations per display name will contain: +// ========================================================= + +// For the "Camera Brand" Facet: +// "canon" - Count: 1 +// "fuji" - Count: 4 +// "nikon" - Count: 3 +// "olympus" - Count: 2 +// "sony" - Count: 2 + +// For the "Camera Price" Ranges: +// "Price < 200" - Count: 3 +// "Price >= 200.0 and Price < 400.0" - Count: 5 +// "Price >= 400.0 and Price < 600.0" - Count: 2 +// "Price >= 600.0 and Price < 800.0" - Count: 1 +// "Price >= 800.0" - Count: 1 +`} + + + + +{`// Get facets results for index-field 'Brand' using the display name specified: +// ============================================================================ +/** @var FacetResult $brandFacets */ +$brandFacets = $results["Camera Brand"]; +$numberOfBrands = count($brandFacets->getValues()); // 5 unique brands + +// Get the aggregated facet value for a specific Brand: +/** @var FacetValue $facetValue */ +$facetValue = $brandFacets->getValues()[0]; +// The brand name is available in the 'Range' property +// Note: value is lower-case since the default RavenDB analyzer was used by the index + +$this->assertEquals("canon", $facetValue->getRange()); +// Number of documents for 'Canon' is available in the 'Count' property +$this->assertEquals(1, $facetValue->getCount()); + +// Get facets results for index-field 'Price' using the display name specified: +// ============================================================================ +/** @var FacetResult $priceFacets */ +$priceFacets = $results["Camera Price"]; +$numberOfRanges = count($priceFacets->getValues()); // 5 different ranges + +// Get the aggregated facet value for a specific Range: +/** @var FacetValue $facetValue */ +$facetValue = $priceFacets->getValues()[0]; +$this->assertEquals("Price < 200", $facetValue->getRange()); // The range string +$this->assertEquals(3, $facetValue->getCount()); // Number of documents in this range +`} + + + + + +**Query further**: + +* Typically, after presenting users with the initial facets results which show the available options, + users can select specific categories to explore further. + +* For example, if the user selects Fuji and Nikon, + then your next query can include a filter to focus only on those selected brands. + + + +{`$filteredResults = $session + ->query(Cameras_ByFeatures_IndexEntry::class, Cameras_ByFeatures::class) + // Limit query results to the selected brands: + ->whereIn("Brand", ["Fuji", "Nikon"]) + ->aggregateBy($facets) + ->execute(); +`} + + + + + + + +## Facets - Options + +#### Facets definition: + +* **Options** are available only for the `Facet` type. + +* Available options: + * `Start` - The position from which to send items (how many to skip). + * `PageSize` - Number of items to return. + * `IncludeRemainingTerms` - Show summary of items that didn't make it into the requested PageSize. + * `TermSortMode` - Set the sort order on the resulting items. 
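+
+The example below uses `PageSize` and `TermSortMode`. For the remaining two options, here is a minimal,
+hypothetical sketch (not part of the original examples) - it assumes `setStart()` and
+`setIncludeRemainingTerms()` setters matching the `FacetOptions` fields listed in the Syntax section:
+
+{`// A minimal sketch, assuming the FacetOptions setters noted above:
+// Skip the first 2 unique brand terms, return the next 3,
+// and include a summary of the terms that were left out.
+$options = new FacetOptions();
+$options->setStart(2);
+$options->setPageSize(3);
+$options->setIncludeRemainingTerms(true);
+
+$facet = new Facet();
+$facet->setFieldName("Brand");
+$facet->setOptions($options);
+`}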
+
+
+
+{`// Define the list of facets to query by:
+// ======================================
+$facetsWithOptions = [];
+
+// Define a Facet:
+$facet = new Facet();
+
+// Specify the index-field for which to get count of documents per unique ITEM
+$facet->setFieldName("Brand");
+
+// Set some facet options
+$options = new FacetOptions();
+// Return the top 3 brands with the highest item count:
+$options->setPageSize(3);
+$options->setTermSortMode(FacetTermSortMode::countDesc());
+
+$facet->setOptions($options);
+
+$facetsWithOptions[] = $facet;
+`}
+
+
+#### Query the index for facets results:
+
+
+
+
+{`$results = $session
+    // Query the index
+    ->query(Cameras_ByFeatures_IndexEntry::class, Cameras_ByFeatures::class)
+    // Call 'aggregateBy' to aggregate the data by facets
+    // Pass the defined facets from above
+    ->aggregateBy($facetsWithOptions)
+    ->execute();
+`}
+
+
+
+
+{`$results = $session
+    // Query the index
+    ->query(Cameras_ByFeatures_IndexEntry::class, Cameras_ByFeatures::class)
+    // Call 'AggregateBy' to aggregate the data by facets
+    // Use a builder as follows:
+    ->aggregateBy(function($builder) {
+        $options = new FacetOptions();
+        // Return the top 3 brands with the highest item count:
+        $options->setPageSize(3);
+        $options->setTermSortMode(FacetTermSortMode::countDesc());
+
+        return $builder
+            // Specify an index-field (e.g. 'Brand') for which to get count per unique ITEM
+            ->byField("Brand")
+            // Specify the facets options
+            ->withOptions($options);
+    })
+    ->execute();
+`}
+
+
+
+
+{`$results = $session->advanced()
+    // Query the index
+    // Provide the RQL string to the RawQuery method
+    ->rawQuery(Camera::class, "from index 'Cameras/ByFeatures' select facet(Brand, \\$p0)")
+    // Add the facet options to the "p0" parameter
+    ->addParameter("p0", [ "PageSize" => 3, "TermSortMode" => FacetTermSortMode::countDesc() ])
+    // Execute the query
+    ->executeAggregation();
+`}
+
+
+
+
+{`from index "Cameras/ByFeatures"
+select facet(Brand, $p0)
+{"p0": { "TermSortMode": "CountDesc", "PageSize": 3 }}
+`}
+
+
+
+#### Query results:
+
+
+
+{`// The resulting items will contain:
+// =================================
+
+// For the "Brand" Facet:
+// "fuji" - Count: 4
+// "nikon" - Count: 3
+// "olympus" - Count: 2
+
+// As requested, only 3 unique items are returned, ordered by document count descending.
+`}
+
+
+
+
+{`// Get facets results for index-field 'Brand':
+// ===========================================
+/** @var FacetResult $brandFacets */
+$brandFacets = $results["Brand"];
+$numberOfBrands = count($brandFacets->getValues()); // 3 brands
+
+// Get the aggregated facet value for a specific Brand:
+/** @var FacetValue $facetValue */
+$facetValue = $brandFacets->getValues()[0];
+// The brand name is available in the 'Range' property
+// Note: value is lower-case since the default RavenDB analyzer was used by the index
+$this->assertEquals("fuji", $facetValue->getRange());
+// Number of documents for 'Fuji' is available in the 'Count' property
+$this->assertEquals(4, $facetValue->getCount());
+`}
+
+
+
+
+
+## Facets - Aggregations
+
+#### Facets definition:
+
+* Aggregation of data is available for an index-field per unique Facet or Range item.
+  For example:
+  * Get the total number of UnitsInStock per Brand
+  * Get the highest MegaPixels value for documents that cost between 200 & 400
+
+* The following aggregation operations are available:
+  * sum
+  * average
+  * min
+  * max
+
+* Multiple operations can be added on each facet, for multiple fields.
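+
+* E.g., with this article's sample data, the five cameras priced in [200, 400) have an average Price of
+  (200 + 220 + 200 + 250 + 390) / 5 = 252 - the Average reported for that range in the query results below.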
+ + + +{`// Define the list of facets to query by: +// ====================================== +$facetsWithAggregations = []; + +// Define a Facet: +// =============== +$facet = new Facet(); +$facet->setFieldName("Brand"); + +$aggregations = new AggregationArray(); + +$aggregations->set( + // Set the aggregation operation: + FacetAggregation::sum(), + // Get total number of UnitsInStock for each group of documents per range specified + [ + // Get total number of UnitsInStock per Brand + new FacetAggregationField($name = "UnitsInStock") + ] +); + +$aggregations->set(FacetAggregation::average(), [ + // Get average Price per Brand + new FacetAggregationField($name = "Price") +]); + +$aggregations->set(FacetAggregation::min(), [ + // Get min Price per Brand + new FacetAggregationField($name = "Price") +]); + +$aggregations->set(FacetAggregation::max(), [ + // Get max MegaPixels per Brand + new FacetAggregationField($name = "MegaPixels"), + // Get max MaxFocalLength per Brand + new FacetAggregationField($name = "MaxFocalLength") +]); + +$facet->setAggregations($aggregations); + +// Define a RangeFacet: +// ==================== +$rangeFacet = new RangeFacet(); +$rangeFacet->setRanges([ + "Price < 200", + "Price >= 200 && Price < 400", + "Price >= 400 && Price < 600", + "Price >= 600 && Price < 800", + "Price >= 800" +]); + +$rangeAggregations = new AggregationArray(); + +$rangeAggregations->set(FacetAggregation::sum(), [ + // Get total number of UnitsInStock for each group of documents per range specified + new FacetAggregationField($name = "UnitsInStock") +]); +$rangeAggregations->set(FacetAggregation::average(), [ + // Get average Price of each group of documents per range specified + new FacetAggregationField($name = "Price") +]); +$rangeAggregations->set(FacetAggregation::min(), [ + // Get min Price of each group of documents per range specified + new FacetAggregationField($name = "Price") +]); + +$rangeAggregations->set(FacetAggregation::max(), [ + // Get max MegaPixels for each group of documents per range specified + new FacetAggregationField($name = "MegaPixels"), + // Get max MaxFocalLength for each group of documents per range specified + new FacetAggregationField($name = "MaxFocalLength") + +]); + +$rangeFacet->setAggregations($rangeAggregations); +`} + + +#### Query the index for facets results: + + + + +{`$results = $session + // Query the index + ->query(Cameras_ByFeatures_IndexEntry::class, Cameras_ByFeatures::class) + // Call 'AggregateBy' to aggregate the data by facets + // Pass the defined facets from above + ->aggregateBy($facetsWithAggregations) + ->execute(); +`} + + + + +{`$results = $session + // Query the index + ->query(Cameras_ByFeatures_IndexEntry::class, Cameras_ByFeatures::class) + // Call 'AggregateBy' to aggregate the data by facets + // Use a builder as follows: + ->aggregateBy(function($builder) { + + return $builder + // Specify an index-field (e.g. 'Brand') for which to get count per unique ITEM + ->byField("Brand") + // Specify the aggregations per the Brand facet: + ->sumOn("UnitsInStock") + ->averageOn("Price") + ->minOn("Price") + ->maxOn("MegaPixels") + ->maxOn("MaxFocalLength"); + }) + ->andAggregateBy(function($builder) { + return $builder + // Specify ranges within an index field (e.g. 
+            'Price') in order to get count per RANGE
+            ->byRanges([
+                "Price < 200",
+                "Price >= 200 && Price < 400",
+                "Price >= 400 && Price < 600",
+                "Price >= 600 && Price < 800",
+                "Price >= 800"
+            ])
+            // Specify the aggregations per the Price range:
+            ->sumOn("UnitsInStock")
+            ->averageOn("Price")
+            ->minOn("Price")
+            ->maxOn("MegaPixels")
+            ->maxOn("MaxFocalLength");
+    })
+    ->execute();
+`}
+
+
+
+
+{`$results = $session->advanced()
+    // Query the index
+    // Provide the RQL string to the RawQuery method
+    ->rawQuery(Camera::class,
+        "from index 'Cameras/ByFeatures'
+         select
+             facet(Brand,
+                   sum(UnitsInStock),
+                   avg(Price),
+                   min(Price),
+                   max(MegaPixels),
+                   max(MaxFocalLength)),
+             facet(Price < \\$p0,
+                   Price >= \\$p1 and Price < \\$p2,
+                   Price >= \\$p3 and Price < \\$p4,
+                   Price >= \\$p5 and Price < \\$p6,
+                   Price >= \\$p7,
+                   sum(UnitsInStock),
+                   avg(Price),
+                   min(Price),
+                   max(MegaPixels),
+                   max(MaxFocalLength))"
+    )
+    // Add the parameters' values
+    ->addParameter("p0", 200.0)
+    ->addParameter("p1", 200.0)
+    ->addParameter("p2", 400.0)
+    ->addParameter("p3", 400.0)
+    ->addParameter("p4", 600.0)
+    ->addParameter("p5", 600.0)
+    ->addParameter("p6", 800.0)
+    ->addParameter("p7", 800.0)
+    // Execute the query
+    ->executeAggregation();
+`}
+
+
+
+
+{`from index "Cameras/ByFeatures"
+select
+    facet(Brand,
+          sum(UnitsInStock),
+          avg(Price),
+          min(Price),
+          max(MegaPixels),
+          max(MaxFocalLength)),
+    facet(Price < $p0,
+          Price >= $p1 and Price < $p2,
+          Price >= $p3 and Price < $p4,
+          Price >= $p5 and Price < $p6,
+          Price >= $p7,
+          sum(UnitsInStock),
+          avg(Price),
+          min(Price),
+          max(MegaPixels),
+          max(MaxFocalLength))
+{"p0":200.0,"p1":200.0,"p2":400.0,"p3":400.0,"p4":600.0,"p5":600.0,"p6":800.0,"p7":800.0}
+`}
+
+
+
+#### Query results:
+
+
+
+{`// The resulting items will contain (Showing partial results):
+// ===========================================================
+
+// For the "Brand" Facet:
+// "canon" Count:1, Sum: 30, Name: UnitsInStock
+// "canon" Count:1, Min: 200, Average: 200, Name: Price
+// "canon" Count:1, Max: 30.4, Name: MegaPixels
+// "canon" Count:1, Max: 400, Name: MaxFocalLength
+//
+// "fuji" Count:4, Sum: 42, Name: UnitsInStock
+// "fuji" Count:4, Min: 410, Name: Price
+// "fuji" Count:4, Max: 102, Name: MegaPixels
+// "fuji" Count:4, Max: 800, Name: MaxFocalLength
+//
+// etc.....
+
+// For the "Price" Ranges:
+// "Price < 200.0" Count:3, Sum: 17, Name: UnitsInStock
+// "Price < 200.0" Count:3, Min: 100, Average: 133.33, Name: Price
+// "Price < 200.0" Count:3, Max: 32, Name: MegaPixels
+// "Price < 200.0" Count:3, Max: 300, Name: MaxFocalLength
+//
+// "Price >= 200.0 and Price < 400.0" Count:5, Sum: 75, Name: UnitsInStock
+// "Price >= 200.0 and Price < 400.0" Count:5, Min: 200, Average: 252, Name: Price
+// "Price >= 200.0 and Price < 400.0" Count:5, Max: 40, Name: MegaPixels
+// "Price >= 200.0 and Price < 400.0" Count:5, Max: 600, Name: MaxFocalLength
+//
+// etc.....
+`}
+
+
+
+
+{`// Get results for the 'Brand' Facets:
+// ===================================
+/** @var FacetResult $brandFacets */
+$brandFacets = $results["Brand"];
+
+// Get the aggregated facet value for a specific Brand:
+/** @var FacetValue $facetValue */
+$facetValue = $brandFacets->getValues()[0];
+// The brand name is available in the 'Range' property:
+$this->assertEquals("canon", $facetValue->getRange());
+// The index-field on which aggregation was done is in the 'Name' property:
+$this->assertEquals("UnitsInStock", $facetValue->getName());
+// The requested aggregation result:
+$this->assertEquals(30, $facetValue->getSum());
+
+// Get results for the 'Price' RangeFacets:
+// ========================================
+/** @var FacetResult $priceRangeFacets */
+$priceRangeFacets = $results["Price"];
+
+// Get the aggregated facet value for a specific range:
+/** @var FacetValue $facetValue */
+$facetValue = $priceRangeFacets->getValues()[0];
+// The range string is available in the 'Range' property:
+$this->assertEquals("Price < 200.0", $facetValue->getRange());
+// The index-field on which aggregation was done is in the 'Name' property:
+$this->assertEquals("UnitsInStock", $facetValue->getName());
+// The requested aggregation result:
+$this->assertEquals(17, $facetValue->getSum());
+`}
+
+
+
+
+
+## Storing facets definition in a document
+
+#### Define and store facets in a document:
+
+* The facet definitions can be stored in a document.
+
+* That document can then be used by a faceted search query.
+
+
+
+{`// Create a FacetSetup object:
+// ===========================
+$facetSetup = new FacetSetup();
+// Provide the ID of the document in which the facet setup will be stored.
+// This is optional -
+// if not provided then the session will assign an ID for the stored document.
+$facetSetup->setId("customDocumentID");
+
+// Define Facets and RangeFacets to query by:
+$facetSetup->setFacets([
+    new Facet("Brand")
+]);
+
+
+$facetSetup->setRangeFacets([
+    new RangeFacet(
+        $parent = null,
+        $ranges = [
+            "MegaPixels < 20",
+            "MegaPixels >= 20 && MegaPixels < 30",
+            "MegaPixels >= 30 && MegaPixels < 50",
+            "MegaPixels >= 50"
+        ]
+    )
+]);
+
+// Store the facet setup document and save changes:
+// ================================================
+$session->store($facetSetup);
+$session->saveChanges();
+
+// The document will be stored under the 'FacetSetups' collection
+`}
+
+
+#### Query using facets from document:
+
+
+
+
+{`$results = $session
+    // Query the index
+    ->query(Cameras_ByFeatures_IndexEntry::class, Cameras_ByFeatures::class)
+    // Call 'AggregateUsing'
+    // Pass the ID of the document that contains your facets setup
+    ->aggregateUsing("customDocumentID")
+    ->execute();
+`}
+
+
+
+
+{`$results = $session->advanced()
+    // Query the index
+    // Provide the RQL string to the RawQuery method
+    ->rawQuery(
+        $className = Camera::class,
+        $query = "from index 'Cameras/ByFeatures'
+                  select facet(id('customDocumentID'))"
+    )
+    // Execute the query
+    ->executeAggregation();
+`}
+
+
+
+
+{`from index "Cameras/ByFeatures"
+select facet(id("customDocumentID"))
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`public function aggregateBy(Callable|FacetBase|FacetBaseArray|array ...$builderOrFacets): AggregationDocumentQueryInterface;
+
+// You can call it
+// ->aggregateBy(FacetBase $facet);
+// ->aggregateBy(FacetBase $facet1, FacetBase $facet2, ...);
+// ->aggregateBy(FacetBaseArray|array $facets);
+// ->aggregateBy(function(FacetBuilderInterface $builder) \{ ...\});
+
+public function aggregateUsing(?string $facetSetupDocumentId): AggregationDocumentQueryInterface;
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------------------|----------------------------|-----------------------|
+| **builderOrFacets** | `Callable` **or** `FacetBase` **or** `FacetBaseArray` **or** `array` | Builder with a fluent API that constructs a `FacetBase` implementation instance **or** `FacetBase` implementation instance |
+| **facets** | `FacetBaseArray` **or** `array` | A list of `FacetBase` implementation instances. |
+| **facetSetupDocumentId** | `string` | ID of a document containing `FacetSetup` |
+
+
+
+
+{`class Facet
+{
+    private ?string $fieldName = null;
+    private ?FacetOptions $options = null;
+
+    // ... getters and setters
+}
+`}
+
+
+
+
+{`class RangeFacet
+{
+    private StringArray $ranges;
+    private ?FacetOptions $options = null;
+
+    // ... getters and setters
+}
+`}
+
+
+
+
+{`class FacetBase
+{
+    private ?AggregationArray $aggregations = null;
+    private ?string $displayFieldName = null;
+
+    // ... getters and setters
+}
+`}
+
+
+
+
+{`interface FacetAggregation
+{
+    public function isNone(): bool;
+    public function isMax(): bool;
+    public function isMin(): bool;
+    public function isAverage(): bool;
+    public function isSum(): bool;
+
+    public static function none(): FacetAggregation;
+    public static function max(): FacetAggregation;
+    public static function min(): FacetAggregation;
+    public static function average(): FacetAggregation;
+    public static function sum(): FacetAggregation;
+}
+`}
+
+
+
+
+**Fluent API builder methods**:
+
+
+
+{`public function byField(string $fieldName): FacetOperationsInterface;
+public function byRanges(?RangeBuilder $range, ?RangeBuilder ...$ranges): FacetOperationsInterface;
+
+public function withDisplayName(string $displayName): FacetOperationsInterface;
+public function withOptions(FacetOptions $options): FacetOperationsInterface;
+
+public function sumOn(string $path, ?string $displayName = null): FacetOperationsInterface;
+public function minOn(string $path, ?string $displayName = null): FacetOperationsInterface;
+public function maxOn(string $path, ?string $displayName = null): FacetOperationsInterface;
+public function averageOn(string $path, ?string $displayName = null): FacetOperationsInterface;
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|-----------------------------|-------------|
+| **range** | `RangeBuilder` | A range of index-field values |
+| **ranges** | `RangeBuilder` | Multiple index-field ranges (at least one), separated by `,` |
+| **fieldName** | `string` | The index-field to use for the facet |
+| **path** | `string` | Points to the index-field to use for the facet (`byRanges`, `byField`) or for the aggregation (`sumOn`, `minOn`, `maxOn`, `averageOn`) |
+| **displayName** | `string` | If set, results of a facet will be returned under this name |
+| **options** | `FacetOptions` | Non-default options to use in the facet definition |
+
+
+
+**Options**:
+
+
+
+{`class FacetOptions
+\{
+    private FacetTermSortMode $termSortMode; // default value FacetTermSortMode::valueAsc()
+    private bool $includeRemainingTerms = false;
+    private int $start = 0;
+    private int $pageSize = 0;
+
+    // ... getters and setters
+\}
+`}
+
+
+
+| Option | Type | Description |
+|---------------------------|---------------------|-------------------------------------------------------------------------------------------------------------|
+| **termSortMode** | `FacetTermSortMode` | Set the sort order on the resulting items<br/>(`valueAsc()` (Default), `valueDesc()`, `countAsc()`, `countDesc()`) |
+| **start** | `int` | The position from which to send items (how many to skip) |
+| **pageSize** | `int` | Number of items to return |
+| **includeRemainingTerms** | `bool` | Indicates if remaining terms that didn't make it into the requested PageSize should be included in results<br/>Default value: `false` |
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_faceted-search-python.mdx b/versioned_docs/version-7.1/indexes/querying/_faceted-search-python.mdx
new file mode 100644
index 0000000000..0a7c6a2f00
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_faceted-search-python.mdx
@@ -0,0 +1,945 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* A **Faceted Search** provides an efficient way to explore and navigate through large datasets or search results.
+
+* Multiple filters (facets) are applied to narrow down the search results according to different attributes or categories.
+
+![Facets](./assets/CNET_faceted_search.jpg)
+* In this page
+  * [Define an index](../../indexes/querying/faceted-search.mdx#define-an-index)
+  * [Facets - Basics](../../indexes/querying/faceted-search.mdx#facets---basics)
+  * [Facets - Options](../../indexes/querying/faceted-search.mdx#facets---options)
+  * [Facets - Aggregations](../../indexes/querying/faceted-search.mdx#facets---aggregations)
+  * [Storing facets definition in a document](../../indexes/querying/faceted-search.mdx#storing-facets-definition-in-a-document)
+  * [Syntax](../../indexes/querying/faceted-search.mdx#syntax)
+
+
+## Define an index
+
+* To make a faceted search, **a static-index must be defined** for the fields you want to query and apply facets on.
+
+* The examples in this article will be based on the following Class, Index, and Sample Data:
+
+
+
+
+{`class Camera:
+    def __init__(
+        self,
+        manufacturer: str = None,
+        cost: float = None,
+        mega_pixels: float = None,
+        max_focal_length: int = None,
+        units_in_stock: int = None,
+    ):
+        self.manufacturer = manufacturer
+        self.cost = cost
+        self.mega_pixels = mega_pixels
+        self.max_focal_length = max_focal_length
+        self.units_in_stock = units_in_stock
+`}
+
+
+
+
+{`class Cameras_ByFeatures(AbstractIndexCreationTask):
+    class IndexEntry:
+        def __init__(
+            self,
+            brand: str = None,
+            price: float = None,
+            mega_pixels: float = None,
+            max_focal_length: int = None,
+            units_in_stock: int = None,
+        ):
+            self.brand = brand
+            self.price = price
+            self.mega_pixels = mega_pixels
+            self.max_focal_length = max_focal_length
+            self.units_in_stock = units_in_stock
+
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from camera in docs.Cameras "
+            "select new "
+            "{ "
+            " brand = camera.manufacturer,"
+            " price = camera.cost,"
+            " mega_pixels = camera.mega_pixels,"
+            " max_focal_length = camera.max_focal_length,"
+            " units_in_stock = camera.units_in_stock"
+            "}"
+        )
+`}
+
+
+
+
+{`# Creating sample data for the examples in this article:
+# ======================================================
+
+cameras = [
+    Camera(manufacturer="Sony", cost=100, mega_pixels=20.1, max_focal_length=200, units_in_stock=10),
+    Camera(manufacturer="Sony", cost=200, mega_pixels=29, max_focal_length=250, units_in_stock=15),
+    Camera(manufacturer="Nikon", cost=120, mega_pixels=22.3, max_focal_length=300, units_in_stock=2),
+    Camera(manufacturer="Nikon", cost=180, mega_pixels=32, max_focal_length=300, units_in_stock=5),
+    Camera(manufacturer="Nikon", cost=220, mega_pixels=40, max_focal_length=300, units_in_stock=20),
+    Camera(manufacturer="Canon", cost=200, mega_pixels=30.4, max_focal_length=400, units_in_stock=30),
+    Camera(manufacturer="Olympus", cost=250, mega_pixels=32.5, max_focal_length=600, units_in_stock=4),
Camera(manufacturer="Olympus", cost=390, mega_pixels=40, max_focal_length=600, units_in_stock=6), + Camera(manufacturer="Fuji", cost=410, mega_pixels=45, max_focal_length=700, units_in_stock=1), + Camera(manufacturer="Fuji", cost=590, mega_pixels=45, max_focal_length=700, units_in_stock=5), + Camera(manufacturer="Fuji", cost=650, mega_pixels=61, max_focal_length=800, units_in_stock=17), + Camera(manufacturer="Fuji", cost=850, mega_pixels=102, max_focal_length=800, units_in_stock=19), +] + +with store.open_session() as session: + for camera in cameras: + session.store(camera) + session.save_changes() +`} + + + + + + +## Facets - Basics + +#### Facets definition: + +* Define a list of facets by which to aggregate the data. + +* There are two **Facet types**: + * `Facet` - returns a count for each unique term found in the specified index-field. + * `RangeFacet` - returns a count per range within the specified index-field. + + + +{`# Define a Facet: +# =============== +facet = Facet( + # Specify the index-field for which to get count of documents per unique ITEM + # e.g. get the number of Camera documents for each unique brand + field_name="brand", +) + +# Set a display name for this field in the results (optional) +facet.display_field_name = "Camera Brand" + +# Define a RangeFacet: +# ==================== +range_facet = RangeFacet() +# Specify ranges within an index-field in order to get count per RANGE +# e.g. get the number of Camera documents that cost below 200, between 200 & 400, etc... +range_facet.ranges = [ + "price < 200", + "price between 200 and 400", + "price between 400 and 600", + "price between 600 and 800", + "price >= 800", +] + +# Set a display name for this field in the results (optional) +range_facet.display_field_name = "Camera Price" + +# Define a list of facets to query by: +# ==================================== +facets = [facet, range_facet] +`} + + +#### Query the index for facets results: + +* Query the index to get the aggregated facets information. + +* Either: + + * Pass the facets definition from above directly to the query + + * Or - construct a facet using a builder with the Fluent API option, as shown below. + + + + +{`results = ( + session + # Query the index + .query_index_type(Cameras_ByFeatures, Cameras_ByFeatures.IndexEntry) + # Call 'aggregate_by' to aggregate the data by facets + # Pass the defined facets from above + .aggregate_by_facets(facets).execute() +) +`} + + + + +{`# Query the index +results = ( + session.query_index_type(Cameras_ByFeatures, Cameras_ByFeatures.IndexEntry) + # Call 'aggregate_by' to aggregate the data by facets + # Use a builder as follows: + .aggregate_by( + lambda builder: builder + # Specify the index-field (e.g. 'brand') for which to get count per unique ITEM + .by_field("brand") + # Set a display name for the field in the results (optional) + .with_display_name("Camera Brand") + ) + .and_aggregate_by( + lambda builder: builder + # Specify ranges within an index field (e.g. 
'Price') in order to get count per RANGE + .by_ranges( + RangeBuilder("price").is_less_than(200), + RangeBuilder("price").is_greater_than_or_equal_to(200).is_less_than(400), + RangeBuilder("price").is_greater_than_or_equal_to(400).is_less_than(600), + RangeBuilder("price").is_greater_than_or_equal_to(600).is_less_than(800), + RangeBuilder("price").is_greater_than_or_equal_to(800), + ) + # Set a display name for the field in the results (optional) + .with_display_name("Camera Price") + ) + .execute() +) +`} + + + + +{`results = ( + session.advanced + # Query the index + # Provide the RQL string to the raw_query method + .raw_query( + """from index 'Cameras/ByFeatures' + select + facet(brand) as 'Camera Brand', + facet(price < 200.0, + price >= 200.0 and price < 400.0, + price >= 400.0 and price < 600.0, + price >= 600.0 and price < 800.0, + price >= 800.0) as 'Camera Price'""", + object_type=Camera, + ) + # Execute the query + .execute_aggregation() +) +`} + + + + +{`from index "Cameras/ByFeatures" +select + facet(Brand) as "Camera Brand", + facet(Price < 200.0, + Price >= 200.0 and Price < 400.0, + Price >= 400.0 and Price < 600.0, + Price >= 600.0 and Price < 800.0, + Price >= 800.0) as "Camera Price" +`} + + + +#### Query results: + +* **Query results** are Not the collection documents, they are of type: + `Dict[str, FacetResult]` which is the facets results per index-field specified. + +* Using the sample data from this article, the resulting aggregations will be: + + + +{`# The resulting aggregations per display name will contain: +# ========================================================= + +# For the "Camera Brand" Facet: +# "canon" - Count: 1 +# "fuji" - Count: 4 +# "nikon" - Count: 3 +# "olympus" - Count: 2 +# "sony" - Count: 2 + +# For the "Camera Price" Ranges: +# "price < 200" - Count: 3 +# "200 <= price < 400" - Count: 5 +# "400 <= price < 600" - Count: 2 +# "600 <= price < 800" - Count: 1 +# "price >= 800" - Count: 1 +`} + + + + +{`# Get facets results for index-field 'brand' using the display name specified: +# ============================================================================ +brand_facets = results["Camera Brand"] +number_of_brands = len(brand_facets.values) # 5 unique brands + +# Get the aggregated facet value for a specific brand: +facet_value = brand_facets.values[0] +# The brand name is available in the 'Range' property +# Note: value is lower-case since the default RavenDB analyzer was used by the index +self.assertEqual("canon", facet_value.range_) +# Number of documents for 'Canon' is available in the 'Count' property +self.assertEqual(1, facet_value.count_) + +# Get facets results for index-field 'Price' using the display name specified: +# ============================================================================ +price_facets = results["Camera Price"] +number_of_ranges = len(price_facets.values) # 5 different ranges + +# Get the aggregated facet value for a specific Range: +facet_value = price_facets.values[0] +self.assertEqual("price < 200", facet_value.range_) # The range string +self.assertEqual(3, facet_value.count_) +`} + + + + + +**Query further**: + +* Typically, after presenting users with the initial facets results which show the available options, + users can select specific categories to explore further. + +* For example, if the user selects Fuji and Nikon, + then your next query can include a filter to focus only on those selected brands. 
+ + + +{`filtered_results = list( + session.query_index_type(Cameras_ByFeatures, Cameras_ByFeatures.IndexEntry) + .where_in("brand", ["Fuji", "Nikon"]) + .aggregate_by_facets(facets) + .execute() +) +`} + + + + + + + +## Facets - Options + +#### Facets definition: + +* **Options** are available only for the `Facet` type. + +* Available options: + + * `start` - The position from which to send items (how many to skip). + * `page_size` - Number of items to return. + * `include_remaining_terms` - Show summary of items that didn't make it into the requested PageSize. + * `term_sort_mode` - Set the sort order on the resulting items. + + + +{`# Define the list of facets to query by: +# ====================================== +facets_with_options = [ + # Define a Facet: + Facet( + # Specify the index-field for which to get count of documents per unique ITEM + field_name="brand", + ) +] +# Set some facets options +# Assign facet options after creating the object +facets_with_options[0].options = FacetOptions() +# Return the top 3 brands with most items count: +facets_with_options[0].options.page_size = 3 +facets_with_options[0].options.term_sort_mode = FacetTermSortMode.COUNT_DESC +facets_with_options[0].options.start = 0 +`} + + +#### Query the index for facets results: + + + + +{`results = ( + session + # Query the index + .query_index_type(Cameras_ByFeatures, Cameras_ByFeatures.IndexEntry) + # Call 'aggregate_by' to aggregate the data by facets + # Pass the defined facets from above + .aggregate_by_facets(facets_with_options).execute() +) +`} + + + + +{`# Return the top 3 brands with most items count: +facet_options = FacetOptions() +facet_options.start = 0 +facet_options.page_size = 3 +facet_options.term_sort_mode = FacetTermSortMode.COUNT_DESC + +results = ( + session + # Query the index + .query_index_type(Cameras_ByFeatures, Cameras_ByFeatures.IndexEntry) + # Call 'aggregate_by' to aggregate the data by facets + # Use a builder as follows: + .aggregate_by( + lambda builder: builder + # Specify an index-field (e.g. 
'brand') for which to get count per unique ITEM + .by_field("brand") + # Specify the facets options + .with_options(facet_options) + ).execute() +) +`} + + + + +{`results = ( + session.advanced + # Query the index + # Provide the RQL string to the raw_query method + .raw_query( + """from index 'Cameras/ByFeatures' + select facet(brand, $p0)""", + object_type=Camera, + ) + # Add the facet options to the "p0" parameter + .add_parameter("p0", {"PageSize": 3, "TermSortMode": FacetTermSortMode.COUNT_DESC}) + # Execute the query + .execute_aggregation() +) +`} + + + + +{`from index "Cameras/ByFeatures" +select facet(Brand, $p0) +{"p0": { "TermSortMode": "CountDesc", "PageSize": 3 }} +`} + + + +#### Query results: + + + +{`# The resulting items will contain: +# ================================= +# For the "brand" Facet: +# "fuji" - Count: 4 +# "nikon" - Count: 3 +# "olympus" - Count: 2 +# As requested, only 3 unique items are returned, ordered by documents count descending: +`} + + + + +{`# Get facets results for index-field 'brand': +# =========================================== +brand_facets = results["brand"] +number_of_brands = len(brand_facets.values) # 3 brands + +# Get the aggregated facet value for a specific brand: +facet_value = brand_facets.values[0] +# The brand name is available in the 'Range' property +# Note: value is lower-case since the default RavenDB analyzer was used by the index +self.assertEqual("fuji", facet_value.range_) +# Number of documents for 'Fuji' is available in the 'Count' property +self.assertEqual(4, facet_value.count_) +`} + + + + + +## Facets - Aggregations + +#### Facets definition: + +* Aggregation of data is available for an index-field per unique Facet or Range item. + For example: + * Get the total number of UnitsInStock per Brand + * Get the highest MegaPixels value for documents that cost between 200 & 400 + +* The following aggregation operations are available: + * Sum + * Average + * Min + * Max + +* Multiple operations can be added on each facet, for multiple fields. 
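+
+* E.g., with this article's sample data, the highest mega_pixels value among the five cameras priced
+  in [200, 400) is 40 - the Max reported for that range in the query results below.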
+ + + +{`# Define the list of facets to query by: +# ===================================== + +# Define a facet: +# =============== +facet_with_aggregations = Facet() +facet_with_aggregations.field_name = "brand" +facet_with_aggregations.aggregations = \{ + # Set the aggregation operation: + FacetAggregation.SUM: + # Create a set specifying the index-fields for which to perform the aggregation + \{ + # Get total number of units_in_stock per brand + FacetAggregationField("units_in_stock") + \}, + FacetAggregation.AVERAGE: \{ + # Get average price per brand + FacetAggregationField("price") + \}, + FacetAggregation.MIN: \{ + # Get min price per brand + FacetAggregationField("price") + \}, + FacetAggregation.MAX: \{ + # Get max mega_pixels per brand + FacetAggregationField("mega_pixels"), + # Get max max_focal_length per brand + FacetAggregationField("max_focal_length"), + \}, +\} + +# Define a RangeFacet: +# =================== +range_facet_with_aggregations = RangeFacet() +range_facet_with_aggregations.ranges = [ + "price < 200", + "price between 200 and 400", + "price between 400 and 600", + "price between 600 and 800", + "price >= 800", +] +range_facet_with_aggregations.aggregations = \{ + FacetAggregation.SUM: \{ + # Get total number of units_in_stock for each group of documents per range specified + FacetAggregationField("units_in_stock") + \}, + FacetAggregation.AVERAGE: \{ + # Get average price of each group of documents per range specified + FacetAggregationField("price") + \}, + FacetAggregation.MIN: \{ + # Get min price of each group of documents per range specified + FacetAggregationField("price") + \}, + FacetAggregation.MAX: \{ + # Get max mega_pixels for each group of documents per range specified + FacetAggregationField("mega_pixels"), + # Get max max_focal_length for each group of documents per range specified + FacetAggregationField("max_focal_length"), + \}, +\} + +facets_with_aggregations = [facet_with_aggregations, range_facet_with_aggregations] +`} + + +#### Query the index for facets results: + + + + +{`results = ( + session + # Query the index + .query_index_type(Cameras_ByFeatures, Cameras_ByFeatures.IndexEntry) + # Call 'aggregate_by_facets' to aggregate the data by facets + # Pass the defined facets from above + .aggregate_by_facets(facets_with_aggregations).execute() +) +`} + + + + +{`results = ( + session + # Query the index + .query_index_type(Cameras_ByFeatures, Cameras_ByFeatures.IndexEntry) + # Call 'aggregate_by' to aggregate the data by facets + # Use a builder as follows: + .aggregate_by( + lambda builder: builder + # Specify an index-field (e.g. 'brand') for which to get count per unique ITEM + .by_field("brand") + # Specify the aggregations per the brand facet: + .sum_on("units_in_stock") + .average_on("price") + .min_on("price") + .max_on("mega_pixels") + .max_on("max_focal_length") + ) + .and_aggregate_by( + lambda builder: builder + # Specify ranges within an index field (e.g. 
+            'price') in order to get count per RANGE
+            .by_ranges(
+                RangeBuilder("price").is_less_than(200),
+                RangeBuilder("price").is_greater_than_or_equal_to(200).is_less_than(400),
+                RangeBuilder("price").is_greater_than_or_equal_to(400).is_less_than(600),
+                RangeBuilder("price").is_greater_than_or_equal_to(600).is_less_than(800),
+                RangeBuilder("price").is_greater_than_or_equal_to(800),
+            )
+            # Specify the aggregations per the price range:
+            .sum_on("units_in_stock")
+            .average_on("price")
+            .min_on("price")
+            .max_on("mega_pixels")
+            .max_on("max_focal_length")
+        )
+        .execute()
+)
+`}
+
+
+
+
+{`results = (
+    session.advanced
+    # Query the index
+    # Provide the RQL string to the raw_query method
+    .raw_query(
+        """
+        from index 'Cameras/ByFeatures'
+        select
+            facet(brand,
+                  sum(units_in_stock),
+                  avg(price),
+                  min(price),
+                  max(mega_pixels),
+                  max(max_focal_length)),
+            facet(price < $p0,
+                  price >= $p1 and price < $p2,
+                  price >= $p3 and price < $p4,
+                  price >= $p5 and price < $p6,
+                  price >= $p7,
+                  sum(units_in_stock),
+                  avg(price),
+                  min(price),
+                  max(mega_pixels),
+                  max(max_focal_length))
+        """
+    )
+    .add_parameter("p0", 200.0)
+    .add_parameter("p1", 200.0)
+    .add_parameter("p2", 400.0)
+    .add_parameter("p3", 400.0)
+    .add_parameter("p4", 600.0)
+    .add_parameter("p5", 600.0)
+    .add_parameter("p6", 800.0)
+    .add_parameter("p7", 800.0)
+    # Execute the query
+    .execute_aggregation()
+)
+`}
+
+
+
+
+{`from index "Cameras/ByFeatures"
+select
+    facet(brand,
+          sum(units_in_stock),
+          avg(price),
+          min(price),
+          max(mega_pixels),
+          max(max_focal_length)),
+    facet(price < $p0,
+          price >= $p1 and price < $p2,
+          price >= $p3 and price < $p4,
+          price >= $p5 and price < $p6,
+          price >= $p7,
+          sum(units_in_stock),
+          avg(price),
+          min(price),
+          max(mega_pixels),
+          max(max_focal_length))
+{"p0":200.0,"p1":200.0,"p2":400.0,"p3":400.0,"p4":600.0,"p5":600.0,"p6":800.0,"p7":800.0}
+`}
+
+
+
+#### Query results:
+
+
+
+{`# The resulting items will contain (Showing partial results):
+# ===========================================================
+
+# For the "brand" Facet:
+# "canon" Count:1, Sum: 30, Name: units_in_stock
+# "canon" Count:1, Min: 200, Average: 200, Name: price
+# "canon" Count:1, Max: 30.4, Name: mega_pixels
+# "canon" Count:1, Max: 400, Name: max_focal_length
+
+# "fuji" Count:4, Sum: 42, Name: units_in_stock
+# "fuji" Count:4, Min: 410, Name: price
+# "fuji" Count:4, Max: 102, Name: mega_pixels
+# "fuji" Count:4, Max: 800, Name: max_focal_length
+
+# etc.....
+#
+# For the "price" Ranges:
+# "price < 200" Count:3, Sum: 17, Name: units_in_stock
+# "price < 200" Count:3, Min: 100, Average: 133.33, Name: price
+# "price < 200" Count:3, Max: 32, Name: mega_pixels
+# "price < 200" Count:3, Max: 300, Name: max_focal_length
+
+# "200 <= price < 400" Count:5, Sum: 75, Name: units_in_stock
+# "200 <= price < 400" Count:5, Min: 200, Average: 252, Name: price
+# "200 <= price < 400" Count:5, Max: 40, Name: mega_pixels
+# "200 <= price < 400" Count:5, Max: 600, Name: max_focal_length
+
+# etc.....
+`}
+
+
+
+
+{`# Get results for the 'brand' facets:
+# ========================================
+brand_facets = results["brand"]
+
+# Get the aggregated facet value for a specific brand:
+facet_value = brand_facets.values[0]
+# The brand name is available in the 'range_' property:
+self.assertEqual("canon", facet_value.range_)
+# The index-field on which aggregation was done is in the 'name' property:
+self.assertEqual("units_in_stock", facet_value.name)
+# The requested aggregation result:
+self.assertEqual(30, facet_value.sum_)
+
+# Get results for the 'price' RangeFacets:
+# ========================================
+price_range_facets = results["price"]
+
+# Get the aggregated facet value for a specific range:
+facet_value = price_range_facets.values[0]
+# The range string is available in the 'range_' property:
+self.assertEqual("price < 200", facet_value.range_)
+# The index-field on which aggregation was done is in the 'name' property:
+self.assertEqual("units_in_stock", facet_value.name)
+# The requested aggregation result:
+self.assertEqual(17, facet_value.sum_)
+`}
+
+
+
+
+
+## Storing facets definition in a document
+
+#### Define and store facets in a document:
+
+* The facet definitions can be stored in a document.
+
+* That document can then be used by a faceted search query.
+
+
+
+{`facet_setup = FacetSetup()
+# Provide the ID of the document in which the facet setup will be stored.
+# This is optional -
+# if not provided then the session will assign an ID for the stored document.
+facet_setup.Id = "customDocumentID"
+
+# Define Facets and RangeFacets to query by:
+facet = Facet("brand")
+range_facet = RangeFacet()
+range_facet.ranges = [
+    "mega_pixels < 20",
+    "mega_pixels between 20 and 30",
+    "mega_pixels between 30 and 50",
+    "mega_pixels >= 50",
+]
+
+facet_setup.facets = [facet]
+facet_setup.range_facets = [range_facet]
+
+# Store the facet setup document and save changes:
+# ================================================
+session.store(facet_setup)
+session.save_changes()
+
+# The document will be stored under the 'FacetSetups' collection
+`}
+
+
+#### Query using facets from document:
+
+
+
+
+{`results = (
+    session
+    # Query the index
+    .query_index_type(Cameras_ByFeatures, Cameras_ByFeatures.IndexEntry)
+    # Call 'aggregate_using'
+    # Pass the ID of the document that contains your facet setup
+    .aggregate_using("customDocumentID").execute()
+)
+`}
+
+
+
+
+{`results = (
+    session.advanced
+    # Query the index
+    # Provide the RQL string to the raw_query method
+    .raw_query("from index 'Cameras/ByFeatures' select facet(id('customDocumentID'))", Camera)
+    # Execute the query
+    .execute_aggregation()
+)
+`}
+
+
+
+
+{`from index "Cameras/ByFeatures"
+select facet(id("customDocumentID"))
+`}
+
+
+
+
+
+
+## Syntax
+
+
+
+{`def aggregate_by(
+    self, builder_or_facet: Union[Callable[[FacetBuilder], None], FacetBase]
+) -> AggregationDocumentQuery[_T]: ...
+
+def aggregate_by_facets(self, facets: List[FacetBase]) -> AggregationDocumentQuery[_T]: ...
+
+def aggregate_using(self, facet_setup_document_id: str) -> AggregationDocumentQuery[_T]: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------------------|----------------------------|-----------------------|
+| **builder_or_facet** (Union) | `Callable[[FacetBuilder], None]`
**or**
`FacetBase` | Builder with a fluent API that constructs a `FacetBase` implementation instance
**or**
`FacetBase` implementation instance | +| **facets** | `List[FacetBase]` | A list of `FacetBase` implementation instances. |
+| **facet_setup_document_id** | `str` | ID of a document containing a `FacetSetup` |
+
+
+
+
+{`class Facet(FacetBase):
+    def __init__(self, field_name: str = None):
+        super().__init__()
+        self.field_name = field_name
+`}
+
+
+
+
+{`class RangeFacet(FacetBase):
+    def __init__(self, parent: Optional[FacetBase] = None):
+        super().__init__()
+        self.ranges: List[str] = []
+`}
+
+
+
+
+{`class FacetBase(ABC):
+    def __init__(self):
+        self.display_field_name: Union[None, str] = None
+        self.options: Union[None, FacetOptions] = None
+        self.aggregations: Dict[FacetAggregation, Set[FacetAggregationField]] = {}
+`}
+
+
+
+
+{`class FacetAggregation(enum.Enum):
+    NONE = "None"
+    MAX = "Max"
+    MIN = "Min"
+    AVERAGE = "Average"
+    SUM = "Sum"
+`}
+
+
+
+
+**Fluent API builder methods**:
+
+
+
+{`def by_ranges(self, range_: RangeBuilder, *ranges: RangeBuilder) -> FacetOperations[_T]: ...
+
+def by_field(self, field_name: str) -> FacetOperations[_T]: ...
+
+def with_display_name(self, display_name: str) -> FacetOperations[_T]: ...
+
+def with_options(self, options: FacetOptions) -> FacetOperations[_T]: ...
+
+def sum_on(self, path: str, display_name: Optional[str] = None) -> FacetOperations[_T]: ...
+
+def min_on(self, path: str, display_name: Optional[str] = None) -> FacetOperations[_T]: ...
+
+def max_on(self, path: str, display_name: Optional[str] = None) -> FacetOperations[_T]: ...
+
+def average_on(self, path: str, display_name: Optional[str] = None) -> FacetOperations[_T]: ...
+`}
+
+
+
+| Parameter | Type | Description |
+|------------------|-----------------------------|-------------|
+| **range_** | `RangeBuilder` | A range of index values |
+| **\*ranges** | `RangeBuilder` | Multiple index ranges (at least one), separated by `,` |
+| **field_name** | `str` | The index-field to use for the facet |
+| **path** | `str` | Points to the index-field to use for the facet (`by_ranges`, `by_field`) or for the aggregation (`sum_on`, `min_on`, `max_on`, `average_on`) |
+| **display_name** | `str` | If set, results of a facet will be returned under this name |
+| **options** | `FacetOptions` | Non-default options to use in the facet definition |
+
+
+
+**Options**:
+
+
+
+{`class FacetOptions:
+    def __init__(self):
+        self.page_size: int = constants.int_max
+        self.start: Union[None, int] = None
+        self.term_sort_mode: FacetTermSortMode = FacetTermSortMode.VALUE_ASC
+        self.include_remaining_terms: bool = False
+`}
+
+
+
+| Option | Type | Description |
+|---------------------------|---------------------|-------------------------------------------------------------------------------------------------------------|
+| **term_sort_mode** | `FacetTermSortMode` | Set the sort order on the resulting items
(`VALUE_ASC` (Default), `VALUE_DESC`, `COUNT_ASC`, `COUNT_DESC`) |
+| **start** | `int` | The position from which to send items (how many to skip) |
+| **page_size** | `int` | Number of items to return |
+| **include_remaining_terms** | `bool` | Indicates if remaining terms that didn't make it into the requested `page_size` should be included in results
Default value: `False` | + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_filtering-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_filtering-csharp.mdx new file mode 100644 index 0000000000..aff995d8d6 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_filtering-csharp.mdx @@ -0,0 +1,593 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* One of the most basic functionalities of querying is the ability to filter out data and return records that match a given condition. + +* RavenDB provides several ways to run queries, including: + * [Query](../../client-api/session/querying/how-to-query.mdx) from the basic `Session` API + * [DocumentQuery](../../client-api/session/querying/document-query/what-is-document-query.mdx) from the `Session.Advanced` API + * [RQL](../../client-api/session/querying/what-is-rql.mdx) - Raven Query Language + +* The examples in this page demonstrate how filtering is applied by each of the above querying methods. + +* In this page: + * [Where](../../indexes/querying/filtering.mdx#where) + * [Where - Numeric Property](../../indexes/querying/filtering.mdx#where---numeric-property) + * [Where - Nested Property](../../indexes/querying/filtering.mdx#where---nested-property) + * [Where + Any](../../indexes/querying/filtering.mdx#where-+-any) + * [Where + In](../../indexes/querying/filtering.mdx#where-+-in) + * [Where + ContainsAny](../../indexes/querying/filtering.mdx#where-+-containsany) + * [Where + ContainsAll](../../indexes/querying/filtering.mdx#where-+-containsall) + * [Where - StartsWith](../../indexes/querying/filtering.mdx#where---startswith) + * [Where - EndsWith](../../indexes/querying/filtering.mdx#where---endswith) + * [Where - Identifier Property](../../indexes/querying/filtering.mdx#where---identifier-property) + * [Where - Exists](../../indexes/querying/filtering.mdx#where---exists) + * [Remarks](../../indexes/querying/filtering.mdx#remarks) + + + +## Where + + + + +{`IList results = session + .Query() // query 'Employees/ByFirstAndLastName' index + .Where(x => x.FirstName == "Robert" && x.LastName == "King") // filtering predicates + .ToList(); // materialize query by sending it to server for processing +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() // query 'Employees/ByFirstAndLastName' index + .WhereEquals(x => x.FirstName, "Robert") // filtering predicates + .AndAlso() // by default OR is between each condition + .WhereEquals(x => x.LastName, "King") // filtering predicates + .ToList(); // materialize query by sending it to server for processing +`} + + + + +{`public class Employees_ByFirstAndLastName : AbstractIndexCreationTask +{ + public Employees_ByFirstAndLastName() + { + Map = employees => from employee in employees + select new + { + FirstName = employee.FirstName, + LastName = employee.LastName + }; + } +} +`} + + + + +{`from index 'Employees/ByFirstAndLastName' +where FirstName = 'Robert' and LastName = 'King' +`} + + + + + + +## Where - Numeric Property + + + + +{`IList results = session + .Query() // query 'Products/ByUnitsInStock' index + .Where(x => x.UnitsInStock > 50) // filtering predicates + .ToList(); // materialize query by sending it to server for processing +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() // query 'Products/ByUnitsInStock' index + .WhereGreaterThan(x => x.UnitsInStock, 50) // filtering predicates + .ToList(); // 
materialize query by sending it to server for processing
+`}
+
+
+
+
+{`private class Products_ByUnitsInStock : AbstractIndexCreationTask<Product>
+{
+    public Products_ByUnitsInStock()
+    {
+        Map = products => from product in products
+                          select new
+                          {
+                              product.UnitsInStock
+                          };
+    }
+}
+`}
+
+
+
+
+{`from index 'Products/ByUnitsInStock'
+where UnitsInStock > 50
+`}
+
+
+
+
+
+## Where - Nested Property
+
+
+
+
+{`// return all orders that were shipped to 'Albuquerque'
+IList<Order> results = session
+    .Query<Order>()
+    .Where(x => x.ShipTo.City == "Albuquerque")
+    .ToList();
+`}
+
+
+
+
+{`// return all orders that were shipped to 'Albuquerque'
+IList<Order> results = session
+    .Advanced
+    .DocumentQuery<Order>()
+    .WhereEquals(x => x.ShipTo.City, "Albuquerque")
+    .ToList();
+`}
+
+
+
+
+{`from Orders
+where ShipTo.City = 'Albuquerque'
+`}
+
+
+
+
+
+
+
+{`IList<Order> results = session
+    .Query<Order, Order_ByOrderLinesCount>() // query 'Order/ByOrderLinesCount' index
+    .Where(x => x.Lines.Count > 50) // filtering predicates
+    .ToList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`IList<Order> results = session
+    .Advanced
+    .DocumentQuery<Order, Order_ByOrderLinesCount>() // query 'Order/ByOrderLinesCount' index
+    .WhereGreaterThan(x => x.Lines.Count, 50) // filtering predicates
+    .ToList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`private class Order_ByOrderLinesCount : AbstractIndexCreationTask<Order>
+{
+    public Order_ByOrderLinesCount()
+    {
+        Map = orders => from order in orders
+                        select new
+                        {
+                            Lines_Count = order.Lines.Count
+                        };
+    }
+}
+`}
+
+
+
+
+{`from index 'Order/ByOrderLinesCount'
+where Lines.Count > 50
+`}
+
+
+
+
+
+## Where + Any
+
+`Any` is useful when you have a collection of items (e.g. `Order` contains `OrderLines`) and you want to filter based on values from this collection. For example, let's retrieve all orders that contain an `OrderLine` with a given product.
+
+
+
+
+{`IList<Order> results = session
+    .Query<Order, Order_ByOrderLines_ProductName>() // query 'Order/ByOrderLines/ProductName' index
+    .Where(x => x.Lines.Any(l => l.ProductName == "Teatime Chocolate Biscuits")) // filtering predicates
+    .ToList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`IList<Order> results = session
+    .Advanced
+    .DocumentQuery<Order, Order_ByOrderLines_ProductName>() // query 'Order/ByOrderLines/ProductName' index
+    .WhereEquals("Lines_ProductName", "Teatime Chocolate Biscuits") // filtering predicates
+    .ToList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`public class Order_ByOrderLines_ProductName : AbstractIndexCreationTask<Order>
+{
+    public Order_ByOrderLines_ProductName()
+    {
+        Map = orders => from order in orders
+                        select new
+                        {
+                            Lines_ProductName = order.Lines.Select(x => x.ProductName)
+                        };
+    }
+}
+`}
+
+
+
+
+{`from index 'Order/ByOrderLines/ProductName'
+where Lines_ProductName = 'Teatime Chocolate Biscuits'
+`}
+
+
+
+
+
+## Where + In
+
+When you want to check a single value against multiple values, the `In` operator can become handy.
To retrieve all employees where `FirstName` is either `Robert` or `Nancy`, we can issue the following query: + + + + +{`IList results = session + .Query() // query 'Employees/ByFirstAndLastName' index + .Where(x => x.FirstName.In("Robert", "Nancy")) // filtering predicates (remember to add \`Raven.Client.Linq\` namespace to usings) + .ToList(); // materialize query by sending it to server for processing +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() // query 'Employees/ByFirstAndLastName' index + .WhereIn(x => x.FirstName, new[] { "Robert", "Nancy" }) // filtering predicates + .ToList(); // materialize query by sending it to server for processing +`} + + + + +{`public class Employees_ByFirstAndLastName : AbstractIndexCreationTask +{ + public Employees_ByFirstAndLastName() + { + Map = employees => from employee in employees + select new + { + FirstName = employee.FirstName, + LastName = employee.LastName + }; + } +} +`} + + + + +{`from index 'Employees/ByFirstAndLastName' +where FirstName IN ('Robert', 'Nancy') +`} + + + + + +Remember to add the `Raven.Client.Documents.Linq` namespace to usings if you want to use `In` extension method. + + + + +## Where + ContainsAny + +To check if enumeration contains **any** of the values from a specified collection, you can use the `ContainsAny` method. + +Let's assume that we want to return all `BlogPosts` that contain any of the specified `Tags`. + + + + +{`IList results = session + .Query() // query 'BlogPosts/ByTags' index + .Where(x => x.Tags.ContainsAny(new[] { "Development", "Research" })) // filtering predicates (remember to add \`Raven.Client.Linq\` namespace to usings) + .ToList(); // materialize query by sending it to server for processing +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() // query 'BlogPosts/ByTags' index + .ContainsAny("Tags", new[] { "Development", "Research" }) // filtering predicates + .ToList(); // materialize query by sending it to server for processing +`} + + + + +{`public class BlogPosts_ByTags : AbstractIndexCreationTask +{ + public BlogPosts_ByTags() + { + Map = posts => from post in posts + select new + { + post.Tags + }; + } +} +`} + + + + +{`from index 'BlogPosts/ByTags' +where Tags IN ('Development', 'Research') +`} + + + + + +Remember to add the `Raven.Client.Documents.Linq` namespace to usings if you want to use the `ContainsAny` extension method. + + + + +## Where + ContainsAll + +To check if an enumeration contains **all** of the values from a specified collection, you can use the `ContainsAll` method. + +Let's assume that we want to return all the `BlogPosts` that contain all of the specified `Tags`. 
+ + + + +{`IList results = session + .Query() // query 'BlogPosts/ByTags' index + .Where(x => x.Tags.ContainsAll(new[] { "Development", "Research" })) // filtering predicates (remember to add \`Raven.Client.Linq\` namespace to usings) + .ToList(); // materialize query by sending it to server for processing +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() // query 'BlogPosts/ByTags' index + .ContainsAll("Tags", new[] { "Development", "Research" }) // filtering predicates + .ToList(); // materialize query by sending it to server for processing +`} + + + + +{`public class BlogPosts_ByTags : AbstractIndexCreationTask +{ + public BlogPosts_ByTags() + { + Map = posts => from post in posts + select new + { + post.Tags + }; + } +} +`} + + + + +{`from index 'BlogPosts/ByTags' +where Tags ALL IN ('Development', 'Research') +`} + + + + + +Remember to add the `Raven.Client.Documents.Linq` namespace to usings if you want to use the `ContainsAll` extension method. + + + + +## Where - StartsWith + + + + +{`// return all products which name starts with 'ch' +IList results = session + .Query() + .Where(x => x.Name.StartsWith("ch")) + .ToList(); +`} + + + + +{`// return all products which name starts with 'ch' +IList results = session + .Advanced + .DocumentQuery() + .WhereStartsWith(x => x.Name, "ch") + .ToList(); +`} + + + + +{`from Products +where startsWith(Name, 'ch') +`} + + + + + + +## Where - EndsWith + + + + +{`// return all products which name ends with 'ra' +IList results = session + .Query() + .Where(x => x.Name.EndsWith("ra")) + .ToList(); +`} + + + + +{`// return all products which name ends with 'ra' +IList results = session + .Advanced + .DocumentQuery() + .WhereEndsWith(x => x.Name, "ra") + .ToList(); +`} + + + + +{`from Products +where endsWith(Name, 'ra') +`} + + + + + + +## Where - Identifier Property + +Once a property used in the `Where` clause is recognized as an identity property of a given entity type +(according to [`FindIdentityProperty` convention](../../client-api/configuration/identifier-generation/global.mdx#findidentityproperty)) +and there aren't any other fields involved in the query, then it is called a "collection query". +Simple collection queries that ask about documents with given IDs or where identifiers start with a given prefix +and don't require any additional handling like ordering, full-text searching, etc, are handled directly by the storage engine. +It means that querying by ID doesn't create an auto-index and has no extra cost. In terms of efficiency, it is the same as +loading documents with [`session.Load`](../../client-api/session/loading-entities.mdx) usage. + + + + + +{`Order order = session + .Query() + .Where(x => x.Id == "orders/1-A") + .FirstOrDefault(); +`} + + + + +{`Order order = session + .Advanced + .DocumentQuery() + .WhereEquals(x => x.Id, "orders/1-A") + .FirstOrDefault(); +`} + + + + +{`from Orders +where id() = 'orders/1-A' +`} + + + + + + + +{`IList orders = session + .Query() + .Where(x => x.Id.StartsWith("orders/1")) + .ToList(); +`} + + + + +{`IList orders = session + .Advanced + .DocumentQuery() + .WhereStartsWith(x => x.Id, "orders/1") + .ToList(); +`} + + + + +{`from Orders +where startsWith(id(), 'orders/1') +`} + + + + + + +## Where - Exists + +To find all documents in a collection that have a specified field, see [How to Filter by Field Presence](../../client-api/session/querying/how-to-filter-by-field.mdx). 
+
+To find all documents in a collection that don't have a specified field, see [How to Filter by Non-Existing Field](../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx).
+
+
+
+## Remarks
+
+
+`Query` and `DocumentQuery` convert predicates to the `IndexQuery` class
+so they can issue a query from a **low-level operation method**.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_filtering-java.mdx b/versioned_docs/version-7.1/indexes/querying/_filtering-java.mdx
new file mode 100644
index 0000000000..429accd5b7
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_filtering-java.mdx
@@ -0,0 +1,328 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+One of the most basic functionalities of querying is the ability to filter out data and return records that match a given condition. There are a couple of ways to do this.
+
+The following examples demonstrate how to add simple conditions to a query using all of those methods.
+
+## Where
+
+
+
+
+{`List<Employee> results = session
+    .query(Employee.class, Employees_ByFirstAndLastName.class) // query 'Employees/ByFirstAndLastName' index
+    .whereEquals("FirstName", "Robert") // filtering predicates
+    .andAlso() // by default OR is between each condition
+    .whereEquals("LastName", "King") // filtering predicates
+    .toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`public class Employees_ByFirstAndLastName extends AbstractIndexCreationTask {
+    public Employees_ByFirstAndLastName() {
+        map = "docs.Employees.Select(employee => new {" +
+            "    FirstName = employee.FirstName," +
+            "    LastName = employee.LastName" +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName'
+where FirstName = 'Robert' and LastName = 'King'
+`}
+
+
+
+
+## Where - Numeric Property
+
+
+
+
+{`List<Product> results = session
+    .query(Product.class, Products_ByUnitsInStock.class) // query 'Products/ByUnitsInStock' index
+    .whereGreaterThan("UnitsInStock", 50) // filtering predicates
+    .toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`public class Products_ByUnitsInStock extends AbstractIndexCreationTask {
+    public Products_ByUnitsInStock() {
+        map = "docs.Products.Select(product => new {" +
+            "    UnitsInStock = product.UnitsInStock" +
+            " })";
+    }
+}
+`}
+
+
+
+
+{`from index 'Products/ByUnitsInStock'
+where UnitsInStock > 50
+`}
+
+
+
+
+## Where - Nested Property
+
+
+
+
+{`// return all orders that were shipped to 'Albuquerque'
+List<Order> results = session
+    .query(Order.class)
+    .whereEquals("ShipTo_city", "Albuquerque")
+    .toList();
+`}
+
+
+
+
+{`from Orders
+where ShipTo.City = 'Albuquerque'
+`}
+
+
+
+
+
+
+
+{`List<Order> results = session
+    .query(Order.class, Order_ByOrderLinesCount.class) // query 'Order/ByOrderLinesCount' index
+    .whereGreaterThan("Lines_count", 50) // filtering predicates
+    .toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`public class Order_ByOrderLinesCount extends AbstractIndexCreationTask {
+    public Order_ByOrderLinesCount() {
+        map = "docs.Orders.Select(order => new {" +
+            "    Lines_count = order.Lines.Count" +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index 'Order/ByOrderLinesCount'
+where Lines.Count > 50
+`}
+
+
+
+
+## Where + Any
+
+`Any` is useful when you have a collection of items (e.g. `Order` contains `OrderLines`) and you want to filter based on values from this collection.
For example, let's retrieve all orders that contain an `OrderLine` with a given product.
+
+
+
+
+{`session
+    .query(Order.class, Order_ByOrderLines_ProductName.class) // query 'Order/ByOrderLines/ProductName' index
+    .whereEquals("Lines_productName", "Teatime Chocolate Biscuits") // filtering predicates
+    .toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`public class Order_ByOrderLines_ProductName extends AbstractIndexCreationTask {
+    public Order_ByOrderLines_ProductName() {
+        map = "docs.Orders.Select(order => new {" +
+            "    Lines_productName = order.Lines.Select(x => x.ProductName)" +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index 'Order/ByOrderLines/ProductName'
+where Lines_productName = 'Teatime Chocolate Biscuits'
+`}
+
+
+
+
+## Where + In
+
+When you want to check a single value against multiple values, the `In` operator can become handy. To retrieve all employees where `FirstName` is either `Robert` or `Nancy`, we can issue the following query:
+
+
+
+
+{`List<Employee> results = session
+    .query(Employee.class, Employees_ByFirstAndLastName.class) // query 'Employees/ByFirstAndLastName' index
+    .whereIn("FirstName", Arrays.asList("Robert", "Nancy")) // filtering predicates
+    .toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`public class Employees_ByFirstAndLastName extends AbstractIndexCreationTask {
+    public Employees_ByFirstAndLastName() {
+        map = "docs.Employees.Select(employee => new {" +
+            "    FirstName = employee.FirstName," +
+            "    LastName = employee.LastName" +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName'
+where FirstName IN ('Robert', 'Nancy')
+`}
+
+
+
+
+## Where + ContainsAny
+
+To check if an enumeration contains **any** of the values from a specified collection, you can use the `containsAny` method.
+
+Let's assume that we want to return all `BlogPosts` that contain any of the specified `tags`.
+
+
+
+
+{`List<BlogPost> results = session
+    .query(BlogPost.class, BlogPosts_ByTags.class) // query 'BlogPosts/ByTags' index
+    .containsAny("tags", Arrays.asList("Development", "Research")) // filtering predicates
+    .toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`public class BlogPosts_ByTags extends AbstractIndexCreationTask {
+    public BlogPosts_ByTags() {
+        map = "docs.BlogPosts.Select(post => new {" +
+            "    tags = post.tags" +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index 'BlogPosts/ByTags'
+where Tags IN ('Development', 'Research')
+`}
+
+
+
+
+## Where + ContainsAll
+
+To check if an enumeration contains **all** of the values from a specified collection, you can use the `containsAll` method.
+
+Let's assume that we want to return all the `BlogPosts` that contain all of the specified `tags`.
+ + + + +{`List results = session + .query(BlogPost.class, BlogPosts_ByTags.class) // query 'BlogPosts/ByTags' index + .containsAll("tags", Arrays.asList("Development", "Research")) // filtering predicates + .toList(); // materialize query by sending it to server for processing +`} + + + + +{`public class BlogPosts_ByTags extends AbstractIndexCreationTask { + public BlogPosts_ByTags() { + map = "docs.BlogPosts.Select(post => new {" + + " tags = post.tags" + + "})"; + } +} +`} + + + + +{`from index 'BlogPosts/ByTags' +where Tags ALL IN ('Development', 'Research') +`} + + + + +## Where - StartsWith + + + + +{`// return all products which name starts with 'ch' +List results = session + .query(Product.class) + .whereStartsWith("Name", "ch") + .toList(); +`} + + + + +{`from Products +where startsWith(Name, 'ch') +`} + + + + +## Where - EndsWith + + + + +{`// return all products which name ends with 'ra' +List results = session + .query(Product.class) + .whereEndsWith("Name", "ra") + .toList(); +`} + + + + +{`from Products +where endsWith(Name, 'ra') +`} + + + + + +## Remarks + + +Underneath, `Query` is converting predicates to the `IndexQuery` class so they can issue a query from a **low-level operation method**. + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_filtering-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_filtering-nodejs.mdx new file mode 100644 index 0000000000..8b5df0de19 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_filtering-nodejs.mdx @@ -0,0 +1,526 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* One of the most basic functionalities of querying is the ability to filter out data and return records that match a given condition. 
+
+* The following examples demonstrate how to add simple conditions to a query:
+   * [Where - equals](../../indexes/querying/filtering.mdx#where---equals)
+   * [Where - numeric property](../../indexes/querying/filtering.mdx#where---numeric-property)
+   * [Where - nested property](../../indexes/querying/filtering.mdx#where---nested-property)
+   * [Where - multiple values](../../indexes/querying/filtering.mdx#where---multiple-values)
+   * [Where - in](../../indexes/querying/filtering.mdx#where---in)
+   * [Where - containsAny](../../indexes/querying/filtering.mdx#where---containsany)
+   * [Where - containsAll](../../indexes/querying/filtering.mdx#where---containsall)
+   * [Where - startsWith](../../indexes/querying/filtering.mdx#where---startswith)
+   * [Where - endsWith](../../indexes/querying/filtering.mdx#where---endswith)
+   * [Where - exists](../../indexes/querying/filtering.mdx#where---exists)
+   * [Where - filter by ID](../../indexes/querying/filtering.mdx#where---filter-by-id)
+
+
+## Where - equals
+
+
+
+
+{`// Basic filtering using "whereEquals":
+// ====================================
+
+const filteredEmployees = await session
+    // Query an index
+    .query({ indexName: "Employees/ByFirstAndLastName" })
+    // The filtering predicate
+    .whereEquals("FirstName", "Robert")
+    // By default AND is applied between both 'where' predicates
+    .whereEquals("LastName", "King")
+    // Execute the query, send it to the server for processing
+    .all();
+
+// Results will include all Employee documents
+// with FirstName equal to 'Robert' AND LastName equal to 'King'
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("employees", employee => {
+            return {
+                FirstName: employee.FirstName,
+                LastName: employee.LastName
+            };
+        });
+    }
+}
+`}
+
+
+
+
+{`from index "Employees/ByFirstAndLastName"
+where FirstName == "Robert" and LastName == "King"
+`}
+
+
+
+
+
+## Where - numeric property
+
+
+
+
+{`// Filter with "whereGreaterThan":
+// ===============================
+
+const filteredProducts = await session
+    // Query an index
+    .query({ indexName: "Products/ByUnitsInStock" })
+    // The filtering predicate
+    .whereGreaterThan("UnitsInStock", 20)
+    .all();
+
+// Results will include all Product documents having 'UnitsInStock' > 20
+`}
+
+
+
+
+{`class Products_ByUnitsInStock extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("products", product => {
+            return {
+                UnitsInStock: product.UnitsInStock
+            };
+        });
+    }
+}
+`}
+
+
+
+
+{`from index "Products/ByUnitsInStock"
+where UnitsInStock > 20
+`}
+
+
+
+
+
+
+
+{`// Filter with "whereLessThan":
+// ============================
+
+const filteredProducts = await session
+    // Query an index
+    .query({ indexName: "Products/ByUnitsInStock" })
+    // The filtering predicate
+    .whereLessThan("UnitsInStock", 20)
+    .all();
+
+// Results will include all Product documents having 'UnitsInStock' < 20
+`}
+
+
+
+
+{`class Products_ByUnitsInStock extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("products", product => {
+            return {
+                UnitsInStock: product.UnitsInStock
+            };
+        });
+    }
+}
+`}
+
+
+
+
+{`from index "Products/ByUnitsInStock"
+where UnitsInStock < 20
+`}
+
+
+
+
+
+## Where - nested property
+
+
+
+
+{`// Filter by a nested property:
+// ============================
+
+const filteredOrders = await session
+    // Query a collection
+    .query({ collection: "Orders" })
+    // Filter by the nested property 'ShipTo.City'
from the Order document + .whereEquals("ShipTo.City", "Albuquerque") + .all(); + +// * Results will include all Order documents with an order that ships to 'Albuquerque' +// * An auto-index will be created +`} + + + + +{`from "Orders" +where ShipTo.City == "Albuquerque" +`} + + + + + + +## Where - multiple values + + + + +{`// Filter by multiple values: +// ========================== + +const filteredOrders = await session + // Query an index + .query({ indexName: "Orders/ByProductNamesPerOrderLine" }) + // Filter by multiple values + .whereEquals("ProductName", "Teatime Chocolate Biscuits") + .all(); + +// Results will include all Order documents that contain ALL values in "Teatime Chocolate Biscuits" +`} + + + + +{`class Orders_ByProductNamesPerOrderLine extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("orders", order => { + return { + // Index field 'ProductNames' will contain the product names per Order Line + ProductNames: order.Lines.map(x => x.ProductName) + }; + }); + } +} +`} + + + + +{`from index "Orders/ByProductNamesPerOrderLine" +where ProductNames == "Teatime Chocolate Biscuits" +`} + + + + + + +## Where - in + +Use `whereIn` when you want to filter by a single value out of multiple given values. + + + + +{`// Filter with "whereIn": +// ====================== + +const filteredEmployees = await session + // Query an index + .query({ indexName: "Employees/ByFirstAndLastName" }) + // The filtering predicate + .whereIn("FirstName", [ "Robert", "Nancy" ]) + .all(); + +// Results will include all Employee documents that have either 'Robert' OR 'Nancy' in their 'FirstName' field +`} + + + + +{`class Employees_ByFirstAndLastName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("employees", employee => { + return { + FirstName: employee.FirstName, + LastName: employee.LastName + }; + }); + } +} +`} + + + + +{`from index "Employees/ByFirstAndLastName" +where FirstName in ("Robert", "Nancy") +`} + + + + + + +## Where - containsAny + +Use `containsAny` to check if an enumeration contains any of the values from the specified list. + + + + +{`// Filter with "containsAny": +// ========================== + +const filteredOrders = await session + // Query an index + .query({ indexName: "Orders/ByProductNames" }) + // The filtering predicate + .containsAny("ProductNames", ["Ravioli", "Coffee"]) + .all(); + +// Results will include all Order documents that have either 'Ravioli' OR 'Coffee' in their order +`} + + + + +{`class Orders_ByProductNames extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("orders", order => { + return { + // Index field 'ProductNames' will contain a list of all product names + ProductNames: order.Lines.flatMap(x => x.ProductName.split(" ")) + }; + }); + } +} +`} + + + + +{`from index "Orders/ByProductNames" +where ProductNames in ("ravioli", "coffee") +`} + + + + + + +## Where - containsAll + +Use `containsAll` to check if an enumeration contains all of the values from the specified list. 
+
+
+
+
+{`// Filter with "containsAll":
+// ==========================
+
+const filteredOrders = await session
+    // Query an index
+    .query({ indexName: "Orders/ByProductNames" })
+    // The filtering predicate
+    .containsAll("ProductNames", ["Ravioli", "Pepper"])
+    .all();
+
+// Results will include all Order documents that have both 'Ravioli' AND 'Pepper' in their order
+`}
+
+
+
+
+{`class Orders_ByProductNames extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("orders", order => {
+            return {
+                // Index field 'ProductNames' will contain a list of all product names
+                ProductNames: order.Lines.flatMap(x => x.ProductName.split(" "))
+            };
+        });
+    }
+}
+`}
+
+
+
+
+{`from index "Orders/ByProductNames"
+where ProductNames all in ("ravioli", "pepper")
+`}
+
+
+
+
+
+## Where - startsWith
+
+
+
+
+{`// Filter with "whereStartsWith":
+// ==============================
+
+const filteredProducts = await session
+    // Query a collection
+    .query({ collection: "Products" })
+    // The filtering predicate
+    .whereStartsWith("Name", "ch")
+    .all();
+
+// * Results will include all Product documents with a name that starts with 'ch'
+// * An auto-index will be created
+`}
+
+
+
+
+{`from "Products"
+where startsWith(Name, "ch")
+`}
+
+
+
+
+
+## Where - endsWith
+
+
+
+
+{`// Filter with "whereEndsWith":
+// ============================
+
+const filteredProducts = await session
+    // Query a collection
+    .query({ collection: "Products" })
+    // The filtering predicate
+    .whereEndsWith("Name", "es")
+    .all();
+
+// * Results will include all Product documents with a name that ends with 'es'
+// * An auto-index will be created
+`}
+
+
+
+
+{`from "Products"
+where endsWith(Name, "es")
+`}
+
+
+
+
+
+## Where - exists
+
+* To find all documents in a collection that have a specified field,
+  see [How to Filter by Field Presence](../../client-api/session/querying/how-to-filter-by-field.mdx).
+
+* To find all documents in a collection that don't have a specified field,
+  see [How to Filter by Non-Existing Field](../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx).
+
+
+
+## Where - filter by ID
+
+* Once the property used in the `whereEquals` clause is recognized as an identity property of a given entity type,
+  and there aren't any other fields involved in the query predicate, then this query is considered a "Collection Query".
+
+* Such collection queries that ask for documents with given IDs, or where identifiers start with a given prefix,
+  and that don't require any additional handling like ordering or full-text searching, are handled directly by the storage engine.
+
+* This means that querying by ID doesn't create an auto-index and has no extra cost.
+  In terms of efficiency, it is the same as loading documents with [`session.load`](../../client-api/session/loading-entities.mdx) usage.
+ + + + +{`// Filter by id: +// ============= + +const order = await session + // Query a collection + .query({ collection: "Orders" }) + // The filtering predicate + .whereEquals("id", "orders/1-A") + .firstOrNull(); + +// * Results will include the Order document having ID 'orders/1-A' +// * An auto-index is NOT created +`} + + + + +{`from "Orders" +where id() == "orders/1-A" +`} + + + + + + + +{`// Filter by whereStartsWith id: +// ============================= + +const filteredOrders = await session + // Query a collection + .query({ collection: "Orders" }) + // The filtering predicate + .whereStartsWith("id", "orders/1") + .all(); + +// * Results will include all Order documents having ID that starts with 'orders/1' +// * An auto-index is NOT created +`} + + + + +{`from "Orders" +where startsWith(id(), "orders/1") +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_filtering-php.mdx b/versioned_docs/version-7.1/indexes/querying/_filtering-php.mdx new file mode 100644 index 0000000000..11c88f1d7e --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_filtering-php.mdx @@ -0,0 +1,544 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* One of the most basic functionalities of querying is the ability to filter out data and return records that match a given condition. + +* Index queries can be executed using - + * [query_index_type](../../client-api/session/querying/how-to-query.mdx) from the basic `session` API + * [RQL](../../client-api/session/querying/what-is-rql.mdx) - Raven Query Language + +* The examples in this page demonstrate how filtering is applied by each of these methods. + +* In this page: + * [`whereEquals` - Where equals](../../indexes/querying/filtering.mdx#whereequals---where-equals) + * [`whereGreaterThan` - Numeric property](../../indexes/querying/filtering.mdx#wheregreaterthan---numeric-property) + * [`whereGreaterThan` - Nested property](../../indexes/querying/filtering.mdx#wheregreaterthan---nested-property) + * [`whereIn` - Single -vs- Multiple values](../../indexes/querying/filtering.mdx#wherein---single--vs--multiple-values) + * [`containsAny` - Any value from specified collection](../../indexes/querying/filtering.mdx#containsany---any-value-from-specified-collection) + * [`containsAll` - All values from specified collection](../../indexes/querying/filtering.mdx#containsall---all-values-from-specified-collection) + * [`whereStartsWith` - All records with given prefix](../../indexes/querying/filtering.mdx#wherestartswith---all-records-with-given-prefix) + * [`whereEndsWith` - All records with given suffix](../../indexes/querying/filtering.mdx#whereendswith---all-records-with-given-suffix) + * [Where - Identifier Property](../../indexes/querying/filtering.mdx#where---identifier-property) + * [`whereExists` - Where exists or doesn't exist](../../indexes/querying/filtering.mdx#whereexists---where-exists-or-doesn) + + + +## `whereEquals` - Where equals + + + + +{`/** @var array $results */ +$results = $session + ->query(Employee::class, Employees_ByFirstAndLastName::class) // query 'Employees/ByFirstAndLastName' index + ->whereEquals("FirstName", "Robert") + ->andAlso() + ->whereEquals("LastName", "King") // filtering predicates + ->toList(); // materialize query by sending it to server for processing +`} + + + + +{`/** @var array $results */ +$results = $session + ->advanced() + ->documentQuery(Employee::class, 
Employees_ByFirstAndLastName::class) // query 'Employees/ByFirstAndLastName' index + ->whereEquals("FirstName", "Robert") // filtering predicates + ->andAlso() // by default OR is between each condition + ->whereEquals("LastName", "King") // filtering predicates + ->toList(); // materialize query by sending it to server for processing +`} + + + + +{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Employees.Select(employee => new {" . + " FirstName = employee.FirstName," . + " LastName = employee.LastName" . + "})"; + } +} +`} + + + + +{`from index 'Employees/ByFirstAndLastName' +where FirstName = 'Robert' and LastName = 'King' +`} + + + + + + +## `whereGreaterThan` - Numeric property + + + + +{`/** @var array $results */ +$results = $session + ->query(Product::class, Products_ByUnitsInStock::class) // query 'Products/ByUnitsInStock' index + ->whereGreaterThan("UnitsInStock", 50) // filtering predicates + ->toList(); // materialize query by sending it to server for processing +`} + + + + +{`/** @var array $results */ +$results = $session + ->advanced() + ->documentQuery(Product::class, Products_ByUnitsInStock::class) // query 'Products/ByUnitsInStock' index + ->whereGreaterThan("UnitsInStock", 50) // filtering predicates + ->toList(); // materialize query by sending it to server for processing +`} + + + + +{`class Products_ByUnitsInStock extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Products.Select(product => new {" . + " UnitsInStock = product.UnitsInStock" . + " })"; + } +} +`} + + + + +{`from index 'Products/ByUnitsInStock' +where UnitsInStock > 50 +`} + + + + + + +## `whereGreaterThan` - Nested property + + + + +{`// return all orders that were shipped to 'Albuquerque' +/** @var array $results */ +$results = $session + ->query(Order::class) + ->whereEquals("ShipTo_City", "Albuquerque") + ->toList(); +`} + + + + +{`// return all orders that were shipped to 'Albuquerque' +$results = $session + ->advanced() + ->documentQuery(Order::class) + ->whereEquals("ShipTo_City", "Albuquerque") + ->toList(); +`} + + + + +{`from Orders +where ShipTo.City = 'Albuquerque' +`} + + + + + + + +{`/** @var array $results */ +$results = $session + ->query(Order::class, Order_ByOrderLinesCount::class) // query 'Order/ByOrderLinesCount' index + ->whereGreaterThan("Lines_Count", 50) // filtering predicates + ->toList(); // materialize query by sending it to server for processing +`} + + + + +{`/** @var array $results */ +$results = $session + ->advanced() + ->documentQuery(Order::class, Order_ByOrderLinesCount::class) // query 'Order/ByOrderLinesCount' index + ->whereGreaterThan("Lines_Count", 50) // filtering predicates + ->toList(); // materialize query by sending it to server for processing +`} + + + + +{`class Order_ByOrderLinesCount extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Orders.Select(order => new {" . + " Lines_Count = order.Lines.Count" . + "})"; + } +} +`} + + + + +{`from index 'Order/ByOrderLinesCount' +where Lines.Count > 50 +`} + + + + + + +## `whereIn` - Single -vs- Multiple values + +When you want to check a single value against multiple values, `whereIn` can become handy. 
+To retrieve all employees where `FirstName` is either `Robert` or `Nancy`, we can issue the following query:
+
+
+
+
+{`/** @var array $results */
+$results = $session
+    ->query(Employee::class, Employees_ByFirstAndLastName::class) // query 'Employees/ByFirstAndLastName' index
+    ->whereIn("FirstName", ["Robert", "Nancy"]) // filtering predicates
+    ->toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`/** @var array $results */
+$results = $session
+    ->advanced()
+    ->documentQuery(Employee::class, Employees_ByFirstAndLastName::class) // query 'Employees/ByFirstAndLastName' index
+    ->whereIn("FirstName", [ "Robert", "Nancy" ]) // filtering predicates
+    ->toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "docs.Employees.Select(employee => new {" .
+            "    FirstName = employee.FirstName," .
+            "    LastName = employee.LastName" .
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName'
+where FirstName IN ('Robert', 'Nancy')
+`}
+
+
+
+
+
+## `containsAny` - Any value from specified collection
+
+To check if an enumeration contains **any** of the values from a specified collection,
+use the `containsAny` method.
+
+For example, if you want to return all `BlogPosts` that contain any of the specified `Tags`:
+
+
+
+
+{`/** @var array $results */
+$results = $session
+    ->query(BlogPost::class, BlogPosts_ByTags::class) // query 'BlogPosts/ByTags' index
+    ->containsAny("Tags", [ "Development", "Research" ]) // filtering predicates
+    ->toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`/** @var array $results */
+$results = $session
+    ->advanced()
+    ->documentQuery(BlogPost::class, BlogPosts_ByTags::class) // query 'BlogPosts/ByTags' index
+    ->containsAny("Tags", [ "Development", "Research" ]) // filtering predicates
+    ->toList(); // materialize query by sending it to server for processing
+`}
+
+
+
+
+{`class BlogPosts_ByTags extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "docs.BlogPosts.Select(post => new {" .
+            "    tags = post.tags" .
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index 'BlogPosts/ByTags'
+where Tags IN ('Development', 'Research')
+`}
+
+
+
+
+
+## `containsAll` - All values from specified collection
+
+To check if an enumeration contains **all** of the values from a specified collection,
+use the `containsAll` method.
+ +For example, if you want to return all the `BlogPosts` that contain all of the specified `Tags`: + + + + +{`/** @var array $results */ +$results = $session + ->query(BlogPost::class, BlogPosts_ByTags::class) // query 'BlogPosts/ByTags' index + ->containsAll("Tags", [ "Development", "Research" ]) // filtering predicates + ->toList(); // materialize query by sending it to server for processing +`} + + + + +{`/** @var array $results */ +$results = $session + ->advanced() + ->documentQuery(BlogPost::class, BlogPosts_ByTags::class) // query 'BlogPosts/ByTags' index + ->containsAll("Tags", [ "Development", "Research" ]) // filtering predicates + ->toList(); // materialize query by sending it to server for processing +`} + + + + +{`class BlogPosts_ByTags extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.BlogPosts.Select(post => new {" . + " tags = post.tags" . + "})"; + } +} +`} + + + + +{`from index 'BlogPosts/ByTags' +where Tags ALL IN ('Development', 'Research') +`} + + + + + + +## `whereStartsWith` - All records with given prefix + + + + +{`// return all products which name starts with 'ch' +/** @var array $results */ +$results = $session + ->query(Product::class) + ->whereStartsWith("Name", "ch") + ->toList(); +`} + + + + +{`// return all products which name starts with 'ch' +/** @var array $results */ +$results = $session + ->advanced() + ->documentQuery(Product::class) + ->whereStartsWith("Name", "ch") + ->toList(); +`} + + + + +{`from Products +where startsWith(Name, 'ch') +`} + + + + + + +## `whereEndsWith` - All records with given suffix + + + + +{`// return all products which name ends with 'ra' +/** @var array $results */ +$results = $session + ->query(Product::class) + ->whereEndsWith("Name", "ra") + ->toList(); +`} + + + + +{`// return all products which name ends with 'ra' +/** @var array $results */ +$results = $session + ->advanced() + ->documentQuery(Product::class) + ->whereEndsWith("Name", "ra") + ->toList(); +`} + + + + +{`from Products +where endsWith(Name, 'ra') +`} + + + + + + +## Where - Identifier Property + +Once a property used in the `whereEquals` clause is recognized as an identity property of a given entity type +(according to [`FindIdentityProperty` convention](../../client-api/configuration/identifier-generation/global.mdx#findidentityproperty)) +and there aren't any other fields involved in the query, then it is called a "collection query". +Simple collection queries that ask about documents with given IDs or where identifiers start with a given prefix +and don't require any additional handling like ordering, full-text searching, etc, are handled directly by the storage engine. +It means that querying by ID doesn't create an auto-index and has no extra cost. In terms of efficiency, it is the same as +loading documents using [`session->load`](../../client-api/session/loading-entities.mdx). 
+ + + + + +{`/** @var Order $order */ +$order = $session + ->query(Order::class) + ->whereEquals("Id", "orders/1-A") + ->firstOrDefault(); +`} + + + + +{`/** @var Order $order */ +$order = $session + ->advanced() + ->documentQuery(Order::class) + ->whereEquals("Id", "orders/1-A") + ->firstOrDefault(); +`} + + + + +{`from Orders +where id() = 'orders/1-A' +`} + + + + + + + +{`/** @var array $orders */ +$orders = $session + ->query(Order::class) + ->whereStartsWith("Id", "orders/1") + ->toList(); +`} + + + + +{`/** @var array $orders */ +$orders = $session + ->advanced() + ->documentQuery(Order::class) + ->whereStartsWith("Id", "orders/1") + ->toList(); +`} + + + + +{`from Orders +where startsWith(id(), 'orders/1') +`} + + + + + + +## `whereExists` - Where exists or doesn't exist + +To find all documents in a collection that have a specified field, see [How to Filter by Field Presence](../../client-api/session/querying/how-to-filter-by-field.mdx). + +To find all documents in a collection that don't have a specified field, see [How to Filter by Non-Existing Field](../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx). + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_filtering-python.mdx b/versioned_docs/version-7.1/indexes/querying/_filtering-python.mdx new file mode 100644 index 0000000000..3f5068cc25 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_filtering-python.mdx @@ -0,0 +1,361 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* One of the most basic functionalities of querying is the ability to filter out data and return records that match a given condition. + +* Index queries can be executed using - + * [query_index_type](../../client-api/session/querying/how-to-query.mdx) from the basic `session` API + * [RQL](../../client-api/session/querying/what-is-rql.mdx) - Raven Query Language + +* The examples in this page demonstrate how filtering is applied by each of these methods. 
+
+* In this page:
+  * [`where_equals` - Where equals](../../indexes/querying/filtering.mdx#where_equals---where-equals)
+  * [`where_greater_than` - Numeric property](../../indexes/querying/filtering.mdx#where_greater_than---numeric-property)
+  * [`where_greater_than` - Nested property](../../indexes/querying/filtering.mdx#where_greater_than---nested-property)
+  * [`where_in` - Single -vs- Multiple values](../../indexes/querying/filtering.mdx#where_in---single--vs--multiple-values)
+  * [`contains_any` - Any value from specified collection](../../indexes/querying/filtering.mdx#contains_any---any-value-from-specified-collection)
+  * [`contains_all` - All values from specified collection](../../indexes/querying/filtering.mdx#contains_all---all-values-from-specified-collection)
+  * [`where_starts_with` - All records with given prefix](../../indexes/querying/filtering.mdx#where_starts_with---all-records-with-given-prefix)
+  * [`where_ends_with` - All records with given suffix](../../indexes/querying/filtering.mdx#where_ends_with---all-records-with-given-suffix)
+  * [Where - Identifier Property](../../indexes/querying/filtering.mdx#where---identifier-property)
+  * [`where_exists` - Where exists or doesn't exist](../../indexes/querying/filtering.mdx#where_exists---where-exists-or-doesn)
+
+
+
+## `where_equals` - Where equals
+
+
+
+
+{`results = list(  # Materialize query by sending it to server for processing
+    session.query_index_type(
+        Employees_ByFirstAndLastName, Employee
+    )  # query 'Employees/ByFirstAndLastName' index
+    .where_equals("FirstName", "Robert")  # filtering predicates
+    .and_also()
+    .where_equals("LastName", "King")
+)
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}"
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName'
+where FirstName = 'Robert' and LastName = 'King'
+`}
+
+
+
+
+
+## `where_greater_than` - Numeric property
+
+
+
+
+{`results = list(  # Materialize query by sending it to server for processing
+    session.query_index_type(
+        Products_ByUnitsInStock, Product  # query 'Products/ByUnitsInStock' index
+    ).where_greater_than(
+        "UnitsInStock", 50
+    )  # filtering predicates
+)
+`}
+
+
+
+
+{`class Products_ByUnitsInStock(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from p in docs.Products select new {p.UnitsInStock}"
+`}
+
+
+
+
+{`from index 'Products/ByUnitsInStock'
+where UnitsInStock > 50
+`}
+
+
+
+
+
+## `where_greater_than` - Nested property
+
+
+
+
+{`# return all orders that were shipped to 'Albuquerque'
+results = list(session.query(object_type=Order).where_equals("ShipTo.City", "Albuquerque"))
+`}
+
+
+
+
+{`from Orders
+where ShipTo.City = 'Albuquerque'
+`}
+
+
+
+
+
+
+
+{`results = list(  # Materialize query by sending it to server for processing
+    session.query_index_type(
+        Order_ByOrderLinesCount, Order  # query 'Order/ByOrderLinesCount' index
+    ).where_greater_than(
+        "Lines.Count", 50
+    )  # filtering predicates
+)
+`}
+
+
+
+
+{`class Order_ByOrderLinesCount(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from o in docs.Orders select new {Lines_Count = o.Lines.Count}"
+`}
+
+
+
+
+{`from index 'Order/ByOrderLinesCount'
+where Lines.Count > 50
+`}
+
+
+
+
+
+## `where_in` - Single -vs- Multiple values
+
+When you want to check a single value against multiple values, `where_in` can become handy.
+To retrieve all employees where `FirstName` is either `Robert` or `Nancy`, we can issue the following query: + + + + +{`results = list( # Materialize query by sending it to server for processing + session.query_index_type( + Employees_ByFirstAndLastName, Employee # query 'Employees/ByFirstAndLastName' index + ).where_in( + "FirstName", ["Robert", "Nancy"] + ) # filtering predicates +) +`} + + + + +{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = "from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}" +`} + + + + +{`from index 'Employees/ByFirstAndLastName' +where FirstName IN ('Robert', 'Nancy') +`} + + + + + + +## `contains_any` - Any value from specified collection + +To check if enumeration contains **any** of the values from a specified collection, +use the `contains_any` method. + +For example, if you want to return all `BlogPosts` that contain any of the specified `Tags`: + + + + +{`results = list( # Materialize query by sending it to server for processing + session.query_index_type(BlogPosts_ByTags, BlogPost).contains_any( # query 'BlogPosts/ByTags' index + "Tags", ["Development", "Research"] + ) # filtering predicates +) +`} + + + + +{`class BlogPosts_ByTags(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = "from post in posts select new {post.Tags}" +`} + + + + +{`from index 'BlogPosts/ByTags' +where Tags IN ('Development', 'Research') +`} + + + + + + +## `contains_all` - All values from specified collection + +To check if an enumeration contains **all** of the values from a specified collection, +use the `contains_all` method. + +For example, if you want to return all the `BlogPosts` that contain all of the specified `Tags`: + + + + +{`results = list( # Materialize query by sending it to server for processing + session.query_index_type(BlogPosts_ByTags, BlogPost).contains_all( # query 'BlogPosts/ByTags' index + "Tags", ["Development", "Research"] + ) # filtering predicates +) +`} + + + + +{`class BlogPosts_ByTags(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = "from post in posts select new {post.Tags}" +`} + + + + +{`from index 'BlogPosts/ByTags' +where Tags ALL IN ('Development', 'Research') +`} + + + + + + +## `where_starts_with` - All records with given prefix + + + + +{`# return all products which name starts with 'ch' +results = list(session.query(object_type=Product).where_starts_with("Name", "ch")) +`} + + + + +{`from Products +where startsWith(Name, 'ch') +`} + + + + + + +## `where_ends_with` - All records with given suffix + + + + +{`results = list(session.query(object_type=Product).where_ends_with("Name", "ra")) +`} + + + + +{`from Products +where endsWith(Name, 'ra') +`} + + + + + + +## Where - Identifier Property + +Once a property used in the `where_equals` clause is recognized as an identity property of a given entity type +(according to [`FindIdentityProperty` convention](../../client-api/configuration/identifier-generation/global.mdx#findidentityproperty)) +and there aren't any other fields involved in the query, then it is called a "collection query". +Simple collection queries that ask about documents with given IDs or where identifiers start with a given prefix +and don't require any additional handling like ordering, full-text searching, etc, are handled directly by the storage engine. +It means that querying by ID doesn't create an auto-index and has no extra cost. 
In terms of efficiency, it is the same as +loading documents using [`session.load`](../../client-api/session/loading-entities.mdx). + + + + + +{`order = session.query(object_type=Order).where_equals("Id", "orders/1-A").first() +`} + + + + +{`from Orders +where id() = 'orders/1-A' +`} + + + + + + + +{`orders = list(session.query(object_type=Order).where_starts_with("Id", "orders/1")) +`} + + + + +{`from Orders +where startsWith(id(), 'orders/1') +`} + + + + + + +## `where_exists` - Where exists or doesn't exist + +To find all documents in a collection that have a specified field, see [How to Filter by Field Presence](../../client-api/session/querying/how-to-filter-by-field.mdx). + +To find all documents in a collection that don't have a specified field, see [How to Filter by Non-Existing Field](../../client-api/session/querying/how-to-filter-by-non-existing-field.mdx). + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_highlighting-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_highlighting-csharp.mdx new file mode 100644 index 0000000000..07db21c4f4 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_highlighting-csharp.mdx @@ -0,0 +1,440 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a [Full-Text Search query](../../indexes/querying/searching.mdx), + in addition to retrieving documents that contain the searched terms, you can + also request to get a list of text fragments that highlight the searched terms. + +* This article provides examples of highlighting search results when querying a static-index. + Prior to reading this article, please refer to [Highlight search results](../../client-api/session/querying/text-search/highlight-query-results.mdx) + for general knowledge about Highlighting and for dynamic-queries examples. + +* To search and get fragments with highlighted terms when querying a static-index, + the index field on which you search must be configured for highlighting. See examples below. + +* In this page: + * [Highlight results - Map index](../../indexes/querying/highlighting.mdx#highlight-results---map-index) + * [Highlight results - Map-Reduce index](../../indexes/querying/highlighting.mdx#highlight-results---map-reduce-index) + * [Customize highlight tags](../../indexes/querying/highlighting.mdx#customize-highlight-tags) + + +## Highlight results - Map index + +#### Configure a Map index for highlighting: + +
To search and get fragments with highlighted terms, +the index-field on which you search **must be configured as follows**: + + * **`FieldStorage.Yes`** - store the field in the index + * **`FieldIndexing.Search`** - allow Full-Text search + * **`FieldTermVector.WithPositionsAndOffsets`** - store the term's position and offsets + + + +{`// Define a Map index: +// =================== +public class Employees_ByNotes : + AbstractIndexCreationTask +\{ + // The IndexEntry class defines index-field 'EmployeeNotes' + public class IndexEntry + \{ + public string EmployeeNotes \{ get; set; \} + \} + + public Employees_ByNotes() + \{ + // The 'Map' function defines the content of index-field 'EmployeeNotes' + Map = employees => from employee in employees + select new IndexEntry + \{ + EmployeeNotes = employee.Notes[0] + \}; + + // Configure index-field 'EmployeeNotes' for highlighting: + // ======================================================= + Store(x => x.EmployeeNotes, FieldStorage.Yes); + Index(x => x.EmployeeNotes, FieldIndexing.Search); + TermVector(x => x.EmployeeNotes, FieldTermVector.WithPositionsAndOffsets); + \} +\} +`} + + +#### Query the index with `Search`: + + + + +{`List employeesResults = session + // Query the map index + .Query() + // Search for documents containing the term 'manager' + .Search(x => x.EmployeeNotes, "manager") + // Request to highlight the searched term by calling 'Highlight' + .Highlight(x => x.EmployeeNotes, 35, 2, out Highlightings managerHighlights) + .OfType() + .ToList(); +`} + + + + +{`List employeesResults = await asyncSession + // Query the map index + .Query() + // Search for documents containing the term 'manager' + .Search(x => x.EmployeeNotes, "manager") + // Request to highlight the searched term by calling 'Highlight' + .Highlight(x => x.EmployeeNotes, 35, 2, out Highlightings managerHighlights) + .OfType() + .ToListAsync(); +`} + + + + +{`List employeesResults = session.Advanced + // Query the map index + .DocumentQuery() + // Search for documents containing the term 'manager' + .Search(x => x.EmployeeNotes, "manager") + // Request to highlight the searched term by calling 'Highlight' + .Highlight(x => x.EmployeeNotes, 35, 2, out Highlightings managerHighlights) + .OfType() + .ToList(); +`} + + + + +{`from index "Employees/ByNotes" +where search(EmployeeNotes, "manager") +include highlight(EmployeeNotes, 35, 2) +`} + + + +#### Query the index with `Where`: + + + + +{`List employeesResults = session + // Query the map index + .Query() + // Request to highlight the searched term by calling 'Highlight' + .Highlight(x => x.EmployeeNotes, 35, 2, out Highlightings managerHighlights) + // Search for documents containing the term 'manager' + .Where(x => x.EmployeeNotes.Contains("manager")) + .OfType() + .ToList(); +`} + + + + +{`List employeesResults = await asyncSession + // Query the map index + .Query() + // Request to highlight the searched term by calling 'Highlight' + .Highlight(x => x.EmployeeNotes, 35, 2, out Highlightings managerHighlights) + // Search for documents containing the term 'manager' + .Where(x => x.EmployeeNotes.Contains("manager")) + .OfType() + .ToListAsync(); +`} + + + + +{`List employeesResults = session.Advanced + // Query the map index + .DocumentQuery() + // Request to highlight the searched term by calling 'Highlight' + .Highlight(x => x.EmployeeNotes, 35, 2, out Highlightings managerHighlights) + // Search for documents containing the term 'manager' + .WhereEquals("EmployeeNotes", "manager") + .OfType() + .ToList(); +`} + 
+
+
+
+{`from index "Employees/ByNotes"
+where EmployeeNotes == "manager"
+include highlight(EmployeeNotes, 35, 2)
+`}
+
+
+
+#### Process results:
+
+
+
+{`// 'employeesResults' contains all Employee DOCUMENTS that contain the term 'manager'.
+// 'managerHighlights' contains the text FRAGMENTS that highlight the 'manager' term.
+
+StringBuilder builder = new StringBuilder().AppendLine("<ul>");
+
+foreach (var employee in employeesResults)
+\{
+    // Call 'GetFragments' to get all fragments for the specified employee Id
+    string[] fragments = managerHighlights.GetFragments(employee.Id);
+    foreach (var fragment in fragments)
+    \{
+        builder.AppendLine($"<li>Doc: \{employee.Id\}</li>");
+        builder.AppendLine($"<li>Fragment: \{fragment\}</li>");
+        builder.AppendLine($"<li></li>");
+    \}
+\}
+
+string fragmentsHtml = builder.AppendLine("</ul>").ToString();
+
+// The resulting fragmentsHtml:
+// ============================
+
+// <ul>
+//   <li>Doc: employees/2-A</li>
+//   <li>Fragment: to sales manager in January</li>
+//   <li></li>
+//   <li>Doc: employees/5-A</li>
+//   <li>Fragment: to sales manager in March</li>
+//   <li></li>
+// </ul>
+`}
+
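+
+If you need the matched fragments as plain text rather than as HTML building blocks,
+the wrapping tags can simply be stripped before use. A minimal sketch (written in Python
+for brevity; it assumes the default wrappers are `b` elements, as described in the linked
+Highlight-tags page, and `fragments` stands for the fragment list retrieved above):
+
+
+{`import re
+
+def strip_highlight_tags(fragment: str) -> str:
+    # Assumption: highlighted terms are wrapped in '<b ...>' / '</b>' by default
+    return re.sub(r"</?b[^>]*>", "", fragment)
+
+plain_fragments = [strip_highlight_tags(f) for f in fragments]
+`}
+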
+ + + +## Highlight results - Map-Reduce index + +#### Configure a Map-Reduce index for highlighting: + +
To search and get fragments with highlighted terms in a Map-Reduce index: + + * The index-field on which you **search** must be configured with: + + * **`FieldStorage.Yes`** - store the field in the index + * **`FieldIndexing.Search`** - allow Full-Text search + * **`FieldTermVector.WithPositionsAndOffsets`** - store the term's position and offsets + + * The index-field by which you **group-by** must configured with: + + * **`FieldStorage.Yes`** - store the field in the index + + + +{`// Define a Map-Reduce index: +// ========================== +public class ContactDetailsPerCountry : + AbstractIndexCreationTask +\{ + // The IndexEntry class defines the index-fields + public class IndexEntry + \{ + public string Country \{ get; set; \} + public string ContactDetails \{ get; set; \} + \} + + public ContactDetailsPerCountry() + \{ + // The 'Map' function defines what will be indexed from each document in the collection + Map = companies => from company in companies + select new IndexEntry + \{ + Country = company.Address.Country, + ContactDetails = $"\{company.Contact.Name\} \{company.Contact.Title\}" + \}; + + // The 'Reduce' function specifies how data is grouped and aggregated + Reduce = results => from result in results + group result by result.Country into g + select new IndexEntry + \{ + // Set 'Country' as the group-by key + // 'ContactDetails' will be grouped per 'Country' + Country = g.Key, + + // Specify the aggregation + // here we use string.Join as the aggregation function + ContactDetails = string.Join(" ", g.Select(x => x.ContactDetails)) + \}; + + // Configure index-field 'Country' for Highlighting: + // ================================================= + Store(x => x.Country, FieldStorage.Yes); + + // Configure index-field 'ContactDetails' for Highlighting: + // ======================================================== + Store(x => x.ContactDetails, FieldStorage.Yes); + Index(x => x.ContactDetails, FieldIndexing.Search); + TermVector(x => x.ContactDetails, FieldTermVector.WithPositionsAndOffsets); + \} +\} +`} + + +#### Query the index: + + + + +{`// Define the key by which the resulting fragments are grouped: +// ============================================================ +HighlightingOptions options = new HighlightingOptions +{ + // Set 'GroupKey' to be the index's group-by key + // The resulting fragments will be grouped per 'Country' + GroupKey = "Country" +}; + +// Query the map-reduce index: +// =========================== +List detailsPerCountry = session + .Query() + // Search for results containing the term 'agent' + .Search(x => x.ContactDetails, "agent") + // Request to highlight the searched term by calling 'Highlight' + // Pass the defined 'options' + .Highlight(x => x.ContactDetails, 35, 2, options, out Highlightings agentHighlights) + .ToList(); +`} + + + + +{`// Define the key by which the resulting fragments are grouped +// =========================================================== +HighlightingOptions options = new HighlightingOptions +{ + // Set 'GroupKey' to be the index's group-by key + // The resulting fragments will be grouped per 'Country' + GroupKey = "Country" +}; + +// Query the map-reduce index: +// =========================== +List detailsPerCountry = await asyncSession + .Query() + // Search for results containing the term 'agent' + .Search(x => x.ContactDetails, "agent") + // Request to highlight the searched term by calling 'Highlight' + // Pass the defined 'options' + .Highlight(x => x.ContactDetails, 35, 2, options, out Highlightings 
agentHighlights)
+    .ToListAsync();
+`}
+
+
+
+
+{`// Define the key by which the resulting fragments are grouped
+// ===========================================================
+HighlightingOptions options = new HighlightingOptions
+{
+    // Set 'GroupKey' to be the index's group-by key
+    // The resulting fragments will be grouped per 'Country'
+    GroupKey = "Country"
+};
+
+// Query the map-reduce index:
+// ===========================
+List<ContactDetailsPerCountry.IndexEntry> detailsPerCountry = session.Advanced
+    .DocumentQuery<ContactDetailsPerCountry.IndexEntry, ContactDetailsPerCountry>()
+    // Search for results containing the term 'agent'
+    .Search(x => x.ContactDetails, "agent")
+    // Request to highlight the searched term by calling 'Highlight'
+    // Pass the defined 'options'
+    .Highlight(x => x.ContactDetails, 35, 2, options, out Highlightings agentHighlights)
+    .ToList();
+`}
+
+
+
+
+{`from index "ContactDetailsPerCountry"
+where search(ContactDetails, "agent")
+include highlight(ContactDetails, 35, 2, $p0)
+{"p0":{"GroupKey":"Country"}}
+`}
+
+
+
+#### Process results:
+
+
+
+{`// 'detailsPerCountry' contains the contacts details grouped per country.
+// 'agentHighlights' contains the text FRAGMENTS that highlight the 'agent' term.
+
+StringBuilder builder = new StringBuilder().AppendLine("<ul>");
+
+foreach (var item in detailsPerCountry)
+\{
+    // Call 'GetFragments' to get all fragments for the specified country key
+    string[] fragments = agentHighlights.GetFragments(item.Country);
+    foreach (var fragment in fragments)
+    \{
+        builder.AppendLine($"<li>Country: \{item.Country\}</li>");
+        builder.AppendLine($"<li>Fragment: \{fragment\}</li>");
+        builder.AppendLine($"<li></li>");
+    \}
+\}
+
+string fragmentsHtml = builder.AppendLine("</ul>").ToString();
+
+// The resulting fragmentsHtml:
+// ============================
+
+// <ul>
+//   <li>Country: UK</li>
+//   <li>Fragment: Devon Sales Agent Helen Bennett</li>
+//   <li></li>
+//   <li>Country: France</li>
+//   <li>Fragment: Sales Agent Carine Schmit</li>
+//   <li></li>
+//   <li>Country: France</li>
+//   <li>Fragment: Saveley Sales Agent Paul Henriot</li>
+//   <li></li>
+//   <li>Country: Argentina</li>
+//   <li>Fragment: Simpson Sales Agent Yvonne Moncad</li>
+//   <li></li>
+//   <li>Country: Argentina</li>
+//   <li>Fragment: Moncada Sales Agent Sergio</li>
+//   <li></li>
+//   <li>Country: Brazil</li>
+//   <li>Fragment: Sales Agent Anabela</li>
+//   <li></li>
+//   <li>Country: Belgium</li>
+//   <li>Fragment: Dewey Sales Agent Pascale</li>
+//   <li></li>
+// </ul>
+`}
+
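+
+Note the difference in the retrieval key between the two index types: with a Map index,
+fragments are fetched per document id, while with a `GroupKey` they are fetched per group value.
+A minimal illustration (Python client call names; the two `Highlightings` objects correspond to
+the ones captured in the examples above, and the key values are taken from the sample outputs):
+
+
+{`# Map index: fragments are keyed by the document id
+doc_fragments = manager_highlights.get_fragments("employees/2-A")
+
+# Map-Reduce index grouped by 'Country': fragments are keyed by the group value
+country_fragments = agent_highlights.get_fragments("UK")
+`}
+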
+ + + +## Customize highlight tags + +* **Default tags**: + + * Please refer to [Highlight tags](../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-tags) to learn about the default html tags used to wrap the highlighted terms. + +* **Customizing tags**: + + * The default html tags that wrap the highlighted terms can be customized to any other tags. + + * Customizing the wrapping tags when querying an index is done exactly the same as when making + a dynamic query where a `HighlightingOptions` object is passed to the `Highlight` method. + + * Follow the example in [Highlight - customize tags](../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags). + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_highlighting-java.mdx b/versioned_docs/version-7.1/indexes/querying/_highlighting-java.mdx new file mode 100644 index 0000000000..4f012156c6 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_highlighting-java.mdx @@ -0,0 +1,320 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +Another feature called `Highlighting` has been added to RavenDB to enhance the search UX. + +## Setup + + + + +{`public class Blog { + private String id; + private String title; + private String category; + private String content; + private Date publishedAt; + private String[] tags; + private List comments; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public String getCategory() { + return category; + } + + public void setCategory(String category) { + this.category = category; + } + + public String getContent() { + return content; + } + + public void setContent(String content) { + this.content = content; + } + + public Date getPublishedAt() { + return publishedAt; + } + + public void setPublishedAt(Date publishedAt) { + this.publishedAt = publishedAt; + } + + public String[] getTags() { + return tags; + } + + public void setTags(String[] tags) { + this.tags = tags; + } + + public List getComments() { + return comments; + } + + public void setComments(List comments) { + this.comments = comments; + } +} +`} + + + + +{`public class BlogComment { + + private String title; + private String content; + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public String getContent() { + return content; + } + + public void setContent(String content) { + this.content = content; + } +} +`} + + + + + + + +{`public static class BlogPosts_ByContent extends AbstractIndexCreationTask { + public static class Result { + private String content; + + public String getContent() { + return content; + } + + public void setContent(String content) { + this.content = content; + } + } + + public BlogPosts_ByContent() { + map = "docs.Posts.Select(post => new { post.content })"; + index("content", FieldIndexing.SEARCH); + store("content", FieldStorage.YES); + termVector("content", FieldTermVector.WITH_POSITIONS_AND_OFFSETS); + } +} +`} + + + + + + +Each of the fields on which we want to use **Highlighting** needs to have: + +- **FieldIndexing** set to `SEARCH` +- **FieldStorage** set to `YES` +- **FieldTermVector** set to `WITH_POSITIONS_AND_OFFSETS` + + + +## Usage + +To use Highlighting we 
just need to call one of the `highlight` query methods. The basic usage can be as simple as:
+
+
+
+{`Reference<Highlightings> highlightsRef = new Reference<>();
+List<Blog> results = session
+    .advanced()
+    .documentQuery(Blog.class, BlogPosts_ByContent.class)
+    .highlight("content", 128, 1, highlightsRef)
+    .search("content", "raven")
+    .toList();
+
+StringBuilder builder = new StringBuilder();
+builder.append("<ul>");
+
+for (Blog result : results) \{
+    // Use the first fragment found for each result
+    String[] fragments = highlightsRef.value.getFragments(result.getId());
+    builder.append("<li>")
+        .append(fragments[0])
+        .append("</li>");
+\}
+
+builder.append("</ul>");
+String ul = builder.toString();
+`}
+
+ +This will return the list of results and for each result we will be displaying first found fragment with the length up to 128 characters. + +### Highlighting + Projections + +Highlighting can also be done when projections are performed. + + + + +{`Reference highlightsRef = new Reference<>(); +HighlightingOptions highlightingOptions = new HighlightingOptions(); +highlightingOptions.setPreTags(new String[] { "**" }); +highlightingOptions.setPostTags(new String[] { "**" }); +List results = session + .query(BlogPosts_ByContent.class, BlogPosts_ByContent.class) + .highlight("content", 128, 1, highlightingOptions, highlightsRef) + .search("content", "raven") + .selectFields(BlogPosts_ByContent.Result.class) + .toList(); +`} + + + + +{`public static class BlogPosts_ByContent extends AbstractIndexCreationTask { + public static class Result { + private String content; + + public String getContent() { + return content; + } + + public void setContent(String content) { + this.content = content; + } + } + + public BlogPosts_ByContent() { + map = "docs.Posts.Select(post => new { post.content })"; + index("content", FieldIndexing.SEARCH); + store("content", FieldStorage.YES); + termVector("content", FieldTermVector.WITH_POSITIONS_AND_OFFSETS); + } +} +`} + + + + +### Highlighting + Map-Reduce + +Highlighting can be performed when executing queries on map-reduce indexes. + + + + +{`// highlighting 'content', but marking 'category' as key +Reference highlightsRef = new Reference<>(); +HighlightingOptions highlightingOptions = new HighlightingOptions(); +highlightingOptions.setPreTags(new String[] { "**" }); +highlightingOptions.setPostTags(new String[] { "**" }); +highlightingOptions.setGroupKey("category"); +List results = session + .advanced() + .documentQuery(BlogPosts_ByCategory_Content.Result.class, BlogPosts_ByCategory_Content.class) + .highlight("content", 128, 1, highlightingOptions, highlightsRef) + .search("content", "raven") + .toList(); + +// get fragments for 'news' category +String[] newsHighlightings = highlightsRef.value.getFragments("news"); +`} + + + + +{`public static class BlogPosts_ByCategory_Content extends AbstractIndexCreationTask { + public static class Result { + private String category; + private String content; + + public String getCategory() { + return category; + } + + public void setCategory(String category) { + this.category = category; + } + + public String getContent() { + return content; + } + + public void setContent(String content) { + this.content = content; + } + } + + public BlogPosts_ByCategory_Content() { + map = "docs.Posts.Select(post => new { post.category, post.content })"; + + reduce = "results.GroupBy(result => result.Category).Select(g => new {" + + " category = g.Key, " + + " Content = string.Join(\\" \\", g.Select(r => r.content)) " + + "}"; + + index("content", FieldIndexing.SEARCH); + store("content", FieldStorage.YES); + termVector("content", FieldTermVector.WITH_POSITIONS_AND_OFFSETS); + } +} +`} + + + + +## Remarks + + +Default `` tags are coloured and colours are returned in following order: + +- <span style="border-left: 10px solid yellow"> </span>yellow, +- <span style="border-left: 10px solid lawngreen"> </span>lawngreen, +- <span style="border-left: 10px solid aquamarine"> </span>aquamarine, +- <span style="border-left: 10px solid magenta"> </span>magenta, +- <span style="border-left: 10px solid palegreen"> </span>palegreen, +- <span style="border-left: 10px solid coral"> </span>coral, +- <span style="border-left: 10px solid wheat"> 
</span>wheat, +- <span style="border-left: 10px solid khaki"> </span>khaki, +- <span style="border-left: 10px solid lime"> </span>lime, +- <span style="border-left: 10px solid deepskyblue"> </span>deepskyblue, +- <span style="border-left: 10px solid deeppink"> </span>deeppink, +- <span style="border-left: 10px solid salmon"> </span>salmon, +- <span style="border-left: 10px solid peachpuff"> </span>peachpuff, +- <span style="border-left: 10px solid violet"> </span>violet, +- <span style="border-left: 10px solid mediumpurple"> </span>mediumpurple, +- <span style="border-left: 10px solid palegoldenrod"> </span>palegoldenrod, +- <span style="border-left: 10px solid darkkhaki"> </span>darkkhaki, +- <span style="border-left: 10px solid springgreen"> </span>springgreen, +- <span style="border-left: 10px solid turquoise"> </span>turquoise, +- <span style="border-left: 10px solid powderblue"> </span>powderblue + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_highlighting-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_highlighting-nodejs.mdx new file mode 100644 index 0000000000..d82faa655c --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_highlighting-nodejs.mdx @@ -0,0 +1,362 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a [Full-Text Search query](../../indexes/querying/searching.mdx), + in addition to retrieving documents that contain the searched terms, you can + also request to get a list of text fragments that highlight the searched terms. + +* This article provides examples of highlighting search results when querying a static-index. + Prior to reading this article, please refer to [Highlight search results](../../client-api/session/querying/text-search/highlight-query-results.mdx) + for general knowledge about Highlighting and for dynamic-queries examples. + +* To search and get fragments with highlighted terms when querying a static-index, + the index field on which you search must be configured for highlighting. See examples below. 
+ +* In this page: + * [Highlight results - Map index](../../indexes/querying/highlighting.mdx#highlight-results---map-index) + * [Highlight results - Map-Reduce index](../../indexes/querying/highlighting.mdx#highlight-results---map-reduce-index) + * [Customize highlight tags](../../indexes/querying/highlighting.mdx#customize-highlight-tags) + + +## Highlight results - Map index + + + +**Configure a Map index for highlighting**: +To search and get fragments with highlighted terms, +the index-field on which you search **must be configured as follows**: + + * Store the index-field in the index + * Configure the index-field for Full-Text search + * Store the index-field term vector with position and offsets + + + +{`// Define a Map index: +// =================== +class Employees_ByNotes extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + this.map("employees", employee => \{ + return \{ + EmployeeNotes: employee.Notes[0] + \}; + \}); + + // Configure index-field 'EmployeeNotes' for highlighting: + // ======================================================= + this.store("EmployeeNotes", "Yes"); + this.index("EmployeeNotes", "Search"); + this.termVector("EmployeeNotes", "WithPositionsAndOffsets"); + \} +\} +`} + + + + + + + +**Query the index with `search`**: + + + +{`// Define a param that will get the highlighted text fragments +let managerHighlights; + +const employeesResults = await session + // Query the map index + .query({ indexName: "Employees/ByNotes" }) + // Search for documents containing the term 'manager' + .search("EmployeeNotes", "manager") + // Request to highlight the searched term by calling 'highlight' + .highlight({ + fieldName: "EmployeeNotes", + fragmentLength: 35, + fragmentCount: 2 + }, x => { managerHighlights = x; }) + .all(); +`} + + + + +{`from index "Employees/ByNotes" +where search(EmployeeNotes, "manager") +include highlight(EmployeeNotes, 35, 2) +`} + + + + + + + + +**Query the index with `whereEquals`**: + + + +{`// Define a param that will get the highlighted text fragments +let managerHighlights; + +const employeesResults = await session + // Query the map index + .query({ indexName: "Employees/ByNotes" }) + // Search for documents containing the term 'manager' + .whereEquals("EmployeeNotes", "manager") + // Request to highlight the searched term by calling 'highlight' + .highlight({ + fieldName: "EmployeeNotes", + fragmentLength: 35, + fragmentCount: 2 + }, x => { managerHighlights = x; }) + .all(); +`} + + + + +{`from index "Employees/ByNotes" +where EmployeeNotes == "manager" +include highlight(EmployeeNotes, 35, 2) +`} + + + + + + + + +**Process results**: + + +{`// 'employeesResults' contains all Employee DOCUMENTS that contain the term 'manager'. +// 'managerHighlights' contains the text FRAGMENTS that highlight the 'manager' term. + +let fragmentsHtml = "
    "; + +employeesResults.forEach((employee) => \{ + // Call 'getFragments' to get all fragments for the specified employee id + let fragments = managerHighlights.getFragments(employee.id); + + fragments.forEach((fragment) => \{ + fragmentsHtml += \`
  • Doc: $\{employee.id\}
  • \` + fragmentsHtml += \`
  • Fragment: $\{fragment\}
  • \`; + fragmentsHtml += \`
  • \`; + \}); +\}); + +fragmentsHtml += "
"; + +// The resulting fragmentsHtml: +// ============================ + +//
    +//
  • Doc: employees/2-A
  • +//
  • Fragment: to sales manager in January
  • +//
  • +//
  • Doc: employees/5-A
  • +//
  • Fragment: to sales manager in March
  • +//
  • +//
+`} +
+
+ +
+ + + +## Highlight results - Map-Reduce index + + + +**Configure a Map-Reduce index for highlighting**: +In order to search and get fragments with highlighted terms in a Map-Reduce index: + + * The index-field on which you **search** must be configured with: + + * Store the index-field in the index + + * Configure the index-field for Full-Text search + + * Store the index-field term vector with position and offsets + + * The index-field by which you **group-by** must be stored in the index. + + + +{`// Define a Map-Reduce index: +// ========================== +class ContactDetailsPerCountry extends AbstractJavaScriptIndexCreationTask \{ + constructor () \{ + super(); + + // The 'map' function defines what will be indexed from each document in the collection + this.map("companies", company => \{ + return \{ + Country: company.Address.Country, + ContactDetails: company.Contact.Name + " " + company.Contact.Title + \}; + \}); + + // The 'reduce' function specifies how data is grouped and aggregated + this.reduce(results => results.groupBy(x => x.Country).aggregate(g => \{ + return \{ + // Set 'Country' as the group-by key + // 'ContactDetails' will be grouped per 'Country' + Country: g.key, + + // Specify the aggregation + // here we use 'join' as the aggregation function + ContactDetails: g.values.map(x => x.ContactDetails).join(' ') + \} + \})); + + // Configure index-field 'Country' for highlighting: + // ================================================= + this.store("Country", "Yes"); + + // Configure index-field 'ContactDetails' for highlighting: + // ======================================================= + this.store("ContactDetails", "Yes"); + this.index("ContactDetails", "Search"); + this.termVector("ContactDetails", "WithPositionsAndOffsets"); + \} +\} +`} + + + + + + + +**Query the index**: + + + +{`// Define the key by which the resulting fragments are grouped: +// ============================================================ +const options = { + // Set 'groupKey' to be the index's group-by key + // The resulting fragments will be grouped per 'Country' + groupKey: "Country" +}; + +let agentHighlights; + +// Query the map-reduce index: +// =========================== +const detailsPerCountry = await session + .query({ indexName: "ContactDetailsPerCountry" }) + // Search for results containing the term 'agent' + .search("ContactDetails", "agent") + // Request to highlight the searched term by calling 'highlight' + // Pass the defined 'options' + .highlight({ + fieldName: "ContactDetails", + fragmentLength: 35, + fragmentCount: 2, + ...options + }, x => { agentHighlights = x; }) + .all(); +`} + + + + +{`from index "ContactDetailsPerCountry" +where search(ContactDetails, "agent") +include highlight(ContactDetails, 35, 2, $p0) +{"p0":{"groupKey":"Country"}} +`} + + + + + + + + +**Process results**: + + +{`// 'detailsPerCountry' contains the contacts details grouped per country. +// 'agentHighlights' contains the text FRAGMENTS that highlight the 'agent' term. + +let fragmentsHtml = "
    "; + +employeesResults.forEach((item) => \{ + // Call 'getFragments' to get all fragments for the specified country key + let fragments = agentHighlights.getFragments(item.Country); + + fragments.forEach((fragment) => \{ + fragmentsHtml += \`
  • Doc: $\{item.Country\}
  • \` + fragmentsHtml += \`
  • Fragment: $\{fragment\}
  • \`; + fragmentsHtml += \`
  • \`; + \}); +\}); + +fragmentsHtml += "
"; + +// The resulting fragmentsHtml: +// ============================ + +//
    +//
  • Country: UK
  • +//
  • Fragment: Devon Sales Agent Helen Bennett
  • +//
  • +//
  • Country: France
  • +//
  • Fragment: Sales Agent Carine Schmit
  • +//
  • +//
  • Country: France
  • +//
  • Fragment: Saveley Sales Agent Paul Henriot
  • +//
  • +//
  • Country: Argentina
  • +//
  • Fragment: Simpson Sales Agent Yvonne Moncad
  • +//
  • +//
  • Country: Argentina
  • +//
  • Fragment: Moncada Sales Agent Sergio
  • +//
  • +//
  • Country: Brazil
  • +//
  • Fragment: Sales Agent Anabela
  • +//
  • +//
  • Country: Belgium
  • +//
  • Fragment: Dewey Sales Agent Pascale
  • +//
  • +//
+`} +
+
+ +
+ + + +## Customize highlight tags + +* **Default tags**: + + * Please refer to [Highlight tags](../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-tags) to learn about the default html tags used to wrap the highlighted terms. + +* **Customizing tags**: + + * The default html tags that wrap the highlighted terms can be customized to any other tags. + + * Customizing the wrapping tags when querying an index is done exactly the same as when making + a dynamic query where the `preTags` and `postTags` parameters are passed to the `highlight` method. + + * Follow the example in [Highlight - customize tags](../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags). + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_highlighting-php.mdx b/versioned_docs/version-7.1/indexes/querying/_highlighting-php.mdx new file mode 100644 index 0000000000..ed87be03da --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_highlighting-php.mdx @@ -0,0 +1,371 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a [Full-Text Search query](../../indexes/querying/searching.mdx), + in addition to retrieving documents that contain the searched terms, you can + also request to get a list of text fragments that highlight the searched terms. + +* This article provides examples of highlighting search results when querying a static-index. + Prior to reading this article, please refer to [Highlight search results](../../client-api/session/querying/text-search/highlight-query-results.mdx) + for general knowledge about Highlighting and for dynamic-queries examples. + +* To search and get fragments with highlighted terms when querying a static-index, + the index field on which you search must be configured for highlighting. See examples below. + +* In this page: + * [Highlight results - Map index](../../indexes/querying/highlighting.mdx#highlight-results---map-index) + * [Highlight results - Map-Reduce index](../../indexes/querying/highlighting.mdx#highlight-results---map-reduce-index) + * [Customize highlight tags](../../indexes/querying/highlighting.mdx#customize-highlight-tags) + + +## Highlight results - Map index + +#### Configure a Map index for highlighting: + +To search and get fragments with highlighted terms, +the index-field on which you search **must be configured as follows**: + +* **`FieldStorage::yes`** - store the field in the index +* **`FieldIndexing::search`** - allow Full-Text search +* **`FieldTermVector::withPositionsAndOffsets`** - store the term's position and offsets + + + +{`// Define a Map index: +// =================== +// The IndexEntry class defines index-field 'EmployeeNotes' +class Employees_ByNotes_IndexEntry +\{ + public ?string $employeeNotes = null; + + public function getEmployeeNotes(): ?string + \{ + return $this->employeeNotes; + \} + + public function setEmployeeNotes(?string $employeeNotes): void + \{ + $this->employeeNotes = $employeeNotes; + \} +\} + +class Employees_ByNotes extends AbstractIndexCreationTask +\{ + public function __construct() + \{ + parent::__construct(); + + // The 'Map' function defines the content of index-field 'EmployeeNotes' + $this->map = + "from employee in docs.Employees " . + "select new \{ " . + " EmployeeNotes = employee.notes[0] " . 
+ "\}"; + + // Configure index-field 'EmployeeNotes' for highlighting: + // ======================================================= + $this->store("EmployeeNotes", FieldStorage::yes()); + $this->index("EmployeeNotes", FieldIndexing::search()); + $this->termVector("EmployeeNotes", FieldTermVector::withPositionsAndOffsets()); + \} +\} +`} + + +#### Query the index with `search`: + + + + +{`$managerHighlights = new Highlightings(); + +/** @var array $employeesResults */ +$employeesResults = $session + // Query the map index + ->query(Employees_ByNotes_IndexEntry::class, Employees_ByNotes::class) + // Search for documents containing the term 'manager' + ->search("EmployeeNotes", "manager") + // Request to highlight the searched term by calling 'Highlight' + ->highlight("EmployeeNotes", 35, 2, null, $managerHighlights) + ->ofType(Employee::class) + ->toList(); +`} + + + + +{`from index "Employees/ByNotes" +where search(EmployeeNotes, "manager") +include highlight(EmployeeNotes, 35, 2) +`} + + + +#### Query the index with `whereEquals`: + + + + +{`$managerHighlights = new Highlightings(); + +/** @var array $employeesResults */ +$employeesResults = $session + // Query the map index + ->query(Employees_ByNotes_IndexEntry::class, Employees_ByNotes::class) + // Request to highlight the searched term by calling 'Highlight' + ->highlight("EmployeeNotes", 35, 2, null, $managerHighlights) + // Search for documents containing the term 'manager' + ->whereEquals("EmployeeNotes", "manager") + ->ofType(Employee::class) + ->toList(); +`} + + + + +{`from index "Employees/ByNotes" +where EmployeeNotes == "manager" +include highlight(EmployeeNotes, 35, 2) +`} + + + +#### Process results: + + + +{`// 'employeesResults' contains all Employee DOCUMENTS that contain the term 'manager'. +// 'managerHighlights' contains the text FRAGMENTS that highlight the 'manager' term. + +$builder = "
    "; + +foreach ($employeesResults as $employee) +\{ + // Call 'GetFragments' to get all fragments for the specified employee Id + $fragments = $managerHighlights->getFragments($employee->getId()); + foreach ($fragments as $fragment) + \{ + $builder .= "
  • Doc: " . $employee->getId() . "
  • "; + $builder .= "
  • Fragment: " . $fragment . "
  • "; + $builder .= "
  • "; + \} +\} + +$fragmentsHtml = $builder . "
"; + +// The resulting fragmentsHtml: +// ============================ + +//
    +//
  • Doc: employees/2-A
  • +//
  • Fragment: to sales manager in January
  • +//
  • Doc: employees/5-A
  • +//
  • Fragment: to sales manager in March
  • +//
  • +//
+`} +
+
+ + + +## Highlight results - Map-Reduce index + +#### Configure a Map-Reduce index for highlighting: + +To search and get fragments with highlighted terms in a Map-Reduce index: + +* The index-field on which you **search** must be configured with: + * **`FieldStorage::yes`** - store the field in the index + * **`FieldIndexing::search`** - allow Full-Text search + * **`FieldTermVector::withPositionsAndOffsets`** - store the term's position and offsets + +* The index-field by which you **group-by** must configured with: + * **`FieldStorage::yes`** - store the field in the index + + + +{`// Define a Map-Reduce index: +// ========================== + +// The IndexEntry class defines the index-fields +class ContactDetailsPerCountry_IndexEntry +\{ + private ?string $country = null; + private ?string $contactDetails = null; + + public function getCountry(): ?string + \{ + return $this->country; + \} + + public function setCountry(?string $country): void + \{ + $this->country = $country; + \} + + public function getContactDetails(): ?string + \{ + return $this->contactDetails; + \} + + public function setContactDetails(?string $contactDetails): void + \{ + $this->contactDetails = $contactDetails; + \} +\} +class ContactDetailsPerCountry extends AbstractIndexCreationTask +\{ + public function __construct() + \{ + parent::__construct(); + + // The 'Map' function defines what will be indexed from each document in the collection + $this->map = + "from company in docs.Companies " . + "select new \{ " . + " Country = company.Address.Country, " . + " ContactDetails = company.Contact.Name + ' ' + company.Contact.Title ". + "\}"; + + // The 'Reduce' function specifies how data is grouped and aggregated + $this->reduce = + "from result in results " . + "group result by result.country into g " . + "select new \{ " . + // Set 'Country' as the group-by key + // 'ContactDetails' will be grouped per 'Country' + " Country = g.key, " . + // Specify the aggregation + // here we use string.Join as the aggregation function + " ContactDetails = string.Join(\\" \\", g.Select(x => x.contact_details) )" . 
+ "\}" ; + + // Configure index-field 'Country' for Highlighting: + // ================================================= + $this->store("Country", FieldStorage::yes()); + + // Configure index-field 'ContactDetails' for Highlighting: + // ======================================================== + $this->store("ContactDetails", FieldStorage::yes()); + $this->index("ContactDetails", FieldIndexing::search()); + $this->termVector("ContactDetails", FieldTermVector::withPositionsAndOffsets()); + \} +\} +`} + + +#### Query the index: + + + + +{`// Define the key by which the resulting fragments are grouped: +// ============================================================ +$options = new HighlightingOptions(); +// Set 'GroupKey' to be the index's group-by key +// The resulting fragments will be grouped per 'Country' +$options->setGroupKey("Country"); + +$agentHighlights = new Highlightings(); + +// Query the map-reduce index: +// =========================== +/** @var array $detailsPerCountry */ +$detailsPerCountry = $session + ->query(ContactDetailsPerCountry_IndexEntry::class, ContactDetailsPerCountry::class) + // Search for results containing the term 'agent' + ->search("ContactDetails", "agent") + // Request to highlight the searched term by calling 'Highlight' + // Pass the defined 'options' + ->highlight("ContactDetails", 35, 2, $options, $agentHighlights) + ->toList(); +`} + + + + +{`from index "ContactDetailsPerCountry" +where search(ContactDetails, "agent") +include highlight(ContactDetails, 35, 2, $p0) +{"p0":{"GroupKey":"Country"}} +`} + + + +#### Process results: + + + +{`// 'detailsPerCountry' contains the contacts details grouped per country. +// 'agentHighlights' contains the text FRAGMENTS that highlight the 'agent' term. + +$builder = "
    "; + +foreach ($detailsPerCountry as $item) \{ + // Call 'GetFragments' to get all fragments for the specified country key + $fragments = $agentHighlights->getFragments($item->getCountry()); + foreach ($fragments as $fragment) + \{ + $builder .= "
  • Country: " . $item->getCountry() . "
  • "; + $builder .= "
  • Fragment: " . $fragment . "
  • "; + $builder .= "
  • "; + \} +\} + +$fragmentsHtml = $builder . "
"; + +// The resulting fragmentsHtml: +// ============================ + +//
    +//
  • Country: UK
  • +//
  • Fragment: Devon Sales Agent Helen Bennett
  • +//
  • +//
  • Country: France
  • +//
  • Fragment: Sales Agent Carine Schmit
  • +//
  • +//
  • Country: France
  • +//
  • Fragment: Saveley Sales Agent Paul Henriot
  • +//
  • +//
  • Country: Argentina
  • +//
  • Fragment: Simpson Sales Agent Yvonne Moncad
  • +//
  • +//
  • Country: Argentina
  • +//
  • Fragment: Moncada Sales Agent Sergio
  • +//
  • +//
  • Country: Brazil
  • +//
  • Fragment: Sales Agent Anabela
  • +//
  • +//
  • Country: Belgium
  • +//
  • Fragment: Dewey Sales Agent Pascale
  • +//
  • +//
+`} +
+
+ + + +## Customize highlight tags + +* **Default tags**: + + * Please refer to [Highlight tags](../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-tags) to learn about the default html tags used to wrap the highlighted terms. + +* **Customizing tags**: + + * The default html tags that wrap the highlighted terms can be customized to any other tags. + + * Customizing the wrapping tags when querying an index is done exactly the same as when making + a dynamic query where a `HighlightingOptions` object is passed to the `Highlight` method. + + * Follow the example in [Highlight - customize tags](../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags). + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_highlighting-python.mdx b/versioned_docs/version-7.1/indexes/querying/_highlighting-python.mdx new file mode 100644 index 0000000000..fc444ff354 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_highlighting-python.mdx @@ -0,0 +1,331 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* When making a [Full-Text Search query](../../indexes/querying/searching.mdx), + in addition to retrieving documents that contain the searched terms, you can + also request to get a list of text fragments that highlight the searched terms. + +* This article provides examples of highlighting search results when querying a static-index. + Prior to reading this article, please refer to [Highlight search results](../../client-api/session/querying/text-search/highlight-query-results.mdx) + for general knowledge about Highlighting and for dynamic-queries examples. + +* To search and get fragments with highlighted terms when querying a static-index, + the index field on which you search must be configured for highlighting. See examples below. + +* In this page: + * [Highlight results - Map index](../../indexes/querying/highlighting.mdx#highlight-results---map-index) + * [Highlight results - Map-Reduce index](../../indexes/querying/highlighting.mdx#highlight-results---map-reduce-index) + * [Customize highlight tags](../../indexes/querying/highlighting.mdx#customize-highlight-tags) + + +## Highlight results - Map index + +#### Configure a Map index for highlighting: + +
To search and get fragments with highlighted terms,
+the index-field on which you search **must be configured as follows**:
+
+  * **`FieldStorage.YES`** - store the field in the index
+  * **`FieldIndexing.SEARCH`** - allow Full-Text search
+  * **`FieldTermVector.WITH_POSITIONS_AND_OFFSETS`** - store the term's position and offsets
+
+
+
+{`# Define a Map index:
+# ===================
+class Employees_ByNotes(AbstractIndexCreationTask):
+    # The IndexEntry class defines index-field 'employee_notes'
+    class IndexEntry:
+        def __init__(self, employee_notes: str = None):
+            self.employee_notes = employee_notes
+
+    def __init__(self):
+        super().__init__()
+        self.map = "from employee in docs.Employees select new \{ employee_notes = employee.notes[0] \}"
+
+        # Configure index-field 'employee_notes' for highlighting:
+        # ========================================================
+        self._store("employee_notes", FieldStorage.YES)
+        self._index("employee_notes", FieldIndexing.SEARCH)
+        self._term_vector("employee_notes", FieldTermVector.WITH_POSITIONS_AND_OFFSETS)
+`}
+
+
+#### Query the index with `search`:
+
+
+
+
+{`# Define a callback function that takes Highlightings as an argument
+# This function will be called by passing the resulting Highlightings
+def __highlight_callback(highlightings: Highlightings):
+    # Process the results here
+    highlightings.get_fragments(...)
+    ...
+
+employees_results = list(
+    session
+    # Query the map index
+    .query_index_type(Employees_ByNotes, Employees_ByNotes.IndexEntry)
+    # Search for documents containing the term 'manager'
+    .search("employee_notes", "manager")
+    # Request to highlight the searched term by calling 'highlight'
+    # Pass the callback function
+    .highlight("employee_notes", 35, 2, __highlight_callback).of_type(Employee)
+)
+`}
+
+
+
+
+{`from index "Employees/ByNotes"
+where search(EmployeeNotes, "manager")
+include highlight(EmployeeNotes, 35, 2)
+`}
+
+
+
+#### Query the index with `where_equals`:
+
+
+
+
+{`manager_highlights: Optional[Highlightings] = None
+
+# Define a callback function that takes Highlightings as an argument
+# This function will be called by passing the resulting Highlightings
+def __manager_highlights_callback(highlightings: Highlightings):
+    # Process the results here or assign to a nonlocal variable to access them later
+    # e.g. highlightings.get_fragments(...)
+    nonlocal manager_highlights
+    manager_highlights = highlightings
+
+employees_results = list(
+    session
+    # Query the map index
+    .query_index_type(Employees_ByNotes, Employees_ByNotes.IndexEntry)
+    # Request to highlight the searched term by calling 'highlight'
+    .highlight("employee_notes", 35, 2, __manager_highlights_callback)
+    # Search for documents containing the term 'manager'
+    .where_equals("employee_notes", "manager").of_type(Employee)
+)
+`}
+
+
+
+
+{`from index "Employees/ByNotes"
+where EmployeeNotes == "manager"
+include highlight(EmployeeNotes, 35, 2)
+`}
+
+
+
+#### Process results:
+
+
+
+{`# 'employees_results' contains all Employee DOCUMENTS that contain the term 'manager'.
+# 'manager_highlights' contains the text FRAGMENTS that highlight the 'manager' term.
+builder = ["<ul>"]
    "] +for employee in employees_results: + # Call 'get_fragments' to get all fragments for the specified employee Id + fragments = manager_highlights.get_fragments(employee.Id) + for fragment in fragments: + builder.append(f"\\n
  • Doc: \{employee.Id\}
  • ") + builder.append(f"\\n
  • Fragment: \{fragment\}
  • ") + builder.append("\\n
  • ") + +builder.append("\\n
") +fragments_html = "".join(builder) + +# The resulting fragments_html: +# ============================= + +#
    +#
  • Doc: employees/2-A
  • +#
  • Fragment: to sales manager in January
  • +#
  • Doc: employees/5-A
  • +#
  • Fragment: to sales manager in March
  • +#
  • +#
+`} +
+
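+
+If you prefer to pair each resulting document with its fragments instead of building HTML,
+the captured `Highlightings` object can be folded into a plain dict once the query has
+materialized. A minimal sketch reusing the calls shown above (the callback and dict names
+are illustrative; as in the examples on this page, the snippet is assumed to run inside an
+enclosing function so that `nonlocal` is valid):
+
+
+{`manager_highlights: Optional[Highlightings] = None
+
+def __capture_highlights(highlightings: Highlightings):
+    nonlocal manager_highlights
+    manager_highlights = highlightings
+
+employees_results = list(
+    session.query_index_type(Employees_ByNotes, Employees_ByNotes.IndexEntry)
+    .search("employee_notes", "manager")
+    .highlight("employee_notes", 35, 2, __capture_highlights)
+    .of_type(Employee)
+)
+
+# Map each document id to its highlight fragments
+fragments_by_id = \{
+    employee.Id: manager_highlights.get_fragments(employee.Id)
+    for employee in employees_results
+\}
+`}
+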
+ + + +## Highlight results - Map-Reduce index + +#### Configure a Map-Reduce index for highlighting: + +
To search and get fragments with highlighted terms in a Map-Reduce index:
+
+  * The index-field on which you **search** must be configured with:
+
+      * **`FieldStorage.YES`** - store the field in the index
+      * **`FieldIndexing.SEARCH`** - allow Full-Text search
+      * **`FieldTermVector.WITH_POSITIONS_AND_OFFSETS`** - store the term's position and offsets
+
+  * The index-field by which you **group-by** must be configured with:
+
+      * **`FieldStorage.YES`** - store the field in the index
+
+
+
+{`# Define a Map-Reduce index:
+# ==========================
+class ContactDetailsPerCountry(AbstractIndexCreationTask):
+    # The IndexEntry class defines the index-fields
+    class IndexEntry:
+        def __init__(self, country: str = None, contact_details: str = None):
+            self.country = country
+            self.contact_details = contact_details
+
+    def __init__(self):
+        super().__init__()
+        # The 'map' function defines what will be indexed from each document in the collection
+        self.map = "from company in docs.Companies select new \{ country = company.Address.Country, contact_details = company.Contact.Name + ' ' + company.Contact.Title \}"
+
+        # The 'reduce' function specifies how data is grouped and aggregated
+
+        # Set 'country' as the group-by key
+        # 'contact_details' will be grouped per 'country'
+
+        # Specify the aggregation
+        # we'll use string.Join as the aggregation function
+        self.reduce = (
+            "from result in results group result by result.country into g "
+            "select new \{ country = g.key, contact_details = string.Join(\\" \\", g.Select(x => x.contact_details)) \}"
+        )
+
+        # Configure index-field 'country' for Highlighting:
+        # =================================================
+        self._store("country", FieldStorage.YES)
+
+        # Configure index-field 'contact_details' for Highlighting
+        self._store("contact_details", FieldStorage.YES)
+        self._index("contact_details", FieldIndexing.SEARCH)
+        self._term_vector("contact_details", FieldTermVector.WITH_POSITIONS_AND_OFFSETS)
+`}
+
+
+#### Query the index:
+
+
+
+
+{`# Define the key by which the resulting fragments are grouped:
+# ============================================================
+options = HighlightingOptions(
+    # Set 'group_key' to be the index's group-by key
+    # The resulting fragments will be grouped per 'country'
+    group_key="Country"
+)
+
+# Define a callback function that takes Highlightings as an argument:
+# ===================================================================
+agent_highlights: Optional[Highlightings] = None
+
+def __agent_highlights_callback(highlightings: Highlightings):
+    # Process the results here or assign to a nonlocal variable to access them later
+    nonlocal agent_highlights
+    agent_highlights = highlightings
+
+# Query the map-reduce index:
+# ===========================
+details_per_country = list(
+    session.query_index_type(ContactDetailsPerCountry, ContactDetailsPerCountry.IndexEntry)
+    # Search for results containing the term 'agent'
+    .search("contact_details", "agent")
+    # Request to highlight the searched term by calling 'highlight'
+    # Pass the defined 'options'
+    .highlight("contact_details", 35, 2, __agent_highlights_callback, options)
+)
+`}
+
+
+
+
+{`from index "ContactDetailsPerCountry"
+where search(ContactDetails, "agent")
+include highlight(ContactDetails, 35, 2, $p0)
+{"p0":{"GroupKey":"Country"}}
+`}
+
+
+
+#### Process results:
+
+
+
+{`# 'details_per_country' contains the contacts details grouped per country.
+# 'agent_highlights' contains the text FRAGMENTS that highlight the 'agent' term.
+
+builder = ["<ul>"]
    "] + +for item in details_per_country: + # Call 'get_fragments' to get all fragments for the specified country key + fragments = agent_highlights.get_fragments(item.country) + for fragment in fragments: + builder.append(f"\\n
  • Country: \{item.country\}
  • ") + builder.append(f"\\n
  • Fragment: \{fragment\}
  • ") + builder.append(f"\\n
  • ") +builder.append("\\n
") + +fragments_html = "".join(builder) + +# The resulting fragmentsHtml: +# ============================ +# +#
    +#
  • Country: UK
  • +#
  • Fragment: Devon Sales Agent Helen Bennett
  • +#
  • +#
  • Country: France
  • +#
  • Fragment: Sales Agent Carine Schmit
  • +#
  • +#
  • Country: France
  • +#
  • Fragment: Saveley Sales Agent Paul Henriot
  • +#
  • +#
  • Country: Argentina
  • +#
  • Fragment: Simpson Sales Agent Yvonne Moncad
  • +#
  • +#
  • Country: Argentina
  • +#
  • Fragment: Moncada Sales Agent Sergio
  • +#
  • +#
  • Country: Brazil
  • +#
  • Fragment: Sales Agent Anabela
  • +#
  • +#
  • Country: Belgium
  • +#
  • Fragment: Dewey Sales Agent Pascale
  • +#
  • +#
+`} +
+
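+
+The tags that wrap the highlighted terms can also be customized when querying the index,
+by passing the same `HighlightingOptions` object (the next section links to the full
+examples). A rough sketch, assuming the Python client's `HighlightingOptions` accepts
+`pre_tags`/`post_tags` as in the dynamic-query examples:
+
+
+{`# Assumption: pre_tags/post_tags are accepted alongside group_key
+tag_options = HighlightingOptions(pre_tags=["+++"], post_tags=["+++"])
+
+def __tagged_highlights_callback(highlightings: Highlightings):
+    ...  # fragments will arrive wrapped in '+++' instead of the default tags
+
+results = list(
+    session.query_index_type(Employees_ByNotes, Employees_ByNotes.IndexEntry)
+    .search("employee_notes", "manager")
+    .highlight("employee_notes", 35, 2, __tagged_highlights_callback, tag_options)
+    .of_type(Employee)
+)
+`}
+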
+ + + +## Customize highlight tags + +* **Default tags**: + + * Please refer to [Highlight tags](../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight-tags) to learn about the default html tags used to wrap the highlighted terms. + +* **Customizing tags**: + + * The default html tags that wrap the highlighted terms can be customized to any other tags. + + * Customizing the wrapping tags when querying an index is done exactly the same as when making + a dynamic query where a `HighlightingOptions` object is passed to the `Highlight` method. + + * Follow the example in [Highlight - customize tags](../../client-api/session/querying/text-search/highlight-query-results.mdx#highlight---customize-tags). + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_include-explanations-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_include-explanations-csharp.mdx new file mode 100644 index 0000000000..e8b37a7f45 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_include-explanations-csharp.mdx @@ -0,0 +1,366 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article provides examples of including explanations when querying a static-index. + +* **Prior to this article**, please refer to [Include Query Explanations](../../client-api/session/querying/debugging/include-explanations.mdx) for dynamic-query examples + and general knowledge about including explanations to retrieve the score of resulting documents. + + + * Including explanations is available only when using **Lucene** as the underlying search engine for static-indexes. + * You can configure which search engine will be used. Learn how in [Selecting the search engine](../../indexes/search-engine/corax.mdx#selecting-the-search-engine). + +* In this page: + * [Include explanations when querying Map index](../../indexes/querying/include-explanations.mdx#include-explanations-when-querying-map-index) + * [Include explanations when querying Map-Reduce index](../../indexes/querying/include-explanations.mdx#include-explanations-when-querying-map-reduce-index) + * [Syntax](../../indexes/querying/include-explanations.mdx#syntax) + + +## Include explanations when querying Map index + + + + +{`public class Products_BySearchName : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string Name { get; set; } + } + + public Products_BySearchName() + { + Map = products => from product in products + select new IndexEntry() + { + Name = product.Name + }; + + // Configure the index-field 'Name' for FTS + Indexes.Add(x => x.Name, FieldIndexing.Search); + } +} +`} + + + + +{`List products = session + // Query the index + .Query() + // Convert the IRavenQueryable to IDocumentQuery + // to be able to use 'IncludeExplanations' + .ToDocumentQuery() + // Call 'IncludeExplanations', provide an out param for the explanations results + .IncludeExplanations(out Explanations explanations) + // Convert back to IRavenQueryable + // to continue building the query using LINQ + .ToQueryable() + // Define query criteria + // e.g. search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager" ) + .OfType() + .ToList(); + +// When running the above query on the RavenDB sample data +// the results contain 3 product documents. 
+ +// To get the score details for the first document from the results +// call 'GetExplanations' on the resulting Explanations object as follows: +string[] scoreDetails = explanations.GetExplanations(products[0].Id); +`} + + + + +{`List products = await asyncSession + // Query the index + .Query() + // Convert the IRavenQueryable to IDocumentQuery + // to be able to use 'IncludeExplanations' + .ToAsyncDocumentQuery() + // Call 'IncludeExplanations', provide an out param for the explanations results + .IncludeExplanations(out Explanations explanations) + // Convert back to IRavenQueryable + // to continue building the query using LINQ + .ToQueryable() + // Define query criteria + // e.g. search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager" ) + .OfType() + .ToListAsync(); + +// When running the above query on the RavenDB sample data +// the results contain 3 product documents. + +// To get the score details for the first document from the results +// call 'GetExplanations' on the resulting Explanations object as follows: +string[] scoreDetails = explanations.GetExplanations(products[0].Id); +`} + + + + +{`List products = session.Advanced + // Query the index + .DocumentQuery() + // Call 'IncludeExplanations', provide an out param for the explanations results + .IncludeExplanations(out Explanations explanations) + // Define query criteria + // e.g. search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager" ) + .OfType() + .ToList(); + +// When running the above query on the RavenDB sample data +// the results contain 3 product documents. + +// To get the score details for the first document from the results +// call 'GetExplanations' on the resulting Explanations object as follows: +string[] scoreDetails = explanations.GetExplanations(products[0].Id); +`} + + + + +{`List products = await asyncSession.Advanced + // Query the index + .AsyncDocumentQuery() + // Call 'IncludeExplanations', provide an out param for the explanations results + .IncludeExplanations(out Explanations explanations) + // Define query criteria + // e.g. search for docs containing Syrup -or- Lager in their Name field + .Search(x => x.Name, "Syrup Lager" ) + .OfType() + .ToListAsync(); + +// When running the above query on the RavenDB sample data +// the results contain 3 product documents. 
+ +// To get the score details for the first document from the results +// call 'GetExplanations' on the resulting Explanations object as follows: +string[] scoreDetails = explanations.GetExplanations(products[0].Id); +`} + + + + +{`from index "Products/BySearchName" +where search(Name, "Syrup Lager") +include explanations() +// Or: +from index "Products/BySearchName" +where search(Name, "Syrup") or search(Name, "Lager") +include explanations() +`} + + + + + + +## Include explanations when querying Map-Reduce index + + + + +{`// This index counts the number of units ordered per category in all Product documents +// =================================================================================== + +public class NumberOfUnitsOrdered_PerCategory : + AbstractIndexCreationTask +{ + public class IndexEntry + { + public string Category { get; set; } + public int NumberOfUnitsOrdered { get; set; } + } + + public NumberOfUnitsOrdered_PerCategory() + { + Map = products => from product in products + // Load the products' category + let categoryName = LoadDocument(product.Category).Name + + select new IndexEntry() + { + Category = categoryName, + NumberOfUnitsOrdered = product.UnitsOnOrder + }; + + Reduce = results => from result in results + group result by result.Category + into g + let unitsOrdered = g.Sum(x => x.NumberOfUnitsOrdered) + + select new IndexEntry() + { + Category = g.Key, + NumberOfUnitsOrdered = unitsOrdered + }; + } +} +`} + + + + +{`List results = session + // Query the Map-Reduce index + .Query() + // Convert the IRavenQueryable to IDocumentQuery + // to be able to use 'IncludeExplanations' + .ToDocumentQuery() + // Call 'IncludeExplanations', provide: + // * The group key for each result item + // * An out param for the explanations results + .IncludeExplanations( + new ExplanationOptions { GroupKey = "Category" }, + out Explanations explanations) + // Convert back to IRavenQueryable + // to continue building the query using LINQ + .ToQueryable() + // Query for categories that have a total of more than a 400 units ordered + .Where(x => x.NumberOfUnitsOrdered > 400) + .ToList(); + +// Get the score details for an item in the results +// Pass the group key (Category, in this case) to 'GetExplanations' +string[] scoreDetails = explanations.GetExplanations(results[0].Category); +`} + + + + +{`List results = await asyncSession + // Query the Map-Reduce index + .Query() + // Convert the IRavenQueryable to IDocumentQuery + // to be able to use 'IncludeExplanations' + .ToAsyncDocumentQuery() + // Call 'IncludeExplanations', provide: + // * The group key for each result item + // * An out param for the explanations results + .IncludeExplanations( + new ExplanationOptions { GroupKey = "Category" }, + out Explanations explanations) + // Convert back to IRavenQueryable + // to continue building the query using LINQ + .ToQueryable() + // Query for categories that have a total of more than a 400 units ordered + .Where(x => x.NumberOfUnitsOrdered > 400) + .ToListAsync(); + +// Get the score details for an item in the results +// Pass the group key (Category, in this case) to 'GetExplanations' +string[] scoreDetails = explanations.GetExplanations(results[0].Category); +`} + + + + +{`List results = session.Advanced + // Query the Map-Reduce index + .DocumentQuery() + // Call 'IncludeExplanations', provide: + // * The group key for each result item + // * An out param for the explanations results + .IncludeExplanations( + new ExplanationOptions { GroupKey = "Category" }, + out Explanations 
explanations) + // Query for categories that have a total of more than a 400 units ordered + .WhereGreaterThan(x => x.NumberOfUnitsOrdered, 400) + .ToList(); + +// Get the score details for an item in the results +// Pass the group key (Category, in this case) to 'GetExplanations' +string[] scoreDetails = explanations.GetExplanations(results[0].Category); +`} + + + + +{`List results = await asyncSession.Advanced + // Query the Map-Reduce index + .AsyncDocumentQuery() + // Call 'IncludeExplanations', provide: + // * The group key for each result item + // * An out param for the explanations results + .IncludeExplanations( + new ExplanationOptions { GroupKey = "Category" }, + out Explanations explanations) + // Query for categories that have a total of more than a 400 units ordered + .WhereGreaterThan(x => x.NumberOfUnitsOrdered, 400) + .ToListAsync(); + +// Get the score details for an item in the results +// Pass the group key (Category, in this case) to 'GetExplanations' +string[] scoreDetails = explanations.GetExplanations(results[0].Category); +`} + + + + +{`from index "NumberOfUnitsOrdered/PerCategory" +where NumberOfUnitsOrdered > 400 +include explanations($p0) +{"p0" : { "GroupKey" : "Category" }} +`} + + + + + + +## Syntax + + + +{`// Use this overload when querying a Map index +IDocumentQuery IncludeExplanations(out Explanations explanations); + +// Use this overload when querying a Map-Reduce index +IDocumentQuery IncludeExplanations(ExplanationOptions options, out Explanations explanations); +`} + + + +| Parameter | Type | Description | +|-------------------|----------------------|-------------------------------------------------------------------------| +| **explanations** | `Explanations` | An _out_ param that will be filled with the explanations results. | +| **options** | `ExplanationOptions` | An object that specifies the GroupKey when querying a Map-Reduce index. | + + + +{`public class Explanations +\{ + // Returns a list with all explanations. + // Pass the document ID of a document from the results to get its score details (Map index) + // Pass the GroupKey of an item from the results to get its score details (Map-Reduce index) + public string[] GetExplanations(string key); +\} +`} + + + + +{`public sealed class ExplanationOptions +\{ + // The group key that was used to group by the items in the Map-Reduce index + public string GroupKey \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_include-explanations-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_include-explanations-nodejs.mdx new file mode 100644 index 0000000000..de5810e8d0 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_include-explanations-nodejs.mdx @@ -0,0 +1,195 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article provides examples of including explanations when querying a static-index. + +* **Prior to this article**, please refer to [Include Query Explanations](../../client-api/session/querying/debugging/include-explanations.mdx) for dynamic-query examples + and general knowledge about including explanations to retrieve the score of resulting documents. + + + * Including explanations is available only when using **Lucene** as the underlying search engine for static-indexes. + * You can configure which search engine will be used. 
Learn how in [Selecting the search engine](../../indexes/search-engine/corax.mdx#selecting-the-search-engine). + +* In this page: + * [Include explanations when querying Map index](../../indexes/querying/include-explanations.mdx#include-explanations-when-querying-map-index) + * [Include explanations when querying Map-Reduce index](../../indexes/querying/include-explanations.mdx#include-explanations-when-querying-map-reduce-index) + * [Syntax](../../indexes/querying/include-explanations.mdx#syntax) + + +## Include explanations when querying Map index + + + + +{`class Products_BySearchName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Products", product => { + return { + name: product.Name + }; + }); + + // Configure the index-field 'Name' for FTS + this.index("name", "Search"); + } +} +`} + + + + +{`// Define an object that will receive the explanations results +let explanationsResults; + +const products = await session + // Query the index + .query({ indexName: "Products/BySearchName" }) + // Call 'includeExplanations', provide an object for the explanations results + .includeExplanations(e => explanationsResults = e) + .search("name", "Syrup Lager") + .all(); + +// When running the above query on the RavenDB sample data +// the results contain 3 product documents. + +// Get the score details for a specific document from 'explanationsResults' +const id = session.advanced.getDocumentId(products[0]); +const scoreDetails = explanationsResults.explanations[products[0].id]; +`} + + + + +{`from index "Products/BySearchName" +where search(name, "Syrup Lager") +include explanations() +// Or: +from index "Products/BySearchName" +where search(name, "Syrup") or search(name, "Lager") +include explanations() +`} + + + + + + +## Include explanations when querying Map-Reduce index + + + + +{`class NumberOfUnitsOrdered_PerCategory extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + const { load } = this.mapUtils(); + + this.map("Products", product => { + return { + category: load(product.Category, "Categories").Name, + numberOfUnitsOrdered: product.UnitsOnOrder + }; + }); + + this.reduce(results => results.groupBy(x => x.category).aggregate(g => { + return { + category: g.key, + numberOfUnitsOrdered: g.values.reduce((a, b) => a + b.numberOfUnitsOrdered, 0) + } + })); + } +} +`} + + + + +{`// Define an object that will receive the explanations results +let explanationsResults; + +// Define the group key by which to get explanations results +const explanationsOptions = { groupKey: "category" } + +const items = await session + // Query the Map-Reduce index + .query({ indexName: "NumberOfUnitsOrdered/PerCategory" }) + // Call 'includeExplanations', provide: + // * Options with the defined group key + // * An object for the explanations results + .includeExplanations(explanationsOptions, e => explanationsResults = e) + // Query for categories that have a total of more than a 400 units ordered + .whereGreaterThan("numberOfUnitsOrdered", 400) + .all(); + +// Get the score details for an item in the results +// Pass the group key (category, in this case) to 'explanations' +const scoreDetails = explanationsResults.explanations[items[0].category]; +`} + + + + +{`from index "NumberOfUnitsOrdered/PerCategory" +where numberOfUnitsOrdered > 400 +include explanations($p0) +{"p0" : { "GroupKey": "category" }} +`} + + + + + + +## Syntax + + + +{`// Use this overload when querying a Map index +query.includeExplanations(explanationsCallback) + +// Use 
this overload when querying a Map-Reduce index +query.includeExplanations(options, explanationsCallback) +`} + + + +| Parameter | Type | Description | +|--------------------------|---------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **explanationsCallback** | `(explanationsResults) => void` | <ul><li>A callback function with an output parameter.</li><li>The parameter passed to the callback will be filled with the `Explanations` object when query returns.</li></ul> | +| **options** | `object` | An object that specifies the group key when querying a Map-Reduce index. | + + + +{`// The Explanations object: +// ======================== + +class Explanations \{ + get explanations(): \{ + [key: string]: string[]; // An explanations list per key + \}; +\} +`} + + + + +{`// The Explanation Options object: +// =============================== +\{ + // The group key that was used to group by the items in the Map-Reduce index + groupKey; // string; +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_intersection-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_intersection-csharp.mdx new file mode 100644 index 0000000000..cc3aaa1609 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_intersection-csharp.mdx @@ -0,0 +1,186 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To allow users to `Intersect` queries on the server-side and return only documents + that match **all** the provided sub-queries, we introduced the query intersection feature. 
+ +* In this page: + * [Intersection](../../indexes/querying/intersection.mdx#intersection) + + +## Intersection + +Let's consider a case where we have a T-Shirt class: + + + +{`public class TShirt +\{ + public string Id \{ get; set; \} + + public int ReleaseYear \{ get; set; \} + + public string Manufacturer \{ get; set; \} + + public List Types \{ get; set; \} +\} + +public class TShirtType +\{ + public string Color \{ get; set; \} + + public string Size \{ get; set; \} +\} +`} + + + +We will fill our database with a few records: + + + +{`session.Store(new TShirt +\{ + Id = "tshirts/1", + Manufacturer = "Raven", + ReleaseYear = 2010, + Types = new List + \{ + new TShirtType \{Color = "Blue", Size = "Small"\}, + new TShirtType \{Color = "Black", Size = "Small"\}, + new TShirtType \{Color = "Black", Size = "Medium"\}, + new TShirtType \{Color = "Gray", Size = "Large"\} + \} +\}); + +session.Store(new TShirt +\{ + Id = "tshirts/2", + Manufacturer = "Wolf", + ReleaseYear = 2011, + Types = new List + \{ + new TShirtType \{ Color = "Blue", Size = "Small" \}, + new TShirtType \{ Color = "Black", Size = "Large" \}, + new TShirtType \{ Color = "Gray", Size = "Medium" \} + \} +\}); + +session.Store(new TShirt +\{ + Id = "tshirts/3", + Manufacturer = "Raven", + ReleaseYear = 2011, + Types = new List + \{ + new TShirtType \{ Color = "Yellow", Size = "Small" \}, + new TShirtType \{ Color = "Gray", Size = "Large" \} + \} +\}); + +session.Store(new TShirt +\{ + Id = "tshirts/4", + Manufacturer = "Raven", + ReleaseYear = 2012, + Types = new List + \{ + new TShirtType \{ Color = "Blue", Size = "Small" \}, + new TShirtType \{ Color = "Gray", Size = "Large" \} + \} +\}); +`} + + + +Now we want to return all the T-shirts that are manufactured by `Raven` and contain both +`Small Blue` and `Large Gray` types. + +To do this, we need to do the following: + +- add the `Raven.Client.Documents` namespace to usings +- use the `Intersect` query extension: + + + + +{`IList results = session.Query() + .Where(x => x.Manufacturer == "Raven") + .Intersect() + .Where(x => x.Color == "Blue" && x.Size == "Small") + .Intersect() + .Where(x => x.Color == "Gray" && x.Size == "Large") + .OfType() + .ToList(); +`} + + + + +{`IList results = session + .Advanced + .DocumentQuery() + .WhereEquals("Manufacturer", "Raven") + .Intersect() + .WhereEquals("Color", "Blue") + .AndAlso() + .WhereEquals("Size", "Small") + .Intersect() + .WhereEquals("Color", "Gray") + .AndAlso() + .WhereEquals("Size", "Large") + .ToList(); +`} + + + + +{`public class TShirts_ByManufacturerColorSizeAndReleaseYear : AbstractIndexCreationTask +{ + public class Result + { + public string Manufacturer { get; set; } + + public string Color { get; set; } + + public string Size { get; set; } + + public int ReleaseYear { get; set; } + } + + public TShirts_ByManufacturerColorSizeAndReleaseYear() + { + Map = tshirts => from tshirt in tshirts + from type in tshirt.Types + select new + { + Manufacturer = tshirt.Manufacturer, + Color = type.Color, + Size = type.Size, + ReleaseYear = tshirt.ReleaseYear + }; + } +} +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(Manufacturer = 'Raven', Color = 'Blue' and Size = 'Small', Color = 'Gray' and Size = 'Large') +`} + + + + +The above query will return `tshirts/1` and `tshirts/4`, that match **all** sub-queries. +`tshirts/2` will not be included in the results because it is not manufactured by `Raven`, +and `tshirts/3` will not be included because it is not available in `Small Blue`. 
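+
+To see why intersection is needed, recall that this index fans out each T-shirt into
+one index entry per type, and each sub-query is evaluated against a single index entry.
+The sketch below (a hypothetical illustration, reusing the index and sample data from above)
+shows the naive single-query alternative, which returns no results because no single index
+entry holds two different color/size combinations:
+
+
+
+{`// Naive attempt - AND-ing both type combinations in one query.
+// Each index entry contains only ONE (Color, Size) pair,
+// so no entry can satisfy both conditions and nothing is returned.
+IList<TShirt> empty = session
+    .Query<TShirts_ByManufacturerColorSizeAndReleaseYear.Result,
+           TShirts_ByManufacturerColorSizeAndReleaseYear>()
+    .Where(x => x.Manufacturer == "Raven")
+    .Where(x => x.Color == "Blue" && x.Size == "Small")
+    .Where(x => x.Color == "Gray" && x.Size == "Large")
+    .OfType<TShirt>()
+    .ToList(); // empty - use 'Intersect' instead, as shown above
+`}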
+ + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_intersection-java.mdx b/versioned_docs/version-7.1/indexes/querying/_intersection-java.mdx new file mode 100644 index 0000000000..6561135bac --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_intersection-java.mdx @@ -0,0 +1,234 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To allow users to `intersect` queries on the server-side and return only documents + that match **all** the provided sub-queries, we introduced the query intersection feature. + +* In this page: + * [Intersection](../../indexes/querying/intersection.mdx#intersection) + + +## Intersection + +Let's consider a case where we have a T-Shirt class: + + + +{`public class TShirt \{ + private String id; + private int releaseYear; + private String manufacturer; + private List types; + + public String getId() \{ + return id; + \} + + public void setId(String id) \{ + this.id = id; + \} + + public int getReleaseYear() \{ + return releaseYear; + \} + + public void setReleaseYear(int releaseYear) \{ + this.releaseYear = releaseYear; + \} + + public String getManufacturer() \{ + return manufacturer; + \} + + public void setManufacturer(String manufacturer) \{ + this.manufacturer = manufacturer; + \} + + public List getTypes() \{ + return types; + \} + + public void setTypes(List types) \{ + this.types = types; + \} +\} + +public class TShirtType \{ + private String color; + private String size; + + public String getColor() \{ + return color; + \} + + public void setColor(String color) \{ + this.color = color; + \} + + public String getSize() \{ + return size; + \} + + public void setSize(String size) \{ + this.size = size; + \} + + public TShirtType() \{ + \} + + public TShirtType(String color, String size) \{ + this.color = color; + this.size = size; + \} +\} +`} + + + +We will fill our database with few records: + + + +{`TShirt tShirt1 = new TShirt(); +tShirt1.setId("tshirts/1"); +tShirt1.setManufacturer("Raven"); +tShirt1.setReleaseYear(2010); +tShirt1.setTypes(Arrays.asList( + new TShirtType("Blue", "Small"), + new TShirtType("Black", "Small"), + new TShirtType("Black", "Medium"), + new TShirtType("Gray", "Large") +)); +session.store(tShirt1); + +TShirt tShirt2 = new TShirt(); +tShirt2.setId("tshirts/2"); +tShirt2.setManufacturer("Wolf"); +tShirt2.setReleaseYear(2011); +tShirt2.setTypes(Arrays.asList( + new TShirtType("Blue", "Small"), + new TShirtType("Black", "Large"), + new TShirtType("Gray", "Medium") +)); +session.store(tShirt2); + +TShirt tShirt3 = new TShirt(); +tShirt3.setId("tshirts/3"); +tShirt3.setManufacturer("Raven"); +tShirt3.setReleaseYear(2011); +tShirt3.setTypes(Arrays.asList( + new TShirtType("Yellow", "Small"), + new TShirtType("Gray", "Large") +)); +session.store(tShirt3); + +TShirt tShirt4 = new TShirt(); +tShirt4.setId("tshirts/4"); +tShirt4.setManufacturer("Raven"); +tShirt4.setReleaseYear(2012); +tShirt4.setTypes(Arrays.asList( + new TShirtType("Blue", "Small"), + new TShirtType("Gray", "Large") +)); +session.store(tShirt4); +`} + + + +Now we want to return all the T-shirts that are manufactured by `Raven` and contain both +`Small Blue` and `Large Gray` types. 
+ +To do this, we need to use the `intersect` query method: + + + + +{`List result = session.query(TShirts_ByManufacturerColorSizeAndReleaseYear.Result.class, + TShirts_ByManufacturerColorSizeAndReleaseYear.class) + .whereEquals("manufacturer", "Raven") + .intersect() + .whereEquals("color", "Blue") + .andAlso() + .whereEquals("size", "Small") + .intersect() + .whereEquals("color", "Gray") + .andAlso() + .whereEquals("size", "large") + .toList(); +`} + + + + +{`public static class TShirts_ByManufacturerColorSizeAndReleaseYear extends AbstractIndexCreationTask { + public static class Result { + private String manufacturer; + private String color; + private String size; + private int releaseYear; + + public String getManufacturer() { + return manufacturer; + } + + public void setManufacturer(String manufacturer) { + this.manufacturer = manufacturer; + } + + public String getColor() { + return color; + } + + public void setColor(String color) { + this.color = color; + } + + public String getSize() { + return size; + } + + public void setSize(String size) { + this.size = size; + } + + public int getReleaseYear() { + return releaseYear; + } + + public void setReleaseYear(int releaseYear) { + this.releaseYear = releaseYear; + } + } + + public TShirts_ByManufacturerColorSizeAndReleaseYear() { + map = "docs.TShirts.SelectMany(tshirt => tshirt.types, (tshirt, type) => new {" + + " manufacturer = tshirt.manufacturer," + + " color = type.color," + + " size = type.size," + + " releaseYear = tshirt.releaseYear" + + "})"; + } +} +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(manufacturer = 'Raven', color = 'Blue' and size = 'Small', color = 'Gray' and size = 'Large') +`} + + + + +The above query will return `tshirts/1` and `tshirts/4` as a result. +The document `tshirts/2` will not be included because it is not manufactured by `Raven`, +and `tshirts/3` is not available in `Small Blue` so it does not match **all** the sub-queries. + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_intersection-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_intersection-nodejs.mdx new file mode 100644 index 0000000000..ec54611d3a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_intersection-nodejs.mdx @@ -0,0 +1,161 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To allow users to `intersect` queries on the server-side and return only documents + that match **all** the provided sub-queries, we introduced the query intersection feature. 
+ +* In this page: + * [Intersection](../../indexes/querying/intersection.mdx#intersection) + + +## Intersection + +Let's consider a case where we have a T-Shirt class: + + + +{`class TShirt \{ + constructor(data) \{ + this.id = data.id; + this.manufacturer = data.manufacturer; + this.releaseYear = data.releaseYear; + this.types = data.types; + \} +\} + +class TShirtType \{ + constructor(color, size) \{ + this.color = color; + this.size = size; + \} +\} +`} + + + +We will fill our database with few records: + + + +{`const tShirt1 = new TShirt(\{ + id: "tshirts/1", + manufacturer: "Raven", + releaseYear: 2010, + types: [ + new TShirtType("Blue", "Small"), + new TShirtType("Black", "Small"), + new TShirtType("Black", "Medium"), + new TShirtType("Gray", "Large") + ] +\}); +await session.store(tShirt1); + +const tShirt2 = new TShirt(\{ + id: "tshirts/2", + manufacturer: "Wolf", + releaseYear: 2011, + types: [ + new TShirtType("Blue", "Small"), + new TShirtType("Black", "Large"), + new TShirtType("Gray", "Medium") + ] +\}); +await session.store(tShirt2); + +const tShirt3 = new TShirt(\{ + id: "tshirts/3", + manufacturer: "Raven", + releaseYear: 2011, + types: [ + new TShirtType("Yellow", "Small"), + new TShirtType("Gray", "Large") + ] +\}); +await session.store(tShirt3); + +const tShirt4 = new TShirt(\{ + id: "tshirts/4", + manufacturer: "Raven", + releaseYear: 2012, + types: [ + new TShirtType("Blue", "Small"), + new TShirtType("Gray", "Large") + ] +\}); +await session.store(tShirt4); +`} + + + +Now we want to return all the T-shirts that are manufactured by `Raven` and contain both +`Small Blue` and `Large Gray` types. + +To do this, we need to use the `intersect` query method: + + + + +{`const result = await session.query({ + indexName: "TShirts/ByManufacturerColorSizeAndReleaseYear" + }) + .whereEquals("manufacturer", "Raven") + .intersect() + .whereEquals("color", "Blue") + .andAlso() + .whereEquals("size", "Small") + .intersect() + .whereEquals("color", "Gray") + .andAlso() + .whereEquals("size", "large") + .ofType(TShirts_ByManufacturerColorSizeAndReleaseYearResult) + .all(); +`} + + + + +{`class TShirts_ByManufacturerColorSizeAndReleaseYearResult { + constructor(data) { + this.manufacturer = data.manufacturer; + this.color = data.color; + this.size = data.size; + this.releaseYear = data.releaseYear; + } +} + +class TShirts_ByManufacturerColorSizeAndReleaseYear extends AbstractIndexCreationTask { + + constructor() { + super(); + + this.map = \`docs.TShirts.SelectMany(tshirt => tshirt.types, (tshirt, type) => new { + manufacturer = tshirt.manufacturer, + color = type.color, + size = type.size, + releaseYear = tshirt.releaseYear + })\`; + } +} +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(manufacturer = 'Raven', color = 'Blue' and size = 'Small', color = 'Gray' and size = 'Large') +`} + + + + +The above query will return `tshirts/1` and `tshirts/4`, that match **all** sub-queries. +`tshirts/2` will not be included in the results because it is not manufactured by `Raven`, +and `tshirts/3` will not be included because it is not available in `Small Blue`. 
+ + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_intersection-php.mdx b/versioned_docs/version-7.1/indexes/querying/_intersection-php.mdx new file mode 100644 index 0000000000..d39efc9dd5 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_intersection-php.mdx @@ -0,0 +1,242 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To allow users to `intersect` queries on the server-side and return only documents + that match **all** the provided sub-queries, we introduced the query intersection feature. + +* In this page: + * [Intersection](../../indexes/querying/intersection.mdx#intersection) + + +## Intersection + +Let's consider a case where we have a T-Shirt class: + + + +{`class TShirt +\{ + private ?string $id = null; + private ?int $releaseYear = null; + private ?string $manufacturer = null; + private ?TShirtTypeList $types = null; + + public function getId(): ?string + \{ + return $this->id; + \} + + public function setId(?string $id): void + \{ + $this->id = $id; + \} + + public function getReleaseYear(): ?int + \{ + return $this->releaseYear; + \} + + public function setReleaseYear(?int $releaseYear): void + \{ + $this->releaseYear = $releaseYear; + \} + + public function getManufacturer(): ?string + \{ + return $this->manufacturer; + \} + + public function setManufacturer(?string $manufacturer): void + \{ + $this->manufacturer = $manufacturer; + \} + + public function getTypes(): ?TShirtTypeList + \{ + return $this->types; + \} + + public function setTypes(?TShirtTypeList $types): void + \{ + $this->types = $types; + \} +\} + +class TShirtType +\{ + private ?string $color = null; + private ?string $size = null; + + public function __construct(?string $color, ?string $size) + \{ + $this->color = $color; + $this->size = $size; + \} + + public function getColor(): ?string + \{ + return $this->color; + \} + + public function setColor(?string $color): void + \{ + $this->color = $color; + \} + + public function getSize(): ?string + \{ + return $this->size; + \} + + public function setSize(?string $size): void + \{ + $this->size = $size; + \} +\} + +class TShirtTypeList extends TypedList +\{ + protected function __construct() + \{ + parent::__construct(TShirtType::class); + \} +\} +`} + + + +We will fill our database with a few records: + + + +{`// first tShirt +$tShirt = new TShirt(); +$tShirt->setId("tshirts/1"); +$tShirt->setManufacturer("Raven"); +$tShirt->setReleaseYear(2010); + +$types = TShirtTypeList::fromArray([ + new TShirtType(color: "Blue", size: "Small"), + new TShirtType(color: "Black", size: "Small"), + new TShirtType(color: "Black", size: "Medium"), + new TShirtType(color: "Gray", size: "Large") +]); +$tShirt->setTypes($types); + +$session->store($tShirt); + +// second tShirt +$tShirt = new TShirt(); +$tShirt->setId("tshirts/2"); +$tShirt->setManufacturer("Wolf"); +$tShirt->setReleaseYear(2011); + +$types = TShirtTypeList::fromArray([ + new TShirtType(color: "Blue", size: "Small"), + new TShirtType(color: "Black", size: "Large"), + new TShirtType(color: "Black", size: "Medium") +]); +$tShirt->setTypes($types); + +$session->store($tShirt); + +// third tShirt +$tShirt = new TShirt(); +$tShirt->setId("tshirts/3"); +$tShirt->setManufacturer("Raven"); +$tShirt->setReleaseYear(2011); + +$types = TShirtTypeList::fromArray([ + new TShirtType(color: "Yellow", size: "Small"), + new TShirtType(color: "Gray", size: "Large") +]); 
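+// Note: tshirts/3 is made by 'Raven' but has no 'Small Blue' type,
+// so the intersection query below will filter it out.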
+$tShirt->setTypes($types); + +$session->store($tShirt); + +// fourth tShirt +$tShirt = new TShirt(); +$tShirt->setId("tshirts/4"); +$tShirt->setManufacturer("Raven"); +$tShirt->setReleaseYear(2012); + +$types = TShirtTypeList::fromArray([ + new TShirtType(color: "Blue", size: "Small"), + new TShirtType(color: "Gray", size: "Large") +]); +$tShirt->setTypes($types); + +$session->store($tShirt); +`} + + + +Now we can use the `intersect` method to return all the T-shirts that are +manufactured by `Raven` and contain both `Small Blue` and `Large Gray` types. + + + + +{`/** @var array $results */ +$results = $session->query(TShirts_ByManufacturerColorSizeAndReleaseYear_Result::class, TShirts_ByManufacturerColorSizeAndReleaseYear::class) + ->whereEquals("Manufacturer", "Raven") + ->intersect() + ->whereEquals("Color", "Blue") + ->andAlso() + ->whereEquals("Size", "Small") + ->intersect() + ->whereEquals("Color", "Gray") + ->andAlso() + ->whereEquals("Size", "Large") + ->ofType(TShirt::class) + ->toList(); +`} + + + + +{`class TShirts_ByManufacturerColorSizeAndReleaseYear_Result +{ + private ?string $manufacturer = null; + private ?string $color = null; + private ?string $size = null; + private ?int $releaseYear = null; + // ... getters and setters +} +class TShirts_ByManufacturerColorSizeAndReleaseYear extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.TShirts.SelectMany(tshirt => tshirt.types, (tshirt, type) => new {" . + " manufacturer = tshirt.manufacturer," . + " color = type.color," . + " size = type.size," . + " releaseYear = tshirt.releaseYear" . + "})"; + } +} +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(Manufacturer = 'Raven', Color = 'Blue' and Size = 'Small', Color = 'Gray' and Size = 'Large') +`} + + + + +The above query will return `tshirts/1` and `tshirts/4`, that match **all** sub-queries. +`tshirts/2` will not be included in the results because it is not manufactured by `Raven`, +and `tshirts/3` will not be included because it is not available in `Small Blue`. + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_intersection-python.mdx b/versioned_docs/version-7.1/indexes/querying/_intersection-python.mdx new file mode 100644 index 0000000000..ea1460c65e --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_intersection-python.mdx @@ -0,0 +1,153 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* To allow users to `intersect` queries on the server-side and return only documents + that match **all** the provided sub-queries, we introduced the query intersection feature. 
+ +* In this page: + * [Intersection](../../indexes/querying/intersection.mdx#intersection) + + +## Intersection + +Let's consider a case where we have a T-Shirt class: + + + +{`class TShirtType: + def __init__(self, color: str = None, size: str = None): + self.color = color + self.size = size + + +class TShirt: + def __init__( + self, Id: str = None, release_year: int = None, manufacturer: str = None, types: List[TShirtType] = None + ): + self.Id = Id + self.release_year = release_year + self.manufacturer = manufacturer + self.types = types +`} + + + +We will fill our database with a few records: + + + +{`session.store( + TShirt( + Id="tshirts/1", + manufacturer="Raven", + release_year=2010, + types=[ + TShirtType(color="Blue", size="Small"), + TShirtType(color="Black", size="Small"), + TShirtType(color="Black", size="Medium"), + TShirtType(color="Gray", size="Large"), + ], + ) +) + +session.store( + TShirt( + Id="tshirts/2", + manufacturer="Wolf", + release_year=2011, + types=[ + TShirtType(color="Blue", size="Small"), + TShirtType(color="Black", size="Large"), + TShirtType(color="Gray", size="Medium"), + ], + ) +) + +session.store( + TShirt( + Id="tshirts/3", + manufacturer="Raven", + release_year=2011, + types=[TShirtType(color="Yellow", size="Small"), TShirtType(color="Gray", size="Large")], + ) +) + +session.store( + TShirt( + Id="tshirts/4", + manufacturer="Raven", + release_year=2012, + types=[TShirtType(color="Blue", size="Small"), TShirtType(color="Gray", size="Large")], + ) +) +`} + + + +Now we can use the `intersect` method to return all the T-shirts that are +manufactured by `Raven` and contain both `Small Blue` and `Large Gray` types. + + + + +{`results = list( + session.query_index_type( + TShirts_ByManufacturerColorSizeAndReleaseYear, + TShirts_ByManufacturerColorSizeAndReleaseYear.Result, + ) + .where_equals("Manufacturer", "Raven") + .intersect() + .where_equals("Color", "Blue") + .and_also() + .where_equals("Size", "Small") + .intersect() + .where_equals("Color", "Gray") + .and_also() + .where_equals("Size", "Large") +) +`} + + + + +{`class TShirts_ByManufacturerColorSizeAndReleaseYear(AbstractIndexCreationTask): + class Result: + def __init__(self, manufacturer: str = None, color: str = None, size: str = None, release_year: int = None): + self.manufacturer = manufacturer + self.color = color + self.size = size + self.release_year = release_year + + def __init__(self): + super().__init__() + self.map = ( + "from tshirt in docs.TShirts from type in tshirt.types select new {" + " manufacturer = tshirt.manufacturer," + " color = tshirt.color," + " size = type.size," + " release_year = tshirt.release_year" + "}" + ) +`} + + + + +{`from index 'TShirts/ByManufacturerColorSizeAndReleaseYear' +where intersect(Manufacturer = 'Raven', Color = 'Blue' and Size = 'Small', Color = 'Gray' and Size = 'Large') +`} + + + + +The above query will return `tshirts/1` and `tshirts/4`, that match **all** sub-queries. +`tshirts/2` will not be included in the results because it is not manufactured by `Raven`, +and `tshirts/3` will not be included because it is not available in `Small Blue`. 
+ + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_morelikethis-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_morelikethis-csharp.mdx new file mode 100644 index 0000000000..dbd5bbd8ad --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_morelikethis-csharp.mdx @@ -0,0 +1,191 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* `MoreLikeThis` returns a list of documents that are related to a given document. +* This feature can be used, for example, to show a list of related articles at the + bottom of the currently-read article page, as done in many news sites. +* To accomplish this, RavenDB uses the Lucene contrib project `MoreLikeThis` feature. + +* In this page: + + * [Setup](../../indexes/querying/morelikethis.mdx#setup) + * [Basic Usage](../../indexes/querying/morelikethis.mdx#basic-usage) + * [Options](../../indexes/querying/morelikethis.mdx#options) + * [Stop Words](../../indexes/querying/morelikethis.mdx#stop-words) + * [Remarks](../../indexes/querying/morelikethis.mdx#remarks) + + +## Setup + +To be able to work, `MoreLikeThis` requires access to the index text. +The queried index needs, therefore, to [store](../../indexes/storing-data-in-index.mdx) +the fields or the [term vectors](../../indexes/using-term-vectors.mdx) for these fields. + + + +{`public class Article +\{ + public string Id \{ get; set; \} + public string Name \{ get; set; \} + public string ArticleBody \{ get; set; \} +\} + +public class Articles_ByArticleBody : AbstractIndexCreationTask
+\{
+    public Articles_ByArticleBody()
+    \{
+        Map = docs => from doc in docs
+                      select new
+                      \{
+                          doc.ArticleBody
+                      \};
+
+        // Store the field so that MoreLikeThis can access the indexed text
+        Stores.Add(x => x.ArticleBody, FieldStorage.Yes);
+        Analyzers.Add(x => x.ArticleBody, "StandardAnalyzer");
+    \}
+\}
+`}
+
+
+
+
+
+## Basic Usage
+
+Most `MoreLikeThis` options have sensible defaults.
+The simplest mode will satisfy most usage scenarios.
+
+
+
+
+{`List<Article> articles = session
+    // Query the index
+    .Query<Article, Articles_ByArticleBody>()
+    // Find articles related to the document 'articles/1'
+    .MoreLikeThis(builder => builder
+        .UsingDocument(x => x.Id == "articles/1"))
+    .ToList();
+`}
+
+
+
+
+{`List<Article> articles = session.Advanced
+    // Query the index
+    .DocumentQuery<Article, Articles_ByArticleBody>()
+    // Find articles related to the document 'articles/1'
+    .MoreLikeThis(builder => builder
+        .UsingDocument(x => x.WhereEquals(y => y.Id, "articles/1")))
+    .ToList();
+`}
+
+
+
+
+{`from index 'Articles/ByArticleBody'
+where morelikethis(id() = 'articles/1')
+`}
+
+
+
+
+`MoreLikeThis` will use **all** the fields defined in an index.
+To use only specific fields, pass these fields in the `MoreLikeThisOptions.Fields` property.
+
+
+
+
+{`List<Article> articles = session
+    // Query the index
+    .Query<Article, Articles_ByArticleBody>()
+    .MoreLikeThis(builder => builder
+        .UsingDocument(x => x.Id == "articles/1")
+        // Compare only the 'ArticleBody' field
+        .WithOptions(new MoreLikeThisOptions
+        {
+            Fields = new[] { nameof(Article.ArticleBody) }
+        }))
+    .ToList();
+`}
+
+
+
+
+{`List<Article>
articles = session.Advanced + .DocumentQuery() + .MoreLikeThis(builder => builder + .UsingDocument(x => x.WhereEquals(y => y.Id, "articles/1")) + .WithOptions(new MoreLikeThisOptions + { + Fields = new[] { nameof(Article.ArticleBody) } + })) + .ToList(); +`} + + + + +{`from index 'Articles/ByArticleBody' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "ArticleBody" ] }') +`} + + + + + + +## Options + +Default parameters can be changed by manipulating `MoreLikeThisOptions` properties and passing them +to `MoreLikeThis`. + +| Option | Type | Description | +| ------------- | ------------- | ----- | +| **MinimumTermFrequency** | `int?` | Ignores terms with less than this frequency in the source doc | +| **MaximumQueryTerms** | `int?` | Returns a query with no more than this many terms | +| **MaximumNumberOfTokensParsed** | `int?` | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support | +| **MinimumWordLength** | `int?` | Ignores words less than this length or, if 0, then this has no effect | +| **MaximumWordLength** | `int?` | Ignores words greater than this length or if 0 then this has no effect | +| **MinimumDocumentFrequency** | `int?` | Ignores words which do not occur in at least this many documents | +| **MaximumDocumentFrequency** | `int?` | Ignores words which occur in more than this many documents | +| **MaximumDocumentFrequencyPercentage** | `int?` | Ignores words which occur in more than this percentage of documents | +| **Boost** | `bool?` | Boost terms in query based on score | +| **BoostFactor** | `float?` | Boost factor when boosting based on score | +| **StopWordsDocumentId** | `string` | Document ID containing custom stop words | +| **Fields** | `string[]` | Fields to compare | + + + +## Stop Words + +Some Lucene analyzers have a built-in list of common English words that are usually not useful +for searching, like "a", "as", "the", etc. +These words, called *stop words*, are considered uninteresting and are ignored. +If a used analyzer does not support *stop words*, or you need to overload these terms, you can +specify your own set of stop words. +A document with a list of stop words can be stored in RavenDB by storing the `MoreLikeThisStopWords` document: + + + +{`session.Store(new MoreLikeThisStopWords +\{ + Id = "Config/Stopwords", + StopWords = new List \{ "I", "A", "Be" \} +\}); +`} + + + +The document ID will then be set in the `MoreLikeThisOptions`. + + + +## Remarks + + +Please note that default values for settings, like `MinimumDocumentFrequency`, `MinimumTermFrequency`, +and `MinimumWordLength`, may result in filtering out related articles, especially with a small data set +(e.g. during development). + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_morelikethis-java.mdx b/versioned_docs/version-7.1/indexes/querying/_morelikethis-java.mdx new file mode 100644 index 0000000000..0d547755b4 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_morelikethis-java.mdx @@ -0,0 +1,183 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* `MoreLikeThis` returns a list of documents that are related to a given document. +* This feature can be used, for example, to show a list of related articles at the + bottom of the currently-read article page, as done in many news sites. +* To accomplish this, RavenDB uses the Lucene contrib project `MoreLikeThis` feature. 
+ +* In this page: + + * [Setup](../../indexes/querying/morelikethis.mdx#setup) + * [Basic Usage](../../indexes/querying/morelikethis.mdx#basic-usage) + * [Options](../../indexes/querying/morelikethis.mdx#options) + * [Stop Words](../../indexes/querying/morelikethis.mdx#stop-words) + * [Remarks](../../indexes/querying/morelikethis.mdx#remarks) + + +## Setup + +To be able to work, `MoreLikeThis` requires access to the index text. +The queried index needs, therefore, to [store](../../indexes/storing-data-in-index.mdx) +the fields or the [term vectors](../../indexes/using-term-vectors.mdx) for these fields. + + + +{`public class Article \{ + private String id; + private String name; + private String articleBody; + + public String getId() \{ + return id; + \} + + public void setId(String id) \{ + this.id = id; + \} + + public String getName() \{ + return name; + \} + + public void setName(String name) \{ + this.name = name; + \} + + public String getArticleBody() \{ + return articleBody; + \} + + public void setArticleBody(String articleBody) \{ + this.articleBody = articleBody; + \} +\} + +public class Articles_ByArticleBody extends AbstractIndexCreationTask \{ + public Articles_ByArticleBody() \{ + map = "from doc in docs.articles " + + "select new \{" + + " doc.articleBody " + + "\}"; + + store("articleBody", FieldStorage.YES); + analyze("articleBody", "StandardAnalyzer"); + \} +\} +`} + + + + + +## Basic Usage + +Many `MoreLikeThis` options are set by default. +The simplest mode will satisfy most usage scenarios. + + + + +{`List
articles = session + .query(Article.class, Articles_ByArticleBody.class) + .moreLikeThis(builder -> builder.usingDocument(x -> x.whereEquals("id()", "articles/1"))) + .toList(); +`} + + + + +{`from index 'Articles/ByArticleBody' +where morelikethis(id() = 'articles/1') +`} + + + + +`MoreLikeThis` will use **all** the fields defined in an index. +To use only specific fields, pass these fields in the `MoreLikeThisOptions.Fields` property. + + + + +{`MoreLikeThisOptions options = new MoreLikeThisOptions(); +options.setFields(new String[]{ "articleBody" }); +List
articles = session + .query(Article.class, Articles_ByArticleBody.class) + .moreLikeThis(builder -> builder + .usingDocument(x -> x.whereEquals("id()", "articles/1")) + .withOptions(options)) + .toList(); +`} + + + + +{`from index 'Articles/ByArticleBody' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "articleBody" ] }') +`} + + + + + + +## Options + +Default parameters can be changed by manipulating `MoreLikeThisOptions` properties and passing them +to `MoreLikeThis`. + +| Options | | | +| ------------- | ------------- | ----- | +| **MinimumTermFrequency** | `Integer` | Ignores terms with less than this frequency in the source doc | +| **MaximumQueryTerms** | `Integer` | Returns a query with no more than this many terms | +| **MaximumNumberOfTokensParsed** | `Integer` | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support | +| **MinimumWordLength** | `Integer` | Ignores words less than this length or, if 0, then this has no effect | +| **MaximumWordLength** | `Integer` | Ignores words greater than this length or if 0 then this has no effect | +| **MinimumDocumentFrequency** | `Integer` | Ignores words which do not occur in at least this many documents | +| **MaximumDocumentFrequency** | `Integer` | Ignores words which occur in more than this many documents | +| **MaximumDocumentFrequencyPercentage** | `Integer` | Ignores words which occur in more than this percentage of documents | +| **Boost** | `Boolean` | Boost terms in query based on score | +| **BoostFactor** | `Float` | Boost factor when boosting based on score | +| **StopWordsDocumentId** | `String` | Document ID containing custom stop words | +| **Fields** | `String[]` | Fields to compare | + + + +## Stop Words + +Some Lucene analyzers have a built-in list of common English words that are usually not useful +for searching, like "a", "as", "the", etc. +These words, called *stop words*, are considered uninteresting and are ignored. +If a used analyzer does not support *stop words*, or you need to overload these terms, you can +specify your own set of stop words. +A document with a list of stop words can be stored in RavenDB by storing the `MoreLikeThisStopWords` document: + + + +{`MoreLikeThisStopWords stopWords = new MoreLikeThisStopWords(); +stopWords.setStopWords(Arrays.asList("I", "A", "Be")); +session.store(stopWords, "Config/Stopwords"); +`} + + + +The document ID will then be set in the `MoreLikeThisOptions`. + + + +## Remarks + + +Please note that default values for settings, like `MinimumDocumentFrequency`, `MinimumTermFrequency`, +and `MinimumWordLength`, may result in filtering out related articles, especially with a small data set +(e.g. during development). + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_morelikethis-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_morelikethis-nodejs.mdx new file mode 100644 index 0000000000..59c5db11c0 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_morelikethis-nodejs.mdx @@ -0,0 +1,164 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* `moreLikeThis` returns a list of documents that are related to a given document. +* This feature can be used, for example, to show a list of related articles at the + bottom of the currently-read article page, as done in many news sites. +* To accomplish this, RavenDB uses the Lucene contrib project `moreLikeThis` feature. 
+ +* In this page: + + * [Setup](../../indexes/querying/morelikethis.mdx#setup) + * [Basic Usage](../../indexes/querying/morelikethis.mdx#basic-usage) + * [Options](../../indexes/querying/morelikethis.mdx#options) + * [Stop Words](../../indexes/querying/morelikethis.mdx#stop-words) + * [Remarks](../../indexes/querying/morelikethis.mdx#remarks) + + +## Setup + +To be able to work, `MoreLikeThis` requires access to the index text. +The queried index needs, therefore, to [store](../../indexes/storing-data-in-index.mdx) +the fields or the [term vectors](../../indexes/using-term-vectors.mdx) for these fields. + + + +{`class Article \{ + constructor(id, name, articleBody) \{ + this.id = id; + this.name = name; + this.articleBody = articleBody; + \} +\} + +class Articles_ByArticleBody extends AbstractIndexCreationTask \{ + constructor() \{ + super(); + + this.map = \`from doc in docs.Articles select new \{ + doc.articleBody + \}\`; + + this.store("articleBody", "Yes"); + this.analyze("articleBody", "StandardAnalyzer"); + \} +\} +`} + + + + + +## Basic Usage + +Many `MoreLikeThis` options are set by default. +The simplest mode will satisfy most usage scenarios. + + + + +{`const articles = await session + .query({ indexName: "Articles/ByArticleBody" }) + .moreLikeThis(builder => + builder.usingDocument(x => + x.whereEquals("id()", "articles/1"))) + .all(); +`} + + + + +{`from index 'Articles/ByArticleBody' +where morelikethis(id() = 'articles/1') +`} + + + + +`MoreLikeThis` will use **all** the fields defined in an index. +To use only specific fields, pass these fields in the `MoreLikeThisOptions.fields` property. + + + + +{`const options = { + fields: [ "articleBody" ] +}; +const articles = await session + .query({ indexName: "Articles/ByArticleBody" }) + .moreLikeThis(builder => builder + .usingDocument(x => x.whereEquals("id()", "articles/1")) + .withOptions(options)) + .all(); +`} + + + + +{`from index 'Articles/ByArticleBody' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "articleBody" ] }') +`} + + + + + + +## Options + +Default parameters can be changed by manipulating `MoreLikeThisOptions` properties and passing them +to `MoreLikeThis`. 
+ +| Options | | | +| ------------- | ------------- | ----- | +| **minimumTermFrequency** | `number` | Ignores terms with less than this frequency in the source doc | +| **maximumQueryTerms** | `number` | Returns a query with no more than this many terms | +| **maximumNumberOfTokensParsed** | `number` | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support | +| **minimumWordLength** | `number` | Ignores words less than this length or, if 0, then this has no effect | +| **maximumWordLength** | `number` | Ignores words greater than this length or if 0 then this has no effect | +| **minimumDocumentFrequency** | `number` | Ignores words which do not occur in at least this many documents | +| **maximumDocumentFrequency** | `number` | Ignores words which occur in more than this many documents | +| **maximumDocumentFrequencyPercentage** | `number` | Ignores words which occur in more than this percentage of documents | +| **boost** | `boolean` | Boost terms in query based on score | +| **boostFactor** | `number` | Boost factor when boosting based on score | +| **stopWordsDocumentId** | `string` | Document ID containing custom stop words | +| **fields** | `string[]` | Fields to compare | + + + +## Stop Words + +Some Lucene analyzers have a built-in list of common English words that are usually not useful +for searching, like "a", "as", "the", etc. +These words, called *stop words*, are considered uninteresting and are ignored. +If a used analyzer does not support *stop words*, or you need to overload these terms, you can +specify your own set of stop words. +A document with a list of stop words can be stored in RavenDB by storing the `MoreLikeThisStopWords` document: + + + +{`const stopWords = new MoreLikeThisStopWords(); +stopWords.stopWords = [ "I", "A", "Be" ]; +await session.store(stopWords, "Config/Stopwords"); +`} + + + +The document ID will then be set in the `MoreLikeThisOptions`. + +## Remarks + + +Please note that default values for settings, like `minimumDocumentFrequency`, `minimumTermFrequency`, +and `minimumWordLength`, may result in filtering out related articles, especially with a small data set +(e.g. during development). + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_morelikethis-php.mdx b/versioned_docs/version-7.1/indexes/querying/_morelikethis-php.mdx new file mode 100644 index 0000000000..7f0817466d --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_morelikethis-php.mdx @@ -0,0 +1,241 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* `moreLikeThis` returns a list of documents that are related to a given document. +* This feature can be used, for example, to show a list of related articles at the + bottom of the currently-read article page, as done in many news sites. +* To accomplish this, RavenDB uses the Lucene contrib project `MoreLikeThis` feature. + +* In this page: + + * [Setup](../../indexes/querying/morelikethis.mdx#setup) + * [Basic Usage](../../indexes/querying/morelikethis.mdx#basic-usage) + * [Options](../../indexes/querying/morelikethis.mdx#options) + * [Stop Words](../../indexes/querying/morelikethis.mdx#stop-words) + * [Remarks](../../indexes/querying/morelikethis.mdx#remarks) + + +## Setup + +To be able to work, `moreLikeThis` requires access to the index text. 
+The queried index needs, therefore, to [store](../../indexes/storing-data-in-index.mdx) +the fields or the [term vectors](../../indexes/using-term-vectors.mdx) for these fields. + + + +{`class Article +\{ + public ?string $id = null; + public ?string $name = null; + public ?string $articleBody = null; + + public function getId(): ?string + \{ + return $this->id; + \} + + public function setId(?string $id): void + \{ + $this->id = $id; + \} + + public function getName(): ?string + \{ + return $this->name; + \} + + public function setName(?string $name): void + \{ + $this->name = $name; + \} + + public function getArticleBody(): ?string + \{ + return $this->articleBody; + \} + + public function setArticleBody(?string $articleBody): void + \{ + $this->articleBody = $articleBody; + \} +\} + +class Articles_ByArticleBody extends AbstractIndexCreationTask +\{ + public function __construct() + \{ + parent::__construct(); + + $this->map = "from doc in docs.Articles select \{ doc.ArticleBody \}"; + + $this->store("ArticleBody", FieldStorage::yes()); + $this->analyze("ArticleBody", "StandardAnalyzer"); + \} +\} +`} + + + + + +## Basic Usage + +Many `moreLikeThis` options are set by default. +The simplest mode will satisfy most usage scenarios. + + + + +{`/** @var array
$articles */ +$articles = $session + ->query(Article::class, Articles_ByArticleBody::class) + ->moreLikeThis(function($builder) { + return $builder + ->usingDocument(function($x) { + return $x->whereEquals("id()", "articles/1"); + }); + }) + ->toList(); +`} + + + + +{`/** @var array
$articles */ +$articles = $session->advanced() + ->documentQuery(Article::class, Articles_ByArticleBody::class) + ->moreLikeThis(function($builder) { + return $builder + ->usingDocument(function($x) { + return $x->whereEquals("id()", "articles/1"); + }); + }) + ->toList(); +`} + + + + +{`from index 'Articles/ByArticleBody' +where morelikethis(id() = 'articles/1') +`} + + + + +`moreLikeThis` will use **all** the fields defined in an index. +To use only specific fields, pass these fields in the `MoreLikeThisOptions` fields property. + + + + +{`/** @var array
$articles */ +$articles = $session + ->query(Article::class, Articles_ByArticleBody::class) + ->moreLikeThis(function($builder) { + $mlt = new MoreLikeThisOptions(); + $mlt->setFields(["ArticleBody"]); + + return $builder + ->usingDocument(function($x) { + return $x->whereEquals("id()", "articles/1"); + }) + ->withOptions($mlt); + }) + ->toList(); +`} + + + + +{`/** @var array
$articles */ +$articles = $session->advanced() + ->documentQuery(Article::class, Articles_ByArticleBody::class) + ->moreLikeThis(function($builder) { + $mlt = new MoreLikeThisOptions(); + $mlt->setFields(["ArticleBody"]); + + return $builder + ->usingDocument(function($x) { + return $x->whereEquals("id()", "articles/1"); + }) + ->withOptions($mlt); + }) + ->toList(); +`} + + + + +{`from index 'Articles/ByArticleBody' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "ArticleBody" ] }') +`} + + + + + + +## Options + +Default parameters can be changed by manipulating `MoreLikeThisOptions` properties and passing them +to `moreLikeThis`. + +| Option | Type | Description | +| ------------- | ------------- | ----- | +| **MinimumTermFrequency** | `int?` | Ignores terms with less than this frequency in the source doc | +| **MaximumQueryTerms** | `int?` | Returns a query with no more than this many terms | +| **MaximumNumberOfTokensParsed** | `int?` | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support | +| **MinimumWordLength** | `int?` | Ignores words less than this length or, if 0, then this has no effect | +| **MaximumWordLength** | `int?` | Ignores words greater than this length or if 0 then this has no effect | +| **MinimumDocumentFrequency** | `int?` | Ignores words which do not occur in at least this many documents | +| **MaximumDocumentFrequency** | `int?` | Ignores words which occur in more than this many documents | +| **MaximumDocumentFrequencyPercentage** | `int?` | Ignores words which occur in more than this percentage of documents | +| **Boost** | `bool?` | Boost terms in query based on score | +| **BoostFactor** | `float?` | Boost factor when boosting based on score | +| **StopWordsDocumentId** | `string` | Document ID containing custom stop words | +| **Fields** | `string[]` | Fields to compare | + + + + + + +## Stop Words + +Some Lucene analyzers have a built-in list of common English words that are usually not useful +for searching, like "a", "as", "the", etc. +These words, called *stop words*, are considered uninteresting and are ignored. +If a used analyzer does not support *stop words*, or you need to overload these terms, you can +specify your own set of stop words. +A document with a list of stop words can be stored in RavenDB by storing the `MoreLikeThisStopWords` document: + + + +{`$mlt = new MoreLikeThisStopWords(); +$mlt->setId("Config/Stopwords"); +$mlt->setStopWords(["I", "A", "Be"]); +$session->store($mlt); +`} + + + +The document ID will then be set in the `MoreLikeThisOptions`. + + + +## Remarks + + +Please note that default values for settings, like `MinimumDocumentFrequency`, `MinimumTermFrequency`, +and `MinimumWordLength`, may result in filtering out related articles, especially with a small data set +(e.g. during development). + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_morelikethis-python.mdx b/versioned_docs/version-7.1/indexes/querying/_morelikethis-python.mdx new file mode 100644 index 0000000000..12d1e39d04 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_morelikethis-python.mdx @@ -0,0 +1,162 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* `more_like_this` returns a list of documents that are related to a given document. 
+* This feature can be used, for example, to show a list of related articles at the + bottom of the currently-read article page, as done in many news sites. +* To accomplish this, RavenDB uses the Lucene contrib project `MoreLikeThis` feature. + +* In this page: + + * [Setup](../../indexes/querying/morelikethis.mdx#setup) + * [Basic Usage](../../indexes/querying/morelikethis.mdx#basic-usage) + * [Options](../../indexes/querying/morelikethis.mdx#options) + * [Stop Words](../../indexes/querying/morelikethis.mdx#stop-words) + * [Remarks](../../indexes/querying/morelikethis.mdx#remarks) + + +## Setup + +To be able to work, `more_like_this` requires access to the index text. +The queried index needs, therefore, to [store](../../indexes/storing-data-in-index.mdx) +the fields or the [term vectors](../../indexes/using-term-vectors.mdx) for these fields. + + + +{`from ravendb import AbstractIndexCreationTask, MoreLikeThisOptions +from ravendb.documents.indexes.definitions import FieldStorage +from ravendb.documents.queries.more_like_this import MoreLikeThisStopWords + +from examples_base import ExampleBase + + +class Article: + def __init__(self, Id: str = None, name: str = None, article_type: str = None): + self.Id = Id + self.name = name + self.article_type = article_type + + +class Articles_ByArticleBody(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + self.map = "from doc in docs.Articles select \{ doc.article_body \}" + self._store("article_body", FieldStorage.YES) + self._analyze("article_body", "StandardAnalyzer") +`} + + + + + +## Basic Usage + +Many `more_like_this` options are set by default. +The simplest mode will satisfy most usage scenarios. + + + + +{`articles = list( + session.query_index_type(Articles_ByArticleBody, Article).more_like_this( + lambda builder: builder.using_document(lambda x: x.where_equals("id()", "articles/1")) + ) +) +`} + + + + +{`from index 'Articles/ByArticleBody' +where morelikethis(id() = 'articles/1') +`} + + + + +`more_like_this` will use **all** the fields defined in an index. +To use only specific fields, pass these fields in the `MoreLikeThisOptions` fields property. + + + + +{`options = MoreLikeThisOptions(fields=["article_body"]) +articles = list( + session.query_index_type(Articles_ByArticleBody, Article).more_like_this( + lambda builder: builder.using_document( + lambda x: x.where_equals("id()", "articles/1") + ).with_options(options) + ) +) +`} + + + + +{`from index 'Articles/ByArticleBody' +where morelikethis(id() = 'articles/1', '{ "Fields" : [ "ArticleBody" ] }') +`} + + + + + + +## Options + +Default parameters can be changed by manipulating `MoreLikeThisOptions` properties and passing them +to `more_like_this`. 
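+
+For instance, here is a minimal sketch that tunes a few of these options at once.
+This is an illustration only; it assumes `MoreLikeThisOptions` accepts the option names
+from the table below as constructor keyword arguments, just like `fields` in the examples above:
+
+```python
+# A hypothetical tuning example (not from the official samples):
+options = MoreLikeThisOptions(
+    fields=["article_body"],   # compare only the article body
+    minimum_term_frequency=2,  # ignore terms appearing only once in the source doc
+    maximum_query_terms=25,    # cap the number of terms in the generated query
+)
+
+articles = list(
+    session.query_index_type(Articles_ByArticleBody, Article).more_like_this(
+        lambda builder: builder.using_document(
+            lambda x: x.where_equals("id()", "articles/1")
+        ).with_options(options)
+    )
+)
+```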
+
+| Option | Type | Description |
+| ------------- | ------------- | ----- |
+| **minimum_term_frequency** | `int` | Ignores terms that appear in the source doc fewer times than this |
+| **maximum_query_terms** | `int` | Returns a query with no more than this many terms |
+| **maximum_number_of_tokens_parsed** | `int` | The maximum number of tokens to parse in each example doc field that is not stored with TermVector support |
+| **minimum_word_length** | `int` | Ignores words shorter than this length; if 0, this has no effect |
+| **maximum_word_length** | `int` | Ignores words longer than this length; if 0, this has no effect |
+| **minimum_document_frequency** | `int` | Ignores words that do not occur in at least this many documents |
+| **maximum_document_frequency** | `int` | Ignores words that occur in more than this many documents |
+| **maximum_document_frequency_percentage** | `int` | Ignores words that occur in more than this percentage of documents |
+| **boost** | `bool` | Whether to boost terms in the query based on their score |
+| **boost_factor** | `float` | The boost factor to use when boosting based on score |
+| **stop_words_document_id** | `str` | ID of a document containing custom stop words |
+| **fields** | `List[str]` | Fields to compare |
+
+
+
+## Stop Words
+
+Some Lucene analyzers have a built-in list of common English words that are usually not useful
+for searching, like "a", "as", "the", etc.
+These words, called *stop words*, are considered uninteresting and are ignored.
+If the analyzer in use does not support *stop words*, or if you need to override these terms,
+you can specify your own set of stop words.
+To do so, store a `MoreLikeThisStopWords` document that holds the list of stop words in RavenDB:
+
+
+
+{`stop_words = MoreLikeThisStopWords(stop_words=["I", "A", "Be"])
+session.store(stop_words, "Config/Stopwords")
+`}
+
+
+
+The document ID can then be assigned to the `stop_words_document_id` property of `MoreLikeThisOptions`.
+
+
+
+## Remarks
+
+
+Please note that the default values of settings such as `minimum_document_frequency`, `minimum_term_frequency`,
+and `minimum_word_length` may result in related articles being filtered out, especially with a small data set
+(e.g. during development).
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_paging-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_paging-csharp.mdx
new file mode 100644
index 0000000000..7bba94c71c
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_paging-csharp.mdx
@@ -0,0 +1,783 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Paging**:
+  Paging is the process of fetching a subset (a page) of results from a dataset, rather than retrieving the entire result set at once.
+  This method enables processing query results one page at a time.
+
+* **Default page size**:
+
+  * Querying **Lucene** indexes:
+    If the client's query definition does Not explicitly specify the page size, the server will default to `int.MaxValue` (2,147,483,647).
+    In such a case, all results will be returned in a single server call.
+
+  * Querying **Corax** indexes:
+    The default page size is the same as the one employed by Lucene.
+    Note: when using [Corax](../../indexes/search-engine/corax.mdx) as the search engine, indexes with more than `int.MaxValue` entries can be created and used.
+ To match this capacity, queries over Corax indexes can skip a number of results that exceed this max value and take documents from that location. + +* **Performance**: + Using paging is beneficial when handling large result datasets, contributing to improved performance. + See [paging and performance](../../indexes/querying/paging.mdx#paging-and-performance) here below. + +* In this page: + + * [No-paging example](../../indexes/querying/paging.mdx#no---paging-example) + * [Paging examples](../../indexes/querying/paging.mdx#paging-examples) + * [Paging and performance](../../indexes/querying/paging.mdx#paging-and-performance) + * [Paging through tampered results](../../indexes/querying/paging.mdx#paging-through-tampered-results) + + +## No-paging example + + + + +{`// A simple query without paging: +// ============================== + +List allResults = session + .Query() + .Where(x => x.UnitsInStock > 10) + .OfType() + .ToList(); + +// Executing the query on the Northwind sample data +// will result in all 47 Product documents that match the query predicate. +`} + + + + +{`// A simple query without paging: +// ============================== + +List allResults = await asyncSession + .Query() + .Where(x => x.UnitsInStock > 10) + .OfType() + .ToListAsync(); + +// Executing the query on the Northwind sample data +// will result in all 47 Product documents that match the query predicate. +`} + + + + +{`// A simple DocumentQuery without paging: +// ====================================== + +List allResults = session.Advanced + .DocumentQuery() + .WhereGreaterThan(x => x.UnitsInStock, 10) + .OfType() + .ToList(); + +// Executing the query on the Northwind sample data +// will result in all 47 Product documents that match the query predicate. +`} + + + + +{`public class Products_ByUnitsInStock : AbstractIndexCreationTask +{ + public class IndexEntry + { + public int UnitsInStock { get; set; } + } + + public Products_ByUnitsInStock() + { + Map = products => from product in products + select new + { + UnitsInStock = product.UnitsInStock + }; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +`} + + + + + + +## Paging examples + +#### Retrieve a specific page: + + + + +{`// Retrieve only the 3'rd page - when page size is 10: +// =================================================== + +List thirdPageResults = session + .Query() + // Get the query stats if you wish to know the TOTAL number of results + .Statistics(out QueryStatistics stats) + // Apply some filtering condition as needed + .Where(x => x.UnitsInStock > 10) + .OfType() + // Call 'Skip', pass the number of items to skip from the beginning of the result set + // Skip the first 20 resulting documents + .Skip(20) + // Call 'Take' to define the number of documents to return + // Take up to 10 products => so 10 is the "Page Size" + .Take(10) + .ToList(); + +// When executing this query on the Northwind sample data, +// results will include only 10 Product documents ("products/45-A" to "products/54-A") + +long totalResults = stats.TotalResults; + +// While the query returns only 10 results, +// \`totalResults\` will hold the total number of matching documents (47). 
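+
+// A sketch (not part of the original example): the page count could be
+// derived from the stats, e.g.:
+// long totalPages = (totalResults + 9) / 10;  // 47 matches => 5 pages of 10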
+`} + + + + +{`// Retrieve only the 3'rd page - when page size is 10: +// =================================================== + +List thirdPageResults = await asyncSession + .Query() + // Get the query stats if you wish to know the TOTAL number of results + .Statistics(out QueryStatistics stats) + // Apply some filtering condition as needed + .Where(x => x.UnitsInStock > 10) + .OfType() + // Call 'Skip', pass the number of items to skip from the beginning of the result set + // Skip the first 20 resulting documents + .Skip(20) + // Call 'Take' to define the number of documents to return + // Take up to 10 products => so 10 is the "Page Size" + .Take(10) + .ToListAsync(); + +// When executing this query on the Northwind sample data, +// results will include only 10 Product documents ("products/45-A" to "products/54-A") + +long totalResults = stats.TotalResults; + +// While the query returns only 10 results, +// \`totalResults\` will hold the total number of matching documents (47). +`} + + + + +{`// Retrieve only the 3'rd page - when page size is 10: +// =================================================== + +List thirdPageResults = session.Advanced + .DocumentQuery() + // Get the query stats if you wish to know the TOTAL number of results + .Statistics(out QueryStatistics stats) + // Apply some filtering condition as needed + .WhereGreaterThan(x => x.UnitsInStock, 10) + .OfType() + // Call 'Skip', pass the number of items to skip from the beginning of the result set + // Skip the first 20 resulting documents + .Skip(20) + // Call 'Take' to define the number of documents to return + // Take up to 10 products => so 10 is the "Page Size" + .Take(10) + .ToList(); + +// When executing this query on the Northwind sample data, +// results will include only 10 Product documents ("products/45-A" to "products/54-A") + +long totalResults = stats.TotalResults; + +// While the query returns only 10 results, +// \`totalResults\` will hold the total number of matching documents (47). +`} + + + + +{`public class Products_ByUnitsInStock : AbstractIndexCreationTask +{ + public class IndexEntry + { + public int UnitsInStock { get; set; } + } + + public Products_ByUnitsInStock() + { + Map = products => from product in products + select new + { + UnitsInStock = product.UnitsInStock + }; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +limit 20, 10 // skip 20, take 10 +`} + + + +#### Page through all results: + + + + +{`// Query for all results - page by page: +// ===================================== + +List pagedResults; +int pageNumber = 0; +int pageSize = 10; + +do +{ + pagedResults = session + .Query() + // Apply some filtering condition as needed + .Where(x => x.UnitsInStock > 10) + .OfType() + // Skip the number of results that were already fetched + .Skip(pageNumber * pageSize) + // Request to get 'pageSize' results + .Take(pageSize) + .ToList(); + + pageNumber++; + + // Make any processing needed with the current paged results here + // ... 
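+    // e.g., a hypothetical handler (illustration only):
+    // pagedResults.ForEach(product => ProcessProduct(product));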
+} +while (pagedResults.Count > 0); // Fetch next results +`} + + + + +{`// Query for all results - page by page: +// ===================================== + +List pagedResults; +int pageNumber = 0; +int pageSize = 10; + +do +{ + pagedResults = await asyncSession + .Query() + // Apply some filtering condition as needed + .Where(x => x.UnitsInStock > 10) + .OfType() + // Skip the number of results that were already fetched + .Skip(pageNumber * pageSize) + // Request to get 'pageSize' results + .Take(pageSize) + .ToListAsync(); + + pageNumber++; + + // Make any processing needed with the current paged results here + // ... +} +while (pagedResults.Count > 0); // Fetch next results +`} + + + + +{`// Query for all results - page by page: +// ===================================== + +List pagedResults; +int pageNumber = 0; +int pageSize = 10; + +do +{ + pagedResults = session.Advanced + .DocumentQuery() + // Apply some filtering condition as needed + .WhereGreaterThan(x => x.UnitsInStock, 10) + .OfType() + // Skip the number of results that were already fetched + .Skip(pageNumber * pageSize) + // Request to get 'pageSize' results + .Take(pageSize) + .ToList(); + + pageNumber++; + + // Make any processing needed with the current paged results here + // ... +} +while (pagedResults.Count > 0); // Fetch next results +`} + + + + +{`public class Products_ByUnitsInStock : AbstractIndexCreationTask +{ + public class IndexEntry + { + public int UnitsInStock { get; set; } + } + + public Products_ByUnitsInStock() + { + Map = products => from product in products + select new + { + UnitsInStock = product.UnitsInStock + }; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +limit 0, 10 // First loop will skip 0, take 10 + +// The next loops in the code will each generate the above RQL with an increased 'skip' value: +// limit 10, 10 +// limit 20, 10 +// limit 30, 10 +// ... +`} + + + + + + +## Paging and performance + +#### Better performance: + +It is recommended to explicitly set a page size when making a query that is expected to generate a significant number of results. +This practice has several benefits: + +* Optimizes bandwidth usage by reducing data transfer between the server and client. +* Prevents delays in response times caused by sending too much data over the network. +* Avoids high memory consumption when dealing with numerous documents. +* Ensures a more manageable user experience by not overwhelming users with massive datasets at once. +#### Performance hints: + +* By default, if the number of returned results exceeds **2048**, the server will issue a "Page size too big" notification (visible in the Studio) with information about the query. + +* This threshold can be customized by modifying the value of the [PerformanceHints.MaxNumberOfResults](../../server/configuration/performance-hints-configuration.mdx#performancehintsmaxnumberofresults) configuration key. + +* As suggested by the hint, you may consider using [Streaming query results](../../client-api/session/querying/how-to-stream-query-results.mdx) instead of paging. + +![Figure 1. Performance Hint](./assets/performance-hint.png) + + + +## Paging through tampered results + +* The `QueryStatistics` object contains the `TotalResults` property, + which represents the total number of matching documents found in the query results. + +* The `QueryStatistics` object also contains the `SkippedResults` property. 
+ Whenever this property is greater than **0**, that implies the server has skipped that number of results from the index. + +* The server will skip duplicate results internally in the following two scenarios: + + 1. When making a [Projection query](../../indexes/querying/projections.mdx) with [Distinct](../../indexes/querying/distinct.mdx). + + 2. When querying a [Fanout index](../../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document). + +* In those cases: + + * The `SkippedResults` property from the stats object will hold the count of skipped (duplicate) results. + + * The `TotalResults` property will be invalidated - + it will Not deduct the number of skipped results from the total number of results. + +* In order to do proper paging in those scenarios: + include the `SkippedResults` value when specifying the number of documents to skip for each page using: + `(currentPage * pageSize) + SkippedResults`. + +## Examples + +#### A projection query with Distinct: + + + + +{`List pagedResults; + +long totalResults = 0; +long skippedResults = 0; +int totalUniqueResults = 0; + +int pageNumber = 0; +int pageSize = 10; + +do +{ + pagedResults = session + .Query() + .Statistics(out QueryStatistics stats) + .Where(x => x.UnitsInStock > 10) + .OfType() + // Define a projection + .Select(x => new ProjectedClass + { + Category = x.Category, + Supplier = x.Supplier + }) + // Call Distinct to remove duplicate projected results + .Distinct() + // Add the number of skipped results to the "start location" + .Skip((pageNumber * pageSize) + skippedResults) + // Define how many items to return + .Take(pageSize) + .ToList(); + + totalResults = stats.TotalResults; // Number of total matching documents (includes duplicates) + skippedResults += stats.SkippedResults; // Number of duplicate results that were skipped + totalUniqueResults += pagedResults.Count; // Number of unique results returned in this server call + + pageNumber++; +} +while (pagedResults.Count > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total matching results reported in the stats is 47 (totalResults), +// but the total unique objects returned while paging the results is only 29 (totalUniqueResults) +// due to the 'Distinct' usage which removes duplicates. + +// This is solved by adding the skipped results count to Skip(). 
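+
+// For example: with a page size of 10, page 2 starts at (2 * 10) + skippedResults
+// rather than at 20, compensating for the duplicates the server filtered out.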
+`} + + + + +{`List pagedResults; + +long totalResults = 0; +long skippedResults = 0; +int totalUniqueResults = 0; + +int pageNumber = 0; +int pageSize = 10; + +do +{ + pagedResults = await asyncSession + .Query() + .Statistics(out QueryStatistics stats) + .Where(x => x.UnitsInStock > 10) + .OfType() + // Define a projection + .Select(x => new ProjectedClass + { + Category = x.Category, + Supplier = x.Supplier + }) + // Call Distinct to remove duplicate projected results + .Distinct() + // Add the number of skipped results to the "start location" + .Skip((pageNumber * pageSize) + skippedResults) + .Take(pageSize) + .ToListAsync(); + + totalResults = stats.TotalResults; // Number of total matching documents (includes duplicates) + skippedResults += stats.SkippedResults; // Number of duplicate results that were skipped + totalUniqueResults += pagedResults.Count; // Number of unique results returned in this server call + + pageNumber++; +} +while (pagedResults.Count > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total matching results reported in the stats is 47 (totalResults), +// but the total unique objects returned while paging the results is only 29 (totalUniqueResults) +// due to the 'Distinct' usage which removes duplicates. + +// This is solved by adding the skipped results count to Skip(). +`} + + + + +{`List pagedResults; + +long totalResults = 0; +long skippedResults = 0; +int totalUniqueResults = 0; + +int pageNumber = 0; +int pageSize = 10; + +do +{ + pagedResults = session.Advanced + .DocumentQuery() + .Statistics(out QueryStatistics stats) + .WhereGreaterThan(x => x.UnitsInStock, 10) + .OfType() + // Define a projection + .SelectFields() + // Call Distinct to remove duplicate projected results + .Distinct() + // Add the number of skipped results to the "start location" + .Skip((pageNumber * pageSize) + skippedResults) + .Take(pageSize) + .ToList(); + + totalResults = stats.TotalResults; // Number of total matching documents (includes duplicates) + skippedResults += stats.SkippedResults; // Number of duplicate results that were skipped + totalUniqueResults += pagedResults.Count; // Number of unique results returned in this server call + + pageNumber++; +} +while (pagedResults.Count > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total matching results reported in the stats is 47 (totalResults), +// but the total unique objects returned while paging the results is only 29 (totalUniqueResults) +// due to the 'Distinct' usage which removes duplicates. + +// This is solved by adding the skipped results count to Skip(). +`} + + + + +{`public class Products_ByUnitsInStock : AbstractIndexCreationTask +{ + public class IndexEntry + { + public int UnitsInStock { get; set; } + } + + public Products_ByUnitsInStock() + { + Map = products => from product in products + select new + { + UnitsInStock = product.UnitsInStock + }; + } +} +`} + + + + +{`public class ProjectedClass +{ + public string Category { get; set; } + public string Supplier { get; set; } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +select distinct Category, Supplier +limit 0, 10 // First loop will skip 0, take 10, etc. 
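+// Later loops: limit (pageNumber * 10) + skippedResults, 10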
+`} + + + + +#### Querying a Fanout index: + + + + +{`List pagedResults; + +long totalResults = 0; +long skippedResults = 0; +int totalUniqueResults = 0; + +int pageNumber = 0; +int pageSize = 50; + +do +{ + pagedResults = session + .Query() + .Statistics(out QueryStatistics stats) + .OfType() + // Add the number of skipped results to the "start location" + .Skip((pageNumber * pageSize) + skippedResults) + .Take(pageSize) + .ToList(); + + totalResults = stats.TotalResults; + skippedResults += stats.SkippedResults; + totalUniqueResults += pagedResults.Count; + + pageNumber++; +} +while (pagedResults.Count > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total results reported in the stats is 2155 (totalResults), +// which represent the multiple index-entries generated as defined by the fanout index. + +// By adding the skipped results count to the Skip() method, +// we get the correct total unique results which is 830 Order documents. +`} + + + + +{`List pagedResults; + +long totalResults = 0; +long skippedResults = 0; +int totalUniqueResults = 0; + +int pageNumber = 0; +int pageSize = 50; + +do +{ + pagedResults = await asyncSession + .Query() + .Statistics(out QueryStatistics stats) + .OfType() + // Add the number of skipped results to the "start location" + .Skip((pageNumber * pageSize) + skippedResults) + .Take(pageSize) + .ToListAsync(); + + totalResults = stats.TotalResults; + skippedResults += stats.SkippedResults; + totalUniqueResults += pagedResults.Count; + + pageNumber++; +} +while (pagedResults.Count > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total results reported in the stats is 2155 (totalResults), +// which represent the multiple index-entries generated as defined by the fanout index. + +// By adding the skipped results count to the Skip() method, +// we get the correct total unique results which is 830 Order documents. +`} + + + + +{`List pagedResults; + +long totalResults = 0; +long skippedResults = 0; +int totalUniqueResults = 0; + +int pageNumber = 0; +int pageSize = 50; + +do +{ + pagedResults = session.Advanced + .DocumentQuery() + .Statistics(out QueryStatistics stats) + .OfType() + // Add the number of skipped results to the "start location" + .Skip((pageNumber * pageSize) + skippedResults) + .Take(pageSize) + .ToList(); + + totalResults = stats.TotalResults; + skippedResults += stats.SkippedResults; + totalUniqueResults += pagedResults.Count; + + pageNumber++; +} +while (pagedResults.Count > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total results reported in the stats is 2155 (totalResults), +// which represent the multiple index-entries generated as defined by the fanout index. + +// By adding the skipped results count to the Skip() method, +// we get the correct total unique results which is 830 Order documents. 
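+
+// (In the Northwind data, those 830 Order documents hold 2155 order lines in total,
+// and the fanout index creates one index-entry per line - hence 2155 index-entries.)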
+`} + + + + +{`// A fanout index - creating MULTIPLE index-entries per document: +// ============================================================== + +public class Orders_ByProductName : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string ProductName { get; set; } + } + + public Orders_ByProductName() + { + Map = orders => + from order in orders + from line in order.Lines + select new IndexEntry + { + ProductName = line.ProductName + }; + } +} +`} + + + + +{`from index "Orders/ByProductName" +limit 0, 50 // First loop will skip 0, take 50, etc. +`} + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_paging-java.mdx b/versioned_docs/version-7.1/indexes/querying/_paging-java.mdx new file mode 100644 index 0000000000..c69b33f0d7 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_paging-java.mdx @@ -0,0 +1,307 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Paging**: + Paging is the process of fetching a subset (a page) of results from a dataset, rather than retrieving the entire results at once. + This method enables processing query results one page at a time. + +* **Default page size**: + + * Querying **Lucene** indexes: + If the client's query definition does Not explicitly specify the page size, the server will default to `int.MaxValue` (2,147,483,647). + In such case, all results will be returned in a single server call. + + * Querying **Corax** indexes: + The default page size is the same as the one employed by Lucene. + Note: when using [Corax](../../indexes/search-engine/corax.mdx) as the search engine, indexes with more than `int.MaxValue` entries can be created and used. + To match this capacity, queries over Corax indexes can skip a number of results that exceed this max value and take documents from that location. + +* **Performance**: + Using paging is beneficial when handling large result datasets, contributing to improved performance. + See [paging and performance](../../indexes/querying/paging.mdx#paging-and-performance) here below. + +* In this page: + + * [No-paging example](../../indexes/querying/paging.mdx#no---paging-example) + * [Paging examples](../../indexes/querying/paging.mdx#paging-examples) + * [Paging and performance](../../indexes/querying/paging.mdx#paging-and-performance) + * [Paging through tampered results](../../indexes/querying/paging.mdx#paging-through-tampered-results) + + +## No-paging example + +The queries below will return all the results available. + + + + +{`List results = session + .query(Product.class, Products_ByUnitsInStock.class) + .whereGreaterThan("UnitsInStock", 10) + .toList(); +`} + + + + +{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask { + public Products_ByUnitsInStock() { + map = "docs.Products.Select(product => new {" + + " UnitsInStock = product.UnitsInStock" + + "})"; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +`} + + + + + + +## Paging examples + +#### Basic paging: + +Let's assume that our page size is `10`, and we want to retrieve the 3rd page. 
To do this, we need to issue the following query:
+
+
+
+
+{`List<Product> results = session
+    .query(Product.class, Products_ByUnitsInStock.class)
+    .whereGreaterThan("UnitsInStock", 10)
+    .skip(20)   // skip 2 pages worth of products
+    .take(10)   // take up to 10 products
+    .toList();  // execute query
+`}
+
+
+
+
+{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask {
+    public Products_ByUnitsInStock() {
+        map = "docs.Products.Select(product => new {" +
+            " UnitsInStock = product.UnitsInStock" +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index "Products/ByUnitsInStock"
+where UnitsInStock > 10
+limit 20, 10 // skip 20, take 10
+`}
+
+
+
+#### Find the total results count when paging:
+
+While paging, you sometimes need to know the exact number of results returned from the query. The Client API supports this explicitly:
+
+
+
+
+{`Reference<QueryStatistics> stats = new Reference<>();
+
+List<Product> results = session
+    .query(Product.class, Products_ByUnitsInStock.class)
+    .statistics(stats)
+    .whereGreaterThan("UnitsInStock", 10)
+    .skip(20)
+    .take(10)
+    .toList();
+
+int totalResults = stats.value.getTotalResults();
+`}
+
+
+
+
+{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask {
+    public Products_ByUnitsInStock() {
+        map = "docs.Products.Select(product => new {" +
+            " UnitsInStock = product.UnitsInStock" +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index "Products/ByUnitsInStock"
+where UnitsInStock > 10
+limit 20, 10 // skip 20, take 10
+`}
+
+
+
+
+While the query will return just 10 results, `totalResults` will hold the total number of matching documents.
+
+
+
+## Paging and performance
+
+#### Better performance:
+
+It is recommended to explicitly set a page size when making a query that is expected to generate a significant number of results.
+This practice has several benefits:
+
+* Optimizes bandwidth usage by reducing data transfer between the server and client.
+* Prevents delays in response times caused by sending too much data over the network.
+* Avoids high memory consumption when dealing with numerous documents.
+* Ensures a more manageable user experience by not overwhelming users with massive datasets at once.
+
+#### Performance hints:
+
+* By default, if the number of returned results exceeds **2048**, the server will issue a "Page size too big" notification (visible in the Studio) with information about the query.
+
+* This threshold can be customized by modifying the value of the [PerformanceHints.MaxNumberOfResults](../../server/configuration/performance-hints-configuration.mdx#performancehintsmaxnumberofresults) configuration key.
+
+* As suggested by the hint, you may consider using [Streaming query results](../../client-api/session/querying/how-to-stream-query-results.mdx) instead of paging.
+
+![Figure 1. Performance Hint](./assets/performance-hint.png)
+
+
+
+## Paging through tampered results
+
+* The `QueryStatistics` object contains the `TotalResults` property,
+  which represents the total number of matching documents found in the query results.
+
+* The `QueryStatistics` object also contains the `SkippedResults` property.
+  Whenever this property is greater than **0**, that implies the server has skipped that number of results from the index.
+
+* The server will skip duplicate results internally in the following two scenarios:
+
+  1. When making a [Projection query](../../indexes/querying/projections.mdx) with [Distinct](../../indexes/querying/distinct.mdx).
+
+  2.
When querying a [Fanout index](../../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document).
+
+* In those cases:
+
+  * The `SkippedResults` property from the stats object will hold the count of skipped (duplicate) results.
+
+  * The `TotalResults` property will be invalidated -
+    it will Not deduct the number of skipped results from the total number of results.
+
+* In order to do proper paging in those scenarios:
+  include the `SkippedResults` value when specifying the number of documents to skip for each page using:
+  `(currentPage * pageSize) + SkippedResults`.
+
+## Examples
+
+#### A projection query with Distinct:
+
+
+
+
+{`List<Product> results;
+int pageNumber = 0;
+int pageSize = 10;
+int skippedResults = 0;
+Reference<QueryStatistics> stats = new Reference<>();
+
+do {
+
+    results = session
+        .query(Product.class, Products_ByUnitsInStock.class)
+        .statistics(stats)
+        .skip((pageNumber * pageSize) + skippedResults)
+        .take(pageSize)
+        .whereGreaterThan("UnitsInStock", 10)
+        .distinct()
+        .toList();
+
+    skippedResults += stats.value.getSkippedResults();
+    pageNumber++;
+} while (results.size() > 0);
+`}
+
+
+
+
+{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask {
+    public Products_ByUnitsInStock() {
+        map = "docs.Products.Select(product => new {" +
+            " UnitsInStock = product.UnitsInStock" +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index "Products/ByUnitsInStock"
+where UnitsInStock > 10
+select distinct *
+limit 0, 10 // First loop will skip 0, take 10, etc.
+`}
+
+
+
+
+#### Querying a Fanout index:
+
+
+
+
+{`List<Order> results;
+int pageNumber = 0;
+int pageSize = 50;
+int skippedResults = 0;
+Reference<QueryStatistics> stats = new Reference<>();
+
+do {
+    results = session
+        .query(Order.class, Orders_ByOrderLines_ProductName.class)
+        .statistics(stats)
+        .skip((pageNumber * pageSize) + skippedResults)
+        .take(pageSize)
+        .toList();
+
+    skippedResults += stats.value.getSkippedResults();
+    pageNumber++;
+} while (results.size() > 0);
+`}
+
+
+
+
+{`public static class Orders_ByOrderLines_ProductName extends AbstractIndexCreationTask {
+    public Orders_ByOrderLines_ProductName() {
+        map = "docs.Orders.SelectMany(order => order.Lines, (order, line) => new {" +
+            " Product = line.ProductName " +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index "Orders/ByOrderLines/ProductName"
+limit 0, 50 // First loop will skip 0, take 50, etc.
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_paging-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_paging-nodejs.mdx
new file mode 100644
index 0000000000..253f2a5e98
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_paging-nodejs.mdx
@@ -0,0 +1,400 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Paging**:
+  Paging is the process of fetching a subset (a page) of results from a dataset, rather than retrieving the entire result set at once.
+  This method enables processing query results one page at a time.
+
+* **Default page size**:
+
+  * Querying **Lucene** indexes:
+    If the client's query definition does Not explicitly specify the page size, the server will default to `2,147,483,647` (equivalent to `int.MaxValue` in C#).
+    In such a case, all results will be returned in a single server call.
+
+  * Querying **Corax** indexes:
+    The default page size is the same as the one employed by Lucene.
+ Note: when using [Corax](../../indexes/search-engine/corax.mdx) as the search engine, indexes with more than `2,147,483,647` entries can be created and used. + To match this capacity, queries over Corax indexes can skip a number of results that exceed this max value and take documents from that location. + +* **Performance**: + Using paging is beneficial when handling large result datasets, contributing to improved performance. + See [paging and performance](../../indexes/querying/paging.mdx#paging-and-performance) here below. + +* In this page: + + * [No-paging example](../../indexes/querying/paging.mdx#no---paging-example) + * [Paging examples](../../indexes/querying/paging.mdx#paging-examples) + * [Paging and performance](../../indexes/querying/paging.mdx#paging-and-performance) + * [Paging through tampered results](../../indexes/querying/paging.mdx#paging-through-tampered-results) + + +## No-paging example + + + + +{`// A simple query without paging: +// ============================== + +const allResults = await session + .query({ indexName: "Products/ByUnitsInStock" }) + .whereGreaterThan("UnitsInStock", 10) + .all(); + +// Executing the query on the Northwind sample data +// will result in all 47 Product documents that match the query predicate. +`} + + + + +{`class Products_ByUnitsInStock extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Products", p => ({ + UnitsInStock: p.UnitsInStock + })); + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +`} + + + + + + +## Paging examples + +#### Retrieve a specific page: + + + + +{`// Retrieve only the 3'rd page - when page size is 10: +// =================================================== + +// Define an output param for getting the query stats +let stats; + +const thirdPageResults = await session + .query({ indexName: "Products/ByUnitsInStock" }) + // Get the query stats if you wish to know the TOTAL number of results + .statistics(s => stats = s) + // Apply some filtering condition as needed + .whereGreaterThan("UnitsInStock", 10) + // Call 'Skip', pass the number of items to skip from the beginning of the result set + // Skip the first 20 resulting documents + .skip(20) + // Call 'Take' to define the number of documents to return + // Take up to 10 products => so 10 is the "Page Size" + .take(10) + .all(); + +// When executing this query on the Northwind sample data, +// results will include only 10 Product documents ("products/45-A" to "products/54-A") + +const totalResults = stats.totalResults; + +// While the query returns only 10 results, +// \`totalResults\` will hold the total number of matching documents (47). 
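+
+// A sketch (not part of the original example): the page count could be
+// derived from the stats, e.g.:
+// const totalPages = Math.ceil(totalResults / 10); // 47 matches => 5 pages of 10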
+`} + + + + +{`class Products_ByUnitsInStock extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Products", p => ({ + UnitsInStock: p.UnitsInStock + })); + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +limit 20, 10 // skip 20, take 10 +`} + + + +#### Page through all results: + + + + +{`// Query for all results - page by page: +// ===================================== + +const PAGE_SIZE = 10; +let pageNumber = 0; +let pagedResults; + +do { + pagedResults = await session + .query({ indexName: "Products/ByUnitsInStock" }) + // Apply some filtering condition as needed + .whereGreaterThan("UnitsInStock", 10) + // Skip the number of results that were already fetched + .skip(pageNumber * PAGE_SIZE) + // Request to get 'pageSize' results + .take(PAGE_SIZE) + .all(); + + pageNumber++; + + // Make any processing needed with the current paged results here + // ... +} +while (pagedResults.length > 0); // Fetch next results +`} + + + + +{`class Products_ByUnitsInStock extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Products", p => ({ + UnitsInStock: p.UnitsInStock + })); + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +limit 0, 10 // First loop will skip 0, take 10 + +// The next loops in the code will each generate the above RQL with an increased 'skip' value: +// limit 10, 10 +// limit 20, 10 +// limit 30, 10 +// ... +`} + + + + + + +## Paging and performance +**Better performance**: + +It is recommended to explicitly set a page size when making a query that is expected to generate a significant number of results. +This practice has several benefits: + +* Optimizes bandwidth usage by reducing data transfer between the server and client. +* Prevents delays in response times caused by sending too much data over the network. +* Avoids high memory consumption when dealing with numerous documents. +* Ensures a more manageable user experience by not overwhelming users with massive datasets at once. +**Performance hints**: + +* By default, if the number of returned results exceeds **2048**, the server will issue a "Page size too big" notification (visible in the Studio) with information about the query. + +* This threshold can be customized by modifying the value of the [PerformanceHints.MaxNumberOfResults](../../server/configuration/performance-hints-configuration.mdx#performancehintsmaxnumberofresults) configuration key. + +* As suggested by the hint, you may consider using [Streaming query results](../../client-api/session/querying/how-to-stream-query-results.mdx) instead of paging. + +![Figure 1. Performance Hint](./assets/performance-hint.png) + + + +## Paging through tampered results + +* The `QueryStatistics` object contains the `totalResults` property, + which represents the total number of matching documents found in the query results. + +* The `QueryStatistics` object also contains the `skippedResults` property. + Whenever this property is greater than **0**, that implies the server has skipped that number of results from the index. + +* The server will skip duplicate results internally in the following two scenarios: + + 1. When making a [Projection query](../../indexes/querying/projections.mdx) with [distinct](../../indexes/querying/distinct.mdx). + + 2. When querying a [Fanout index](../../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document). 
+ +* In those cases: + + * The `skippedResults` property from the stats object will hold the count of skipped (duplicate) results. + + * The `totalResults` property will be invalidated - + it will Not deduct the number of skipped results from the total number of results. + +* In order to do proper paging in those scenarios: + include the `skippedResults` value when specifying the number of documents to skip for each page using: + `(currentPage * pageSize) + skippedResults`. + +## Examples + +#### A projection query with Distinct: + + + + +{`let pagedResults; +let stats; + +let totalResults = 0; +let totalUniqueResults = 0; +let skippedResults = 0; + +let pageNumber = 0; +const PAGE_SIZE = 10; + +do { + pagedResults = await session + .query({ indexName: "Products/ByUnitsInStock" }) + .statistics(s => stats = s) + .whereGreaterThan("UnitsInStock", 10) + // Define a projection + .selectFields(["Category", "Supplier"]) + // Call Distinct to remove duplicate projected results + .distinct() + // Add the number of skipped results to the "start location" + .skip((pageNumber * PAGE_SIZE) + skippedResults) + // Define how many items to return + .take(PAGE_SIZE) + .all(); + + totalResults = stats.totalResults; // Number of total matching documents (includes duplicates) + skippedResults += stats.skippedResults; // Number of duplicate results that were skipped + totalUniqueResults += pagedResults.length; // Number of unique results returned in this server call + + pageNumber++; +} +while (pagedResults.length > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total matching results reported in the stats is 47 (totalResults), +// but the total unique objects returned while paging the results is only 29 (totalUniqueResults) +// due to the 'distinct' usage which removes duplicates. + +// This is solved by adding the skipped results count to skip(). +`} + + + + +{`class Products_ByUnitsInStock extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Products", p => ({ + UnitsInStock: p.UnitsInStock + })); + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +select distinct Category, Supplier +limit 0, 10 // First loop will skip 0, take 10, etc. +`} + + + + +#### Querying a Fanout index: + + + + +{`let pagedResults; +let stats; + +let totalResults = 0; +let totalUniqueResults = 0; +let skippedResults = 0; + +let pageNumber = 0; +const PAGE_SIZE = 50; + +do { + pagedResults = await session + .query({ indexName: "Orders/ByProductName" }) + .statistics(s => stats = s) + // Add the number of skipped results to the "start location" + .skip((pageNumber * PAGE_SIZE) + skippedResults) + .take(PAGE_SIZE) + .all(); + + totalResults = stats.totalResults; + skippedResults += stats.skippedResults; + totalUniqueResults += pagedResults.length; + + pageNumber++; +} +while (pagedResults.length > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total results reported in the stats is 2155 (totalResults), +// which represent the multiple index-entries generated as defined by the fanout index. + +// By adding the skipped results count to the Skip() method, +// we get the correct total unique results which is 830 Order documents. 
+`} + + + + +{`// A fanout index - creating MULTIPLE index-entries per document: +// ============================================================== + +class Orders_ByProductName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Orders", order => { + return order.Lines.map(line => { + return { + ProductName: line.ProductName + }; + }); + }); + } +} +`} + + + + +{`from index "Orders/ByProductName" +limit 0, 50 // First loop will skip 0, take 50, etc. +`} + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_paging-php.mdx b/versioned_docs/version-7.1/indexes/querying/_paging-php.mdx new file mode 100644 index 0000000000..596a8e12fd --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_paging-php.mdx @@ -0,0 +1,688 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* **Paging**: + Paging is the process of fetching a subset (a page) of results from a dataset, rather than retrieving the entire results at once. + This method enables processing query results one page at a time. + +* **Default page size**: + + * Querying **Lucene** indexes: + If the client's query definition does Not explicitly specify the page size, + the server will default to a C# `int.MaxValue` (2,147,483,647). + In such a case, all results will be returned in a single server call. + + * Querying **Corax** indexes: + The default page size is the same as the one employed by Lucene. + Note: when using [Corax](../../indexes/search-engine/corax.mdx) as the search engine, + indexes with more than a C# `int.MaxValue` entries can be created and used. + To match this capacity, queries over Corax indexes can skip a number of results that exceed this max value and take documents from that location. + +* **Performance**: + Using paging is beneficial when handling large result datasets, contributing to improved performance. + See [paging and performance](../../indexes/querying/paging.mdx#paging-and-performance) here below. + +* In this page: + + * [No-paging example](../../indexes/querying/paging.mdx#no-paging-example) + * [Paging examples](../../indexes/querying/paging.mdx#paging-examples) + * [Paging and performance](../../indexes/querying/paging.mdx#paging-and-performance) + * [Paging through tampered results](../../indexes/querying/paging.mdx#paging-through-tampered-results) + + +## No-paging example + + + + +{`// A simple query without paging: +// ============================== + +/** @var array $allResults */ +$allResults = $session + ->query(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + ->whereGreaterThan("UnitsInStock", 10) + ->ofType(Product::class) + ->toList(); + +// Executing the query on the Northwind sample data +// will result in all 47 Product documents that match the query predicate. +`} + + + + +{`// A simple DocumentQuery without paging: +// ====================================== + +/** @var array $allResults */ +$allResults = $session->advanced() + ->documentQuery(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + ->whereGreaterThan("UnitsInStock", 10) + ->ofType(Product::class) + ->toList(); + +// Executing the query on the Northwind sample data +// will result in all 47 Product documents that match the query predicate. 
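+
+// (Illustration note: without skip()/take() - shown in the paging examples below -
+// all 47 matching documents travel to the client in one server call.)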
+`} + + + + +{`class Products_ByUnitsInStock_IndexEntry +{ + private ?int $unitsInStock = null; + + public function getUnitsInStock(): ?int + { + return $this->unitsInStock; + } + + public function setUnitsInStock(?int $unitsInStock): void + { + $this->unitsInStock = $unitsInStock; + } +} + +class Products_ByUnitsInStock extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Products.Select(product => new {" . + " UnitsInStock = product.UnitsInStock" . + "})"; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +`} + + + + + + +## Paging examples + +#### Retrieve a specific page: + + + + +{`// Retrieve only the 3'rd page - when page size is 10: +// =================================================== + +$stats = new QueryStatistics(); + +/** @var array $thirdPageResults */ +$thirdPageResults = $session + ->query(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + // Get the query stats if you wish to know the TOTAL number of results + ->statistics($stats ) + // Apply some filtering condition as needed + ->whereGreaterThan("UnitsInStock", 10) + ->ofType(Product::class) + // Call 'Skip', pass the number of items to skip from the beginning of the result set + // Skip the first 20 resulting documents + ->skip(20) + // Call 'Take' to define the number of documents to return + // Take up to 10 products => so 10 is the "Page Size" + ->take(10) + ->toList(); + +// When executing this query on the Northwind sample data, +// results will include only 10 Product documents ("products/45-A" to "products/54-A") + +$totalResults = $stats->getTotalResults(); + +// While the query returns only 10 results, +// \`totalResults\` will hold the total number of matching documents (47). +`} + + + + +{`// Retrieve only the 3'rd page - when page size is 10: +// =================================================== + +$stats = new QueryStatistics(); + +/** @var array $thirdPageResults */ +$thirdPageResults = $session->advanced() + ->documentQuery(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + // Get the query stats if you wish to know the TOTAL number of results + ->statistics($stats) + // Apply some filtering condition as needed + ->whereGreaterThan("UnitsInStock", 10) + ->ofType(Product::class) + // Call 'Skip', pass the number of items to skip from the beginning of the result set + // Skip the first 20 resulting documents + ->skip(20) + // Call 'Take' to define the number of documents to return + // Take up to 10 products => so 10 is the "Page Size" + ->take(10) + ->toList(); + +// When executing this query on the Northwind sample data, +// results will include only 10 Product documents ("products/45-A" to "products/54-A") + +$totalResults = $stats->getTotalResults(); + +// While the query returns only 10 results, +// \`totalResults\` will hold the total number of matching documents (47). +`} + + + + +{`class Products_ByUnitsInStock_IndexEntry +{ + private ?int $unitsInStock = null; + + public function getUnitsInStock(): ?int + { + return $this->unitsInStock; + } + + public function setUnitsInStock(?int $unitsInStock): void + { + $this->unitsInStock = $unitsInStock; + } +} + +class Products_ByUnitsInStock extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Products.Select(product => new {" . + " UnitsInStock = product.UnitsInStock" . 
+ "})"; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +limit 20, 10 // skip 20, take 10 +`} + + + +#### Page through all results: + + + + +{`// Query for all results - page by page: +// ===================================== + +$pagedResults = null; +$pageNumber = 0; +$pageSize = 10; + +do +{ + $pagedResults = $session + ->query(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + // Apply some filtering condition as needed + ->whereGreaterThan("UnitsInStock", 10) + ->ofType(Product::class) + // Skip the number of results that were already fetched + ->skip($pageNumber * $pageSize) + // Request to get 'pageSize' results + ->take($pageSize) + ->toList(); + + $pageNumber++; + + // Make any processing needed with the current paged results here + // ... +} +while (count($pagedResults) > 0); // Fetch next results +`} + + + + +{`// Query for all results - page by page: +// ===================================== + +$pagedResults = null; +$pageNumber = 0; +$pageSize = 10; + +do +{ + $pagedResults = $session->advanced() + ->documentQuery(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + // Apply some filtering condition as needed + ->whereGreaterThan("UnitsInStock", 10) + ->ofType(Product::class) + // Skip the number of results that were already fetched + ->skip($pageNumber * $pageSize) + // Request to get 'pageSize' results + ->take($pageSize) + ->toList(); + + $pageNumber++; + + // Make any processing needed with the current paged results here + // ... +} +while (count($pagedResults) > 0); // Fetch next results +`} + + + + +{`class Products_ByUnitsInStock_IndexEntry +{ + private ?int $unitsInStock = null; + + public function getUnitsInStock(): ?int + { + return $this->unitsInStock; + } + + public function setUnitsInStock(?int $unitsInStock): void + { + $this->unitsInStock = $unitsInStock; + } +} + +class Products_ByUnitsInStock extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Products.Select(product => new {" . + " UnitsInStock = product.UnitsInStock" . + "})"; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +limit 0, 10 // First loop will skip 0, take 10 + +// The next loops in the code will each generate the above RQL with an increased 'skip' value: +// limit 10, 10 +// limit 20, 10 +// limit 30, 10 +// ... +`} + + + + + + +## Paging and performance + +#### Better performance: + +It is recommended to explicitly set a page size when making a query that is expected to generate a significant number of results. +This practice has several benefits: + +* Optimizes bandwidth usage by reducing data transfer between the server and client. +* Prevents delays in response times caused by sending too much data over the network. +* Avoids high memory consumption when dealing with numerous documents. +* Ensures a more manageable user experience by not overwhelming users with massive datasets at once. +#### Performance hints: + +* By default, if the number of returned results exceeds **2048**, the server will issue a "Page size too big" notification (visible in the Studio) with information about the query. + +* This threshold can be customized by modifying the value of the [PerformanceHints.MaxNumberOfResults](../../server/configuration/performance-hints-configuration.mdx#performancehintsmaxnumberofresults) configuration key. 
+ +* As suggested by this performance hint, you may consider using [Streaming query results](../../client-api/session/querying/how-to-stream-query-results.mdx) instead of paging. + + ![Figure 1. Performance Hint](./assets/performance-hint.png) + + + +## Paging through tampered results + +* The `QueryStatistics` object contains the `totalResults` property, + which represents the total number of matching documents found in the query results. + +* The `QueryStatistics` object also contains the `skippedResults` property. + Whenever this property is greater than **0**, that implies the server has skipped that number of results from the index. + +* The server will skip duplicate results internally in the following two scenarios: + + 1. When making a [Projection query](../../indexes/querying/projections.mdx) with [Distinct](../../indexes/querying/distinct.mdx). + + 2. When querying a [Fanout index](../../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document). + +* In these cases: + + * The `skippedResults` property from the stats object will hold the count of skipped (duplicate) results. + + * The `totalResults` property will be invalidated - + it will Not deduct the number of skipped results from the total number of results. + +* To do proper paging in these scenarios: + include the `skippedResults` value when specifying the number of documents to skip for each page using: + `(currentPage * pageSize) + skippedResults`. + +## Examples + +#### A projection query with Distinct: + + + + +{`$pagedResults = null; + +$totalResults = 0; +$totalUniqueResults = 0; +$skippedResults = 0; + +$pageNumber = 0; +$pageSize = 10; + +do +{ + $pagedResults = $session + ->query(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + ->statistics($stats) + ->whereGreaterThan("UnitsInStock", 10) + ->ofType(Product::class) + // Define a projection + ->selectFields(ProjectedClass::class) + // Call Distinct to remove duplicate projected results + ->distinct() + // Add the number of skipped results to the "start location" + ->skip(($pageNumber * $pageSize) + $skippedResults) + // Define how many items to return + ->take($pageSize) + ->toList(); + + $totalResults = $stats->getTotalResults(); // Number of total matching documents (includes duplicates) + $skippedResults += $stats->getSkippedResults(); // Number of duplicate results that were skipped + $totalUniqueResults += count($pagedResults); // Number of unique results returned in this server call + + $pageNumber++; +} +while (count($pagedResults) > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total matching results reported in the stats is 47 (totalResults), +// but the total unique objects returned while paging the results is only 29 (totalUniqueResults) +// due to the 'Distinct' usage which removes duplicates. + +// This is solved by adding the skipped results count to Skip(). 
+`} + + + + +{`$pagedResults = null; + +$totalResults = 0; +$totalUniqueResults = 0; +$skippedResults = 0; + +$pageNumber = 0; +$pageSize = 10; + +do +{ + $pagedResults = $session->advanced() + ->documentQuery(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + ->statistics($stats) + ->whereGreaterThan("UnitsInStock", 10) + ->ofType(Product::class) + // Define a projection + ->selectFields(ProjectedClass::class) + // Call Distinct to remove duplicate projected results + ->distinct() + // Add the number of skipped results to the "start location" + ->skip(($pageNumber * $pageSize) + $skippedResults) + ->take($pageSize) + ->toList(); + + $totalResults = $stats->getTotalResults(); // Number of total matching documents (includes duplicates) + $skippedResults += $stats->getSkippedResults(); // Number of duplicate results that were skipped + $totalUniqueResults += count($pagedResults); // Number of unique results returned in this server call + + $pageNumber++; +} +while (count($pagedResults) > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total matching results reported in the stats is 47 (totalResults), +// but the total unique objects returned while paging the results is only 29 (totalUniqueResults) +// due to the 'Distinct' usage which removes duplicates. + +// This is solved by adding the skipped results count to Skip(). +`} + + + + +{`class Products_ByUnitsInStock_IndexEntry +{ + private ?int $unitsInStock = null; + + public function getUnitsInStock(): ?int + { + return $this->unitsInStock; + } + + public function setUnitsInStock(?int $unitsInStock): void + { + $this->unitsInStock = $unitsInStock; + } +} + +class Products_ByUnitsInStock extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Products.Select(product => new {" . + " UnitsInStock = product.UnitsInStock" . + "})"; + } +} +`} + + + + +{`class ProjectedClass +{ + public ?string $category = null; + public ?string $supplier = null; + + public function getCategory(): ?string + { + return $this->category; + } + + public function setCategory(?string $category): void + { + $this->category = $category; + } + + public function getSupplier(): ?string + { + return $this->supplier; + } + + public function setSupplier(?string $supplier): void + { + $this->supplier = $supplier; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +select distinct Category, Supplier +limit 0, 10 // First loop will skip 0, take 10, etc. 
+`} + + + + +#### Querying a Fanout index: + + + + +{`$pagedResults = null; + +$totalResults = 0; +$totalUniqueResults = 0; +$skippedResults = 0; + +$pageNumber = 0; +$pageSize = 50; + +do +{ + $pagedResults = $session + ->query(Orders_ByProductName_IndexEntry::class, Orders_ByProductName::class) + ->statistics($stats) + ->ofType(Order::class) + // Add the number of skipped results to the "start location" + ->skip(($pageNumber * $pageSize) + $skippedResults) + ->take($pageSize) + ->toList(); + + $totalResults = $stats->getTotalResults(); + $skippedResults += $stats->getSkippedResults(); + $totalUniqueResults += count($pagedResults); + + $pageNumber++; +} +while (count($pagedResults) > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total results reported in the stats is 2155 (totalResults), +// which represent the multiple index-entries generated as defined by the fanout index. + +// By adding the skipped results count to the Skip() method, +// we get the correct total unique results which is 830 Order documents. +`} + + + + +{`$pagedResults = null; + +$totalResults = 0; +$totalUniqueResults = 0; +$skippedResults = 0; + +$pageNumber = 0; +$pageSize = 50; + +do +{ + $pagedResults = $session->advanced() + ->documentQuery(Orders_ByProductName_IndexEntry::class, Orders_ByProductName::class) + ->statistics($stats) + ->ofType(Order::class) + // Add the number of skipped results to the "start location" + ->skip(($pageNumber * $pageSize) + $skippedResults) + ->take($pageSize) + ->toList(); + + $totalResults = $stats->getTotalResults(); + $skippedResults += $stats->getSkippedResults(); + $totalUniqueResults += count($pagedResults); + + $pageNumber++; +} +while (count($pagedResults) > 0); // Fetch next results + +// When executing the query on the Northwind sample data: +// ====================================================== + +// The total results reported in the stats is 2155 (totalResults), +// which represent the multiple index-entries generated as defined by the fanout index. + +// By adding the skipped results count to the Skip() method, +// we get the correct total unique results which is 830 Order documents. +`} + + + + +{`// A fanout index - creating MULTIPLE index-entries per document: +// ============================================================== + +class Orders_ByProductName_IndexEntry +{ + private ?string $productName = null; + + public function getProductName(): ?string + { + return $this->productName; + } + + public function setProductName(?string $productName): void + { + $this->productName = $productName; + } +} +class Orders_ByProductName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Orders.SelectMany(order => order.Lines, (order, line) => new {" . + " Product = line.ProductName " . + "})"; + } +} +`} + + + + +{`from index "Orders/ByProductName" +limit 0, 50 // First loop will skip 0, take 50, etc. 
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_paging-python.mdx b/versioned_docs/version-7.1/indexes/querying/_paging-python.mdx
new file mode 100644
index 0000000000..3ca064b801
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_paging-python.mdx
@@ -0,0 +1,431 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* **Paging**:
+  Paging is the process of fetching a subset (a page) of results from a dataset, rather than retrieving the entire result set at once.
+  This method enables processing query results one page at a time.
+
+* **Default page size**:
+
+  * Querying **Lucene** indexes:
+    If the client's query definition does Not explicitly specify the page size,
+    the server will default to a C# `int.MaxValue` (2,147,483,647).
+    In such a case, all results will be returned in a single server call.
+
+  * Querying **Corax** indexes:
+    The default page size is the same as the one employed by Lucene.
+    Note: when using [Corax](../../indexes/search-engine/corax.mdx) as the search engine,
+    indexes with more than `int.MaxValue` entries can be created and used.
+    To match this capacity, queries over Corax indexes can skip a number of results that exceeds this max value and take documents from that location.
+
+* **Performance**:
+  Using paging is beneficial when handling large result datasets, contributing to improved performance.
+  See [paging and performance](../../indexes/querying/paging.mdx#paging-and-performance) below.
+
+* In this page:
+
+  * [No-paging example](../../indexes/querying/paging.mdx#no---paging-example)
+  * [Paging examples](../../indexes/querying/paging.mdx#paging-examples)
+  * [Paging and performance](../../indexes/querying/paging.mdx#paging-and-performance)
+  * [Paging through tampered results](../../indexes/querying/paging.mdx#paging-through-tampered-results)
+
+
+## No-paging example
+
+
+
+
+{`# A simple query without paging:
+# ==============================
+all_results = list(
+    session.query_index_type(Products_ByUnitsInStock, Products_ByUnitsInStock.IndexEntry)
+    .where_greater_than("units_in_stock", 10)
+    .of_type(Product)
+)
+
+# Executing the query on the Northwind sample data
+# will result in all 47 Product documents that match the query predicate.
+`}
+
+
+
+
+{`class Products_ByUnitsInStock(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from product in docs.Products select new { units_in_stock = product.UnitsInStock }"
+
+    class IndexEntry:
+        def __init__(self, units_in_stock: int = None):
+            self.units_in_stock = units_in_stock
+`}
+
+
+
+
+{`from index "Products/ByUnitsInStock"
+where UnitsInStock > 10
+`}
+
+
+
+
+
+
+## Paging examples
+
+#### Retrieve a specific page:
+
+
+
+
+{`# Retrieve only the 3rd page - when page size is 10:
+# ==================================================
+def __stats_callback(statistics: QueryStatistics):
+    total_results = statistics.total_results
+    # While the query below returns only 10 results,
+    # 'total_results' will hold the total number of matching documents (47).
+
+third_page_results = list(
+    session.query_index_type(Products_ByUnitsInStock, Products_ByUnitsInStock.IndexEntry)
+    # Get the query stats if you wish to know the TOTAL number of results
+    .statistics(__stats_callback)
+    # Apply some filtering condition as needed
+    .where_greater_than("units_in_stock", 10).of_type(Product)
+    # Call 'skip', pass the number of items to skip from the beginning of the result set
+    # Skip the first 20 resulting documents
+    .skip(20)
+    # Call 'take' to define the number of documents to return
+    # Take up to 10 products => so 10 is the "Page Size"
+    .take(10)
+)
+
+# When executing this query on the Northwind sample data,
+# results will include only 10 Product documents ("products/45-A" to "products/54-A")
+`}
+
+
+
+
+{`class Products_ByUnitsInStock(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from product in docs.Products select new { units_in_stock = product.UnitsInStock }"
+
+    class IndexEntry:
+        def __init__(self, units_in_stock: int = None):
+            self.units_in_stock = units_in_stock
+`}
+
+
+
+
+{`from index "Products/ByUnitsInStock"
+where UnitsInStock > 10
+limit 20, 10 // skip 20, take 10
+`}
+
+
+
+#### Page through all results:
+
+
+
+
+{`# Query for all results - page by page:
+# =====================================
+paged_results: List[Product] = []
+page_number = 0
+page_size = 10
+
+while True:
+    paged_results = list(
+        session.query_index_type(Products_ByUnitsInStock, Products_ByUnitsInStock.IndexEntry)
+        # Apply some filtering condition as needed
+        .where_greater_than("units_in_stock", 10).of_type(Product)
+        # Skip the number of results that were already fetched
+        .skip(page_number * page_size)
+        # Request to get 'page_size' results
+        .take(page_size)
+    )
+    page_number += 1
+
+    if len(paged_results) == 0:
+        break
+
+    # Make any processing needed with the current paged results here
+    # ...
+`}
+
+
+
+
+{`class Products_ByUnitsInStock(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from product in docs.Products select new { units_in_stock = product.UnitsInStock }"
+
+    class IndexEntry:
+        def __init__(self, units_in_stock: int = None):
+            self.units_in_stock = units_in_stock
+`}
+
+
+
+
+{`from index "Products/ByUnitsInStock"
+where UnitsInStock > 10
+limit 0, 10 // First loop will skip 0, take 10
+
+// The next loops in the code will each generate the above RQL with an increased 'skip' value:
+// limit 10, 10
+// limit 20, 10
+// limit 30, 10
+// ...
+`}
+
+
+
+
+
+
+## Paging and performance
+
+#### Better performance:
+
+It is recommended to explicitly set a page size when making a query that is expected to generate a significant number of results.
+This practice has several benefits: + +* Optimizes bandwidth usage by reducing data transfer between the server and client. +* Prevents delays in response times caused by sending too much data over the network. +* Avoids high memory consumption when dealing with numerous documents. +* Ensures a more manageable user experience by not overwhelming users with massive datasets at once. +#### Performance hints: + +* By default, if the number of returned results exceeds **2048**, the server will issue a "Page size too big" notification (visible in the Studio) with information about the query. + +* This threshold can be customized by modifying the value of the [PerformanceHints.MaxNumberOfResults](../../server/configuration/performance-hints-configuration.mdx#performancehintsmaxnumberofresults) configuration key. + +* As suggested by this performance hint, you may consider using [Streaming query results](../../client-api/session/querying/how-to-stream-query-results.mdx) instead of paging. + + ![Figure 1. Performance Hint](./assets/performance-hint.png) + + + +## Paging through tampered results + +* The `QueryStatistics` object contains the `total_results` property, + which represents the total number of matching documents found in the query results. + +* The `QueryStatistics` object also contains the `skipped_results` property. + Whenever this property is greater than **0**, that implies the server has skipped that number of results from the index. + +* The server will skip duplicate results internally in the following two scenarios: + + 1. When making a [Projection query](../../indexes/querying/projections.mdx) with [Distinct](../../indexes/querying/distinct.mdx). + + 2. When querying a [Fanout index](../../indexes/indexing-nested-data.mdx#fanout-index---multiple-index-entries-per-document). + +* In these cases: + + * The `skipped_results` property from the stats object will hold the count of skipped (duplicate) results. + + * The `total_results` property will be invalidated - + it will Not deduct the number of skipped results from the total number of results. + +* To do proper paging in these scenarios: + include the `skipped_results` value when specifying the number of documents to skip for each page using: + `(current_page * page_size) + skipped_results`. 
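+
+  For example (hypothetical numbers): when fetching the third page (`current_page = 2`) with `page_size = 10`, after 4 duplicate results were already skipped, the next page should start at `(2 * 10) + 4 = 24` - i.e., skip 24 results and take 10. The examples below implement this pattern.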
+
+## Examples
+
+#### A projection query with Distinct:
+
+
+
+
+{`paged_results: List[ProjectedClass] = []
+
+total_results = 0
+total_unique_results = 0
+skipped_results = 0
+
+page_number = 0
+page_size = 10
+
+def __stats_callback(statistics: QueryStatistics):
+    nonlocal total_results, skipped_results
+    total_results = statistics.total_results
+    skipped_results += statistics.skipped_results
+
+while True:
+    paged_results = list(
+        session.query_index_type(Products_ByUnitsInStock, Products_ByUnitsInStock.IndexEntry)
+        .statistics(__stats_callback)
+        .where_greater_than("units_in_stock", 10)
+        .of_type(Product)
+        # Define a projection
+        .select_fields(ProjectedClass)
+        # Call distinct to remove duplicate projected results
+        .distinct()
+        # Add the number of skipped results to the "start location"
+        .skip((page_number * page_size) + skipped_results)
+        .take(page_size)
+    )
+
+    total_unique_results += len(paged_results)
+    page_number += 1
+
+    if len(paged_results) == 0:
+        break
+
+# When executing the query on the Northwind sample data:
+# ======================================================
+
+# The total matching results reported in the stats is 47 (total_results),
+# but the total unique objects returned while paging the results is only 29 (total_unique_results)
+# due to the 'Distinct' usage which removes duplicates.
+
+# This is solved by adding the skipped results count to skip().
+`}
+
+
+
+
+{`class Products_ByUnitsInStock(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from product in docs.Products select new { units_in_stock = product.UnitsInStock }"
+
+    class IndexEntry:
+        def __init__(self, units_in_stock: int = None):
+            self.units_in_stock = units_in_stock
+`}
+
+
+
+
+{`class ProjectedClass:
+    def __init__(self, category: str = None, supplier: str = None):
+        self.category = category
+        self.supplier = supplier
+
+    # Handle different casing by implementing from_json class method
+    @classmethod
+    def from_json(cls, json_dict: Dict[str, Any]):
+        return cls(json_dict["Category"], json_dict["Supplier"])
+`}
+
+
+
+
+{`from index "Products/ByUnitsInStock"
+where UnitsInStock > 10
+select distinct Category, Supplier
+limit 0, 10 // First loop will skip 0, take 10, etc.
+`}
+
+
+
+
+#### Querying a Fanout index:
+
+
+
+
+{`paged_results: List[Order] = []
+
+total_results = 0
+total_unique_results = 0
+skipped_results = 0
+
+page_number = 0
+page_size = 50
+
+def __stats_callback(statistics: QueryStatistics):
+    nonlocal total_results, skipped_results
+    total_results = statistics.total_results
+    skipped_results += statistics.skipped_results
+
+while True:
+    paged_results = list(
+        session.query_index_type(Orders_ByProductName, Orders_ByProductName.IndexEntry)
+        .statistics(__stats_callback)
+        .of_type(Order)
+        # Add the number of skipped results to the "start location"
+        .skip((page_number * page_size) + skipped_results)
+        .take(page_size)
+    )
+
+    total_unique_results += len(paged_results)
+    page_number += 1
+
+    if len(paged_results) == 0:
+        break
+
+# When executing the query on the Northwind sample data:
+# ======================================================
+
+# The total results reported in the stats is 2155 (total_results),
+# which represents the multiple index-entries generated as defined by the fanout index.
+
+# By adding the skipped results count to the skip() method,
+# we get the correct total unique results which is 830 Order documents.
+`}
+
+
+
+
+{`# A fanout index - creating MULTIPLE index-entries per document:
+# ==============================================================
+class Orders_ByProductName(AbstractIndexCreationTask):
+    class IndexEntry:
+        def __init__(self, product_name: str = None):
+            self.product_name = product_name
+
+    def __init__(self):
+        super().__init__()
+        self.map = "from order in docs.Orders from line in order.Lines select new { product_name = line.ProductName }"
+`}
+
+
+
+
+{`from index "Orders/ByProductName"
+limit 0, 50 // First loop will skip 0, take 50, etc.
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_projections-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_projections-csharp.mdx
new file mode 100644
index 0000000000..967f21666a
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_projections-csharp.mdx
@@ -0,0 +1,1503 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article provides examples of projecting query results when querying a **static-index**.
+
+* Prior to reading this article, please refer to [query results projection overview](../../client-api/session/querying/how-to-project-query-results.mdx)
+  for general knowledge about Projections and for dynamic-queries examples.
+
+* In this page:
+  * [Projection Methods](../../indexes/querying/projections.mdx#select):
+    * [Select](../../indexes/querying/projections.mdx#select)
+    * [ProjectInto](../../indexes/querying/projections.mdx#projectinto)
+    * [SelectFields](../../indexes/querying/projections.mdx#selectfields)
+  * [Projection behavior with a static-index](../../indexes/querying/projections.mdx#projection-behavior-with-a-static-index)
+  * [OfType](../../indexes/querying/projections.mdx#oftype)
+
+
+## Select
+
+
+
+**Example I - Projecting individual fields of the document**:
+
+
+
+
+{`var projectedResults = session
+    // Query the index
+    .Query()
+    // Can filter by any index-field, e.g. filter by index-field 'Title'
+    .Where(x => x.Title == "sales representative")
+    // Call 'Select' to return only the first and last name per matching document
+    .Select(x => new
+    {
+        EmployeeFirstName = x.FirstName,
+        EmployeeLastName = x.LastName
+    })
+    .ToList();
+
+// Each resulting object in the list is Not an 'Employee' entity,
+// it is a new object containing ONLY the fields specified in the Select
+// ('EmployeeFirstName' & 'EmployeeLastName').
+`}
+
+
+
+
+{`var projectedResults = await asyncSession
+    // Query the index
+    .Query()
+    // Can filter by any index-field, e.g. filter by index-field 'Title'
+    .Where(x => x.Title == "sales representative")
+    // Call 'Select' to return only the first and last name per matching document
+    .Select(x => new
+    {
+        EmployeeFirstName = x.FirstName,
+        EmployeeLastName = x.LastName
+    })
+    .ToListAsync();
+
+// Each resulting object in the list is Not an 'Employee' entity,
+// it is a new object containing ONLY the fields specified in the Select
+// ('EmployeeFirstName' & 'EmployeeLastName').
+`} + + + + +{`public class Employees_ByNameAndTitle : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string FirstName { get; set; } + public string LastName { get; set; } + public string Title { get; set; } + } + + public Employees_ByNameAndTitle() + { + Map = employees => from employee in employees + + select new IndexEntry + { + FirstName = employee.FirstName, + LastName = employee.LastName, + Title = employee.Title + }; + } +} +`} + + + + +{`from index "Employees/ByNameAndTitle" +where Title == "sales representative" +select FirstName as EmployeeFirstName, LastName as EmployeeLastName +`} + + + + +* **Type of projection fields**: + + * In the above example, the fields to return by the projection that are specified in the `Select` method + (`x.FirstName` & `x.LastName`) are recognized by the compiler as fields of the `IndexEntry` class. + + * If you wish to specify fields from the original 'Employee' class type then follow [this example](../../indexes/querying/projections.mdx#oftype) that uses `OfType`. + +* **Source of projection fields**: + + * Since the index-fields in this example are not [Stored in the index](../../indexes/storing-data-in-index.mdx), and no projection behavior was defined, + resulting values for `FirstName` & `LastName` will be retrieved from the matching Employee document in the storage. + + * This behavior can be modified by setting the [projection behavior](../../indexes/querying/projections.mdx#projection-behavior-with-a-static-index) used when querying a static-index. + + + + + +**Example II - Projecting stored fields**: + + + + +{`var projectedResults = session + .Query() + .Select(x => new + { + // Project fields 'FirstName' and 'LastName' which are STORED in the index + EmployeeFirstName = x.FirstName, + EmployeeLastName = x.LastName + }) + .ToList(); +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Select(x => new + { + // Project fields 'FirstName' and 'LastName' which are STORED in the index + EmployeeFirstName = x.FirstName, + EmployeeLastName = x.LastName + }) + .ToListAsync(); +`} + + + + +{`public class Employees_ByNameAndTitleWithStoredFields : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string FirstName { get; set; } + public string LastName { get; set; } + public string Title { get; set; } + } + + public Employees_ByNameAndTitleWithStoredFields() + { + Map = employees => from employee in employees + select new IndexEntry + { + FirstName = employee.FirstName, + LastName = employee.LastName, + Title = employee.Title + }; + + // Store some fields in the index: + Stores.Add(x => x.FirstName, FieldStorage.Yes); + Stores.Add(x => x.LastName, FieldStorage.Yes); + } +} +`} + + + + +{`from index "Employees/ByNameAndTitleWithStoredFields" +select FirstName as EmployeeFirstName, LastName as EmployeeLastName +`} + + + + +* In this example, the projected fields (`FirstName` and `LastName`) are stored in the index, + so by default, the resulting values will come directly from the index and Not from the Employee document in the storage. + +* This behavior can be modified by setting the [projection behavior](../../indexes/querying/projections.mdx#projection-behavior-with-a-static-index) used when querying a static-index. 
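+
+  For instance, a minimal sketch (a hypothetical usage, not part of the original example set, based on the `Projection` customization described in the [projection behavior](../../indexes/querying/projections.mdx#projection-behavior-with-a-static-index) section below) that forces values to be fetched from the documents even though this index stores the fields:
+
+{`var projectedResults = session
+    // Query the index from Example II, which stores 'FirstName' & 'LastName'
+    .Query<Employees_ByNameAndTitleWithStoredFields.IndexEntry,
+           Employees_ByNameAndTitleWithStoredFields>()
+    // Request that projected values come from the documents
+    // rather than from the stored index fields
+    .Customize(x => x.Projection(ProjectionBehavior.FromDocument))
+    .Select(x => new
+    {
+        EmployeeFirstName = x.FirstName,
+        EmployeeLastName = x.LastName
+    })
+    .ToList();
+`}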
+ + + + +**Example III - Projecting arrays and objects**: + + + + +{`var projectedResults = session + .Query() + .Where(x => x.Company == "companies/65-A") + .Select(x => new + { + // Retrieve a property from an object + ShipToCity = x.ShipTo.City, + // Retrieve all product names from the Lines array + Products = x.Lines.Select(y => y.ProductName) + }) + .ToList(); +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Where(x => x.Company == "companies/65-A") + .Select(x => new + { + // Retrieve a property from an object + ShipToCity = x.ShipTo.City, + // Retrieve all product names from the Lines array + Products = x.Lines.Select(y => y.ProductName) + }) + .ToListAsync(); +`} + + + + +{`public class Orders_ByCompanyAndShipToAndLines : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string Company { get; set; } + public Address ShipTo { get; set; } + public List Lines { get; set; } + } + + public Orders_ByCompanyAndShipToAndLines() + { + Map = orders => from order in orders + select new IndexEntry + { + Company = order.Company, + ShipTo = order.ShipTo, + Lines = order.Lines + }; + } +} + +// public class Address +// { +// public string Line1 { get; set; } +// public string Line2 { get; set; } +// public string City { get; set; } +// public string Region { get; set; } +// public string PostalCode { get; set; } +// public string Country { get; set; } +// public Location Location { get; set; } +// } + +// public class OrderLine +// { +// public string Product { get; set; } +// public string ProductName { get; set; } +// public decimal PricePerUnit { get; set; } +// public int Quantity { get; set; } +// public decimal Discount { get; set; } +// } +`} + + + + +{`// Using simple expression: +from index "Orders/ByCompanyAndShipToAndLines" +where Company == "companies/65-A" +select ShipTo.City as ShipToCity, Lines[].ProductName as Products + +// Using JavaScript object literal syntax: +from index "Orders/ByCompanyAndShipToAndLines" as x +where Company == "companies/65-A" +select { + ShipToCity: x.ShipTo.City, + Products: x.Lines.map(y => y.ProductName) +} +`} + + + + + + + + +**Example IV - Projection with expression**: + + + + +{`var projectedResults = session + .Query() + .Select(x => new + { + // Any expression can be provided for the projected content + FullName = x.FirstName + " " + x.LastName + }) + .ToList(); +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Select(x => new + { + // Any expression can be provided for the projected content + FullName = x.FirstName + " " + x.LastName + }) + .ToListAsync(); +`} + + + + +{`public class Employees_ByNameAndTitle : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string FirstName { get; set; } + public string LastName { get; set; } + public string Title { get; set; } + } + + public Employees_ByNameAndTitle() + { + Map = employees => from employee in employees + + select new IndexEntry + { + FirstName = employee.FirstName, + LastName = employee.LastName, + Title = employee.Title + }; + } +} +`} + + + + +{`from index "Employees/ByNameAndTitle" as x +select +{ + FullName : x.FirstName + " " + x.LastName +} +`} + + + + + + + + +**Example V - Projection with calculations**: + + + + +{`var projectedResults = session + .Query() + .Select(x => new + { + // Any calculations can be done within a projection + TotalProducts = x.Lines.Count, + TotalDiscountedProducts = x.Lines.Count(x => x.Discount > 0), + TotalPrice = x.Lines.Sum(l => l.PricePerUnit * l.Quantity) + }) + .ToList(); +`} 
+ + + + +{`var projectedResults = await asyncSession + .Query() + .Select(x => new + { + // Any calculations can be done within a projection + TotalProducts = x.Lines.Count, + TotalDiscountedProducts = x.Lines.Count(x => x.Discount > 0), + TotalPrice = x.Lines.Sum(l => l.PricePerUnit * l.Quantity) + }) + .ToListAsync(); +`} + + + + +{`public class Orders_ByCompanyAndShipToAndLines : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string Company { get; set; } + public Address ShipTo { get; set; } + public List Lines { get; set; } + } + + public Orders_ByCompanyAndShipToAndLines() + { + Map = orders => from order in orders + select new IndexEntry + { + Company = order.Company, + ShipTo = order.ShipTo, + Lines = order.Lines + }; + } +} + +// public class Address +// { +// public string Line1 { get; set; } +// public string Line2 { get; set; } +// public string City { get; set; } +// public string Region { get; set; } +// public string PostalCode { get; set; } +// public string Country { get; set; } +// public Location Location { get; set; } +// } + +// public class OrderLine +// { +// public string Product { get; set; } +// public string ProductName { get; set; } +// public decimal PricePerUnit { get; set; } +// public int Quantity { get; set; } +// public decimal Discount { get; set; } +// } +`} + + + + +{`from index "Orders/ByCompanyAndShipToAndLines" as x +select { + TotalProducts: x.Lines.length, + TotalDiscountedProducts: x.Lines.filter(x => x.Discount > 0).length, + TotalPrice: x.Lines + .map(l => l.PricePerUnit * l.Quantity) + .reduce((a, b) => a + b, 0) +} +`} + + + + + + + + +**Example VI - Projecting using functions**: + + + + +{`var projectedResults = + // Use LINQ query syntax notation + (from x in session + .Query() + // Define a function + let format = + (Func)(p => + p.FirstName + " " + p.LastName) + select new + { + // Call the function from the projection + FullName = format(x) + }) + .ToList(); +`} + + + + +{`var projectedResults = + // Use LINQ query syntax notation + await (from x in asyncSession + .Query() + // Define a function + let format = + (Func)(p => + p.FirstName + " " + p.LastName) + select new + { + // Call the function from the projection + FullName = format(x) + }) + .ToListAsync(); +`} + + + + +{`public class Employees_ByNameAndTitle : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string FirstName { get; set; } + public string LastName { get; set; } + public string Title { get; set; } + } + + public Employees_ByNameAndTitle() + { + Map = employees => from employee in employees + + select new IndexEntry + { + FirstName = employee.FirstName, + LastName = employee.LastName, + Title = employee.Title + }; + } +} +`} + + + + +{`declare function output(x) { +var format = p => p.FirstName + " " + p.LastName; + return { FullName: format(x) }; +} +from index "Employees/ByNameAndTitle" as e +select output(e) +`} + + + + + + + + +**Example VII - Projecting using a loaded document**: + + + + +{`var projectedResults = + // Use LINQ query syntax notation + (from o in session + .Query() + // Use RavenQuery.Load to load the related Company document + let c = RavenQuery.Load(o.Company) + select new + { + CompanyName = c.Name, // info from the related Company document + ShippedAt = o.ShippedAt // info from the Order document + }) + .ToList(); +`} + + + + +{`// Use LINQ query syntax notation +var projectedResults = + await (from o in asyncSession + .Query() + // Use RavenQuery.Load to load the related Company document + let c = 
RavenQuery.Load(o.Company) + select new + { + CompanyName = c.Name, // info from the related Company document + ShippedAt = o.ShippedAt // info from the Order document + }) + .ToListAsync(); +`} + + + + +{`public class Orders_ByCompanyAndShippedAt : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string Company { get; set; } + public DateTime? ShippedAt { get; set; } + } + + public Orders_ByCompanyAndShippedAt() + { + Map = orders => from order in orders + + select new IndexEntry + { + Company = order.Company, + ShippedAt = order.ShippedAt + }; + } +} +`} + + + + +{`from index "Orders/ByCompanyAndShippedAt" as o +load o.Company as c +select { + CompanyName: c.Name, + ShippedAt: o.ShippedAt +} +`} + + + + + + + + +**Example VIII - Projection with dates**: + + + + +{`var projectedResults = session + .Query() + .Select(x => new + { + DayOfBirth = x.Birthday.Day, + MonthOfBirth = x.Birthday.Month, + Age = DateTime.Today.Year - x.Birthday.Year + }) + .ToList(); +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Select(x => new + { + DayOfBirth = x.Birthday.Day, + MonthOfBirth = x.Birthday.Month, + Age = DateTime.Today.Year - x.Birthday.Year + }) + .ToListAsync(); +`} + + + + +{`public class Employees_ByFirstNameAndBirthday : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string FirstName { get; set; } + public DateTime Birthday { get; set; } + } + + public Employees_ByFirstNameAndBirthday() + { + Map = employees => from employee in employees + + select new IndexEntry + { + FirstName = employee.FirstName, + Birthday = employee.Birthday + }; + } +} +`} + + + + +{`from index "Employees/ByFirstNameAndBirthday" as x +select { + DayOfBirth: new Date(Date.parse(x.Birthday)).getDate(), + MonthOfBirth: new Date(Date.parse(x.Birthday)).getMonth() + 1, + Age: new Date().getFullYear() - new Date(Date.parse(x.Birthday)).getFullYear() +} +`} + + + + + + + + +**Example IX - Projection with raw JavaScript code**: + + + + +{`var projectedResults = session + .Query() + .Select(x => new + { + // Provide a JavaScript expression to the RavenQuery.Raw method + Date = RavenQuery.Raw("new Date(Date.parse(x.Birthday))"), + Name = RavenQuery.Raw(x.FirstName, "substr(0,3)") + }) + .ToList(); +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Select(x => new + { + // Provide a JavaScript expression to the RavenQuery.Raw method + Date = RavenQuery.Raw("new Date(Date.parse(x.Birthday))"), + Name = RavenQuery.Raw(x.FirstName, "substr(0,3)") + }) + .ToListAsync(); +`} + + + + +{`public class Employees_ByFirstNameAndBirthday : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string FirstName { get; set; } + public DateTime Birthday { get; set; } + } + + public Employees_ByFirstNameAndBirthday() + { + Map = employees => from employee in employees + + select new IndexEntry + { + FirstName = employee.FirstName, + Birthday = employee.Birthday + }; + } +} +`} + + + + +{`from index "Employees/ByFirstNameAndBirthday" as x +select { + Date: new Date(Date.parse(x.Birthday)), + Name: x.FirstName.substr(0,3) +} +`} + + + + + + + + +**Example X - Projection with metadata**: + + + + +{`var projectedResults = session + .Query() + .Select(x => new + { + Name = x.FirstName, + Metadata = RavenQuery.Metadata(x) // Get the metadata + }) + .ToList(); +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Select(x => new + { + Name = x.FirstName, + Metadata = RavenQuery.Metadata(x) // Get the metadata + }) + .ToListAsync(); +`} + + + 
+ +{`public class Employees_ByFirstNameAndBirthday : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string FirstName { get; set; } + public DateTime Birthday { get; set; } + } + + public Employees_ByFirstNameAndBirthday() + { + Map = employees => from employee in employees + + select new IndexEntry + { + FirstName = employee.FirstName, + Birthday = employee.Birthday + }; + } +} +`} + + + + +{`from index "Employees/ByFirstNameAndBirthday" as x +select { + Name : x.FirstName, + Metadata : getMetadata(x) +} +`} + + + + + + + +## ProjectInto + +* Instead of `Select`, you can use `ProjectInto` to project all public fields from a generic type. + +* The results will be projected into objects of the specified projection class. + + + + + + +{`var projectedResults = session + .Query() + .Where(x => x.ContactTitle == "owner") + // Call 'ProjectInto' instead of using 'Select' + // Pass the projection class + .ProjectInto() + .ToList(); + +// Each resulting object in the list is Not a 'Company' entity, +// it is an object of type 'ContactDetails'. +`} + + + + +{`var projectedResults = await asyncSession + .Query() + .Where(x => x.ContactTitle == "owner") + // Call 'ProjectInto' instead of using 'Select' + // Pass the projection class + .ProjectInto() + .ToListAsync(); + +// Each resulting object in the list is Not a 'Company' entity, +// it is an object of type 'ContactDetails'. +`} + + + + +{`public class Companies_ByContactDetailsAndPhone : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string ContactName { get; set; } + public string ContactTitle { get; set; } + public string Phone { get; set; } + } + + public Companies_ByContactDetailsAndPhone() + { + Map = companies => companies + .Select(x => new IndexEntry + { + ContactName = x.Contact.Name, + ContactTitle = x.Contact.Title, + Phone = x.Phone + }); + } +} +`} + + + + +{`public class ContactDetails +{ + // The projection class contains field names from the index-fields + public string ContactName { get; set; } + public string ContactTitle { get; set; } +} +`} + + + + +{`from index "Companies/ByContactDetailsAndPhone" +where ContactTitle == "owner" +select ContactName, ContactTitle +`} + + + + + + + +## SelectFields + +The `SelectFields` method can only be used by a [Document Query](../../client-api/session/querying/document-query/what-is-document-query.mdx). +It has two overloads: + + + +{`// 1) Select fields to project by the projection class type +IDocumentQuery SelectFields(); + +// 2) Select specific fields to project +IDocumentQuery SelectFields(params string[] fields); +`} + + + + + +**Using projection class type**: + +* The projection class fields are the fields that you want to project from the 'IndexEntry' class. + + + + +{`// Query an index with DocumentQuery +var projectedResults = session.Advanced + .DocumentQuery() + // Call 'SelectFields' + // Pass the projection class type + .SelectFields() + .ToList(); + +// Each resulting object in the list is Not a 'Product' entity, +// it is an object of type 'ProductDetails'. +`} + + + + +{`// Query an index with DocumentQuery +var projectedResults = await asyncSession.Advanced + .AsyncDocumentQuery() + // Call 'SelectFields' + // Pass the projection class type + .SelectFields() + .ToListAsync(); + +// Each resulting object in the list is Not a 'Product' entity, +// it is an object of type 'ProductDetails'. 
+`} + + + + +{`public class Products_ByNamePriceQuantityAndUnits : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string ProductName { get; set; } + public string QuantityPerUnit { get; set; } + public decimal PricePerUnit { get; set; } + public int UnitsInStock { get; set; } + public int UnitsOnOrder { get; set; } + } + + public Products_ByNamePriceQuantityAndUnits() + { + Map = products => from product in products + + select new IndexEntry + { + ProductName = product.Name, + QuantityPerUnit = product.QuantityPerUnit, + PricePerUnit = product.PricePerUnit, + UnitsInStock = product.UnitsInStock, + UnitsOnOrder = product.UnitsOnOrder + }; + } +} +`} + + + + +{`public class ProductDetails +{ + // The projection class contains field names from the index-fields + public string ProductName { get; set; } + public decimal PricePerUnit { get; set; } + public int UnitsInStock { get; set; } +} +`} + + + + +{`from index "Products/ByNamePriceQuantityAndUnits" +select ProductName, PricePerUnit, UnitsInStock +`} + + + + + + + + +**Using specific fields**: + +* The fields specified are the fields that you want to project from the projection class. + + + + +{`// Define an array with the field names that will be projected +var fields = new string[] { + "ProductName", + "PricePerUnit" +}; + +// Query an index with DocumentQuery +var projectedResults = session.Advanced + .DocumentQuery() + // Call 'SelectFields' + // Pass the projection class type & the fields to be projected from it + .SelectFields(fields) + .ToList(); + +// Each resulting object in the list is Not a 'Product' entity, +// it is an object of type 'ProductDetails' containing data ONLY for the specified fields. +`} + + + + +{`// Define an array with the field names that will be projected +var fields = new string[] { + "ProductName", + "PricePerUnit" +}; + +// Query an index with DocumentQuery +var projectedResults = await asyncSession.Advanced + .AsyncDocumentQuery() + // Call 'SelectFields' + // Pass the projection class type & the fields to be projected from it + .SelectFields(fields) + .ToListAsync(); + +// Each resulting object in the list is Not a 'Product' entity, +// it is an object of type 'ProductDetails' containing data ONLY for the specified fields. 
+`}
+
+
+
+
+{`public class Products_ByNamePriceQuantityAndUnits : AbstractIndexCreationTask
+{
+    public class IndexEntry
+    {
+        public string ProductName { get; set; }
+        public string QuantityPerUnit { get; set; }
+        public decimal PricePerUnit { get; set; }
+        public int UnitsInStock { get; set; }
+        public int UnitsOnOrder { get; set; }
+    }
+
+    public Products_ByNamePriceQuantityAndUnits()
+    {
+        Map = products => from product in products
+
+                          select new IndexEntry
+                          {
+                              ProductName = product.Name,
+                              QuantityPerUnit = product.QuantityPerUnit,
+                              PricePerUnit = product.PricePerUnit,
+                              UnitsInStock = product.UnitsInStock,
+                              UnitsOnOrder = product.UnitsOnOrder
+                          };
+    }
+}
+`}
+
+
+
+
+{`public class ProductDetails
+{
+    // The projection class contains field names from the index-fields
+    public string ProductName { get; set; }
+    public decimal PricePerUnit { get; set; }
+    public int UnitsInStock { get; set; }
+}
+`}
+
+
+
+
+{`from index "Products/ByNamePriceQuantityAndUnits"
+select ProductName, PricePerUnit
+`}
+
+
+
+
+
+
+
+
+## Projection behavior with a static-index
+
+* **By default**, when querying a static-index and projecting query results,
+  the server will try to retrieve the fields' values from the fields [stored in the index](../../indexes/storing-data-in-index.mdx).
+  If the index does Not store those fields then the fields' values will be retrieved from the documents.
+
+* This behavior can be modified by setting the **projection behavior**.
+
+* Note: Storing fields in the index can increase query performance when projecting,
+  but this comes at the expense of the disk space used by the index.
+
+
+
+**Example**:
+
+
+
+
+{`var projectedResults = session
+    .Query()
+    // Call 'Customize'
+    // Pass the requested projection behavior to the 'Projection' method
+    .Customize(x => x.Projection(ProjectionBehavior.FromIndexOrThrow))
+    // Select the fields that will be returned by the projection
+    .Select(x => new EmployeeDetails
+    {
+        FirstName = x.FirstName,
+        Title = x.Title
+    })
+    .ToList();
+`}
+
+
+
+
+{`var projectedResults = session.Advanced
+    .DocumentQuery()
+    // Pass the requested projection behavior to the 'SelectFields' method
+    .SelectFields(ProjectionBehavior.FromIndexOrThrow)
+    .ToList();
+`}
+
+
+
+
+{`var projectedResults = session.Advanced
+    // Define an RQL query that returns a projection
+    .RawQuery(
+        @"from index 'Employees/ByNameAndTitleWithStoredFields' select FirstName, Title")
+    // Pass the requested projection behavior to the 'Projection' method
+    .Projection(ProjectionBehavior.FromIndexOrThrow)
+    .ToList();
+`}
+
+
+
+
+{`public class Employees_ByNameAndTitleWithStoredFields : AbstractIndexCreationTask
+{
+    public class IndexEntry
+    {
+        public string FirstName { get; set; }
+        public string LastName { get; set; }
+        public string Title { get; set; }
+    }
+
+    public Employees_ByNameAndTitleWithStoredFields()
+    {
+        Map = employees => from employee in employees
+                           select new IndexEntry
+                           {
+                               FirstName = employee.FirstName,
+                               LastName = employee.LastName,
+                               Title = employee.Title
+                           };
+
+        // Store some fields in the index:
+        Stores.Add(x => x.FirstName, FieldStorage.Yes);
+        Stores.Add(x => x.LastName, FieldStorage.Yes);
+    }
+}
+`}
+
+
+
+
+{`public class EmployeeDetails
+{
+    public string FirstName { get; set; }
+    public string Title { get; set; }
+}
+`}
+
+
+
+
+{`from index "Employees/ByNameAndTitleWithStoredFields"
+select FirstName, Title
+`}
+
+
+
+
+The projection behavior in the above example is set to `FromIndexOrThrow` and so the following applies:
+
+* Field 
`FirstName` is stored in the index so the server will fetch its values from the index. + +* However, field `Title` is Not stored in the index so an exception will be thrown when the query is executed. + + + + + +**Syntax for projection behavior**: + + +{`// For Query: +IDocumentQueryCustomization Projection(ProjectionBehavior projectionBehavior); + +// For DocumentQuery: +IDocumentQuery SelectFields( + ProjectionBehavior projectionBehavior, params string[] fields); + +IDocumentQuery SelectFields( + ProjectionBehavior projectionBehavior); + +// Projection behavior options: +public enum ProjectionBehavior { + Default, + FromIndex, + FromIndexOrThrow, + FromDocument, + FromDocumentOrThrow +} +`} + + +* `Default` + Retrieve values from the stored index fields when available. + If fields are not stored then get values from the document, + a field that is not found in the document is skipped. + +* `FromIndex` + Retrieve values from the stored index fields when available. + A field that is not stored in the index is skipped. + +* `FromIndexOrThrow` + Retrieve values from the stored index fields when available. + An exception is thrown if the index does not store the requested field. + +* `FromDocument` + Retrieve values directly from the documents store. + A field that is not found in the document is skipped. + +* `FromDocumentOrThrow` + Retrieve values directly from the documents store. + An exception is thrown if the document does not contain the requested field. + + + + + +## OfType + +* When making a projection query, converting the shape of the matching documents to the requested projection is done on the **server-side**. + +* On the other hand, `OfType` is a **client-side** type conversion that is only used to map the resulting objects to the provided type. + +* We differentiate between the following cases: + * Using _OfType_ with projection queries - resulting objects are Not tracked by the session + * Using _OfType_ with non-projection queries - resulting documents are tracked by the session + + + +**Using OfType with projection queries**: + + + + +{`// Make a projection query: +// ======================== + +var projectedResults = session + .Query() + // Here we filter by an IndexEntry field + // The compiler recognizes 'x' as an IndexEntry type + .Where(x => x.ContactTitle == "owner") + // Now, if you wish to project based on the 'Company' document + // then use 'OfType' to let the compiler recognize the type + .OfType() + // Select which fields from the matching document will be returned + .Select(x => new + { + // The compiler now recognizes 'x' as a 'Company' class type + // e.g. 'Name' & 'Address.Country' are properties of the 'Company' document + CompanyName = x.Name, + CompanyCountry = x.Address.Country + }) + .ToList(); + +// Each resulting object has the 'CompanyName' & 'CompanyCountry' fields specified in the projection. +// The resulting objects are NOT TRACKED by the session. +`} + + + + +{`// Make a projection query: +// ======================== + +var projectedResults = await asyncSession + .Query() + // Here we filter by an IndexEntry field + // The compiler recognizes 'x' as an IndexEntry type + .Where(x => x.ContactTitle == "owner") + // Now, if you wish to project based on the 'Company' document + // then use 'OfType' to let the compiler recognize the type + .OfType() + // Select which fields from the matching document will be returned + .Select(x => new + { + // The compiler now recognizes 'x' as a 'Company' class type + // e.g. 
'Name' & 'Address.Country' are properties of the 'Company' document + CompanyName = x.Name, + CompanyCountry = x.Address.Country + }) + .ToListAsync(); + +// Each resulting object has the 'CompanyName' & 'CompanyCountry' fields specified in the projection. +// The resulting objects are NOT TRACKED by the session. +`} + + + + +{`public class Companies_ByContactDetailsAndPhone : AbstractIndexCreationTask +{ + public class IndexEntry + { + public string ContactName { get; set; } + public string ContactTitle { get; set; } + public string Phone { get; set; } + } + + public Companies_ByContactDetailsAndPhone() + { + Map = companies => companies + .Select(x => new IndexEntry + { + ContactName = x.Contact.Name, + ContactTitle = x.Contact.Title, + Phone = x.Phone + }); + } +} +`} + + + + +{`from index "Companies/ByContactDetailsAndPhone" +where ContactTitle == "owner" +`} + + + + + + + + +**Using OfType with non-projection queries**: + + + + +{`// Make a non-projecting query: +// ============================ + +List results = session + .Query() + // Here we filter by an IndexEntry field + // The compiler recognizes 'x' as an IndexEntry type + .Where(x => x.ContactTitle == "owner") + // A type conversion is now required for the compiler to understand the resulting objects' shape. + // Use 'OfType to let the compiler know that resulting objects are of type 'Company' documents. + .OfType() + .ToList(); + +// The resulting objects are full 'Company' document entities (not projected). +// Each 'Company' entity is TRACKED by the session. +`} + + + + +{`// Make a non-projecting query: +// ============================ + +List results = await asyncSession + .Query() + // Here we filter by an IndexEntry field + // The compiler recognizes 'x' as an IndexEntry type + .Where(x => x.ContactTitle == "owner") + // A type conversion is now required for the compiler to understand the resulting objects' shape. + // Use 'OfType to let the compiler know that resulting objects are of type 'Company' documents. + .OfType() + .ToListAsync(); + +// The resulting objects are full 'Company' document entities (not projected). +// Each 'Company' entity is TRACKED by the session. 
+`}
+
+
+
+
+{`public class Companies_ByContactDetailsAndPhone : AbstractIndexCreationTask
+{
+    public class IndexEntry
+    {
+        public string ContactName { get; set; }
+        public string ContactTitle { get; set; }
+        public string Phone { get; set; }
+    }
+
+    public Companies_ByContactDetailsAndPhone()
+    {
+        Map = companies => companies
+            .Select(x => new IndexEntry
+            {
+                ContactName = x.Contact.Name,
+                ContactTitle = x.Contact.Title,
+                Phone = x.Phone
+            });
+    }
+}
+`}
+
+
+
+
+{`from index "Companies/ByContactDetailsAndPhone"
+where ContactTitle == "owner"
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_projections-java.mdx b/versioned_docs/version-7.1/indexes/querying/_projections-java.mdx
new file mode 100644
index 0000000000..64e7ba1d65
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_projections-java.mdx
@@ -0,0 +1,435 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+There are a couple of ways to perform projections in RavenDB:
+
+- projections using [SelectFields](../../indexes/querying/projections.mdx#selectfields)
+- using [OfType](../../indexes/querying/projections.mdx#oftype)
+
+## What are Projections and When to Use Them
+
+When performing a query, we usually pull the full document back from the server.
+
+However, we often need to display the data to the user. Instead of pulling the whole document back and picking just what we'll show, we can ask the server to send us just the details we want to show the user and thus reduce the amount of traffic on the network.
+
+The savings can be very significant if we need to show just a bit of information on a large document.
+
+A good example in the sample data set would be the order document. If we ask for all the Orders where Company is "companies/65-A", the size of the result that we get back from the server is 19KB.
+
+However, if we perform the same query and ask to get back only the Employee and OrderedAt fields, the size of the result is only 5KB.
+
+Aside from allowing you to pick only a portion of the data, projection functions give you the ability to rename some fields, load external documents, and perform transformations on the results.
+
+## Projections are Applied as the Last Stage in the Query
+
+It is important to understand that projections are applied after the query has been processed, filtered, sorted, and paged. The projection doesn't apply to all the documents in the database, only to the results that are actually returned.
+This reduces the load on the server significantly, since we can avoid doing work only to throw it away immediately after. It also means that we cannot do any filtering work as part of the projection. You can filter what will be returned, but not which documents will be returned. That has already been determined earlier in the query pipeline.
+
+## The Cost of Running a Projection
+
+Another consideration to take into account is the cost of running the projection. It is possible to make the projection query expensive to run. RavenDB has limits on the amount of time it will spend in evaluating the projection, and exceeding these (quite generous) limits will fail the query.
+
+## Projections and Stored Fields
+
+If a projection function only requires fields that are stored, then the document will not be loaded from storage and all data will come from the index directly. 
This can increase query performance (at the cost of disk space used) in many situations where the whole document is not needed. You can read more about storing fields [here](../../indexes/storing-data-in-index.mdx).
+
+## SelectFields
+The most basic projection can be done using the `selectFields` method:
+
+### Example I - Projecting Individual Fields of the Document
+
+
+
+
+{`List results = session
+    .query(Employee.class, Employees_ByFirstAndLastName.class)
+    .selectFields(FirstAndLastName.class, "FirstName", "LastName")
+    .toList();
+`}
+
+
+
+
+{`public static class Employees_ByFirstAndLastName extends AbstractIndexCreationTask {
+    public Employees_ByFirstAndLastName() {
+        map = "docs.Employees.Select(employee => new {" +
+                "    FirstName = employee.FirstName," +
+                "    LastName = employee.LastName" +
+                "})";
+    }
+}
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName'
+select FirstName, LastName
+`}
+
+
+
+
+This will issue a query to the database, requesting only `FirstName` and `LastName` for all documents whose index entries match the query predicate of the `Employees/ByFirstAndLastName` index. What does this mean? If an index entry matches our query predicate, we will try to extract all requested fields from that particular entry. If all requested fields are available there, we do not download the document from storage. The index `Employees/ByFirstAndLastName` used in the above query does not store any fields, so the documents will be fetched from storage.
+
+### Example II - Projecting Stored Fields
+
+If we create an index that stores `FirstName` and `LastName`, and the query requests only those fields, then **the data will come from the index directly**.
+
+
+
+
+{`List results = session
+    .query(Employee.class, Employees_ByFirstAndLastNameWithStoredFields.class)
+    .selectFields(FirstAndLastName.class, "FirstName", "LastName")
+    .toList();
+`}
+
+
+
+
+{`public static class Employees_ByFirstAndLastNameWithStoredFields extends AbstractIndexCreationTask {
+    public Employees_ByFirstAndLastNameWithStoredFields() {
+        map = "docs.Employees.Select(employee => new {" +
+                "    FirstName = employee.FirstName," +
+                "    LastName = employee.LastName" +
+                "})";
+
+        storeAllFields(FieldStorage.YES); // firstName and lastName fields can be retrieved directly from index
+    }
+}
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastNameWithStoredFields'
+select FirstName, LastName
+`}
+
+
+
+
+### Example III - Projecting Arrays and Objects
+
+
+
+
+{`QueryData queryData = new QueryData(new String[]{"ShipTo", "Lines[].ProductName"},
+    new String[]{"ShipTo", "Products"});
+
+List results = session.query(Order.class)
+    .selectFields(ShipToAndProducts.class, queryData)
+    .toList();
+`}
+
+
+
+
+{`public static class Orders_ByShipToAndLines extends AbstractIndexCreationTask {
+    public Orders_ByShipToAndLines() {
+        map = "docs.Orders.Select(order => new {" +
+            "    ShipTo = order.ShipTo," +
+            "    Lines = order.Lines" +
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index 'Orders/ByShipToAndLines' as o
+select
+{
+    ShipTo: o.ShipTo,
+    Products : o.Lines.map(function(y){return y.ProductName;})
+}
+`}
+
+
+
+
+### Example IV - Projection with Expression
+
+
+
+
+{`List results = session.advanced().rawQuery(FullName.class, "from Employees as e " +
+    "select {" +
+    "    FullName : e.FirstName + \\" \\" + e.LastName " +
+    "}").toList();
+`}
+
+
+
+
+{`public static class Employees_ByFirstAndLastName extends AbstractIndexCreationTask {
+    public Employees_ByFirstAndLastName() {
+        map = "docs.Employees.Select(employee => new {" +
+                "    FirstName = 
employee.FirstName," + + " LastName = employee.LastName" + + "})"; + } +} +`} + + + + +{`from index 'Employees/ByFirstAndLastName' as e +select +{ + FullName : e.FirstName + " " + e.LastName +} +`} + + + + +### Example V - Projection with `declared function` + + + +{`List results = session.advanced().rawQuery(Employee.class, "declare function output(e) { " + + " var format = function(p){ return p.FirstName + \\" \\" + p.LastName; }; " + + " return { FullName : format(e) }; " + + "} " + + "from Employees as e select output(e)").toList(); +`} + + + + +{`public static class Employees_ByFirstAndLastName extends AbstractIndexCreationTask { + public Employees_ByFirstAndLastName() { + map = "docs.Employees.Select(employee => new {" + + " FirstName = employee.FirstName," + + " LastName = employee.LastName" + + "})"; + } +} +`} + + + + +{`declare function output(e) { + var format = function(p){ return p.FirstName + " " + p.LastName; }; + return { FullName : format(e) }; +} +from index 'Employees/ByFirstAndLastName' as e select output(e) +`} + + + + +### Example VI - Projection with Calculation + + + + +{`List results = session.advanced().rawQuery(Total.class, "from Orders as o " + + "select { " + + " Total : o.Lines.reduce( " + + " (acc , l) => acc += l.PricePerUnit * l.Quantity, 0) " + + "}").toList(); +`} + + + + +{`public static class Orders_ByShipToAndLines extends AbstractIndexCreationTask { + public Orders_ByShipToAndLines() { + map = "docs.Orders.Select(order => new {" + + " ShipTo = order.ShipTo," + + " Lines = order.Lines" + + "})"; + } +} +`} + + + + +{`from index 'Orders/ByShipToAndLines' as o +select { + Total : o.Lines.reduce( + (acc , l) => acc += l.PricePerUnit * l.Quantity, 0) +} +`} + + + + +### Example VII - Projection Using a Loaded Document + + + + +{`List results = session.advanced().rawQuery(OrderProjection.class, "from Orders as o " + + "load o.company as c " + + "select { " + + " CompanyName: c.Name," + + " ShippedAt: o.ShippedAt" + + "}").toList(); +`} + + + + +{`public static class Orders_ByShippedAtAndCompany extends AbstractIndexCreationTask { + public Orders_ByShippedAtAndCompany() { + map = "docs.Orders.Select(order => new {" + + " ShippedAt = order.ShippedAt," + + " Company = order.Company" + + "})"; + } +} +`} + + + + +{`from index 'Orders/ByShippedAtAndCompany' as o +load o.Company as c +select { + CompanyName: c.Name, + ShippedAt: o.ShippedAt +} +`} + + + + +### Example VIII - Projection with Dates + + + + +{`List results = session.advanced().rawQuery(EmployeeProjection.class, "from Employees as e " + + "select { " + + " DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(), " + + " MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1, " + + " Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear() " + + "}").toList(); +`} + + + + +{`public static class Employees_ByFirstNameAndBirthday extends AbstractIndexCreationTask { + public Employees_ByFirstNameAndBirthday() { + map = "docs.Employees.Select(employee => new {" + + " FirstName = employee.FirstName," + + " Birthday = employee.Birthday" + + "})"; + } +} +`} + + + + +{`from index 'Employees/ByFirstNameAndBirthday' as e +select { + DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(), + MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1, + Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear() +} +`} + + + + +### Example IX - Projection with Raw JavaScript Code + + + + +{`List results = session.advanced().rawQuery(EmployeeProjection.class, 
"from Employees as e " + + "select { " + + " Date : new Date(Date.parse(e.Birthday)), " + + " Name : e.FirstName.substr(0,3) " + + "}").toList(); +`} + + + + +{`public static class Employees_ByFirstNameAndBirthday extends AbstractIndexCreationTask { + public Employees_ByFirstNameAndBirthday() { + map = "docs.Employees.Select(employee => new {" + + " FirstName = employee.FirstName," + + " Birthday = employee.Birthday" + + "})"; + } +} +`} + + + + +{`from index 'Employees/ByFirstNameAndBirthday' as e +select { + Date : new Date(Date.parse(e.Birthday)), + Name : e.FirstName.substr(0,3) +} +`} + + + + +### Example X - Projection with Metadata + + + + +{`List results = session.advanced().rawQuery(Employee.class, "from Employees as e " + + "select {" + + " Name : e.FirstName, " + + " Metadata : getMetadata(e)" + + "}").toList(); +`} + + + + +{`public static class Employees_ByFirstAndLastName extends AbstractIndexCreationTask { + public Employees_ByFirstAndLastName() { + map = "docs.Employees.Select(employee => new {" + + " FirstName = employee.FirstName," + + " LastName = employee.LastName" + + "})"; + } +} +`} + + + + +{`from index 'Employees/ByFirstAndLastName' as e +select { + Name : e.FirstName, + Metadata : getMetadata(e) +} +`} + + + + + + + +## OfType + +`OfType` is a client-side projection. You can read more about it [here](../../client-api/session/querying/how-to-project-query-results.mdx#oftype-(as)---simple-projection). + + + +## Projections and the Session +Because you are working with projections and not directly with documents, they are _not_ tracked by the session. Modifications to a projection will not modify the document when `saveChanges` is called. + + diff --git a/versioned_docs/version-7.1/indexes/querying/_projections-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_projections-nodejs.mdx new file mode 100644 index 0000000000..31977fc4be --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_projections-nodejs.mdx @@ -0,0 +1,637 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article provides examples of projecting query results when querying a **static-index**. + +* Prior to reading this article, please refer to [query results projection overview](../../client-api/session/querying/how-to-project-query-results.mdx) + for general knowledge about Projections and for dynamic-queries examples. 
+
+* In this page:
+    * [SelectFields](../../indexes/querying/projections.mdx#selectfields)
+    * [Projection behavior with a static-index](../../indexes/querying/projections.mdx#projection-behavior-with-a-static-index)
+    * [ofType](../../indexes/querying/projections.mdx#oftype)
+
+
+## SelectFields
+
+
+
+**Example I - Projecting individual fields of the document**:
+
+
+
+
+{`// Alias names for the projected fields can be defined using a QueryData object
+const queryData = new QueryData(
+    ["FirstName", "LastName"], // Document-fields to project
+    ["EmployeeFirstName", "EmployeeLastName"]); // An alias for each field
+
+const projectedResults = await session
+    // Query the index
+    .query({indexName: "Employees/ByNameAndTitle"})
+    // Can filter by any index-field, e.g. filter by index-field 'Title'
+    .whereEquals('Title', 'sales representative')
+    // Call 'selectFields'
+    // Only the fields defined in 'queryData' will be returned per matching document
+    .selectFields(queryData)
+    .all();
+
+// Each resulting object in the list is Not an 'Employee' entity,
+// it is a new object containing ONLY the fields specified in the selectFields method
+// ('EmployeeFirstName' & 'EmployeeLastName').
+`}
+
+
+
+
+{`class Employees_ByNameAndTitle extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("Employees", e => {
+            return {
+                FirstName: e.FirstName,
+                LastName: e.LastName,
+                Title: e.Title
+            };
+        });
+    }
+}
+`}
+
+
+
+
+{`from index "Employees/ByNameAndTitle"
+where Title == "sales representative"
+select FirstName as EmployeeFirstName, LastName as EmployeeLastName
+`}
+
+
+
+
+* Since the index-fields in this example are not [Stored in the index](../../indexes/storing-data-in-index.mdx), and no projection behavior was defined,
+  resulting values for `FirstName` & `LastName` will be retrieved from the matching Employee document in storage.
+
+* This behavior can be modified by setting the [projection behavior](../../indexes/querying/projections.mdx#projection-behavior-with-a-static-index) used when querying a static-index.
+
+
+
+
+
+**Example II - Projecting stored fields**:
+
+
+
+
+{`const projectedResults = await session
+    .query({ indexName: "Employees/ByNameAndTitleWithStoredFields" })
+    // Call 'selectFields'
+    // Project fields 'FirstName' and 'LastName' which are STORED in the index
+    .selectFields(["FirstName", "LastName"])
+    .all();
+`}
+
+
+
+
+{`class Employees_ByNameAndTitleWithStoredFields extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("Employees", e => {
+            return {
+                FirstName: e.FirstName,
+                LastName: e.LastName,
+                Title: e.Title
+            };
+        });
+
+        // Store some fields in the index:
+        this.store('FirstName', 'Yes');
+        this.store('LastName', 'Yes');
+    }
+}
+`}
+
+
+
+
+{`from index "Employees/ByNameAndTitleWithStoredFields"
+select FirstName, LastName
+`}
+
+
+
+
+* In this example, the projected fields (`FirstName` and `LastName`) are stored in the index,
+  so by default, the resulting values will come directly from the index and not from the Employee document in storage.
+
+* This behavior can be modified by setting the [projection behavior](../../indexes/querying/projections.mdx#projection-behavior-with-a-static-index) used when querying a static-index.
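+
+As an aside, note that filtering and projection remain independent operations here. The following minimal RQL sketch (reusing the index from Example II) filters on the `Title` index-field, which is indexed but not stored, while the projected `FirstName` and `LastName` values are still served from the stored fields:
+
+{`from index "Employees/ByNameAndTitleWithStoredFields"
+where Title == "sales representative"
+select FirstName, LastName
+`}
+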
+
+
+
+
+**Example III - Projecting arrays and objects**:
+
+
+
+
+{`const queryData = new QueryData(
+    // Retrieve the City property from the ShipTo object
+    // and all product names from the Lines array
+    [ "ShipTo.City", "Lines[].ProductName" ],
+    [ "ShipToCity", "Products" ]);
+
+const projectedResults = await session
+    .query({ indexName: "Orders/ByCompanyAndShipToAndLines" })
+    .selectFields(queryData)
+    .all();
+`}
+
+
+
+
+{`class Orders_ByCompanyAndShipToAndLines extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("Orders", o => {
+            return {
+                Company : o.Company,
+                ShipTo: o.ShipTo,
+                Lines: o.Lines
+            };
+        });
+    }
+}
+`}
+
+
+
+
+{`// Using simple expression:
+from index "Orders/ByCompanyAndShipToAndLines"
+where Company == "companies/65-A"
+select ShipTo.City as ShipToCity, Lines[].ProductName as Products
+
+// Using JavaScript object literal syntax:
+from index "Orders/ByCompanyAndShipToAndLines" as x
+where Company == "companies/65-A"
+select {
+    ShipToCity: x.ShipTo.City,
+    Products: x.Lines.map(y => y.ProductName)
+}
+`}
+
+
+
+
+
+
+
+**Example IV - Projection with expression**:
+
+
+
+
+{`// Define the projected data expression within a custom function.
+// Any expression can be provided for the projected content.
+const queryData = QueryData.customFunction("e", \`{
+    FullName: e.FirstName + " " + e.LastName
+}\`);
+
+const projectedResults = await session
+    .query({indexName: "Employees/ByNameAndTitle"})
+    .selectFields(queryData)
+    .all();
+`}
+
+
+
+
+{`class Employees_ByNameAndTitle extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("Employees", e => {
+            return {
+                FirstName: e.FirstName,
+                LastName: e.LastName,
+                Title: e.Title
+            };
+        });
+    }
+}
+`}
+
+
+
+
+{`from index "Employees/ByNameAndTitle" as e
+select {
+    FullName : e.FirstName + " " + e.LastName
+}
+`}
+
+
+
+
+
+
+
+**Example V - Projection with calculations**:
+
+
+
+
+{`const projectedResults = await session.advanced
+    .rawQuery(\`from index "Orders/ByCompanyAndShipToAndLines" as x
+               select {
+                   // Any calculations can be done within a projection
+                   TotalProducts: x.Lines.length,
+                   TotalDiscountedProducts: x.Lines.filter(x => x.Discount > 0).length,
+                   TotalPrice: x.Lines
+                       .map(l => l.PricePerUnit * l.Quantity)
+                       .reduce((a, b) => a + b, 0)
+               }\`)
+    .all();
+`}
+
+
+
+
+{`class Orders_ByCompanyAndShipToAndLines extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("Orders", o => {
+            return {
+                Company : o.Company,
+                ShipTo: o.ShipTo,
+                Lines: o.Lines
+            };
+        });
+    }
+}
+`}
+
+
+
+
+{`from index "Orders/ByCompanyAndShipToAndLines" as x
+select {
+    TotalProducts: x.Lines.length,
+    TotalDiscountedProducts: x.Lines.filter(x => x.Discount > 0).length,
+    TotalPrice: x.Lines
+        .map(l => l.PricePerUnit * l.Quantity)
+        .reduce((a, b) => a + b, 0)
+}
+`}
+
+
+
+
+
+
+
+**Example VI - Projecting using functions**:
+
+
+
+
+{`const projectedResults = await session.advanced
+    .rawQuery(\`// Define a function
+               declare function output(x) {
+                   var format = p => p.FirstName + " " + p.LastName;
+                   return { FullName: format(x) };
+               }
+
+               from index "Employees/ByNameAndTitle" as e
+               select output(e)\`) // Call the function from the projection
+    .all();
+`}
+
+
+
+
+{`class Employees_ByNameAndTitle extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("Employees", e => {
+            return {
+                FirstName: e.FirstName,
+                LastName: e.LastName,
+                Title: e.Title
+            };
+        });
+    }
+}
+`}
+
+
+
+{`declare function output(x) { + var format = p => p.FirstName + " " + p.LastName; + return { FullName: format(x) }; +} + +from index "Employees/ByNameAndTitle" as e +select output(e) +`} + + + + + + + + +**Example VII - Projecting using a loaded document**: + + + + +{`const projectedResults = await session.advanced + .rawQuery(\`from index "Orders/ByCompanyAndShippedAt" as o + load o.Company as c // Load the related document to use in the projection + select { + CompanyName: c.Name, // Info from the related Company document + ShippedAt: o.ShippedAt // Info from the Order document + }\`) + .all(); +`} + + + + +{`class Orders_ByCompanyAndShippedAt extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Orders", o => { + return { + Company: o.Company, + ShippedAt: o.ShippedAt + }; + }); + } +} +`} + + + + +{`from index "Orders/ByCompanyAndShippedAt" as o +load o.Company as c +select { + CompanyName: c.Name, + ShippedAt: o.ShippedAt +} +`} + + + + + + + + +**Example VIII - Projection with dates**: + + + + +{`const projectedResults = await session.advanced + .rawQuery(\`from index "Employees/ByFirstNameAndBirthday" as x + select { + DayOfBirth: new Date(Date.parse(x.Birthday)).getDate(), + MonthOfBirth: new Date(Date.parse(x.Birthday)).getMonth() + 1, + Age: new Date().getFullYear() - new Date(Date.parse(x.Birthday)).getFullYear() + }\`) + .all(); +`} + + + + +{`class Employees_ByFirstNameAndBirthday extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Employees", e => { + return { + FirstName: e.FirstName, + Birthday: e.Birthday + }; + }); + } +} +`} + + + + +{`from index "Employees/ByFirstNameAndBirthday" as x +select { + DayOfBirth: new Date(Date.parse(x.Birthday)).getDate(), + MonthOfBirth: new Date(Date.parse(x.Birthday)).getMonth() + 1, + Age: new Date().getFullYear() - new Date(Date.parse(x.Birthday)).getFullYear() +} +`} + + + + + + + + +**Example IX - Projection with metadata**: + + + + +{`const projectedResults = await session.advanced + .rawQuery(\`from index "Employees/ByFirstNameAndBirthday" as x + select { + Name: x.FirstName, + Metadata: getMetadata(x) // Get the metadata + }\`) + .all(); +`} + + + + +{`class Employees_ByFirstNameAndBirthday extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Employees", e => { + return { + FirstName: e.FirstName, + Birthday: e.Birthday + }; + }); + } +} +`} + + + + +{`from index "Employees/ByFirstNameAndBirthday" as x +select { + Name : x.FirstName, + Metadata : getMetadata(x) +} +`} + + + + + + + +## Projection behavior with a static-index + +* **By default**, when querying a static-index and projecting query results, + the server will try to retrieve the fields' values from the fields [stored in the index](../../indexes/storing-data-in-index.mdx). + If the index does Not store those fields then the fields' values will be retrieved from the documents. + +* This behavior can be modified by setting the **projection behavior**. + +* Note: Storing fields in the index can increase query performance when projecting, + but this comes at the expense of the disk space used by the index. 
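+
+To make the default behavior concrete before the full example below: in the following minimal RQL sketch against the stored-fields index from Example II, `FirstName` is served from the index (it is stored), while `Title` falls back to the document (it is indexed but not stored):
+
+{`from index "Employees/ByNameAndTitleWithStoredFields"
+select FirstName, Title // FirstName: from the index; Title: from the document (Default behavior)
+`}
+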
+
+
+
+**Example**:
+
+
+
+
+{`const projectedResults = await session
+    .query({ indexName: "Employees/ByNameAndTitleWithStoredFields" })
+    // Pass the requested projection behavior to the 'SelectFields' method
+    .selectFields(["FirstName", "Title"], ProjectionClass, "FromIndexOrThrow")
+    .all();
+`}
+
+
+
+
+{`class Employees_ByNameAndTitleWithStoredFields extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("Employees", e => {
+            return {
+                FirstName: e.FirstName,
+                LastName: e.LastName,
+                Title: e.Title
+            };
+        });
+
+        // Store some fields in the index:
+        this.store('FirstName', 'Yes');
+        this.store('LastName', 'Yes');
+    }
+}
+`}
+
+
+
+
+{`class ProjectionClass {
+    constructor(firstName, title) {
+        // The projection class contains field names from the index-fields
+        this.FirstName = firstName;
+        this.Title = title;
+    }
+}
+`}
+
+
+
+
+{`from index "Employees/ByNameAndTitleWithStoredFields"
+select FirstName, Title
+`}
+
+
+
+
+The projection behavior in the above example is set to `FromIndexOrThrow` and so the following applies:
+
+* Field `FirstName` is stored in the index so the server will fetch its values from the index.
+
+* However, field `Title` is Not stored in the index so an exception will be thrown when the query is executed.
+
+
+
+
+
+**Projection behavior options**:
+
+* `"Default"`
+  Retrieve values from the stored index fields when available.
+  If fields are not stored then get values from the document,
+  a field that is not found in the document is skipped.
+
+* `"FromIndex"`
+  Retrieve values from the stored index fields when available.
+  A field that is not stored in the index is skipped.
+
+* `"FromIndexOrThrow"`
+  Retrieve values from the stored index fields when available.
+  An exception is thrown if the index does not store the requested field.
+
+* `"FromDocument"`
+  Retrieve values directly from the documents store.
+  A field that is not found in the document is skipped.
+
+* `"FromDocumentOrThrow"`
+  Retrieve values directly from the documents store.
+  An exception is thrown if the document does not contain the requested field.
+
+
+
+
+
+## ofType
+
+* `ofType` is a client-side projection that is only used to map the resulting objects to the provided type.
+
+* As opposed to projection queries, where results are not tracked by the session,
+  in the case of non-projecting queries that use _ofType_ the session does track the resulting document entities.
+
+
+
+{`// Make a query without a projection
+const results = await session
+    .query(\{ indexName: "Employees/ByNameAndTitle" \})
+    .whereEquals('Title', 'sales representative')
+    // Call 'ofType'
+    // The resulting objects will be of type 'Employee'
+    .ofType(Employee)
+    .all();
+
+// In this case, the resulting objects are tracked by the session
+`}
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_projections-php.mdx b/versioned_docs/version-7.1/indexes/querying/_projections-php.mdx
new file mode 100644
index 0000000000..2cba1b221e
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_projections-php.mdx
@@ -0,0 +1,732 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article provides examples of projecting query results when querying a **static-index**.
+
+* Prior to reading this article, please refer to [query results projection overview](../../client-api/session/querying/how-to-project-query-results.mdx)
+  for general knowledge about Projections and for dynamic-queries examples.
+
+* Projections can be applied using the `selectFields` method.
+
+* In this page:
+
+    * [What are Projections and When to Use Them](../../indexes/querying/projections.mdx#what-are-projections-and-when-to-use-them)
+    * [`selectFields`](../../indexes/querying/projections.mdx#selectfields)
+    * [Examples](../../indexes/querying/projections.mdx#examples)
+    * [Projection Behavior](../../indexes/querying/projections.mdx#projection-behavior)
+    * [Projections and the Session](../../indexes/querying/projections.mdx#projections-and-the-session)
+
+
+## What are Projections and When to Use Them
+
+When performing a query, we usually pull the full document back from the server.
+
+However, we often need to display the data to the user. Instead of pulling the whole document back and picking
+just what we'll show, we can ask the server to send us just the details we want to show the user and thus reduce
+the amount of traffic on the network.
+
+The savings can be very significant if we need to show just a bit of information on a large document.
+
+A good example in the sample data set would be the order document. If we ask for all the Orders where Company
+is "companies/65-A", the size of the result that we get back from the server is 19KB.
+
+However, if we perform the same query and ask to get back only the Employee and OrderedAt fields, the size of
+the result is only 5KB.
+
+Aside from allowing you to pick only a portion of the data, projection functions give you the ability to
+rename some fields, load external documents, and perform transformations on the results.
+
+#### Projections are Applied as the Last Stage in the Query
+It is important to understand that projections are applied after the query has been processed, filtered,
+sorted, and paged. The projection doesn't apply to all the documents in the database, only to the results
+that are actually returned.
+This reduces the load on the server significantly, since we can avoid doing work only to throw it away
+immediately afterward. It also means that we cannot do any filtering work as part of the projection. You can
+filter what will be returned, but not which documents will be returned. That has already been determined
+earlier in the query pipeline.
+
+#### The Cost of Running a Projection
+Another consideration to take into account is the cost of running the projection. It is possible to make the
+projection query expensive to run. RavenDB has limits on the amount of time it will spend evaluating the
+projection, and exceeding these (quite generous) limits will fail the query.
+
+#### Projections and Stored Fields
+If a projection function only requires fields that are stored, then the document will not be loaded from
+storage and all data will come from the index directly. This can increase query performance (at the cost
+of the disk space used) in many situations where the whole document is not needed. You can read more about
+field storing [here](../../indexes/storing-data-in-index.mdx).
+
+
+
+## `selectFields`
+
+Projections can be applied using the `selectFields` method.
+
+The projection fields can be specified as an array of field-name strings,
+and the projection class is passed as the first argument.
+
+
+
+
+{`$fields = [
+    "Name",
+    "Phone"
+];
+
+$results = $session
+    ->advanced()
+    ->documentQuery(Company::class, Companies_ByContact::class)
+    ->selectFields(ContactDetails::class, $fields)
+    ->toList();
+`}
+
+
+
+
+{`class Companies_ByContact extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "companies.Select(x => new {Name = x.Contact.Name, Phone = x.Phone})";
+
+        $this->storeAllFields(FieldStorage::yes()); // Name and Phone fields can be retrieved directly from index
+    }
+}
+`}
+
+
+
+
+{`class ContactDetails
+{
+    private ?string $name = null;
+    private ?string $phone = null;
+
+    public function getName(): ?string
+    {
+        return $this->name;
+    }
+
+    public function setName(?string $name): void
+    {
+        $this->name = $name;
+    }
+
+    public function getPhone(): ?string
+    {
+        return $this->phone;
+    }
+
+    public function setPhone(?string $phone): void
+    {
+        $this->phone = $phone;
+    }
+}
+`}
+
+
+
+
+{`from index 'Companies/ByContact'
+select Name, Phone
+`}
+
+
+
+
+The projection can also be defined by simply passing the projection class on its own.
+
+
+
+
+{`$results = $session
+    ->advanced()
+    ->documentQuery(Company::class, Companies_ByContact::class)
+    ->selectFields(ContactDetails::class)
+    ->toList();
+`}
+
+
+
+
+{`class Companies_ByContact extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "companies.Select(x => new {Name = x.Contact.Name, Phone = x.Phone})";
+
+        $this->storeAllFields(FieldStorage::yes()); // Name and Phone fields can be retrieved directly from index
+    }
+}
+`}
+
+
+
+
+{`class ContactDetails
+{
+    private ?string $name = null;
+    private ?string $phone = null;
+
+    public function getName(): ?string
+    {
+        return $this->name;
+    }
+
+    public function setName(?string $name): void
+    {
+        $this->name = $name;
+    }
+
+    public function getPhone(): ?string
+    {
+        return $this->phone;
+    }
+
+    public function setPhone(?string $phone): void
+    {
+        $this->phone = $phone;
+    }
+}
+`}
+
+
+
+
+{`from index 'Companies/ByContact'
+select Name, Phone
+`}
+
+
+
+
+
+
+## Examples
+
+#### Example I - Projecting Individual Fields of the Document
+
+
+
+
+{`$results = $session
+    ->query(Employee::class, Employees_ByFirstAndLastName::class)
+    ->selectFields(Employee::class, ["FirstName", "LastName"])
+    ->toList();
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map =
+            "from employee in docs.Employees " .
+            "select new " .
+            "{" .
+            "    FirstName = employee.FirstName," .
+            "    LastName = employee.LastName" .
+            "}";
+    }
+}
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName'
+select FirstName, LastName
+`}
+
+
+
+
+This will issue a query to the database, requesting only `FirstName` and `LastName` from all documents whose
+index entries match the query predicate in the `Employees/ByFirstAndLastName` index. What does this mean?
+If an index entry matches the query predicate, the server tries to extract all requested fields from that
+particular entry. If all requested fields are available there, the document is not downloaded from storage.
+The index `Employees/ByFirstAndLastName` used in the above query does not store any fields,
+so the documents will be fetched from storage.
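+
+One detail worth knowing: the document id is not a regular field, but it can still be included in a projection via the `id()` function inside a JavaScript-style select. A minimal RQL sketch against the index above:
+
+{`from index 'Companies/ByContact' as c
+select {
+    Id: id(c),
+    Name: c.Name,
+    Phone: c.Phone
+}
+`}
+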
+#### Example II - Projecting Stored Fields
+
+If we create an index that stores `FirstName` and `LastName` and the query requests only those fields,
+then **the data will come from the index directly**.
+
+
+
+
+{`$results = $session
+    ->query(Employee::class, Employees_ByFirstAndLastNameWithStoredFields::class)
+    ->selectFields(Employee::class, ["FirstName", "LastName"])
+    ->toList();
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastNameWithStoredFields extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map =
+            "from employee in docs.Employees " .
+            "select new" .
+            "{" .
+            "    FirstName = employee.FirstName," .
+            "    LastName = employee.LastName" .
+            "}";
+
+        $this->storeAllFields(FieldStorage::yes()); // FirstName and LastName fields can be retrieved directly from index
+    }
+}
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastNameWithStoredFields'
+select FirstName, LastName
+`}
+
+
+
+#### Example III - Projecting Arrays and Objects
+
+
+
+
+{`$queryData = new QueryData(["ShipTo", "Lines[].ProductName"], ["ShipTo", "Products"]);
+
+$results = $session
+    ->query(Order::class, Orders_ByShipToAndLines::class)
+    ->selectFields(ShipToAndProducts::class, $queryData)
+    ->toList();
+`}
+
+
+
+
+{`class Orders_ByShipToAndLines extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "from order in docs.Orders " .
+            "select new { " .
+            "    ShipTo = order.ShipTo, " .
+            "    Lines = order.Lines " .
+            "}";
+    }
+}
+`}
+
+
+
+
+{`from index 'Orders/ByShipToAndLines' as o
+select
+{
+    ShipTo: o.ShipTo,
+    Products : o.Lines.map(function(y){return y.ProductName;})
+}
+`}
+
+
+
+#### Example IV - Projection with Expression
+
+
+
+
+{`$results = $session
+    ->rawQuery(FullName::class, 'from Employees as e select { FullName: e.FirstName + " " + e.LastName }')
+    ->toList();
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map =
+            "from employee in docs.Employees " .
+            "select new " .
+            "{" .
+            "    FirstName = employee.FirstName," .
+            "    LastName = employee.LastName" .
+            "}";
+    }
+}
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName' as e
+select
+{
+    FullName : e.FirstName + " " + e.LastName
+}
+`}
+
+
+
+#### Example V - Projection with `let`
+
+
+
+
+{`$results = $session->advanced()->rawQuery(
+        Employee::class,
+        "declare function output(e) { " .
+        "    var format = function(p){ return p.FirstName + \\" \\" + p.LastName; }; " .
+        "    return { FullName : format(e) }; " .
+        "} " .
+        "from Employees as e select output(e)"
+    )
+    ->toList();
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map =
+            "from employee in docs.Employees " .
+            "select new " .
+            "{" .
+            "    FirstName = employee.FirstName," .
+            "    LastName = employee.LastName" .
+            "}";
+    }
+}
+`}
+
+
+
+
+{`declare function output(e) {
+    var format = function(p){ return p.FirstName + " " + p.LastName; };
+    return { FullName : format(e) };
+}
+from index 'Employees/ByFirstAndLastName' as e select output(e)
+`}
+
+
+
+#### Example VI - Projection with Calculation
+
+
+
+
+{`$results = $session->advanced()->rawQuery(
+        Total::class,
+        "from Orders as o " .
+        "select { " .
+        "    Total : o.Lines.reduce( " .
+        "        (acc , l) => acc += l.PricePerUnit * l.Quantity, 0) " .
+ "}") + ->toList(); +`} + + + + +{`class Orders_ByShipToAndLines extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "from order in docs.Orders" . + "select new { " . + " ShipTo = order.ShipTo, " . + " Lines = order.Lines " . + "}"; + } +} +`} + + + + +{`from index 'Orders/ByShipToAndLines' as o +select { + Total : o.Lines.reduce( + (acc , l) => acc += l.PricePerUnit * l.Quantity, 0) +} +`} + + + +#### Example VII - Projection With a Count() Predicate + + + + +{`class Orders_ByShippedAtAndCompany extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = + "from order in docs.Orders " . + "select new " . + "{" . + " ShippedAt = order.ShippedAt," . + " Company = order.Company" . + "}"; + } +} +`} + + + + +{`from Orders as o +load o.Company as c +select +{ + CompanyName : c.Name, + ShippedAt : o.ShippedAt, + TotalProducts : o.Lines.length, + TotalDiscountedProducts : o.Lines.filter(x => x.Discount > 0 ).length +} +`} + + + +#### Example VIII - Projection Using a Loaded Document + + + + +{`$results = $session->advanced()->rawQuery( + OrderProjection::class, + "from Orders as o " . + "load o.company as c " . + "select { " . + " CompanyName: c.Name," . + " ShippedAt: o.ShippedAt" . + "}") + ->toList(); +`} + + + + +{`class Orders_ByShippedAtAndCompany extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = + "from order in docs.Orders " . + "select new " . + "{" . + " ShippedAt = order.ShippedAt," . + " Company = order.Company" . + "}"; + } +} +`} + + + + +{`from index 'Orders/ByShippedAtAndCompany' as o +load o.Company as c +select { + CompanyName: c.Name, + ShippedAt: o.ShippedAt +} +`} + + + +#### Example IX - Projection with Dates + + + + +{`$results = $session->advanced()->rawQuery( + EmployeeProjection::class, + "from Employees as e " . + "select { " . + " DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(), " . + " MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1, " . + " Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear() " . + "}") + ->toList(); +`} + + + + +{`class Employees_ByFirstNameAndBirthday extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = + "from employee in docs.Employees " . + "select new " . + "{" . + " FirstName = employee.FirstName," . + " Birthday = employee.Birthday" . + "}"; + } +} +`} + + + + +{`from index 'Employees/ByFirstNameAndBirthday' as e +select { + DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(), + MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1, + Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear() +} +`} + + + +#### Example X - Projection with Raw JavaScript Code + + + + +{`$results = $session->advanced()->rawQuery( + EmployeeProjection::class, + "from Employees as e " . + "select { " . + " Date : new Date(Date.parse(e.Birthday)), " . + " Name : e.FirstName.substr(0,3) " . + "}") + ->toList(); +`} + + + + +{`class Employees_ByFirstNameAndBirthday extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = + "from employee in docs.Employees " . + "select new " . + "{" . + " FirstName = employee.FirstName," . + " Birthday = employee.Birthday" . 
+ "}"; + } +} +`} + + + + +{`from index 'Employees/ByFirstNameAndBirthday' as e +select { + Date : new Date(Date.parse(e.Birthday)), + Name : e.FirstName.substr(0,3) +} +`} + + + +#### Example XI - Projection with Metadata + + + + +{`$results = $session->advanced()->rawQuery( + Employee::class, + "from Employees as e " . + "select {" . + " Name : e.FirstName, " . + " Metadata : getMetadata(e)" . + "}") + ->toList(); +`} + + + + +{`class Employees_ByFirstAndLastName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = + "from employee in docs.Employees " . + "select new " . + "{" . + " FirstName = employee.FirstName," . + " LastName = employee.LastName" . + "}"; + } +} +`} + + + + +{`from index 'Employees/ByFirstAndLastName' as e +select { + Name : e.FirstName, + Metadata : getMetadata(e) +} +`} + + + + + + +## Projection Behavior +The `selectFields` methods can also take a `ProjectionBehavior` parameter, which +determines whether the query should retrieve indexed data or directly retrieve +document data, and what to do when the data can't be retrieved. Learn more +[here](../../client-api/session/querying/how-to-customize-query.mdx#projection). + + + +## Projections and the Session +As you work with projections rather than directly with documents, the data is _not_ tracked by the session. +Modifications to a projection will not modify the document when `saveChanges` is called. + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_projections-python.mdx b/versioned_docs/version-7.1/indexes/querying/_projections-python.mdx new file mode 100644 index 0000000000..61e7c82055 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_projections-python.mdx @@ -0,0 +1,699 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article provides examples of projecting query results when querying a **static-index**. + +* Prior to reading this article, please refer to [query results projection overview](../../client-api/session/querying/how-to-project-query-results.mdx) + for general knowledge about Projections and for dynamic-queries examples. + +* Projections can be applied using the `select_fields` and `select_fields_query_data` methods. + +* In this page: + + * [What are Projections and When to Use Them](../../indexes/querying/projections.mdx#what-are-projections-and-when-to-use-them) + * [`select_fields`](../../indexes/querying/projections.mdx#select_fields) + * [Examples](../../indexes/querying/projections.mdx#examples) + * [Projection Behavior](../../indexes/querying/projections.mdx#projection-behavior) + * [Projections and the Session](../../indexes/querying/projections.mdx#projections-and-the-session) + * [Syntax](../../indexes/querying/projections.mdx#syntax) + + +## What are Projections and When to Use Them + +When performing a query, we usually pull the full document back from the server. + +However, we often need to display the data to the user. Instead of pulling the whole document back and picking +just what we'll show, we can ask the server to send us just the details we want to show the user and thus reduce +the amount of traffic on the network. + +The savings can be very significant if we need to show just a bit of information on a large document. + +A good example in the sample data set would be the order document. 
If we ask for all the Orders where Company
+is "companies/65-A", the size of the result that we get back from the server is 19KB.
+
+However, if we perform the same query and ask to get back only the Employee and OrderedAt fields, the size of
+the result is only 5KB.
+
+Aside from allowing you to pick only a portion of the data, projection functions give you the ability to
+rename some fields, load external documents, and perform transformations on the results.
+
+#### Projections are Applied as the Last Stage in the Query
+It is important to understand that projections are applied after the query has been processed, filtered,
+sorted, and paged. The projection doesn't apply to all the documents in the database, only to the results
+that are actually returned.
+This reduces the load on the server significantly, since we can avoid doing work only to throw it away
+immediately afterward. It also means that we cannot do any filtering work as part of the projection. You can
+filter what will be returned, but not which documents will be returned. That has already been determined
+earlier in the query pipeline.
+
+#### The Cost of Running a Projection
+Another consideration to take into account is the cost of running the projection. It is possible to make the
+projection query expensive to run. RavenDB has limits on the amount of time it will spend evaluating the
+projection, and exceeding these (quite generous) limits will fail the query.
+
+#### Projections and Stored Fields
+If a projection function only requires fields that are stored, then the document will not be loaded from
+storage and all data will come from the index directly. This can increase query performance (at the cost
+of the disk space used) in many situations where the whole document is not needed. You can read more about
+field storing [here](../../indexes/storing-data-in-index.mdx).
+
+
+
+## `select_fields`
+
+Projections can be applied using the `select_fields` and `select_fields_query_data` methods.
+
+The projection fields can be specified as a list of `str` field names,
+and the projection class is passed as the first argument.
+
+
+
+
+{`fields = ["Name", "Phone"]
+results = list(
+    session.advanced.document_query_from_index_type(Companies_ByContact, Company).select_fields(
+        ContactDetails, fields
+    )
+)
+`}
+
+
+
+
+{`class Companies_ByContact(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "companies.Select(x => new {name = x.Contact.Name, phone = x.Phone})"
+        self._store_all_fields(FieldStorage.YES)  # Name and Phone fields can be retrieved directly from index
+`}
+
+
+
+
+{`class ContactDetails:
+    def __init__(self, name: str = None, phone: str = None):
+        self.name = name
+        self.phone = phone
+`}
+
+
+
+
+{`from index 'Companies/ByContact'
+select Name, Phone
+`}
+
+
+
+
+The projection can also be defined by simply passing the projection class.
+
+
+
+
+{`results = list(
+    session.advanced.document_query_from_index_type(Companies_ByContact, Company).select_fields(
+        ContactDetails
+    )
+)
+`}
+
+
+
+
+{`class Companies_ByContact(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "companies.Select(x => new {name = x.Contact.Name, phone = x.Phone})"
+        self._store_all_fields(FieldStorage.YES)  # Name and Phone fields can be retrieved directly from index
+`}
+
+
+
+
+{`class ContactDetails:
+    def __init__(self, name: str = None, phone: str = None):
+        self.name = name
+        self.phone = phone
+`}
+
+
+
+
+{`from index 'Companies/ByContact'
+select Name, Phone
+`}
+
+
+
+
+
+
+## Examples
+
+#### Example I - Projecting Individual Fields of the Document
+
+
+
+
+{`results = list(
+    session.query_index_type(Employees_ByFirstAndLastName, Employee).select_fields(
+        Employee, "FirstName", "LastName"
+    )
+)
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from employee in docs.Employees "
+            "select new "
+            "{"
+            "    FirstName = employee.FirstName,"
+            "    LastName = employee.LastName"
+            "}"
+        )
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName'
+select FirstName, LastName
+`}
+
+
+
+
+This will issue a query to the database, requesting only `FirstName` and `LastName` from all documents whose
+index entries match the query predicate in the `Employees/ByFirstAndLastName` index. What does this mean?
+If an index entry matches the query predicate, the server tries to extract all requested fields from that
+particular entry. If all requested fields are available there, the document is not downloaded from storage.
+The index `Employees/ByFirstAndLastName` used in the above query does not store any fields,
+so the documents will be fetched from storage.
+#### Example II - Projecting Stored Fields
+
+If we create an index that stores `FirstName` and `LastName` and the query requests only those fields,
+then **the data will come from the index directly**.
+
+
+
+
+{`results = list(
+    session.query_index_type(Employees_ByFirstAndLastNameWithStoredFields, Employee).select_fields(
+        Employee, "FirstName", "LastName"
+    )
+)
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastNameWithStoredFields(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from employee in docs.Employees "
+            "select new"
+            "{"
+            "    FirstName = employee.FirstName,"
+            "    LastName = employee.LastName"
+            "}"
+        )
+        self._store_all_fields(FieldStorage.YES)
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastNameWithStoredFields'
+select FirstName, LastName
+`}
+
+
+
+#### Example III - Projecting Arrays and Objects
+
+
+
+
+{`query_data = QueryData(["ShipTo", "Lines[].ProductName"], ["ShipTo", "Products"])
+results = list(session.query(object_type=Order).select_fields_query_data(ShipToAndProducts, query_data))
+`}
+
+
+
+
+{`class Orders_ByShipToAndLines(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from order in docs.Orders select new { ShipTo = order.ShipTo, Lines = order.Lines}"
+`}
+
+
+
+
+{`from index 'Orders/ByShipToAndLines' as o
+select
+{
+    ShipTo: o.ShipTo,
+    Products : o.Lines.map(function(y){return y.ProductName;})
+}
+`}
+
+
+
+#### Example IV - Projection with Expression
+
+
+
+
+{`results = list(
+    session.advanced.raw_query(
+        'from Employees as e select { FullName: e.FirstName + " " + e.LastName }', FullName
+    )
+)
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from employee in docs.Employees "
+            "select new "
+            "{"
+            "    FirstName = employee.FirstName,"
+            "    LastName = employee.LastName"
+            "}"
+        )
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName' as e
+select
+{
+    FullName : e.FirstName + " " + e.LastName
+}
+`}
+
+
+
+#### Example V - Projection with `let`
+
+
+
+
+{`results = list(
+    session.advanced.raw_query(
+        "declare function output (e) { "
+        '    var format = function(p){ return p.FirstName + " " + p.LastName; };'
+        "    return { FullName : format(e) }; "
+        "} "
+        "from Employees as e select output(e)",
+        Employee,
+    )
+)
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from employee in docs.Employees "
+            "select new "
+            "{"
+            "    FirstName = employee.FirstName,"
+            "    LastName = employee.LastName"
+            "}"
+        )
+`}
+
+
+
+
+{`declare function output(e) {
+    var format = function(p){ return p.FirstName + " " + p.LastName; };
+    return { FullName : format(e) };
+}
+from index 'Employees/ByFirstAndLastName' as e select output(e)
+`}
+
+
+
+#### Example VI - Projection with Calculation
+
+
+
+
+{`results = list(
+    session.advanced.raw_query(
+        "from Orders as o "
+        "select { "
+        "    Total : o.Lines.reduce( "
+        "        (acc, l) => acc += l.PricePerUnit * l.Quantity, 0) "
+        "}",
+        Total,
+    )
+)
+`}
+
+
+
+
+{`class Orders_ByShipToAndLines(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from order in docs.Orders select new { ShipTo = order.ShipTo, Lines = order.Lines}"
+`}
+
+
+
+
+{`from index 'Orders/ByShipToAndLines' as o
+select {
+    Total : o.Lines.reduce(
+        (acc , l) => acc += l.PricePerUnit * l.Quantity, 0)
+}
+`}
+
+
+
+#### Example VII - Projection With a Count() Predicate
+
+
+
+
+{`class Orders_ByShippedAtAndCompany(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from order in docs.Orders "
+            "select new "
+            "{"
+            "    ShippedAt = order.ShippedAt,"
+            "    Company = order.Company"
+            "}"
+        )
+`}
+
+
+
+
+{`from Orders as o
+load o.Company as c
+select
+{
+    CompanyName : c.Name,
+    ShippedAt : o.ShippedAt,
+    TotalProducts : o.Lines.length,
+    TotalDiscountedProducts : o.Lines.filter(x => x.Discount > 0 ).length
+}
+`}
+
+
+
+#### Example VIII - Projection Using a Loaded Document
+
+
+
+
+{`results = list(
+    session.advanced.raw_query(
+        "from Orders as o "
+        "load o.company as c "
+        "select { "
+        "    CompanyName: c.Name,"
+        "    ShippedAt: o.ShippedAt"
+        "}",
+        OrderProjection,
+    )
+)
+`}
+
+
+
+
+{`class Orders_ByShippedAtAndCompany(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from order in docs.Orders "
+            "select new "
+            "{"
+            "    ShippedAt = order.ShippedAt,"
+            "    Company = order.Company"
+            "}"
+        )
+`}
+
+
+
+
+{`from index 'Orders/ByShippedAtAndCompany' as o
+load o.Company as c
+select {
+    CompanyName: c.Name,
+    ShippedAt: o.ShippedAt
+}
+`}
+
+
+
+#### Example IX - Projection with Dates
+
+
+
+
+{`results = list(
+    session.advanced.raw_query(
+        "from Employees as e "
+        "select { "
+        "    DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(), "
+        "    MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1, "
+        "    Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear() "
+        "}",
+        EmployeeProjection,
+    )
+)
+`}
+
+
+
+
+{`class Employees_ByFirstNameAndBirthday(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from employee in docs.Employees "
+            "select new "
+            "{"
+            "    FirstName = employee.FirstName,"
+            "    Birthday = employee.Birthday"
+            "}"
+        )
+`}
+
+
+
+
+{`from index 'Employees/ByFirstNameAndBirthday' as e
+select {
+    DayOfBirth : new Date(Date.parse(e.Birthday)).getDate(),
+    MonthOfBirth : new Date(Date.parse(e.Birthday)).getMonth() + 1,
+    Age : new Date().getFullYear() - new Date(Date.parse(e.Birthday)).getFullYear()
+}
+`}
+
+
+
+#### Example X - Projection with Raw JavaScript Code
+
+
+
+
+{`results = list(
+    session.advanced.raw_query(
+        "from Employees as e "
+        "select { "
+        "    Date : new Date(Date.parse(e.Birthday)), "
+        "    Name : e.FirstName.substr(0,3) "
+        "}",
+        EmployeeProjection,
+    )
+)
+`}
+
+
+
+
+{`class Employees_ByFirstNameAndBirthday(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from employee in docs.Employees "
+            "select new "
+            "{"
+            "    FirstName = employee.FirstName,"
+            "    Birthday = employee.Birthday"
+            "}"
+        )
+`}
+
+
+
+
+{`from index 'Employees/ByFirstNameAndBirthday' as e
+select {
+    Date : new Date(Date.parse(e.Birthday)),
+    Name : e.FirstName.substr(0,3)
+}
+`}
+
+
+
+#### Example XI - Projection with Metadata
+
+
+
+
+{`results = list(
+    session.advanced.raw_query(
+        "from Employees as e " "select {" "    Name : e.FirstName, " "    Metadata : getMetadata(e)" "}",
+        Employee,
+    )
+)
+`}
+
+
+
+
+{`class Employees_ByFirstAndLastName(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from employee in docs.Employees "
+            "select new "
+            "{"
+            "    FirstName = employee.FirstName,"
+            "    LastName = employee.LastName"
+            "}"
+        )
+`}
+
+
+
+
+{`from index 'Employees/ByFirstAndLastName' as e
+select {
+    Name : e.FirstName,
+    Metadata : getMetadata(e)
+}
+`}
+
+
+
+
+
+
+## Projection Behavior
+The `select_fields` methods can also take a `ProjectionBehavior` parameter, which
+determines whether the query should retrieve indexed data or directly retrieve
+document data, and what to do when the data can't be retrieved.
Learn more +[here](../../client-api/session/querying/how-to-customize-query.mdx#projection). + + + +## Projections and the Session +As you work with projections rather than directly with documents, the data is _not_ tracked by the session. +Modifications to a projection will not modify the document when `save_changes` is called. + + +## Syntax + + + +{`def select_fields( + self, + projection_class: Type[_TProjection], + *fields: str, + projection_behavior: Optional[ProjectionBehavior] = ProjectionBehavior.DEFAULT, +) -> DocumentQuery[_TProjection]: ... + +def select_fields_query_data( + self, projection_class: Type[_TProjection], query_data: QueryData +) -> DocumentQuery[_TProjection]: ... + +class QueryData: + def __init__( + self, + fields: List[str], + projections: List[str], + from_alias: Optional[str] = None, + declare_tokens: Optional[List[DeclareToken]] = None, + load_tokens: Optional[List[LoadToken]] = None, + is_custom_function: Optional[bool] = None, + ): + self.fields = fields + self.projections = projections + self.from_alias = from_alias + self.declare_tokens = declare_tokens + self.load_tokens = load_tokens + self.is_custom_function = is_custom_function + + self.map_reduce: Union[None, bool] = None + self.project_into: Union[None, bool] = None + self.projection_behavior: Union[None, ProjectionBehavior] = None +`} + + +#### `ProjectionBehavior` Syntax: + + + +{`class ProjectionBehavior(Enum): + DEFAULT = "Default" + FROM_INDEX = "FromIndex" + FROM_INDEX_OR_THROW = "FromIndexOrThrow" + FROM_DOCUMENT = "FromDocument" + FROM_DOCUMENT_OR_THROW = "FromDocumentOrThrow" +`} + + + +* `Default` + Retrieve values from the stored index fields when available. + If fields are not stored then get values from the document, + a field that is not found in the document is skipped. + +* `FromIndex` + Retrieve values from the stored index fields when available. + A field that is not stored in the index is skipped. + +* `FromIndexOrThrow` + Retrieve values from the stored index fields when available. + An exception is thrown if the index does not store the requested field. + +* `FromDocument` + Retrieve values directly from the documents store. + A field that is not found in the document is skipped. + +* `FromDocumentOrThrow` + Retrieve values directly from the documents store. + An exception is thrown if the document does not contain the requested field. + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_query-index-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_query-index-csharp.mdx new file mode 100644 index 0000000000..236e200081 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_query-index-csharp.mdx @@ -0,0 +1,556 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Prior to this article, it is recommended that you first read this [Query Overview](../../client-api/session/querying/how-to-query.mdx). + +* For a basic indexes overview, see the [Indexes Overview](../../studio/database/indexes/indexes-overview.mdx). +* Indexing the content of your documents allows for **fast document retrieval** when querying the index. + +* This article is a basic overview of how to query a **static index** using **code**. + * For dynamic query examples see [Query Overview](../../client-api/session/querying/how-to-query.mdx). 
* An index can also be queried from [Studio](../../studio/database/queries/query-view.mdx)
+  using [RQL](../../client-api/session/querying/what-is-rql.mdx).
+
+* In this page:
+    * [Query an index by `Query`](../../indexes/querying/query-index.mdx#query-an-index-by-query) (using LINQ)
+    * [Query an index by `DocumentQuery`](../../indexes/querying/query-index.mdx#query-an-index-by-documentquery) (low-level API)
+    * [Query an index by `RawQuery`](../../indexes/querying/query-index.mdx#query-an-index-by-rawquery) (using RQL)
+
+
+## Query an index by `Query`
+
+* In the following examples we **query an index** using the session's `Session.Query` method, which supports LINQ.
+
+* Querying can be enhanced using these [extension methods](../../client-api/session/querying/how-to-query.mdx#custom-methods-and-extensions-for-linq).
+#### Query index - no filtering:
+
+
+
+
+{`// Query the 'Employees' collection using the index - without filtering
+// (Open the 'Index' tab to view the index class definition)
+
+List<Employee> employees = session
+    // Pass the queried collection as the first generic parameter
+    // Pass the index class as the second generic parameter
+    .Query<Employee, Employees_ByName>()
+    // Execute the query
+    .ToList();
+
+// All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned
+`}
+
+
+
+
+{`// Query the 'Employees' collection using the index - without filtering
+
+List<Employee> employees = await asyncSession
+    // Pass the queried collection as the first generic parameter
+    // Pass the index class as the second generic parameter
+    .Query<Employee, Employees_ByName>()
+    // Execute the query
+    .ToListAsync();
+
+// All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned
+`}
+
+
+
+
+{`// Query the 'Employees' collection using the index - without filtering
+
+List<Employee> employees = session
+    // Pass the index name as a parameter
+    // Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition
+    .Query<Employee>("Employees/ByName")
+    // Execute the query
+    .ToList();
+
+// All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned
+`}
+
+
+
+
+{`// The index definition:
+
+public class Employees_ByName : AbstractIndexCreationTask<Employee, Employees_ByName.IndexEntry>
+{
+    // The IndexEntry class defines the index-fields
+    public class IndexEntry
+    {
+        public string FirstName { get; set; }
+        public string LastName { get; set; }
+    }
+
+    public Employees_ByName()
+    {
+        // The 'Map' function defines the content of the INDEX-fields
+        Map = employees => from employee in employees
+            select new IndexEntry
+            {
+                // * The content of INDEX-fields 'FirstName' & 'LastName'
+                //   is composed of the relevant DOCUMENT-fields.
+                FirstName = employee.FirstName,
+                LastName = employee.LastName
+
+                // * The index-fields can be queried on to fetch matching documents.
+                //   You can query and filter Employee documents based on their first or last names.
+
+                // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields
+                //   will Not be indexed.
+
+                // * Note: the INDEX-field name does Not have to be exactly the same
+                //   as the DOCUMENT-field name.
+            };
+    }
+}
+`}
+
+
+
+
+{`// Note:
+// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition
+
+from index "Employees/ByName"
+
+// All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned
+`}
+
+
+
+#### Query index - with filtering:
+
+
+
+
+{`// Query the 'Employees' collection using the index - filter by INDEX-field
+
+List<Employee> employees = session
+    // Pass the IndexEntry class as the first generic parameter
+    // Pass the index class as the second generic parameter
+    .Query<Employees_ByName.IndexEntry, Employees_ByName>()
+    // Filter the retrieved documents by some predicate on an INDEX-field
+    .Where(x => x.LastName == "King")
+    // Specify the type of the returned document entities
+    .OfType<Employee>()
+    // Execute the query
+    .ToList();
+
+// Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'.
+`}
+
+
+
+
+{`// Query the 'Employees' collection using the index - filter by INDEX-field
+
+List<Employee> employees = await asyncSession
+    // Pass the IndexEntry class as the first generic parameter
+    // Pass the index class as the second generic parameter
+    .Query<Employees_ByName.IndexEntry, Employees_ByName>()
+    // Filter the retrieved documents by some predicate on an INDEX-field
+    .Where(x => x.LastName == "King")
+    // Specify the type of the returned document entities
+    .OfType<Employee>()
+    // Execute the query
+    .ToListAsync();
+
+// Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'.
+`}
+
+
+
+
+{`// The index definition:
+
+public class Employees_ByName : AbstractIndexCreationTask<Employee, Employees_ByName.IndexEntry>
+{
+    // The IndexEntry class defines the index-fields
+    public class IndexEntry
+    {
+        public string FirstName { get; set; }
+        public string LastName { get; set; }
+    }
+
+    public Employees_ByName()
+    {
+        // The 'Map' function defines the content of the INDEX-fields
+        Map = employees => from employee in employees
+            select new IndexEntry
+            {
+                // * The content of INDEX-fields 'FirstName' & 'LastName'
+                //   is composed of the relevant DOCUMENT-fields.
+                FirstName = employee.FirstName,
+                LastName = employee.LastName
+
+                // * The index-fields can be queried on to fetch matching documents.
+                //   You can query and filter Employee documents based on their first or last names.
+
+                // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields
+                //   will Not be indexed.
+
+                // * Note: the INDEX-field name does Not have to be exactly the same
+                //   as the DOCUMENT-field name.
+            };
+    }
+}
+`}
+
+
+
+
+{`// Note:
+// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition
+
+from index "Employees/ByName"
+where LastName == "King"
+
+// Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'.
+`}
+
+
+
+
+* `OfType` is used to convert the type being used in the where clause (`IndexEntry`)
+  to the collection type (`Employee`).
+  The reason for this is that while the `IndexEntry` type allows for a strongly typed query,
+  the server returns the actual document entities.
+
+* An exception will be thrown when filtering by fields that are Not defined in the index.
+
+* Read more about filtering [here](../../indexes/querying/filtering.mdx).
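+
+To illustrate the point about undefined fields, a query such as the following minimal RQL sketch would fail, since the hypothetical `Address.City` path is not one of the index-fields defined by `Employees/ByName`:
+
+{`from index "Employees/ByName"
+where Address.City == "London" // fails - 'Address.City' is not defined in this index
+`}
+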
+#### Query index - with paging:
+
+
+
+{`// Query the 'Employees' collection using the index - page results
+
+// This example is based on the previous filtering example
+List<Employee> employees = session
+    .Query<Employees_ByName.IndexEntry, Employees_ByName>()
+    .Where(x => x.LastName == "King")
+    .Skip(5)  // Skip first 5 results
+    .Take(10) // Retrieve up to 10 documents
+    .OfType<Employee>()
+    .ToList();
+
+// Results will include up to 10 matching documents
+`}
+
+
+
+{`// Query the 'Employees' collection using the index - page results
+
+// This example is based on the previous filtering example
+List<Employee> employees = await asyncSession
+    .Query<Employees_ByName.IndexEntry, Employees_ByName>()
+    .Where(x => x.LastName == "King")
+    .Skip(5)  // Skip first 5 results
+    .Take(10) // Retrieve up to 10 documents
+    .OfType<Employee>()
+    .ToListAsync();
+
+// Results will include up to 10 matching documents
+`}
+
+
+
+{`// The index definition:
+
+public class Employees_ByName : AbstractIndexCreationTask<Employee, Employees_ByName.IndexEntry>
+{
+    // The IndexEntry class defines the index-fields
+    public class IndexEntry
+    {
+        public string FirstName { get; set; }
+        public string LastName { get; set; }
+    }
+
+    public Employees_ByName()
+    {
+        // The 'Map' function defines the content of the INDEX-fields
+        Map = employees => from employee in employees
+                           select new IndexEntry
+                           {
+                               // * The content of INDEX-fields 'FirstName' & 'LastName'
+                               //   is composed of the relevant DOCUMENT-fields.
+                               FirstName = employee.FirstName,
+                               LastName = employee.LastName
+
+                               // * The index-fields can be queried on to fetch matching documents.
+                               //   You can query and filter Employee documents based on their first or last names.
+
+                               // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields
+                               //   will Not be indexed.
+
+                               // * Note: the INDEX-field name does Not have to be exactly the same
+                               //   as the DOCUMENT-field name.
+                           };
+    }
+}
+`}
+
+
+
+{`// Note:
+// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition
+
+from index "Employees/ByName"
+where LastName == "King"
+limit 5, 10 // skip 5, take 10
+`}
+
+
+
+* Read more about paging [here](../../indexes/querying/paging.mdx).
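+
+When paging, it is often useful to know how many results matched the query in total, e.g. to compute
+the total number of pages. A small sketch, not part of the original page, using the client's query
+statistics:
+
+{`// A sketch: retrieve the total number of matching results while paging.
+
+List<Employee> employees = session
+    .Query<Employees_ByName.IndexEntry, Employees_ByName>()
+    .Statistics(out QueryStatistics stats)
+    .Where(x => x.LastName == "King")
+    .Skip(5)
+    .Take(10)
+    .OfType<Employee>()
+    .ToList();
+
+// Total number of results that matched the query, regardless of the page size
+var totalResults = stats.TotalResults;
+`}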
+
+
+## Query an index by `DocumentQuery`
+
+* `Session.Advanced.DocumentQuery` provides low-level access to RavenDB's querying mechanism,
+  giving you more flexibility and control when making complex queries.
+
+* For more information about _DocumentQuery_ see:
+  * [What is a document query](../../client-api/session/querying/document-query/what-is-document-query.mdx)
+  * [Query -vs- DocumentQuery](../../client-api/session/querying/document-query/query-vs-document-query.mdx)
+
+**Example**:
+
+
+
+{`// Query the 'Employees' collection using the index - filter by INDEX-field
+
+List<Employee> employees = session.Advanced
+    // Pass the IndexEntry class as the first generic parameter
+    // Pass the index class as the second generic parameter
+    .DocumentQuery<Employees_ByName.IndexEntry, Employees_ByName>()
+    // Filter the retrieved documents by some predicate on an INDEX-field
+    .WhereEquals(x => x.LastName, "King")
+    // Specify the type of the returned document entities
+    .OfType<Employee>()
+    // Execute the query
+    .ToList();
+
+// Results will include all documents from the 'Employees' collection whose 'LastName' equals 'King'.
+`}
+
+
+
+{`// Query the 'Employees' collection using the index - filter by INDEX-field
+
+List<Employee> employees = await asyncSession.Advanced
+    // Pass the IndexEntry class as the first generic parameter
+    // Pass the index class as the second generic parameter
+    .AsyncDocumentQuery<Employees_ByName.IndexEntry, Employees_ByName>()
+    // Filter the retrieved documents by some predicate on an INDEX-field
+    .WhereEquals(x => x.LastName, "King")
+    // Specify the type of the returned document entities
+    .OfType<Employee>()
+    // Execute the query
+    .ToListAsync();
+
+// Results will include all documents from the 'Employees' collection whose 'LastName' equals 'King'.
+`}
+
+
+
+{`// Query the 'Employees' collection using the index - filter by INDEX-field
+
+List<Employee> employees = session.Advanced
+    // Pass the IndexEntry class as the generic parameter
+    // Pass the index name as the parameter
+    // Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition
+    .DocumentQuery<Employees_ByName.IndexEntry>("Employees/ByName")
+    // Filter the retrieved documents by some predicate on an INDEX-field
+    .WhereEquals(x => x.LastName, "King")
+    // Specify the type of the returned document entities
+    .OfType<Employee>()
+    // Execute the query
+    .ToList();
+
+// Results will include all documents from the 'Employees' collection whose 'LastName' equals 'King'.
+`}
+
+
+
+{`// Query the 'Employees' collection using the index - filter by INDEX-field
+
+List<Employee> employees = await asyncSession.Advanced
+    // Pass the IndexEntry class as the generic parameter
+    // Pass the index name as the parameter
+    // Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition
+    .AsyncDocumentQuery<Employees_ByName.IndexEntry>("Employees/ByName")
+    // Filter the retrieved documents by some predicate on an INDEX-field
+    .WhereEquals(x => x.LastName, "King")
+    // Specify the type of the returned document entities
+    .OfType<Employee>()
+    // Execute the query
+    .ToListAsync();
+
+// Results will include all documents from the 'Employees' collection whose 'LastName' equals 'King'.
+`}
+
+
+
+{`// The index definition:
+
+public class Employees_ByName : AbstractIndexCreationTask<Employee, Employees_ByName.IndexEntry>
+{
+    // The IndexEntry class defines the index-fields
+    public class IndexEntry
+    {
+        public string FirstName { get; set; }
+        public string LastName { get; set; }
+    }
+
+    public Employees_ByName()
+    {
+        // The 'Map' function defines the content of the INDEX-fields
+        Map = employees => from employee in employees
+                           select new IndexEntry
+                           {
+                               // * The content of INDEX-fields 'FirstName' & 'LastName'
+                               //   is composed of the relevant DOCUMENT-fields.
+                               FirstName = employee.FirstName,
+                               LastName = employee.LastName
+
+                               // * The index-fields can be queried on to fetch matching documents.
+                               //   You can query and filter Employee documents based on their first or last names.
+
+                               // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields
+                               //   will Not be indexed.
+
+                               // * Note: the INDEX-field name does Not have to be exactly the same
+                               //   as the DOCUMENT-field name.
+                           };
+    }
+}
+`}
+
+
+
+{`// Note:
+// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition
+
+from index "Employees/ByName"
+where LastName == "King"
+`}
+
+
+
+
+## Query an index by `RawQuery`
+
+* Queries defined with [Query](../../indexes/querying/query-index.mdx#sessionquery)
+  or [DocumentQuery](../../indexes/querying/query-index.mdx#sessionadvanceddocumentquery)
+  are translated by the RavenDB client to [RQL](../../client-api/session/querying/what-is-rql.mdx)
+  when sent to the server.
+
+* The session also gives you a way to express the query directly in RQL using the
+  `Session.Advanced.RawQuery` method.
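+
+You can see the RQL that the client generates for a given `Query` or `DocumentQuery` by converting
+the query to a string before executing it. A small sketch, not part of the original page (the exact
+RQL text may differ between client versions):
+
+{`// A sketch: inspect the RQL that a LINQ query is translated into.
+
+var query = session
+    .Query<Employees_ByName.IndexEntry, Employees_ByName>()
+    .Where(x => x.LastName == "King");
+
+// ToString() returns the RQL that would be sent to the server,
+// e.g. something like: from index 'Employees/ByName' where LastName = $p0
+string rql = query.ToString();
+`}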
+
+**Example**:
+
+
+
+{`// Query with RawQuery - filter by INDEX-field
+
+List<Employee> employees = session.Advanced
+    // Provide RQL to RawQuery
+    .RawQuery<Employee>("from index 'Employees/ByName' where LastName == 'King'")
+    // Execute the query
+    .ToList();
+
+// Results will include all documents from the 'Employees' collection whose 'LastName' equals 'King'.
+`}
+
+
+
+{`// Query with RawQuery - filter by INDEX-field
+
+List<Employee> employees = await asyncSession.Advanced
+    // Provide RQL to RawQuery
+    .AsyncRawQuery<Employee>("from index 'Employees/ByName' where LastName == 'King'")
+    // Execute the query
+    .ToListAsync();
+
+// Results will include all documents from the 'Employees' collection whose 'LastName' equals 'King'.
+`}
+
+
+
+{`// The index definition:
+
+public class Employees_ByName : AbstractIndexCreationTask<Employee, Employees_ByName.IndexEntry>
+{
+    // The IndexEntry class defines the index-fields
+    public class IndexEntry
+    {
+        public string FirstName { get; set; }
+        public string LastName { get; set; }
+    }
+
+    public Employees_ByName()
+    {
+        // The 'Map' function defines the content of the INDEX-fields
+        Map = employees => from employee in employees
+                           select new IndexEntry
+                           {
+                               // * The content of INDEX-fields 'FirstName' & 'LastName'
+                               //   is composed of the relevant DOCUMENT-fields.
+                               FirstName = employee.FirstName,
+                               LastName = employee.LastName
+
+                               // * The index-fields can be queried on to fetch matching documents.
+                               //   You can query and filter Employee documents based on their first or last names.
+
+                               // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields
+                               //   will Not be indexed.
+
+                               // * Note: the INDEX-field name does Not have to be exactly the same
+                               //   as the DOCUMENT-field name.
+                           };
+    }
+}
+`}
+
+
+
+{`// Note:
+// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition
+
+from index "Employees/ByName"
+where LastName == "King"
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_query-index-java.mdx b/versioned_docs/version-7.1/indexes/querying/_query-index-java.mdx
new file mode 100644
index 0000000000..7131684bf7
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_query-index-java.mdx
@@ -0,0 +1,191 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+Indexes are used by RavenDB to satisfy queries.
+
+## Query-Flow
+
+Each query in RavenDB must be expressed in [RQL](../../client-api/session/querying/what-is-rql.mdx), our query language. Each query must match an index in order to return the results. The full query flow is as follows:
+
+1. `from index | collection`
+   - First step. When a query is issued, the server locates the appropriate index. If our query specifies that index, the task is simple - use this index. Otherwise, a query analysis takes place and an auto-index is created.
+
+2. `where`
+   - When we have our index, we scan it for records that match the query predicate.
+
+3. `load`
+   - If a query contains a projection that requires any document loads to be processed, they are done just before the projection is executed.
+
+4. `select`
+   - From each record, the server extracts the appropriate fields. It always extracts the `id()` field ([stored](../../indexes/storing-data-in-index.mdx) by default).
+
+   - If a query is not a projection query, then we load a document from storage.
+     Otherwise, if we stored all requested fields in the index, we use them and continue. If not, the document is loaded from storage and the missing fields are fetched from it.
+
+   - If a query indicates that [projection](../../indexes/querying/projections.mdx) should be used, then all results that were not filtered out are processed by that projection. Fields defined in the projection are extracted from the index (if stored).
+
+5. `include`
+   - If any [includes](../../client-api/how-to/handle-document-relationships.mdx#includes) are defined, then the results are traversed to extract the IDs of potential documents to include with the results.
+
+6. Return results.
+
+## Querying
+
+The RavenDB client supports querying for data. This functionality can be accessed using the session's `query` method, which is the most common and basic way to query the database.
+
+### Example I
+
+Let's execute our first query and return all the employees from the Northwind database. To do that, we need to have a [document store](../../client-api/what-is-a-document-store.mdx) and an [open session](../../client-api/session/opening-a-session.mdx), and specify the [collection](../../client-api/faq/what-is-a-collection.mdx) type that we want to query (in our case `Employees`) by passing `Employee` as the first parameter to the `query` method:
+
+
+
+{`// load all entities from 'Employees' collection
+List<Employee> results = session
+    .query(Employee.class)
+    .toList(); // send query
+`}
+
+
+
+{`from Employees
+`}
+
+
+
+By specifying the `Employee` class as a parameter, we are also defining the result type.
+
+### Example II - Filtering
+
+To filter the results, use a suitable method, like `whereEquals`:
+
+
+
+{`// load all entities from 'Employees' collection
+// where 'firstName' is 'Robert'
+List<Employee> results = session
+    .query(Employee.class)
+    .whereEquals("FirstName", "Robert")
+    .toList(); // send query
+`}
+
+
+
+{`from Employees
+where FirstName = 'Robert'
+`}
+
+
+
+
+
+{`// load a single entity from the 'Employees' collection
+// with ID matching 'employees/1-A'
+Employee result = session
+    .query(Employee.class)
+    .whereEquals("Id", "employees/1-A")
+    .firstOrDefault();
+`}
+
+
+
+{`from Employees
+where id() = 'employees/1-A'
+`}
+
+
+
+You can read more about filtering [here](../../indexes/querying/filtering.mdx).
+
+### Example III - Paging
+
+Paging is very simple. The methods `take` and `skip` can be used:
+
+
+
+{`// load up to 10 entities from 'Products' collection
+// where there are more than 10 units in stock
+// skip first 5 results
+List<Product> results = session
+    .query(Product.class)
+    .whereGreaterThan("UnitsInStock", 10)
+    .skip(5)
+    .take(10)
+    .toList(); // send query
+`}
+
+
+
+You can read more about paging [here](../../indexes/querying/paging.mdx).
+
+### Example IV - Querying a Specified Index
+
+In the above examples, we **did not** specify an index that we want to query. RavenDB will try to locate an appropriate index or create a new one. You can read more about creating indexes [here](../../indexes/creating-and-deploying.mdx).
+
+In order to specify an index, we need to pass it as a second parameter to the `query` method, or pass the index name as a parameter.
+ + + + +{`// load all entities from 'Employees' collection +// where 'firstName' is 'Robert' +// using 'Employees/ByFirstName' index +session + .query(Employee.class, Employees_ByFirstName.class) + .whereEquals("FirstName", "Robert") + .toList(); // send query +`} + + + + +{`from index 'Employees/ByFirstName' +where FirstName = 'Robert' +`} + + + + + + + +{`// load all entities from 'Employees' collection +// where 'firstName' is 'Robert' +// using 'Employees/ByFirstName' index +session + .query(Employee.class, Query.index("Employees/ByFirstName")) + .whereEquals("FirstName", "Robert") + .toList(); // send query +`} + + + + +{`from index 'Employees/ByFirstName' +where FirstName = 'Robert' +`} + + + + + +If you are filtering by fields that are not present in an index, an exception will be thrown. + + +### Remarks + + +You can check the API reference for the `DocumentQuery` [here](../../client-api/session/querying/document-query/what-is-document-query.mdx). + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_query-index-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_query-index-nodejs.mdx new file mode 100644 index 0000000000..7a694d7064 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_query-index-nodejs.mdx @@ -0,0 +1,342 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Prior to this article, it is recommended that you first read this [Query Overview](../../client-api/session/querying/how-to-query.mdx). + +* For a basic indexes overview, see the [Indexes Overview](../../studio/database/indexes/indexes-overview.mdx). +* Indexing the content of your documents allows for **fast document retrieval** when querying the index. + +* This article is a basic overview of how to query a **static index** using **code**. + * For dynamic query examples see [Query Overview](../../client-api/session/querying/how-to-query.mdx). + * An index can also be queried from [Studio](../../studio/database/queries/query-view.mdx) + using [RQL](../../client-api/session/querying/what-is-rql.mdx). + +* In this page: + * [Query an index by `Query`](../../indexes/querying/query-index.mdx#query-an-index-by-query) (using API) + * [Query an index by `RawQuery`](../../indexes/querying/query-index.mdx#query-an-index-by-rawquery) (using RQL) + + +## Query an index by `Query` + +* The following examples **query an index** using the session's `query` method. + +* Customize your query with these [API methods](../../client-api/session/querying/how-to-query.mdx#query-api). 
+ +**Query index - no filtering** + + + + +{`// Query the 'Employees' collection using the index - without filtering +// (Open the 'Index' tab to view the index class definition) + +const employees = await session + // Pass the index name as a parameter + // Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + .query({ indexName: "Employees/ByName" }) + // Execute the query + .all(); + +// All 'Employee' documents that contain DOCUMENT-fields 'firstName' and\\or 'lastName' will be returned +`} + + + + +{`// Query the 'Employees' collection using the index - without filtering + +const employees = await session + // Pass the queried collection as the first param + // Pass the index class as the second param + .query(Employee, Employees_ByName) + // Execute the query + .all(); + +// All 'Employee' documents that contain DOCUMENT-fields 'firstName' and\\or 'lastName' will be returned +`} + + + + +{`// The index definition: + +class Employees_ByName extends AbstractJavaScriptIndexCreationTask { + + constructor() { + super(); + + // Define the INDEX-fields + this.map("Employees", e => ({ + + // Content of INDEX-fields 'firstName' & 'lastName' + // is composed of the relevant DOCUMENT-fields + firstName: e.firstName, + lastName: e.lastName + })); + + // * The index-fields can be queried on to fetch matching documents. + // You can query and filter Employee documents based on their first or last names. + + // * Employee documents that do Not contain both 'firstName' and 'lastName' fields + // will Not be indexed. + + // * Note: the INDEX-field name does Not have to be exactly the same + // as the DOCUMENT-field name. + } +} +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" + +// All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned +`} + + + +#### Query index - with filtering: + + + + +{`// Query the 'Employees' collection using the index - filter by INDEX-field + +const employees = await session + // Pass the index name as a parameter + // Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + .query({ indexName: "Employees/ByName" }) + // Filter the retrieved documents by some predicate on an INDEX-field + .whereEquals("lastName", "King") + // Execute the query + .all(); + +// Results will include all documents from 'Employees' collection whose 'lastName' equals to 'King' +`} + + + + +{`// Query the 'Employees' collection using the index - filter by INDEX-field + +const employees = await session + // Pass the queried collection as the first param + // Pass the index class as the second param + .query(Employee, Employees_ByName) + // Filter the retrieved documents by some predicate on an INDEX-field + .whereEquals("lastName", "King") + // Execute the query + .all(); + +// Results will include all documents from 'Employees' collection whose 'lastName' equals to 'King' +`} + + + + +{`// The index definition: + +class Employees_ByName extends AbstractJavaScriptIndexCreationTask { + + constructor() { + super(); + + // Define the INDEX-fields + this.map("Employees", e => ({ + + // Content of INDEX-fields 'firstName' & 'lastName' + // is composed of the relevant DOCUMENT-fields + firstName: e.firstName, + lastName: e.lastName + })); + + // * The index-fields can be queried on to fetch matching documents. 
+ // You can query and filter Employee documents based on their first or last names. + + // * Employee documents that do Not contain both 'firstName' and 'lastName' fields + // will Not be indexed. + + // * Note: the INDEX-field name does Not have to be exactly the same + // as the DOCUMENT-field name. + } +} +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" +where lastName == "King" + +// Results will include all documents from 'Employees' collection whose 'lastName' equals to 'King'. +`} + + + + +* An exception will be thrown when filtering by fields that are Not defined in the index. + +* Read more about filtering [here](../../indexes/querying/filtering.mdx). +#### Query index - with paging: + + + + +{`// Query the 'Employees' collection using the index - page results + +// This example is based on the previous filtering example +const employees = await session + .query({ indexName: "Employees/ByName" }) + .whereEquals("lastName", "King") + .skip(5) // Skip first 5 results + .take(10) // Retrieve up to 10 documents + .all(); + +// Results will include up to 10 matching documents +`} + + + + +{`// Query the 'Employees' collection using the index - page results + +// This example is based on the previous filtering example +const employees = await session + .query(Employee, Employees_ByName) + .whereEquals("lastName", "King") + .skip(5) // Skip first 5 results + .take(10) // Retrieve up to 10 documents + .all(); + +// Results will include up to 10 matching documents +`} + + + + +{`// The index definition: + +class Employees_ByName extends AbstractJavaScriptIndexCreationTask { + + constructor() { + super(); + + // Define the INDEX-fields + this.map("Employees", e => ({ + + // Content of INDEX-fields 'firstName' & 'lastName' + // is composed of the relevant DOCUMENT-fields + firstName: e.firstName, + lastName: e.lastName + })); + + // * The index-fields can be queried on to fetch matching documents. + // You can query and filter Employee documents based on their first or last names. + + // * Employee documents that do Not contain both 'firstName' and 'lastName' fields + // will Not be indexed. + + // * Note: the INDEX-field name does Not have to be exactly the same + // as the DOCUMENT-field name. + } +} +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" +where lastName == "King" +limit 5, 10 // skip 5, take 10 +`} + + + + +* Read more about paging [here](../../indexes/querying/paging.mdx). + + + +## Query an index by `RawQuery` + +* Queries defined with [query](../../indexes/querying/query-index.mdx#sessionquery) are translated by the RavenDB client to [RQL](../../client-api/session/querying/what-is-rql.mdx) when sent to the server. + +* The session also gives you a way to express the query directly in RQL using the `rawQuery` method. + +**Example**: + + + + +{`// Query with RawQuery - filter by INDEX-field + +const results = await session + // Provide RQL to rawQuery + .advanced.rawQuery("from index 'Employees/ByName' where lastName == 'King'") + // Execute the query + .all(); + +// Results will include all documents from 'Employees' collection whose 'lastName' equals to 'King'. 
+`} + + + + +{`// The index definition: + +class Employees_ByName extends AbstractJavaScriptIndexCreationTask { + + constructor() { + super(); + + // Define the INDEX-fields + this.map("Employees", e => ({ + + // Content of INDEX-fields 'firstName' & 'lastName' + // is composed of the relevant DOCUMENT-fields + firstName: e.firstName, + lastName: e.lastName + })); + + // * The index-fields can be queried on to fetch matching documents. + // You can query and filter Employee documents based on their first or last names. + + // * Employee documents that do Not contain both 'firstName' and 'lastName' fields + // will Not be indexed. + + // * Note: the INDEX-field name does Not have to be exactly the same + // as the DOCUMENT-field name. + } +} +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" +where LastName == "King" +`} + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_query-index-php.mdx b/versioned_docs/version-7.1/indexes/querying/_query-index-php.mdx new file mode 100644 index 0000000000..4b58c86e4f --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_query-index-php.mdx @@ -0,0 +1,541 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Prior to this article, it is recommended that you first read this [Query Overview](../../client-api/session/querying/how-to-query.mdx). + +* For a basic indexes overview, see the [Indexes Overview](../../studio/database/indexes/indexes-overview.mdx). +* Indexing the content of your documents allows for **fast document retrieval** when querying the index. + +* This article is a basic overview of how to query a **static index** using **code**. + * For dynamic query examples see [Query Overview](../../client-api/session/querying/how-to-query.mdx). + * An index can also be queried from [Studio](../../studio/database/queries/query-view.mdx) + using [RQL](../../client-api/session/querying/what-is-rql.mdx). + +* Querying can be enhanced using these [extension methods](../../client-api/session/querying/how-to-query.mdx#custom-methods). 
+ +* In this page: + * [Query index - no filtering](../../indexes/querying/query-index.mdx#query-index---no-filtering) + * [Query index - with filtering](../../indexes/querying/query-index.mdx#query-index---with-filtering) + * [Query index - with paging](../../indexes/querying/query-index.mdx#query-index---with-paging) + * [Query an index by `documentQuery`](../../indexes/querying/query-index.mdx#query-an-index-by-documentquery) + * [Query an index by `rawQuery`](../../indexes/querying/query-index.mdx#query-an-index-by-rawquery) + + +## Query index - no filtering + + + + +{`// Query the 'Employees' collection using the index - without filtering +// (Open the 'Index' tab to view the index class definition) +/** @var array $employees */ +$employees = $session + // Pass the queried collection as the first generic parameter + // Pass the index class as the second generic parameter + ->query(Employee::class, Employees_ByName::class) + // Execute the query + ->toList(); + +// All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned +`} + + + + +{`// Query the 'Employees' collection using the index - without filtering +/** @var array $employees */ +$employees = $session + // Pass the index name as a parameter + // Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + ->query(Employee::class, "Employees/ByName") + // Execute the query + ->toList(); + +// All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned +`} + + + + +{`// The IndexEntry class defines the index-fields +class Employees_ByName_IndexEntry +{ + private ?string $firstName = null; + private ?string $lastName = null; + + public function getFirstName(): ?string + { + return $this->firstName; + } + + public function setFirstName(?string $firstName): void + { + $this->firstName = $firstName; + } + + public function getLastName(): ?string + { + return $this->lastName; + } + + public function setLastName(?string $lastName): void + { + $this->lastName = $lastName; + } +} + + +// The index definition: +class Employees_ByName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // The 'Map' function defines the content of the INDEX-fields + // * The content of INDEX-fields 'FirstName' & 'LastName' + // is composed of the relevant DOCUMENT-fields. + + $this->map = "from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}"; + + // * The index-fields can be queried on to fetch matching documents. + // You can query and filter Employee documents based on their first or last names. + + // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields + // will Not be indexed. + + // * Note: the INDEX-field name does Not have to be exactly the same + // as the DOCUMENT-field name. 
+ } +} +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" + +// All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned +`} + + + + + + +## Query index - with filtering + + + + +{`// Query the 'Employees' collection using the index - filter by INDEX-field +/** @var array $employees */ +$employees = $session + // Pass the IndexEntry class as the first generic parameter + // Pass the index class as the second generic parameter + ->query(Employees_ByName_IndexEntry::class, Employees_ByName::class) + // Filter the retrieved documents by some predicate on an INDEX-field + ->whereEquals("LastName", "King") + // Specify the type of the returned document entities + ->ofType(Employee::class) + // Execute the query + ->toList(); + +// Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'. +`} + + + + +{`// The IndexEntry class defines the index-fields +class Employees_ByName_IndexEntry +{ + private ?string $firstName = null; + private ?string $lastName = null; + + public function getFirstName(): ?string + { + return $this->firstName; + } + + public function setFirstName(?string $firstName): void + { + $this->firstName = $firstName; + } + + public function getLastName(): ?string + { + return $this->lastName; + } + + public function setLastName(?string $lastName): void + { + $this->lastName = $lastName; + } +} + + +// The index definition: +class Employees_ByName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // The 'Map' function defines the content of the INDEX-fields + // * The content of INDEX-fields 'FirstName' & 'LastName' + // is composed of the relevant DOCUMENT-fields. + + $this->map = "from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}"; + + // * The index-fields can be queried on to fetch matching documents. + // You can query and filter Employee documents based on their first or last names. + + // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields + // will Not be indexed. + + // * Note: the INDEX-field name does Not have to be exactly the same + // as the DOCUMENT-field name. + } +} +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" +where LastName == "King" + +// Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'. +`} + + + + +* `ofType` is used to convert the type being used in the where clause (`IndexEntry`) + to the collection type (`Employee`). + The reason for this is that while the `IndexEntry` type allows for a strongly typed query, + the server returns the actual documents entities objects. + +* An exception will be thrown when filtering by fields that are Not defined in the index. + +* Read more about filtering [here](../../indexes/querying/filtering.mdx). 
+ + + +## Query index - with paging + + + + +{`// Query the 'Employees' collection using the index - page results + +// This example is based on the previous filtering example +/** @var array $employees */ +$employees = $session + ->query(Employees_ByName_IndexEntry::class, Employees_ByName::class) + ->whereEquals("LastName", "King") + ->skip(5) // Skip first 5 results + ->take(10) // Retrieve up to 10 documents + ->ofType(Employee::class) + ->toList(); + +// Results will include up to 10 matching documents +`} + + + + +{`// The IndexEntry class defines the index-fields +class Employees_ByName_IndexEntry +{ + private ?string $firstName = null; + private ?string $lastName = null; + + public function getFirstName(): ?string + { + return $this->firstName; + } + + public function setFirstName(?string $firstName): void + { + $this->firstName = $firstName; + } + + public function getLastName(): ?string + { + return $this->lastName; + } + + public function setLastName(?string $lastName): void + { + $this->lastName = $lastName; + } +} + + +// The index definition: +class Employees_ByName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // The 'Map' function defines the content of the INDEX-fields + // * The content of INDEX-fields 'FirstName' & 'LastName' + // is composed of the relevant DOCUMENT-fields. + + $this->map = "from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}"; + + // * The index-fields can be queried on to fetch matching documents. + // You can query and filter Employee documents based on their first or last names. + + // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields + // will Not be indexed. + + // * Note: the INDEX-field name does Not have to be exactly the same + // as the DOCUMENT-field name. + } +} +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" +where LastName == "King" +limit 5, 10 // skip 5, take 10 +`} + + + + +* Read more about paging [here](../../indexes/querying/paging.mdx). + + + +## Query an index by `documentQuery` + + + + +{`// Query the 'Employees' collection using the index - filter by INDEX-field + +/** @var array $employees */ +$employees = $session->advanced() + // Pass the IndexEntry class as the first generic parameter + // Pass the index class as the second generic parameter + ->documentQuery(Employees_ByName_IndexEntry::class, Employees_ByName::class) + // Filter the retrieved documents by some predicate on an INDEX-field + ->whereEquals("LastName", "King") + // Specify the type of the returned document entities + ->ofType(Employee::class) + // Execute the query + ->toList(); + +// Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'. 
+`} + + + + +{`// Query the 'Employees' collection using the index - filter by INDEX-field +/** @var array $employees */ +$employees = $session->advanced() + // Pass the IndexEntry class as the generic param + // Pass the index name as the param + // Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + ->documentQuery(Employees_ByName_IndexEntry::class, "Employees/ByName") + // Filter the retrieved documents by some predicate on an INDEX-field + ->whereEquals("LastName", "King") + // Specify the type of the returned document entities + ->ofType(Employee::class) + // Execute the query + ->toList(); + +// Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'. +`} + + + + +{`// The IndexEntry class defines the index-fields +class Employees_ByName_IndexEntry +{ + private ?string $firstName = null; + private ?string $lastName = null; + + public function getFirstName(): ?string + { + return $this->firstName; + } + + public function setFirstName(?string $firstName): void + { + $this->firstName = $firstName; + } + + public function getLastName(): ?string + { + return $this->lastName; + } + + public function setLastName(?string $lastName): void + { + $this->lastName = $lastName; + } +} + + +// The index definition: +class Employees_ByName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // The 'Map' function defines the content of the INDEX-fields + // * The content of INDEX-fields 'FirstName' & 'LastName' + // is composed of the relevant DOCUMENT-fields. + + $this->map = "from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}"; + + // * The index-fields can be queried on to fetch matching documents. + // You can query and filter Employee documents based on their first or last names. + + // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields + // will Not be indexed. + + // * Note: the INDEX-field name does Not have to be exactly the same + // as the DOCUMENT-field name. + } +} +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" +where LastName == "King" +`} + + + + + + +## Query an index by `rawQuery` + +* Queries defined with [Query](../../indexes/querying/query-index.mdx#sessionquery) + or [DocumentQuery](../../indexes/querying/query-index.mdx#sessionadvanceddocumentquery) + are translated by the RavenDB client to [RQL](../../client-api/session/querying/what-is-rql.mdx) + when sent to the server. + +* The session also gives you a way to express the query directly in RQL using the + `session->advanced->rawQuery` method. + +**Example**: + + + + +{`// Query with RawQuery - filter by INDEX-field + +/** @var array $employees */ +$employees = $session->advanced() + // Provide RQL to RawQuery + ->rawQuery(Employee::class, "from index 'Employees/ByName' where LastName == 'King'") + // Execute the query + ->toList(); + +// Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'. 
+`} + + + + +{`// The IndexEntry class defines the index-fields +class Employees_ByName_IndexEntry +{ + private ?string $firstName = null; + private ?string $lastName = null; + + public function getFirstName(): ?string + { + return $this->firstName; + } + + public function setFirstName(?string $firstName): void + { + $this->firstName = $firstName; + } + + public function getLastName(): ?string + { + return $this->lastName; + } + + public function setLastName(?string $lastName): void + { + $this->lastName = $lastName; + } +} + + +// The index definition: +class Employees_ByName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + // The 'Map' function defines the content of the INDEX-fields + // * The content of INDEX-fields 'FirstName' & 'LastName' + // is composed of the relevant DOCUMENT-fields. + + $this->map = "from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}"; + + // * The index-fields can be queried on to fetch matching documents. + // You can query and filter Employee documents based on their first or last names. + + // * Employee documents that do Not contain both 'FirstName' and 'LastName' fields + // will Not be indexed. + + // * Note: the INDEX-field name does Not have to be exactly the same + // as the DOCUMENT-field name. + } +} +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" +where LastName == "King" +`} + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_query-index-python.mdx b/versioned_docs/version-7.1/indexes/querying/_query-index-python.mdx new file mode 100644 index 0000000000..d52f5a0510 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_query-index-python.mdx @@ -0,0 +1,320 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Prior to this article, it is recommended that you first read this [Query Overview](../../client-api/session/querying/how-to-query.mdx). + +* For a basic indexes overview, see the [Indexes Overview](../../studio/database/indexes/indexes-overview.mdx). +* Indexing the content of your documents allows for **fast document retrieval** when querying the index. + +* This article is a basic overview of how to query a **static index** using **code**. + * For dynamic query examples see [Query Overview](../../client-api/session/querying/how-to-query.mdx). + * An index can also be queried from [Studio](../../studio/database/queries/query-view.mdx) + using [RQL](../../client-api/session/querying/what-is-rql.mdx). + +* In this page: + * [Query an index by `query_index_type` and `query_index`](../../indexes/querying/query-index.mdx#query-an-index-by-query_index_type-and-query_index) + * [Query an index by `raw_query`](../../indexes/querying/query-index.mdx#query-an-index-by-raw_query) (using RQL) + + +## Query an index by `query_index_type` and `query_index` + +* In the following examples we **query an index** using the session `query_index_type` and `query_index` methods. + +* Querying can be enhanced using these [extension methods](../../client-api/session/querying/how-to-query.mdx#custom-methods). 
+
+#### Query index - no filtering:
+
+
+
+{`# Query the 'Employees' collection using the index - without filtering
+# (Open the 'Index' tab to view the index class definition)
+employees = list(
+    session
+    # Pass the index class as the first parameter
+    # Pass the queried entity type as the second parameter
+    .query_index_type(Employees_ByName, Employee)
+)
+
+# All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned
+`}
+
+
+
+{`# Query the 'Employees' collection using the index - without filtering
+employees = list(
+    session
+    # Pass the index name as a parameter
+    # Use slash '/' in the index name, replacing the underscore '_' from the index class definition
+    .query_index("Employees/ByName")
+)
+# All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned
+`}
+
+
+
+{`# The index definition:
+class Employees_ByName(AbstractIndexCreationTask):
+    # The IndexEntry class defines the index-fields
+    class IndexEntry:
+        def __init__(self, first_name: str = None, last_name: str = None):
+            self.first_name = first_name
+            self.last_name = last_name
+
+        # The from_json method to handle different casing on the server
+        @classmethod
+        def from_json(cls, json_dict: Dict[str, Any]) -> "Employees_ByName.IndexEntry":
+            return cls(json_dict["FirstName"], json_dict["LastName"])
+
+    def __init__(self):
+        super().__init__()
+        # The 'map' function defines the content of the INDEX-fields
+        # * The content of INDEX-fields 'FirstName' & 'LastName'
+        #   is composed of the relevant DOCUMENT-fields.
+        self.map = """from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}"""
+        # * The index-fields can be queried on to fetch matching documents.
+        #   You can query and filter Employee documents based on their first or last names.
+
+        # * Employee documents that do Not contain both 'FirstName' and 'LastName' fields
+        #   will Not be indexed.
+
+        # * Note: the INDEX-field name does Not have to be exactly the same
+        #   as the DOCUMENT-field name.
+`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" + +// All 'Employee' documents that contain DOCUMENT-fields 'FirstName' and\\or 'LastName' will be returned +`} + + + +#### Query index - with filtering: + + + + +{`# Query the 'Employees' collection using the index - filter by INDEX-field + +employees = list( + session + # Pass the index class as the first parameter + # Pass the IndexEntry class as the second parameter + .query_index_type(Employees_ByName, Employees_ByName.IndexEntry) + # Filter the retrieved documents by some predicate on an INDEX-field + .where_equals("LastName", "King") + # Specify the type of the returned document entities + .of_type(Employee) +) + +# Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King' +`} + + + + +{`# The index definition: +class Employees_ByName(AbstractIndexCreationTask): + # The IndexEntry class defines the index-fields + class IndexEntry: + def __init__(self, first_name: str = None, last_name: str = None): + self.first_name = first_name + self.last_name = last_name + + # The from_json method to handle different casing on the server + @classmethod + def from_json(cls, json_dict: Dict[str, Any]) -> "Employees_ByName.IndexEntry": + return cls(json_dict["FirstName"], json_dict["LastName"]) + + def __init__(self): + super().__init__() + # The 'map' function defines the content of the INDEX-fields + # * The content of INDEX-fields 'FirstName' & 'LastName' + # is composed of the relevant DOCUMENT-fields. + self.map = """from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}""" + # * The index-fields can be queried on to fetch matching documents. + # You can query and filter Employee documents based on their first or last names. + + # * Employee documents that do Not contain both 'FirstName' and 'LastName' fields + # will Not be indexed. + + # * Note: the INDEX-field name does Not have to be exactly the same + # as the DOCUMENT-field name. +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" +where LastName == "King" + +// Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'. +`} + + + + +* `of_type` is used to convert the type being used in the where clause (`IndexEntry`) + to the collection type (`Employee`). + The reason for this is that while the `IndexEntry` type allows for a strongly typed query, + the server returns the actual documents entities objects. + +* An exception will be thrown when filtering by fields that are Not defined in the index. + +* Read more about filtering [here](../../indexes/querying/filtering.mdx). 
+#### Query index - with paging: + + + + +{`# Query the 'Employees' collection using the index - page results + +# This example is based on the previous filtering example +employees = list( + session.query_index_type(Employees_ByName, Employees_ByName.IndexEntry) + .where_equals("LastName", "King") + .skip(5) # Skip first 5 results + .take(10) # Retrieve up to 10 documents + .of_type(Employee) +) + +# Results will include up to 10 matching documents +`} + + + + +{`# The index definition: +class Employees_ByName(AbstractIndexCreationTask): + # The IndexEntry class defines the index-fields + class IndexEntry: + def __init__(self, first_name: str = None, last_name: str = None): + self.first_name = first_name + self.last_name = last_name + + # The from_json method to handle different casing on the server + @classmethod + def from_json(cls, json_dict: Dict[str, Any]) -> "Employees_ByName.IndexEntry": + return cls(json_dict["FirstName"], json_dict["LastName"]) + + def __init__(self): + super().__init__() + # The 'map' function defines the content of the INDEX-fields + # * The content of INDEX-fields 'FirstName' & 'LastName' + # is composed of the relevant DOCUMENT-fields. + self.map = """from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}""" + # * The index-fields can be queried on to fetch matching documents. + # You can query and filter Employee documents based on their first or last names. + + # * Employee documents that do Not contain both 'FirstName' and 'LastName' fields + # will Not be indexed. + + # * Note: the INDEX-field name does Not have to be exactly the same + # as the DOCUMENT-field name. +`} + + + + +{`// Note: +// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition + +from index "Employees/ByName" +where LastName == "King" +limit 5, 10 // skip 5, take 10 +`} + + + + +* Read more about paging [here](../../indexes/querying/paging.mdx). + + + +## Query an index by `raw_query` + +* Queries defined with [Query](../../indexes/querying/query-index.mdx#sessionquery) + or [DocumentQuery](../../indexes/querying/query-index.mdx#sessionadvanceddocumentquery) + are translated by the RavenDB client to [RQL](../../client-api/session/querying/what-is-rql.mdx) + when sent to the server. + +* The session also gives you a way to express the query directly in RQL using the + `session.advanced.raw_query` method. + +**Example**: + + + + +{`# Query with RawQuery - filter by INDEX-field + +employees = list( + session.advanced + # Provide RQL to raw_query + .raw_query("from index 'Employees/ByName' where LastName == 'King'", Employee) +) +# Results will include all documents from 'Employees' collection whose 'LastName' equals to 'King'. +`} + + + + +{`# The index definition: +class Employees_ByName(AbstractIndexCreationTask): + # The IndexEntry class defines the index-fields + class IndexEntry: + def __init__(self, first_name: str = None, last_name: str = None): + self.first_name = first_name + self.last_name = last_name + + # The from_json method to handle different casing on the server + @classmethod + def from_json(cls, json_dict: Dict[str, Any]) -> "Employees_ByName.IndexEntry": + return cls(json_dict["FirstName"], json_dict["LastName"]) + + def __init__(self): + super().__init__() + # The 'map' function defines the content of the INDEX-fields + # * The content of INDEX-fields 'FirstName' & 'LastName' + # is composed of the relevant DOCUMENT-fields. 
+        self.map = """from e in docs.Employees select new {FirstName = e.FirstName, LastName = e.LastName}"""
+        # * The index-fields can be queried on to fetch matching documents.
+        #   You can query and filter Employee documents based on their first or last names.
+
+        # * Employee documents that do Not contain both 'FirstName' and 'LastName' fields
+        #   will Not be indexed.
+
+        # * Note: the INDEX-field name does Not have to be exactly the same
+        #   as the DOCUMENT-field name.
+`}
+
+
+
+{`// Note:
+// Use slash \`/\` in the index name, replacing the underscore \`_\` from the index class definition
+
+from index "Employees/ByName"
+where LastName == "King"
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_searching-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_searching-csharp.mdx
new file mode 100644
index 0000000000..98ab32277a
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_searching-csharp.mdx
@@ -0,0 +1,858 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Prior to this article, please refer to [Full-Text search with dynamic queries](../../client-api/session/querying/text-search/full-text-search.mdx) to learn about the `Search` method.
+
+* **All capabilities** provided by `Search` with a dynamic query can also be used when querying a static-index.
+
+* However, as opposed to making a dynamic search query, where an auto-index is created for you,
+  when using a **static-index**:
+
+  * You must configure the index-field in which you want to search.
+    See examples below.
+
+  * You can configure which analyzer will be used to tokenize this field.
+    See [selecting an analyzer](../../indexes/using-analyzers.mdx#selecting-an-analyzer-for-a-field).
+
+* In this article:
+  * [Indexing single field for FTS](../../indexes/querying/searching.mdx#indexing-single-field-for-fts)
+  * [Indexing multiple fields for FTS](../../indexes/querying/searching.mdx#indexing-multiple-fields-for-fts)
+  * [Indexing all fields for FTS (using AsJson)](../../indexes/querying/searching.mdx#indexing-all-fields-for-fts-(using-asjson))
+  * [Boosting search results](../../indexes/querying/searching.mdx#boosting-search-results)
+  * [Searching with wildcards](../../indexes/querying/searching.mdx#searching-with-wildcards)
+    * [When using RavenStandardAnalyzer or StandardAnalyzer or NGramAnalyzer](../../indexes/querying/searching.mdx#when-usingoror)
+    * [When using a custom analyzer](../../indexes/querying/searching.mdx#when-using-a-custom-analyzer)
+    * [When using the Exact analyzer](../../indexes/querying/searching.mdx#when-using-the-exact-analyzer)
+
+
+## Indexing single field for FTS
+
+#### The index:
+
+
+
+{`public class Employees_ByNotes :
+    AbstractIndexCreationTask<Employee, Employees_ByNotes.IndexEntry>
+\{
+    // The IndexEntry class defines the index-fields
+    public class IndexEntry
+    \{
+        public string EmployeeNotes \{ get; set; \}
+    \}
+
+    public Employees_ByNotes()
+    \{
+        // The 'Map' function defines the content of the index-fields
+        Map = employees => from employee in employees
+                           select new IndexEntry()
+                           \{
+                               EmployeeNotes = employee.Notes[0]
+                           \};
+
+        // Configure the index-field for FTS:
+        // Set 'FieldIndexing.Search' on index-field 'EmployeeNotes'
+        Index(x => x.EmployeeNotes, FieldIndexing.Search);
+
+        // Optionally: Set your choice of analyzer for the index-field.
+        // Here the text from index-field 'EmployeeNotes' will be tokenized by 'WhitespaceAnalyzer'.
+        Analyze(x => x.EmployeeNotes, "WhitespaceAnalyzer");
+
+        // Note:
+        // If no analyzer is set then the default 'RavenStandardAnalyzer' is used.
+    \}
+\}
+`}
+
+
+#### Query with Search:
+
+* Use `Search` to make a full-text search when querying the index.
+
+* Refer to [Full-Text search with dynamic queries](../../client-api/session/querying/text-search/full-text-search.mdx) for all available **Search options**,
+  such as using wildcards, searching for multiple terms, etc.
+
+
+
+{`List<Employee> employees = session
+    // Query the index
+    .Query<Employees_ByNotes.IndexEntry, Employees_ByNotes>()
+    // Call 'Search':
+    // pass the index field that was configured for FTS and the term to search for.
+    .Search(x => x.EmployeeNotes, "French")
+    .OfType<Employee>()
+    .ToList();
+
+// * Results will contain all Employee documents that have 'French' in their 'Notes' field.
+//
+// * Search is case-sensitive since the field was indexed using the 'WhitespaceAnalyzer',
+//   which preserves casing.
+`}
+
+
+
+{`List<Employee> employees = await asyncSession
+    // Query the index
+    .Query<Employees_ByNotes.IndexEntry, Employees_ByNotes>()
+    // Call 'Search':
+    // pass the index field that was configured for FTS and the term to search for.
+    .Search(x => x.EmployeeNotes, "French")
+    .OfType<Employee>()
+    .ToListAsync();
+
+// * Results will contain all Employee documents that have 'French' in their 'Notes' field.
+//
+// * Search is case-sensitive since the field was indexed using the 'WhitespaceAnalyzer',
+//   which preserves casing.
+`}
+
+
+
+{`List<Employee> employees = session.Advanced
+    // Query the index
+    .DocumentQuery<Employees_ByNotes.IndexEntry, Employees_ByNotes>()
+    // Call 'Search':
+    // pass the index field that was configured for FTS and the term to search for.
+    .Search(x => x.EmployeeNotes, "French")
+    .OfType<Employee>()
+    .ToList();
+
+// * Results will contain all Employee documents that have 'French' in their 'Notes' field.
+//
+// * Search is case-sensitive since the field was indexed using the 'WhitespaceAnalyzer',
+//   which preserves casing.
+`}
+
+
+
+{`from index "Employees/ByNotes"
+where search(EmployeeNotes, "French")
+`}
+
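+
+Since all of the dynamic-query `Search` capabilities apply to a static index as well, you can also
+combine and negate search clauses. A hedged sketch, not part of the original article, assuming the
+`SearchOptions` flags of the C# client:
+
+{`// A sketch: exclude a term by applying 'SearchOptions' flags to a Search clause.
+
+List<Employee> employees = session
+    .Query<Employees_ByNotes.IndexEntry, Employees_ByNotes>()
+    // Match documents that contain 'French' ...
+    .Search(x => x.EmployeeNotes, "French")
+    // ... but exclude documents that also contain 'German'
+    .Search(x => x.EmployeeNotes, "German", options: SearchOptions.Not | SearchOptions.And)
+    .OfType<Employee>()
+    .ToList();
+`}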
+
+
+## Indexing multiple fields for FTS
+
+#### The index:
+
+
+
+{`public class Employees_ByEmployeeData :
+    AbstractIndexCreationTask<Employee, Employees_ByEmployeeData.IndexEntry>
+\{
+    public class IndexEntry
+    \{
+        public object[] EmployeeData \{ get; set; \}
+    \}
+
+    public Employees_ByEmployeeData()
+    \{
+        Map = employees => from employee in employees
+                           select new IndexEntry()
+                           \{
+                               EmployeeData = new object[]
+                               \{
+                                   // Multiple document-fields can be indexed
+                                   // into the single index-field 'EmployeeData'
+                                   employee.FirstName,
+                                   employee.LastName,
+                                   employee.Title,
+                                   employee.Notes
+                               \}
+                           \};
+
+        // Configure the index-field for FTS:
+        // Set 'FieldIndexing.Search' on index-field 'EmployeeData'
+        Index(x => x.EmployeeData, FieldIndexing.Search);
+
+        // Note:
+        // Since no analyzer is set, the default 'RavenStandardAnalyzer' is used.
+    \}
+\}
+`}
+
+
+#### Sample query:
+
+
+
+{`List<Employee> employees = session
+    // Query the static-index
+    .Query<Employees_ByEmployeeData.IndexEntry, Employees_ByEmployeeData>()
+    // A logical OR is applied between the following two Search calls:
+    .Search(x => x.EmployeeData, "Manager")
+    // A logical AND is applied between the following two terms:
+    .Search(x => x.EmployeeData, "French Spanish", @operator: SearchOperator.And)
+    .OfType<Employee>()
+    .ToList();
+
+// * Results will contain all Employee documents that have:
+//   ('Manager' in any of the 4 document-fields that were indexed)
+//   OR
+//   ('French' AND 'Spanish' in any of the 4 document-fields that were indexed)
+//
+// * Search is case-insensitive since the default analyzer is used
+`}
+
+
+
+{`List<Employee> employees = await asyncSession
+    // Query the static-index
+    .Query<Employees_ByEmployeeData.IndexEntry, Employees_ByEmployeeData>()
+    // A logical OR is applied between the following two Search calls:
+    .Search(x => x.EmployeeData, "Manager")
+    // A logical AND is applied between the following two terms:
+    .Search(x => x.EmployeeData, "French Spanish", @operator: SearchOperator.And)
+    .OfType<Employee>()
+    .ToListAsync();
+
+// * Results will contain all Employee documents that have:
+//   ('Manager' in any of the 4 document-fields that were indexed)
+//   OR
+//   ('French' AND 'Spanish' in any of the 4 document-fields that were indexed)
+//
+// * Search is case-insensitive since the default analyzer is used
+`}
+
+
+
+{`List<Employee> employees = session.Advanced
+    // Query the static-index
+    .DocumentQuery<Employees_ByEmployeeData.IndexEntry, Employees_ByEmployeeData>()
+    .OpenSubclause()
+    // A logical OR is applied between the following two Search calls:
+    .Search(x => x.EmployeeData, "Manager")
+    // A logical AND is applied between the following two terms:
+    .Search(x => x.EmployeeData, "French Spanish", @operator: SearchOperator.And)
+    .CloseSubclause()
+    .OfType<Employee>()
+    .ToList();
+
+// * Results will contain all Employee documents that have:
+//   ('Manager' in any of the 4 document-fields that were indexed)
+//   OR
+//   ('French' AND 'Spanish' in any of the 4 document-fields that were indexed)
+//
+// * Search is case-insensitive since the default analyzer is used
+`}
+
+
+
+{`from index "Employees/ByEmployeeData"
+where (search(EmployeeData, "Manager") or search(EmployeeData, "French Spanish", and))
+`}
+
+
+
+
+## Indexing all fields for FTS (using AsJson)
+
+* To search across ALL fields in a document without defining each one explicitly,
+  use the `AsJson` method in the _Map_ function to extract all property values and index them in a single searchable field.
+
+* This approach makes the index robust to changes in the document schema.
+  By calling `.Select(x => x.Value)` on the result of `AsJson(...)`,
+  the index automatically includes values from ALL existing and newly added properties,
+  and there is no need to update the index when the document structure changes.
+
+* **Note:** This indexing method is supported only when using **Lucene** as the indexing engine.
+
+#### The index:
+
+
+
+{`public class Products_ByAllValues :
+    AbstractIndexCreationTask<Product, Products_ByAllValues.IndexEntry>
+\{
+    public class IndexEntry
+    \{
+        // This index field will contain all values from all properties in the document
+        public string AllValues \{ get; set; \}
+
+        // Note:
+        // RavenDB seamlessly supports multi-value indexing on this field.
+        // Even though the 'AllValues' index-field is declared as a 'string',
+        // it can accept a collection of values, as defined in the Map function.
+        // The engine treats the field as if it contains multiple strings
+        // and indexes each one individually.
+ \} + + public Products_ByAllValues() + \{ + Map = products => from product in products + select new + \{ + // Use the 'AsJson' method to convert the document into a JSON-like structure + // and call 'Select' to extract only the values of each property + AllValues = AsJson(product).Select(x => x.Value) + \}; + + // Configure the index-field for FTS: + // Set 'FieldIndexing.Search' on index-field 'AllValues' + Index(x => x.AllValues, FieldIndexing.Search); + + // Note: + // Since no analyzer is set, the default 'RavenStandardAnalyzer' is used. + + // Set the search engine type to Lucene: + SearchEngineType = Raven.Client.Documents.Indexes.SearchEngineType.Lucene; + \} +\} +`} + + +#### Sample query: + + + + +{`List products = session + .Query() + .Search(x => x.AllValues, "tofu") + .OfType() + .ToList(); + +// * Results will contain all Product documents that have 'tofu' +// in ANY of their fields. +// +// * Search is case-insensitive since the default analyzer is used. +`} + + + + +{`List products = await asyncSession + .Query() + .Search(x => x.AllValues, "tofu") + .OfType() + .ToListAsync(); + +// * Results will contain all Product documents that have 'tofu' +// in ANY of their fields. +// +// * Search is case-insensitive since the default analyzer is used. +`} + + + + +{`List products = session.Advanced + .DocumentQuery() + .Search(x => x.AllValues, "tofu") + .OfType() + .ToList(); + +// * Results will contain all Product documents that have 'tofu' +// in ANY of their fields. +// +// * Search is case-insensitive since the default analyzer is used. +`} + + + + +{`from index "Products/ByAllValues" +where search(AllValues, "tofu") +`} + + + + + + +## Boosting search results + +* In order to prioritize results, you can provide a boost value to the searched terms. + This can be applied by either of the following: + + * Add a boost value to the relevant index-field **inside the index definition**. + Refer to article [indexes - boosting](../../indexes/boosting.mdx). + + * Add a boost value to the queried terms **at query time**. + Refer to article [Boost search results](../../client-api/session/querying/text-search/boost-search-results.mdx). + + + +## Searching with wildcards + +* When making a full-text search with wildcards in the search terms, + the presence of wildcards (`*`) in the terms sent to the search engine is determined by the transformations applied by the + [analyzer](../../indexes/using-analyzers.mdx) used in the index. + +* Note the different behavior in the following cases, as described below: + * [When using RavenStandardAnalyzer or StandardAnalyzer or NGramAnalyzer](../../indexes/querying/searching.mdx#when-usingoror) + * [When using a custom analyzer](../../indexes/querying/searching.mdx#when-using-a-custom-analyzer) + * [When using the Exact analyzer](../../indexes/querying/searching.mdx#when-using-the-exact-analyzer) + +* When using [Corax](../../indexes/search-engine/corax.mdx) as the search engine, + this behavior will only apply to indexes that are newly created or have been reset. + + +##### When using `RavenStandardAnalyzer` or`StandardAnalyzer` or `NGramAnalyzer`: +Usually, the same analyzer used to tokenize field content at **indexing time** is also used to process the terms provided in the **full-text search query** +before they are sent to the search engine to retrieve matching documents. 
+ +**However, in the following cases**: + +* When making a [dynamic search query](../../client-api/session/querying/text-search/full-text-search.mdx) +* or when querying a static index that uses the default [RavenStandardAnalyzer](../../indexes/using-analyzers.mdx#using-the-default-search-analyzer) +* or when querying a static index that uses the [StandardAnalyzer](../../indexes/using-analyzers.mdx#analyzers-that-remove-common-stop-words) +* or when querying a static index that uses the [NGramAnalyzer](../../indexes/using-analyzers.mdx#analyzers-that-tokenize-according-to-the-defined-number-of-characters) + +the queried terms in the _Search_ method are processed with the [LowerCaseKeywordAnalyzer](../../indexes/using-analyzers.mdx#using-the-default-analyzer) +before being sent to the search engine. + +This analyzer does Not remove the `*`, so the terms are sent with `*`, as provided in the search terms. +For example: + + + + +{`public class Employees_ByNotes_usingDefaultAnalyzer : + AbstractIndexCreationTask +{ + public class IndexEntry + { + public string EmployeeNotes { get; set; } + } + + public Employees_ByNotes_usingDefaultAnalyzer() + { + Map = employees => from employee in employees + select new IndexEntry() + { + EmployeeNotes = employee.Notes[0] + }; + + // Configure the index-field for FTS: + Index(x => x.EmployeeNotes, FieldIndexing.Search); + + // Since no analyzer is explicitly set + // then the default 'RavenStandardAnalyzer' will be used at indexing time. + + // However, when making a search query with wildcards, + // the 'LowerCaseKeywordAnalyzer' will be used to process the search terms + // prior to sending them to the search engine. + } +} +`} + + + + +{`List employees = session + .Query() + + // If you request to include explanations, + // you can see the exact term that was sent to the search engine. + .ToDocumentQuery() + .IncludeExplanations(out var explanations) + .ToQueryable() + + // Provide a term with a wildcard to the Search method: + .Search(x => x.EmployeeNotes, "*rench") + .OfType() + .ToList(); + +// Results will contain all Employee documents that have terms that end with 'rench' +// (e.g. French). + +// Checking the explanations, you can see that the search term 'rench' +// was sent to the search engine WITH the leading wildcard, i.e. '*rench' +// since the 'LowerCaseKeywordAnalyzer' is used in this case. +var explanation = explanations.GetExplanations(employees[0].Id)[0]; +Assert.Contains($"EmployeeNotes:*rench", explanation); +`} + + + + +{`List employees = await asyncSession + .Query() + + // If you request to include explanations, + // you can see the exact term that was sent to the search engine. + .ToDocumentQuery() + .IncludeExplanations(out var explanations) + .ToQueryable() + + // Provide a term with a wildcard to the Search method: + .Search(x => x.EmployeeNotes, "*rench") + .OfType() + .ToListAsync(); + +// Results will contain all Employee documents that have terms that end with 'rench' +// (e.g. French). + +// Checking the explanations, you can see that the search term 'rench' +// was sent to the search engine WITH the leading wildcard, i.e. '*rench' +// since the 'LowerCaseKeywordAnalyzer' is used in this case. +var explanation = explanations.GetExplanations(employees[0].Id)[0]; +Assert.Contains($"EmployeeNotes:*rench", explanation); +`} + + + + +{`List employees = session.Advanced + .DocumentQuery() + + // If you request to include explanations, + // you can see the exact term that was sent to the search engine. 
+ .IncludeExplanations(out var explanations) + + // Provide a term with a wildcard to the Search method: + .Search(x => x.EmployeeNotes, "*rench") + .OfType() + .ToList(); + +// Results will contain all Employee documents that have terms that end with 'rench' +// (e.g. French). + +// Checking the explanations, you can see that the search term 'rench' +// was sent to the search engine WITH the leading wildcard, i.e. '*rench' +// since the 'LowerCaseKeywordAnalyzer' is used in this case. +var explanation = explanations.GetExplanations(employees[0].Id)[0]; +Assert.Contains($"EmployeeNotes:*rench", explanation); +`} + + + + +{`from index "Employees/ByNotes/usingDefaultAnalyzer" +where search(EmployeeNotes, "*rench") +include explanations() +`} + + + + + + + +##### When using a custom analyzer: +* When setting a [custom analyzer](../../indexes/using-analyzers.mdx#creating-custom-analyzers) in your index to tokenize field content, + then when querying the index, the search terms in the query will be processed according to the **custom analyzer's logic**. + +* The `*` will remain in the terms if the custom analyzer allows it. + It is the user’s responsibility to ensure that wildcards are not removed by the custom analyzer if they should be included in the query. + +* Note: + An exception to the above is when the wildcard is used as a suffix in the search term (e.g. `Fren*`). + In this case the wildcard will be included in the query regardless of the analyzer's logic. + +For example: + + + + +{`public class Employees_ByNotes_usingCustomAnalyzer : + AbstractIndexCreationTask +{ + public class IndexEntry + { + public string EmployeeNotes { get; set; } + } + + public Employees_ByNotes_usingCustomAnalyzer() + { + Map = employees => from employee in employees + select new IndexEntry() + { + EmployeeNotes = employee.Notes[0] + }; + + // Configure the index-field for FTS: + Index(x => x.EmployeeNotes, FieldIndexing.Search); + + // Set a custom analyzer for the index-field: + Analyze(x => x.EmployeeNotes, "CustomAnalyzers.RemoveWildcardsAnalyzer"); + } +} +`} + + + + +{`// The custom analyzer: +// ==================== + +const string RemoveWildcardsAnalyzer = + @" + using System.IO; + using Lucene.Net.Analysis; + using Lucene.Net.Analysis.Standard; + namespace CustomAnalyzers + { + public class RemoveWildcardsAnalyzer : StandardAnalyzer + { + public RemoveWildcardsAnalyzer() : base(Lucene.Net.Util.Version.LUCENE_30) + { + } + + public override TokenStream TokenStream(string fieldName, System.IO.TextReader reader) + { + // Read input stream and remove wildcards (*) + string text = reader.ReadToEnd(); + string processedText = RemoveWildcards(text); + StringReader newReader = new StringReader(processedText); + + return base.TokenStream(fieldName, newReader); + } + + private string RemoveWildcards(string input) + { + // Replace wildcard characters with an empty string + return input.Replace(""*"", """"); + } + } + }"; + +// Deploying the custom analyzer: +// ============================== + +store.Maintenance.Send(new PutAnalyzersOperation(new AnalyzerDefinition() +{ + Name = "CustomAnalyzers.RemoveWildcardsAnalyzer", + Code = RemoveWildcardsAnalyzer, +})); +`} + + + + +{`List employees = session + .Query() + + .ToDocumentQuery() + .IncludeExplanations(out var explanations) + .ToQueryable() + + // Provide a term with wildcards to the Search method: + .Search(x => x.EmployeeNotes, "*French*") + .OfType() + .ToList(); + +// Even though a wildcard was provided, +// the results will contain only Employee 
documents that contain the exact term 'French'. + +// The search term was sent to the search engine WITHOUT the wildcard, +// as the custom analyzer's logic strips them out. + +// This can be verified by checking the explanations: +var explanation = explanations.GetExplanations(employees[0].Id)[0]; +Assert.Contains($"EmployeeNotes:french", explanation); +Assert.DoesNotContain($"EmployeeNotes:*french", explanation); +`} + + + + +{`List employees = await asyncSession + .Query() + + .ToDocumentQuery() + .IncludeExplanations(out var explanations) + .ToQueryable() + + // Provide a term with wildcards to the Search method: + .Search(x => x.EmployeeNotes, "*French*") + .OfType() + .ToListAsync(); + +// Even though a wildcard was provided, +// the results will contain only Employee documents that contain the exact term 'French'. + +// The search term was sent to the search engine WITHOUT the wildcard, +// as the custom analyzer's logic strips them out. + +// This can be verified by checking the explanations: +var explanation = explanations.GetExplanations(employees[0].Id)[0]; +Assert.Contains($"EmployeeNotes:french", explanation); +Assert.DoesNotContain($"EmployeeNotes:*french", explanation); +`} + + + + +{`List employees = session.Advanced + .DocumentQuery() + .IncludeExplanations(out var explanations) + // Provide a term with wildcards to the Search method: + .Search(x => x.EmployeeNotes, "*French*") + .OfType() + .ToList(); + +// Even though a wildcard was provided, +// the results will contain only Employee documents that contain the exact term 'French'. + +// The search term was sent to the search engine WITHOUT the wildcard, +// as the custom analyzer's logic strips them out. + +// This can be verified by checking the explanations: +var explanation = explanations.GetExplanations(employees[0].Id)[0]; +Assert.Contains($"EmployeeNotes:french", explanation); +Assert.DoesNotContain($"EmployeeNotes:*french", explanation); +`} + + + + +{`from index "Employees/ByNotes/UsingCustomAnalyzer" +where search(EmployeeNotes, "*French*") +include explanations() +`} + + + + + + + +##### When using the Exact analyzer: +When using the default Exact analyzer in your index (which is [KeywordAnalyzer](../../indexes/using-analyzers.mdx#using-the-default-exact-analyzer)), +then when querying the index, the wildcards in your search terms remain untouched. +The terms are sent to the search engine exactly as produced by the analyzer. + +For example: + + + + +{`public class Employees_ByFirstName_usingExactAnalyzer : + AbstractIndexCreationTask +{ + public class IndexEntry + { + public string FirstName { get; set; } + } + + public Employees_ByFirstName_usingExactAnalyzer() + { + Map = employees => from employee in employees + select new IndexEntry() + { + FirstName = employee.FirstName + }; + + // Set the Exact analyzer for the index-field: + // (The field will not be tokenized) + Indexes.Add(x => x.FirstName, FieldIndexing.Exact); + } +} +`} + + + + +{`List employees = session + .Query() + + .ToDocumentQuery() + .IncludeExplanations(out var explanations) + .ToQueryable() + + // Provide a term with a wildcard to the Search method: + .Search(x => x.FirstName, "Mich*") + .OfType() + .ToList(); + +// Results will contain all Employee documents with FirstName that starts with 'Mich' +// (e.g. Michael). + +// The search term, 'Mich*', is sent to the search engine +// exactly as was provided to the Search method, WITH the wildcard. 
+
+var explanation = explanations.GetExplanations(employees[0].Id)[0];
+Assert.Contains($"FirstName:Mich*", explanation);
+`}
+
+
+
+{`List<Employee> employees = await asyncSession
+    .Query<Employees_ByFirstName_usingExactAnalyzer.IndexEntry, Employees_ByFirstName_usingExactAnalyzer>()
+
+    .ToDocumentQuery()
+    .IncludeExplanations(out var explanations)
+    .ToQueryable()
+
+    // Provide a term with a wildcard to the Search method:
+    .Search(x => x.FirstName, "Mich*")
+    .OfType<Employee>()
+    .ToListAsync();
+
+// Results will contain all Employee documents with FirstName that starts with 'Mich'
+// (e.g. Michael).
+
+// The search term, 'Mich*', is sent to the search engine
+// exactly as was provided to the Search method, WITH the wildcard.
+
+var explanation = explanations.GetExplanations(employees[0].Id)[0];
+Assert.Contains($"FirstName:Mich*", explanation);
+`}
+
+
+
+{`List<Employee> employees = session.Advanced
+    .DocumentQuery<Employees_ByFirstName_usingExactAnalyzer.IndexEntry, Employees_ByFirstName_usingExactAnalyzer>()
+    .IncludeExplanations(out var explanations)
+    // Provide a term with a wildcard to the Search method:
+    .Search(x => x.FirstName, "Mich*")
+    .OfType<Employee>()
+    .ToList();
+
+// Results will contain all Employee documents with FirstName that starts with 'Mich'
+// (e.g. Michael).
+
+// The search term, 'Mich*', is sent to the search engine
+// exactly as was provided to the Search method, WITH the wildcard.
+
+var explanation = explanations.GetExplanations(employees[0].Id)[0];
+Assert.Contains($"FirstName:Mich*", explanation);
+`}
+
+
+
+{`from index "Employees/ByFirstName/usingExactAnalyzer"
+where search(FirstName, "Mich*")
+include explanations()
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_searching-java.mdx b/versioned_docs/version-7.1/indexes/querying/_searching-java.mdx
new file mode 100644
index 0000000000..5f3eb311d8
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_searching-java.mdx
@@ -0,0 +1,391 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+When you need to do more complex text searching, use the `search` method.
+This method allows you to pass several search terms that will be matched against a particular field.
+Here is a sample code snippet that uses the `search` method to get users with the name *John* or *Adam*:
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .search("Name", "John Adam")
+    .toList();
+`}
+
+
+
+{`from Users
+where search(Name, 'John Adam')
+`}
+
+
+
+
+Each of the search terms (separated by the space character) is checked independently.
+The resulting documents must match at least one of the passed terms.
+
+In the same way, you can also look for users who have a certain hobby:
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .search("Hobbies", "looking for someone who likes sport books computers")
+    .toList();
+`}
+
+
+
+{`from Users
+where search(Hobbies, 'looking for someone who likes sport books computers')
+`}
+
+
+
+
+The results will contain users who are interested in *sport*, *books*, or *computers*.
+
+## Multiple Fields
+
+The `search` method also lets you query multiple indexed fields. To search using both the `name` and `hobbies` properties, issue the following query:
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .search("Name", "Adam")
+    .search("Hobbies", "sport")
+    .toList();
+`}
+
+
+
+{`from Users
+where search(Name, 'Adam') or search(Hobbies, 'sport')
+`}
+
+
+
+
+## Boosting
+
+Indexing in RavenDB is built upon the Lucene engine that provides a boosting term mechanism. This feature introduces the relevance level of matching documents based on the terms found.
+Each search term can be associated with a boost factor that influences the final search results. The higher the boost factor, the more relevant the term will be.
+RavenDB supports this mechanism as well: to refine your search and provide users with more accurate results, you can specify the boost argument.
+
+For example:
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .search("Hobbies", "I love sport")
+    .boost(10)
+    .search("Hobbies", "but also like reading books")
+    .boost(5)
+    .toList();
+`}
+
+
+
+{`from Users
+where boost(search(Hobbies, 'I love sport'), 10) or boost(search(Hobbies, 'but also like reading books'), 5)
+`}
+
+
+
+
+This search will rank users who do sports above book readers, placing them at the top of the results list.
+
+## Search Options
+
+You can specify the logic of a search expression. It can be either:
+
+* or,
+* andAlso,
+* not.
+
+The following query:
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .search("Hobbies", "computers")
+    .search("Name", "James")
+    .whereEquals("Age", 20)
+    .toList();
+`}
+
+
+
+will be translated into
+
+
+
+{`from Users
+where search(Hobbies, 'computers') or search(Name, 'James') and Age = 20
+`}
+
+
+
+You can also specify exactly what the query logic should be. The applied option affects the query term it was used with. The following query:
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .search("Name", "Adam")
+    .andAlso()
+    .search("Hobbies", "sport")
+    .toList();
+`}
+
+
+
+will result in the following RQL query:
+
+
+
+{`from Users
+where search(Name, 'Adam') and search(Hobbies, 'sport')
+`}
+
+
+
+If you want to negate a term, use `not`:
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .not()
+    .search("Name", "James")
+    .toList();
+`}
+
+
+
+According to RQL syntax, it will be transformed into the following query:
+
+
+
+{`from Users
+where exists(Name) and not search(Name, 'James')
+`}
+
+
+
+You can also combine search options:
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .search("Name", "Adam")
+    .andAlso()
+    .not()
+    .search("Hobbies", "sport")
+    .toList();
+`}
+
+
+
+It will produce the following RQL query:
+
+
+
+{`from Users
+where search(Name, 'Adam') and (exists(Hobbies) and not search(Hobbies, 'sport'))
+`}
+
+
+
+## Using Wildcards
+
+When the beginning or the ending of a search term is unknown, wildcards can be used to broaden the search. RavenDB supports both leading and trailing wildcards.
+
+### Example I - Using Trailing Wildcards
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .search("Name", "Jo* Ad*")
+    .toList();
+`}
+
+
+
+{`from Users
+where search(Name, 'Jo* Ad*')
+`}
+
+
+
+
+### Example II - Using Leading and Trailing Wildcards
+
+
+
+{`List<User> users = session
+    .query(User.class)
+    .search("Name", "*oh* *da*")
+    .toList();
+`}
+
+
+
+{`from Users
+where search(Name, '*oh* *da*')
+`}
+
+
+
+
+
+RavenDB allows you to search by using such queries, but you have to be aware that **leading wildcards drastically slow down searches**.
+
+Consider whether you really need to find substrings. In most cases, looking for whole words is enough. There are also alternatives for searching without expensive wildcard matches, e.g. indexing a reversed version of the text field or creating a custom analyzer.
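+
+For instance, here is a minimal sketch of the reversed-field alternative. Assume a hypothetical static-index 'Users/ByReversedName' whose 'reversedName' index-field stores each user's Name reversed ("nhoJ" for "John"). A search for names ending with "ohn" then becomes an inexpensive prefix match on the reversed fragment:
+
+
+
+{`from index 'Users/ByReversedName'
+where startsWith(reversedName, 'nho')
+`}
+
+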
+ + +## Static Indexes + +All of the previous examples demonstrated searching capabilities by executing dynamic queries and were using auto indexes underneath. The same set of queries can be done when static indexes are used, and also those capabilities can be customized by changing the [analyzer](../using-analyzers.mdx) or setting up full text search on multiple fields. + +### Example I - Basics + +To be able to search you need to set `Indexing` to `Search` on a desired field. + + + +{`public static class Users_ByName extends AbstractIndexCreationTask \{ + public Users_ByName() \{ + map = "docs.Users.Select(user => new \{" + + " Name = user.Name" + + "\})"; + + index("Name", FieldIndexing.SEARCH); + \} +\} +`} + + + + + + +{`List users = session + .query(User.class, Users_ByName.class) + .search("Name", "John") + .toList(); +`} + + + + +{`from index 'Users/ByName' +where search(Name, 'John') +`} + + + + +### Example II - FullTextSearch + + + + +{`List users = session + .query(User.class, Users_Search.class) + .search("Query", "John") + .toList(); +`} + + + + +{`from index 'Users/Search' +where search(Query, 'John') +`} + + + + +### Example III - Indexing all fields for FTS + +This indexing method is supported only when using **Lucene** as the indexing engine. + + + +{`public static class Products_ByAllValues extends AbstractIndexCreationTask \{ + public static class IndexEntry \{ + private String allValues; + + public String getAllValues() \{ + return allValues; + \} + + public void setAllValues(String allValues) \{ + this.allValues = allValues; + \} + \} + + public Products_ByAllValues() \{ + map = "docs.Products.Select(product => new \{ " + + // Use the 'AsJson' method to convert the document into a JSON-like structure + // and call 'Select' to extract only the values of each property + " allValues = this.AsJson(product).Select(x => x.Value) " + + "\})"; + + // Configure the index-field for FTS: + // Set 'FieldIndexing.SEARCH' on index-field 'allValues' + index("allValues", FieldIndexing.SEARCH); + + // Set the search engine type to Lucene: + searchEngineType = SearchEngineType.LUCENE; + \} +\} +`} + + + + + + +{`List results = session + .query(Products_ByAllValues.IndexEntry.class, Products_ByAllValues.class) + .search("allValues", "tofu") + .ofType(Product.class) + .toList(); + +// * Results will contain all Product documents that have 'tofu' +// in ANY of their fields. +// +// * Search is case-insensitive since the default analyzer is used. +`} + + + + +{`from index "Products/ByAllValues" +where search(allValues, "tofu") +`} + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_searching-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_searching-nodejs.mdx new file mode 100644 index 0000000000..87407a6670 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_searching-nodejs.mdx @@ -0,0 +1,536 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Prior to reading this article, please refer to [full-Text search with dynamic queries](../../client-api/session/querying/text-search/full-text-search.mdx) + to learn about the `search` method. + +* **All capabilities** provided by `search` with a dynamic query can also be used when querying a static-index. 
+
+* However, as opposed to making a dynamic search query where an auto-index is created for you,
+  when using a **static-index**:
+
+    * You must configure the index-field in which you want to search.
+      See examples below.
+
+    * You can configure which analyzer will be used to tokenize this field.
+      See [selecting an analyzer](../../indexes/using-analyzers.mdx#selecting-an-analyzer-for-a-field).
+* In this article:
+  * [Indexing single field for FTS](../../indexes/querying/searching.mdx#indexing-single-field-for-fts)
+  * [Indexing multiple fields for FTS](../../indexes/querying/searching.mdx#indexing-multiple-fields-for-fts)
+  * [Indexing all fields for FTS (using AsJson)](../../indexes/querying/searching.mdx#indexing-all-fields-for-fts-(using-asjson))
+  * [Boosting search results](../../indexes/querying/searching.mdx#boosting-search-results)
+  * [Searching with wildcards](../../indexes/querying/searching.mdx#searching-with-wildcards)
+      * [When using RavenStandardAnalyzer or StandardAnalyzer or NGramAnalyzer](../../indexes/querying/searching.mdx#when-usingoror)
+      * [When using a custom analyzer](../../indexes/querying/searching.mdx#when-using-a-custom-analyzer)
+      * [When using the Exact analyzer](../../indexes/querying/searching.mdx#when-using-the-exact-analyzer)
+
+
+## Indexing single field for FTS
+
+#### The index:
+
+
+
+{`class Employees_ByNotes extends AbstractJavaScriptIndexCreationTask \{
+
+    constructor() \{
+        super();
+
+        // Define the index-fields
+        this.map("Employees", e => (\{
+            employeeNotes: e.Notes
+        \}));
+
+        // Configure the index-field for FTS:
+        // Set 'Search' on index-field 'employeeNotes'
+        this.index("employeeNotes", "Search");
+
+        // Optionally: Set your choice of analyzer for the index-field.
+        // Here the text from index-field 'employeeNotes' will be tokenized by 'WhitespaceAnalyzer'.
+        this.analyze("employeeNotes", "WhitespaceAnalyzer");
+
+        // Note:
+        // If no analyzer is set, the default 'RavenStandardAnalyzer' is used.
+    \}
+\}
+`}
+
+
+#### Query with Search:
+
+* Use `search` to make a full-text search when querying the index,
+  taking advantage of any of its capabilities, such as using wildcards, searching for multiple terms, etc.
+
+* Refer to [Full-Text search with dynamic queries](../../client-api/session/querying/text-search/full-text-search.mdx) for all available **Search options**.
+
+
+
+{`const employees = await session
+    // Query the index
+    .query({ indexName: "Employees/ByNotes" })
+    // Call 'search':
+    // pass the index field name that was configured for FTS and the term to search for.
+    .search("employeeNotes", "French")
+    .all();
+
+// * Results will contain all Employee documents that have 'French' in their 'Notes' field.
+//
+// * Search is case-sensitive since field was indexed using the 'WhitespaceAnalyzer'
+//   which preserves casing.
+`}
+
+
+
+{`from index "Employees/ByNotes"
+where search(employeeNotes, "French")
+`}
+
+
+
+
+
+## Indexing multiple fields for FTS
+
+#### The index:
+
+
+
+{`class Employees_ByEmployeeData extends AbstractJavaScriptIndexCreationTask \{
+
+    constructor() \{
+        super();
+
+        // Define the index-fields
+        this.map("Employees", e => (\{
+            // Multiple document-fields can be indexed
+            // into the single index-field 'employeeData'
+            employeeData: [e.FirstName, e.LastName, e.Title, e.Notes]
+        \}));
+
+        // Configure the index-field for FTS:
+        // Set 'Search' on index-field 'employeeData'
+        this.index("employeeData", "Search");
+
+        // Note:
+        // Since no analyzer is set, the default 'RavenStandardAnalyzer' is used.
+ \} +\} +`} + + +#### Sample query: + + + + +{`const employees = await session + // Query the static-index + .query({ indexName: "Employees/ByEmployeeData" }) + .openSubclause() + // A logical OR is applied between the following two Search calls: + .search("employeeData", "Manager") + // A logical AND is applied between the following two terms: + .search("employeeData", "French Spanish", "AND") + .closeSubclause() + .all(); + +// * Results will contain all Employee documents that have: +// ('Manager' in any of the 4 document-fields that were indexed) +// OR +// ('French' AND 'Spanish' in any of the 4 document-fields that were indexed) +// +// * Search is case-insensitive since the default analyzer is used +`} + + + + +{`from index "Employees/ByEmployeeData" +where (search(employeeData, "Manager") or search(employeeData, "French Spanish", and)) +`} + + + + + + +## Indexing all fields for FTS (using AsJson) + +* To search across ALL fields in a document without defining each one explicitly, use the `AsJson` method, + which is available when using **a C# LINQ string** that is assigned to the `map` property in the Node.js index class, + as shown in the example below. + +* This approach makes the index robust to changes in the document schema. + By calling `.Select(x => x.Value)` on the result of `AsJson(...)`, + the index automatically includes values from ALL existing and newly added properties + and there is no need to update the index when the document structure changes. + +* + This indexing method is supported only when using **Lucene** as the indexing engine. + +#### The index: + + + +{`// Extend the index class from 'AbstractCsharpIndexCreationTask': +class Products_ByAllValues extends AbstractCsharpIndexCreationTask \{ + constructor () \{ + super(); + + // Using a C# LINQ string: + this.map = \`docs.Products.Select(product => new \{ + AllValues = this.AsJson(product).Select(x => x.Value) + \})\`; + + // Configure the index-field for FTS: + // Set 'Search' on index-field 'AllValues' + this.index("AllValues", "Search"); + + // Note: + // Since no analyzer is set, the default 'RavenStandardAnalyzer' is used. + + // Set the search engine type to Lucene: + this.searchEngineType = "Lucene"; + \} +\} +`} + + +#### Sample query: + + + + +{`const products = await session + .query({ indexName: "Products/ByAllValues" }) + .search("AllValues", "tofu") + .all(); + +// * Results will contain all Product documents that have 'tofu' +// in ANY of their fields. +// +// * Search is case-insensitive since the default analyzer is used. +`} + + + + +{`from index "Products/ByAllValues" +where search(AllValues, "tofu") +`} + + + + + + +## Boosting search results + +* In order to prioritize results, you can provide a boost value to the searched terms. + This can be applied by either of the following: + + * Add a boost value to the relevant index-field **inside the index definition**. + Refer to article [indexes - boosting](../../indexes/boosting.mdx). + + * Add a boost value to the queried terms **at query time**. + Refer to article [Boost search results](../../client-api/session/querying/text-search/boost-search-results.mdx). + + + +## Searching with wildcards + +* When making a full-text search with wildcards in the search terms, + the presence of wildcards (`*`) in the terms sent to the search engine is determined by the transformations applied by the + [analyzer](../../indexes/using-analyzers.mdx) used in the index. 
+ +* Note the different behavior in the following cases, as described below: + * [When using RavenStandardAnalyzer or StandardAnalyzer or NGramAnalyzer](../../indexes/querying/searching.mdx#when-usingoror) + * [When using a custom analyzer](../../indexes/querying/searching.mdx#when-using-a-custom-analyzer) + * [When using the Exact analyzer](../../indexes/querying/searching.mdx#when-using-the-exact-analyzer) + +* When using [Corax](../../indexes/search-engine/corax.mdx) as the search engine, + this behavior will only apply to indexes that are newly created or have been reset. + + +##### When using `RavenStandardAnalyzer` or`StandardAnalyzer` or `NGramAnalyzer`: +Usually, the same analyzer used to tokenize field content at **indexing time** is also used to process the terms provided in the **full-text search query** +before they are sent to the search engine to retrieve matching documents. + +**However, in the following cases**: + +* When making a [dynamic search query](../../client-api/session/querying/text-search/full-text-search.mdx) +* or when querying a static index that uses the default [RavenStandardAnalyzer](../../indexes/using-analyzers.mdx#using-the-default-search-analyzer) +* or when querying a static index that uses the [StandardAnalyzer](../../indexes/using-analyzers.mdx#analyzers-that-remove-common-stop-words) +* or when querying a static index that uses the [NGramAnalyzer](../../indexes/using-analyzers.mdx#analyzers-that-tokenize-according-to-the-defined-number-of-characters) + +the queried terms in the _search_ method are processed with the [LowerCaseKeywordAnalyzer](../../indexes/using-analyzers.mdx#using-the-default-analyzer) +before being sent to the search engine. + +This analyzer does Not remove the `*`, so the terms are sent with `*`, as provided in the search terms. +For example: + + + + +{`class Employees_ByNotes_usingDefaultAnalyzer extends AbstractJavaScriptIndexCreationTask { + + constructor() { + super(); + + // Define the index-fields + this.map("Employees", e => ({ + employeeNotes: e.Notes + })); + + // Configure the index-field for FTS: + this.index("employeeNotes", "Search"); + + // Since no analyzer is explicitly set + // then the default 'RavenStandardAnalyzer' will be used at indexing time. + + // However, when making a search query with wildcards, + // the 'LowerCaseKeywordAnalyzer' will be used to process the search terms + // prior to sending them to the search engine. + } +} +`} + + + + +{`let explanations; + +const employees = await session + .query({ indexName: "Employees/ByNotes/usingDefaultAnalyzer" }) + // If you request to include explanations, + // you can see the exact term that was sent to the search engine. + .includeExplanations(e => explanations = e) + // Provide a term with a wildcard to the search method: + .search("employeeNotes", "*rench") + .all(); + +// Results will contain all Employee documents that have terms that end with 'rench' +// (e.g. French). + +// Checking the explanations, you can see that the search term 'rench' +// was sent to the search engine WITH the leading wildcard, i.e. '*rench' +// since the 'LowerCaseKeywordAnalyzer' is used in this case. 
+const explanation = explanations.explanations[employees[0].id][0];
+const expectedVal = "employeeNotes:*rench";
+
+assert.ok(explanation.includes(expectedVal),
+    \`'$\{explanation\}' does not contain '$\{expectedVal\}'.\`);
+`}
+
+
+
+{`from index "Employees/ByNotes/usingDefaultAnalyzer"
+where search(employeeNotes, "*rench")
+include explanations()
+`}
+
+
+
+
+
+
+
+##### When using a custom analyzer:
+* When a [custom analyzer](../../indexes/using-analyzers.mdx#creating-custom-analyzers) is set in your index to tokenize field content,
+  the search terms in a query on that index will be processed according to the custom analyzer's logic.
+
+* The `*` will remain in the terms if the custom analyzer allows it.
+  It is the user’s responsibility to ensure that wildcards are not removed by the custom analyzer if they should be included in the query.
+
+* Note:
+  An exception to the above is when the wildcard is used as a suffix in the search term (e.g. `Fren*`).
+  In this case the wildcard will be included in the query regardless of the analyzer's logic.
+
+For example:
+
+
+
+{`class Employees_ByNotes_usingCustomAnalyzer extends AbstractJavaScriptIndexCreationTask {
+
+    constructor() {
+        super();
+
+        this.map("Employees", e => ({
+            employeeNotes: e.Notes
+        }));
+
+        // Configure the index-field for FTS:
+        this.index("employeeNotes", "Search");
+
+        // Set a custom analyzer for the index-field:
+        this.analyze("employeeNotes", "RemoveWildcardsAnalyzer");
+    }
+}
+`}
+
+
+
+{`// The custom analyzer:
+// ====================
+
+const removeWildcardsAnalyzer = \`
+    using System.IO;
+    using Lucene.Net.Analysis;
+    using Lucene.Net.Analysis.Standard;
+    namespace CustomAnalyzers
+    {
+        public class RemoveWildcardsAnalyzer : StandardAnalyzer
+        {
+            public RemoveWildcardsAnalyzer() : base(Lucene.Net.Util.Version.LUCENE_30)
+            {
+            }
+
+            public override TokenStream TokenStream(string fieldName, System.IO.TextReader reader)
+            {
+                // Read input stream and remove wildcards (*)
+                string text = reader.ReadToEnd();
+                string processedText = RemoveWildcards(text);
+                StringReader newReader = new StringReader(processedText);
+
+                return base.TokenStream(fieldName, newReader);
+            }
+
+            private string RemoveWildcards(string input)
+            {
+                // Replace wildcard characters with an empty string
+                return input.Replace("*", "");
+            }
+        }
+    }\`;
+
+// Deploying the custom analyzer:
+// ==============================
+
+const analyzerDefinition = {
+    name: "RemoveWildcardsAnalyzer",
+    code: removeWildcardsAnalyzer
+};
+
+await documentStore.maintenance.send(new PutAnalyzersOperation(analyzerDefinition));
+`}
+
+
+
+{`let explanations;
+
+const employees = await session
+    .query({ indexName: "Employees/ByNotes/usingCustomAnalyzer" })
+    .includeExplanations(e => explanations = e)
+    // Provide a term with wildcards to the search method:
+    .search("employeeNotes", "*French*")
+    .all();
+
+// Even though a wildcard was provided,
+// the results will contain only Employee documents that contain the exact term 'French'.
+
+// The search term was sent to the search engine WITHOUT the wildcards,
+// as the custom analyzer's logic strips them out.
+
+// This can be verified by checking the explanations:
+const explanation = explanations.explanations[employees[0].id][0];
+
+const expectedVal = "employeeNotes:french";
+assert.ok(explanation.includes(expectedVal),
+    \`'$\{explanation\}' does not contain '$\{expectedVal\}'.\`);
+
+const notExpectedVal = "employeeNotes:*french";
+assert.ok(!explanation.includes(notExpectedVal),
+    \`'$\{explanation\}' should not contain '$\{notExpectedVal\}'.\`);
+`}
+
+
+
+{`from index "Employees/ByNotes/UsingCustomAnalyzer"
+where search(employeeNotes, "*French*")
+include explanations()
+`}
+
+
+
+
+
+
+
+##### When using the Exact analyzer:
+When using the default Exact analyzer in your index (which is [KeywordAnalyzer](../../indexes/using-analyzers.mdx#using-the-default-exact-analyzer)),
+the wildcards in your search terms remain untouched when querying the index.
+The terms are sent to the search engine exactly as produced by the analyzer.
+
+For example:
+
+
+
+{`class Employees_ByFirstName_usingExactAnalyzer extends AbstractJavaScriptIndexCreationTask {
+
+    constructor() {
+        super();
+
+        this.map("Employees", e => ({
+            firstName: e.FirstName
+        }));
+
+        // Set the Exact analyzer for the index-field:
+        // (The field will not be tokenized)
+        this.index("firstName", "Exact");
+    }
+}
+`}
+
+
+
+{`let explanations;
+
+const employees = await session
+    .query({ indexName: "Employees/ByFirstName/usingExactAnalyzer" })
+    .includeExplanations(e => explanations = e)
+    // Provide a term with a wildcard to the search method:
+    .search("firstName", "Mich*")
+    .all();
+
+// Results will contain all Employee documents with FirstName that starts with 'Mich'
+// (e.g. Michael).
+
+// The search term, 'Mich*', is sent to the search engine
+// exactly as was provided to the search method, WITH the wildcard.
+
+const explanation = explanations.explanations[employees[0].id][0];
+const expectedVal = "firstName:Mich*";
+
+assert.ok(explanation.includes(expectedVal),
+    \`'$\{explanation\}' does not contain '$\{expectedVal\}'.\`);
+`}
+
+
+
+{`from index "Employees/ByFirstName/usingExactAnalyzer"
+where search(firstName, "Mich*")
+include explanations()
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_searching-php.mdx b/versioned_docs/version-7.1/indexes/querying/_searching-php.mdx
new file mode 100644
index 0000000000..796f074bf7
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_searching-php.mdx
@@ -0,0 +1,349 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Prior to reading this article, please refer to [Full-Text search with dynamic queries](../../client-api/session/querying/text-search/full-text-search.mdx)
+  to learn about the `search` method.
+
+* **All capabilities** provided by `search` with a dynamic query can also be used when querying a static-index.
+
+* However, as opposed to making a dynamic search query where an auto-index is created for you,
+  when using a **static-index**:
+
+    * You must configure the index-field in which you want to search.
+      See examples below.
+
+    * You can configure which analyzer will be used to tokenize this field.
+      See [selecting an analyzer](../../indexes/using-analyzers.mdx#selecting-an-analyzer-for-a-field).
+* In this article:
+  * [Indexing single field for FTS](../../indexes/querying/searching.mdx#indexing-single-field-for-fts)
+  * [Indexing multiple fields for FTS](../../indexes/querying/searching.mdx#indexing-multiple-fields-for-fts)
+  * [Indexing all fields for FTS (using AsJson)](../../indexes/querying/searching.mdx#indexing-all-fields-for-fts-(using-asjson))
+  * [Boosting search results](../../indexes/querying/searching.mdx#boosting-search-results)
+
+
+## Indexing single field for FTS
+
+#### The index:
+
+
+
+{`// The IndexEntry class defines the index-fields
+class Employees_ByNotes_IndexEntry
+\{
+    private ?string $employeeNotes = null;
+
+    public function getEmployeeNotes(): ?string
+    \{
+        return $this->employeeNotes;
+    \}
+
+    public function setEmployeeNotes(?string $employeeNotes): void
+    \{
+        $this->employeeNotes = $employeeNotes;
+    \}
+\}
+class Employees_ByNotes extends AbstractIndexCreationTask
+\{
+    public function __construct()
+    \{
+        parent::__construct();
+
+        // The 'Map' function defines the content of the index-fields
+        $this->map =
+            "from employee in docs.Employees " .
+            "select new " .
+            "\{ " .
+            "    employee_notes = employee.Notes[0]" .
+            "\}";
+
+        # Configure the index-field for FTS:
+        # Set 'FieldIndexing.Search' on index-field 'employee_notes'
+        $this->index("employee_notes", FieldIndexing::search());
+
+        # Optionally: Set your choice of analyzer for the index-field:
+        # Here the text from index-field 'employee_notes' will be tokenized by 'WhitespaceAnalyzer'.
+        $this->analyze("employee_notes", "WhitespaceAnalyzer");
+
+        # Note:
+        # If no analyzer is set, the default 'RavenStandardAnalyzer' is used.
+    \}
+\}
+`}
+
+
+#### Query with Search:
+
+* Use `search` to make a full-text search when querying the index.
+
+* Refer to [Full-Text search with dynamic queries](../../client-api/session/querying/text-search/full-text-search.mdx) for all available **Search options**,
+  such as using wildcards, searching for multiple terms, etc.
+
+
+
+{`/** @var array $employees */
+$employees = $session
+    // Query the index
+    ->query(Employees_ByNotes_IndexEntry::class, Employees_ByNotes::class)
+    // Call 'search':
+    // pass the index field that was configured for FTS and the term to search for.
+    ->search("employee_notes", "French")
+    ->ofType(Employee::class)
+    ->toList();
+
+// * Results will contain all Employee documents that have 'French' in their 'Notes' field.
+//
+// * Search is case-sensitive since field was indexed using the 'WhitespaceAnalyzer'
+//   which preserves casing.
+`}
+
+
+
+{`/** @var array $employees */
+$employees = $session->advanced()
+    // Query the index
+    ->documentQuery(Employees_ByNotes_IndexEntry::class, Employees_ByNotes::class)
+    // Call 'search':
+    // pass the index field that was configured for FTS and the term to search for.
+    ->search("employee_notes", "French")
+    ->ofType(Employee::class)
+    ->toList();
+
+// * Results will contain all Employee documents that have 'French' in their 'Notes' field.
+//
+// * Search is case-sensitive since field was indexed using the 'WhitespaceAnalyzer'
+//   which preserves casing.
+`}
+
+
+
+{`from index "Employees/ByNotes"
+where search(employee_notes, "French")
+`}
+
+
+
+
+
+## Indexing multiple fields for FTS
+
+#### The index:
+
+
+
+{`class EmployeeData
+\{
+    private ?string $firstName = null;
+    private ?string $lastName = null;
+    private ?string $title = null;
+    private ?string $notes = null;
+
+    // ...
getters and setters +\} + +class EmployeeDataArray extends TypedArray +\{ + protected function __construct() + \{ + parent::__construct(EmployeeData::class); + \} +\} + +class Employees_ByEmployeeData_IndexEntry +\{ + public ?EmployeeDataArray $employeeData = null; +\} +class Employees_ByEmployeeData extends AbstractIndexCreationTask +\{ + public function __construct() + \{ + parent::__construct(); + + $this->map = + "from employee in docs.Employees " . + "select new \{" . + " EmployeeData = " . + " \{" . + # Multiple document-fields can be indexed + # into the single index-field 'employee_data' + " employee.FirstName," . + " employee.LastName," . + " employee.Title," . + " employee.Notes" . + " \}" . + "\}"; + + // Configure the index-field for FTS: + // Set 'FieldIndexing.Search' on index-field 'EmployeeData' + $this->index("EmployeeData", FieldIndexing::search()); + + // Note: + // Since no analyzer is set then the default 'RavenStandardAnalyzer' is used. + \} +\} +`} + + +#### Sample query: + + + + +{`/** @var array $employees */ +$employees = $session + // Query the static-index + ->query(Employees_ByEmployeeData_IndexEntry::class, Employees_ByEmployeeData::class) + // A logical OR is applied between the following two Search calls: + ->search("EmployeeData", "Manager") + // A logical AND is applied between the following two terms: + ->search("EmployeeData", "French Spanish", SearchOperator::and()) + ->ofType(Employee::class) + ->toList(); + +// * Results will contain all Employee documents that have: +// ('Manager' in any of the 4 document-fields that were indexed) +// OR +// ('French' AND 'Spanish' in any of the 4 document-fields that were indexed) +// +// * Search is case-insensitive since the default analyzer is used +`} + + + + +{`/** @var array $employees */ +$employees = $session->advanced() + // Query the static-index + ->documentQuery(Employees_ByEmployeeData_IndexEntry::class, Employees_ByEmployeeData::class) + ->openSubclause() + // A logical OR is applied between the following two Search calls: + ->search("EmployeeData", "Manager") + // A logical AND is applied between the following two terms: + ->search("EmployeeData", "French Spanish", SearchOperator::and()) + ->closeSubclause() + ->ofType(Employee::class) + ->toList(); + +// * Results will contain all Employee documents that have: +// ('Manager' in any of the 4 document-fields that were indexed) +// OR +// ('French' AND 'Spanish' in any of the 4 document-fields that were indexed) +// +// * Search is case-insensitive since the default analyzer is used +`} + + + + +{`from index "Employees/ByEmployeeData" +where (search(EmployeeData, "Manager") or search(EmployeeData, "French Spanish", and)) +`} + + + + + + +## Indexing all fields for FTS (using AsJson) + +* To search across ALL fields in a document without defining each one explicitly, use the `AsJson` method, + which is available in the **C# LINQ string** that is assigned to the `map` property in the PHP index class, + as shown in the example below. + +* This approach makes the index robust to changes in the document schema. + By calling `.Select(x => x.Value)` on the result of `AsJson(...)`, + the index automatically includes values from ALL existing and newly added properties + and there is no need to update the index when the document structure changes. + +* + This indexing method is supported only when using **Lucene** as the indexing engine. 
+
+#### The index:
+
+
+
+{`class Products_ByAllValues_IndexEntry
+\{
+    public ?string $allValues = null;
+    public function getAllValues(): ?string
+    \{
+        return $this->allValues;
+    \}
+    public function setAllValues(?string $allValues): void
+    \{
+        $this->allValues = $allValues;
+    \}
+\}
+
+class Products_ByAllValues extends AbstractIndexCreationTask
+\{
+    public function __construct()
+    \{
+        parent::__construct();
+
+        $this->map = "docs.Products.Select(product => new \{ " .
+            # Use the 'AsJson' method to convert the document into a JSON-like structure
+            # and call 'Select' to extract only the values of each property
+            "    allValues = this.AsJson(product).Select(x => x.Value) " .
+            "\})";
+
+        # Configure the index-field for FTS:
+        # Set 'FieldIndexing::search' on index-field 'allValues'
+        $this->index("allValues", FieldIndexing::search());
+
+        # Set the search engine type to Lucene:
+        $this->setSearchEngineType(SearchEngineType::lucene());
+    \}
+\}
+`}
+
+
+#### Sample query:
+
+
+
+{`$results = $session->query(Products_ByAllValues_IndexEntry::class, Products_ByAllValues::class)
+    ->search("allValues", "tofu")
+    ->ofType(Product::class)
+    ->toList();
+
+// * Results will contain all Product documents that have 'tofu'
+//   in ANY of their fields.
+//
+// * Search is case-insensitive since the default analyzer is used.
+`}
+
+
+
+{`from index "Products/ByAllValues"
+where search(allValues, "tofu")
+`}
+
+
+
+
+
+## Boosting search results
+
+* To prioritize results, you can provide a boost value to the searched terms.
+  This can be applied by either of the following:
+
+  * Add a boost value to the relevant index-field **inside the index definition**.
+    Refer to the indexes [Boosting](../../indexes/boosting.mdx) article.
+
+  * Add a boost value to the queried terms **at query time**.
+    Refer to the [Boost search results](../../client-api/session/querying/text-search/boost-search-results.mdx) article.
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_searching-python.mdx b/versioned_docs/version-7.1/indexes/querying/_searching-python.mdx
new file mode 100644
index 0000000000..68a7184b3d
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_searching-python.mdx
@@ -0,0 +1,255 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Prior to reading this article, please refer to [Full-Text search with dynamic queries](../../client-api/session/querying/text-search/full-text-search.mdx)
+  to learn about the `search` method.
+
+* **All capabilities** provided by `search` with a dynamic query can also be used when querying a static-index.
+
+* However, as opposed to making a dynamic search query where an auto-index is created for you,
+  when using a **static-index**:
+
+    * You must configure the index-field in which you want to search.
+      See examples below.
+
+    * You can configure which analyzer will be used to tokenize this field.
+      See [selecting an analyzer](../../indexes/using-analyzers.mdx#selecting-an-analyzer-for-a-field).
+* In this article:
+  * [Indexing single field for FTS](../../indexes/querying/searching.mdx#indexing-single-field-for-fts)
+  * [Indexing multiple fields for FTS](../../indexes/querying/searching.mdx#indexing-multiple-fields-for-fts)
+  * [Indexing all fields for FTS (using AsJson)](../../indexes/querying/searching.mdx#indexing-all-fields-for-fts-(using-asjson))
+  * [Boosting search results](../../indexes/querying/searching.mdx#boosting-search-results)
+
+
+## Indexing single field for FTS
+
+#### The index:
+
+
+
+{`class Employees_ByNotes(AbstractIndexCreationTask):
+    # The IndexEntry class defines the index-fields
+    class IndexEntry:
+        def __init__(self, employee_notes: str = None):
+            self.employee_notes = employee_notes
+
+    def __init__(self):
+        super().__init__()
+        # The 'Map' function defines the content of the index-fields
+        self.map = "from employee in docs.Employees " "select new " "\{ " "    employee_notes = employee.Notes[0]" "\}"
+
+        # Configure the index-field for FTS:
+        # Set 'FieldIndexing.SEARCH' on index-field 'employee_notes'
+        self._index("employee_notes", FieldIndexing.SEARCH)
+
+        # Optionally: Set your choice of analyzer for the index-field:
+        # Here the text from index-field 'employee_notes' will be tokenized by 'WhitespaceAnalyzer'.
+        self._analyze("employee_notes", "WhitespaceAnalyzer")
+
+        # Note:
+        # If no analyzer is set, the default 'RavenStandardAnalyzer' is used.
+`}
+
+
+#### Query with Search:
+
+* Use `search` to make a full-text search when querying the index.
+
+* Refer to [Full-Text search with dynamic queries](../../client-api/session/querying/text-search/full-text-search.mdx) for all available **Search options**,
+  such as using wildcards, searching for multiple terms, etc.
+
+
+
+{`employees = list(
+    session
+    # Query the index
+    .query_index_type(Employees_ByNotes, Employees_ByNotes.IndexEntry)
+    # Call 'search':
+    # pass the index field that was configured for FTS and the term to search for.
+    .search("employee_notes", "French").of_type(Employee)
+)
+# * Results will contain all Employee documents that have 'French' in their 'Notes' field.
+
+# * Search is case-sensitive since field was indexed using the 'WhitespaceAnalyzer'
+#   which preserves casing.
+`}
+
+
+
+{`from index "Employees/ByNotes"
+where search(employee_notes, "French")
+`}
+
+
+
+
+
+## Indexing multiple fields for FTS
+
+#### The index:
+
+
+
+{`class Employees_ByEmployeeData(AbstractIndexCreationTask):
+    class IndexEntry:
+        def __init__(self, employee_data: List = None):
+            self.employee_data = employee_data
+
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "from employee in docs.Employees "
+            "select new \{"
+            "    employee_data = "
+            "    \{"
+            # Multiple document-fields can be indexed
+            # into the single index-field 'employee_data'
+            "        employee.FirstName,"
+            "        employee.LastName,"
+            "        employee.Title,"
+            "        employee.Notes"
+            "    \}"
+            "\}"
+        )
+        # Configure the index-field for FTS:
+        # Set 'FieldIndexing.SEARCH' on index-field 'employee_data'
+        self._index("employee_data", FieldIndexing.SEARCH)
+
+        # Note:
+        # Since no analyzer is set, the default 'RavenStandardAnalyzer' is used.
+`}
+
+
+#### Sample query:
+
+
+
+{`employees = list(
+    session
+    # Query the static-index
+    .query_index_type(Employees_ByEmployeeData, Employees_ByEmployeeData.IndexEntry)
+    .open_subclause()
+    # A logical OR is applied between the following two search calls
+    .search("employee_data", "Manager")
+    # A logical AND is applied between the following two terms
+    .search("employee_data", "French Spanish", operator=SearchOperator.AND)
+    .close_subclause()
+    .of_type(Employee)
+)
+
+# * Results will contain all Employee documents that have:
+#   ('Manager' in any of the 4 document-fields that were indexed)
+#   OR
+#   ('French' AND 'Spanish' in any of the 4 document-fields that were indexed)
+
+# * Search is case-insensitive since the default analyzer is used
+`}
+
+
+
+{`from index "Employees/ByEmployeeData"
+where (search(employee_data, "Manager") or search(employee_data, "French Spanish", and))
+`}
+
+
+
+
+
+## Indexing all fields for FTS (using AsJson)
+
+* To search across ALL fields in a document without defining each one explicitly, use the `AsJson` method,
+  which is available in the **C# LINQ string** that is assigned to the `map` property in the Python index class,
+  as shown in the example below.
+
+* This approach makes the index robust to changes in the document schema.
+  By calling `.Select(x => x.Value)` on the result of `AsJson(...)`,
+  the index automatically includes values from ALL existing and newly added properties,
+  and there is no need to update the index when the document structure changes.
+
+* 
+  This indexing method is supported only when using **Lucene** as the indexing engine.
+
+#### The index:
+
+
+
+{`class Products_ByAllValues(AbstractIndexCreationTask):
+    class IndexEntry:
+        def __init__(self, all_values: str = None):
+            self.all_values = all_values
+
+    def __init__(self):
+        super().__init__()
+        self.map = (
+            "docs.Products.Select(product => new \{ "
+            # Use the 'AsJson' method to convert the document into a JSON-like structure
+            # and call 'Select' to extract only the values of each property
+            "    all_values = this.AsJson(product).Select(x => x.Value) "
+            "\})"
+        )
+
+        # Configure the index-field for FTS:
+        # Set 'FieldIndexing.SEARCH' on index-field 'all_values'
+        self._index("all_values", FieldIndexing.SEARCH)
+
+        # Note:
+        # Since no analyzer is set, the default 'RavenStandardAnalyzer' is used.
+
+        # Set the search engine type to Lucene:
+        self.search_engine_type = SearchEngineType.LUCENE
+`}
+
+
+#### Sample query:
+
+
+
+{`products = list(
+    session.query_index_type(Products_ByAllValues, Products_ByAllValues.IndexEntry)
+    .search("all_values", "tofu")
+    .of_type(Product)
+)
+
+# * Results will contain all Product documents that have 'tofu'
+#   in ANY of their fields.
+#
+# * Search is case-insensitive since the default analyzer is used.
+`}
+
+
+
+{`from index "Products/ByAllValues"
+where search(all_values, "tofu")
+`}
+
+
+
+
+
+## Boosting search results
+
+* In order to prioritize results, you can provide a boost value to the searched terms.
+  This can be applied by either of the following:
+
+  * Add a boost value to the relevant index-field **inside the index definition**.
+    Refer to the indexes [Boosting](../../indexes/boosting.mdx) article.
+
+  * Add a boost value to the queried terms **at query time**.
+    Refer to the [Boost search results](../../client-api/session/querying/text-search/boost-search-results.mdx) article.
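+
+For example, here is a minimal RQL sketch of query-time boosting against the static-index defined above
+(the boost values are arbitrary and shown for illustration only):
+
+
+
+{`from index "Employees/ByEmployeeData"
+where boost(search(employee_data, "Manager"), 10) or boost(search(employee_data, "French"), 2)
+`}
+
+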
+ + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_sorting-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_sorting-csharp.mdx new file mode 100644 index 0000000000..b1823b0e57 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_sorting-csharp.mdx @@ -0,0 +1,276 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article provides examples of sorting query results when querying a static-index. + +* **Prior to this article**, please refer to [Sort dynamic queries results](../../client-api/session/querying/sort-query-results.mdx) for dynamic-queries examples + and general knowledge about Sorting. + +* All sorting capabilities provided for a dynamic query can also be used when querying a static-index. + +* In this page: + * [Order by index-field value](../../indexes/querying/sorting.mdx#order-by-index-field-value) + * [Order results when index-field is searchable](../../indexes/querying/sorting.mdx#order-results-when-index-field-is-searchable) + * [Additional Sorting Options](../../indexes/querying/sorting.mdx#additional-sorting-options) + + +## Order by index-field value + +* Use `OrderBy` or `OrderByDescending` to order the results by the specified index-field. + + + + +{`List products = session + // Query the index + .Query() + // Apply filtering (optional) + .Where(x => x.UnitsInStock > 10) + // Call 'OrderByDescending', pass the index-field by which to order the results + .OrderByDescending(x => x.UnitsInStock) + .OfType() + .ToList(); + +// Results will be sorted by the 'UnitsInStock' value in descending order, +// with higher values listed first. +`} + + + + +{`List products = await asyncSession + // Query the index + .Query() + // Apply filtering (optional) + .Where(x => x.UnitsInStock > 10) + // Call 'OrderByDescending', pass the index-field by which to order the results + .OrderByDescending(x => x.UnitsInStock) + .OfType() + .ToListAsync(); + +// Results will be sorted by the 'UnitsInStock' value in descending order, +// with higher values listed first. +`} + + + + +{`List products = session.Advanced + // Query the index + .DocumentQuery() + // Apply filtering (optional) + .WhereGreaterThan(x => x.UnitsInStock, 10) + // Call 'OrderByDescending', pass the index-field by which to order the results + .OrderByDescending(x => x.UnitsInStock) + .OfType() + .ToList(); + +// Results will be sorted by the 'UnitsInStock' value in descending order, +// with higher values listed first. +`} + + + + +{`public class Products_ByUnitsInStock : AbstractIndexCreationTask +{ + public class IndexEntry + { + public int UnitsInStock { get; set; } + } + + public Products_ByUnitsInStock() + { + Map = products => from product in products + select new IndexEntry() + { + UnitsInStock = product.UnitsInStock + }; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +order by UnitsInStock as long desc +`} + + + + + + +**Ordering Type**: + +* By default, the `OrderBy` methods will determine the `OrderingType` from the property path expression + and specify that ordering type in the generated RQL that is sent to the server. + +* E.g. in the above example, ordering by `x => x.UnitsInStock` will result in `OrderingType.Long` + because that property data type is an integer. + +* Different ordering can be forced. 
+ See section [Force ordering type](../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) for all available ordering types. + The same syntax used with dynamic queries also applies to queries made on indexes. + + + + + +## Order results when index-field is searchable + +* **When configuring an index-field** for [full-text search](../../indexes/querying/searching.mdx), + the content of the index-field is broken down into terms at indexing time. + The specific tokenization depends on the [analyzer](../../indexes/using-analyzers.mdx) used. + +* **When querying such index**, if you order by that searchable index-field, + results will come back sorted based on the terms, and not based on the original text of the field. + +* To overcome this, you can define another index-field that is not searchable and sort by it. + + + + +{`public class Products_BySearchName : AbstractIndexCreationTask +{ + public class IndexEntry + { + // Index-field 'Name' will be configured below for full-text search + public string Name { get; set; } + + // Index-field 'NameForSorting' will be used for ordering query results + public string NameForSorting { get; set; } + } + + public Products_BySearchName() + { + Map = products => from product in products + select new + { + // Both index-fields are assigned the same content (the 'Name' from the document) + Name = product.Name, + NameForSorting = product.Name + }; + + // Configure only the 'Name' index-field for FTS + Indexes.Add(x => x.Name, FieldIndexing.Search); + } +} +`} + + + + +{`List products = session + // Query the index + .Query() + // Call 'Search': + // Pass the index-field that was configured for FTS and the term to search for. + // Here we search for terms that start with "ch" within index-field 'Name'. + .Search(x => x.Name, "ch*") + // Call 'OrderBy': + // Pass the other index-field by which to order the results. + .OrderBy(x => x.NameForSorting) + .OfType() + .ToList(); + +// Running the above query on the NorthWind sample data, ordering by 'NameForSorting' field, +// we get the following order: +// ========================================================================================= + +// "Chai" +// "Chang" +// "Chartreuse verte" +// "Chef Anton's Cajun Seasoning" +// "Chef Anton's Gumbo Mix" +// "Chocolade" +// "Jack's New England Clam Chowder" +// "Pâté chinois" +// "Teatime Chocolate Biscuits" + +// While ordering by the searchable 'Name' field would have produced the following order: +// ====================================================================================== + +// "Chai" +// "Chang" +// "Chartreuse verte" +// "Chef Anton's Cajun Seasoning" +// "Pâté chinois" +// "Chocolade" +// "Teatime Chocolate Biscuits" +// "Chef Anton's Gumbo Mix" +// "Jack's New England Clam Chowder" +`} + + + + +{`List products = await asyncSession + // Query the index + .Query() + // Call 'Search': + // Pass the index-field that was configured for FTS and the term to search for. + // Here we search for terms that start with "ch" within index-field 'Name'. + .Search(x => x.Name, "ch*") + // Call 'OrderBy': + // Pass the other index-field by which to order the results. + .OrderBy(x => x.NameForSorting) + .OfType() + .ToListAsync(); +`} + + + + +{`List products = session.Advanced + // Query the index + .DocumentQuery() + // Call 'Search': + // Pass the index-field that was configured for FTS and the term to search for. + // Here we search for terms that start with "ch" within index-field 'Name'. 
+ .Search("Name", "ch*") + // Call 'OrderBy': + // Pass the other index-field by which to order the results. + .OrderBy("NameForSorting") + .OfType() + .ToList(); +`} + + + + +{`from index "Products/BySearchName" +where search(Name, "ch*") +order by NameForSorting +`} + + + + + + +## Additional sorting options + +* When querying an index, the following sorting options are the **same** as when making a dynamic query. + +* Refer to the examples in the links below to see how each option is achieved. + + * [Order by score](../../client-api/session/querying/sort-query-results.mdx#order-by-score) + + * [Order by random](../../client-api/session/querying/sort-query-results.mdx#order-by-random) + + * [Order by spatial](../../client-api/session/querying/sort-query-results.mdx#order-by-spatial) + + * [Chain ordering](../../client-api/session/querying/sort-query-results.mdx#chain-ordering) + + * [Custom sorters](../../client-api/session/querying/sort-query-results.mdx#custom-sorters) + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_sorting-java.mdx b/versioned_docs/version-7.1/indexes/querying/_sorting-java.mdx new file mode 100644 index 0000000000..fa55a4a986 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_sorting-java.mdx @@ -0,0 +1,376 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +## Basics + +Starting from RavenDB 4.0, the server will determine possible sorting capabilities automatically from the indexed value, but sorting will **not be applied** until you request it by using the appropriate methods. The following queries will not return ordered results: + + + + +{`List results = session + .query(Product.class, Products_ByUnitsInStock.class) + .whereGreaterThan("UnitsInStock", 10) + .toList(); +`} + + + + +{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask { + public Products_ByUnitsInStock() { + map = "docs.Products.Select(product => new {" + + " UnitsInStock = product.UnitsInStock" + + "})"; + } +} +`} + + + + +{`from index 'Products/ByUnitsInStock' +where UnitsInStock > 10 +`} + + + + +To start sorting, we need to request to order by some specified index field. In our case we will order by `UnitsInStock` in descending order: + + + + +{`List results = session + .query(Product.class, Products_ByUnitsInStock.class) + .whereGreaterThan("UnitsInStock", 10) + .orderByDescending("UnitsInStock") + .toList(); +`} + + + + +{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask { + public Products_ByUnitsInStock() { + map = "docs.Products.Select(product => new {" + + " UnitsInStock = product.UnitsInStock" + + "})"; + } +} +`} + + + + +{`from index 'Products/ByUnitsInStock' +where UnitsInStock > 10 +order by UnitsInStock as long desc +`} + + + + + + +By default, `orderBy` methods will determine `orderingType` from the property path expression (e.g. `x => x.unitsInStock` will result in `OrderingType.LONG` because property type is an integer), but a different ordering can be forced by passing `OrderingType` explicitly to one of the `orderBy` methods. 
+
+
+
+{`List<Product> results = session
+    .query(Product.class, Products_ByUnitsInStock.class)
+    .whereGreaterThan("UnitsInStock", 10)
+    .orderByDescending("UnitsInStock", OrderingType.STRING)
+    .toList();
+`}
+
+
+
+{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask {
+    public Products_ByUnitsInStock() {
+        map = "docs.Products.Select(product => new {" +
+            "    UnitsInStock = product.UnitsInStock" +
+            "})";
+    }
+}
+`}
+
+
+
+{`from index 'Products/ByUnitsInStock'
+where UnitsInStock > 10
+order by UnitsInStock desc
+`}
+
+
+
+
+
+## Ordering by Score
+
+When a query is issued, each index entry is scored by Lucene (you can read more about Lucene scoring [here](http://lucene.apache.org/core/3_3_0/scoring.html)).
+This value is available in the metadata of the resulting query documents under `@index-score` (the higher the value, the better the match).
+To order by this value, use the `orderByScore` or `orderByScoreDescending` methods:
+
+
+
+
+{`List<Product> results = session
+    .query(Product.class, Products_ByUnitsInStock.class)
+    .whereGreaterThan("UnitsInStock", 10)
+    .orderByScore()
+    .toList();
+`}
+
+
+
+{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask {
+    public Products_ByUnitsInStock() {
+        map = "docs.Products.Select(product => new {" +
+            "    UnitsInStock = product.UnitsInStock" +
+            "})";
+    }
+}
+`}
+
+
+
+{`from index 'Products/ByUnitsInStock'
+where UnitsInStock > 10
+order by score()
+`}
+
+
+
+## Chaining Orderings
+
+It is also possible to chain multiple orderings of the query results.
+You can sort the query results first by some specified index field (or by the `@index-score`), then sort all the equal entries by a different index field (or the `@index-score`).
+This can be achieved by chaining the `orderBy` (`orderByDescending`) and `orderByScore` (`orderByScoreDescending`) methods; each subsequent call adds another level of ordering.
+
+
+
+
+{`List<Product> results = session
+    .query(Product.class, Products_ByUnitsInStockAndName.class)
+    .whereGreaterThan("UnitsInStock", 10)
+    .orderBy("UnitsInStock")
+    .orderByScore()
+    .orderByDescending("Name")
+    .toList();
+`}
+
+
+
+{`public static class Products_ByUnitsInStockAndName extends AbstractIndexCreationTask {
+    public Products_ByUnitsInStockAndName() {
+        map = "docs.Products.Select(product => new {" +
+            "    UnitsInStock = product.UnitsInStock," +
+            "    Name = product.Name" +
+            "})";
+    }
+}
+`}
+
+
+
+{`from index 'Products/ByUnitsInStockAndName'
+where UnitsInStock > 10
+order by UnitsInStock, score(), Name desc
+`}
+
+
+
+## Random Ordering
+
+If you want to randomize the order of your results each time the query is executed, use the `randomOrdering` method (API reference [here](../../client-api/session/querying/how-to-customize-query.mdx#randomordering)):
+
+
+
+
+{`List<Product> results = session
+    .query(Product.class, Products_ByUnitsInStock.class)
+    .randomOrdering()
+    .whereGreaterThan("UnitsInStock", 10)
+    .toList();
+`}
+
+
+
+{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask {
+    public Products_ByUnitsInStock() {
+        map = "docs.Products.Select(product => new {" +
+            "    UnitsInStock = product.UnitsInStock" +
+            "})";
+    }
+}
+`}
+
+
+
+{`from index 'Products/ByUnitsInStock'
+where UnitsInStock > 10
+order by random()
+`}
+
+
+
+## Ordering When a Field is Searchable
+
+When sorting must be done on a field that is [Searchable](../../indexes/using-analyzers.mdx), sorting on that field directly is not supported due to [Lucene](https://lucene.apache.org/) limitations.
To overcome this, create another field that is not searchable, and sort by it. + + + + +{`List results = session + .query(Product.class, Products_ByName_Search.class) + .search("Name", "Louisiana") + .orderByDescending("NameForSorting") + .toList(); +`} + + + + +{`public static class Products_ByName_Search extends AbstractIndexCreationTask { + public static class Result { + private String name; + private String nameForSorting; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getNameForSorting() { + return nameForSorting; + } + + public void setNameForSorting(String nameForSorting) { + this.nameForSorting = nameForSorting; + } + } + + public Products_ByName_Search() { + map = "docs.Products.Select(product => new {" + + " Name = product.Name," + + " NameForSorting = product.Name" + + "})"; + + index("Name", FieldIndexing.SEARCH); + } +} +`} + + + + +{`from index 'Products/ByName/Search' +where search(Name, 'Louisiana') +order by NameForSorting desc +`} + + + + +## AlphaNumeric Ordering + +Sometimes when ordering strings, it doesn't make sense to use the default lexicographic ordering. + +For example, "Abc9" will come after "Abc10" because if treated as single characters, 9 is greater than 1. + +If you want digit characters in a string to be treated as numbers and not as text, you should use alphanumeric ordering. In that case, when comparing "Abc10" to "Abc9", the digits 1 and 0 will be treated as the number 10 which will be considered greater than 9. + +To order in this mode you can pass the `OrderingType.ALPHA_NUMERIC` type into `orderBy` or `orderByDescending`: + + + + +{`List results = session + .query(Product.class, Products_ByUnitsInStock.class) + .whereGreaterThan("UnitsInStock", 10) + .orderBy("Name", OrderingType.ALPHA_NUMERIC) + .toList(); +`} + + + + +{`public static class Products_ByUnitsInStock extends AbstractIndexCreationTask { + public Products_ByUnitsInStock() { + map = "docs.Products.Select(product => new {" + + " UnitsInStock = product.UnitsInStock" + + "})"; + } +} +`} + + + + +{`from index 'Products/ByUnitsInStock ' +where UnitsInStock > 10 +order by Name as alphanumeric +`} + + + + +## Spatial Ordering + +If your data contains geographical locations, you might want to sort the query result by distance from a given point. 
+
+This can be achieved by using the `orderByDistance` and `orderByDistanceDescending` methods (API reference [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx)):
+
+
+
+
+{`List<Event> results = session
+    .query(Event.class, Events_ByCoordinates.class)
+    .spatial("Coordinates", criteria -> criteria.withinRadius(500, 30, 30))
+    .orderByDistance(new PointField("Latitude", "Longitude"), 32.1234, 23.4321)
+    .toList();
+`}
+
+
+
+{`public static class Events_ByCoordinates extends AbstractIndexCreationTask {
+    public Events_ByCoordinates() {
+        map = "docs.Events.Select(e => new {" +
+            "    Coordinates = this.CreateSpatialField(e.Latitude, e.Longitude)" +
+            "})";
+    }
+}
+`}
+
+
+
+{`from index 'Events/ByCoordinates'
+where spatial.within(Coordinates, spatial.circle(500, 30, 30))
+order by spatial.distance(spatial.point(Latitude, Longitude), spatial.point(32.1234, 23.4321))
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_sorting-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_sorting-nodejs.mdx
new file mode 100644
index 0000000000..a9e606937e
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_sorting-nodejs.mdx
@@ -0,0 +1,197 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This article provides examples of sorting query results when querying a static-index.
+
+* **Prior to this article**, please refer to [Sort dynamic queries results](../../client-api/session/querying/sort-query-results.mdx) for dynamic-queries examples
+  and general knowledge about Sorting.
+
+* All sorting capabilities provided for a dynamic query can also be used when querying a static-index.
+
+* In this page:
+    * [Order by index-field value](../../indexes/querying/sorting.mdx#order-by-index-field-value)
+    * [Order results when index-field is searchable](../../indexes/querying/sorting.mdx#order-results-when-index-field-is-searchable)
+    * [Additional Sorting Options](../../indexes/querying/sorting.mdx#additional-sorting-options)
+
+
+## Order by index-field value
+
+* Use `orderBy` or `orderByDescending` to order the results by the specified index-field.
+
+
+
+
+{`const products = await session
+    // Query the index
+    .query({ indexName: "Products/ByUnitsInStock" })
+    // Apply filtering (optional)
+    .whereGreaterThan("unitsInStock", 10)
+    // Call 'orderByDescending'
+    // Pass the index-field by which to order the results and the ordering type
+    .orderByDescending("unitsInStock", "Long")
+    .all();
+
+// Results will be sorted by the 'unitsInStock' value in descending order,
+// with higher values listed first.
+`}
+
+
+
+{`class Products_ByUnitsInStock extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+
+        this.map("Products", p => {
+            return {
+                unitsInStock: p.UnitsInStock
+            };
+        });
+    }
+}
+`}
+
+
+
+{`from index "Products/ByUnitsInStock"
+where unitsInStock > 10
+order by unitsInStock as long desc
+`}
+
+
+
+
+
+**Ordering Type**:
+
+* If no ordering type is specified in the query, the server applies the default lexicographical ordering.
+
+* In the above example, the ordering type was set to `Long`.
+
+* Different ordering can be forced.
+  See section [Force ordering type](../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) for all available ordering types.
+  The same syntax used with dynamic queries also applies to queries made on indexes, as shown in the sketch below.
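+
+A minimal sketch that forces lexicographic ordering on the same numeric field
+(assuming the client accepts "String" here, analogous to the "Long" ordering type used above):
+
+{`const products = await session
+    .query({ indexName: "Products/ByUnitsInStock" })
+    .whereGreaterThan("unitsInStock", 10)
+    // Force 'String' ordering - values are compared lexicographically rather than numerically
+    .orderByDescending("unitsInStock", "String")
+    .all();
+`}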
+ + + + + +## Order results when index-field is searchable + +* **When configuring an index-field** for [full-text search](../../indexes/querying/searching.mdx), + the content of the index-field is broken down into terms at indexing time. + The specific tokenization depends on the [analyzer](../../indexes/using-analyzers.mdx) used. + +* **When querying such index**, if you order by that searchable index-field, + results will come back sorted based on the terms, and not based on the original text of the field. + +* To overcome this, you can define another index-field that is not searchable and sort by it. + + + + +{`class Products_BySearchName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Products", p => { + return { + // Index-field 'name' will be configured below for full-text search + name: p.Name, + + // Index-field 'nameForSorting' will be used for ordering query results + nameForSorting: p.Name + + // Note: + // Both index-fields are assigned the same content (the 'Name' from the document) + }; + }); + + // Configure only the 'name' index-field for FTS + this.index("name", "Search"); + } +} +`} + + + + +{`const products = await session + // Query the index + .query({ indexName: "Products/BySearchName"}) + // Call 'search': + // Pass the index-field that was configured for FTS and the term to search for. + // Here we search for terms that start with "ch" within index-field 'name'. + .search("name", "ch*") + // Call 'orderBy': + // Pass the other index-field by which to order the results. + .orderBy("nameForSorting") + .all(); + + +// Running the above query on the NorthWind sample data, ordering by 'NameForSorting' field, +// we get the following order: +// ========================================================================================= + +// "Chai" +// "Chang" +// "Chartreuse verte" +// "Chef Anton's Cajun Seasoning" +// "Chef Anton's Gumbo Mix" +// "Chocolade" +// "Jack's New England Clam Chowder" +// "Pâté chinois" +// "Teatime Chocolate Biscuits" + +// While ordering by the searchable 'Name' field would have produced the following order: +// ====================================================================================== + +// "Chai" +// "Chang" +// "Chartreuse verte" +// "Chef Anton's Cajun Seasoning" +// "Pâté chinois" +// "Chocolade" +// "Teatime Chocolate Biscuits" +// "Chef Anton's Gumbo Mix" +// "Jack's New England Clam Chowder" +`} + + + + +{`from index "Products/BySearchName" +where search(name, "ch*") +order by nameForSorting +`} + + + + + + +## Additional sorting options + +* When querying an index, the following sorting options are the **same** as when making a dynamic query. + +* Refer to the examples in the links below to see how each option is achieved. 
+ + * [Order by score](../../client-api/session/querying/sort-query-results.mdx#order-by-score) + + * [Order by random](../../client-api/session/querying/sort-query-results.mdx#order-by-random) + + * [Order by spatial](../../client-api/session/querying/sort-query-results.mdx#order-by-spatial) + + * [Chain ordering](../../client-api/session/querying/sort-query-results.mdx#chain-ordering) + + * [Custom sorters](../../client-api/session/querying/sort-query-results.mdx#custom-sorters) + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_sorting-php.mdx b/versioned_docs/version-7.1/indexes/querying/_sorting-php.mdx new file mode 100644 index 0000000000..01f17c4ee5 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_sorting-php.mdx @@ -0,0 +1,273 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article provides examples of sorting query results when querying a static-index. + +* **Prior to this article**, please refer to [Sort dynamic queries results](../../client-api/session/querying/sort-query-results.mdx) for dynamic-queries examples + and general knowledge about Sorting. + +* All sorting capabilities provided for a dynamic query can also be used when querying a static-index. + +* In this page: + * [Order by index-field value](../../indexes/querying/sorting.mdx#order-by-index-field-value) + * [Order results when index-field is searchable](../../indexes/querying/sorting.mdx#order-results-when-index-field-is-searchable) + * [Additional Sorting Options](../../indexes/querying/sorting.mdx#additional-sorting-options) + + +## Order by index-field value + +Use `orderByDescending` to order the results by the specified index-field. + + + + +{`/** @var array $products */ +$products = $session + // Query the index + ->query(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + // Apply filtering (optional) + ->whereGreaterThan("UnitsInStock", 10) + // Call 'OrderByDescending', pass the index-field by which to order the results + ->orderByDescending("UnitsInStock") + ->ofType(Product::class) + ->toList(); + +// Results will be sorted by the 'UnitsInStock' value in descending order, +// with higher values listed first. +`} + + + + +{`/** @var array $products */ +$products = $session->advanced() + // Query the index + ->documentQuery(Products_ByUnitsInStock_IndexEntry::class, Products_ByUnitsInStock::class) + // Apply filtering (optional) + ->whereGreaterThan("UnitsInStock", 10) + // Call 'OrderByDescending', pass the index-field by which to order the results + ->orderByDescending("UnitsInStock") + ->ofType(Product::class) + ->toList(); + +// Results will be sorted by the 'UnitsInStock' value in descending order, +// with higher values listed first. +`} + + + + +{`class Products_ByUnitsInStock_IndexEntry +{ + private ?int $unitsInStock = null; + + public function getUnitsInStock(): ?int + { + return $this->unitsInStock; + } + + public function setUnitsInStock(?int $unitsInStock): void + { + $this->unitsInStock = $unitsInStock; + } +} +class Products_ByUnitsInStock extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Products.Select(product => new {" . + " UnitsInStock = product.UnitsInStock" . 
+ "})"; + } +} +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +order by UnitsInStock as long desc +`} + + + + + + +**Ordering Type**: + +* By default, `orderByDescending` will determine the `OrderingType` from the property path expression + and specify that ordering type in the generated RQL that is sent to the server. + +* E.g., in the above example, ordering by `UnitsInStock` will result in `OrderingType::int` + because that property data type is an integer. + +* Different ordering can be forced. + See section [Force ordering type](../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) + for all available ordering types. + The same syntax used with dynamic queries also applies to queries made on indexes. + + + + + +## Order results when index-field is searchable + +* **When configuring an index-field** for [full-text search](../../indexes/querying/searching.mdx), + the content of the index-field is broken down into terms at indexing time. + The specific tokenization depends on the [analyzer](../../indexes/using-analyzers.mdx) used. + +* **When querying such index**, if you order by that searchable index-field, + results will come back sorted based on the terms, and not based on the original text of the field. + +* To overcome this, you can define another index-field that is not searchable and sort by it. + + + + +{`class Products_BySearchName_IndexEntry +{ + // Index-field 'Name' will be configured below for full-text search + private ?string $name = null; + + // Index-field 'NameForSorting' will be used for ordering query results + private ?string $nameForSorting = null; + + public function getName(): ?string + { + return $this->name; + } + + public function setName(?string $name): void + { + $this->name = $name; + } + + public function getNameForSorting(): ?string + { + return $this->nameForSorting; + } + + public function setNameForSorting(?string $nameForSorting): void + { + $this->nameForSorting = $nameForSorting; + } +} +class Products_BySearchName extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Products.Select(product => new {" . + " Name = product.Name," . + " NameForSorting = product.Name" . + "})"; + + $this->index("Name", FieldIndexing::search()); + } +} +`} + + + + +{`/** @var array $products */ +$products = $session + // Query the index + ->query(Products_BySearchName_IndexEntry::class, Products_BySearchName::class) + // Call 'Search': + // Pass the index-field that was configured for FTS and the term to search for. + // Here we search for terms that start with "ch" within index-field 'Name'. + ->search("Name", "ch*") + // Call 'OrderBy': + // Pass the other index-field by which to order the results. 
+ ->orderBy("NameForSorting") + ->ofType(Product::class) + ->toList(); + +// Running the above query on the NorthWind sample data, ordering by 'NameForSorting' field, +// we get the following order: +// ========================================================================================= + +// "Chai" +// "Chang" +// "Chartreuse verte" +// "Chef Anton's Cajun Seasoning" +// "Chef Anton's Gumbo Mix" +// "Chocolade" +// "Jack's New England Clam Chowder" +// "Pâté chinois" +// "Teatime Chocolate Biscuits" + +// While ordering by the searchable 'Name' field would have produced the following order: +// ====================================================================================== + +// "Chai" +// "Chang" +// "Chartreuse verte" +// "Chef Anton's Cajun Seasoning" +// "Pâté chinois" +// "Chocolade" +// "Teatime Chocolate Biscuits" +// "Chef Anton's Gumbo Mix" +// "Jack's New England Clam Chowder" +`} + + + + +{`/** @var array $products */ +$products = $session->advanced() + // Query the index + ->documentQuery(Products_BySearchName_IndexEntry::class, Products_BySearchName::class) + // Call 'Search': + // Pass the index-field that was configured for FTS and the term to search for. + // Here we search for terms that start with "ch" within index-field 'Name'. + ->search("Name", "ch*") + // Call 'OrderBy': + // Pass the other index-field by which to order the results. + ->orderBy("NameForSorting") + ->ofType(Product::class) + ->toList(); +`} + + + + +{`from index "Products/BySearchName" +where search(Name, "ch*") +order by NameForSorting +`} + + + + + + +## Additional sorting options + +* When querying an index, the following sorting options are the **same** as when making a dynamic query. + +* Refer to the examples in the links below to see how each option is achieved. + + * [Order by score](../../client-api/session/querying/sort-query-results.mdx#order-by-score) + + * [Order by random](../../client-api/session/querying/sort-query-results.mdx#order-by-random) + + * [Order by spatial](../../client-api/session/querying/sort-query-results.mdx#order-by-spatial) + + * [Chain ordering](../../client-api/session/querying/sort-query-results.mdx#chain-ordering) + + * [Custom sorters](../../client-api/session/querying/sort-query-results.mdx#custom-sorters) + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_sorting-python.mdx b/versioned_docs/version-7.1/indexes/querying/_sorting-python.mdx new file mode 100644 index 0000000000..bb85dd1815 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_sorting-python.mdx @@ -0,0 +1,201 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* This article provides examples of sorting query results when querying a static-index. + +* **Prior to this article**, please refer to [Sort dynamic queries results](../../client-api/session/querying/sort-query-results.mdx) for dynamic-queries examples + and general knowledge about Sorting. + +* All sorting capabilities provided for a dynamic query can also be used when querying a static-index. 
+ +* In this page: + * [Order by index-field value](../../indexes/querying/sorting.mdx#order-by-index-field-value) + * [Order results when index-field is searchable](../../indexes/querying/sorting.mdx#order-results-when-index-field-is-searchable) + * [Additional Sorting Options](../../indexes/querying/sorting.mdx#additional-sorting-options) + + +## Order by index-field value + +* Use `order_by` or `order_by_descending` to order the results by the specified index-field. + + + + +{`products = list( + session + # Query the index + .query_index_type(Products_ByUnitsInStock, Products_ByUnitsInStock.IndexEntry) + # Apply filtering (optional) + .where_greater_than("UnitsInStock", 10) + # Call 'order_by_descending', pass the index-field by which to order the results + .order_by_descending("UnitsInStock").of_type(Product) +) + +# Results will be sorted by the 'UnitsInStock' value in descending order, +# with higher values listed first. +`} + + + + +{`class Products_ByUnitsInStock(AbstractIndexCreationTask): + + class IndexEntry: + def __init__(self, units_in_stock: int = None): + self.units_in_stock = units_in_stock + + # Handle different casing + @classmethod + def from_json(cls, json_dict: Dict[str, Any]): + return cls(json_dict["UnitsInStock"]) + + def __init__(self): + super().__init__() + self.map = "from p in products select new { UnitsInStock = p.UnitsInStock }" +`} + + + + +{`from index "Products/ByUnitsInStock" +where UnitsInStock > 10 +order by UnitsInStock as long desc +`} + + + + + + +**Ordering Type**: + +* By default, the `order_by` methods will determine the `OrderingType` from the property path expression + and specify that ordering type in the generated RQL that is sent to the server. + +* E.g., in the above example, ordering by `UnitsInStock` will result in `OrderingType.Int` + because that property data type is an integer. + +* Different ordering can be forced. + See section [Force ordering type](../../client-api/session/querying/sort-query-results.mdx#force-ordering-type) + for all available ordering types. + The same syntax used with dynamic queries also applies to queries made on indexes. + + + + + +## Order results when index-field is searchable + +* **When configuring an index-field** for [full-text search](../../indexes/querying/searching.mdx), + the content of the index-field is broken down into terms at indexing time. + The specific tokenization depends on the [analyzer](../../indexes/using-analyzers.mdx) used. + +* **When querying such index**, if you order by that searchable index-field, + results will come back sorted based on the terms, and not based on the original text of the field. + +* To overcome this, you can define another index-field that is not searchable and sort by it. 
+ + + + +{`class Products_BySearchName(AbstractIndexCreationTask): + class IndexEntry: + def __init__(self, name: str = None, name_for_sorting: str = None): + # Index-field 'Name' will be configured below for full-text search + self.name = name + + # Index-field 'NameForSorting' will be used for ordering query results + self.name_for_sorting = name_for_sorting + + @classmethod + def from_json(cls, json_dict: Dict[str, Any]): + return cls(json_dict["Name"], json_dict["NameForSorting"]) + + def __init__(self): + super().__init__() + # Both index-fields are assigned the same content (The 'Name' from the document) + self.map = "from p in products select new {Name = p.Name, NameForSorting = p.Name}" + + # Configure only the 'Name' index-field for FTS + self._index("Name", FieldIndexing.SEARCH) +`} + + + + +{`products = list( + session + # Query the index + .query_index_type(Products_BySearchName, Products_BySearchName.IndexEntry) + # Call 'search': + # Pass the index-field that was configured for FTS and the term to search for. + # Here we search for terms that start with "ch" within index-field 'Name'. + .search("Name", "ch*") + # Call 'order_by': + # Pass the other index-field by which to order the results. + .order_by("NameForSorting").of_type(Product) +) +# Running the above query on the NorthWind sample data, ordering by 'NameForSorting' field, +# we get the following order: +# ========================================================================================= + +# "Chai" +# "Chang" +# "Chartreuse verte" +# "Chef Anton's Cajun Seasoning" +# "Chef Anton's Gumbo Mix" +# "Chocolade" +# "Jack's New England Clam Chowder" +# "Pâté chinois" +# "Teatime Chocolate Biscuits" + +# While ordering by the searchable 'Name' field would have produced the following order: +# ====================================================================================== + +# "Chai" +# "Chang" +# "Chartreuse verte" +# "Chef Anton's Cajun Seasoning" +# "Pâté chinois" +# "Chocolade" +# "Teatime Chocolate Biscuits" +# "Chef Anton's Gumbo Mix" +# "Jack's New England Clam Chowder" +`} + + + + +{`from index "Products/BySearchName" +where search(Name, "ch*") +order by NameForSorting +`} + + + + + + +## Additional sorting options + +* When querying an index, the following sorting options are the **same** as when making a dynamic query. + +* Refer to the examples in the links below to see how each option is achieved. + + * [Order by score](../../client-api/session/querying/sort-query-results.mdx#order-by-score) + + * [Order by random](../../client-api/session/querying/sort-query-results.mdx#order-by-random) + + * [Order by spatial](../../client-api/session/querying/sort-query-results.mdx#order-by-spatial) + + * [Chain ordering](../../client-api/session/querying/sort-query-results.mdx#chain-ordering) + + * [Custom sorters](../../client-api/session/querying/sort-query-results.mdx#custom-sorters) + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_spatial-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_spatial-csharp.mdx new file mode 100644 index 0000000000..76e78791ff --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_spatial-csharp.mdx @@ -0,0 +1,376 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Documents that contain spatial data can be queried by spatial queries that employ geographical criteria. 
+ You have two options: + + * **Dynamic spatial query** + Either make a dynamic spatial query on a collection (see [how to make a spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx)). + An auto-index will be created by the server. + + * **Spatial index query** + Or, index your documents' spatial data in a static-index (see [indexing spatial data](../../indexes/indexing-spatial-data.mdx)) + and then make a spatial query on this index ( **described in this article** ). + +* A few examples of querying a spatial index are provided below. + **A spatial query performed on a static-index is similar to the** [dynamic spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx). + Find all spatial API methods listed [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-api). + +* Examples in this page: + * [Search by radius](../../indexes/querying/spatial.mdx#search-by-radius) + * [Search by shape](../../indexes/querying/spatial.mdx#search-by-shape) + * [Sort results](../../indexes/querying/spatial.mdx#sort-results) + + +## Search by radius + +* Query the spatial index: + +* Use the `WithinRadius` method to search for all documents containing spatial data that is located + within the specified distance from the given center point. + + + + +{`// Define a spatial query on index 'Events_ByNameAndCoordinates' +List employeesWithinRadius = session + .Query() + // Call 'Spatial' method + .Spatial( + // Pass the spatial index-field containing the spatial data + "Coordinates", + // Set the geographical area in which to search for matching documents + // Call 'WithinRadius', pass the radius and the center points coordinates + criteria => criteria.WithinRadius(20, 47.623473, -122.3060097)) + .ToList(); + +// The query returns all matching Event entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). +`} + + + + +{`// Define a spatial query on index 'Events_ByNameAndCoordinates' +List employeesWithinRadius = session.Advanced + .DocumentQuery() + // Call 'Spatial' method + .Spatial( + // Pass the spatial index-field containing the spatial data + "Coordinates", + // Set the geographical area in which to search for matching documents + // Call 'WithinRadius', pass the radius and the center points coordinates + criteria => criteria.WithinRadius(20, 47.623473, -122.3060097)) + .ToList(); + +// The query returns all matching Event entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). 
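+
+// Note: the radius passed to 'WithinRadius' is expressed in kilometers by default.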
+`} + + + + +{`// Define an index with a spatial field +public class Events_ByNameAndCoordinates : AbstractIndexCreationTask +{ + public Events_ByNameAndCoordinates() + { + Map = events => from e in events + select new + { + Name = e.Name, + // Call 'CreateSpatialField' to create a spatial index-field + // Field 'Coordinates' will be composed of lat & lng supplied from the document + Coordinates = CreateSpatialField(e.Latitude, e.Longitude) + + // Documents can be retrieved + // by making a spatial query on the 'Coordinates' index-field + }; + } +} + +public class Event +{ + public string Id { get; set; } + public string Name { get; set; } + public double Latitude { get; set; } + public double Longitude { get; set; } +} +`} + + + + +{`from index "Events/ByNameAndCoordinates" +where spatial.within( + Coordinates, + spatial.circle(20, 47.623473, -122.3060097) +) + +// The query returns all matching Event entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). +`} + + + + + + +## Search by shape + +* Query the spatial index: + Use the `RelatesToShape` method to search for all documents containing spatial data that is located + in the specified relation to the given shape. + +* The shape in the query is specified as either a **circle** or a **polygon** in a WKT format. + See polygon rules [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#polygonrules). + +* The relation to the shape can be one of: `Within`, `Contains`, `Disjoint`, `Intersects`. + +* See more usage examples in the [dynamic search by shape](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape) query. + + + + +{`// Define a spatial query on index 'EventsWithWKT_ByNameAndWKT' +List employeesWithinShape = session + .Query() + // Call 'Spatial' method + .Spatial( + // Pass the spatial index-field containing the spatial data + "WKT", + // Set the geographical search criteria, call 'RelatesToShape' + criteria => criteria.RelatesToShape( + // Specify the WKT string + shapeWkt: @"POLYGON (( + -118.6527948 32.7114894, + -95.8040242 37.5929338, + -102.8344151 53.3349629, + -127.5286633 48.3485664, + -129.4620208 38.0786067, + -118.7406746 32.7853769, + -118.6527948 32.7114894 + ))", + // Specify the relation between the WKT shape and the documents spatial data + relation: SpatialRelation.Within)) + .ToList(); + +// The query returns all matching Event entities +// that are located within the specified polygon. +`} + + + + +{`// Define a spatial query on index 'EventsWithWKT_ByNameAndWKT' +List employeesWithinShape = session.Advanced + .DocumentQuery() + // Call 'Spatial' method + .Spatial( + // Pass the spatial index-field containing the spatial data + "WKT", + // Set the geographical search criteria, call 'RelatesToShape' + criteria => criteria.RelatesToShape( + // Specify the WKT string + shapeWkt: @"POLYGON (( + -118.6527948 32.7114894, + -95.8040242 37.5929338, + -102.8344151 53.3349629, + -127.5286633 48.3485664, + -129.4620208 38.0786067, + -118.7406746 32.7853769, + -118.6527948 32.7114894 + ))", + // Specify the relation between the WKT shape and the documents spatial data + relation: SpatialRelation.Within)) + .ToList(); + +// The query returns all matching Event entities +// that are located within the specified polygon. 
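+
+// Note: the polygon's vertices are listed in counter-clockwise order,
+// as required by the WKT polygon rules linked above.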
+`}
+
+
+
+{`// Define an index with a spatial field
+public class EventsWithWKT_ByNameAndWKT : AbstractIndexCreationTask<EventWithWKT>
+{
+    public EventsWithWKT_ByNameAndWKT()
+    {
+        Map = events => from e in events
+                        select new
+                        {
+                            Name = e.Name,
+                            // Call 'CreateSpatialField' to create a spatial index-field
+                            // Field 'WKT' will be composed of the WKT string supplied from the document
+                            WKT = CreateSpatialField(e.WKT)
+
+                            // Documents can be retrieved
+                            // by making a spatial query on the 'WKT' index-field
+                        };
+    }
+}
+
+public class EventWithWKT
+{
+    public string Id { get; set; }
+    public string Name { get; set; }
+    public string WKT { get; set; }
+}
+`}
+
+
+
+{`from index "EventsWithWKT/ByNameAndWKT"
+where spatial.within(
+    WKT,
+    spatial.wkt("POLYGON ((
+        -118.6527948 32.7114894,
+        -95.8040242 37.5929338,
+        -102.8344151 53.3349629,
+        -127.5286633 48.3485664,
+        -129.4620208 38.0786067,
+        -118.7406746 32.7853769,
+        -118.6527948 32.7114894))")
+)
+
+// The query returns all matching Event entities
+// that are located within the specified polygon.
+`}
+
+
+
+* Note:
+  The index in the above example indexes a WKT string in the spatial index-field.
+  However, you can also query by shape on spatial data that is indexed as lat/lng coordinates.
+
+
+
+## Sort results
+
+* Query the spatial index:
+  Use `OrderByDistance` or `OrderByDistanceDescending` to sort the results by distance from a given point.
+
+* By default, distance in RavenDB is measured in **kilometers**.
+  The distance can be rounded to a specific range.
+
+
+
+
+{`// Define a spatial query on index 'Events_ByNameAndCoordinates'
+List<Event> eventsSortedByDistance = session
+    .Query<Event, Events_ByNameAndCoordinates>()
+    // Filter results by geographical criteria
+    .Spatial(
+        "Coordinates",
+        criteria => criteria.WithinRadius(20, 47.623473, -122.3060097))
+    // Sort results, call 'OrderByDistance'
+    .OrderByDistance(
+        // Pass the spatial index-field containing the spatial data
+        "Coordinates",
+        // Sort the results by their distance from this point:
+        47.623473, -122.3060097)
+    .ToList();
+
+// Return all matching Event entities located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+// Sort the results by their distance from a specified point,
+// the closest results will be listed first.
+`}
+
+
+
+{`// Define a spatial query on index 'Events_ByNameAndCoordinates'
+List<Event> eventsSortedByDistance = session.Advanced
+    .DocumentQuery<Event, Events_ByNameAndCoordinates>()
+    // Filter results by geographical criteria
+    .Spatial(
+        "Coordinates",
+        criteria => criteria.WithinRadius(20, 47.623473, -122.3060097))
+    // Sort results, call 'OrderByDistance'
+    .OrderByDistance(
+        // Pass the spatial index-field containing the spatial data
+        "Coordinates",
+        // Sort the results by their distance from this point:
+        47.623473, -122.3060097)
+    .ToList();
+
+// Return all matching Event entities located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+// Sort the results by their distance from a specified point,
+// the closest results will be listed first.
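+
+// To list the farthest results first instead, use 'OrderByDistanceDescending'.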
+`} + + + + +{`// Define an index with a spatial field +public class Events_ByNameAndCoordinates : AbstractIndexCreationTask +{ + public Events_ByNameAndCoordinates() + { + Map = events => from e in events + select new + { + Name = e.Name, + // Call 'CreateSpatialField' to create a spatial index-field + // Field 'Coordinates' will be composed of lat & lng supplied from the document + Coordinates = CreateSpatialField(e.Latitude, e.Longitude) + + // Documents can be retrieved + // by making a spatial query on the 'Coordinates' index-field + }; + } +} + +public class Event +{ + public string Id { get; set; } + public string Name { get; set; } + public double Latitude { get; set; } + public double Longitude { get; set; } +} +`} + + + + +{`from index "Events/ByNameAndCoordinates" +where spatial.within( + Coordinates, + spatial.circle(20, 47.623473, -122.3060097) +) +order by spatial.distance( + Coordinates, + spatial.point(47.623473, -122.3060097) +) + +// The query returns all matching Event entities located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Sort the results by their distance from a specified point, +// the closest results will be listed first. +`} + + + + +* More sorting examples are available in the [dynamic spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting) article. + +* To get the **distance** for each resulting entity see [get resulting distance](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#getresultingdistance). + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_spatial-java.mdx b/versioned_docs/version-7.1/indexes/querying/_spatial-java.mdx new file mode 100644 index 0000000000..b3e21d4a8e --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_spatial-java.mdx @@ -0,0 +1,121 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +To perform a spatial search, you can use the `spatial` method which contains a full spectrum of spatial capabilities. +You can check the detailed Client API reference for this method [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx). + +## Radius Search + +The most basic usage and probably most common one is to search for all points or shapes within provided distance from the given center point. To perform this search use the `withinRadius` method. + + + + +{`List results = session + .query(Event.class) + .spatial(new PointField("latitude", "longitude"), + criteria -> criteria.withinRadius(500, 30, 30)) + .toList(); +`} + + + + +{`from Events +where spatial.within(spatial.point(latitude, longitude), spatial.circle(500, 30, 30)) +`} + + + + +## Advanced Search + +The most advanced (and low-level) method available is `relatesToShape` + + + + +{`List results = session + .query(Event.class) + .spatial(new PointField("latitude", "longitude"), + criteria -> criteria.relatesToShape( + "Circle(30 30 d=500.0000)", + SpatialRelation.WITHIN + )) + .toList(); +`} + + + + +{`from Events +where spatial.within(spatial.point(latitude, longitude), spatial.wkt('Circle(30 30 d=500.0000)')) +`} + + + + +Where the shape is in [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry) format and the relation is one of `within`, `contains`, `disjoint`, `intersects`. The above example will yield the same results as the example from the `Radius Search` section. 
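+
+As a sketch, the same method also accepts a polygon in WKT format (the coordinates below are
+hypothetical; note the counter-clockwise vertex order, which is explained next):
+
+{`List<Event> results = session
+    .query(Event.class)
+    .spatial(new PointField("latitude", "longitude"),
+        criteria -> criteria.relatesToShape(
+            "POLYGON ((-122.35 47.61, -122.29 47.61, -122.29 47.64, -122.35 47.64, -122.35 47.61))",
+            SpatialRelation.WITHIN
+        ))
+    .toList();
+`}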
+ + +When using `spatial.wkt()` to define a **polygon**, the vertices (points that form the corners of the polygon) must be listed +in a counter-clockwise order: + + + +![NoSQL ACID DB - Query a Spatial Index](./assets/spatial_1.png) + + +## Static Indexes + +All of the above examples are using the dynamic querying capabilities of RavenDB and will create automatic indexes to retrieve their results. However, spatial queries can also be performed against static indexes, and this is done in a very similar way. + + + + +{`List results = session + .query(Event.class, Events_ByCoordinates.class) + .spatial("coordinates", + criteria -> criteria.withinRadius(500, 30, 30)) + .toList(); +`} + + + + +{`public static class Events_ByCoordinates extends AbstractIndexCreationTask { + public Events_ByCoordinates() { + map = "docs.Events.Select(e => new {" + + " Coordinates = this.CreateSpatialField(((double ? ) e.latitude), ((double ? ) e.longitude))" + + "})"; + } +} +`} + + + + +{`from index 'Events/ByCoordinates' +where spatial.within(coordinates, spatial.circle(500, 30, 30)) +`} + + + + + +If you want to know how to setup and customize a spatial field in static index please refer to [this](../../indexes/indexing-spatial-data.mdx) article. + + +## Ordering + +In order to sort the results by distance, please use the `orderByDistance` or `orderByDistanceDescending` methods. You can read more about them [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx). + +## Remarks + + +Distance in RavenDB by default is measured in **kilometers**. + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_spatial-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_spatial-nodejs.mdx new file mode 100644 index 0000000000..2ae2ef9f36 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_spatial-nodejs.mdx @@ -0,0 +1,314 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Documents that contain spatial data can be queried by spatial queries that employ geographical criteria. + You have two options: + + * **Dynamic spatial query** + Either make a dynamic spatial query on a collection (see [how to make a spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx)). + An auto-index will be created by the server. + + * **Spatial index query** + Or, index your documents' spatial data in a static-index (see [indexing spatial data](../../indexes/indexing-spatial-data.mdx)) + and then make a spatial query on this index ( **described in this article** ). + +* A few examples of querying a spatial index are provided below. + **A spatial query performed on a static-index is similar to the** [dynamic spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx). + Find all spatial API methods listed [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-api). + +* Examples in this page: + * [Search by radius](../../indexes/querying/spatial.mdx#search-by-radius) + * [Search by shape](../../indexes/querying/spatial.mdx#search-by-shape) + * [Sort results](../../indexes/querying/spatial.mdx#sort-results) + + +## Search by radius + +* Query the spatial index: + +* Use the `withinRadius` method to search for all documents containing spatial data that is located + within the specified distance from the given center point. 
+
+
+
+{`// Define a spatial query on index 'Events/ByNameAndCoordinates'
+const eventsWithinRadius = await session
+    .query({ indexName: "Events/ByNameAndCoordinates" })
+    // Call 'spatial' method
+    .spatial(
+        // Pass the spatial index-field containing the spatial data
+        "coordinates",
+        // Set the geographical area in which to search for matching documents
+        // Call 'withinRadius', pass the radius and the center point's coordinates
+        criteria => criteria.withinRadius(20, 47.623473, -122.3060097))
+    .all();
+
+// The query returns all matching Event entities
+// that are located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+`}
+
+
+
+{`// Define an index with a spatial field
+class Events_ByNameAndCoordinates extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+        const { createSpatialField } = this.mapUtils();
+
+        this.map('events', e => {
+            return {
+                name: e.Name,
+                // Call 'createSpatialField' to create a spatial index-field
+                // Field 'coordinates' will be composed of lat & lng supplied from the document
+                coordinates: createSpatialField(
+                    e.latitude,
+                    e.longitude
+                )
+
+                // Documents can be retrieved
+                // by making a spatial query on the 'coordinates' index-field
+            };
+        });
+    }
+}
+
+class Event {
+    constructor(id, name, latitude, longitude) {
+        this.id = id;
+        this.name = name;
+        this.latitude = latitude;
+        this.longitude = longitude;
+    }
+}
+`}
+
+
+
+{`from index "Events/ByNameAndCoordinates"
+where spatial.within(
+    Coordinates,
+    spatial.circle(20, 47.623473, -122.3060097)
+)
+
+// The query returns all matching Event entities
+// that are located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+`}
+
+
+
+
+
+## Search by shape
+
+* Query the spatial index:
+  Use the `relatesToShape` method to search for all documents containing spatial data that is located
+  in the specified relation to the given shape.
+
+* The shape in the query is specified as either a **circle** or a **polygon** in WKT format.
+  See polygon rules [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#polygonrules).
+
+* The relation to the shape can be one of: `Within`, `Contains`, `Disjoint`, `Intersects`.
+
+* See more usage examples in the [dynamic search by shape](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape) query.
+
+
+
+
+{`// Define a spatial query on index 'EventsWithWKT/ByNameAndWKT'
+const eventsWithinShape = await session
+    .query({ indexName: "EventsWithWKT/ByNameAndWKT" })
+    // Call 'spatial' method
+    .spatial(
+        // Pass the spatial index-field containing the spatial data
+        "wkt",
+        // Set the geographical search criteria, call 'relatesToShape'
+        criteria => criteria.relatesToShape(
+            // Specify the WKT string
+            \`POLYGON ((
+                -118.6527948 32.7114894,
+                -95.8040242 37.5929338,
+                -102.8344151 53.3349629,
+                -127.5286633 48.3485664,
+                -129.4620208 38.0786067,
+                -118.7406746 32.7853769,
+                -118.6527948 32.7114894
+            ))\`,
+            // Specify the relation between the WKT shape and the document's spatial data
+            "Within"
+        ))
+    .all();
+
+// The query returns all matching Event entities
+// that are located within the specified polygon.
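+
+// Note: the polygon's vertices above are listed in counter-clockwise order,
+// as required by the WKT polygon rules.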
+`}
+
+
+
+
+{`// Define an index with a spatial field
+class EventsWithWKT_ByNameAndWKT extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+        const { createSpatialField } = this.mapUtils();
+
+        this.map('events', e => {
+            return {
+                name: e.Name,
+                // Call 'createSpatialField' to create a spatial index-field
+                // Field 'wkt' will be composed of the WKT string supplied from the document
+                wkt: createSpatialField(e.wkt)
+
+                // Documents can be retrieved by
+                // making a spatial query on the 'wkt' index-field
+            };
+        });
+    }
+}
+
+class EventWithWKT {
+    constructor(id, name, wkt) {
+        this.id = id;
+        this.name = name;
+        this.wkt = wkt;
+    }
+}
+`}
+
+
+
+
+{`from index "EventsWithWKT/ByNameAndWKT"
+where spatial.within(
+    WKT,
+    spatial.wkt("POLYGON ((
+        -118.6527948 32.7114894,
+        -95.8040242 37.5929338,
+        -102.8344151 53.3349629,
+        -127.5286633 48.3485664,
+        -129.4620208 38.0786067,
+        -118.7406746 32.7853769,
+        -118.6527948 32.7114894))")
+)
+
+// The query returns all matching Event entities
+// that are located within the specified polygon.
+`}
+
+
+
+
+* Note:
+  The index in the above example indexes a WKT string in the spatial index-field.
+  However, you can also query by shape on spatial data that is indexed as lat/lng coordinates.
+
+
+
+## Sort results
+
+* Query the spatial index:
+  Use `orderByDistance` or `orderByDistanceDescending` to sort the results by distance from a given point.
+
+* By default, distance in RavenDB is measured in **kilometers**.
+  The distance can be rounded to a specific range.
+
+
+
+
+{`// Define a spatial query on index 'Events/ByNameAndCoordinates'
+const employeesSortedByDistance = await session
+    .query({ indexName: "Events/ByNameAndCoordinates" })
+    // Filter results by geographical criteria
+    .spatial(
+        "coordinates",
+        criteria => criteria.withinRadius(20, 47.623473, -122.3060097))
+    // Sort results, call 'orderByDistance'
+    .orderByDistance(
+        // Pass the spatial index-field containing the spatial data
+        "coordinates",
+        // Sort the results by their distance from this point:
+        47.623473, -122.3060097)
+    .all();
+
+// Return all matching Event entities located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+
+// Sort the results by their distance from a specified point,
+// the closest results will be listed first.
+`}
+
+
+
+
+{`// Define an index with a spatial field
+class Events_ByNameAndCoordinates extends AbstractJavaScriptIndexCreationTask {
+    constructor() {
+        super();
+        const { createSpatialField } = this.mapUtils();
+
+        this.map('events', e => {
+            return {
+                name: e.Name,
+                // Call 'createSpatialField' to create a spatial index-field
+                // Field 'coordinates' will be composed of lat & lng supplied from the document
+                coordinates: createSpatialField(
+                    e.latitude,
+                    e.longitude
+                )
+
+                // Documents can be retrieved
+                // by making a spatial query on the 'coordinates' index-field
+            };
+        });
+    }
+}
+
+class Event {
+    constructor(id, name, latitude, longitude) {
+        this.id = id;
+        this.name = name;
+        this.latitude = latitude;
+        this.longitude = longitude;
+    }
+}
+`}
+
+
+
+
+{`from index "Events/ByNameAndCoordinates"
+where spatial.within(
+    Coordinates,
+    spatial.circle(20, 47.623473, -122.3060097)
+)
+order by spatial.distance(
+    Coordinates,
+    spatial.point(47.623473, -122.3060097)
+)
+
+// The query returns all matching Event entities located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+ +// Sort the results by their distance from a specified point, +// the closest results will be listed first. +`} + + + + +* More sorting examples are available in the [dynamic spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting) article. + +* To get the **distance** for each resulting entity see [get resulting distance](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#getresultingdistance). + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_spatial-php.mdx b/versioned_docs/version-7.1/indexes/querying/_spatial-php.mdx new file mode 100644 index 0000000000..96a74748f5 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_spatial-php.mdx @@ -0,0 +1,540 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Documents that contain spatial data can be queried by spatial queries that employ geographical criteria. + You have two options: + + * **Dynamic spatial query** + Either make a dynamic spatial query on a collection (see [how to make a spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx)). + An auto-index will be created by the server. + + * **Spatial index query** + Or, index your documents' spatial data in a static-index (see [indexing spatial data](../../indexes/indexing-spatial-data.mdx)) + and then make a spatial query on this index ( **described in this article** ). + +* A few examples of querying a spatial index are provided below. + **A spatial query performed on a static-index is similar to the** [dynamic spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx). + Find all spatial API methods listed [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-api). + +* Examples in this page: + * [Search by radius](../../indexes/querying/spatial.mdx#search-by-radius) + * [Search by shape](../../indexes/querying/spatial.mdx#search-by-shape) + * [Sort results](../../indexes/querying/spatial.mdx#sort-results) + + +## Search by radius + +* Query the spatial index: + +* Use the `withinRadius` method to search for all documents containing spatial data that is located + within the specified distance from the given center point. + + + + +{`// Define a spatial query on index 'Events_ByNameAndCoordinates' +/** @var array $employeesWithinRadius */ +$employeesWithinRadius = $session + ->query(Event::class, Events_ByNameAndCoordinates::class) + // Call 'Spatial' method + ->spatial( + // Pass the spatial index-field containing the spatial data + "Coordinates", + // Set the geographical area in which to search for matching documents + // Call 'withinRadius', pass the radius and the center points coordinates + function ($criteria) { return $criteria->withinRadius(20, 47.623473, -122.3060097); }) + ->toList(); + +// The query returns all matching Event entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). 
+`} + + + + +{`// Define a spatial query on index 'Events_ByNameAndCoordinates' +$employeesWithinRadius = $session->advanced() + ->documentQuery(Event::class, Events_ByNameAndCoordinates::class) + // Call 'Spatial' method + ->spatial( + // Pass the spatial index-field containing the spatial data + "Coordinates", + // Set the geographical area in which to search for matching documents + // Call 'WithinRadius', pass the radius and the center points coordinates + function($criteria) { return $criteria->withinRadius(20, 47.623473, -122.3060097); }) + ->toList(); + +// The query returns all matching Event entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). +`} + + + + +{`class Event +{ + private ?string $id = null; + private ?string $name = null; + private ?float $latitude = null; + private ?float $longitude = null; + + public function getId(): ?string + { + return $this->id; + } + + public function setId(?string $id): void + { + $this->id = $id; + } + + public function getName(): ?string + { + return $this->name; + } + + public function setName(?string $name): void + { + $this->name = $name; + } + + public function getLatitude(): ?float + { + return $this->latitude; + } + + public function setLatitude(?float $latitude): void + { + $this->latitude = $latitude; + } + + public function getLongitude(): ?float + { + return $this->longitude; + } + + public function setLongitude(?float $longitude): void + { + $this->longitude = $longitude; + } +} + +class Events_ByNameAndCoordinates extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Events.Select(e => new { " . + " name = e.name, " . + " coordinates = this.CreateSpatialField(((double ? ) e.latitude), ((double ? ) e.longitude)) " . + "})"; + } +} +`} + + + + +{`from index "Events/ByNameAndCoordinates" +where spatial.within( + Coordinates, + spatial.circle(20, 47.623473, -122.3060097) +) + +// The query returns all matching Event entities +// that are located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). +`} + + + + + + +## Search by shape + +* Query the spatial index: + Use the `RelatesToShape` method to search for all documents containing spatial data that is located + in the specified relation to the given shape. + +* The shape in the query is specified as either a **circle** or a **polygon** in a WKT format. + See polygon rules [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#polygonrules). + +* The relation to the shape can be one of: `WITHIN`, `CONTAINS`, `DISJOINT`, `INTERSECTS`. + +* See more usage examples in the [dynamic search by shape](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape) query. 
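+
+Beyond `within`, the other relations listed above can be used in the same way.
+Below is a minimal RQL sketch of such a variation, assuming the same `EventsWithWKT/ByNameAndWKT` index and polygon
+as in the examples that follow, with the `intersects` relation swapped in for `within`:
+
+{`from index "EventsWithWKT/ByNameAndWKT"
+where spatial.intersects(
+    WKT,
+    spatial.wkt("POLYGON ((
+        -118.6527948 32.7114894,
+        -95.8040242 37.5929338,
+        -102.8344151 53.3349629,
+        -127.5286633 48.3485664,
+        -129.4620208 38.0786067,
+        -118.7406746 32.7853769,
+        -118.6527948 32.7114894))")
+)
+
+// Matches Event entities whose indexed shape intersects the polygon,
+// i.e. overlapping shapes are included, not only fully contained ones.
+`}
+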
+
+
+
+
+{`// Define a spatial query on index 'EventsWithWKT_ByNameAndWKT'
+/** @var array $employeesWithinShape */
+$employeesWithinShape = $session
+    ->query(EventWithWKT::class, EventsWithWKT_ByNameAndWKT::class)
+    // Call 'spatial' method
+    ->spatial(
+        // Pass the spatial index-field containing the spatial data
+        "WKT",
+        // Set the geographical search criteria, call 'RelatesToShape'
+        function($criteria) { return $criteria->relatesToShape(
+            // Specify the WKT string
+            "POLYGON ((
+                -118.6527948 32.7114894,
+                -95.8040242 37.5929338,
+                -102.8344151 53.3349629,
+                -127.5286633 48.3485664,
+                -129.4620208 38.0786067,
+                -118.7406746 32.7853769,
+                -118.6527948 32.7114894
+            ))",
+            // Specify the relation between the WKT shape and the document's spatial data
+            SpatialRelation::within()); })
+    ->toList();
+
+// The query returns all matching Event entities
+// that are located within the specified polygon.
+`}
+
+
+
+
+{`// Define a spatial query on index 'EventsWithWKT_ByNameAndWKT'
+/** @var array $employeesWithinShape */
+$employeesWithinShape = $session->advanced()
+    ->documentQuery(EventWithWKT::class, EventsWithWKT_ByNameAndWKT::class)
+    // Call 'Spatial' method
+    ->spatial(
+        // Pass the spatial index-field containing the spatial data
+        "WKT",
+        // Set the geographical search criteria, call 'RelatesToShape'
+        function($criteria) { return $criteria->relatesToShape(
+            // Specify the WKT string
+            "POLYGON ((
+                -118.6527948 32.7114894,
+                -95.8040242 37.5929338,
+                -102.8344151 53.3349629,
+                -127.5286633 48.3485664,
+                -129.4620208 38.0786067,
+                -118.7406746 32.7853769,
+                -118.6527948 32.7114894
+            ))",
+            // Specify the relation between the WKT shape and the document's spatial data
+            SpatialRelation::within()); })
+    ->toList();
+
+// The query returns all matching Event entities
+// that are located within the specified polygon.
+`}
+
+
+
+
+{`class EventWithWKT {
+    private ?string $id = null;
+    private ?string $name = null;
+    private ?string $wkt = null;
+
+    public function getId(): ?string
+    {
+        return $this->id;
+    }
+
+    public function setId(?string $id): void
+    {
+        $this->id = $id;
+    }
+
+    public function getName(): ?string
+    {
+        return $this->name;
+    }
+
+    public function setName(?string $name): void
+    {
+        $this->name = $name;
+    }
+
+    public function getWkt(): ?string
+    {
+        return $this->wkt;
+    }
+
+    public function setWkt(?string $wkt): void
+    {
+        $this->wkt = $wkt;
+    }
+}
+
+class EventsWithWKT_ByNameAndWKT extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        $this->map = "docs.EventWithWKTs.Select(e => new { " .
+            "    name = e.name, " .
+            "    wkt = this.CreateSpatialField(e.wkt) " .
+            "})";
+    }
+}
+`}
+
+
+
+
+{`from index "EventsWithWKT/ByNameAndWKT"
+where spatial.within(
+    WKT,
+    spatial.wkt("POLYGON ((
+        -118.6527948 32.7114894,
+        -95.8040242 37.5929338,
+        -102.8344151 53.3349629,
+        -127.5286633 48.3485664,
+        -129.4620208 38.0786067,
+        -118.7406746 32.7853769,
+        -118.6527948 32.7114894))")
+)
+
+// The query returns all matching Event entities
+// that are located within the specified polygon.
+`}
+
+
+
+
+* Note:
+  The index in the above example indexes a WKT string in the spatial index-field.
+  However, you can also query by shape on spatial data that is indexed as lat/lng coordinates.
+
+
+
+## Sort results
+
+* Query the spatial index:
+  Use `orderByDistance` to sort the results by distance from a given point.
+
+* By default, distance in RavenDB is measured in **kilometers**.
+  The distance can be rounded to a specific range.
+ + + + +{`// Define a spatial query on index 'Events_ByNameAndCoordinates' +/** @var array $employeesSortedByDistance */ +$employeesSortedByDistance = $session + ->query(Event::class, Events_ByNameAndCoordinates::class) + // Filter results by geographical criteria + ->spatial( + "Coordinates", + function($criteria) { return $criteria->withinRadius(20, 47.623473, -122.3060097); }) + // Sort results, call 'OrderByDistance' + ->orderByDistance( + // Pass the spatial index-field containing the spatial data + "Coordinates", + // Sort the results by their distance from this point: + 47.623473, -122.3060097) + ->toList(); + +// Return all matching Event entities located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Sort the results by their distance from a specified point, +// the closest results will be listed first. +`} + + + + +{`// Define a spatial query on index 'Events_ByNameAndCoordinates' +/** @var array $employeesSortedByDistance */ +$employeesSortedByDistance = $session->advanced() + ->documentQuery(Event::class, Events_ByNameAndCoordinates::class) + // Filter results by geographical criteria + ->spatial( + "Coordinates", + function($criteria) { return $criteria->withinRadius(20, 47.623473, -122.3060097); }) + // Sort results, call 'OrderByDistance' + ->orderByDistance( + // Pass the spatial index-field containing the spatial data + "Coordinates", + // Sort the results by their distance from this point: + 47.623473, -122.3060097) + ->toList(); + +// Return all matching Event entities located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Sort the results by their distance from a specified point, +// the closest results will be listed first. +`} + + + + +{`class Event +{ + private ?string $id = null; + private ?string $name = null; + private ?float $latitude = null; + private ?float $longitude = null; + + public function getId(): ?string + { + return $this->id; + } + + public function setId(?string $id): void + { + $this->id = $id; + } + + public function getName(): ?string + { + return $this->name; + } + + public function setName(?string $name): void + { + $this->name = $name; + } + + public function getLatitude(): ?float + { + return $this->latitude; + } + + public function setLatitude(?float $latitude): void + { + $this->latitude = $latitude; + } + + public function getLongitude(): ?float + { + return $this->longitude; + } + + public function setLongitude(?float $longitude): void + { + $this->longitude = $longitude; + } +} + +class Events_ByNameAndCoordinates extends AbstractIndexCreationTask +{ + public function __construct() + { + parent::__construct(); + + $this->map = "docs.Events.Select(e => new { " . + " name = e.name, " . + " coordinates = this.CreateSpatialField(((double ? ) e.latitude), ((double ? ) e.longitude)) " . + "})"; + } +} +`} + + + + +{`from index "Events/ByNameAndCoordinates" +where spatial.within( + Coordinates, + spatial.circle(20, 47.623473, -122.3060097) +) +order by spatial.distance( + Coordinates, + spatial.point(47.623473, -122.3060097) +) + +// The query returns all matching Event entities located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Sort the results by their distance from a specified point, +// the closest results will be listed first. 
+`} + + + + +* More sorting examples are available in the [dynamic spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting) article. + +* To get the **distance** for each resulting entity see [get resulting distance](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#getresultingdistance). + + + +## Syntax + + + +{`object CreateSpatialField(double? lat, double? lng); // Latitude/Longitude coordinates +object CreateSpatialField(string shapeWkt); // Shape in WKT string format +`} + + + + +{`class SpatialOptionsFactory +\{ + public function geography(): GeographySpatialOptionsFactory + \{ + return new GeographySpatialOptionsFactory(); + \} + + public function cartesian(): CartesianSpatialOptionsFactory + \{ + return new CartesianSpatialOptionsFactory(); + \} +\} +`} + + + + +{`interface GeographySpatialOptionsFactory +\{ + // if $circleRadiusUnits is not set SpatialUnits::kilometers() will be used + + // Default is GeohashPrefixTree strategy with maxTreeLevel set to 9 + public function defaultOptions(?SpatialUnits $circleRadiusUnits = null): SpatialOptions; + + public function boundingBoxIndex(?SpatialUnits $circleRadiusUnits = null): SpatialOptions; + + public function geohashPrefixTreeIndex(int $maxTreeLevel, ?SpatialUnits $circleRadiusUnits = null): SpatialOptions; + + public function quadPrefixTreeIndex(int $maxTreeLevel, ?SpatialUnits $circleRadiusUnits = null): SpatialOptions; +\} +`} + + + + +{`interface CartesianSpatialOptionsFactory +\{ + public function boundingBoxIndex(): SpatialOptions; + public function quadPrefixTreeIndex(int $maxTreeLevel, SpatialBounds $bounds): SpatialOptions; +\} + +class SpatialBounds +\{ + private float $minX; + private float $maxX; + private float $minY; + private float $maxY; + + // ... getters and setters +\} +`} + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_spatial-python.mdx b/versioned_docs/version-7.1/indexes/querying/_spatial-python.mdx new file mode 100644 index 0000000000..cb8f2f5f08 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_spatial-python.mdx @@ -0,0 +1,281 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Documents that contain spatial data can be queried by spatial queries that employ geographical criteria. + You have two options: + + * **Dynamic spatial query** + Either make a dynamic spatial query on a collection (see [how to make a spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx)). + An auto-index will be created by the server. + + * **Spatial index query** + Or, index your documents' spatial data in a static-index (see [indexing spatial data](../../indexes/indexing-spatial-data.mdx)) + and then make a spatial query on this index ( **described in this article** ). + +* A few examples of querying a spatial index are provided below. + **A spatial query performed on a static-index is similar to the** [dynamic spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx). + Find all spatial API methods listed [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-api). 
+
+* Examples in this page:
+  * [Search by radius](../../indexes/querying/spatial.mdx#search-by-radius)
+  * [Search by shape](../../indexes/querying/spatial.mdx#search-by-shape)
+  * [Sort results](../../indexes/querying/spatial.mdx#sort-results)
+
+
+## Search by radius
+
+* Query the spatial index:
+
+* Use the `within_radius` method to search for all documents containing spatial data that is located
+  within the specified distance from the given center point.
+
+
+
+
+{`# Define a spatial query on index 'Events_ByNameAndCoordinates'
+employees_within_radius = list(
+    session.query_index_type(Events_ByNameAndCoordinates, Event)
+    # Call 'spatial' method
+    .spatial(
+        # Pass the spatial index-field containing the spatial data
+        "coordinates",
+        # Set the geographical area in which to search for matching documents
+        # Call 'within_radius', pass the radius and the center point's coordinates
+        lambda criteria: criteria.within_radius(20, 47.623473, -122.3060097),
+    )
+)
+
+# The query returns all matching Event entities
+# that are located within 20 kilometers radius
+# from point (47.623473 latitude, -122.3060097 longitude)
+`}
+
+
+
+
+{`# Define an index with a spatial field
+class Events_ByNameAndCoordinates(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        # Call 'CreateSpatialField' to create a spatial index-field
+        # Field 'coordinates' will be composed of lat & lng supplied from the document
+        self.map = (
+            "from e in docs.Events select new {"
+            "    name = e.name,"
+            "    coordinates = CreateSpatialField(e.latitude, e.longitude)"
+            "}"
+        )
+        # Documents can be retrieved
+        # by making a spatial query on the 'coordinates' index-field
+
+
+class Event:
+    def __init__(self, Id: str = None, name: str = None, latitude: float = None, longitude: float = None):
+        self.Id = Id
+        self.name = name
+        self.latitude = latitude
+        self.longitude = longitude
+`}
+
+
+
+
+{`from index "Events/ByNameAndCoordinates"
+where spatial.within(
+    Coordinates,
+    spatial.circle(20, 47.623473, -122.3060097)
+)
+
+// The query returns all matching Event entities
+// that are located within 20 kilometers radius
+// from point (47.623473 latitude, -122.3060097 longitude).
+`}
+
+
+
+
+
+
+## Search by shape
+
+* Query the spatial index:
+  Use the `relates_to_shape` method to search for all documents containing spatial data that is located
+  in the specified relation to the given shape.
+
+* The shape in the query is specified as either a **circle** or a **polygon** in WKT format.
+  See polygon rules [here](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#polygonrules).
+
+* The relation to the shape can be one of: `WITHIN`, `CONTAINS`, `DISJOINT`, `INTERSECTS`
+  (a minimal RQL sketch using `INTERSECTS` follows this list).
+
+* See more usage examples in the [dynamic search by shape](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#search-by-shape) query.
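+
+As noted in the list above, relations other than `WITHIN` can be used as well.
+The following is a minimal RQL sketch of such a variation, assuming the same `EventsWithWKT/ByNameAndWKT` index
+and polygon used in the examples below, with the `INTERSECTS` relation applied instead:
+
+{`from index "EventsWithWKT/ByNameAndWKT"
+where spatial.intersects(
+    WKT,
+    spatial.wkt("POLYGON ((
+        -118.6527948 32.7114894,
+        -95.8040242 37.5929338,
+        -102.8344151 53.3349629,
+        -127.5286633 48.3485664,
+        -129.4620208 38.0786067,
+        -118.7406746 32.7853769,
+        -118.6527948 32.7114894))")
+)
+
+// Matches Event entities whose indexed shape overlaps the polygon,
+// rather than only those fully contained within it.
+`}
+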
+
+
+
+
+{`# Define a spatial query on index 'EventsWithWKT_ByNameAndWKT'
+employees_within_shape = list(
+    session.query_index_type(EventsWithWKT_ByNameAndWKT, EventWithWKT)
+    # Call 'spatial' method
+    .spatial(
+        # Pass the spatial index-field containing the spatial data
+        "WKT",
+        # Set the geographical search criteria, call 'relates_to_shape'
+        lambda criteria: criteria.relates_to_shape(
+            # Specify the WKT string
+            shape_wkt="""POLYGON ((
+                -118.6527948 32.7114894,
+                -95.8040242 37.5929338,
+                -102.8344151 53.3349629,
+                -127.5286633 48.3485664,
+                -129.4620208 38.0786067,
+                -118.7406746 32.7853769,
+                -118.6527948 32.7114894
+            ))""",
+            # Specify the relation between the WKT shape and the document's spatial data
+            relation=SpatialRelation.WITHIN,
+        ),
+    )
+)
+# The query returns all matching Event entities
+# that are located within the specified polygon.
+`}
+
+
+
+
+{`# Define an index with a spatial field
+class EventsWithWKT_ByNameAndWKT(AbstractIndexCreationTask):
+    def __init__(self):
+        super().__init__()
+        self.map = "from e in docs.Events select new {" "    name = e.name," "    WKT = CreateSpatialField(e.WKT)" "}"
+
+
+class EventWithWKT:
+    def __init__(self, Id: str = None, name: str = None, WKT: str = None):
+        self.Id = Id
+        self.name = name
+        self.WKT = WKT
+`}
+
+
+
+
+{`from index "EventsWithWKT/ByNameAndWKT"
+where spatial.within(
+    WKT,
+    spatial.wkt("POLYGON ((
+        -118.6527948 32.7114894,
+        -95.8040242 37.5929338,
+        -102.8344151 53.3349629,
+        -127.5286633 48.3485664,
+        -129.4620208 38.0786067,
+        -118.7406746 32.7853769,
+        -118.6527948 32.7114894))")
+)
+
+// The query returns all matching Event entities
+// that are located within the specified polygon.
+`}
+
+
+
+
+* Note:
+  The index in the above example indexes a WKT string in the spatial index-field.
+  However, you can also query by shape on spatial data that is indexed as lat/lng coordinates.
+
+
+
+## Sort results
+
+* Query the spatial index:
+  Use `order_by_distance` or `order_by_distance_descending` to sort the results by distance from a given point.
+
+* By default, distance in RavenDB is measured in **kilometers**.
+  The distance can be rounded to a specific range.
+
+
+
+
+{`# Define a spatial query on index 'Events_ByNameAndCoordinates'
+employees_sorted_by_distance = list(
+    session.query_index_type(Events_ByNameAndCoordinates, Event)
+    # Filter results by geographical criteria
+    .spatial("coordinates", lambda criteria: criteria.within_radius(20, 47.623473, -122.3060097))
+    # Sort results, call 'order_by_distance'
+    .order_by_distance(
+        # Pass the spatial index-field containing the spatial data
+        "coordinates",
+        # Sort the results by their distance from this point
+        47.623473,
+        -122.3060097,
+    )
+)
+# Return all matching Event entities located within 20 kilometers radius
+# from point (47.623473 latitude, -122.3060097 longitude).
+
+# Sort the results by their distance from a specified point,
+# the closest results will be listed first.
+`} + + + + +{`# Define an index with a spatial field +class Events_ByNameAndCoordinates(AbstractIndexCreationTask): + def __init__(self): + super().__init__() + # Call 'CreateSpatialField' to create a spatial index-field + # Field 'coordinates' will be composed of lat & lng supplied from the document + self.map = ( + "from e in docs.Events select new {" + " name = e.name," + " coordinates = CreateSpatialField(e.latitude, e.longitude)" + "}" + ) + # Documents can be retrieved + # by making a spatial query on the 'coordinates' index-field + + +class Event: + def __init__(self, Id: str = None, name: str = None, latitude: float = None, longitude: float = None): + self.Id = Id + self.name = name + self.latitude = latitude + self.longitude = longitude +`} + + + + +{`from index "Events/ByNameAndCoordinates" +where spatial.within( + Coordinates, + spatial.circle(20, 47.623473, -122.3060097) +) +order by spatial.distance( + Coordinates, + spatial.point(47.623473, -122.3060097) +) + +// The query returns all matching Event entities located within 20 kilometers radius +// from point (47.623473 latitude, -122.3060097 longitude). + +// Sort the results by their distance from a specified point, +// the closest results will be listed first. +`} + + + + +* More sorting examples are available in the [dynamic spatial query](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting) article. + +* To get the **distance** for each resulting entity see [get resulting distance](../../client-api/session/querying/how-to-make-a-spatial-query.mdx#getresultingdistance). + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_suggestions-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_suggestions-csharp.mdx new file mode 100644 index 0000000000..f2c63f193d --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_suggestions-csharp.mdx @@ -0,0 +1,608 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Prior to reading this article, please refer to [query for suggestions](../../client-api/session/querying/how-to-work-with-suggestions.mdx) + for general knowledge about Suggestions and for dynamic-queries examples. + +* In addition to getting suggested terms when making a dynamic-query, + you can query for similar terms when querying an index. + +* This article provides examples of querying an index for suggestions. + Find the Suggestions API methods listed [here](../../client-api/session/querying/how-to-work-with-suggestions.mdx#syntax). + +* In this page: + * [Configure the index for suggestions](../../indexes/querying/suggestions.mdx#configure-the-index-for-suggestions) + * [The index terms](../../indexes/querying/suggestions.mdx#the-index-terms) + * [Suggest terms - for a single term](../../indexes/querying/suggestions.mdx#suggest-terms---for-a-single-term) + * [Suggest terms - for multiple terms](../../indexes/querying/suggestions.mdx#suggest-terms---for-multiple-terms) + * [Suggest terms - for multiple fields](../../indexes/querying/suggestions.mdx#suggest-terms---for-multiple-fields) + * [Suggest terms - customize options and display name](../../indexes/querying/suggestions.mdx#suggest-terms---customize-options-and-display-name) + + +## Configure the index for suggestions + +* In order to be able to ask for suggested terms when querying an index field, + that field must first be configured for suggestions in the **index definition**. 
+ +* See the following sample index: + (This index will be used in the examples ahead). + + + +{`public class Products_ByName : AbstractIndexCreationTask +\{ + // The IndexEntry class defines the index-fields + public class IndexEntry + \{ + public string ProductName \{ get; set; \} + \} + + public Products_ByName() + \{ + // The 'Map' function defines the content of the index-fields + Map = products => from product in products + select new IndexEntry + \{ + ProductName = product.Name + \}; + + // Configure index-field 'ProductName' for suggestions + Suggestion(x => x.ProductName); + + // Optionally: set 'Search' on this field + // This will split the field content into multiple terms allowing for a full-text search + Indexes.Add(x => x.ProductName, FieldIndexing.Search); + \} +\} +`} + + + + + +**Increased indexing time**: + +* When configuring an index for suggestions, then during the indexing process, + in addition to the regular breakdown of the data into terms (tokenization), + RavenDB will scramble the terms to simulate common errors. + +* This can impact indexing speed but the cost of querying suggestions is Not impacted. + + + + + +## The index terms + +Based on the Northwind sample data, +these are the terms generated for the above index `Products/ByName`: + +![Figure 1. Index terms](./assets/index-terms.png) + +1. **The index-field name** - as defined in the index definition. + In this example the field name is `ProductName`. + +2. **The terms** that were generated for this index-field from the documents in the Products collection. + * The image shows a partial view out of the 163 terms in this list. + * The terms were generated by RavenDB's [default search analyzer](../../indexes/using-analyzers.mdx#ravendb) since full-text search was set on this field. + + + +## Suggest terms - for a single term + +Based on the Northwind sample data, +the following query on the index `Products/ByName` from above has no resulting documents, +since the term `chokolade` does Not exist in the index terms for index-field `ProductName`. 
+
+
+
+{`// This query on index 'Products/ByName' has NO resulting documents
+List products = session
+    .Query()
+    .Search(x => x.ProductName, "chokolade")
+    .OfType()
+    .ToList();
+`}
+
+
+
+If you suspect that the term `chokolade` in the query criteria is written incorrectly,
+you can ask RavenDB to suggest similar terms from the index, as follows:
+
+
+
+
+{`// Query the index for suggested terms for single term:
+// ====================================================
+
+Dictionary suggestions = session
+    // Query the index
+    .Query()
+    // Call 'SuggestUsing'
+    .SuggestUsing(builder => builder
+        // Request to get terms from index-field 'ProductName' that are similar to 'chokolade'
+        .ByField(x => x.ProductName, "chokolade"))
+    .Execute();
+`}
+
+
+
+
+{`// Query the index for suggested terms for single term:
+// ====================================================
+
+Dictionary suggestions = await asyncSession
+    // Query the index
+    .Query()
+    // Call 'SuggestUsing'
+    .SuggestUsing(builder => builder
+        // Request to get terms from index-field 'ProductName' that are similar to 'chokolade'
+        .ByField(x => x.ProductName, "chokolade"))
+    .ExecuteAsync();
+`}
+
+
+
+
+{`// Define the suggestion request for single term
+var suggestionRequest = new SuggestionWithTerm("ProductName")
+{
+    // Looking for terms from index-field 'ProductName' that are similar to 'chokolade'
+    Term = "chokolade"
+};
+
+// Query the index for suggestions
+Dictionary suggestions = session
+    .Query()
+    // Call 'SuggestUsing' - pass the suggestion request
+    .SuggestUsing(suggestionRequest)
+    .Execute();
+`}
+
+
+
+
+{`// Query the index for suggested terms for single term:
+// ====================================================
+
+Dictionary suggestions = session.Advanced
+    // Query the index
+    .DocumentQuery()
+    // Call 'SuggestUsing'
+    .SuggestUsing(builder => builder
+        // Request to get terms from index-field 'ProductName' that are similar to 'chokolade'
+        .ByField(x => x.ProductName, "chokolade"))
+    .Execute();
+`}
+
+
+
+
+{`// Query for terms from index-field 'ProductName' that are similar to 'chokolade'
+from index "Products/ByName"
+select suggest(ProductName, "chokolade")
+`}
+
+
+
+
+
+
+{`// The resulting suggested terms:
+// ==============================
+
+Console.WriteLine("Suggested terms in index-field 'ProductName' that are similar to 'chokolade':");
+foreach (string suggestedTerm in suggestions["ProductName"].Suggestions)
+\{
+    Console.WriteLine("\\t\{0\}", suggestedTerm);
+\}
+
+// Suggested terms in index-field 'ProductName' that are similar to 'chokolade':
+// schokolade
+// chocolade
+// chocolate
+`}
+
+
+
+
+
+## Suggest terms - for multiple terms
+
+
+
+
+{`// Query the index for suggested terms for multiple terms:
+// =======================================================
+
+Dictionary suggestions = session
+    // Query the index
+    .Query()
+    // Call 'SuggestUsing'
+    .SuggestUsing(builder => builder
+        // Request to get terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop'
+        .ByField(x => x.ProductName, new[] { "chokolade", "syrop" }))
+    .Execute();
+`}
+
+
+
+
+{`// Query the index for suggested terms for multiple terms:
+// =======================================================
+
+Dictionary suggestions = await asyncSession
+    // Query the index
+    .Query()
+    // Call 'SuggestUsing'
+    .SuggestUsing(builder => builder
+        // Request to get terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop'
+        .ByField(x => x.ProductName, new[] { "chokolade",
"syrop" })) + .ExecuteAsync(); +`} + + + + +{`// Define the suggestion request for multiple terms +var suggestionRequest = new SuggestionWithTerms("ProductName") +{ + // Looking for terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop' + Terms = new[] { "chokolade", "syrop"} +}; + +// Query the index for suggestions +Dictionary suggestions = session + .Query() + // Call 'SuggestUsing' - pass the suggestion request + .SuggestUsing(suggestionRequest) + .Execute(); +`} + + + + +{`// Query the index for suggested terms for multiple terms: +// ======================================================= + +Dictionary suggestions = session.Advanced + // Query the index + .DocumentQuery() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + // Request to get terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop' + .ByField(x => x.ProductName, new[] { "chokolade", "syrop" })) + .Execute(); +`} + + + + +{`// Query for terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop' +from index "Products/ByName" select suggest(ProductName, $p0) +{ "p0" : ["chokolade", "syrop"] } +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +// Suggested terms in index-field 'ProductName' that are similar to 'chokolade' OR to 'syrop': +// schokolade +// chocolade +// chocolate +// sirop +// syrup +`} + + + + + +## Suggest terms - for multiple fields + + + + +{`// Query the index for suggested terms in multiple fields: +// ======================================================= + +Dictionary suggestions = session + // Query the index + .Query() + // Call 'SuggestUsing' to get suggestions for terms that are + // similar to 'chese' in first index-field (e.g. 'CompanyName') + .SuggestUsing(builder => builder + .ByField(x => x.CompanyName, "chese" )) + // Call 'AndSuggestUsing' to get suggestions for terms that are + // similar to 'frank' in an additional index-field (e.g. 'ContactName') + .AndSuggestUsing(builder => builder + .ByField(x => x.ContactName, "frank")) + .Execute(); +`} + + + + +{`// Query the index for suggested terms in multiple fields: +// ======================================================= + +Dictionary suggestions = await asyncSession + // Query the index + .Query() + // Call 'SuggestUsing' to get suggestions for terms that are + // similar to 'chese' in first index-field (e.g. 'CompanyName') + .SuggestUsing(builder => builder + .ByField(x => x.CompanyName, "chese" )) + // Call 'AndSuggestUsing' to get suggestions for terms that are + // similar to 'frank' in an additional index-field (e.g. 
'ContactName') + .AndSuggestUsing(builder => builder + .ByField(x => x.ContactName, "frank")) + .ExecuteAsync(); +`} + + + + +{`// Define suggestion requests for multiple fields: + +var request1 = new SuggestionWithTerm("CompanyName") +{ + // Looking for terms from index-field 'CompanyName' that are similar to 'chese' + Term = "chese" +}; + +var request2 = new SuggestionWithTerm("ContactName") +{ + // Looking for terms from nested index-field 'ContactName' that are similar to 'frank' + Term = "frank" +}; + +// Query the index for suggestions +Dictionary suggestions = session + .Query() + // Call 'SuggestUsing' - pass the suggestion request for the first index-field + .SuggestUsing(request1) + // Call 'AndSuggestUsing' - pass the suggestion request for the second index-field + .AndSuggestUsing(request2) + .Execute(); +`} + + + + +{`// Query the index for suggested terms in multiple fields: +// ======================================================= + +Dictionary suggestions = session.Advanced + // Query the index + .DocumentQuery() + // Call 'SuggestUsing' to get suggestions for terms that are + // similar to 'chese' in first index-field (e.g. 'CompanyName') + .SuggestUsing(builder => builder + .ByField(x => x.CompanyName, "chese" )) + // Call 'AndSuggestUsing' to get suggestions for terms that are + // similar to 'frank' in an additional index-field (e.g. 'ContactName') + .AndSuggestUsing(builder => builder + .ByField(x => x.ContactName, "frank")) + .Execute(); +`} + + + + +{`public class Companies_ByNameAndByContactName : + AbstractIndexCreationTask +{ + // The IndexEntry class defines the index-fields. + public class IndexEntry + { + public string CompanyName { get; set; } + public string ContactName { get; set; } + } + + public Companies_ByNameAndByContactName() + { + // The 'Map' function defines the content of the index-fields + Map = companies => from company in companies + select new IndexEntry + { + CompanyName = company.Name, + ContactName = company.Contact.Name + }; + + // Configure the index-fields for suggestions + Suggestion(x => x.CompanyName); + Suggestion(x => x.ContactName); + + // Optionally: set 'Search' on the index-fields + // This will split the fields' content into multiple terms allowing for a full-text search + Indexes.Add(x => x.CompanyName, FieldIndexing.Search); + Indexes.Add(x => x.ContactName, FieldIndexing.Search); + } +} +`} + + + + +{`// Query for suggested terms +// from index-field 'CompanyName' AND from index-field 'ContactName' +from index "Companies/ByNameAndByContactName" +select suggest(CompanyName, "chese"), suggest(ContactName, "frank") +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +// Suggested terms in index-field 'CompanyName' that is similar to 'chese': +// cheese +// chinese + +// Suggested terms in index-field 'ContactName' that are similar to 'frank': +// fran +// franken +`} + + + + + +## Suggest terms - customize options and display name + + + + +{`// Query the index for suggested terms - customize options and display name: +// ========================================================================= + +Dictionary suggestions = session + // Query the index + .Query() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + .ByField(x => x.ProductName, "chokolade") + // Customize suggestions options + .WithOptions(new SuggestionOptions + { + Accuracy = 0.3f, + PageSize = 5, + Distance = StringDistanceTypes.NGram, + SortMode = SuggestionSortMode.Popularity + }) + // Customize display name for 
results + .WithDisplayName("SomeCustomName")) + .Execute(); +`} + + + + +{`// Query the index for suggested terms - customize options and display name: +// ========================================================================= + +Dictionary suggestions = await asyncSession + // Query the index + .Query() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + .ByField(x => x.ProductName, "chokolade") + // Customize suggestions options + .WithOptions(new SuggestionOptions + { + Accuracy = 0.3f, + PageSize = 5, + Distance = StringDistanceTypes.NGram, + SortMode = SuggestionSortMode.Popularity + }) + // Customize display name for results + .WithDisplayName("SomeCustomName")) + .ExecuteAsync(); +`} + + + + +{`// Define the suggestion request +var suggestionRequest = new SuggestionWithTerm("ProductName") +{ + // Looking for terms from index-field 'ProductName' that are similar to 'chokolade' + Term = "chokolade", + // Customize options + Options = new SuggestionOptions + { + Accuracy = 0.3f, + PageSize = 5, + Distance = StringDistanceTypes.NGram, + SortMode = SuggestionSortMode.Popularity + }, + // Customize display name + DisplayField = "SomeCustomName" +}; + +// Query the index for suggestions +Dictionary suggestions = session + .Query() + // Call 'SuggestUsing' - pass the suggestion request + .SuggestUsing(suggestionRequest) + .Execute(); +`} + + + + +{`// Query the index for suggested terms - customize options and display name: +// ========================================================================= + +Dictionary suggestions = session.Advanced + // Query the index + .DocumentQuery() + // Call 'SuggestUsing' + .SuggestUsing(builder => builder + .ByField(x => x.ProductName, "chokolade") + // Customize suggestions options + .WithOptions(new SuggestionOptions + { + Accuracy = 0.3f, + PageSize = 5, + Distance = StringDistanceTypes.NGram, + SortMode = SuggestionSortMode.Popularity + }) + // Customize display name for results + .WithDisplayName("SomeCustomName")) + .Execute(); +`} + + + + +{`// Query for suggested terms - customize options and display name +from index "Products/ByName" +select suggest( + ProductName, + "chokolade", + '{ "Accuracy" : 0.3, "PageSize" : 5, "Distance" : "NGram", "SortMode" : "Popularity" }' +) as "SomeCustomName" +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +Console.WriteLine("Suggested terms:"); +// Results are available under the custom name entry +foreach (string suggestedTerm in suggestions["SomeCustomName"].Suggestions) +\{ + Console.WriteLine("\\t\{0\}", suggestedTerm); +\} + +// Suggested terms: +// chocolade +// schokolade +// chocolate +// chowder +// marmalade +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_suggestions-java.mdx b/versioned_docs/version-7.1/indexes/querying/_suggestions-java.mdx new file mode 100644 index 0000000000..446943eafb --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_suggestions-java.mdx @@ -0,0 +1,122 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +RavenDB has an indexing mechanism built upon the Lucene engine which has a great suggestions feature. This capability allows a significant improvement of search functionalities enhancing the overall user experience of the application. + +Let's consider an example where the users have the option to look for products by their name. 
The index and query would appear as follows:
+
+
+
+{`public class Products_ByName extends AbstractIndexCreationTask \{
+    public Products_ByName() \{
+        map = "from product in docs.Products " +
+            "select new " +
+            "\{ " +
+            "    product.Name " +
+            "\} ";
+
+        index("Name", FieldIndexing.SEARCH); // (optional) splitting name into multiple tokens
+        suggestion("Name"); // configuring suggestions
+    \}
+\}
+`}
+
+
+
+
+
+{`Product product = session
+    .query(Product.class, Products_ByName.class)
+    .search("Name", "chaig")
+    .firstOrDefault();
+`}
+
+
+
+If our database has the `Northwind` sample data deployed, then this query will not return any results. However, we can ask RavenDB for help:
+
+
+
+
+{`Map suggestionResult = session
+    .query(Product.class, Products_ByName.class)
+    .suggestUsing(builder -> builder.byField("Name", "chaig"))
+    .execute();
+
+System.out.println("Did you mean?");
+
+for (String suggestion : suggestionResult.get("Name").getSuggestions()) {
+    System.out.println("\\t" + suggestion);
+}
+`}
+
+
+
+
+{`from index 'Products/ByName'
+select suggest('Name', 'chaig')
+`}
+
+
+
+
+It will produce these suggestions:
+
+    Did you mean?
+    chang
+    chai
+
+
+
+You can read more about suggestions in our [Client API](../../client-api/session/querying/how-to-work-with-suggestions.mdx) article.
+
+
+
+## Suggest Over Multiple Words
+
+RavenDB allows you to perform a suggestion query over multiple words.
+
+
+
+{`SuggestionOptions options = new SuggestionOptions();
+options.setAccuracy(0.4f);
+options.setPageSize(5);
+options.setDistance(StringDistanceTypes.JARO_WINKLER);
+options.setSortMode(SuggestionSortMode.POPULARITY);
+
+Map resultsByMultipleWords = session
+    .query(Product.class, Products_ByName.class)
+    .suggestUsing(builder ->
+        builder.byField("Name", new String[]\{"chaig", "tof"\})
+            .withOptions(options))
+    .execute();
+
+System.out.println("Did you mean?");
+
+for (String suggestion : resultsByMultipleWords.get("Name").getSuggestions()) \{
+    System.out.println("\\t" + suggestion);
+\}
+`}
+
+
+
+This will produce the following results:
+
+    Did you mean?
+    chai
+    chang
+    chartreuse
+    chef
+    tofu
+
+## Remarks
+
+
+
+Indexes with suggestions turned on tend to use a lot more CPU power than other indexes. This can impact indexing speed (querying is not impacted).
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/querying/_suggestions-nodejs.mdx b/versioned_docs/version-7.1/indexes/querying/_suggestions-nodejs.mdx
new file mode 100644
index 0000000000..5b2d2f4fec
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/querying/_suggestions-nodejs.mdx
@@ -0,0 +1,341 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Prior to reading this article, please refer to [query for suggestions](../../client-api/session/querying/how-to-work-with-suggestions.mdx)
+  for general knowledge about Suggestions and for dynamic-queries examples.
+
+* In addition to getting suggested terms when making a dynamic-query,
+  you can query for similar terms when querying an index.
+
+* This article provides examples of querying an index for suggestions.
+  Find the Suggestions API methods listed [here](../../client-api/session/querying/how-to-work-with-suggestions.mdx#syntax).
+ +* In this page: + * [Configure the index for suggestions](../../indexes/querying/suggestions.mdx#configure-the-index-for-suggestions) + * [The index terms](../../indexes/querying/suggestions.mdx#the-index-terms) + * [Suggest terms - for a single term](../../indexes/querying/suggestions.mdx#suggest-terms---for-a-single-term) + * [Suggest terms - for multiple terms](../../indexes/querying/suggestions.mdx#suggest-terms---for-multiple-terms) + * [Suggest terms - for multiple fields](../../indexes/querying/suggestions.mdx#suggest-terms---for-multiple-fields) + * [Suggest terms - customize options and display name](../../indexes/querying/suggestions.mdx#suggest-terms---customize-options-and-display-name) + + +## Configure the index for suggestions + +* In order to be able to ask for suggested terms when querying an index field, + that field must first be configured for suggestions in the **index definition**. + +* See the following sample index: + (This index will be used in the examples ahead). + + + +{`class Products_ByName extends AbstractJavaScriptIndexCreationTask \{ + constructor() \{ + super(); + + this.map("Products", p => \{ + return \{ + ProductName: p.Name + \}; + \}); + + // Configure index-field 'ProductName' for suggestions + this.suggestion("ProductName"); + + // Optionally: set 'Search' on this field + // This will split the field content into multiple terms allowing for a full-text search + this.index("ProductName", "Search"); + \} +\} +`} + + + + + +**Increased indexing time**: + +* When configuring an index for suggestions, then during the indexing process, + in addition to the regular breakdown of the data into terms (tokenization), + RavenDB will scramble the terms to simulate common errors. + +* This can impact indexing speed but the cost of querying suggestions is Not impacted. + + + + + +## The index terms + +Based on the **Northwind sample data**, +these are the terms generated for the above index `Products/ByName`: + +![Figure 1. Index terms](./assets/index-terms.png) + +1. **The index-field name** - as defined in the index definition. + In this example the field name is `ProductName`. + +2. **The terms** that were generated for this index-field from the documents in the Products collection. + * The image shows a partial view out of the 163 terms in this list. + * The terms were generated by RavenDB's [default search analyzer](../../indexes/using-analyzers.mdx#ravendb) since full-text search was set on this field. + + + +## Suggest terms - for a single term + +Based on the **Northwind sample data**, +the following query on the index `Products/ByName` from above has no resulting documents, +since the term `chokolade` does Not exist in the index terms for index-field `ProductName`. 
+
+
+
+
+{`// This query on index 'Products/ByName' has NO resulting documents
+const products = await session
+    .query(\{ indexName: "Products/ByName" \})
+    .search("ProductName", "chokolade")
+    .all();
+`}
+
+
+
+If you suspect that the term `chokolade` in the query criteria is written incorrectly,
+you can ask RavenDB to suggest similar terms from the index, as follows:
+
+
+
+
+{`// Query the index for suggested terms for single term:
+// ====================================================
+
+const suggestions = await session
+    // Query the index
+    .query({ indexName: "Products/ByName" })
+    // Call 'suggestUsing'
+    .suggestUsing(x => x
+        // Request to get terms from index-field 'ProductName' that are similar to 'chokolade'
+        .byField("ProductName", "chokolade"))
+    .execute();
+`}
+
+
+
+
+{`// Query for terms from index-field 'ProductName' that are similar to 'chokolade'
+from index "Products/ByName"
+select suggest(ProductName, "chokolade")
+`}
+
+
+
+
+
+
+{`// The resulting suggested terms:
+// ==============================
+
+console.log("Suggested terms in index-field 'ProductName' that are similar to 'chokolade':");
+suggestions["ProductName"].suggestions.forEach(suggestedTerm => \{
+    console.log("\\t" + suggestedTerm);
+\});
+
+// Suggested terms in index-field 'ProductName' that are similar to 'chokolade':
+// schokolade
+// chocolade
+// chocolate
+`}
+
+
+
+
+
+## Suggest terms - for multiple terms
+
+
+
+
+{`// Query the index for suggested terms for multiple terms:
+// =======================================================
+
+const suggestions = await session
+    // Query the index
+    .query({ indexName: "Products/ByName" })
+    // Call 'suggestUsing'
+    .suggestUsing(x => x
+        // Request to get terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop'
+        .byField("ProductName", ["chokolade", "syrop"]))
+    .execute();
+`}
+
+
+
+
+{`// Query for terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop'
+from index "Products/ByName" select suggest(ProductName, $p0)
+{ "p0" : ["chokolade", "syrop"] }
+`}
+
+
+
+
+
+
+{`// The resulting suggested terms:
+// ==============================
+
+// Suggested terms in index-field 'ProductName' that are similar to 'chokolade' OR to 'syrop':
+// schokolade
+// chocolade
+// chocolate
+// sirop
+// syrup
+`}
+
+
+
+
+
+## Suggest terms - for multiple fields
+
+
+
+
+{`// Query the index for suggested terms in multiple fields:
+// =======================================================
+
+const suggestions = await session
+    // Query the index
+    .query({ indexName: "Companies/ByNameAndByContactName" })
+    // Call 'suggestUsing' to get suggestions for terms that are
+    // similar to 'chese' in first index-field (e.g. 'CompanyName')
+    .suggestUsing(x => x.byField("CompanyName", "chese"))
+    // Call 'andSuggestUsing' to get suggestions for terms that are
+    // similar to 'frank' in an additional index-field (e.g.
'ContactName') + .andSuggestUsing(x => x.byField("ContactName", "frank")) + .execute(); +`} + + + + +{`class Companies_ByNameAndByContactName extends AbstractJavaScriptIndexCreationTask { + constructor() { + super(); + + this.map("Companies", p => { + return { + CompanyName: p.Name, + ContactName: p.Contact.Name + }; + }); + + // Configure the index-fields for suggestions + this.suggestion("CompanyName"); + this.suggestion("ContactName"); + + // Optionally: set 'Search' on the index-fields + // This will split the fields' content into multiple terms allowing for a full-text search + this.index("CompanyName", "Search"); + this.index("ContactName", "Search"); + } +} +`} + + + + +{`// Query for suggested terms +// from index-field 'CompanyName' AND from index-field 'ContactName' +from index "Companies/ByNameAndByContactName" +select suggest(CompanyName, "chese"), suggest(ContactName, "frank") +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +// Suggested terms in index-field 'CompanyName' that is similar to 'chese': +// cheese +// chinese + +// Suggested terms in index-field 'ContactName' that are similar to 'frank': +// fran +// franken +`} + + + + + +## Suggest terms - customize options and display name + + + + +{`// Query the index for suggested terms - customize options and display name: +// ========================================================================= + +const suggestions = await session + // Query the index + .query({ indexName: "Products/ByName" }) + // Call 'suggestUsing' + .suggestUsing(x => x + .byField("ProductName", "chokolade") + // Customize suggestions options + .withOptions({ + accuracy: 0.3, + pageSize: 5, + distance: "NGram", + sortMode: "Popularity" + }) + // Customize display name for results + .withDisplayName("SomeCustomName")) + .execute(); +`} + + + + +{`// Query for suggested terms - customize options and display name +from index "Products/ByName" +select suggest( + ProductName, + "chokolade", + '{ "Accuracy" : 0.3, "PageSize" : 5, "Distance" : "NGram", "SortMode" : "Popularity" }' +) as "SomeCustomName" +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +console.log("Suggested terms:"); +// Results are available under the custom name entry +suggestions["SomeCustomName"].suggestions.forEach(suggestedTerm => \{ + console.log("\\t" + suggestedTerm); +\}); + +// Suggested terms: +// chocolade +// schokolade +// chocolate +// chowder +// marmalade +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_suggestions-php.mdx b/versioned_docs/version-7.1/indexes/querying/_suggestions-php.mdx new file mode 100644 index 0000000000..bbfa5951fd --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_suggestions-php.mdx @@ -0,0 +1,585 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Prior to reading this article, please refer to [query for suggestions](../../client-api/session/querying/how-to-work-with-suggestions.mdx) + for general knowledge about Suggestions and for dynamic-queries examples. + +* In addition to getting suggested terms when making a dynamic-query, + you can query for similar terms when querying an index. + +* This article provides examples of querying an index for suggestions. + Find the Suggestions API methods listed [here](../../client-api/session/querying/how-to-work-with-suggestions.mdx#syntax). 
+
+* In this page:
+    * [Configure the index for suggestions](../../indexes/querying/suggestions.mdx#configure-the-index-for-suggestions)
+    * [The index terms](../../indexes/querying/suggestions.mdx#the-index-terms)
+    * [Suggest terms - for a single term](../../indexes/querying/suggestions.mdx#suggest-terms---for-a-single-term)
+    * [Suggest terms - for multiple terms](../../indexes/querying/suggestions.mdx#suggest-terms---for-multiple-terms)
+    * [Suggest terms - for multiple fields](../../indexes/querying/suggestions.mdx#suggest-terms---for-multiple-fields)
+    * [Suggest terms - customize options and display name](../../indexes/querying/suggestions.mdx#suggest-terms---customize-options-and-display-name)
+
+
+## Configure the index for suggestions
+
+* In order to be able to ask for suggested terms when querying an index field,
+  that field must first be configured for suggestions in the **index definition**.
+
+* See the following sample index:
+  (This index will be used in the examples ahead).
+
+
+
+{`// The IndexEntry class defines the index-fields
+class Products_ByName_IndexEntry
+\{
+    private ?string $productName = null;
+
+    public function getProductName(): ?string
+    \{
+        return $this->productName;
+    \}
+
+    public function setProductName(?string $productName): void
+    \{
+        $this->productName = $productName;
+    \}
+\}
+class Products_ByName extends AbstractIndexCreationTask
+\{
+    public function __construct()
+    \{
+        parent::__construct();
+
+        // The 'Map' function defines the content of the index-fields
+        $this->map = "from product in docs.Products " .
+            "select new " .
+            "\{ " .
+            "    ProductName = product.Name " .
+            "\} ";
+
+        // Configure index-field 'ProductName' for suggestions
+        $this->suggestion("ProductName"); // configuring suggestions
+
+        // Optionally: set 'Search' on this field
+        // This will split the field content into multiple terms allowing for a full-text search
+        $this->index("ProductName", FieldIndexing::search()); // (optional) splitting name into multiple tokens
+
+    \}
+\}
+`}
+
+
+
+
+
+**Increased indexing time**:
+
+* When configuring an index for suggestions, then during the indexing process,
+  in addition to the regular breakdown of the data into terms (tokenization),
+  RavenDB will scramble the terms to simulate common errors.
+
+* This can impact indexing speed but the cost of querying suggestions is Not impacted.
+
+
+
+
+
+## The index terms
+
+Based on the Northwind sample data,
+these are the terms generated for the above index `Products/ByName`:
+
+![Figure 1. Index terms](./assets/index-terms.png)
+
+1. **The index-field name** - as defined in the index definition.
+   In this example the field name is `ProductName`.
+
+2. **The terms** that were generated for this index-field from the documents in the Products collection.
+   * The image shows a partial view out of the 163 terms in this list.
+   * The terms were generated by RavenDB's [default search analyzer](../../indexes/using-analyzers.mdx#ravendb) since full-text search was set on this field.
+
+
+
+## Suggest terms - for a single term
+
+Based on the Northwind sample data,
+the following query on the index `Products/ByName` from above has no resulting documents,
+since the term `chokolade` does Not exist in the index terms for index-field `ProductName`.
+ + + +{`// This query on index 'Products/ByName' has NO resulting documents +/** @var array $products */ +$products = $session + ->query(Products_ByName_IndexEntry::class, Products_ByName::class) + ->search("ProductName", "chokolade") + ->ofType(Product::class) + ->toList(); +`} + + + +If you suspect that the term `chokolade` in the query criteria is written incorrectly, +you can ask RavenDB to suggest similar terms from the index, as follows: + + + + +{`// Query the index for suggested terms for single term: +// ==================================================== + +/** @var array $suggestions */ +$suggestions = $session + // Query the index + ->query(Products_ByName_IndexEntry::class, Products_ByName::class) + // Call 'SuggestUsing' + ->suggestUsing(function ($builder) { + // Request to get terms from index-field 'ProductName' that are similar to 'chokolade' + return $builder->byField("ProductName", "chokolade"); + }) + ->execute(); +`} + + + + +{`// Define the suggestion request for single term +$suggestionRequest = new SuggestionWithTerm("ProductName"); +$suggestionRequest->setTerm("chokolade"); + +// Query the index for suggestions +/** @var array $suggestions */ +$suggestions = $session + ->query(Products_ByName_IndexEntry::class, Products_ByName::class) + // Call 'SuggestUsing' - pass the suggestion request + ->suggestUsing($suggestionRequest) + ->Execute(); +`} + + + + +{`// Query the index for suggested terms for single term: +// ==================================================== + +/** @var array $suggestions */ +$suggestions = $session->advanced() + // Query the index + ->documentQuery(Products_ByName_IndexEntry::class, Products_ByName::class) + // Call 'SuggestUsing' + ->suggestUsing(function($builder) { + // Request to get terms from index-field 'ProductName' that are similar to 'chokolade' + return $builder->byField("ProductName", "chokolade"); + }) + ->execute(); +`} + + + + +{`// Query for terms from index-field 'ProductName' that are similar to 'chokolade' +from index "Products/ByName" +select suggest(ProductName, "chokolade") +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +echo "Suggested terms in index-field 'ProductName' that are similar to 'chokolade':"; +foreach ($suggestions["ProductName"]->getSuggestions() as $suggestedTerm) +\{ + echo "\\t" . 
$suggestedTerm; +\} + +// Suggested terms in index-field 'ProductName' that are similar to 'chokolade': +// schokolade +// chocolade +// chocolate +`} + + + + + +## Suggest terms - for multiple terms + + + + +{`// Query the index for suggested terms for multiple terms: +// ======================================================= + +/** @var array $suggestions */ +$suggestions = $session + // Query the index + ->query(Products_ByName_IndexEntry::class, Products_ByName::class) + // Call 'SuggestUsing' + ->suggestUsing(function($builder) { + return $builder + // Request to get terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop' + ->ByField("ProductName", ["chokolade", "syrop"]); + }) + ->execute(); +`} + + + + +{`// Define the suggestion request for multiple terms +$suggestionRequest = new SuggestionWithTerms("ProductName"); +$suggestionRequest->setTerms([ "chokolade", "syrop" ]); + +// Query the index for suggestions +/** @var array $suggestions */ +$suggestions = $session + ->query(Products_ByName_IndexEntry::class, Products_ByName::class) + // Call 'SuggestUsing' - pass the suggestion request + ->suggestUsing($suggestionRequest) + ->execute(); +`} + + + + +{`// Query the index for suggested terms for multiple terms: +// ======================================================= + +/** @var array $suggestions */ +$suggestions = $session->advanced() + // Query the index + ->documentQuery(Products_ByName_IndexEntry::class, Products_ByName::class) + // Call 'SuggestUsing' + ->suggestUsing(function($builder) { + return $builder + // Request to get terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop' + ->byField("ProductName", [ "chokolade", "syrop" ]); + }) + ->execute(); +`} + + + + +{`// Query for terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop' +from index "Products/ByName" select suggest(ProductName, $p0) +{ "p0" : ["chokolade", "syrop"] } +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +// Suggested terms in index-field 'ProductName' that are similar to 'chokolade' OR to 'syrop': +// schokolade +// chocolade +// chocolate +// sirop +// syrup +`} + + + + + +## Suggest terms - for multiple fields + + + + +{`// Query the index for suggested terms in multiple fields: +// ======================================================= + +/** @var array $suggestions */ +$suggestions = $session + // Query the index + ->query(Companies_ByNameAndByContactName_IndexEntry::class, Companies_ByNameAndByContactName::class) + // Call 'SuggestUsing' to get suggestions for terms that are + // similar to 'chese' in first index-field (e.g. 'CompanyName') + ->suggestUsing(function($builder) { + return $builder + ->byField("CompanyName", "chese" ); + }) + // Call 'AndSuggestUsing' to get suggestions for terms that are + // similar to 'frank' in an additional index-field (e.g. 
'ContactName')
+    ->andSuggestUsing(function($builder) {
+        return $builder
+            ->byField("ContactName", "frank");
+    })
+    ->execute();
+`}
+
+
+
+
+{`// Define suggestion requests for multiple fields:
+
+$request1 = new SuggestionWithTerm("CompanyName");
+// Looking for terms from index-field 'CompanyName' that are similar to 'chese'
+$request1->setTerm("chese");
+
+$request2 = new SuggestionWithTerm("ContactName");
+// Looking for terms from nested index-field 'ContactName' that are similar to 'frank'
+$request2->setTerm("frank");
+
+// Query the index for suggestions
+/** @var array $suggestions */
+$suggestions = $session
+    ->query(Companies_ByNameAndByContactName_IndexEntry::class, Companies_ByNameAndByContactName::class)
+    // Call 'SuggestUsing' - pass the suggestion request for the first index-field
+    ->suggestUsing($request1)
+    // Call 'AndSuggestUsing' - pass the suggestion request for the second index-field
+    ->andSuggestUsing($request2)
+    ->execute();
+`}
+
+
+
+
+{`// Query the index for suggested terms in multiple fields:
+// =======================================================
+
+/** @var array $suggestions */
+$suggestions = $session->advanced()
+    // Query the index
+    ->documentQuery(Companies_ByNameAndByContactName_IndexEntry::class, Companies_ByNameAndByContactName::class)
+    // Call 'SuggestUsing' to get suggestions for terms that are
+    // similar to 'chese' in first index-field (e.g. 'CompanyName')
+    ->suggestUsing(function($builder) {
+        return $builder
+            ->byField("CompanyName", "chese");
+    })
+    // Call 'AndSuggestUsing' to get suggestions for terms that are
+    // similar to 'frank' in an additional index-field (e.g. 'ContactName')
+    ->andSuggestUsing(function($builder) {
+        return $builder
+            ->byField("ContactName", "frank");
+    })
+    ->execute();
+`}
+
+
+
+
+{`// The IndexEntry class defines the index-fields.
+class Companies_ByNameAndByContactName_IndexEntry
+{
+    private ?string $companyName = null;
+    private ?string $contactName = null;
+
+    public function getCompanyName(): ?string
+    {
+        return $this->companyName;
+    }
+
+    public function setCompanyName(?string $companyName): void
+    {
+        $this->companyName = $companyName;
+    }
+
+    public function getContactName(): ?string
+    {
+        return $this->contactName;
+    }
+
+    public function setContactName(?string $contactName): void
+    {
+        $this->contactName = $contactName;
+    }
+}
+
+class Companies_ByNameAndByContactName extends AbstractIndexCreationTask
+{
+    public function __construct()
+    {
+        parent::__construct();
+
+        // The 'Map' function defines the content of the index-fields
+        $this->map = "from company in docs.Companies " .
+            "select new { " .
+            "CompanyName = company.Name, " .
+            "ContactName = company.Contact.Name " .
+ "}"; + + // Configure the index-fields for suggestions + $this->suggestion("CompanyName"); + $this->suggestion("ContactName"); + + // Optionally: set 'Search' on the index-fields + // This will split the fields' content into multiple terms allowing for a full-text search + $this->index("CompanyName", FieldIndexing::search()); + $this->index("ContactName", FieldIndexing::search()); + } +} +`} + + + + +{`// Query for suggested terms +// from index-field 'CompanyName' AND from index-field 'ContactName' +from index "Companies/ByNameAndByContactName" +select suggest(CompanyName, "chese"), suggest(ContactName, "frank") +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +// Suggested terms in index-field 'CompanyName' that is similar to 'chese': +// cheese +// chinese + +// Suggested terms in index-field 'ContactName' that are similar to 'frank': +// fran +// franken +`} + + + + + +## Suggest terms - customize options and display name + + + + +{`// Query the index for suggested terms - customize options and display name: +// ========================================================================= + +/** @var array $suggestions */ +$suggestions = $session + // Query the index + ->query(Products_ByName_IndexEntry::class, Products_ByName::class) + // Call 'SuggestUsing' + ->suggestUsing(function($builder) { + $suggestionOptions = new SuggestionOptions(); + $suggestionOptions->setAccuracy(0.3); + $suggestionOptions->setPageSize(5); + $suggestionOptions->setDistance(StringDistanceTypes::nGram()); + $suggestionOptions->setSortMode(SuggestionSortMode::popularity()); + + $builder + ->byField("ProductName", "chokolade") + // Customize suggestions options + ->withOptions($suggestionOptions) + // Customize display name for results + ->withDisplayName("SomeCustomName"); + }) + ->execute(); +`} + + + + +{`// Define the suggestion request +$suggestionRequest = new SuggestionWithTerm("ProductName"); +// Looking for terms from index-field 'ProductName' that are similar to 'chokolade' +$suggestionRequest->setTerm("chokolade"); + +// Customize options +$options = new SuggestionOptions(); +$options->setAccuracy(0.3); +$options->setPageSize(5); +$options->setDistance(StringDistanceTypes::nGram()); +$options->setSortMode(SuggestionSortMode::popularity()); + +$suggestionRequest->setOptions($options); + +// Customize display name +$suggestionRequest->setDisplayField("SomeCustomName"); + + +// Query the index for suggestions +/** @var array $suggestions */ +$suggestions = $session + ->query(Products_ByName_IndexEntry::class, Products_ByName::class) + // Call 'SuggestUsing' - pass the suggestion request + ->suggestUsing($suggestionRequest) + ->execute(); +`} + + + + +{`// Query the index for suggested terms - customize options and display name: +// ========================================================================= + +/** @var array $suggestions */ +$suggestions = $session->advanced() + // Query the index + ->documentQuery(Products_ByName_IndexEntry::class, Products_ByName::class) + // Call 'SuggestUsing' + ->suggestUsing(function($builder) { + $options = new SuggestionOptions(); + $options->setAccuracy(0.3); + $options->setPageSize(5); + $options->setDistance(StringDistanceTypes::nGram()); + $options->setSortMode(SuggestionSortMode::popularity()); + + return $builder + ->byField("ProductName", "chokolade") + // Customize suggestions options + ->withOptions($options) + // Customize display name for results + ->withDisplayName("SomeCustomName"); + }) + ->execute(); +`} + + + 
+ +{`// Query for suggested terms - customize options and display name +from index "Products/ByName" +select suggest( + ProductName, + "chokolade", + '{ "Accuracy" : 0.3, "PageSize" : 5, "Distance" : "NGram", "SortMode" : "Popularity" }' +) as "SomeCustomName" +`} + + + + + + +{`// The resulting suggested terms: +// ============================== + +echo "Suggested terms:"; +// Results are available under the custom name entry +foreach ($suggestions["SomeCustomName"]->getSuggestions() as $suggestedTerm) +\{ + echo "\\t" . $suggestedTerm; +\} + +// Suggested terms: +// chocolade +// schokolade +// chocolate +// chowder +// marmalade +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_suggestions-python.mdx b/versioned_docs/version-7.1/indexes/querying/_suggestions-python.mdx new file mode 100644 index 0000000000..79cee84018 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_suggestions-python.mdx @@ -0,0 +1,424 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Prior to reading this article, please refer to [query for suggestions](../../client-api/session/querying/how-to-work-with-suggestions.mdx) + for general knowledge about Suggestions and for dynamic-queries examples. + +* In addition to getting suggested terms when making a dynamic-query, + you can query for similar terms when querying an index. + +* This article provides examples of querying an index for suggestions. + Find the Suggestions API methods listed [here](../../client-api/session/querying/how-to-work-with-suggestions.mdx#syntax). + +* In this page: + * [Configure the index for suggestions](../../indexes/querying/suggestions.mdx#configure-the-index-for-suggestions) + * [The index terms](../../indexes/querying/suggestions.mdx#the-index-terms) + * [Suggest terms - for a single term](../../indexes/querying/suggestions.mdx#suggest-terms---for-a-single-term) + * [Suggest terms - for multiple terms](../../indexes/querying/suggestions.mdx#suggest-terms---for-multiple-terms) + * [Suggest terms - for multiple fields](../../indexes/querying/suggestions.mdx#suggest-terms---for-multiple-fields) + * [Suggest terms - customize options and display name](../../indexes/querying/suggestions.mdx#suggest-terms---customize-options-and-display-name) + + +## Configure the index for suggestions + +* In order to be able to ask for suggested terms when querying an index field, + that field must first be configured for suggestions in the **index definition**. + +* See the following sample index: + (This index will be used in the examples ahead). + + + +{`class Products_ByName(AbstractIndexCreationTask): + # The IndexEntry class defines the index-fields + class IndexEntry: + def __init__(self, product_name: str = None): + self.product_name = product_name + + def __init__(self): + super().__init__() + # The 'map' function defines the content of the index-fields + self.map = "from product in docs.Products select new \{product_name = product.Name\}" + self._suggestion("product_name") + self._index("product_name", FieldIndexing.SEARCH) +`} + + + + + +**Increased indexing time**: + +* When configuring an index for suggestions, then during the indexing process, + in addition to the regular breakdown of the data into terms (tokenization), + RavenDB will scramble the terms to simulate common errors. + +* This can impact indexing speed but the cost of querying suggestions is Not impacted. 
+ + + + + +## The index terms + +Based on the Northwind sample data, +these are the terms generated for the above index `Products/ByName`: + +![Figure 1. Index terms](./assets/index-terms.png) + +1. **The index-field name** - as defined in the index definition. + In this example the field name is `ProductName`. + +2. **The terms** that were generated for this index-field from the documents in the Products collection. + * The image shows a partial view out of the 163 terms in this list. + * The terms were generated by RavenDB's [default search analyzer](../../indexes/using-analyzers.mdx#ravendb) since full-text search was set on this field. + + + +## Suggest terms - for a single term + +Based on the Northwind sample data, +the following query on the index `Products/ByName` from above has no resulting documents, +since the term `chokolade` does Not exist in the index terms for index-field `ProductName`. + + + +{`# This query on index 'Products/ByName' has NO resulting documents +products = list( + session.query_index_type(Products_ByName, Products_ByName.IndexEntry) + .search("product_name", "chokolade") + .of_type(Product) +) +`} + + + +If you suspect that the term `chokolade` in the query criteria is written incorrectly, +you can ask RavenDB to suggest similar terms from the index, as follows: + + + + +{`# Query the index for suggested terms for single term: +# ==================================================== + +suggestions = ( + session + # Query the index + .query_index_type(Products_ByName, Products_ByName.IndexEntry) + # Call 'suggest_using' + .suggest_using( + lambda builder: builder + # Request to get terms from index-field 'ProductName' that are similar to 'chokolade' + .by_field("product_name", "chokolade") + ).execute() +) +`} + + + + +{`# Define the suggestion request for single term +suggestion_request = SuggestionWithTerm("product_name") +# Looking for terms from index-field 'product_name' that are similar to 'chokolade' +suggestion_request.term = "chokolade" + +# Query the index for suggestions +suggestions = ( + session.query_index_type(Products_ByName, Products_ByName.IndexEntry) + # Call 'suggest_using' - pass the suggestion request + .suggest_using(suggestion_request).execute() +) +`} + + + + +{`// Query for terms from index-field 'ProductName' that are similar to 'chokolade' +from index "Products/ByName" +select suggest(ProductName, "chokolade") +`} + + + + + + +{`# The resulting suggested terms: +# ============================== + +print("Suggested terms in index-field 'product_name' that are similar to 'chokolade':") +for suggested_term in suggestions["product_name"].suggestions: + print(f"\\t\{suggested_term\}") + +# Suggested terms in index-field 'product_name' that are similar to 'chokolade': +# schokolade +# chocolade +# chocolate +`} + + + + + +## Suggest terms - for multiple terms + + + + +{`# Query the index for suggested terms for multiple terms: +# ======================================================= + +suggestions = ( + session + # Query the index + .query_index_type(Products_ByName, Products_ByName.IndexEntry) + # Call 'suggest_using' + .suggest_using( + lambda builder: builder + # Request to get terms from index-field 'product_name' that are similar to 'chokolade' OR 'syrop' + .by_field("product_name", ["chokolade", "syrop"]) + ).execute() +) +`} + + + + +{`# Define the suggestion request for multiple terms +suggestion_request = SuggestionWithTerms("product_name") +# Looking for terms from index-field 'product_name' that are similar to 'chokolade' OR 
'syrop'
+suggestion_request.terms = ["chokolade", "syrop"]
+
+# Query the index for suggestions
+suggestions = (
+    session.query_index_type(Products_ByName, Products_ByName.IndexEntry)
+    # Call 'suggest_using' - pass the suggestion request
+    .suggest_using(suggestion_request).execute()
+)
+`}
+
+
+
+
+{`// Query for terms from index-field 'ProductName' that are similar to 'chokolade' OR 'syrop'
+from index "Products/ByName" select suggest(ProductName, $p0)
+{ "p0" : ["chokolade", "syrop"] }
+`}
+
+
+
+
+
+
+{`# The resulting suggested terms:
+# ==============================
+
+# Suggested terms in index-field 'product_name' that are similar to 'chokolade' OR to 'syrop':
+# schokolade
+# chocolade
+# chocolate
+# sirop
+# syrup
+`}
+
+
+
+
+
+## Suggest terms - for multiple fields
+
+
+
+
+{`# Query the index for suggested terms in multiple fields:
+# =======================================================
+
+suggestions = (
+    session
+    # Query the index
+    .query_index_type(Companies_ByNameAndByContactName, Companies_ByNameAndByContactName.IndexEntry)
+    # Call 'suggest_using' to get suggestions for terms that are
+    # similar to 'chese' in first index-field (e.g. 'company_name')
+    .suggest_using(lambda builder: builder.by_field("company_name", "chese"))
+    # Call 'and_suggest_using' to get suggestions for terms that are
+    # similar to 'frank' in an additional index-field (e.g. 'contact_name')
+    .and_suggest_using(lambda builder: builder.by_field("contact_name", "frank")).execute()
+)
+`}
+
+
+
+
+{`# Define suggestion requests for multiple fields:
+
+request1 = SuggestionWithTerm("company_name")
+# Looking for terms from index-field 'company_name' that are similar to 'chese'
+request1.term = "chese"
+
+request2 = SuggestionWithTerm("contact_name")
+# Looking for terms from nested index-field 'contact_name' that are similar to 'frank'
+request2.term = "frank"
+
+# Query the index for suggestions
+suggestions = (
+    session.query_index_type(
+        Companies_ByNameAndByContactName, Companies_ByNameAndByContactName.IndexEntry
+    )
+    # Call 'suggest_using' - pass the suggestion request for the first index-field
+    .suggest_using(request1)
+    # Call 'and_suggest_using' - pass the suggestion request for the second index-field
+    .and_suggest_using(request2).execute()
+)
+`}
+
+
+
+
+{`class Companies_ByNameAndByContactName(AbstractIndexCreationTask):
+    class IndexEntry:
+        def __init__(self, company_name: str = None, contact_name: str = None):
+            self.company_name = company_name
+            self.contact_name = contact_name
+
+    def __init__(self):
+        super().__init__()
+        self.map = "from company in docs.Companies select new {company_name = company.Name, contact_name = company.Contact.Name}"
+
+        # Configure the index-fields for suggestions
+        self._suggestion("company_name")
+        self._suggestion("contact_name")
+
+        # Optionally: set 'search' on the index-fields
+        # This will split the fields' content into multiple terms allowing for a full-text search
+        self._index("company_name", FieldIndexing.SEARCH)
+        self._index("contact_name", FieldIndexing.SEARCH)
+`}
+
+
+
+
+{`// Query for suggested terms
+// from index-field 'CompanyName' AND from index-field 'ContactName'
+from index "Companies/ByNameAndByContactName"
+select suggest(CompanyName, "chese"), suggest(ContactName, "frank")
+`}
+
+
+
+
+
+
+{`# The resulting suggested terms:
+# ==============================
+
+# Suggested terms in index-field 'company_name' that are similar to 'chese':
+# cheese
+# chinese
+
+# Suggested terms in index-field 'contact_name' that are
similar to 'frank': +# fran +# franken +`} + + + + + +## Suggest terms - customize options and display name + + + + +{`# Query the index for suggested terms - customize options and display name: +# ========================================================================= + +suggestions = ( + session + # Query the index + .query_index_type(Products_ByName, Products_ByName.IndexEntry) + # Call 'suggest_using' + .suggest_using( + lambda builder: builder.by_field("product_name", "chokolade") + # Customize suggestions options + .with_options( + SuggestionOptions( + accuracy=0.3, + page_size=5, + distance=StringDistanceTypes.N_GRAM, + sort_mode=SuggestionSortMode.POPULARITY, + ) + ) + # Customize display name for results + .with_display_name("SomeCustomName") + ).execute() +) +`} + + + + +{`# Define the suggestion request +suggestion_request = SuggestionWithTerm("product_name") +# Looking for terms from index-field 'ProductName' that are similar to 'chokolade' +suggestion_request.term = "chokolade" +# Customize options +suggestion_request.options = SuggestionOptions( + accuracy=0.3, + page_size=5, + distance=StringDistanceTypes.N_GRAM, + sort_mode=SuggestionSortMode.POPULARITY, +) +# Customize display name +suggestion_request.display_field = "SomeCustomName" + +# Query the index for suggestions +suggestions = ( + session.query_index_type(Products_ByName, Products_ByName.IndexEntry) + # Call 'suggest_using' - pass the suggestion request + .suggest_using(suggestion_request).execute() +) +`} + + + + +{`// Query for suggested terms - customize options and display name +from index "Products/ByName" +select suggest( + ProductName, + "chokolade", + '{ "Accuracy" : 0.3, "PageSize" : 5, "Distance" : "NGram", "SortMode" : "Popularity" }' +) as "SomeCustomName" +`} + + + + + + +{`# The resulting suggested terms: +# ============================== + +print("Suggested terms:") +# Results are available under the custom name entry +for suggested_term in suggestions["SomeCustomName"].suggestions: + print(f"\\t\{suggested_term\}") + +# Suggested terms: +# chocolade +# schokolade +# chocolate +# chowder +# marmalade +`} + + + + + + diff --git a/versioned_docs/version-7.1/indexes/querying/_vector-search-csharp.mdx b/versioned_docs/version-7.1/indexes/querying/_vector-search-csharp.mdx new file mode 100644 index 0000000000..95ab553735 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/_vector-search-csharp.mdx @@ -0,0 +1,21 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Vector search enables you to retrieve data based on **contextual relevance**, rather than relying on exact keyword matches. + +* In addition to its other capabilities, RavenDB serves as a **vector database** that allows you to efficiently store, index, and search vector representations. + +* You can perform vector searches to locate documents based on their **content's similarity** to a given search item in your queries. 
+ +* This feature is covered in detail in the following articles: + + * [RavenDB as a Vector Database](../../ai-integration/vector-search/ravendb-as-vector-database.mdx) + * [Vector Search using a Dynamic Query](../../ai-integration/vector-search/vector-search-using-dynamic-query.mdx) + * [Vector Search using a Static Index](../../ai-integration/vector-search/vector-search-using-static-index.mdx) + + + diff --git a/versioned_docs/version-7.1/indexes/querying/assets/CNET_faceted_search.jpg b/versioned_docs/version-7.1/indexes/querying/assets/CNET_faceted_search.jpg new file mode 100644 index 0000000000..adfe1a5beb Binary files /dev/null and b/versioned_docs/version-7.1/indexes/querying/assets/CNET_faceted_search.jpg differ diff --git a/versioned_docs/version-7.1/indexes/querying/assets/index-terms.png b/versioned_docs/version-7.1/indexes/querying/assets/index-terms.png new file mode 100644 index 0000000000..edfffd7ea8 Binary files /dev/null and b/versioned_docs/version-7.1/indexes/querying/assets/index-terms.png differ diff --git a/versioned_docs/version-7.1/indexes/querying/assets/performance-hint.png b/versioned_docs/version-7.1/indexes/querying/assets/performance-hint.png new file mode 100644 index 0000000000..d4270a91de Binary files /dev/null and b/versioned_docs/version-7.1/indexes/querying/assets/performance-hint.png differ diff --git a/versioned_docs/version-7.1/indexes/querying/assets/spatial_1.png b/versioned_docs/version-7.1/indexes/querying/assets/spatial_1.png new file mode 100644 index 0000000000..697e58d9bf Binary files /dev/null and b/versioned_docs/version-7.1/indexes/querying/assets/spatial_1.png differ diff --git a/versioned_docs/version-7.1/indexes/querying/distinct.mdx b/versioned_docs/version-7.1/indexes/querying/distinct.mdx new file mode 100644 index 0000000000..8af5367c5d --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/distinct.mdx @@ -0,0 +1,44 @@ +--- +title: "Query for distinct results" +hide_table_of_contents: true +sidebar_label: Distinct +sidebar_position: 6 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DistinctCsharp from './_distinct-csharp.mdx'; +import DistinctJava from './_distinct-java.mdx'; +import DistinctNodejs from './_distinct-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/exploration-queries.mdx b/versioned_docs/version-7.1/indexes/querying/exploration-queries.mdx new file mode 100644 index 0000000000..436f9d02eb --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/exploration-queries.mdx @@ -0,0 +1,49 @@ +--- +title: "Exploration Queries" +hide_table_of_contents: true +sidebar_label: Exploration Queries +sidebar_position: 2 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ExplorationQueriesCsharp from './_exploration-queries-csharp.mdx'; +import ExplorationQueriesPython from './_exploration-queries-python.mdx'; +import ExplorationQueriesPhp from './_exploration-queries-php.mdx'; +import ExplorationQueriesNodejs from './_exploration-queries-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/versioned_docs/version-7.1/indexes/querying/faceted-search.mdx b/versioned_docs/version-7.1/indexes/querying/faceted-search.mdx new file mode 100644 index 0000000000..01e9860ae3 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/faceted-search.mdx @@ -0,0 +1,48 @@ +--- +title: "Query by Facets" +hide_table_of_contents: true +sidebar_label: Faceted Search +sidebar_position: 12 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import FacetedSearchCsharp from './_faceted-search-csharp.mdx'; +import FacetedSearchJava from './_faceted-search-java.mdx'; +import FacetedSearchPython from './_faceted-search-python.mdx'; +import FacetedSearchPhp from './_faceted-search-php.mdx'; +import FacetedSearchNodejs from './_faceted-search-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/filtering.mdx b/versioned_docs/version-7.1/indexes/querying/filtering.mdx new file mode 100644 index 0000000000..c468acdb0b --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/filtering.mdx @@ -0,0 +1,55 @@ +--- +title: "Filter Query Results" +hide_table_of_contents: true +sidebar_label: Filtering +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import FilteringCsharp from './_filtering-csharp.mdx'; +import FilteringJava from './_filtering-java.mdx'; +import FilteringPython from './_filtering-python.mdx'; +import FilteringPhp from './_filtering-php.mdx'; +import FilteringNodejs from './_filtering-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/highlighting.mdx b/versioned_docs/version-7.1/indexes/querying/highlighting.mdx new file mode 100644 index 0000000000..2a231fe966 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/highlighting.mdx @@ -0,0 +1,47 @@ +--- +title: "Highlight Index Search Results" +hide_table_of_contents: true +sidebar_label: Highlighting +sidebar_position: 14 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import HighlightingCsharp from './_highlighting-csharp.mdx'; +import HighlightingJava from './_highlighting-java.mdx'; +import HighlightingPython from './_highlighting-python.mdx'; +import HighlightingPhp from './_highlighting-php.mdx'; +import HighlightingNodejs from './_highlighting-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/include-explanations.mdx b/versioned_docs/version-7.1/indexes/querying/include-explanations.mdx new file mode 100644 index 0000000000..4105b810e1 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/include-explanations.mdx @@ -0,0 +1,35 @@ +--- +title: "Include Explanations in Index Query" +hide_table_of_contents: true +sidebar_label: Include Explanations +sidebar_position: 15 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import 
LanguageContent from "@site/src/components/LanguageContent"; + +import IncludeExplanationsCsharp from './_include-explanations-csharp.mdx'; +import IncludeExplanationsNodejs from './_include-explanations-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/intersection.mdx b/versioned_docs/version-7.1/indexes/querying/intersection.mdx new file mode 100644 index 0000000000..483aa38959 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/intersection.mdx @@ -0,0 +1,47 @@ +--- +title: "Querying: Intersection" +hide_table_of_contents: true +sidebar_label: Intersection +sidebar_position: 9 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import IntersectionCsharp from './_intersection-csharp.mdx'; +import IntersectionJava from './_intersection-java.mdx'; +import IntersectionPython from './_intersection-python.mdx'; +import IntersectionPhp from './_intersection-php.mdx'; +import IntersectionNodejs from './_intersection-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/morelikethis.mdx b/versioned_docs/version-7.1/indexes/querying/morelikethis.mdx new file mode 100644 index 0000000000..0e302020f6 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/morelikethis.mdx @@ -0,0 +1,50 @@ +--- +title: "Querying: MoreLikeThis" +hide_table_of_contents: true +sidebar_label: MoreLikeThis +sidebar_position: 13 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import MorelikethisCsharp from './_morelikethis-csharp.mdx'; +import MorelikethisJava from './_morelikethis-java.mdx'; +import MorelikethisPython from './_morelikethis-python.mdx'; +import MorelikethisPhp from './_morelikethis-php.mdx'; +import MorelikethisNodejs from './_morelikethis-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/paging.mdx b/versioned_docs/version-7.1/indexes/querying/paging.mdx new file mode 100644 index 0000000000..b759885274 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/paging.mdx @@ -0,0 +1,58 @@ +--- +title: "Paging Query Results" +hide_table_of_contents: true +sidebar_label: Paging +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import PagingCsharp from './_paging-csharp.mdx'; +import PagingJava from './_paging-java.mdx'; +import PagingPython from './_paging-python.mdx'; +import PagingPhp from './_paging-php.mdx'; +import PagingNodejs from './_paging-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/projections.mdx b/versioned_docs/version-7.1/indexes/querying/projections.mdx new file mode 100644 index 0000000000..2cd1e384f5 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/projections.mdx @@ 
-0,0 +1,58 @@ +--- +title: "Project Index Query Results" +hide_table_of_contents: true +sidebar_label: Projections +sidebar_position: 5 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ProjectionsCsharp from './_projections-csharp.mdx'; +import ProjectionsJava from './_projections-java.mdx'; +import ProjectionsPython from './_projections-python.mdx'; +import ProjectionsPhp from './_projections-php.mdx'; +import ProjectionsNodejs from './_projections-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/query-index.mdx b/versioned_docs/version-7.1/indexes/querying/query-index.mdx new file mode 100644 index 0000000000..d5a71a8181 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/query-index.mdx @@ -0,0 +1,65 @@ +--- +title: "Querying an Index" +hide_table_of_contents: true +sidebar_label: Querying an Index +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import QueryIndexCsharp from './_query-index-csharp.mdx'; +import QueryIndexJava from './_query-index-java.mdx'; +import QueryIndexPython from './_query-index-python.mdx'; +import QueryIndexPhp from './_query-index-php.mdx'; +import QueryIndexNodejs from './_query-index-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/searching.mdx b/versioned_docs/version-7.1/indexes/querying/searching.mdx new file mode 100644 index 0000000000..8bbc8816fb --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/searching.mdx @@ -0,0 +1,51 @@ +--- +title: "Full-Text Search with Index" +hide_table_of_contents: true +sidebar_label: Full-Text Search +sidebar_position: 7 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SearchingCsharp from './_searching-csharp.mdx'; +import SearchingJava from './_searching-java.mdx'; +import SearchingPython from './_searching-python.mdx'; +import SearchingPhp from './_searching-php.mdx'; +import SearchingNodejs from './_searching-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/sorting.mdx b/versioned_docs/version-7.1/indexes/querying/sorting.mdx new file mode 100644 index 0000000000..12625cac95 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/sorting.mdx @@ -0,0 +1,58 @@ +--- +title: "Sort Index Query Results" +hide_table_of_contents: true +sidebar_label: Sorting +sidebar_position: 4 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SortingCsharp from './_sorting-csharp.mdx'; +import SortingJava from './_sorting-java.mdx'; +import SortingPython from './_sorting-python.mdx'; +import SortingPhp from './_sorting-php.mdx'; +import SortingNodejs from './_sorting-nodejs.mdx'; + +export const supportedLanguages = ["csharp", 
"java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/spatial.mdx b/versioned_docs/version-7.1/indexes/querying/spatial.mdx new file mode 100644 index 0000000000..bbc2a1ea86 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/spatial.mdx @@ -0,0 +1,54 @@ +--- +title: "Query a Spatial Index" +hide_table_of_contents: true +sidebar_label: Spatial +sidebar_position: 11 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SpatialCsharp from './_spatial-csharp.mdx'; +import SpatialPython from './_spatial-python.mdx'; +import SpatialPhp from './_spatial-php.mdx'; +import SpatialNodejs from './_spatial-nodejs.mdx'; +import SpatialJava from './_spatial-java.mdx'; + +export const supportedLanguages = ["csharp", "python", "php", "nodejs", "java"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/suggestions.mdx b/versioned_docs/version-7.1/indexes/querying/suggestions.mdx new file mode 100644 index 0000000000..441bca5bb1 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/suggestions.mdx @@ -0,0 +1,47 @@ +--- +title: "Query for suggestions with index" +hide_table_of_contents: true +sidebar_label: Suggestions +sidebar_position: 10 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SuggestionsJava from './_suggestions-java.mdx'; +import SuggestionsCsharp from './_suggestions-csharp.mdx'; +import SuggestionsPython from './_suggestions-python.mdx'; +import SuggestionsPhp from './_suggestions-php.mdx'; +import SuggestionsNodejs from './_suggestions-nodejs.mdx'; + +export const supportedLanguages = ["java", "csharp", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/querying/vector-search.mdx b/versioned_docs/version-7.1/indexes/querying/vector-search.mdx new file mode 100644 index 0000000000..e4b240139a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/querying/vector-search.mdx @@ -0,0 +1,24 @@ +--- +title: "Vector Search" +hide_table_of_contents: true +sidebar_label: Vector Search +sidebar_position: 8 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import VectorSearchCsharp from './_vector-search-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/rolling-index-deployment.mdx b/versioned_docs/version-7.1/indexes/rolling-index-deployment.mdx new file mode 100644 index 0000000000..3cad3805ff --- /dev/null +++ b/versioned_docs/version-7.1/indexes/rolling-index-deployment.mdx @@ -0,0 +1,166 @@ +--- +title: "Indexes: Rolling Index Deployment" +hide_table_of_contents: true +sidebar_label: Rolling Index Deployment +sidebar_position: 21 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Indexes: Rolling Index 
Deployment
+
+
+* When **Rolling Index Deployment** is enabled, indexing a database is performed by one node at a time.
+* Rolling index deployment prevents parallel indexing by multiple nodes, so that most cluster resources
+  remain available for data and task processing.
+* Indexing operations are assigned to nodes by the cluster.
+* The cluster assigns indexing to a node only after the node it had previously assigned
+  confirms that it has finished indexing.
+
+
+* In this page:
+   * [Why Rolling Index Deployment](../indexes/rolling-index-deployment.mdx#why-rolling-index-deployment)
+   * [How Does It Work](../indexes/rolling-index-deployment.mdx#how-does-it-work)
+   * [The Rolling Procedure](../indexes/rolling-index-deployment.mdx#the-rolling-procedure)
+   * [Deployment Concurrency and Order](../indexes/rolling-index-deployment.mdx#deployment-concurrency-and-order)
+   * [Setting Indexing Deployment Mode](../indexes/rolling-index-deployment.mdx#setting-indexing-deployment-mode)
+   * [Server-Wide Deployment Mode](../indexes/rolling-index-deployment.mdx#server-wide-deployment-mode)
+   * [Database Deployment Mode](../indexes/rolling-index-deployment.mdx#database-deployment-mode)
+   * [Deployment Mode in an Index Definition](../indexes/rolling-index-deployment.mdx#deployment-mode-in-an-index-definition)
+
+
+## Why Rolling Index Deployment
+
+When heavy-duty indexing is performed in parallel by all cluster nodes, the cluster's
+performance and availability may be reduced.
+The extent of this reduction depends on the nodes' resources,
+the scope of the required indexing, and the number of nodes indexing concurrently.
+
+* **On Site**, dedicating much of all nodes' resources to indexing rather than to processing
+  data and tasks may reduce the cluster's performance.
+* **On the Cloud**, parallel indexing may exhaust the credits available to multiple nodes
+  at the same time and degrade the cluster's availability.
+
+**Rolling index deployment** ensures that indexes will be created and updated while
+the cluster remains fully available and performant.
+
+
+Parallel indexing may be a better option when there is minor or no database activity.
+
+
+
+
+## How Does It Work
+### The Rolling Procedure
+
+Nodes are assigned the indexing of each database in linear order, one node at a time.
+
+1. The cluster assigns indexing to one of its nodes.
+2. When the assigned node finishes indexing, it sends a cluster-wide confirmation command
+   that indexing is done.
+3. The cluster assigns indexing to the next node.
+
+   If the delivery of an **indexing completion confirmation** fails when the current node
+   finishes indexing, no other node will be able to start indexing until the confirmation
+   succeeds or indexing is initiated manually.
+   Confirmation delivery may fail, for example, due to a forceful disconnection of the indexing
+   node or the cluster leader node during indexing.
+
+### Deployment Concurrency and Order
+
+* **Deployment Concurrency**
+   * A node is assigned to index a database only if no other node currently indexes this database.
+   * Multiple nodes **can** be assigned to concurrently index **different databases**.
+     E.g., node `A` can index the "Integration" database, while node `B` indexes the "Production" database.
+
+* **Deployment Order**
+   * Deployment order is determined by the cluster.
+   * Indexing is deployed in the reverse order of the nodes' membership in the database group.
+     Nodes that are currently in [Rehab or Promotable state](../server/clustering/distribution/distributed-database.mdx#database-topology)
+     are given a lower priority.
+
+
+
+## Setting Indexing Deployment Mode
+### Server-Wide Deployment Mode
+
+Deployment mode can be set server-wide using [configuration options](../server/configuration/configuration-options.mdx#settingsjson).
+Setting the server-wide configuration option will apply to all databases on a given node.
+
+* [Auto Indexes](../indexes/creating-and-deploying.mdx#auto-indexes) Deployment Mode
+  Set a deployment mode for indexes created automatically using the `Indexing.Auto.DeploymentMode` configuration option.
+  `"Indexing.Auto.DeploymentMode": "Rolling"`
+  `"Indexing.Auto.DeploymentMode": "Parallel"`
+
+* [Static Indexes](../indexes/creating-and-deploying.mdx#static-indexes) Deployment Mode
+  Set a deployment mode for static indexes using the `Indexing.Static.DeploymentMode` configuration option.
+  `"Indexing.Static.DeploymentMode": "Rolling"`
+  `"Indexing.Static.DeploymentMode": "Parallel"`
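+
+For example, a settings.json entry that enables rolling deployment for both auto and static
+indexes might look like this (a minimal sketch using the two configuration keys listed above):
+
+
+{`\{
+    "Indexing.Auto.DeploymentMode": "Rolling",
+    "Indexing.Static.DeploymentMode": "Rolling"
+\}
+`}
+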
+### Database Deployment Mode
+
+Enable or disable rolling for a specific database using database configuration keys.
+Setting these properties overrides the
+[Server-Wide](../indexes/rolling-index-deployment.mdx#server-wide-deployment-mode) default.
+
+* From Studio:
+
+  ![Database Configuration Keys](./assets/rolling-index-deployment-01.png)
+
+   1. Open the **Settings** > **Database Settings** view.
+   2. **Filter Keys** - Enter a search string to locate the configuration keys.
+   3. **Edit** - Click to edit values (see next image for details).
+   4. **Configuration Keys** -
+      `Indexing.Auto.DeploymentMode` - Deployment mode configuration key for Auto Indexes.
+      `Indexing.Static.DeploymentMode` - Deployment mode configuration key for Static Indexes.
+   5. **Effective Value** - The current configuration.
+   6. **Origin** - The origin of the current configuration.
+      Can be - Default | Database
+
+  ![Edit Values](./assets/rolling-index-deployment-02.png)
+
+   1. **Override** - Toggle to override the server-wide configuration.
+   2. **Edit Value** - Select the Parallel or Rolling indexing deployment mode.
+   3. **Set Default** - Click 'Set Default' to select the server-wide default value.
+   4. **Save** - Apply the changes.
+
+  An edited configuration key value will become effective only after the database is reloaded.
+
+### Deployment Mode in an Index Definition
+
+Enable or disable rolling for a specific index using the index-definition `DeploymentMode` property.
+Setting this property overrides the [server-wide](../indexes/rolling-index-deployment.mdx#server-wide-deployment-mode)
+and [database](../indexes/rolling-index-deployment.mdx#database-deployment-mode) settings.
+
+ * `DeploymentMode = IndexDeploymentMode.Rolling`
+ * `DeploymentMode = IndexDeploymentMode.Parallel`
+
+
+ The deployment mode can be set for a specific index when, for example, parallel indexing
+ is preferred in general but rolling is a better option for a particularly "weighty" index.
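+
+For example, the following static index definition opts into rolling deployment: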
+
+
+
+{`private class MyRollingIndex : AbstractIndexCreationTask
+\{
+    public MyRollingIndex()
+    \{
+        Map = orders => from order in orders
+                        select new
+                        \{
+                            order.Company,
+                        \};
+        DeploymentMode = IndexDeploymentMode.Rolling;
+    \}
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/indexes/search-engine/_category_.json b/versioned_docs/version-7.1/indexes/search-engine/_category_.json
new file mode 100644
index 0000000000..03f3ccc982
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/search-engine/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 31,
+  "label": "Search Engine"
+}
diff --git a/versioned_docs/version-7.1/indexes/search-engine/assets/corax-01_search-engine-changed.png b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-01_search-engine-changed.png
new file mode 100644
index 0000000000..240bb5878c
Binary files /dev/null and b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-01_search-engine-changed.png differ
diff --git a/versioned_docs/version-7.1/indexes/search-engine/assets/corax-02_index-definition.png b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-02_index-definition.png
new file mode 100644
index 0000000000..80256fcfc4
Binary files /dev/null and b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-02_index-definition.png differ
diff --git a/versioned_docs/version-7.1/indexes/search-engine/assets/corax-03_index-definition_searcher-select.png b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-03_index-definition_searcher-select.png
new file mode 100644
index 0000000000..b4929dfe0f
Binary files /dev/null and b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-03_index-definition_searcher-select.png differ
diff --git a/versioned_docs/version-7.1/indexes/search-engine/assets/corax-04_database-settings_01.png b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-04_database-settings_01.png
new file mode 100644
index 0000000000..3876108f36
Binary files /dev/null and b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-04_database-settings_01.png differ
diff --git a/versioned_docs/version-7.1/indexes/search-engine/assets/corax-05_database-settings_02.png b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-05_database-settings_02.png
new file mode 100644
index 0000000000..6ccc9efa6a
Binary files /dev/null and b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-05_database-settings_02.png differ
diff --git a/versioned_docs/version-7.1/indexes/search-engine/assets/corax-06_database-settings_03.png b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-06_database-settings_03.png
new file mode 100644
index 0000000000..32d374068f
Binary files /dev/null and b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-06_database-settings_03.png differ
diff --git a/versioned_docs/version-7.1/indexes/search-engine/assets/corax-07_exception-method-not-implemented.png b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-07_exception-method-not-implemented.png
new file mode 100644
index 0000000000..6c76c8cd5a
Binary files /dev/null and b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-07_exception-method-not-implemented.png differ
diff --git a/versioned_docs/version-7.1/indexes/search-engine/assets/corax-08_disable-indexing-of-nested-field.png b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-08_disable-indexing-of-nested-field.png
new file mode 100644
index 0000000000..2d7b18cad3
Binary files /dev/null
and b/versioned_docs/version-7.1/indexes/search-engine/assets/corax-08_disable-indexing-of-nested-field.png differ
diff --git a/versioned_docs/version-7.1/indexes/search-engine/corax.mdx b/versioned_docs/version-7.1/indexes/search-engine/corax.mdx
new file mode 100644
index 0000000000..b2a339b903
--- /dev/null
+++ b/versioned_docs/version-7.1/indexes/search-engine/corax.mdx
@@ -0,0 +1,603 @@
+---
+title: "Search Engine: Corax"
+hide_table_of_contents: true
+sidebar_label: Corax
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Search Engine: Corax
+
+
+* **Corax** is RavenDB's native search engine, introduced in RavenDB
+  version 6.0 as an in-house alternative to Lucene.
+  Lucene remains available as well; you can use whichever search engine you prefer.
+
+* The main role of the database's search engine is to **satisfy incoming queries**.
+  In RavenDB, the search engine achieves this by handling each query via an index.
+  If no relevant index exists, the search engine will create one automatically.
+
+  The search engine is the main "moving part" of the indexing mechanism,
+  which processes and indexes documents according to the index definitions.
+
+* The search engine supports both [Auto](../../indexes/creating-and-deploying.mdx#auto-indexes)
+  and [Static](../../indexes/creating-and-deploying.mdx#static-indexes) indexing
+  and can be selected separately for each.
+
+* The search engine can be selected per server, per database, and per index (for static indexes only).
+
+* In this page:
+   * [Selecting the search engine](../../indexes/search-engine/corax.mdx#selecting-the-search-engine)
+   * [Server wide](../../indexes/search-engine/corax.mdx#select-search-engine-server-wide)
+   * [Per database](../../indexes/search-engine/corax.mdx#select-search-engine-per-database)
+   * [Per index](../../indexes/search-engine/corax.mdx#select-search-engine-per-index)
+   * [Unsupported features](../../indexes/search-engine/corax.mdx#unsupported-features)
+   * [Unimplemented methods](../../indexes/search-engine/corax.mdx#unimplemented-methods)
+   * [Handling of complex JSON objects](../../indexes/search-engine/corax.mdx#handling-of-complex-json-objects)
+   * [Compound fields](../../indexes/search-engine/corax.mdx#compound-fields)
+   * [Limits](../../indexes/search-engine/corax.mdx#limits)
+   * [Configuration options](../../indexes/search-engine/corax.mdx#configuration-options)
+   * [Index training: Compression dictionaries](../../indexes/search-engine/corax.mdx#index-training:-compression-dictionaries)
+
+## Selecting the search engine
+
+* You can select your preferred search engine in several scopes:
+   * [Server-wide](../../indexes/search-engine/corax.mdx#select-search-engine-server-wide),
+     selecting which search engine will be used by all the databases hosted by this server.
+   * [Per database](../../indexes/search-engine/corax.mdx#select-search-engine-per-database),
+     overriding server-wide settings for a specific database.
+   * [Per index](../../indexes/search-engine/corax.mdx#select-search-engine-per-index),
+     overriding server-wide and per-database settings.
+     Per-index settings are available only for **static** indexes.
+
+
+  Note that the search engine is selected for **new indexes** only.
+  These settings do not apply to existing indexes.
+
+
+* These configuration options are available:
+   * [Indexing.Auto.SearchEngineType](../../server/configuration/indexing-configuration.mdx#indexingautosearchenginetype)
+     Use this option to select the search engine (either `Lucene` or `Corax`) for **auto** indexes.
+     The search engine can be selected **server-wide** or **per database**.
+   * [Indexing.Static.SearchEngineType](../../server/configuration/indexing-configuration.mdx#indexingstaticsearchenginetype)
+     Use this option to select the search engine (either `Lucene` or `Corax`) for **static** indexes.
+     The search engine can be selected **server-wide**, **per database**, or **per index**.
+   * Read about additional Corax configuration options [here](../../indexes/search-engine/corax.mdx#configuration-options).
+### Select search engine: Server wide
+
+Select the search engine for all the databases hosted by a server
+by modifying the server's [settings.json](../../server/configuration/configuration-options.mdx#settingsjson) file.
+E.g. -
+
+
+{`\{
+    "Indexing.Auto.SearchEngineType": "Corax",
+    "Indexing.Static.SearchEngineType": "Corax"
+\}
+`}
+
+
+
+
+You must restart the server for the new settings to be read and applied.
+
+
+
+Selecting a new search engine will change the search engine only for indexes created from now on.
+
+E.g., if the configuration has been `"Indexing.Static.SearchEngineType": "Corax"`
+until now and you change it to `"Indexing.Static.SearchEngineType": "Lucene"`,
+static indexes created from now on will use Lucene, but static indexes created
+while Corax was selected will continue using Corax.
+
+After selecting a new search engine using the above options, change the search
+engine used by an existing index by [resetting](../../client-api/operations/maintenance/indexes/reset-index.mdx)
+the index.
+
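+For example, a minimal sketch of resetting an index from the C# client so it is rebuilt
+with the newly selected search engine (the index name here is illustrative):
+
+
+{`// Resetting the index rebuilds it from scratch,
+// so it picks up the newly configured search engine
+store.Maintenance.Send(new ResetIndexOperation("Orders/ByCompany"));
+`}
+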
+### Select search engine: Per database
+
+To select the search engine that the database will use, modify the
+relevant Database Record settings. You can easily do this via Studio:
+
+* Open Studio's [Database Settings](../../studio/database/settings/database-settings.mdx)
+  page, and enter `SearchEngine` in the search bar to find the search engine settings.
+  Click `Edit` to modify the default search engine.
+
+  ![Database Settings](./assets/corax-04_database-settings_01.png)
+
+* Select your preferred search engine for Auto and Static indexes.
+
+  ![Corax Database Options](./assets/corax-05_database-settings_02.png)
+
+* To apply the new settings, either **disable and re-enable the database** or **restart the server**.
+
+  ![Default Search Engine](./assets/corax-06_database-settings_03.png)
+### Select search engine: Per index
+
+You can also select the search engine that will be used by a specific index,
+overriding any per-database and per-server settings.
+
+#### Select index search engine via Studio:
+
+* **Indexes-List-View** > **Edit Index Definition**
+  Open Studio's [Index List](../../studio/database/indexes/indexes-list-view.mdx)
+  view and select the index whose search engine you want to set.
+
+  ![Index Definition](./assets/corax-02_index-definition.png)
+  1. Open the index **Configuration** tab.
+  2. Select the search engine you prefer for this index.
+  ![Per-Index Search Engine](./assets/corax-03_index-definition_searcher-select.png)
+
+* The indexes list view will show the changed configuration.
+ + ![Search Engine Changed](./assets/corax-01_search-engine-changed.png) +#### Select index search engine using code + +While defining an index using the API, use the `SearchEngineType` +property to select the search engine that would run the index. +Available values: `SearchEngineType.Lucene`, `SearchEngineType.Corax`. + +* You can pass the search engine type you prefer: + + +{`// Set search engine type while creating the index +new Product_ByAvailability(SearchEngineType.Corax).Execute(store); +`} + + +* And set it in the index definition: + + +{`private class Product_ByAvailability : AbstractIndexCreationTask +\{ + public Product_ByAvailability(SearchEngineType type) + \{ + // Any Map/Reduce segments here + Map = products => from p in products + select new + \{ + p.Name, + p.Brand + \}; + + // The preferred search engine type + SearchEngineType = type; + \} +\} +`} + + + + + +## Unsupported features + +The below features are currently not supported by Corax. + +#### Unsupported during indexing: + +* Setting a [boost factor on an index-field](../../indexes/boosting.mdx#assign-a-boost-factor-to-an-index-field) is not supported. + Note that [boosting the whole index-entry](../../indexes/boosting.mdx#assign-a-boost-factor-to-the-index-entry) IS supported. +* Indexing [WKT shapes](../../indexes/indexing-spatial-data.mdx) is not supported. + Note that indexing **spatial points** IS supported. +* [Custom analyzers](../../studio/database/settings/custom-analyzers.mdx) +* [Custom Sorters](../../indexes/querying/sorting.mdx#creating-a-custom-sorter) + +#### Unsupported while querying: + +* [Fuzzy Search](../../client-api/session/querying/text-search/fuzzy-search.mdx) +* [Explanations](../../client-api/session/querying/debugging/include-explanations.mdx) + +#### Complex JSON properties: + +Complex JSON properties cannot currently be indexed and searched by Corax. +Read more about this [below](../../indexes/search-engine/corax.mdx#handling-of-complex-json-objects). + +#### Unsupported `WHERE` methods/terms: + +* [lucene()](../../client-api/session/querying/document-query/how-to-use-lucene.mdx) +* [intersect()](../../indexes/querying/intersection.mdx) +### Unimplemented methods + +Trying to use Corax with an unimplemented method (see +[Unsupported Features](../../indexes/search-engine/corax.mdx#unsupported-features) above) +will generate a `NotSupportedInCoraxException` exception and end the search. + + +E.g. - +The following query uses the `intersect` method, which is currently not supported by Corax. + + +{`from index 'Orders/ByCompany' +where intersect(Count > 10, Total > 3) +`} + + + +If you set Corax as the search engine for the `Orders/ByCompany` index +used by the above query, running the query will generate the following +exception and the search will stop. + ![Method Not Implemented Exception](./assets/corax-07_exception-method-not-implemented.png) + + + + +## Handling of complex JSON objects + +To avoid unnecessary resource usage, the content of complex JSON properties is not indexed by RavenDB. +[See below](../../indexes/search-engine/corax.mdx#if-corax-encounters-a-complex-property-while-indexing) +how auto and static indexes handle such fields. + + +Lucene's approach of indexing complex fields as JSON strings usually makes no +sense, and is not supported by Corax. 
+
+
+Consider, for example, the following `orders` document:
+
+
+{`\{
+ "Company": "companies/27-A",
+ "Employee": "employees/2-A",
+ "ShipTo": \{
+ "City": "Torino",
+ "Country": "Italy",
+ "Location": \{
+ "Latitude": 45.0907661,
+ "Longitude": 7.687425699999999
+ \}
+ \}
+\}
+`}
+
+
+
+As `Location` contains a list of key/value pairs rather than a simple numeric value or a string,
+Corax will not index its contents (see [here](../../indexes/search-engine/corax.mdx#if-corax-encounters-a-complex-property-while-indexing)
+what will be indexed).
+
+There are several ways to handle the indexing of complex JSON objects:
+
+#### 1. Index a simple property contained in the complex field
+
+Index one of the simple key/value properties stored within the nested object.
+In the `Location` field, for example, Location's `Latitude` and `Longitude`
+can serve us this way:
+
+
+
+{`from order in docs.Orders
+select new
+\{
+ Latitude = order.ShipTo.Location.Latitude,
+ Longitude = order.ShipTo.Location.Longitude
+\}
+`}
+
+
+#### 2. Index the document using Lucene
+
+As long as Corax doesn't index complex JSON objects, you can always
+select Lucene as your search engine when you need to index nested properties.
+#### 3. Revise index definition and fields usage
+
+As [shown above](../../indexes/search-engine/corax.mdx#index-a-simple-property-contained-in-the-complex-field),
+indexing a whole complex field is rarely needed; users would typically
+index and search only the simple properties such a field contains.
+Queries may sometimes need, however, to **project** the content of an entire
+complex field.
+When this is the case, you can revise the index definition (see below) to
+**disable the indexing** of the complex field but **store its content**, so that
+[projection queries](../../indexes/querying/projections.mdx#projections-and-stored-fields)
+can project it.
+
+Content we retrieve from the database and store in the index becomes available for
+projection and will henceforth be retrieved directly from the index, accelerating
+its retrieval at the expense of index storage space.
+
+
+* To store a field's content and disable its indexing **via Studio**:
+
+ ![Disable indexing of a Nested Field](./assets/corax-08_disable-indexing-of-nested-field.png)
+
+ 1. Open the index definition's **Fields** tab.
+ 2. Click **Add Field** to specify what field Corax shouldn't index.
+ 3. Enter the name of the field Corax should not index.
+ 4. Select **Yes** to store the field's content.
+ 5. Select **No** to disable the field's indexing.
+
+* To store a field's content and disable its indexing **using Code**:
+
+
+{`private class Order_ByLocation : AbstractIndexCreationTask
+\{
+ public Order_ByLocation(SearchEngineType type)
+ \{
+ Map = orders => from o in orders
+ select new
+ \{
+ o.ShipTo.Location
+ \};
+
+ SearchEngineType = type;
+
+ // Disable indexing for this field
+ Index("Location", FieldIndexing.No);
+
+ // Store the field's content
+ // (this is mandatory if the field's indexing is disabled)
+ Store("Location", FieldStorage.Yes);
+ \}
+\}
+`}
+
+
+#### 4. Turn the complex property into a string
+
+You can handle the complex property as a string.
+
+
+
+
+{`from order in docs.Orders
+select new
+{
+ // This will fail for the above document when using Corax
+ Location = order.ShipTo.Location
+}
+`}
+
+
+
+{`from order in docs.Orders
+select new
+{
+ // .ToString() will convert the data to a string in JSON format (same as using JsonConvert.Serialize())
+ Location = order.ShipTo.Location.ToString()
+}
+`}
+
+
+
+
+
+Serializing all the properties of a complex property into a single string,
+including names, values, brackets, and so on, is a last-resort option:
+the resulting string does **not** make a good feed for analyzers and is not
+commonly useful for searches.
+It does, however, make sense in some cases to **project** such a string.
+
+#### If Corax encounters a complex property while indexing:
+Auto and static indexes handle complex fields differently.
+New and old static indexes also differ from each other in how they handle complex fields.
+
+* **Auto Index**
+ An auto index will replace a complex field with a `JSON_VALUE` string.
+ This will allow basic queries over the field, like checking if it
+ exists using `Field == null` or `exists(Field)`.
+ * Corax will also raise a complex-field alert:
+
+
+{`We have detected a complex field in an auto index. To avoid higher
+resources usage when processing JSON objects, the values of these fields
+will be replaced with JSON_VALUE.
+Please consider querying on individual fields of that object or using
+a static index.
+`}
+
+
+
+* **New static index** (created or reset on RavenDB `6.2.x` or later)
+ The index will behave as determined by the
+ [Indexing.Corax.Static.ComplexFieldIndexingBehavior](../../server/configuration/indexing-configuration.mdx#indexingcoraxstaticcomplexfieldindexingbehavior)
+ configuration option.
+ * If `ComplexFieldIndexingBehavior` is set to **`Throw`** -
+ Corax will throw a `NotSupportedInCoraxException` exception with this message:
+
+
+{`The value of \`\{fieldName\}\` field is a complex object.
+Typically a complex field is not intended to be indexed as a whole hence indexing
+it as a text isn't supported in Corax. The field is supposed to have 'Indexing'
+option set to 'No' (note that you can still store it and use it in projections).
+Alternatively you can switch 'Indexing.Corax.Static.ComplexFieldIndexingBehavior'
+configuration option from 'Throw' to 'Skip' to disable the indexing of all complex
+fields in the index or globally for all indexes (index reset is required).
+If you really need to use this field for searching purposes, you have to call ToString()
+on the field value in the index definition. Although it's recommended to index individual
+fields of this complex object.
+Read more at: https://ravendb.net/l/OB9XW4/6.2
+`}
+
+
+ * If `ComplexFieldIndexingBehavior` is set to **`Skip`** -
+ Corax will skip indexing the complex field without throwing an exception.
+
+* **Old static index** (created using RavenDB `6.0.x` or older)
+ If the index doesn't explicitly relate to the complex field, Corax will automatically
+ **disable indexing** for this field by defining **Indexing: No** for it as shown
+ [above](../../indexes/search-engine/corax.mdx#disable-the-indexing-of-the-complex-field).
+ * If the Indexing flag is set to anything but "no" -
+ Corax will throw a `NotSupportedInCoraxException` exception.
+ As disabling indexing for this field will prevent additional attempts to index its values,
+ the exception will be thrown just once.
+
+
+
+## Compound fields
+
+
+This feature should only be applied to very large datasets and specific queries.
+It is meant for **experts only**.
+
+
+A compound field is a Corax index field that combines simple data elements.
+
+A compound field can currently be composed of exactly **2 elements**.
+
+
+Expert users can define compound fields to optimize data retrieval: data stored in a compound
+field is sorted as requested by the user, and can later be retrieved in this order
+with extreme efficiency.
+Compound fields can also be used to unify simple data elements into cohesive units that
+make the index more readable.
+
+* **Adding a Compound Field**
+ In an index definition, add a compound field using the `CompoundField` method.
+ Pass the method simple data elements in the order in which you want them to be sorted.
+* **Example**
+ An index definition with a compound field can look like this:
+
+
+{`private class Product_Location : AbstractIndexCreationTask
+\{
+ public Product_Location()
+ \{
+ Map = products =>
+ from p in products
+ select new \{ p.Brand, p.Location \};
+
+ // Add a compound field
+ CompoundField(x => x.Brand, x => x.Location);
+ \}
+\}
+`}
+
+
+
+ The query that uses the indexed data will look no different than if the
+ index included no compound field, but will produce the results much faster.
+
+
+
+{`using (var s = store.OpenSession())
+{
+ // Use the internal optimization previously created by the added compound field
+ var products = s.Query()
+ .Where(x => x.Brand == "RunningShoes")
+ .OrderBy(x => x.Location)
+ .ToList();
+}
+`}
+
+
+
+{`from Products
+where Brand = "RunningShoes"
+order by Location
+`}
+
+
+
+
+
+## Limits
+
+* Corax can create and use indexes of more than `int.MaxValue` (2,147,483,647) documents.
+ To match this capacity, queries over Corax indexes can
+ [skip](../../client-api/session/querying/what-is-rql.mdx#limit)
+ a number of results that exceeds `int.MaxValue` and
+ [take](../../indexes/querying/paging.mdx#example-ii---basic-paging)
+ documents from that location.
+
+* The maximum number of documents that can be **projected** by a query
+ (using either Corax or Lucene) is `int.MaxValue` (2,147,483,647).
+
+
+
+## Configuration options
+
+Corax configuration options include:
+
+* [Indexing.Auto.SearchEngineType](../../server/configuration/indexing-configuration.mdx#indexingautosearchenginetype)
+ [Select](../../indexes/search-engine/corax.mdx#selecting-the-search-engine) the search engine for **Auto** indexes.
+
+* [Indexing.Static.SearchEngineType](../../server/configuration/indexing-configuration.mdx#indexingstaticsearchenginetype)
+ [Select](../../indexes/search-engine/corax.mdx#selecting-the-search-engine) the search engine for **Static** indexes.
+
+* [Indexing.Corax.IncludeDocumentScore](../../server/configuration/indexing-configuration.mdx#indexingcoraxincludedocumentscore)
+ Choose whether to include the score value in document metadata when sorting by score.
+
+ Disabling this option can improve query performance.
+
+
+* [Indexing.Corax.IncludeSpatialDistance](../../server/configuration/indexing-configuration.mdx#indexingcoraxincludespatialdistance)
+ Choose whether to include spatial information in document metadata when sorting by distance.
+
+ Disabling this option can improve query performance.
+
+
+* [Indexing.Corax.MaxMemoizationSizeInMb](../../server/configuration/indexing-configuration.mdx#indexingcoraxmaxmemoizationsizeinmb)
+ The maximum amount of memory that Corax can use for a memoization clause during query processing.
+
+ Please configure this option only if you are an expert.
+
+
+* [Indexing.Corax.DocumentsLimitForCompressionDictionaryCreation](../../server/configuration/indexing-configuration.mdx#indexingcoraxdocumentslimitforcompressiondictionarycreation)
+ Set the maximum number of documents that will be used for the training of a Corax index during dictionary creation.
+ Training will stop when it reaches this limit.
+
+* [Indexing.Corax.MaxAllocationsAtDictionaryTrainingInMb](../../server/configuration/indexing-configuration.mdx#indexingcoraxmaxallocationsatdictionarytraininginmb)
+ Set the maximum amount of memory (in MB) that will be allocated for the training of a Corax index during dictionary creation.
+ Training will stop when it reaches this limit.
+
+* [Indexing.Corax.Static.ComplexFieldIndexingBehavior](../../server/configuration/indexing-configuration.mdx#indexingcoraxstaticcomplexfieldindexingbehavior)
+ Choose [how to react](../../indexes/search-engine/corax.mdx#if-corax-encounters-a-complex-property-while-indexing)
+ when a static Corax index is requested to index a complex JSON object.
+
+
+
+## Index training: Compression dictionaries
+
+When creating Corax indexes, RavenDB analyzes index contents and trains
+[compression dictionaries](https://en.wikibooks.org/wiki/Data_Compression/Dictionary_compression)
+for much higher storage and execution efficiency.
+
+* The larger the collection, the longer the training process will take.
+ The index, however, will become more efficient in terms of resource usage.
+* The training process can take from a few seconds up to a minute for multi-terabyte collections.
+* The IO speed of the storage system also affects the training time.
+
+Here are some additional things to keep in mind about Corax index compression dictionaries:
+
+* Compression dictionaries are used to store index terms more efficiently.
+ This can significantly reduce the size of the index, which can improve performance.
+* The training process is **only performed once**, when the index is created.
+* The compression dictionaries are stored with the index and are used for all subsequent
+ operations (indexing and querying).
+* The benefits of compression dictionaries are most pronounced for large collections.
+
+ Training stops when it reaches either the
+ [number of documents](../../server/configuration/indexing-configuration.mdx#indexingcoraxdocumentslimitforcompressiondictionarycreation)
+ threshold (100,000 docs by default) or the
+ [amount of memory](../../server/configuration/indexing-configuration.mdx#indexingcoraxmaxallocationsatdictionarytraininginmb)
+ threshold (up to 2GB). Both thresholds are configurable.
+
+* If upon creation there are fewer than 10,000 documents in the involved collections,
+ it may make sense to manually reset the index after reaching
+ [100,000](../../server/configuration/indexing-configuration.mdx#indexingcoraxdocumentslimitforcompressiondictionarycreation)
+ documents, forcing retraining (a minimal sketch of such a reset appears at the end of this page).
+
+ Indexes are replaced in a [side-by-side](../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---side-by-side-indexing)
+ manner: existing indexes continue running until the new ones are created,
+ to avoid any interruption to existing queries.
+
+### Corax and the Test Index Interface
+Corax indexes will **not** train compression dictionaries if they are created in the
+[Test Index](../../studio/database/indexes/create-map-index.mdx#test-index) interface,
+because the testing interface is designed for index prototyping and the training
+process would add unnecessary overhead.
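+As mentioned above, when the involved collections start out small, the index can be reset manually
+once enough documents have accumulated, so that dictionary training runs again over a representative
+sample. The following is a minimal sketch, assuming the RavenDB C# client and an illustrative server
+URL, database, and index name - adjust these to your environment:
+
+
+{`using Raven.Client.Documents;
+using Raven.Client.Documents.Operations.Indexes;
+
+// Illustrative store setup - the URL, database, and index name are placeholders
+using var store = new DocumentStore
+\{
+    Urls = new[] \{ "http://localhost:8080" \},
+    Database = "Northwind"
+\}.Initialize();
+
+// Resetting the index rebuilds it from scratch (side-by-side),
+// which re-runs compression dictionary training on the current data
+store.Maintenance.Send(new ResetIndexOperation("Orders/ByCompany"));
+`}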
+ + + + diff --git a/versioned_docs/version-7.1/indexes/sorting-and-collation.mdx b/versioned_docs/version-7.1/indexes/sorting-and-collation.mdx new file mode 100644 index 0000000000..9366d7349a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/sorting-and-collation.mdx @@ -0,0 +1,38 @@ +--- +title: "Indexes: Sorting & Collation" +hide_table_of_contents: true +sidebar_label: Sorting & Collation +sidebar_position: 10 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import SortingAndCollationCsharp from './_sorting-and-collation-csharp.mdx'; +import SortingAndCollationJava from './_sorting-and-collation-java.mdx'; + +export const supportedLanguages = ["csharp", "java"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/stale-indexes.mdx b/versioned_docs/version-7.1/indexes/stale-indexes.mdx new file mode 100644 index 0000000000..ce292ad20e --- /dev/null +++ b/versioned_docs/version-7.1/indexes/stale-indexes.mdx @@ -0,0 +1,41 @@ +--- +title: "Indexes: Stale Indexes" +hide_table_of_contents: true +sidebar_label: Stale Indexes +sidebar_position: 9 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import StaleIndexesCsharp from './_stale-indexes-csharp.mdx'; +import StaleIndexesJava from './_stale-indexes-java.mdx'; +import StaleIndexesNodejs from './_stale-indexes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "nodejs"]; + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/indexes/storing-data-in-index.mdx b/versioned_docs/version-7.1/indexes/storing-data-in-index.mdx new file mode 100644 index 0000000000..6d61cc64ad --- /dev/null +++ b/versioned_docs/version-7.1/indexes/storing-data-in-index.mdx @@ -0,0 +1,54 @@ +--- +title: "Storing Data in Index" +hide_table_of_contents: true +sidebar_label: Storing Data in Index +sidebar_position: 25 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import StoringDataInIndexJava from './_storing-data-in-index-java.mdx'; +import StoringDataInIndexPython from './_storing-data-in-index-python.mdx'; +import StoringDataInIndexPhp from './_storing-data-in-index-php.mdx'; +import StoringDataInIndexNodejs from './_storing-data-in-index-nodejs.mdx'; +import StoringDataInIndexCsharp from './_storing-data-in-index-csharp.mdx'; + +export const supportedLanguages = ["java", "python", "php", "nodejs", "csharp"]; + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/troubleshooting/_category_.json b/versioned_docs/version-7.1/indexes/troubleshooting/_category_.json new file mode 100644 index 0000000000..ca00d767fb --- /dev/null +++ b/versioned_docs/version-7.1/indexes/troubleshooting/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 32, + "label": Troubleshooting, +} diff --git a/versioned_docs/version-7.1/indexes/troubleshooting/debugging-index-errors.mdx b/versioned_docs/version-7.1/indexes/troubleshooting/debugging-index-errors.mdx new file mode 100644 index 0000000000..857934533b --- /dev/null +++ b/versioned_docs/version-7.1/indexes/troubleshooting/debugging-index-errors.mdx @@ -0,0 +1,223 @@ +--- +title: "Indexes: Debugging Index Errors" +hide_table_of_contents: true +sidebar_label: Debugging Index 
Errors
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Indexes: Debugging Index Errors
+
+Indexes in RavenDB are user-provided LINQ queries that run on top of a dynamic JSON data model. This leaves wide room for errors, caused either by a malformed index definition or by missing / corrupt data in the JSON documents themselves.
+
+## Index Compilation Errors
+
+An index definition such as the following one will fail:
+
+
+
+{`\{
+ "Name": "Posts_TitleLength",
+ "Maps" : [
+ "from doc in docs where doc.Type == 'posts' select new \{ doc.Title.Length \}"
+ ]
+\}
+`}
+
+
+
+The error is the use of single quotes to enclose a string, something that is not allowed in C#. This will result in the following compilation error:
+
+
+
+{`IndexCompilationException: Failed to compile index Posts_TitleLength
+
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Linq;
+using System.Text.RegularExpressions;
+using Lucene.Net.Documents;
+using Raven.Server.Documents.Indexes.Static;
+using Raven.Server.Documents.Indexes.Static.Linq;
+using Raven.Server.Documents.Indexes.Static.Extensions;
+
+namespace Raven.Server.Documents.Indexes.Static.Generated
+\{
+ public class Index_Posts_TitleLength : StaticIndexBase
+ \{
+ IEnumerable Map_0(IEnumerable docs)
+ \{
+ foreach (var doc in docs)
+ \{
+ if ((doc.Type == 'posts') == false)
+ continue;
+ yield return new
+ \{
+ doc.Title.Length
+ \}
+
+ ;
+ \}
+ \}
+
+ public Index_Posts_TitleLength()
+ \{
+ this.AddMap("@all_docs", this.Map_0);
+ this.OutputFields = new string[] \{ "Length" \};
+ \}
+ \}
+\}
+
+(20,34): error CS1012: Too many characters in character literal
+`}
+
+
+
+This clearly indicates that the error is at line 20, column 34: `if ((doc.Type == 'posts') == false)`.
+
+This gives you enough information to figure out what is wrong. These errors are immediate and require no further action from the database; the only thing the user can do is fix the index definition.
+
+
+
+Please note that the compiled index definition differs from the one that was sent, because internally RavenDB applies many optimizations to the submitted LINQ function to achieve the best performance.
+
+
+
+## Index Execution Errors
+
+A common case is an index that doesn't take into account that other documents also exist on the server. For example, let us take this index:
+
+
+
+{`\{
+ "Name": "YearOfBirth",
+ "Maps" : [
+ "from doc in docs select new \{ YearOfBirth = DateTime.Parse(doc.DateOfBirth).Year \}"
+ ]
+\}
+`}
+
+
+
+This index assumes that all documents have a `DateOfBirth` property and that
+the value of this property can be parsed to `DateTime`. A document that doesn't have that
+property will return `null` when it is accessed, resulting in an `ArgumentNullException`
+when the index is executed.
+
+Because indexes are updated on a background thread, users are unlikely to notice
+these errors immediately.
+
+Index execution errors can be viewed in two places:
+
+* View **index statistics** and **index error statistics**
+ in `/indexes/stats` and `/indexes/errors`.
+* View indexes activity, including errors, in a human-readable form via [Studio](../../studio/database/indexes/indexes-list-view.mdx#indexes-list-view---errors).
+ + + + +{`{ + "Results":[ + { + "Name":"TitleLength", + "MapAttempts":2, + "MapSuccesses":2, + "MapErrors":2, + "ReduceAttempts":null, + "ReduceSuccesses":null, + "ReduceErrors":null, + "MappedPerSecondRate":0.0, + "ReducedPerSecondRate":0.0, + "MaxNumberOfOutputsPerDocument":0, + "Collections":{ + "@all_docs":{ + "LastProcessedDocumentEtag":791, + "LastProcessedTombstoneEtag":0, + "DocumentLag":0, + "TombstoneLag":0 + } + }, + "LastQueryingTime":"2018-02-26T14:17:08.7454587Z", + "State":"Error", + "Priority":"Normal", + "CreatedTimestamp":"2018-02-26T14:17:08.7092294Z", + "LastIndexingTime":"2018-02-26T14:17:08.7512648Z", + "IsStale":true, + "LockMode":"Unlock", + "Type":"Map", + "Status":"Paused", + "EntriesCount":0, + "ErrorsCount":2, + "IsInvalidIndex":true + } + ] +} +`} + + + + +{`{ + "Results":[ + { + "Name":"TitleLength", + "Errors":[ + { + "Timestamp":"2018-02-26T14:17:08.7813846Z", + "Document":"Raven/Hilo/categories", + "Action":"Map", + "Error":"Failed to execute mapping function on Raven/Hilo/categories. Exception: System.ArgumentNullException: String reference not set to an instance of a String. +Parameter name: s + at System.DateTimeParse.Parse(String s, DateTimeFormatInfo dtfi, DateTimeStyles styles) + at CallSite.Target(Closure , CallSite , Type , Object ) + at System.Dynamic.UpdateDelegates.UpdateAndExecute2[T0,T1,TRet](CallSite site, T0 arg0, T1 arg1) + at Raven.Server.Documents.Indexes.Static.Generated.Index_TitleLength.d__0.MoveNext() + at Raven.Server.Documents.Indexes.Static.TimeCountingEnumerable.Enumerator.MoveNext() in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\Documents\\\\Indexes\\\\Static\\\\TimeCountingEnumerable.cs:line 41 + at Raven.Server.Documents.Indexes.MapIndexBase\`2.HandleMap(LazyStringValue lowerId, IEnumerable mapResults, IndexWriteOperation writer, TransactionOperationContext indexContext, IndexingStatsScope stats) in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\Documents\\\\Indexes\\\\MapIndexBase.cs:line 64 + at Raven.Server.Documents.Indexes.Workers.MapDocuments.Execute(DocumentsOperationContext databaseContext, TransactionOperationContext indexContext, Lazy\`1 writeOperation, IndexingStatsScope stats, CancellationToken token) in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\Documents\\\\Indexes\\\\Workers\\\\MapDocuments.cs:line 108" + }, + { + "Timestamp":"2018-02-26T14:17:08.7958137Z", + "Document":"companies/1-A", + "Action":"Map", + "Error":"Failed to execute mapping function on companies/1-A. Exception: System.ArgumentNullException: String reference not set to an instance of a String. 
+Parameter name: s
+ at System.DateTimeParse.Parse(String s, DateTimeFormatInfo dtfi, DateTimeStyles styles)
+ at CallSite.Target(Closure , CallSite , Type , Object )
+ at Raven.Server.Documents.Indexes.Static.Generated.Index_TitleLength.d__0.MoveNext()
+ at Raven.Server.Documents.Indexes.Static.TimeCountingEnumerable.Enumerator.MoveNext() in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\Documents\\\\Indexes\\\\Static\\\\TimeCountingEnumerable.cs:line 41
+ at Raven.Server.Documents.Indexes.MapIndexBase\`2.HandleMap(LazyStringValue lowerId, IEnumerable mapResults, IndexWriteOperation writer, TransactionOperationContext indexContext, IndexingStatsScope stats) in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\Documents\\\\Indexes\\\\MapIndexBase.cs:line 64
+ at Raven.Server.Documents.Indexes.Workers.MapDocuments.Execute(DocumentsOperationContext databaseContext, TransactionOperationContext indexContext, Lazy\`1 writeOperation, IndexingStatsScope stats, CancellationToken token) in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\Documents\\\\Indexes\\\\Workers\\\\MapDocuments.cs:line 108"
+ }
+ ]
+ }
+ ]
+}
+`}
+
+
+
+
+As you can see, RavenDB surfaces the fact that the index has encountered an error, the document it errored on, and what that error was. The errors collection contains the last 500 errors that happened on the server per index.
+
+In addition, the server logs may contain further information regarding the error.
+
+## Marking Index as Errored
+
+Furthermore, in order to protect itself from indexes that always fail, RavenDB will mark an index as errored if it keeps failing. The actual logic for erroring-out an index is:
+
+* An index is marked as errored if it has a failure rate of 15% or more.
+* The 15% rate is only considered after the first 100 indexing attempts, to make sure the determination is based on a meaningful sample.
+
+An errored index cannot be queried; all queries to an errored index will result in an exception.
+
+The only thing that can be done with an errored index is to either delete it or replace the index definition with one that is resilient to those errors, as in the sketch below.
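+The following is a minimal sketch of such a revised definition for the `YearOfBirth` index shown above, using the same raw index-definition format as the earlier examples. It filters out documents that lack the `DateOfBirth` property, so the map function no longer throws on them:
+
+
+{`\{
+ "Name": "YearOfBirth",
+ "Maps" : [
+ "from doc in docs where doc.DateOfBirth != null select new \{ YearOfBirth = DateTime.Parse(doc.DateOfBirth).Year \}"
+ ]
+\}
+`}
+
+
+Note that the `where` clause shown here only guards against the `ArgumentNullException` discussed above; a stricter variant could also verify that the value actually parses as a date before using it.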
+ diff --git a/versioned_docs/version-7.1/indexes/using-analyzers.mdx b/versioned_docs/version-7.1/indexes/using-analyzers.mdx new file mode 100644 index 0000000000..ab3c5b5af6 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/using-analyzers.mdx @@ -0,0 +1,43 @@ +--- +title: "Indexes: Analyzers" +hide_table_of_contents: true +sidebar_label: Analyzers +sidebar_position: 24 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import UsingAnalyzersJava from './_using-analyzers-java.mdx'; +import UsingAnalyzersCsharp from './_using-analyzers-csharp.mdx'; +import UsingAnalyzersNodejs from './_using-analyzers-nodejs.mdx'; + +export const supportedLanguages = ["java", "csharp", "nodejs"]; + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/using-dynamic-fields.mdx b/versioned_docs/version-7.1/indexes/using-dynamic-fields.mdx new file mode 100644 index 0000000000..00e55486dc --- /dev/null +++ b/versioned_docs/version-7.1/indexes/using-dynamic-fields.mdx @@ -0,0 +1,50 @@ +--- +title: "Indexes: Dynamic Index Fields" +hide_table_of_contents: true +sidebar_label: Dynamic Fields +sidebar_position: 27 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import UsingDynamicFieldsCsharp from './_using-dynamic-fields-csharp.mdx'; +import UsingDynamicFieldsJava from './_using-dynamic-fields-java.mdx'; +import UsingDynamicFieldsPython from './_using-dynamic-fields-python.mdx'; +import UsingDynamicFieldsPhp from './_using-dynamic-fields-php.mdx'; +import UsingDynamicFieldsNodejs from './_using-dynamic-fields-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/using-term-vectors.mdx b/versioned_docs/version-7.1/indexes/using-term-vectors.mdx new file mode 100644 index 0000000000..2cf7030d1a --- /dev/null +++ b/versioned_docs/version-7.1/indexes/using-term-vectors.mdx @@ -0,0 +1,40 @@ +--- +title: "Indexes: Term Vectors" +hide_table_of_contents: true +sidebar_label: Term Vectors +sidebar_position: 26 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import UsingTermVectorsJava from './_using-term-vectors-java.mdx'; +import UsingTermVectorsNodejs from './_using-term-vectors-nodejs.mdx'; +import UsingTermVectorsCsharp from './_using-term-vectors-csharp.mdx'; + +export const supportedLanguages = ["java", "nodejs", "csharp"]; + + + + + + + + + + + + + + + + + diff --git a/versioned_docs/version-7.1/indexes/what-are-indexes.mdx b/versioned_docs/version-7.1/indexes/what-are-indexes.mdx new file mode 100644 index 0000000000..d703b32626 --- /dev/null +++ b/versioned_docs/version-7.1/indexes/what-are-indexes.mdx @@ -0,0 +1,60 @@ +--- +title: "What are Indexes" +hide_table_of_contents: true +sidebar_label: What are Indexes +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import WhatAreIndexesCsharp from './_what-are-indexes-csharp.mdx'; +import WhatAreIndexesJava from './_what-are-indexes-java.mdx'; +import WhatAreIndexesPython from './_what-are-indexes-python.mdx'; +import WhatAreIndexesPhp from './_what-are-indexes-php.mdx'; +import 
WhatAreIndexesNodejs from './_what-are-indexes-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "java", "python", "php", "nodejs"]; + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/integrations/_category_.json b/versioned_docs/version-7.1/integrations/_category_.json new file mode 100644 index 0000000000..b1527a08a3 --- /dev/null +++ b/versioned_docs/version-7.1/integrations/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 9, + "label": "Integrations" +} diff --git a/versioned_docs/version-7.1/integrations/akka.net-persistence/_category_.json b/versioned_docs/version-7.1/integrations/akka.net-persistence/_category_.json new file mode 100644 index 0000000000..c90353e889 --- /dev/null +++ b/versioned_docs/version-7.1/integrations/akka.net-persistence/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 1, + "label": Akka.NET Persistence, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/navigate-to-traffic-watch.png b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/navigate-to-traffic-watch.png new file mode 100644 index 0000000000..54b3808e5d Binary files /dev/null and b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/navigate-to-traffic-watch.png differ diff --git a/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/queries-in-traffic-watch.png b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/queries-in-traffic-watch.png new file mode 100644 index 0000000000..fc4ebb1005 Binary files /dev/null and b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/queries-in-traffic-watch.png differ diff --git a/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/the-events-collection.png b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/the-events-collection.png new file mode 100644 index 0000000000..404e1dd13e Binary files /dev/null and b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/the-events-collection.png differ diff --git a/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/the-payload.png b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/the-payload.png new file mode 100644 index 0000000000..f941db75cc Binary files /dev/null and b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/the-payload.png differ diff --git a/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/the-snapshots-collection.png b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/the-snapshots-collection.png new file mode 100644 index 0000000000..2bfd01a8f5 Binary files /dev/null and b/versioned_docs/version-7.1/integrations/akka.net-persistence/assets/the-snapshots-collection.png differ diff --git a/versioned_docs/version-7.1/integrations/akka.net-persistence/events-and-snapshots.mdx b/versioned_docs/version-7.1/integrations/akka.net-persistence/events-and-snapshots.mdx new file mode 100644 index 0000000000..5bd8d0c95a --- /dev/null +++ b/versioned_docs/version-7.1/integrations/akka.net-persistence/events-and-snapshots.mdx @@ -0,0 +1,397 @@ +--- +title: "Events and Snapshots" +hide_table_of_contents: true +sidebar_label: Events and Snapshots +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from 
"@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Events and Snapshots + + +* Akka.Persistence provides two primary methods to persist actor state: **Event sourcing** and **Snapshots**. + +* With event sourcing, each state change is stored as a separate event, creating a sequence of events that represents the actor’s history. + Snapshots, on the other hand, capture the actor’s state at specific points in time. + +* Upon actor restart, both events and snapshots can be replayed to restore the actor's internal state, + with snapshots allowing for quicker recovery by avoiding the need to replay all past events. + +* The stored events can be queried via Akka's query interface. + Learn more about that in [Queries](../../integrations/akka.net-persistence/queries.mdx). + +* To learn how to configure the events journal and the snapshot-store via the _Akka.Persistence.RavenDB_ plugin, + see [Integrating with Akka.NET persistence](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx). + +* In this page: + * [Storing events](../../integrations/akka.net-persistence/events-and-snapshots.mdx#storing-events) + * [Storing snapshots](../../integrations/akka.net-persistence/events-and-snapshots.mdx#storing-snapshots) + * [Storing guidelines](../../integrations/akka.net-persistence/events-and-snapshots.mdx#storing-guidelines) + * [Global consistency](../../integrations/akka.net-persistence/events-and-snapshots.mdx#global-consistency) + * [Sample application](../../integrations/akka.net-persistence/events-and-snapshots.mdx#sample-application) + + +## Storing events + +**Events**: +Persistent actors can write messages, called events, into the configured RavenDB database, +which serves as the events journal. + +**The Events collection**: +Each event is stored as a document in the `Events` collection in append-only mode. + +**The Event document**: +Each event document includes the following fields, among others: + + * `id` - The event document id, composed of `` + * `payload` - The actual message content or event data. + * `persistentId` - The unique identifier of the actor that persisted this event. + * `sequenceNr` - The sequence number for the event, indicating its position in the sequence of events for a particular actor. + Serves as a unique, gap-less identifier that helps maintain the correct order and consistency of the actor's state. + +**Replaying events**: +Maintaining the event documents in chronological order (based on the `sequenceNr` field) +enables retrieval and replay in the correct sequence when an actor restarts. + + + +## Storing snapshots + +**Snapshots**: + + * Snapshots capture the current state of an actor at a specific point in time, + representing all the data the actor has accumulated or processed up to that moment. + + * Persistent actors can store these snapshots in the configured RavenDB database, + which serves as the snapshot-store. + + * After a snapshot is successfully persisted, events can be deleted from the events journal to free up space. + +**The Snapshots collection**: +Each snapshot is stored as a document in the `Snapshots` collection in append-only mode. + +**The Snapshot document**: +Each snapshot document includes the following fields, among others: + + * `id` - The snapshot document id, composed of `` + * `payload` - The actor's state at the time the snapshot was taken. + * `persistentId` - The unique identifier of the actor that created the snapshot. 
+ * `sequenceNr` - The sequence number indicating the position of the snapshot in the sequence of events. + Serves as a unique, gap-less identifier that helps maintain the correct order and consistency of the actor's state. + +**Replaying snapshots**: + + * When an actor restarts, instead of replaying the entire event history from the events journal, + which can be inefficient as this journal grows, the actor's state can be restored from a snapshot + and then replay only the events that occurred after that snapshot. + + * Replaying snapshots significantly accelerates recovery, reduces network transmission, + and lowers both actor event replay time and CPU usage. + + + +## Storing guidelines + + + +* The RavenDB plugin designates the Events and Snapshots collections for storing Akka’s data. + While it’s technically possible to store documents from other sources in these collections, + you shouldn't do so. + +* The Events and Snapshots collections should be reserved exclusively for Akka’s storage needs. + It is recommended to place these collections in a separate, designated database. + + + + + +## Global consistency + +**The consistency requirement**: + + * Consistency refers to the property that ensures data is uniform and accurate across all database replicas at a given point in time. + In a distributed system, Akka.NET Persistence relies on consistency to accurately restore an actor’s state from its events during recovery, + regardless of which node is contacted. + + * Events must be applied (replayed) in the exact order they were generated, + so consistency is crucial to ensure that no events are missed or processed out of order. + +**Cluster-wide transactions**: + + * RavenDB is a distributed database, allowing writes, reads, and queries to target different nodes across the cluster. + + * To prioritize consistency over availability, the RavenDB plugin uses a [cluster-wide transaction](../../server/clustering/cluster-transactions.mdx) for storing events and snapshot documents. + This ensures that persisted data is consistently applied across all database instances in the cluster, preventing conflicts and guaranteeing that restoring to the latest state reflects the correct event sequence, as required by Akka. + + * Note that cluster consensus is required for a cluster-wide transaction to execute. + This means that a majority of nodes in the [database group](../../studio/database/settings/manage-database-group.mdx) must be up and connected in order to persist new events & snapshots. + +**Atomic-guards usage**: + + * As with every document created using a cluster-wide transaction in RavenDB, + the server creates an [Atomic-Guard](../../client-api/session/cluster-transaction/atomic-guards.mdx) for each event or snapshot document that is stored to prevent concurrent modifications. + + * The atomic-guard is particularly beneficial in scenarios where an actor recovers its events and snapshots from a node that failed, + came back up, but has not yet received the complete replication information from the other nodes in the database group. + In such cases, the actor’s state might not be fully up-to-date. + + * If the actor attempts to write a new event using a _sequenceNr_ that already exists, the Atomic-Guard will prevent this action from succeeding. + Upon this failure, the actor will restart itself. If, by that time, the node has received all the missing information, the actor will now recover with a fully updated state. 
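+For illustration, the following is a minimal sketch of a cluster-wide session opened with the
+RavenDB C# client. This is not the plugin's own code, but it shows the transaction mode the
+plugin uses when persisting events and snapshots (the store setup and document ID are placeholders):
+
+
+{`using Raven.Client.Documents;
+using Raven.Client.Documents.Session;
+
+// Illustrative store setup - adjust the URL and database name
+using var store = new DocumentStore
+\{
+    Urls = new[] \{ "http://localhost:8080" \},
+    Database = "AkkaStorage_PhoneSales"
+\}.Initialize();
+
+using (var session = store.OpenSession(new SessionOptions
+\{
+    // The write completes only after a majority of the database group accepts it
+    TransactionMode = TransactionMode.ClusterWide
+\}))
+\{
+    session.Store(new \{ Payload = "..." \}, "some-event-id");
+    session.SaveChanges();
+\}
+`}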
+ + + +## Sample application + +The following is a sample application that stores events and snapshots in a RavenDB database. + + + + +{`static void Main(string[] args) +{ + var host = new HostBuilder().ConfigureServices((context, services) => + { + // Configure the RavenDB plugin using Hosting: + //============================================ + + services.AddAkka("SalesActorSystem", (builder, provider) => + { + builder.WithRavenDbPersistence( + urls: new[] { "http://localhost:8080" }, + databaseName: "AkkaStorage_PhoneSales", + // Use both akka.persistence.journal and akka.persistence.snapshot-store + mode: PersistenceMode.Both); + + builder.WithActors((system, registry) => + { + var taskCompletion = new TaskCompletionSource(); + long expectedProfit = 1_500; + + // Create actors: + // ============== + + var salesActor = system.ActorOf(Props.Create(() => + new SalesActor(expectedProfit, taskCompletion)), "sales-actor"); + + var salesSimulatorActor = system.ActorOf(Props.Create(() => + new SalesSimulatorActor(salesActor)), "sales-simulator-actor"); + + // Exit app when sales reach the 'expectedProfit' + taskCompletion.Task.Wait(); + system.Terminate(); + }); + }); + }); + + var app = host.Build(); + app.Run(); +} +`} + + + + +{`public class SalesActor: ReceivePersistentActor +{ + // The unique actor id + public override string PersistenceId => "sales-actor"; + + // The state that will be persisted in SNAPSHOTS + private SalesActorState _state; + + public SalesActor(long expectedProfit, TaskCompletionSource taskCompletion) + { + _state = new SalesActorState + { + totalSales = 0 + }; + + // Process a sale: + Command(saleInfo => + { + if (_state.totalSales < expectedProfit) + { + // Persist an EVENT to RavenDB + // =========================== + + // The handler function is executed after the EVENT was saved successfully + Persist(saleInfo, _ => + { + // Update the latest state in the actor + _state.totalSales += saleInfo.Price; + + ConsoleHelper.WriteToConsole(ConsoleColor.Black, + $"Sale was persisted. Phone brand: {saleInfo.Brand}. Price: {saleInfo.Price}"); + + // Store a SNAPSHOT every 5 sale events + // ==================================== + + if (LastSequenceNr != 0 && LastSequenceNr % 5 == 0) + { + SaveSnapshot(_state.totalSales); + } + }); + } + else if (!taskCompletion.Task.IsCompleted) + { + Sender.Tell(new StopSimulate()); + + ConsoleHelper.WriteToConsole(ConsoleColor.DarkMagenta, + $"Sale not persisted: " + + $"Total sales have already reached the expected profit of {expectedProfit}"); + + ConsoleHelper.WriteToConsole(ConsoleColor.DarkMagenta, + _state.ToString()); + + taskCompletion.TrySetResult(true); + } + }); + + // Handle a SNAPSHOT success msg + Command(success => + { + ConsoleHelper.WriteToConsole(ConsoleColor.Blue, + $"Snapshot saved successfully at sequence number {success.Metadata.SequenceNr}"); + + // Optionally, delete old snapshots or events here if needed + // DeleteMessages(success.Metadata.SequenceNr); + }); + + // Recover an EVENT + Recover(saleInfo => + { + _state.totalSales += saleInfo.Price; + + ConsoleHelper.WriteToConsole(ConsoleColor.DarkGreen, + $"Event was recovered. Price: {saleInfo.Price}"); + }); + + // Recover a SNAPSHOT + Recover(offer => + { + var salesFromSnapshot = (long) offer.Snapshot; + _state.totalSales = salesFromSnapshot; + + ConsoleHelper.WriteToConsole(ConsoleColor.DarkGreen, + $"Snapshot was recovered. 
Total sales from snapshot: {salesFromSnapshot}"); + }); + } +} +`} + + + + +{`public class SalesSimulatorActor : ReceiveActor +{ + private readonly IActorRef _salesActor; + private ICancelable scheduler; + + public SalesSimulatorActor(IActorRef salesActor) + { + _salesActor = salesActor; + + // Schedule the first sale simulation immediately and then every 2 seconds: + scheduler = Context.System.Scheduler.ScheduleTellRepeatedlyCancelable(TimeSpan.Zero, + TimeSpan.FromSeconds(2), Self, new StartSimulate(), Self); + + Receive(HandleStart); + Receive(HandleStop); + } + + private void HandleStart(StartSimulate message) + { + ConsoleHelper.WriteToConsole(ConsoleColor.Black, + $"About to simulate a sale..."); + + Random random = new Random(); + string[] products = { "Apple", "Google", "Nokia", "Xiaomi", "Huawei" }; + + var randomBrand = products[random.Next(products.Length)]; + var randomPrice = random.Next(1, 6) * 100; // 100, 200, 300, 400, or 500 + + var nextSale = new Sale(randomPrice, randomBrand); + _salesActor.Tell(nextSale); + } + + private void HandleStop(StopSimulate message) + { + scheduler.Cancel(); + ConsoleHelper.WriteToConsole(ConsoleColor.DarkRed, + "Simulation stopped"); + } +} +`} + + + + +{`// A sale EVENT to be persisted +public class Sale(long pricePaid, string productBrand) +{ + public long Price { get; set; } = pricePaid; + public string Brand { get; set; } = productBrand; +} + +// MESSAGES for the simulator actor +public class StartSimulate { } +public class StopSimulate { } + +// Internal state that will be persisted in a SNAPSHOT +class SalesActorState +{ + public long totalSales { get; set; } + + public override string ToString() + { + return $"[SalesActorState: Total sales are {totalSales}]"; + } +} + +public class ConsoleHelper +{ + public static void WriteToConsole(ConsoleColor color, string text) + { + Console.ForegroundColor = color; + Console.WriteLine(text); + Console.ResetColor(); + } +} +`} + + + +The documents created in the Events and Snapshots collections are visible in the Documents View in the Studio: + +#### The Events collection + +![The events collection](./assets/the-events-collection.png) + +1. The Events collection. +2. The event document ID in the format: `` +3. The unique ID of the actor that persisted these events. +4. The unique sequence number of the event. +5. The data that was stored for the event. + +![The payload](./assets/the-payload.png) + +The data stored for each event is an instance of the `Sale` class, containing `Price` and `Brand` fields. +#### The Snapshots collection + +![The snapshots collection](./assets/the-snapshots-collection.png) + +1. The Snapshots collection. +2. The snapshot document ID in the format: `` +3. The unique ID of the actor that persisted this snapshot. +4. The sequence number of the event after which the snapshot was stored, `5` in this case. +5. The data stored in this snapshot represents the actor's state immediately after event 5 was stored. + In this example, it reflects the **accumulated sales profit** made after the first 5 sale events. 
+ + + diff --git a/versioned_docs/version-7.1/integrations/akka.net-persistence/integrating-with-akka-persistence.mdx b/versioned_docs/version-7.1/integrations/akka.net-persistence/integrating-with-akka-persistence.mdx new file mode 100644 index 0000000000..15926e6904 --- /dev/null +++ b/versioned_docs/version-7.1/integrations/akka.net-persistence/integrating-with-akka-persistence.mdx @@ -0,0 +1,405 @@ +--- +title: "Integrating with Akka.NET Persistence" +hide_table_of_contents: true +sidebar_label: Integrating with Akka.Persistence +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Integrating with Akka.NET Persistence + + +* This article provides guidance on integrating RavenDB with Akka.Persistence. + +* In this page: + * [Overview](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#overview) + * [Akka.Persistence.RavenDB](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#akkapersistenceravendb) + * [Installing the RavenDB persistence plugin](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#installing-the-ravendb-persistence-plugin) + * [Configuring the RavenDB persistence plugin with HOCON](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#configuring-the-ravendb-persistence-plugin-with-hocon) + * [Configuration keys](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#configuration-keys) + * [Configuring the RavenDB persistence plugin with Hosting](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#configuring-the-ravendb-persistence-plugin-with-hosting) + * [Syntax](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#syntax) + + +## Overview + +**What is Akka.Net** +[Akka.NET](https://getakka.net/) is a robust set of open-source libraries for building highly concurrent, distributed, and scalable applications on the .NET platform. +It employs the message-driven actor model to simplify concurrency management, resilience and fault isolation, making it easier to develop reliable systems. + +**What is Akka.Persistence** +Akka.Persistence is a library that extends the core functionality of Akka.NET by enabling durable state management for actors. +It allows the creation of actors whose internal state can be persisted and restored after an actor has restarted. + +This durability is achieved through event sourcing, where state changes are stored as a sequence of events. +Additionally, optional snapshots can capture the state at specific points in time for quicker recovery. +Upon actor restart, the stored events and snapshots are replayed to restore the actor's internal state. + +However, simply including Akka.Persistence only allows for persisting and recovering an actor's state from Akka's default **in-memory** store. +This approach is insufficient if the entire application crashes or restarts, as the in-memory store would be lost. + +**Using a persistence database plugin** +To ensure durability across application restarts, use a dedicated plugin that allows the state to be persisted and replayed from an external database. 
+Akka.NET supports various persistence stores through a plugin model, which specifies how an actor's state is persisted and recovered. + +Ensuring that your actor’s data and any critical messages are persisted and recovered is paramount to building a reliable system. +Persistence database plugins play a crucial role by providing the necessary mechanisms to achieve this reliability. + + + +## Akka.Persistence.RavenDB + +[Akka.Persistence.RavenDB](https://github.com/ravendb/Akka.Persistence.RavenDB) is a **persistence plugin** for Akka.NET that integrates RavenDB as the durable storage backend. + +RavenDB is a NoSQL database designed for high performance, scalability, and ease of use. +Among the available plugin options, RavenDB stands out as a highly efficient and flexible choice. + +By integrating the RavenDB plugin with Akka.Persistence, you can leverage RavenDB's powerful features +to ensure that your actor state and critical messages are securely persisted and quickly recovered. + +With the RavenDB plugin your application can: + + * Persist and recover Events to/from a **Journal store**. Learn more in [Events](../../integrations/akka.net-persistence/events-and-snapshots.mdx#storing-events). + * Persist and recover Snapshots to/from a **Snapshot store**. Learn more in [Snapshots](../../integrations/akka.net-persistence/events-and-snapshots.mdx#storing-snapshots). + * Query the stored events. Learn more in [Queries](../../integrations/akka.net-persistence/queries.mdx). + + + +## Installing the RavenDB persistence plugin + +Integrate RavenDB with Akka.Persistence using one of the two available NuGet packages: + + * [Akka.Persistence.RavenDB](https://www.nuget.org/packages/Akka.Persistence.RavenDB) + This package allows you to configure the plugin solely through HOCON (Human-Optimized Config Object Notation), + which is typically embedded within your _app.config_ or _web.config_ file, or a dedicated HOCON file. + + + +{`# Installing via .NET CLI: +dotnet add package Akka.Persistence.RavenDB +`} + + + + * [Akka.Persistence.RavenDB.Hosting](https://www.nuget.org/packages/Akka.Persistence.RavenDB.Hosting) + This package includes the base _Akka.Persistence.RavenDB_, offering greater flexibility + by allowing you to configure the plugin through **Hosting** or via a **HOCON** configuration file. + Using Hosting provides a fast and easy way to set up your app and its persistence without the need to configure HOCON. + + + +{`# Installing via .NET CLI: +dotnet add package Akka.Persistence.RavenDB.Hosting +`} + + +Installing either package will also install the _Akka.Persistence_ package. + + +When configuring the plugin using both Hosting and HOCON, if the same parameters are specified in both, +the configuration provided via Hosting takes precedence and will override the corresponding HOCON settings. + + + + +## Configuring the RavenDB persistence plugin with HOCON + +* While both the journal and the snapshot-store share the same configuration keys, they reside in separate scopes. + So when configuring using HOCON, the settings for the journal and snapshot-store must be defined separately, + as shown in the example below. + +* For example, properties `urls` and `name` can have the same values for both stores, + but they must still be defined distinctly within their respective sections. + Provide different values for each store as needed. + +* The following is a sample HOCON configuration under the `` section. 
+ See the full description of each configuration key [below](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#configuration-keys). + + + +{` + + + + +`} + + + +---- + +### Configuration keys + + + +#### Journal and snapshot config keys +Predefined plugins and class names to use: + +* **journal.plugin** + The fully qualified name of the RavenDB plugin to be used for the journal store. + Value to set: `"akka.persistence.journal.ravendb"` +* **journal.ravendb.class** + The fully qualified class name for the RavenDB persistence journal actor. + Value to set: `"Akka.Persistence.RavenDb.Journal.RavenDbJournal, Akka.Persistence.RavenDb"` +* **snapshot-store.plugin** + The fully qualified name of the RavenDB plugin to be used for the snapshot store. + Value to set: `"akka.persistence.snapshot-store.ravendb"` +* **snapshot-store.ravendb.class** + The fully qualified class name for the RavenDB persistence snapshot actor. + Value to set: `"Akka.Persistence.RavenDb.Snapshot.RavenDbSnapshotStore, Akka.Persistence.RavenDb"` +Common config keys for journal and snapshot-store: + +* **plugin-dispatcher** +The dispatcher responsible for managing the thread pool and scheduling tasks for the actor. +Default: `"akka.actor.default-dispatcher"` +* **urls** +An array of server URLs where the RavenDb database is stored. +Default: No default, param must be provided. +e.g.: `["http://localhost:8080"]` +* **name** +The name of the database where the persistence data should be stored. +It is recommended to create a separate database for Akka storage, distinct from your other work databases. +Default: No default, param must be provided. +e.g.: `"MyAkkaStorageDB"` +* **auto-initialize** +Create the database if it doesn't exist. +No exception is thrown if the database already exists. +Default: `false` +* **certificate-path** +Location of a client certificate to access a secure RavenDB database. +If a password is required, it should be stored in the `RAVEN_CERTIFICATE_PASSWORD` env variable. +Default: `null` +e.g.: `"\\path\\to\\cert.pfx"` +* **save-changes-timeout** +Timeout for 'save' requests sent to RavenDB, such as writing or deleting +as opposed to stream operations which may take longer and have a different timeout (12h). +Client will fail requests that take longer than this. +Default: `30s` +* **http-version** +Http version for the RavenDB client to use in communication with the server. +Default: `"2.0"` +* **disable-tcp-compression** +Determines whether to compress the data sent in the client-server TCP communication. +Default: `false` + + + + + +#### Query config keys +* **query.ravendb.class** + The fully qualified class name for the RavenDB journal provider. + Value to set: `"Akka.Persistence.RavenDb.Query.RavenDbReadJournalProvider, Akka.Persistence.RavenDb"` +* **refresh-interval** + The interval at which to check for new ids/events. + Default: `3s` +* **max-buffer-size** + The number of events to keep buffered while querying until they are delivered downstream. + Default: `65536` + + + + +## Configuring the RavenDB persistence plugin with Hosting + +* Using Hosting, you can easily set up the RavenDB plugin during your application's startup. + +* Use method `WithRavenDbPersistence` to configure all relevant parameters. + See the available parameters and method overloads in the syntax section [below](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#syntax). 
+
+## Configuring the RavenDB persistence plugin with Hosting
+
+* Using Hosting, you can easily set up the RavenDB plugin during your application's startup.
+
+* Use method `WithRavenDbPersistence` to configure all relevant parameters.
+  See the available parameters and method overloads in the syntax section [below](../../integrations/akka.net-persistence/integrating-with-akka-persistence.mdx#syntax).
+
+* The following example shows a basic configuration using Hosting:
+
+
+
+{`// Add the following using statements:
+using Microsoft.Extensions.Hosting;
+using Akka.Hosting;
+using Akka.Persistence.Hosting;
+using Akka.Persistence.RavenDb.Hosting;
+
+namespace Raven.Documentation.Samples.Integrations.AkkaPersistence
+\{
+    class Program
+    \{
+        static void Main(string[] args)
+        \{
+            var host = new HostBuilder().ConfigureServices((context, services) =>
+            \{
+                services.AddAkka("my-actor-system-name", (builder, provider) =>
+                \{
+                    // Call 'WithRavenDbPersistence' to configure RavenDB as the persistence storage
+                    builder.WithRavenDbPersistence(
+
+                        // URL of the RavenDB server
+                        urls: new[] \{ "http://localhost:8080" \},
+
+                        // The database where the journal events and the snapshots will be persisted
+                        databaseName: "MyAkkaStorageDB",
+
+                        // Configuration will apply to both the journal and the snapshot stores
+                        mode: PersistenceMode.Both);
+                \});
+            \});
+
+            var app = host.Build();
+            app.Run();
+        \}
+    \}
+\}
+`}
+
+
+#### Syntax
+
+
+
+{`// A simple overload providing basic configuration
+// ===============================================
+
+public static AkkaConfigurationBuilder WithRavenDbPersistence(
+    this AkkaConfigurationBuilder builder,
+    // An array of server URLs where the RavenDB database is stored.
+    string[] urls,
+    // The name of the database where the persistence data should be stored.
+    // It is recommended to create a separate database for Akka storage,
+    // distinct from your other work databases.
+    string databaseName,
+    // Location of a client certificate to access a secure RavenDB database.
+    // If a password is required, it should be stored in the RAVEN_CERTIFICATE_PASSWORD env var.
+    string? certificatePath = null,
+    // Create the database if it doesn't exist.
+    bool autoInitialize = true,
+    // Determines whether this configuration will be applied to the Journal store,
+    // the Snapshot store, or both stores.
+    PersistenceMode mode = PersistenceMode.Both,
+    string pluginIdentifier = "ravendb",
+    bool isDefaultPlugin = true,
+    Action<AkkaPersistenceJournalBuilder>? journalBuilder = null)
+`}
+
+
+
+
+{`public enum PersistenceMode
+\{
+    // Sets both the akka.persistence.journal and the akka.persistence.snapshot-store to use this plugin.
+    Both,
+    // Sets ONLY the akka.persistence.journal to use this plugin.
+    Journal,
+    // Sets ONLY the akka.persistence.snapshot-store to use this plugin.
+    SnapshotStore,
+\}
+`}
+
+
+
+
+{`// These overloads allow for applying separate configurations to the Journal and Snapshot stores
+// =============================================================================================
+
+public static AkkaConfigurationBuilder WithRavenDbPersistence(
+    this AkkaConfigurationBuilder builder,
+    Action<RavenDbJournalOptions>? journalOptionConfigurator = null,
+    Action<RavenDbSnapshotOptions>? snapshotOptionConfigurator = null,
+    bool isDefaultPlugin = true)
+`}
+
+
+
+
+{`public static AkkaConfigurationBuilder WithRavenDbPersistence(
+    this AkkaConfigurationBuilder builder,
+    RavenDbJournalOptions? journalOptions = null,
+    RavenDbSnapshotOptions? snapshotOptions = null)
+`}
+
+
+
+
+{`// Use this class to define the Journal store configuration
+public class RavenDbJournalOptions
+\{
+    public string? Name \{ get; set; \}
+    public string[] Urls \{ get; set; \}
+    public string? CertificatePath \{ get; set; \}
+
+    // HTTP version for the RavenDB client to use in communication with the server
+    public Version?
HttpVersion \{ get; set; \}
+    // Determines whether to compress the data sent in the client-server TCP communication
+    public bool? DisableTcpCompression \{ get; set; \}
+    // Timeout for 'save' requests sent to RavenDB, such as writing or deleting,
+    // as opposed to stream operations, which may take longer and have a different timeout (12h).
+    // The client will fail requests that take longer than this.
+    public TimeSpan? SaveChangesTimeout \{ get; set; \}
+\}
+`}
+
+
+
+
+{`// Use this class to define the Snapshot store configuration
+public class RavenDbSnapshotOptions
+\{
+    public string? Name \{ get; set; \}
+    public string[] Urls \{ get; set; \}
+    public string? CertificatePath \{ get; set; \}
+    public Version? HttpVersion \{ get; set; \}
+    public bool? DisableTcpCompression \{ get; set; \}
+    public TimeSpan? SaveChangesTimeout \{ get; set; \}
+\}
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/integrations/akka.net-persistence/queries.mdx b/versioned_docs/version-7.1/integrations/akka.net-persistence/queries.mdx
new file mode 100644
index 0000000000..fc3ae01a1d
--- /dev/null
+++ b/versioned_docs/version-7.1/integrations/akka.net-persistence/queries.mdx
@@ -0,0 +1,338 @@
+---
+title: "Queries"
+hide_table_of_contents: true
+sidebar_label: Queries
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Queries
+
+
+
+* Akka.Persistence.Query comes with several stream-based query interfaces for querying persisted data.
+  These interfaces abstract the underlying database, allowing your application to switch persistence providers without requiring changes to the query code.
+
+* The RavenDB persistence plugin fully supports all of Akka's query interfaces.
+  Just include `Akka.Persistence.RavenDb.Query` in your application.
+
+* In this page:
+  * [Interface types](../../integrations/akka.net-persistence/queries.mdx#interface-types)
+  * [Supported interfaces](../../integrations/akka.net-persistence/queries.mdx#supported-interfaces)
+    * [IPersistenceIdsQuery & ICurrentPersistenceIdsQuery](../../integrations/akka.net-persistence/queries.mdx#ipersistenceidsquery--icurrentpersistenceidsquery)
+    * [IEventsByPersistenceIdQuery & ICurrentEventsByPersistenceIdQuery](../../integrations/akka.net-persistence/queries.mdx#ieventsbypersistenceidquery--icurrenteventsbypersistenceidquery)
+    * [IEventsByTagQuery & ICurrentEventsByTagQuery](../../integrations/akka.net-persistence/queries.mdx#ieventsbytagquery--icurrenteventsbytagquery)
+    * [IAllEventsQuery & ICurrentAllEventsQuery](../../integrations/akka.net-persistence/queries.mdx#ialleventsquery--icurrentalleventsquery)
+  * [Inner implementation details](../../integrations/akka.net-persistence/queries.mdx#inner-implementation-details)
+
+
+## Interface types
+
+Each query interface comes in two forms to allow flexible querying based on whether you need
+real-time updates (continuous) or a snapshot of the current state (current).
+
+* **Continuous Query** (e.g., `EventsByPersistenceId`):
+  This type of query continuously streams data as it is persisted.
+  It starts from a specified offset (or from the beginning if no offset is provided)
+  and keeps the stream open to deliver new data as it is added.
+
+* **Current Query** (e.g., `CurrentEventsByPersistenceId`):
+  This type of query retrieves only the data available up to the point of the query.
+  Once all current data is fetched, the stream is completed.
+  Data that is persisted after the query is completed is Not included in the stream.
+
+
+
+## Supported interfaces
+
+
+
+#### IPersistenceIdsQuery & ICurrentPersistenceIdsQuery
+
+Use these methods to retrieve the PersistenceIds of ALL actors that have persisted events to the journal store:
+
+`PersistenceIds()`
+The stream does Not complete when it reaches the end of the PersistenceIds list that currently exists in the journal store.
+Instead, it continues to push new PersistenceIds as they are added.
+
+`CurrentPersistenceIds()`
+The stream is completed immediately when it reaches the end of the result set.
+PersistenceIds that are created after the query is completed are Not included in the stream.
+
+
+
+{`// Obtain the RavenDB read journal
+// ===============================
+RavenDbReadJournal readJournal = PersistenceQuery
+    .Get(system) // system is your 'ActorSystem' param
+    .ReadJournalFor<RavenDbReadJournal>(RavenDbReadJournal.Identifier);
+
+// Issue query 'CurrentPersistenceIds' to the journal
+// ==================================================
+Source<string, NotUsed> allPersistenceIds = readJournal.CurrentPersistenceIds();
+
+// The materializer handles data flow from the persistence storage through the query pipeline
+// ==========================================================================================
+ActorMaterializer materializer = system.Materializer();
+
+// Execute the query and consume the results
+// =========================================
+allPersistenceIds.RunForeach(persistenceId =>
+\{
+    Console.WriteLine($"ActorID: \{persistenceId\}");
+\}, materializer).Wait();
+`}
+
+
+
+**Syntax**:
+
+
+
+{`public Source<string, NotUsed> PersistenceIds()
+`}
+
+
+
+
+{`public Source<string, NotUsed> CurrentPersistenceIds()
+`}
+
+
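+Because `PersistenceIds()` never completes on its own, a long-running consumer usually needs
+an external way to stop the stream. A minimal sketch using an Akka.Streams kill switch,
+assuming the `readJournal` and `materializer` shown above:
+
+
+
+{`// Attach a KillSwitch to the continuous stream so it can be stopped on demand
+var pair = readJournal.PersistenceIds()
+    .ViaMaterialized(KillSwitches.Single<string>(), Keep.Right)
+    .ToMaterialized(Sink.ForEach<string>(id => Console.WriteLine($"ActorID: \{id\}")), Keep.Both)
+    .Run(materializer);
+
+// Later, when updates are no longer needed, shut the stream down:
+pair.Item1.Shutdown();
+`}
+
+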
+
+
+
+#### IEventsByPersistenceIdQuery & ICurrentEventsByPersistenceIdQuery
+
+* Use the methods below to retrieve events that have been persisted by a specific actor.
+
+* The returned event stream is ordered by the sequence numbers of the events.
+
+`EventsByPersistenceId()`
+The stream does Not complete when it reaches the end of the currently stored events.
+Instead, it continues to push new events as they are persisted.
+
+`CurrentEventsByPersistenceId()`
+The stream is completed immediately when it reaches the end of the result set.
+Events that are stored after the query is completed are Not included in the event stream.
+
+
+
+{`RavenDbReadJournal readJournal = PersistenceQuery
+    .Get(system)
+    .ReadJournalFor<RavenDbReadJournal>(RavenDbReadJournal.Identifier);
+
+// Issue query 'CurrentEventsByPersistenceId'
+Source<EventEnvelope, NotUsed> eventsSource = readJournal
+    .CurrentEventsByPersistenceId("sales-actor", 0L, long.MaxValue);
+
+ActorMaterializer materializer = system.Materializer();
+eventsSource.RunForeach(envelope =>
+\{
+    var saleEvent = (Sale)envelope.Event;
+    Console.WriteLine($"Sale Event - Brand: \{saleEvent.Brand\}, Price: \{saleEvent.Price\}");
+\}, materializer).Wait();
+`}
+
+
+
+**Syntax**:
+
+
+
+{`public Source<EventEnvelope, NotUsed> EventsByPersistenceId(string persistenceId,
+    long fromSequenceNr,
+    long toSequenceNr)
+`}
+
+
+
+
+{`public Source<EventEnvelope, NotUsed> CurrentEventsByPersistenceId(string persistenceId,
+    long fromSequenceNr,
+    long toSequenceNr)
+`}
+
+
+
+| Parameter          | Type     | Description                                                                                               |
+|--------------------|----------|-----------------------------------------------------------------------------------------------------------|
+| **persistenceId**  | `string` | The actor's persistence ID for which to retrieve events.                                                  |
+| **fromSequenceNr** | `long`   | Retrieve events from this sequenceNr.                                                                      |
+| **toSequenceNr**   | `long`   | Retrieve events up to this sequenceNr. Use `0L` and `long.MaxValue` respectively to retrieve all events.   |
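+
+The continuous variant differs only in that the stream stays open.
+A sketch reusing the same setup as the example above:
+
+
+
+{`Source<EventEnvelope, NotUsed> liveEvents = readJournal
+    .EventsByPersistenceId("sales-actor", 0L, long.MaxValue);
+
+// Note: no .Wait() here - a continuous query never completes;
+// it keeps printing new events as they are persisted
+liveEvents.RunForeach(envelope =>
+    Console.WriteLine($"Seq #\{envelope.SequenceNr\}: \{envelope.Event\}"), materializer);
+`}
+
+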
+
+
+
+#### IEventsByTagQuery & ICurrentEventsByTagQuery
+
+* In Akka.Persistence, you can add one or more string **tags** to events.
+
+* Use the methods below to retrieve events that have a specific tag.
+  The query will be applied to all events persisted by all actors.
+  Results will include events with the specified tag, regardless of the PersistenceId they are associated with.
+
+* You can specify the change-vector of an event document as the **offset** to determine where in the event stream you want to start querying.
+  * In RavenDB, a [change-vector](../../server/clustering/replication/change-vector.mdx) is a unique identifier that represents the version of a document (an event in this case)
+    across different nodes in a distributed database.
+  * The change-vector of a document can be obtained from the Properties pane in the [Document View](../../studio/database/documents/document-view.mdx#the-document-view) in the Studio.
+
+* The returned event stream is ordered by the change-vector value of the event documents.
+
+`EventsByTag()`
+The stream does Not complete when it reaches the end of the currently stored events.
+Instead, it continues to push new events as they are persisted.
+
+`CurrentEventsByTag()`
+The stream is completed immediately when it reaches the end of the result set.
+Events that are stored after the query is completed are Not included in the event stream.
+
+
+
+{`RavenDbReadJournal readJournal = PersistenceQuery.Get(system)
+    .ReadJournalFor<RavenDbReadJournal>(RavenDbReadJournal.Identifier);
+
+// Define an offset after which to return results.
+// See the available offset options in the syntax below.
+ChangeVectorOffset cvOffset =
+    new ChangeVectorOffset("RAFT:1-hJ9jo4rRBEKs/kqNXV107Q TRXN:1169-5LEbeyPG40eQiq6fnnCthA");
+
+// Issue query 'CurrentEventsByTag'
+var eventsSource = readJournal.CurrentEventsByTag("some-tag", cvOffset);
+
+ActorMaterializer materializer = system.Materializer();
+eventsSource.RunForeach(envelope =>
+\{
+    var saleEvent = (Sale)envelope.Event;
+    Console.WriteLine($"Sale Event - Brand: \{saleEvent.Brand\}, Price: \{saleEvent.Price\}");
+\}, materializer).Wait();
+`}
+
+
+
+**Syntax**:
+
+
+
+{`public Source<EventEnvelope, NotUsed> EventsByTag(string tag, Offset offset)
+`}
+
+
+
+
+{`public Source<EventEnvelope, NotUsed> CurrentEventsByTag(string tag, Offset offset)
+`}
+
+
+
+| Parameter  | Type                 | Description                                                            |
+|------------|----------------------|------------------------------------------------------------------------|
+| **tag**    | `string`             | Retrieve only events that contain this tag.                            |
+| **offset** | `null`               | Retrieve all events from the beginning, no offset is applied.          |
+| **offset** | `Offset.NoOffset`    | Retrieve all events from the beginning, no offset is applied.          |
+| **offset** | `Offset.Sequence(0)` | Retrieve all events from the beginning, no offset is applied.          |
+| **offset** | `ChangeVectorOffset` | Provide a change-vector to retrieve events starting after this point.  |
+
+Note:
+`Offset.TimeBasedUuid` is not supported.
+`Offset.Sequence(x)` where x > 0 is not supported.
+
+
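+Tagging itself happens on the write side, before events reach the journal.
+A minimal sketch using Akka.Persistence's standard `IWriteEventAdapter`
+(the `Sale` event class is an assumption, and the adapter still has to be bound
+to the journal via the `event-adapters` / `event-adapter-bindings` HOCON keys):
+
+
+
+{`using Akka.Persistence.Journal;
+
+// Wraps every Sale event in 'Tagged' so it becomes visible to the EventsByTag queries
+public class SaleTaggingAdapter : IWriteEventAdapter
+\{
+    public string Manifest(object evt) => string.Empty;
+
+    public object ToJournal(object evt) =>
+        evt is Sale ? new Tagged(evt, new[] \{ "some-tag" \}) : evt;
+\}
+`}
+
+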
+
+
+
+#### IAllEventsQuery & ICurrentAllEventsQuery
+
+* Use the methods below to retrieve all events regardless of which PersistenceId they are associated with.
+
+* The returned event stream is ordered by the change-vector value of the event documents.
+
+`AllEvents()`
+The stream does Not complete when it reaches the end of the currently stored events.
+Instead, it continues to push new events as they are persisted.
+
+`CurrentAllEvents()`
+The stream is completed immediately when it reaches the end of the result set.
+Events that are stored after the query is completed are Not included in the event stream.
+
+
+
+{`RavenDbReadJournal readJournal = PersistenceQuery.Get(system)
+    .ReadJournalFor<RavenDbReadJournal>(RavenDbReadJournal.Identifier);
+
+// Issue query 'CurrentAllEvents'
+var eventsSource = readJournal.CurrentAllEvents(Offset.NoOffset());
+
+ActorMaterializer materializer = system.Materializer();
+eventsSource.RunForeach(envelope =>
+\{
+    var saleEvent = (Sale)envelope.Event;
+    Console.WriteLine($"Sale Event - Brand: \{saleEvent.Brand\}, Price: \{saleEvent.Price\}");
+\}, materializer).Wait();
+`}
+
+
+
+**Syntax**:
+
+
+
+{`public Source<EventEnvelope, NotUsed> AllEvents(Offset offset)
+`}
+
+
+
+
+{`public Source<EventEnvelope, NotUsed> CurrentAllEvents(Offset offset)
+`}
+
+
+
+The available options for the `offset` parameter are the same as those listed
+for the `EventsByTag` & `CurrentEventsByTag` methods above.
+
+
+
+
+
+## Inner implementation details
+
+#### Indexes
+
+To support the above queries and optimize for fast data retrieval,
+the RavenDB plugin automatically creates the following internal static indexes upon instantiation of `RavenDbReadJournal`:
+
+ * `ActorsByChangeVector`
+ * `EventsByTagAndChangeVector`
+
+#### Additional collections
+
+In addition to the _Events_ & _Snapshots_ collections, which contain the persisted data,
+the RavenDB plugin creates the following collections to keep track of actors and event metadata:
+
+ * `UniqueActors`
+   This collection stores a document for each unique actor that has persisted data.
+   Each document includes the actor's PersistenceId.
+
+ * `EventMetadatas`
+   This collection also stores a document for each unique actor that has persisted data.
+   Each document holds the sequence number of the most recent event persisted by that actor.
+
+#### Streaming queries
+
+The RavenDB plugin implements the above queries as [streaming queries](../../client-api/session/querying/how-to-stream-query-results.mdx).
+You can monitor each query sent from your client to the RavenDB server in the _Traffic Watch_ view in the Studio.
+
+![Navigate to traffic watch](./assets/navigate-to-traffic-watch.png)
+
+1. Navigate to _Manage Server > Traffic Watch_.
+2. Select **Streams** from the HTTP types dropdown.
+
+![Queries in traffic watch](./assets/queries-in-traffic-watch.png)
+
+
+
+
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/_category_.json b/versioned_docs/version-7.1/integrations/postgresql-protocol/_category_.json
new file mode 100644
index 0000000000..c53db0ee7b
--- /dev/null
+++ b/versioned_docs/version-7.1/integrations/postgresql-protocol/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 0,
+  "label": "PostgreSQL Protocol"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/collections.png b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/collections.png
new file mode 100644
index 0000000000..0c1de95b4f
Binary files /dev/null and b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/collections.png differ
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/connection-details.png b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/connection-details.png
new file mode 100644
index 0000000000..e455d42f42
Binary files /dev/null and b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/connection-details.png differ
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/credentials.png b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/credentials.png
new file mode 100644
index 0000000000..f49f3fc693
Binary files /dev/null and b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/credentials.png differ
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/get-data-button.png b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/get-data-button.png
new file mode 100644
index 0000000000..7783951d75
Binary files /dev/null and b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/get-data-button.png differ
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/retrieved-collection-data.png b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/retrieved-collection-data.png
new file mode 100644
index 0000000000..67792c8523
Binary files /dev/null and b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/retrieved-collection-data.png differ
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/rql-query-results.png b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/rql-query-results.png
new file mode 100644
index 0000000000..52e6f8d141
Binary files /dev/null and b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/rql-query-results.png differ
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/rql-query.png b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/rql-query.png
new file mode 100644
index 0000000000..9f310d882e
Binary files /dev/null and b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/rql-query.png differ
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/select-postgresql-database.png b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/select-postgresql-database.png
new file mode 100644
index 0000000000..30b621a7df
Binary files /dev/null and b/versioned_docs/version-7.1/integrations/postgresql-protocol/assets/select-postgresql-database.png differ
diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/overview.mdx
b/versioned_docs/version-7.1/integrations/postgresql-protocol/overview.mdx new file mode 100644 index 0000000000..cf17309697 --- /dev/null +++ b/versioned_docs/version-7.1/integrations/postgresql-protocol/overview.mdx @@ -0,0 +1,101 @@ +--- +title: "PostgreSQL Protocol: Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# PostgreSQL Protocol: Overview + +* RavenDB implements the PostgreSQL protocol, allowing applications and libraries that + use PostgreSQL, e.g. [Power BI](../../integrations/postgresql-protocol/power-bi.mdx), to + retrieve data from a RavenDB database. + +* To use RavenDB as a PostgreSQL server you need - + * a [license](../../start/licensing/licensing-overview.mdx) that enables the PostgreSQL Protocol. + * To explicitly enable PostgreSQL in your [settings](../../server/configuration/configuration-options.mdx). + +* [Installing](../../start/installation/setup-wizard.mdx) RavenDB as + a [secure](../../server/security/overview.mdx) server allows you to authenticate + PostgreSQL clients, granting access only to clients that provide the proper credentials. + + +* In this page: + * [Enabling PostgreSQL support](../../integrations/postgresql-protocol/overview.mdx#enabling-postgresql-support) + * [License](../../integrations/postgresql-protocol/overview.mdx#license) + * [Settings](../../integrations/postgresql-protocol/overview.mdx#settings) + * [PostgreSQL Port](../../integrations/postgresql-protocol/overview.mdx#postgresql-port) + * [Security](../../integrations/postgresql-protocol/overview.mdx#security) + +## Enabling PostgreSQL support + +### License + +* Your RavenDB license determines which features are available for your server. +* Visit Studio's [About](../../start/licensing/licensing-overview.mdx#manage-license-view) + page to find which features are included in your license. +* PostgreSQL is enabled for all licenses. +* To [use Power BI with RavenDB as its PostgreSQL server](../../integrations/postgresql-protocol/power-bi.mdx), + your license must explicitly enable Power BI. + If your current license doesn't include Power BI Support, you can acquire one that does [here](https://ravendb.net/buy). +### Settings + +* PostgreSQL protocol support must be explicitly enabled in your [settings](../../server/configuration/configuration-options.mdx#settingsjson). + Add this line to your server's `settings.json` file to enable the PostgreSQL protocol: + + +{`"Integrations.PostgreSQL.Enabled": true +`} + + +* PostgreSQL is an experimental feature. To enable it, enable RavenDB's + [Experimental Features](../../server/configuration/core-configuration.mdx#featuresavailability) + by adding this line to your server's `settings.json` file: + + +{`"Features.Availability": "Experimental" +`} + + +### PostgreSQL Port + +* To access RavenDB, your clients need not only its **URL** but also its + PostgreSQL **Port** number. + By default, the port number is *5433*. 
+* To use a different port, add the following line to your settings.json file, with a port number + of your choice: + + +{`"Integrations.PostgreSQL.Port": 5433 +`} + + + + + +## Security + +Allowing just any client to connect to your database (via PostgreSQL or otherwise) +without authentication is risky, and should in general be avoided. + +If RavenDB is not set as a secure server, it will require no authentication over the PostgreSQL protocol. + + +To allow access only for authorized clients - + +* Set RavenDB as a [Secure Server](../../server/security/overview.mdx). + This will allow RavenDB to authenticate PostgreSQL clients, in addition + to many other security measures this setup provides. +* Create [PostgreSQL Credentials](../../studio/database/settings/integrations.mdx) using RavenDB Studio. + PostgreSQL credentials are a **user name** and a **password**, that a client + would have to provide in order to access the database. + + + diff --git a/versioned_docs/version-7.1/integrations/postgresql-protocol/power-bi.mdx b/versioned_docs/version-7.1/integrations/postgresql-protocol/power-bi.mdx new file mode 100644 index 0000000000..6c95597bbf --- /dev/null +++ b/versioned_docs/version-7.1/integrations/postgresql-protocol/power-bi.mdx @@ -0,0 +1,116 @@ +--- +title: "PostgreSQL Protocol: Power BI" +hide_table_of_contents: true +sidebar_label: Power BI +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# PostgreSQL Protocol: Power BI + + +* The [Power BI](https://en.wikipedia.org/wiki/Microsoft_Power_BI) Desktop and + Online services can use RavenDB as a PostgreSQL server and retrieve data from it. + +* See below how to use Power BI Desktop to - + * Easily select RavenDB collections and retrieve chosen data. + * Query RavenDB using [RQL](../../client-api/session/querying/what-is-rql.mdx). + +* To use RavenDB with Power BI, your [license](../../integrations/postgresql-protocol/overview.mdx#license) + must explicitly enable **Power BI** Support. + +* In this page: + * [Using RavenDB From Power BI Desktop](../../integrations/postgresql-protocol/power-bi.mdx#using-ravendb-from-power-bi-desktop) + * [Connect to RavenDB](../../integrations/postgresql-protocol/power-bi.mdx#connect-to-ravendb) + * [Retrieve Collections Data](../../integrations/postgresql-protocol/power-bi.mdx#retrieve-collections-data) + * [Query RavenDB Using RQL](../../integrations/postgresql-protocol/power-bi.mdx#query-ravendb-using-rql) + + +## Using RavenDB From Power BI Desktop +### Connect to RavenDB + +!["Get Data"](./assets/get-data-button.png) + +* Click "Get Data" from Power BI Desktop's startup wizard or menu option. +!["Select PostgreSQL database"](./assets/select-postgresql-database.png) + +* Select the **PostgreSQL database** option and click **Connect**. +!["Connection Details"](./assets/connection-details.png) + +* **Server** + Enter RavenDB's **URL** and **PostgreSQL port number**. + * Enter the URL and port number in the form: **Hostname:Port** + E.g. - **`a.ravenpostgresql.development.run:5433`** + * Do **not** include the "https://" prefix in the URL. + * RavenDB's PostgreSQL port number is by default 5433, and is [configurable](../../integrations/postgresql-protocol/overview.mdx#postgresql-port). 
+* **Database**
+  Enter the name of the RavenDB database you want to retrieve data from.
+* **Data Connectivity mode**
+  Select the **Import** data connectivity mode.
+
+!["Credentials"](./assets/credentials.png)
+
+* Provide the [credentials](../../studio/database/settings/integrations.mdx) (user name & password)
+  required by RavenDB to authenticate your Power BI client, and click **Connect**.
+
+### Retrieve Collections Data
+
+The database's collections & documents will be shown once RavenDB is connected.
+
+!["Collections"](./assets/collections.png)
+
+* Select the collection(s) whose data you want to retrieve, and click **Load** or **Transform**.
+
+!["Retrieved Collection Data"](./assets/retrieved-collection-data.png)
+
+* Your data is loaded, and you can work with it as you wish.
+
+### Query RavenDB Using RQL
+
+Instead of loading collections in their entirety, you can run [RQL](../../client-api/session/querying/what-is-rql.mdx) queries
+to import into Power BI just the data you're looking for.
+
+!["RQL Query"](./assets/rql-query.png)
+
+* **Server**
+  Enter RavenDB's **URL** and **PostgreSQL port number**.
+  * Enter the URL and port number in the form: **URL:Port**
+    E.g. - **`a.ravenpostgresql.development.run:5433`**
+  * Do **not** include the "https://" prefix in the URL.
+  * RavenDB's PostgreSQL port number is 5433 by default, and is [configurable](../../integrations/postgresql-protocol/overview.mdx#postgresql-port).
+* **Database**
+  Enter the name of the RavenDB database you want to retrieve data from.
+* **Data Connectivity mode**
+  Select the **Import** data connectivity mode.
+* **Advanced options**
+  * Open **Advanced options**.
+  * Enter your RQL query into the **SQL Statement** field.
+
+
+  * The [PostgreSQL](../../integrations/postgresql-protocol/overview.mdx) library
+    that Power BI uses to transfer your query to RavenDB interprets the `;`
+    symbol as an instruction to split the query.
+    To avoid splitting the query, please avoid using this symbol in it.
+  * RavenDB queries can include **JavaScript** code, where `;`
+    is normally a valid operator.
+    However, to avoid splitting your query, please avoid using the `;`
+    operator in JavaScript code as well.
+    Using `;` is optional in JavaScript, and omitting it will have no effect on your code.
+  * RavenDB will throw the following exception if an erroneous query is likely
+    to have been split:
+    **Unhandled query (Are you using ; in your query? That is likely causing
+    the Postgres client to split the query and results in partial queries)**
+
+
+  * Click **OK**.
+
+!["RQL Query Results"](./assets/rql-query-results.png)
+
+* Only the fields resulting from the RQL query will be imported to Power BI.
+* One notable field is the rightmost `json()` field; it holds any irregular
+  data items that don't fit into one of the otherwise regular JSON arrays.
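+
+Power BI is only one of many PostgreSQL clients; any client library can send the same RQL.
+A hypothetical sketch using the Npgsql driver, in which the host, database name, and
+credentials are all placeholders:
+
+
+
+{`using Npgsql;
+
+// Connect to RavenDB's PostgreSQL endpoint (no "https://" prefix in the host)
+var connString = "Host=a.ravenpostgresql.development.run;Port=5433;" +
+                 "Database=MyDatabase;Username=my-user;Password=my-password";
+await using var conn = new NpgsqlConnection(connString);
+await conn.OpenAsync();
+
+// The statement is plain RQL, exactly as in Power BI's "SQL Statement" field
+await using var cmd = new NpgsqlCommand("from Orders where Freight > 10", conn);
+await using var reader = await cmd.ExecuteReaderAsync();
+while (await reader.ReadAsync())
+    Console.WriteLine(reader[0]);
+`}
+
+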
+
+
+
diff --git a/versioned_docs/version-7.1/integrations/terraform.mdx b/versioned_docs/version-7.1/integrations/terraform.mdx
new file mode 100644
index 0000000000..7434f9b824
--- /dev/null
+++ b/versioned_docs/version-7.1/integrations/terraform.mdx
@@ -0,0 +1,266 @@
+---
+title: "Terraform"
+hide_table_of_contents: true
+sidebar_label: Terraform Deployment
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Terraform
+
+
+* [Terraform](https://www.terraform.io/intro/index.html) is an
+  [IaC](https://en.wikipedia.org/wiki/Infrastructure_as_code) (Infrastructure
+  as Code) **provisioning tool**.
+  To provision a desired infrastructure, define it in a
+  Terraform configuration file and apply the configuration using the Terraform application.
+
+* RavenDB nodes can be installed and removed via Terraform both **on-premises**
+  and **in the cloud**.
+
+* Using Terraform to automate the deployment of RavenDB nodes and
+  clusters can, over time, save a lot of manual supervision effort.
+  The Terraform configuration represents the infrastructure clearly
+  and concisely, and can always be used to review or easily modify it.
+
+* In this page:
+  * [Prerequisites](../integrations/terraform.mdx#prerequisites)
+  * [Prepare Terraform Configuration](../integrations/terraform.mdx#prepare-terraform-configuration)
+    * [Provider](../integrations/terraform.mdx#provider)
+    * [Terraform Local Parameters](../integrations/terraform.mdx#terraform-local-parameters)
+    * [RavenDB Resource](../integrations/terraform.mdx#ravendb-resource)
+    * [Terraform Output Values](../integrations/terraform.mdx#terraform-output-values)
+  * [Apply Terraform Configuration](../integrations/terraform.mdx#apply-terraform-configuration)
+
+## Prerequisites
+
+To deploy RavenDB using Terraform, you need:
+
+* **RavenDB license**
+  * See [here](https://ravendb.net/buy) which features are supported by each RavenDB
+    license, and acquire the product that suits your needs.
+  * The license you acquire will be provided to RavenDB via the
+    [Terraform configuration file](../integrations/terraform.mdx#prepare-terraform-configuration)
+    to validate your product and determine which of its features are enabled.
+
+* **Hosts**
+  RavenDB can be hosted by both **on-premises servers** and **cloud instances**.
+  * Make sure you have the IP addresses and SSH keys of the servers or cloud instances
+    intended to host RavenDB nodes.
+  * Cloud providers that support Terraform are listed in Terraform's
+    [providers list](https://registry.terraform.io/browse/providers).
+
+* **Terraform Configuration File**
+  * Learn [here](https://www.terraform.io/docs/language/index.html) the
+    basics of Terraform's configuration language.
+  * Read [below](../integrations/terraform.mdx#prepare-terraform-configuration)
+    how to set up the configuration file to provision RavenDB.
+
+* **Terraform Application**
+  The Terraform application is executed via CLI to apply the infrastructure configuration.
+  * Download the application [here](https://www.terraform.io/downloads.html).
+  * Learn to use it [here](https://learn.hashicorp.com/tutorials/terraform/install-cli).
+
+
+
+## Prepare Terraform Configuration
+
+
+This section explains how Terraform handles configuration files
+in general, and how to create a RavenDB configuration file.
+For a more comprehensive understanding of Terraform and its various
+options, please consult the official Terraform documentation.
+
+
+* A Terraform configuration file is defined in a simple [declarative language](https://www.terraform.io/docs/language/index.html).
+  * The configuration file defines your **desired infrastructure topology**.
+    It does **not** specify the **actions** required to apply this infrastructure.
+
+* When the configuration file is applied, Terraform will -
+  * Compare the current infrastructure state with your design.
+  * Figure out what actions need to be taken and create an **execution plan**.
+  * **Create, Update, and Destroy** infrastructure resources according to
+    the execution plan, until the infrastructure's state matches
+    the requested configuration.
+
+### Provider
+
+Use the `provider` object to **set RavenDB as the provider**.
+
+Note that the **version number** relates to the RavenDB Terraform
+Provider version, **not** to the RavenDB product version (which is
+defined in the [resource](../integrations/terraform.mdx#ravendb-resource)
+block's **package** field).
+
+
+
+{`provider "ravendb" \{
+  version = "1.0.2"
+\}
+`}
+
+
+### Terraform Local Parameters
+
+Use the `locals` block to **define local configuration properties**
+that your resource definitions can refer to, including:
+
+
+
+{`locals \{
+  # Node Tags
+  # The tags that will be given to cluster nodes.
+  nodes = toset([
+    "a", "b", "c"
+  ])
+
+  # Hosts IPs
+  # IP addresses of host servers that RavenDB cluster nodes will be deployed to
+  hosts = [
+    "3.95.238.149",
+    "3.87.248.150",
+    "3.95.220.189"
+  ]
+
+  # Node IPs
+  # For an Unsecure Setup
+  ravendb_nodes_urls_unsecure = [
+    "http://3.95.238.149:8080",
+    "http://3.87.248.150:8080",
+    "http://3.95.220.189:8080"
+  ]
+
+  # Node Addresses
+  # For a Secure Setup
+  ravendb_nodes_urls_secure = [
+    "https://a.domain.development.run",
+    "https://b.domain.development.run",
+    "https://c.domain.development.run"
+  ]
+\}
+`}
+
+
+### RavenDB Resource
+
+Use the `resource` block to define your RavenDB node, its hosts, and its properties.
+Terraform will use this resource to create your cluster nodes.
+
+
+See the full list of available properties [here](https://github.com/ravendb/terraform-provider-ravendb#inputs),
+including mandatory properties (also listed in the example below) and optional ones.
+
+
+
+{`resource "ravendb_server" "server" \{
+
+  # Host IP addresses (see Terraform Local Parameters above) (Required)
+  # Type: list
+  hosts = local.hosts
+
+  # RavenDB Database Name.
If the database doesn't exist, it will be created (Optional) + # Type: string + database = "sampleDB" + + # Setup Type (Optional) + # false => Secure RavenDB server (Recommended) + # true => Unsecure RavenDB server (Not Recommended) + # Type: bool + unsecured = false + + # The path to a setup zip file used by RavenDB for a secure cluster setup (Optional) + # Type: string + cluster_setup_zip = "/path/to/cluster/setup.zip" + package \{ + # RavenDB version (Required) + # Type: string + version = "6.0.1" + + # Processor Architecture (Optional) + # Type: string + arch = "arm64" + + # Ubuntu Version (Optional) + # Type: string + UbuntuVersion = "20.04" + \} + + url \{ + # Nodes URLs (see Terraform Local Parameters above) (Required) + # Type: list(string) + list = local.ravendb_nodes_urls_secure + + # HTTP port (Optional) + # Type: int + http_port = 8080 + + # TCP port (Optional) + # Type: int + tcp_port = 38880 + \} + + # Path to RavenDB product license (Required) + # Type: filebase64 + license = filebase64("/path/to/license.json") + + # Settings defined here will override settings set by settings.json + # Optional + settings_override = \{ + "Indexing.MapBatchSize": 16384 + \} + + # Paths to files you want to upload to the server for future usage (Optional) + # Left side of the equation: Server path (absolute) to load to + # Right side of the equation: Original file path (absolute) + assets = \{ + "/path/to/file/file_name.extension" = filebase64("/path/to/file_name.extension") + \} + + # A User name and a path to an Access Key to your server (Required) + ssh \{ + user = "ubuntu" + pem = filebase64("/path/to/server.pem") + \} +\} +`} + + +### Terraform Output Values +Defining [Output Values](https://www.terraform.io/docs/language/values/outputs.html) +makes Terraform return values you're interested in after applying your configuration. + + + +{`# Return a list of installed RavenDB instances +output "public_instance_ips" \{ + value = local.list +\} + +# Verify that a database with the defined name exists +output "database_name" \{ + value = ravendb_server.server.database +\} +`} + + + + + +## Apply Terraform Configuration + +To apply your configuration, pass your configuration file to the +Terraform application via CLI. + +* Learn [here](https://www.terraform.io/docs/cli/commands/plan.html) + to make the application parse your configuration and create an execution plan. +* Learn [Here](https://www.terraform.io/docs/cli/commands/apply.html) + to execute the actions proposed in the execution plan. +* Learn [Here](https://www.terraform.io/docs/cli/commands/destroy.html) + to destroy your infrastructure (e.g. if you created it as + a temporary testing environment). 
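+
+In practice, this cycle is driven from the command line. A typical workflow sketch,
+assuming the configuration above is saved as `main.tf` in the current working directory:
+
+
+
+{`terraform init      # download the providers referenced by the configuration
+terraform plan      # parse the configuration and preview the execution plan
+terraform apply     # execute the plan and provision the infrastructure
+terraform destroy   # tear the provisioned infrastructure down
+`}
+
+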
+
+
+
diff --git a/versioned_docs/version-7.1/migration/_category_.json b/versioned_docs/version-7.1/migration/_category_.json
new file mode 100644
index 0000000000..88231dc12d
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 8,
+  "label": "Migration Guide"
+}
diff --git a/versioned_docs/version-7.1/migration/client-api/_category_.json b/versioned_docs/version-7.1/migration/client-api/_category_.json
new file mode 100644
index 0000000000..33bc7907c3
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/client-api/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 0,
+  "label": "Client API"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/migration/client-api/client-breaking-changes.mdx b/versioned_docs/version-7.1/migration/client-api/client-breaking-changes.mdx
new file mode 100644
index 0000000000..6f0e98362c
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/client-api/client-breaking-changes.mdx
@@ -0,0 +1,232 @@
+---
+title: "Migration: Client Breaking Changes"
+hide_table_of_contents: true
+sidebar_label: Client Breaking Changes
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Migration: Client Breaking Changes
+
+The features listed on this page were available in former RavenDB versions.
+In RavenDB `7.0`, they are either unavailable or their behavior is inconsistent
+with their behavior in previous versions.
+
+* In this page:
+  * [Subscription creation overload modification](../../migration/client-api/client-breaking-changes.mdx#subscription-creation-overload-modification)
+  * [HTTP-Compression algorithm is now `Zstd` by default](../../migration/client-api/client-breaking-changes.mdx#http-compression-algorithm-is-now-zstd-by-default)
+  * [Bulk-insert Compression is now Enabled by default](../../migration/client-api/client-breaking-changes.mdx#bulk-insert-compression-is-now-enabled-by-default)
+  * [Removed irrelevant `SingleNodeBatchCommand` parameters](../../migration/client-api/client-breaking-changes.mdx#removed-irrelevant-singlenodebatchcommand-parameters)
+  * [Removed obsolete methods](../../migration/client-api/client-breaking-changes.mdx#removed-obsolete-methods)
+  * [`FromEtl` is now internal](../../migration/client-api/client-breaking-changes.mdx#frometl-is-now-internal)
+
+
+## Subscription creation overload modification
+
+* In RavenDB versions earlier than **7.0**, the `Create` method overload that accepted a predicate also allowed specifying a query through `SubscriptionCreationOptions`,
+  which could cause errors and confusion.
+* To eliminate this ambiguity, starting from **7.0**, the `Create` overload for predicate-based subscriptions now accepts `PredicateSubscriptionCreationOptions`,
+  which no longer includes a `Query` property.
+* Refer to the [Subscription creation API overview](../../client-api/data-subscriptions/creation/api-overview.mdx) for the complete list of available `Create` method overloads.
+
+
+
+
+{`// The create overload using a predicate:
+// ======================================
+string Create<T>(Expression<Func<T, bool>> predicate = null,
+    PredicateSubscriptionCreationOptions options = null,
+    string database = null);
+
+Task<string> CreateAsync<T>(Expression<Func<T, bool>> predicate = null,
+    PredicateSubscriptionCreationOptions options = null,
+    string database = null,
+    CancellationToken token = default);
+
+// The options class:
+// ==================
+public sealed class PredicateSubscriptionCreationOptions
+{
+    public string Name { get; set; }
+    public string ChangeVector { get; set; }
+    public string MentorNode { get; set; }
+    public bool Disabled { get; set; }
+    public bool PinToMentorNode { get; set; }
+    public ArchivedDataProcessingBehavior? ArchivedDataProcessingBehavior { get; set; }
+}
+`}
+
+
+
+
+{`// The create overload using a predicate:
+// ======================================
+string Create<T>(Expression<Func<T, bool>> predicate = null,
+    SubscriptionCreationOptions options = null,
+    string database = null);
+
+Task<string> CreateAsync<T>(Expression<Func<T, bool>> predicate = null,
+    SubscriptionCreationOptions options = null,
+    string database = null,
+    CancellationToken token = default);
+
+// The options class:
+// ==================
+public class SubscriptionCreationOptions
+{
+    public string Name { get; set; }
+    public string Query { get; set; }
+    public string ChangeVector { get; set; }
+    public string MentorNode { get; set; }
+    public virtual bool Disabled { get; set; }
+    public virtual bool PinToMentorNode { get; set; }
+    public ArchivedDataProcessingBehavior? ArchivedDataProcessingBehavior { get; set; }
+}
+`}
+
+
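+
+A usage sketch of the `7.0` overload, assuming a hypothetical `Order` entity and an
+initialized `DocumentStore`:
+
+
+
+{`// Create a subscription for orders matching the predicate
+string subscriptionName = store.Subscriptions.Create<Order>(
+    predicate: order => order.Freight > 10,
+    options: new PredicateSubscriptionCreationOptions
+    \{
+        Name = "high-freight-orders"
+    \});
+`}
+
+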
+
+
+
+
+## HTTP-Compression algorithm is now `Zstd` by default
+
+From RavenDB `7.0` on, the default HTTP compression algorithm is `Zstd` (instead of `Gzip`, used in earlier versions).
+
+
+
+Clients can switch to a different HTTP-Compression algorithm using `DocumentStore`'s
+[DocumentConventions.HttpCompressionAlgorithm](../../client-api/configuration/conventions.mdx#httpcompressionalgorithm) convention.
+
+
+
+{`var conventions = new DocumentConventions
+\{
+    // Switch HTTP compression algorithm
+    HttpCompressionAlgorithm = HttpCompressionAlgorithm.Gzip
+\};
+`}
+
+
+
+
+
+If you migrate from an earlier RavenDB version to version `7.0` or higher,
+please note the [potential significance of this change](../../migration/client-api/client-migration.mdx#client-migration-to-ravendb-7x).
+
+
+
+
+## Bulk-insert Compression is now Enabled by default
+
+Compression is now [Enabled by default for bulk-insert operations](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#section).
+
+
+
+{`CompressionLevel DefaultCompressionLevel = CompressionLevel.Fastest;
+`}
+
+
+
+
+
+Clients can switch to a different bulk-insert compression state using `Store`'s
+[BulkInsertOptions.CompressionLevel](../../client-api/bulk-insert/how-to-work-with-bulk-insert-operation.mdx#bulkinsertoptions) option.
+
+
+
+{`using (var bulk = store.BulkInsert(new BulkInsertOptions
+\{
+    // Disable bulk-insert compression
+    CompressionLevel = CompressionLevel.NoCompression
+\}));
+`}
+
+
+
+
+
+
+
+## Removed irrelevant `SingleNodeBatchCommand` parameters
+
+We removed the parameters that are mainly used internally from
+[SingleNodeBatchCommand](../../client-api/commands/batches/how-to-send-multiple-commands-using-a-batch.mdx)'s
+definition and kept only those relevant to the user.
+
+
+
+
+
+{`public SingleNodeBatchCommand
+    (DocumentConventions conventions,
+    IList<ICommandData> commands,
+    BatchOptions options = null)
+`}
+
+
+
+
+{`public SingleNodeBatchCommand
+    (DocumentConventions conventions,
+    JsonOperationContext context,
+    IList<ICommandData> commands,
+    BatchOptions options = null,
+    TransactionMode mode = TransactionMode.SingleNode)
+`}
+
+
+
+
+
+
+
+## Removed obsolete methods
+
+The following methods are no longer used and have been removed from RavenDB `7.0`.
+
+* `NextPageStart`
+
+
+{`public int NextPageStart \{ get; set; \}
+`}
+
+
+
+* `GenerateEntityIdOnTheClient`
+
+
+{`public GenerateEntityIdOnTheClient(DocumentConventions conventions,
+    Func<object, string> generateId)
+`}
+
+
+
+* `InMemoryDocumentSessionOperations.GenerateId`
+
+
+{`protected override string GenerateId(object entity)
+`}
+
+
+
+* `InMemoryDocumentSessionOperations.GetOrGenerateDocumentIdAsync`
+
+
+{`protected async Task<string> GetOrGenerateDocumentIdAsync(object entity)
+`}
+
+
+
+
+
+## `FromEtl` is now internal
+
+The `CounterBatch` class's `FromEtl` property is now **internal**.
+`FromEtl` is used internally to get or set a value indicating whether a counters batch originated from an ETL process.
+
+
diff --git a/versioned_docs/version-7.1/migration/client-api/client-migration.mdx b/versioned_docs/version-7.1/migration/client-api/client-migration.mdx
new file mode 100644
index 0000000000..a5c63f8504
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/client-api/client-migration.mdx
@@ -0,0 +1,37 @@
+---
+title: "Client Migration"
+hide_table_of_contents: true
+sidebar_label: Client Migration
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Client Migration
+
+
+* In this page:
+  * [Client migration to RavenDB `7.x`](../../migration/client-api/client-migration.mdx#client-migration-to-ravendb-7x)
+
+
+## Client migration to RavenDB `7.x`
+
+Prior to version `7.0`, our default HTTP compression algorithm was `Gzip`.
+From version `7.0` on, our default HTTP compression algorithm is `Zstd`.
+
+Whether a `7.0` client can connect to a given server depends on the
+**server's version** and on the **compression algorithm the client uses**.
+
+* Connecting your client to a server of version `6.0` or higher presents no problem.
+* But if you want to connect the client to a server of version `5.4` or earlier,
+  you must switch the client algorithm back to `Gzip` for the connection to succeed.
+
+  [See how to switch the algorithm to Gzip](../../migration/client-api/client-breaking-changes.mdx#http-compression-algorithm-is-now-zstd-by-default)
+
+
+
diff --git a/versioned_docs/version-7.1/migration/embedded/_category_.json b/versioned_docs/version-7.1/migration/embedded/_category_.json
new file mode 100644
index 0000000000..0f02296b0b
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/embedded/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 2,
+  "label": "Embedded"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/migration/embedded/testdriver-breaking-changes.mdx b/versioned_docs/version-7.1/migration/embedded/testdriver-breaking-changes.mdx
new file mode 100644
index 0000000000..380e5e645d
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/embedded/testdriver-breaking-changes.mdx
@@ -0,0 +1,73 @@
+---
+title: "Embedded Server: TestDriver Breaking Changes"
+hide_table_of_contents: true
+sidebar_label: TestDriver Breaking Changes
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Embedded Server: TestDriver Breaking Changes
+
+The features listed on this page were available in former RavenDB versions.
+In RavenDB `6.2.x`, they are either unavailable or their behavior is inconsistent
+with their behavior in previous versions.
+
+* In this page:
+  * [Unlicensed TestDriver throws an exception](../../migration/embedded/testdriver-breaking-changes.mdx#unlicensed-testdriver-throws-an-exception)
+
+
+## Unlicensed TestDriver throws an exception
+
+### Background:
+
+The [RavenDB.TestDriver](https://www.nuget.org/packages/RavenDB.TestDriver/) package
+allows users to create [unit tests](../../start/test-driver.mdx) for their applications,
+and run the tests using an [embedded server](../../server/embedded.mdx) included in the package.
+
+Like other types of RavenDB server, the features that an embedded server supports
+and the resources it can use are defined by its [license](https://ravendb.net/buy).
+An unlicensed server, for example, can use only 3 CPU cores, while a server
+licensed using a [free developers license](https://ravendb.net/buy#developer)
+can use up to 9 cores and run much faster.
+
+* When a RavenDB server starts, its license is validated.
+  * If the validation succeeds, the server will run and offer the capabilities defined
+    by its license.
+  * If the validation fails, the server may still run but limit its capabilities to those
+    defined by the basic [AGPL](https://ravendb.net/legal/ravendb/commercial-license-eula)
+    license.
+
+  If the validation fails because the license expired, and the expiration date precedes
+  the server build date, the server will not start at all.
+
+
+* A `TestServerOptions.Licensing.ThrowOnInvalidOrMissingLicense` configuration option
+  has been available since RavenDB `5.4`; it determines whether to throw a `LicenseExpiredException`
+  exception if TestDriver uses an unlicensed embedded server.
+  * If `ThrowOnInvalidOrMissingLicense` is set to **`true`** and the validation fails,
+    a `LicenseExpiredException` exception will be thrown to **warn TestDriver users**
+    that in the absence of a valid license, their server's capabilities are limited and they
+    may therefore miss out on much of their system's potential.
+  * If the configuration option is set to **`false`**, **no exception will be thrown**
+    even if a license cannot be validated.
+
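+A sketch of how the flag is typically set from a test class (the class name is an
+assumption; `ConfigureServer` comes with the TestDriver's `RavenTestDriver` base class):
+
+
+
+{`public class MyRavenTests : RavenTestDriver
+\{
+    static MyRavenTests()
+    \{
+        // Opt out of the LicenseExpiredException for unlicensed embedded servers
+        ConfigureServer(new TestServerOptions
+        \{
+            Licensing =
+            \{
+                ThrowOnInvalidOrMissingLicense = false
+            \}
+        \});
+    \}
+\}
+`}
+
+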
+### The breaking change:
+
+Up until RavenDB version `6.0`, we set `TestServerOptions.Licensing.ThrowOnInvalidOrMissingLicense`
+to **`false`** by default, so no exception would be thrown even if license validation failed.
+For an exception to be thrown, users needed to change the flag to **`true`** on their own initiative.
+
+In version `6.2`, the default value for this configuration option **changed** to **`true`**;
+a `LicenseExpiredException` exception **is** thrown if the embedded server used by
+TestDriver fails to validate a license.
+
+Users who prefer that no exception be thrown when an unlicensed embedded server is
+used can set `TestServerOptions.Licensing.ThrowOnInvalidOrMissingLicense` to **`false`**.
+
+
+
diff --git a/versioned_docs/version-7.1/migration/server/_category_.json b/versioned_docs/version-7.1/migration/server/_category_.json
new file mode 100644
index 0000000000..9a592ca802
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/server/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 1,
+  "label": "Server"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/migration/server/data-migration.mdx b/versioned_docs/version-7.1/migration/server/data-migration.mdx
new file mode 100644
index 0000000000..1868895dd2
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/server/data-migration.mdx
@@ -0,0 +1,112 @@
+---
+title: "Data Migration"
+hide_table_of_contents: true
+sidebar_label: Data migration
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Data Migration
+
+
+* In this page:
+  * [Migration to RavenDB `7.x`](../../migration/server/data-migration.mdx#migration-to-ravendb-7x)
+  * [Migration from RavenDB 5.x to 6.x](../../migration/server/data-migration.mdx#migration-from-ravendb-5x-to-6x)
+  * [Migration from RavenDB 4.x to RavenDB 5.x and 6.x](../../migration/server/data-migration.mdx#migration-from-ravendb-4x-to-ravendb-5x-and-6x)
+  * [Migration from RavenDB 3.x](../../migration/server/data-migration.mdx#migration-from-ravendb-3x)
+  * [Migrating data into a sharded database](../../migration/server/data-migration.mdx#migrating-data-into-a-sharded-database)
+
+
+## Migration to RavenDB `7.x`
+
+
+
+Starting with version `7.0`, RavenDB incorporates the
+[NLog logging framework](../../server/troubleshooting/logging.mdx) and writes all log
+data through it.
+
+Logging settings applied in earlier RavenDB versions are respected by RavenDB `7.x`,
+and logging should continue under these settings without interference after the migration.
+
+If you want to use NLog-specific features, though, you will have to address a different set
+of settings that NLog requires.
+You can [learn more here about migration and the new logging system](../../server/troubleshooting/logging.mdx#customize-after-migration).
+
+
+
+
+From RavenDB `7.0` on, the default HTTP compression algorithm is `Zstd`.
+Earlier versions used `Gzip`.
+
+* If your current server version is `6.0` or higher, the compression algorithm
+  will present no problem while connecting it to a server of version `7.0` and
+  migrating your data.
+ +* If your current server version is `5.4` or earlier, attempting to connect it + to a server that uses the `Zstd` compression algorithm will fail. + For the connection to succeed, you need to: + 1. Temporarily switch the target version `7.0` server compression algorithm to `Gzip`. + Do this by defining a `RAVEN_HTTP_COMPRESSION_ALGORITHM` environment variable on + the `7.0` server machine and setting its value to `Gzip`, and restarting the server. + 2. Connect your current server to the new server and perform the migration. + 3. When the new server is updated, remove the environment variable and restart the server. + + + + + +## Migration from RavenDB 5.x to 6.x + +* RavenDB `6.x` supports in-place data migration from RavenDB `5.x`. +* RavenDB `5.x` product licenses **do not apply** to RavenDB `6.x`. + To upgrade a valid `5.x` license to a RavenDB `6.x` license, + please use the **License upgrade tool** [as explained here](../../start/licensing/replace-license.mdx#upgrade-a-license-key-for-ravendb-6x). + + +Please note that once upgraded, RavenDB `6.x` cannot be downgraded to version `5.x`, +and the migrated data will no longer be accessible via RavenDB `5.x`. +**Please create a backup of your data before migrating.** + + + + +## Migration from RavenDB 4.x to RavenDB 5.x and 6.x + +* RavenDB `5.x` supports in-place data migration from RavenDB `4.x`. + + Upgrading directly from version `4.x` to `6.x` is possible, + but it is recommended to upgrade RavenDB `4.x` to `5.x` first, + and then proceed with an upgrade from version `5.x` to `6.x`. + +* RavenDB `4.x` product licenses **do not apply** to RavenDB `6.x`. + To upgrade a valid `4.x` license to a RavenDB `6.x` license, + please use the **License upgrade tool** [as explained here](../../start/licensing/replace-license.mdx#upgrade-a-license-key-for-ravendb-6x). + + +Please note that once upgraded, RavenDB `6.x` cannot be downgraded to version `4.x`, +and data migrated to `5.x` or `6.x` will no longer be accessible via RavenDB `4.x`. +**Please create a backup of your data before migrating.** + + + + +## Migration from RavenDB 3.x + +* The information above relates only to data migration from RavenDB `4.x` to `5.x`/`6.x` and from `5.x` to `6.x`. +* If you want to migrate your data from a RavenDB version earlier than `4.x`, + please read the dedicated article [here](https://ravendb.net/docs/article-page/4.2/csharp/migration/server/data-migration). + + + +## Migrating data into a sharded database + +If you want to migrate your data to a [sharded](../../sharding/overview.mdx) database (supported by RavenDB `6.0` and above), +please read the related article [here](../../sharding/migration.mdx). 
+
+
diff --git a/versioned_docs/version-7.1/migration/server/docker.mdx b/versioned_docs/version-7.1/migration/server/docker.mdx
new file mode 100644
index 0000000000..de8cde614c
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/server/docker.mdx
@@ -0,0 +1,108 @@
+---
+title: "Migration: Migrating from Docker Image 5.x or lower to 6.0 or higher"
+hide_table_of_contents: true
+sidebar_label: Docker
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Migration: Migrating from Docker Image 5.x or lower to 6.0 or higher
+
+
+* Starting from version `6.0`, the RavenDB Docker image introduces an
+  improved security model, using a dedicated user rather than `root`.
+* RavenDB `6.0` and up also use a Debian archive file
+  ([.deb package](../../start/installation/gnu-linux/deb.mdx)),
+  applying a uniform internal structure for Ubuntu OS platforms.
+* To conform with these changes, installing RavenDB `6.0` or higher
+  on a system that already hosts RavenDB `5.x` or lower requires
+  the migration procedure explained below.
+* Read more about running a RavenDB Docker image [here](../../start/containers/image-usage.mdx).
+
+* In this page:
+  * [Changes Made In RavenDB `6.0` And Up](../../migration/server/docker.mdx#changes-made-in-ravendb-60-and-up)
+  * [Migrating To `6.0` And Up](../../migration/server/docker.mdx#migrating-to-60-and-up)
+
+
+## Changes Made In RavenDB 6.0 And Up
+
+RavenDB `6.0` and above use a different **directory structure**, and run
+under a different **user**, than older versions.
+
+* RavenDB Docker images up to `5.x`:
+  * Create a unique directory structure under Windows.
+  * Are installed and accessed using the `root` user on Ubuntu.
+
+* RavenDB Docker images from `6.0` up:
+  * Use a Debian archive file ([.deb package](../../start/installation/gnu-linux/deb.mdx))
+    and create a similar directory structure under Windows and Ubuntu.
+  * Are installed and accessed using a dedicated `ravendb` user
+    instead of `root`, to improve security.
+
+Learn below how to address these differences when migrating
+from version `5.x` or lower to version `6.0` or higher.
+
+
+
+## Migrating To `6.0` And Up
+
+## Permit the `ravendb` user to access the mounted data directory
+
+The default **UID** (User ID) and **GID** (Group ID)
+used by the `ravendb` user are **999**.
+Change the owner of the RavenDB data directory to `ravendb` (`999` UID by default) on the container host.
+E.g., `chown -R 999:999 $TARGET_DATA_DIR`
+
+## Customizing the RavenDB data directory owner user UID/GID
+
+To customize the **UID** and **GID** of the `ravendb` user (e.g. to match your host user or for volume permissions), you can build your own image using the official `ravendb` base image.
+
+
+
+{`FROM ravendb/ravendb:7.0-ubuntu-latest
+
+ARG USER_ID=1000
+ARG GROUP_ID=1000
+
+USER root
+
+RUN groupmod -g "$\{GROUP_ID\}" "ravendb" && \\
+    usermod -u "$\{USER_ID\}" -g "$\{GROUP_ID\}" ravendb && \\
+    chown root:$\{USER_ID\} /etc/ravendb/settings.json && \\
+    find / -xdev -uid 0 -gid 999 -exec chown "root:$\{GROUP_ID\}" \{\} +
+
+USER ravendb
+`}
+
+
+
+### Build Instructions
+
+1. Save the above as `dockerfile`
+2. Open a terminal in that directory
+3. Build the custom image with your desired UID and GID:
+   `docker build --build-arg USER_ID=YourUserID --build-arg GROUP_ID=YourGroupID -t image-name -f dockerfile`
+
+## Migrate files and data
+
+The setup process will create the directory structure detailed
+[here](../../start/installation/gnu-linux/deb.mdx#file-system-locations).
+
+The script within the image will attempt to link the old version's
+data directory to the new version's data directory automatically upon start.
+If this attempt fails, an error will be produced.
+
+When mounting a host directory, make sure that **RavenDB data** is mounted under its new
+location in the container: `/var/lib/ravendb/data`
+
+Old data directory (default): `/opt/RavenDB/Server/RavenData`
+New data directory (default): `/var/lib/ravendb/data`
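+
+For example, a minimal sketch of running a `6.0`-or-higher image with an existing
+data directory mounted at the new location (image tag, container name, and host
+path are placeholders; this assumes the host directory owner was already changed
+to UID/GID `999` as described above):
+
+
+{`docker run -d --name ravendb \\
+    -v /opt/RavenDB/Server/RavenData:/var/lib/ravendb/data \\
+    -p 8080:8080 ravendb/ravendb:6.0-ubuntu-latest
+`}
+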
+
+
diff --git a/versioned_docs/version-7.1/migration/server/server-breaking-changes.mdx b/versioned_docs/version-7.1/migration/server/server-breaking-changes.mdx
new file mode 100644
index 0000000000..82ec602f2b
--- /dev/null
+++ b/versioned_docs/version-7.1/migration/server/server-breaking-changes.mdx
@@ -0,0 +1,70 @@
+---
+title: "Migration: Server Breaking Changes"
+hide_table_of_contents: true
+sidebar_label: Server Breaking Changes
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Migration: Server Breaking Changes
+
+The features listed on this page were available in former RavenDB versions.
+In RavenDB `7.0.x`, they are either unavailable or behave differently than
+in previous versions.
+
+* In this page:
+  * [RavenDB now incorporates NLog as its logging system](../../migration/server/server-breaking-changes.mdx#ravendb-incorporates-nlog-as-its-logging-system)
+  * [Removed obsolete properties](../../migration/server/server-breaking-changes.mdx#removed-obsolete-properties)
+
+
+## RavenDB incorporates NLog as its logging system
+RavenDB's logging system has changed; the server now incorporates the
+NLog logging framework and writes all log data through it.
+One of the changes that NLog brings to RavenDB is the richer set
+of logging levels, visible right away through Studio's [admin-logs view](../../studio/server/debug/admin-logs.mdx).
+Read more about NLog [in the dedicated article](../../server/troubleshooting/logging.mdx).
+If you migrate to RavenDB `7.x` from an earlier version, please
+read the section related to NLog in the [migration page](../../migration/server/data-migration.mdx).
+
+
+
+## Removed obsolete properties
+The following properties are no longer in use, and have been removed from RavenDB `7.0`.
+
+* `ServerOptions`'s `AcceptEula` property is no longer used.
+  Please use `Licensing.EulaAccepted` instead.
+
+
+
+{`// Removed
+public bool AcceptEula
+`}
+
+
+
+* The `MemoryInfoResult` struct no longer includes these classes:
+  - `MemoryUsageIntervals`
+
+
+{`// Removed
+public sealed class MemoryUsageIntervals
+`}
+
+
+  - `MemoryUsageLowHigh`
+
+
+{`// Removed
+public sealed class MemoryUsageLowHigh
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/_category_.json b/versioned_docs/version-7.1/server/_category_.json
new file mode 100644
index 0000000000..950e45a759
--- /dev/null
+++ b/versioned_docs/version-7.1/server/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 6,
+    "label": "Server"
+}
diff --git a/versioned_docs/version-7.1/server/_embedded-csharp.mdx b/versioned_docs/version-7.1/server/_embedded-csharp.mdx
new file mode 100644
index 0000000000..c7ac115dc0
--- /dev/null
+++ b/versioned_docs/version-7.1/server/_embedded-csharp.mdx
@@ -0,0 +1,553 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* This page explains how to run RavenDB as an embedded server.
+
+* In this page:
+
+  * [Overview](../server/embedded.mdx#overview)
+  * [Prerequisites and Recommendations](../server/embedded.mdx#prerequisites-and-recommendations)
+  * [Installation](../server/embedded.mdx#installation)
+  * [Starting the server](../server/embedded.mdx#starting-the-server)
+  * [Server options](../server/embedded.mdx#server-options)
+  * [Setting server directory](../server/embedded.mdx#setting-server-directory)
+  * [Restarting the server](../server/embedded.mdx#restarting-the-server)
+  * [ServerProcessExited Event](../server/embedded.mdx#serverprocessexited-event)
+  * [Embedded server licensing](../server/embedded.mdx#embedded-server-licensing)
+  * [Licensing configuration options](../server/embedded.mdx#licensing-configuration-options)
+  * [License an embedded server using an Environment variable](../server/embedded.mdx#license-an-embedded-server-using-an-environment-variable)
+  * [`.NET` FrameworkVersion](../server/embedded.mdx#net-frameworkversion)
+  * [Security](../server/embedded.mdx#security)
+  * [Document store](../server/embedded.mdx#document-store)
+  * [Get server URL and process ID](../server/embedded.mdx#get-server-url-and-process-id)
+  * [Remarks](../server/embedded.mdx#remarks)
+
+
+## Overview
+
+RavenDB can be easily embedded in your application.
+Use the Embedded package to integrate RavenDB in just a few easy steps.
+
+
+
+
+{`EmbeddedServer.Instance.StartServer();
+using (var store = EmbeddedServer.Instance.GetDocumentStore("Embedded"))
+{
+    using (var session = store.OpenSession())
+    {
+        // Your code here
+    }
+}
+`}
+
+
+
+
+{`EmbeddedServer.Instance.StartServer();
+using (var store = await EmbeddedServer.Instance.GetDocumentStoreAsync("Embedded"))
+{
+    using (var session = store.OpenAsyncSession())
+    {
+        // Your code here
+    }
+}
+`}
+
+
+
+
+
+## Prerequisites and Recommendations
+
+* **Prerequisites**
+  * Install the [`.NET` Core runtime](https://dotnet.microsoft.com/en-us/download),
+    either manually or [along with a RavenDB full version](embedded#setting-server-directory).
+  * Verify that the RavenDB server [FrameworkVersion](../server/embedded.mdx#net-frameworkversion)
+    definition matches the `.NET` Core version that you install.
+
+* **Recommendations**
+  * Projects targeting `.NET Framework 4.6.1+` that use the old `packages.config`
+    for NuGet package maintenance should migrate to `PackageReference` package management.
+    Find additional details [below](../server/embedded.mdx#migrating-from--to--in-old-csproj-projects).
+### `.NET` Core Runtime:
+
+RavenDB Embedded **does not include** the `.NET` Core runtime engine required for its operation.
+
+By default, `ServerOptions.FrameworkVersion` is set to the `.NET` Core version that we compiled
+the server with, and `ServerOptions.DotNetPath` is set to `dotnet` - meaning that `dotnet` must
+be declared in PATH.
+
+We highly recommend using the `.NET` Core framework version defined in `ServerOptions.FrameworkVersion`
+for proper server function.
+You can download the `.NET` Core runtime engine [here](https://dotnet.microsoft.com/download).
+### Migrating from `packages.config` to `PackageReference` in old csproj projects:
+
+Due to NuGet limitations, we recommend installing the Embedded package via the newer package management,
+using `PackageReference` rather than the older `packages.config`.
+
+The transition between the two is made easy by the built-in Visual Studio migrator.
+Find further guidance in this [Microsoft article](https://docs.microsoft.com/en-us/nuget/reference/migrate-packages-config-to-package-reference).
+
+Please note that _binding redirects_ in `App.config` are still required when `PackageReference`
+is used in old `csproj` projects. Failing to use _binding redirects_ might result in an assembly
+load exception such as:
+
+```
+Could not load file or assembly 'System.Runtime.CompilerServices.Unsafe, Version=4.0.4.0, Culture=neutral,
+PublicKeyToken=b03f5f7f11d50a3a' or one of its dependencies. The located assembly's manifest definition does not
+match the assembly reference. (Exception from HRESULT: 0x80131040)
+```
+
+A binding redirect for the assembly from the exception above, for example, looks like this:
+
+
+
+{`<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+  <runtime>
+    <assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1">
+      <dependentAssembly>
+        <assemblyIdentity name="System.Runtime.CompilerServices.Unsafe"
+                          publicKeyToken="b03f5f7f11d50a3a" culture="neutral" />
+        <bindingRedirect oldVersion="0.0.0.0-4.0.4.0" newVersion="4.0.4.0" />
+      </dependentAssembly>
+    </assemblyBinding>
+  </runtime>
+</configuration>
+`}
+
+
+
+
+
+## Installation
+
+* Create a new project (`.NET Standard 2.0+`, `.NET Core 2.0+`, `.NET Framework 4.6.1+`).
+* Grab our [NuGet package](https://www.nuget.org/packages/RavenDB.Embedded)
+
+
+{`Install-Package RavenDB.Embedded -Version 4.1.0
+`}
+
+
+
+
+
+## Starting the Server
+
+RavenDB Embedded Server is available under `EmbeddedServer.Instance`.
+Start the server using the `StartServer` method.
+
+
+
+{`// Start RavenDB Embedded Server with default options
+EmbeddedServer.Instance.StartServer();
+`}
+
+
+
+For more control over the server startup, pass `StartServer` a `ServerOptions` object.
+
+
+
+## Server Options
+
+Set `ServerOptions` to change server settings such as the `.NET` FrameworkVersion, DataDirectory,
+and additional options.
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **DataDirectory** | `string` | Indicates where your data should be stored |
+| **DotNetPath** | `string` | The path to exec `dotnet` (if it is in PATH, leave it) |
+| **AcceptEula** | `bool` | If set to `false`, will ask to accept our terms & conditions |
+| **ServerUrl** | `string` | The address we want our server to start on (default `127.0.0.1:0`) |
+| **MaxServerStartupTimeDuration** | `TimeSpan` | The timeout for the server to start |
+| **CommandLineArgs** | `List<string>` | The [command line arguments](../server/configuration/configuration-options.mdx#command-line-arguments) to start the server with |
+| **ServerDirectory** | `string` | The path to the server binary files<sup>[*](../server/embedded.mdx#setting-server-directory)</sup> |
+
+
+
+{`EmbeddedServer.Instance.StartServer(new ServerOptions
+\{
+    DataDirectory = "C:\\\\RavenData",
+    ServerUrl = "http://127.0.0.1:8080"
+\});
+`}
+
+
+
+If `ServerOptions` is not provided, the RavenDB server will start with a default value of `127.0.0.1:{Random Port}`.
+### Setting Server Directory:
+If you prefer not to install the `.NET` run-time environment on your system, you can -
+
+* [Download](https://ravendb.net/download) a full RavenDB version.
+  This version already includes a `.NET` run-time environment.
+* Extract the downloaded version to a local folder.
+  E.g. `C:\RavenDB`
+* Set the `ServerDirectory` server option to the RavenDB subfolder that contains -
+  * `Raven.Server.exe` on Windows
+  * `Raven.Server` on POSIX systems
+
+
+
+{`EmbeddedServer.Instance.StartServer(new ServerOptions
+\{
+    ServerDirectory = @"C:\\RavenDB\\Server"
+\});
+`}
+
+
+### Restarting the Server:
+To restart the server, use the `.RestartServerAsync()` method.
+
+
+
+{`public async Task RestartServerAsync();
+`}
+
+
+
+In code:
+
+
+
+{`await EmbeddedServer.Instance.RestartServerAsync();
+`}
+
+
+### ServerProcessExited Event:
+Use `.ServerProcessExited` to observe when the server has crashed or exited.
+
+
+
+{`event EventHandler<ServerProcessExitedEventArgs>? ServerProcessExited;
+`}
+
+
+
+Event data is of type `ServerProcessExitedEventArgs`.
+
+
+
+## Embedded server licensing
+
+* The **same license types** available for standalone RavenDB servers are available for embedded servers.
+* A licensed server can be managed using Studio, and offers a richer feature set than
+  non-registered servers. See the full list of license types and their features [here](https://ravendb.net/buy).
+* An embedded server can be licensed using **Configuration options** or an **Environment variable**.
+### Licensing configuration options
+
+Embedded server licensing configuration options are gathered in the `ServerOptions.Licensing` class.
+After acquiring a license, it can be passed to the server either as a string or as a file.
+
+* To pass your license to the server as a string, use the `ServerOptions.LicensingOptions.License`
+  configuration option.
+
+
+{`EmbeddedServer.Instance.StartServer(new ServerOptions
+\{
+    Licensing = new ServerOptions.LicensingOptions
+    \{
+        License = "your license here"
+    \}
+\});
+`}
+
+
+* To keep your license file in your file system and point the server to its path,
+  use the `ServerOptions.LicensingOptions.LicensePath` configuration option.
+
+
+{`EmbeddedServer.Instance.StartServer(new ServerOptions
+\{
+    Licensing = new ServerOptions.LicensingOptions
+    \{
+        LicensePath = "path to license.json file"
+    \}
+\});
+`}
+
+
+#### Available LicensingOptions configuration options:
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **License** | `string` | Specifies the full license string directly in the configuration.<br/>If both `License` and `LicensePath` are defined, `License` takes precedence. |
+| **LicensePath** | `string` | Specifies a path to a license file.<br/>If both `License` and `LicensePath` are defined, `License` takes precedence.<br/>Default: `license.json` |
+| **EulaAccepted** | `bool` | Set to `false` to be presented with a request to accept our terms & conditions. |
+| **DisableAutoUpdate** | `bool` | Disable automatic license updates (from both the `api.ravendb.net` license server **and** the `License` and `LicensePath` configuration options). |
+| **DisableAutoUpdateFromApi** | `bool` | Disable automatic license updates from the `api.ravendb.net` license server.<br/>Note: when disabled, the license **can** still be updated using the `License` and `LicensePath` configuration options. |
+| **DisableLicenseSupportCheck** | `bool` | Control whether to verify the support status of the current license and display it within Studio.<br/>`true`: disable verification<br/>`false`: enable verification |
+| **ThrowOnInvalidOrMissingLicense** | `bool` | Throw an exception if the license is missing or cannot be validated. |
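+
+For example, a minimal sketch of failing fast when the embedded server cannot validate
+its license, using `ThrowOnInvalidOrMissingLicense` together with the `LicenseExpiredException`
+described in the breaking-change note above (the license source is an assumption):
+
+
+{`try
+\{
+    EmbeddedServer.Instance.StartServer(new ServerOptions
+    \{
+        Licensing = new ServerOptions.LicensingOptions
+        \{
+            // Hypothetical license source - replace with your own
+            License = Environment.GetEnvironmentVariable("MY_RAVEN_LICENSE"),
+            ThrowOnInvalidOrMissingLicense = true
+        \}
+    \});
+\}
+catch (LicenseExpiredException)
+\{
+    // The license is missing or could not be validated - handle or rethrow
+    throw;
+\}
+`}
+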
+### License an embedded server using an Environment variable
+
+You can pass the same configuration options to the embedded server using environment variables.
+
+* To pass your license to the server as a string, define or edit the environment variable `RAVEN_License`.
+  Provide your license as a value for this variable.
+  **Note**, however, that you must first **reformat** the license JSON that you acquired, and
+  turn it into a single line, eliminating newline characters.
+
+
+
+
+{`{
+  "Id": "bad5fe9b-fba4-459c-9220-36b438e06e36",
+  "Name": "rdb",
+  "Keys": [
+    "WBRG3G1zKd536ELfRbWw7x69J",
+    "zyFCZ+AcGLI9RgSyRq5r4KS7K",
+    "E0hMr5uzmbMBuxAI6WLBXZTSN",
+    "t+vGjgrVzqoycTPhHdQxNCK2v",
+    "7xOwXKUblAhZmHcDeY3xvF0jn",
+    "EZoZLdaeF0D8FFddNB8NrMWeQ",
+    "kwzAKfs1BMlXi9ZJsVZO9ABUE",
+    "yBSYoSQMqKywtLi8wJzEyMzQV",
+    "Fjc4OTo7PD0+nwIfIJ8CICCfA",
+    "iEgnwIjIEMkRAlieVc="
+  ]
+}
+`}
+
+
+
+{`{"Id": "bad5fe9b-fba4-459c-9220-36b438e06e36","Name": "rdb","Keys": ["WBRG3G1zKd536ELfRbWw7x69J","zyFCZ+AcGLI9RgSyRq5r4KS7K","E0hMr5uzmbMBuxAI6WLBXZTSN","t+vGjgrVzqoycTPhHdQxNCK2v","7xOwXKUblAhZmHcDeY3xvF0jn","EZoZLdaeF0D8FFddNB8NrMWeQ","kwzAKfs1BMlXi9ZJsVZO9ABUE","yBSYoSQMqKywtLi8wJzEyMzQV","Fjc4OTo7PD0+nwIfIJ8CICCfA","iEgnwIjIEMkRAlieVc="]}
+`}
+
+
+
+
+  You can reformat the license manually, or use a script to do it.
+  E.g., the Bash snippet below reads the license from the specified file and uses
+  `jq` with the `-c` flag to compact the file's contents to a single line.
+
+
+{`INPUT_FILE="license.json"
+jq -c . "$INPUT_FILE"
+`}
+
+
+
+* Or, you can keep your license file in a folder accessible to the application that embeds your server,
+  and provide the path to the license file in the `RAVEN_LicensePath` environment variable.
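+
+Putting the two together, a minimal sketch for a Linux shell (the file name and the
+application command are placeholders):
+
+
+{`# Compact license.json to one line and expose it to the embedded server
+export RAVEN_License="$(jq -c . license.json)"
+
+# ...then start the application that embeds the RavenDB server
+dotnet MyEmbeddingApp.dll
+`}
+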
+
+
+
+## `.NET` FrameworkVersion
+
+The default FrameworkVersion is defined to work with any `.NET` version from the time of the RavenDB server release
+and newer, by using the `+` modifier.
+E.g. `ServerOptions.FrameworkVersion = 3.1.17+`
+
+Thus, by leaving the default FrameworkVersion definition, RavenDB embedded servers will automatically look for the
+`.NET` version that is currently running on the machine, starting from the version at the time of the server release.
+
+
+
+Each RavenDB release is compiled with the `.NET` version that was current at the time of the release.
+
+* To find which `.NET` version supports RavenDB 5.1, for example, open the
+  [RavenDB 5.1 What's New](https://ravendb.net/docs/article-page/5.1/csharp/start/whats-new) page.
+  The correct `.NET` version for RavenDB 5.1, `.NET` 5.0.6, is listed at the bottom of the **Server** section.
+* By default, your RavenDB server will look for `.NET` 5.0.6, 5.0.7, etc.
+  So as long as you have at least one of these `.NET` versions running on your machine, RavenDB will work well.
+
+
+
+To stay within a major or minor `.NET` release, but ensure flexibility with patch releases,
+use a floating integer `x`.
+It will always use the newest version found on your machine.
+
+E.g., `ServerOptions.FrameworkVersion = 3.x` will look for the newest 3.x release,
+`ServerOptions.FrameworkVersion = 3.2.x` will look for the newest 3.2 release.
+Neither will look for 4.x.
+
+| ServerOption Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **FrameworkVersion** | string | The `.NET` Core framework version to run the server with |
+
+| Parameter | Description |
+| --------- | ------------- |
+| `null` | The server will pick the newest `.NET` version installed on your machine. |
+| `3.1.17+` | Default setting (actual version number is set at the time of server release).<br/>In this example, the server will work properly with `.NET` patch releases that are greater than or equal to 3.1.17. |
+| `3.1.17` | The server will **only** work properly with this exact `.NET` release. |
+| `3.1.x` | The server will pick the newest `.NET` patch release on your machine. |
+| `3.x` | The server will pick the newest `.NET` minor releases and patch releases on your machine. |
+
+
+
+{`EmbeddedServer.Instance.StartServer(new ServerOptions
+\{
+    FrameworkVersion = "3.1.15+",
+\});
+`}
+
+
+
+
+## Security
+
+RavenDB Embedded supports running a secured server.
+Just call the `Secured` method on the `ServerOptions` object.
+### Set up security using the certificate path:
+
+
+
+{`var serverOptions = new ServerOptions();
+serverOptions.Secured(
+    certificate: "PathToServerCertificate",
+    certPassword: "CertificatePassword");
+`}
+
+
+
+The first way to enable authentication is to set the
+[certificate with the path to your .pfx](../server/security/authentication/certificate-configuration.mdx#standard-manual-setup-with-certificate-stored-locally)
+server certificate.
+You can supply the certificate password using `certPassword`.
+### Set up security using a custom script:
+
+To access the certificate via logic that is external to RavenDB, you can use the following approach:
+
+
+
+{`var serverOptionsWithExec = new ServerOptions();
+var certificate = new X509Certificate2(fileName, password);
+serverOptionsWithExec.Secured(
+    certLoadExec: "powershell",
+    certExecArgs: "C:\\\\secrets\\\\give_me_cert.ps1",
+    serverCertThumbprint: certificate.Thumbprint,
+    clientCert: certificate);
+`}
+
+
+
+This option is useful when you want to protect your certificate (private key) with other solutions such as
+"Azure Key Vault", "HashiCorp Vault", or even hardware-based protection. RavenDB will invoke a process you specify,
+so you can write your own scripts / mini-programs and apply the logic that you need.
+
+This way, a clean separation is kept between RavenDB and the secret store in use.
+
+RavenDB expects to get the raw binary representation (byte array) of the `.pfx` certificate
+through the standard output.
+
+
+
+## Document Store
+
+After starting the server, you can get the `DocumentStore` from the embedded server and start working with RavenDB.
+Getting the `DocumentStore` from the embedded server is done simply by calling `GetDocumentStore` or `GetDocumentStoreAsync`
+with the name of the database you choose to work with.
+
+
+
+
+{`EmbeddedServer.Instance.GetDocumentStore("Embedded");
+`}
+
+
+
+
+{`await EmbeddedServer.Instance.GetDocumentStoreAsync("Embedded");
+`}
+
+
+
+
+For additional control over the process, you can call the methods with a `DatabaseOptions` object.
+
+
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **DatabaseRecord** | `DatabaseRecord` | Instance of `DatabaseRecord` containing the database configuration |
+| **SkipCreatingDatabase** | `bool` | If set to true, will skip creating the database |
+
+
+
+
+
+
+{`var databaseOptions = new DatabaseOptions(new DatabaseRecord
+{
+    DatabaseName = "Embedded"
+});
+EmbeddedServer.Instance.GetDocumentStore(databaseOptions);
+`}
+
+
+
+
+{`var databaseOptions = new DatabaseOptions(new DatabaseRecord
+{
+    DatabaseName = "Embedded"
+});
+await EmbeddedServer.Instance.GetDocumentStoreAsync(databaseOptions);
+`}
+
+
+
+
+
+
+## Get Server URL and Process ID
+
+#### Server URL:
+
+The `GetServerUriAsync` method can be used to retrieve the embedded server URL.
+It must be called after the server has started, because it waits for the server's
+initialization to complete.
+The URL can be used, for example, to create a custom document store, omitting the
+`GetDocumentStore` method entirely.
+
+
+
+{`Uri url = await EmbeddedServer.Instance.GetServerUriAsync();
+`}
+
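+
+For instance, a minimal sketch of that custom-store approach (the database name is
+an assumption):
+
+
+{`Uri url = await EmbeddedServer.Instance.GetServerUriAsync();
+
+using (var store = new DocumentStore
+\{
+    Urls = new[] \{ url.AbsoluteUri \},
+    Database = "Embedded"
+\})
+\{
+    store.Initialize();
+    // Work with sessions as usual, bypassing GetDocumentStore
+\}
+`}
+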
+
+#### Process ID:
+
+The `GetServerProcessIdAsync` method can be used to retrieve the system-generated process ID of the
+embedded server.
+
+
+
+{`public async Task<int> GetServerProcessIdAsync(CancellationToken token = default);
+`}
+
+
+
+
+
+{`int processID = await EmbeddedServer.Instance.GetServerProcessIdAsync();
+`}
+
+
+
+
+
+## Remarks
+
+* You can have only one instance of `EmbeddedServer`.
+* The `EmbeddedServer.Instance.OpenStudioInBrowser()` method can be used to open a browser instance with Studio.
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/_embedded-java.mdx b/versioned_docs/version-7.1/server/_embedded-java.mdx
new file mode 100644
index 0000000000..ab55c9fcc4
--- /dev/null
+++ b/versioned_docs/version-7.1/server/_embedded-java.mdx
@@ -0,0 +1,186 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+## Overview
+
+RavenDB can easily be embedded in your application. With the RavenDB Embedded package,
+you can integrate a RavenDB server in a few easy steps.
+
+
+
+{`EmbeddedServer.INSTANCE.startServer();
+
+try (IDocumentStore store = EmbeddedServer.INSTANCE.getDocumentStore("Embedded")) \{
+    try (IDocumentSession session = store.openSession()) \{
+        // your code here
+    \}
+\}
+`}
+
+
+
+
+## Prerequisites
+
+There is one prerequisite and one recommendation for the Embedded package:
+
+Prerequisite:
+
+- The **.NET Core runtime** must be installed manually
+
+
+
+RavenDB Embedded **does not include the .NET Core runtime required for it to run**.
+
+By default, `ServerOptions.FrameworkVersion` is set to the .NET Core version that we compiled the server with, and `ServerOptions.DotNetPath` is set to `dotnet`, meaning that `dotnet` must be declared in PATH.
+
+We highly recommend using the .NET Core framework version defined in `ServerOptions.FrameworkVersion` for proper functioning of the server. The .NET Core runtime can be downloaded from [here](https://dotnet.microsoft.com/download).
+
+
+
+
+
+## Getting Started
+
+### Installation
+
+* Create a new project
+* Add the package [net.ravendb:ravendb-embedded](https://search.maven.org/search?q=a:ravendb-embedded) as a dependency
+
+### Starting the Server
+
+RavenDB Embedded Server is available under `EmbeddedServer.INSTANCE`. In order to start it, call the `startServer` method.
+
+
+
+{`// Start RavenDB Embedded Server with default options
+EmbeddedServer.INSTANCE.startServer();
+`}
+
+
+
+For more control over how the server starts, pass the `startServer` method a `ServerOptions` object.
+
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **frameworkVersion** | String | The .NET Core framework version to run the server with |
+| **dataDirectory** | String | Indicates where your data should be stored |
+| **dotNetPath** | String | The path to exec `dotnet` (if it is in PATH, leave it) |
+| **targetServerLocation** | String | The path to extract the server binaries to |
+| **acceptEula** | boolean | If set to `false`, will ask to accept our terms & conditions |
+| **serverUrl** | String | The address we want our server to start on (default `127.0.0.1:0`) |
+| **maxServerStartupTimeDuration** | `Duration` | The timeout for the server to start |
+| **commandLineArgs** | `List<String>` | The [command line arguments](../server/configuration/configuration-options.mdx#command-line-arguments) to start the server with |
+
+
+
+
+{`ServerOptions serverOptions = new ServerOptions();
+// target location of RavenDB data
+serverOptions.setDataDirectory("C:\\\\RavenData");
+serverOptions.setServerUrl("http://127.0.0.1:8080");
+
+// location where server binaries will be extracted
+serverOptions.setTargetServerLocation("c:\\\\RavenServer");
+EmbeddedServer.INSTANCE.startServer(serverOptions);
+`}
+
+
+
+
+Without `ServerOptions`, the RavenDB server will start with a default value of `127.0.0.1:{Random Port}`
+
+
+### Security
+
+RavenDB Embedded supports running a secured server.
+Just call the `secured` method on the `ServerOptions` object.
+
+There are two overloads of `secured`:
+
+
+{`ServerOptions serverOptions = new ServerOptions();
+serverOptions.secured("PathToCertificate", "CertificatePassword");
+`}
+
+
+
+The first way to enable authentication is to set the certificate with the path to your `.pfx`
+server certificate. You may supply the certificate password using `certPassword`.
+
+
+
+{`ServerOptions serverOptions = new ServerOptions();
+serverOptions.secured("powershell",
+    "c:\\\\secrets\\\\give_me_cert.ps1",
+    "a909502dd82ae41433e6f83886b00d4277a32a7b",
+    clientCertificate,
+    "PathToCaCertificateFile");
+`}
+
+
+
+This option is useful when you want to protect your certificate (private key) with other solutions such as "Azure Key Vault", "HashiCorp Vault" or even hardware-based protection.
+RavenDB will invoke a process you specify, so you can write your own scripts / mini programs and apply whatever logic you need. It creates a clean separation between RavenDB and the secret store in use.
+RavenDB expects to get the raw binary representation (byte array) of the .pfx certificate through the standard output.
+This overload also lets you control the client certificate, and use a different certificate for your client.
+
+### Document Store
+
+After starting the server, you can get the DocumentStore from the embedded server and start working with RavenDB.
+Getting the DocumentStore from the embedded server is easy: just call `getDocumentStore` with the name of the database you would like to work with.
+
+
+
+{`EmbeddedServer.INSTANCE.getDocumentStore("Embedded");
+`}
+
+
+
+For more control over the process, you can call the methods with a `DatabaseOptions` object.
+
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **DatabaseRecord** | DatabaseRecord | Instance of `DatabaseRecord` containing the database configuration |
+| **SkipCreatingDatabase** | boolean | If set to true, will skip creating the database |
+
+
+
+
+{`DatabaseRecord databaseRecord = new DatabaseRecord();
+databaseRecord.setDatabaseName("Embedded");
+DatabaseOptions databaseOptions = new DatabaseOptions(databaseRecord);
+EmbeddedServer.INSTANCE.getDocumentStore(databaseOptions);
+`}
+
+
+
+### Get Server URL
+
+The `getServerUri` method can be used to retrieve the embedded server URL. It must be called after the server has started, because it waits for the server initialization to complete.
+The URL can be used, for example, to create a custom document store, omitting the `getDocumentStore` method entirely.
+
+
+
+{`String serverUri = EmbeddedServer.INSTANCE.getServerUri();
+`}
+
+
+
+
+
+## Remarks
+
+* You can have only one instance of `EmbeddedServer`
+* The method `EmbeddedServer.INSTANCE.openStudioInBrowser()` can be used to open a browser instance with Studio
+
+
+
diff --git a/versioned_docs/version-7.1/server/administration/_category_.json b/versioned_docs/version-7.1/server/administration/_category_.json
new file mode 100644
index 0000000000..e26e7deb6b
--- /dev/null
+++ b/versioned_docs/version-7.1/server/administration/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 1,
+    "label": "Administration"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/administration/assets/general-stats.png b/versioned_docs/version-7.1/server/administration/assets/general-stats.png
new file mode 100644
index 0000000000..a6ad96c13c
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/assets/general-stats.png differ
diff --git a/versioned_docs/version-7.1/server/administration/cli.mdx b/versioned_docs/version-7.1/server/administration/cli.mdx
new file mode 100644
index 0000000000..929eae8a30
--- /dev/null
+++ b/versioned_docs/version-7.1/server/administration/cli.mdx
@@ -0,0 +1,475 @@
+---
+title: "Administration: RavenDB CLI"
+hide_table_of_contents: true
+sidebar_label: CLI (Command Line Interface)
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Administration: RavenDB CLI
+
+
+* Running RavenDB as a console application offers basic information along with
+  a Command Line Interface that can be used to get additional information regarding
+  the server and perform specific commands.
+
+* This page lists available CLI commands and explains their usage.
+
+* In this page:
+  * [`rvn`](../../server/administration/cli.mdx#rvn)
+  * [`info`](../../server/administration/cli.mdx#info)
+  * [`stats`](../../server/administration/cli.mdx#stats)
+  * [`log`](../../server/administration/cli.mdx#log)
+  * [`gc`](../../server/administration/cli.mdx#gc)
+  * [`shutdown | q`](../../server/administration/cli.mdx#shutdown-|-q)
+  * [`restartServer`, `resetServer`](../../server/administration/cli.mdx#restartserver,-resetserver)
+  * [`script`](../../server/administration/cli.mdx#script)
+  * [`generateClientCert`](../../server/administration/cli.mdx#generateclientcert)
+  * [`trustServerCert`](../../server/administration/cli.mdx#trustservercert)
+  * [`trustClientCert`](../../server/administration/cli.mdx#trustclientcert)
+  * [`replaceClusterCert`](../../server/administration/cli.mdx#replaceclustercert)
+  * [`clear`](../../server/administration/cli.mdx#clear)
+  * [`prompt`](../../server/administration/cli.mdx#prompt)
+  * [`logout`](../../server/administration/cli.mdx#logout)
+  * [`logo`](../../server/administration/cli.mdx#logo)
+  * [`help`](../../server/administration/cli.mdx#help)
+  * [`lowMem`](../../server/administration/cli.mdx#lowmem)
+  * [`timer`](../../server/administration/cli.mdx#timer)
+
+
+## `rvn`
+
+
+RavenDB can operate as a service/daemon without console input.
+It is possible to access the CLI through the provided `rvn` tool (`rvn.exe` on Windows)
+included in each distribution package.
+
+The process is as follows:
+
+
+{`rvn admin-channel [RavenDB process ID]
+`}
+
+
+
+`rvn admin-channel` uses a [Named Pipe Connection](https://en.wikipedia.org/wiki/Named_pipe),
+and can connect to the RavenDB CLI only when running on the same machine as the server and with
+appropriate privileges.
+
+
+The `rvn` executable can be found in the distribution package under the **Server** directory.
+
+
+
+## `info`
+
+Prints basic information to the console, including build version information, process ID (PID),
+bitness, and system hardware information.
+
+
+
+{`ravendb> info
+ Node A in cluster eabe7a24-054a-48ef-9391-7f7b7707969d
+ Build 40050, Version 4.0, SemVer 4.0.0, Commit fffffff
+ PID 17591, 64 bits, 8 Cores, Arch: X64
+ 31.122 GBytes Physical Memory, 28.908 GBytes Available Memory
+ Using GC in server concurrent mode retaining memory from the OS.
+`}
+
+
+
+
+## `stats`
+
+Displays live memory usage by RavenDB, broken down into Working Set, Native Mem,
+Managed Mem, and Memory Mapped Size. Hitting any key returns to the CLI's input mode
+(avoid hitting Ctrl+C / Break, which would shut the server down unintentionally).
+
+
+
+{`ravendb> stats
+ Showing stats, press any key to close...
+ working set | native mem | managed mem | mmap size | reqs/sec | docs (all dbs)
+
+ 201.45 MBytes | 17.36 MBytes | 42.45 MBytes | 2.02 GBytes | 0 | 5,374,826
+`}
+
+
+
+
+## `log`
+
+Enable (or disable) online log printing to the console.
+
+
+{`log [on|off] [http-on|http-off] [info|debug] [no-console]
+`}
+
+
+
+| Parameters | Description |
+| ------------- | ------------- |
+| `http-on` or `http-off` | Enables or disables HTTP requests log information |
+| `info`, `debug` | Sets logging to the requested [level](../../server/configuration/logs-configuration.mdx#logsminlevel) |
+| `on` or `off` | Enables or disables log printing |
+| `no-console` | Disables logging to the console |
+
+
+
+Print to log:
+
+
+{`ravendb> log on
+`}
+
+
+
+Set logging to `debug` level:
+
+
+{`ravendb> log debug
+`}
+
+
+
+Set logging to `debug` level but do not log to the console:
+
+
+{`ravendb> log debug no-console
+`}
+
+
+
+
+
+If the log is enabled using `rvn admin-channel`, the information will be displayed
+in the main console application. If RavenDB is running as a service, you will see the
+log output in the service log.
+
+
+
+
+## `gc`
+
+Force Garbage Collection of a specific generation (0, 1 or 2).
+See [GC.Collect Method](https://docs.microsoft.com/en-us/dotnet/api/system.gc.collect?redirectedfrom=MSDN&view=netframework-4.7.2#System_GC_Collect_System_Int32_)
+
+
+
+{`gc <0|1|2>
+`}
+
+
+
+
+
+{`ravendb> gc 2
+Before collecting, managed memory used: 48.92 MBytes
+Garbage Collecting... Collected.
+After collecting, managed memory used: 10.09 MBytes at 0.0078154 Seconds
+`}
+
+
+
+
+## `shutdown | q`
+
+Gracefully shuts down the server.
+
+
+
+{`ravendb> q
+
+Are you sure you want to reset the server ? [y/N] : y
+Starting shut down...
+Shutdown completed
+`}
+
+
+
+| Parameters | Description |
+| ------------- | ------------- |
+| `no-confirmation` | Skips the confirmation question and shuts down immediately. |
+
+
+
+## `restartServer`, `resetServer`
+
+Gracefully shut down and restart the server.
+
+
+
+{`ravendb> restartServer
+
+Are you sure you want to reset the server ? [y/N] : y
+Starting shut down...
+Shutdown completed
+
+Restarting Server...
+Using GC in server concurrent mode retaining memory from the OS.
+Node A in cluster eabe7a24-054a-48ef-9391-7f7b7707969d
+Server available on: http://rave-pc:8080
+Tcp listening on 0.0.0.0:32797
+Server started, listening to requests...
+TIP: type 'help' to list the available commands.
+ravendb>
+`}
+
+
+
+The two commands are equivalent.
+
+
+
+{`ravendb> resetServer
+
+Are you sure you want to reset the server ? [y/N] : y
+Starting shut down...
+Shutdown completed
+
+Restarting Server...
+Using GC in server concurrent mode retaining memory from the OS.
+Node A in cluster eabe7a24-054a-48ef-9391-7f7b7707969d
+Server available on: http://rave-pc:8080
+Tcp listening on 0.0.0.0:32797
+Server started, listening to requests...
+TIP: type 'help' to list the available commands.
+ravendb>
+`}
+
+
+
+
+
+## `script`
+
+Executes Admin JavaScript patches.
+
+
+
+{`script <server | database> [database name]
+`}
+
+
+
+
+
+{`ravendb> script database ProductionDB
+
+Enter JavaScript:
+(to cancel enter in new line 'cancel' or 'EXEC' to execute)
+
+>>> return database.Configuration.Storage.MaxConcurrentFlushes
+>>> EXEC
+\{ "Result" : 10 \}
+ravendb>
+`}
+
+
+
+
+Running scripts can harm the database beyond recovery. Use with care.
+
+
+
+
+## `generateClientCert`
+
+Generate a new trusted client certificate with `ClusterAdmin` security clearance.
+
+
+
+{`ravendb> generateClientCert <name> <path to output folder> <number of months until expiration> [password]
+`}
+
+
+
+| Parameters | Description |
+| - | - |
+| `name` | The name of the client certificate that is to be generated. |
+| `path to output folder` | The path to the certificate. The path is relative to the **server** folder in RavenDB. |
+| `number of months until expiration` | The number of months you want the certificate to last before it expires. |
+| `password` | The certificate's password. **Optional**. |
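+
+For example, an invocation with hypothetical values (a certificate named `admin`,
+written to a `certs` folder under the server folder, valid for 12 months, no password):
+
+
+{`ravendb> generateClientCert admin certs 12
+`}
+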
+
+
+## `trustServerCert`
+
+Register a server certificate of another node to be trusted on this server.
+This is required when building a cluster where each node has a different certificate.
+
+
+
+{`ravendb> trustServerCert <name> <path to pfx> [password]
+`}
+
+
+
+| Parameters | Description |
+| - | - |
+| `name` | The name of the server certificate that is to be trusted. |
+| `path to pfx` | The path to the certificate. |
+| `password` | The certificate's password, if it has one. |
+
+
+
+## `trustClientCert`
+
+Register a client certificate to be trusted on this server with `ClusterAdmin` security clearance.
+
+
+
+{`ravendb> trustClientCert <name> <path to pfx> [password]
+`}
+
+
+
+| Parameters | Description |
+| - | - |
+| `name` | The name of the client certificate that is to be trusted. |
+| `path to pfx` | The path to the certificate. |
+| `password` | The certificate's password, if it has one. |
+
+
+
+## `replaceClusterCert`
+
+Replace the cluster certificate.
+
+If **replaceImmediately** is specified, RavenDB will replace the certificate by force,
+even if some nodes are not responding. In that case, you will have to manually replace the
+certificate in those nodes. Use with care.
+
+
+
+
+{`ravendb> replaceClusterCert [-replaceImmediately] <name> <path to pfx> [password]
+`}
+
+
+
+| Parameters | Description |
+| - | - |
+| `-replaceImmediately` | Whether to force replace the cluster certificate even if some nodes are not responding. Optional, use with caution. |
+| `name` | The name of the new cluster certificate. |
+| `path to pfx` | The path to the certificate. |
+| `password` | The certificate's password, if it has one. |
+
+
+
+---
+
+## `clear`
+
+Clears the screen.
+
+
+
+{`ravendb> clear
+`}
+
+
+
+
+
+## `prompt`
+
+For usage type **helpPrompt**.
+
+Can be used to show the memory information displayed by **stats** while using `rvn`.
+
+
+
+{`ravendb> prompt %M
+`}
+
+
+
+
+
+## `logout`
+
+Exits the CLI back to the terminal (with `rvn admin-channel` use only).
+
+
+
+{`ravendb> logout
+`}
+
+
+
+
+
+## `logo`
+
+Prints the initial logo.
+
+
+
+{`ravendb> logo
+`}
+
+
+
+
+
+## `help`
+
+Displays the help screen.
+
+
+
+{`ravendb> help
+`}
+
+
+
+
+
+---
+
+## `lowMem`
+
+
+Debugging command. Not intended for normal use.
+
+
+Simulates a low memory state in RavenDB.
+
+
+
+{`ravendb> lowMem
+`}
+
+
+
+
+
+## `timer`
+
+
+Debugging command. Not intended for normal use.
+
+
+Enable (or disable) the candidate selection timer (Rachis), or fire its timeout immediately.
+
+
+
+{`timer <on | off | fire>
+`}
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/_category_.json b/versioned_docs/version-7.1/server/administration/monitoring/_category_.json
new file mode 100644
index 0000000000..ddaffcce89
--- /dev/null
+++ b/versioned_docs/version-7.1/server/administration/monitoring/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 1,
+    "label": "Monitoring"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/assets/Prometheus_changed-index-priority.png b/versioned_docs/version-7.1/server/administration/monitoring/assets/Prometheus_changed-index-priority.png
new file mode 100644
index 0000000000..874c8d45f7
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/monitoring/assets/Prometheus_changed-index-priority.png differ
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/assets/RavenDB_changed-index-priority.png b/versioned_docs/version-7.1/server/administration/monitoring/assets/RavenDB_changed-index-priority.png
new file mode 100644
index 0000000000..0931009017
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/monitoring/assets/RavenDB_changed-index-priority.png differ
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/assets/additional-info.png b/versioned_docs/version-7.1/server/administration/monitoring/assets/additional-info.png
new file mode 100644
index 0000000000..a086236700
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/monitoring/assets/additional-info.png differ
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/assets/prometheus_metrics-graph.png b/versioned_docs/version-7.1/server/administration/monitoring/assets/prometheus_metrics-graph.png
new file mode 100644
index 0000000000..e1b2007480
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/monitoring/assets/prometheus_metrics-graph.png differ
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/assets/prometheus_select-metrics.png b/versioned_docs/version-7.1/server/administration/monitoring/assets/prometheus_select-metrics.png
new file mode 100644
index 0000000000..cc148dd818
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/monitoring/assets/prometheus_select-metrics.png differ
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/assets/ravendb_prometheus-endpoint-output.png b/versioned_docs/version-7.1/server/administration/monitoring/assets/ravendb_prometheus-endpoint-output.png
new file mode 100644
index 0000000000..3a64e8c2b9
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/monitoring/assets/ravendb_prometheus-endpoint-output.png differ
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/mib-generation.mdx b/versioned_docs/version-7.1/server/administration/monitoring/mib-generation.mdx
new file mode 100644
index 0000000000..ad6be0b264
--- /dev/null
+++ b/versioned_docs/version-7.1/server/administration/monitoring/mib-generation.mdx
@@ -0,0 +1,60 @@
+---
+title: "Monitoring: MIB generation and usage"
+hide_table_of_contents: true
+sidebar_label: MIB generation
+sidebar_position: 3
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
from "@site/src/components/LanguageContent"; + +# Monitoring: MIB generation and usage + + +* RavenDB allows you to generate a MIB (Management Information Base) file that contains + a structured collection of SNMP OIDs. + +* The MIB can be generated through RavenDB's `/monitoring/snmp/mib` endpoint, + and fine-tuned using flags. + +* The created MIB can be used by monitoring tools to extract RavenDB metrics via SNMP. + +* In this page: + * [Generating a MIB](../../../server/administration/monitoring/mib-generation.mdx#generating-a-mib) + * [MIB generation endpoint](../../../server/administration/monitoring/mib-generation.mdx#mib-generation-endpoint) + * [Fine-tune the OIDs list](../../../server/administration/monitoring/mib-generation.mdx#fine-tune-the-oids-list) + + +## Generating a MIB + +### MIB generation endpoint + +To generate a MIB, use RavenDB's HTTP `/monitoring/snmp/mib` GET endpoint. +You can inspect this endpoint using your browser to download a text file with +RavenDB's OIDs, or connect it with a monitoring tool to utilize these OIDs. + +To use the endpoint, add its path to RavenDB's address (including RavenDB's port number). + +- To generate a MIB for RavenDB's live test server, for example. use: + [http://live-test.ravendb.net/monitoring/snmp/mib](http://live-test.ravendb.net/monitoring/snmp/mib) +### Fine-tune the OIDs list + +By default, the generated MIB includes **server** metrics OIDs. You can fine-tune +it to include the OIDs range your are interested in. Available options are: + +* `includeServer` - Include or exclude OIDs with **server** metrics. +* `includeCluster` - Include or exclude OIDs with **cluster** metrics. +* `includeDatabases` - Include or exclude OIDs with **databases** metrics. + +#### Examples: +To include **databases** metrics OIDs, for example, you can use the `includeDatabases` flag this way: +[http://live-test.ravendb.net/monitoring/snmp/mib?includeDatabases=true](http://live-test.ravendb.net/monitoring/snmp/mib?includeDatabases=true) +Or to exclude **server** metrics OIDs use the **includeServer** flag like so: +[http://live-test.ravendb.net/monitoring/snmp/mib?includeServer=false](http://live-test.ravendb.net/monitoring/snmp/mib?includeServer=false) + + + diff --git a/versioned_docs/version-7.1/server/administration/monitoring/open-telemetry.mdx b/versioned_docs/version-7.1/server/administration/monitoring/open-telemetry.mdx new file mode 100644 index 0000000000..7c1ade964f --- /dev/null +++ b/versioned_docs/version-7.1/server/administration/monitoring/open-telemetry.mdx @@ -0,0 +1,398 @@ +--- +title: "OpenTelemetry Support" +hide_table_of_contents: true +sidebar_label: OpenTelemetry +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# OpenTelemetry Support + + +* [OpenTelemetry](https://opentelemetry.io) is an open-source observability framework that provides a set of APIs, SDKs, and tools + for collecting, processing, and exporting telemetry data (metrics, traces, and logs) from applications and systems. + +* By standardizing data collection and integration, OpenTelemetry enables seamless monitoring and troubleshooting + across various platforms and backends, assisting in the analysis of software performance and behavior. 
+* **RavenDB leverages the OpenTelemetry SDK** to send metrics data via the OpenTelemetry Protocol,
+  allowing seamless data collection and analysis by an OpenTelemetry receiver.
+
+* In addition, an OpenTelemetry collector can retrieve data from RavenDB's [Prometheus](../../../server/administration/monitoring/prometheus.mdx) endpoint.
+  Learn more [below](../../../server/administration/monitoring/open-telemetry.mdx#retrieve-data-from-prometheus).
+
+* OpenTelemetry support is provided for RavenDB instances both on-premises and in the cloud.
+* In this page:
+  * [Enabling OpenTelemetry in RavenDB](../../../server/administration/monitoring/open-telemetry.mdx#enabling-opentelemetry-in-ravendb)
+  * [RavenDB OpenTelemetry meters](../../../server/administration/monitoring/open-telemetry.mdx#ravendb-opentelemetry-meters)
+  * [Server identification in metrics](../../../server/administration/monitoring/open-telemetry.mdx#server-identification-in-metrics)
+  * [The metric instruments](../../../server/administration/monitoring/open-telemetry.mdx#the-metric-instruments)
+  * [Metrics export options](../../../server/administration/monitoring/open-telemetry.mdx#metrics-export-options)
+  * [Console](../../../server/administration/monitoring/open-telemetry.mdx#console)
+  * [OpenTelemetry Protocol](../../../server/administration/monitoring/open-telemetry.mdx#opentelemetry-protocol)
+  * [Configuring the OpenTelemetry Collector](../../../server/administration/monitoring/open-telemetry.mdx#configuring-the-opentelemetry-collector)
+  * [Receive data via OTLP](../../../server/administration/monitoring/open-telemetry.mdx#receive-data-via-otlp)
+  * [Retrieve data from Prometheus](../../../server/administration/monitoring/open-telemetry.mdx#retrieve-data-from-prometheus)
+
+
+## Enabling OpenTelemetry in RavenDB
+
+* To enable the OpenTelemetry metrics in RavenDB,
+  you **must** first set the [Monitoring.OpenTelemetry.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryenabled) configuration key to _true_.
+
+* Learn how to customize configuration keys in the [Configuration Overview](../../../server/configuration/configuration-options.mdx) article,
+  which outlines all available options.
+
+* Please remember that it is necessary to restart the RavenDB process for changes to take effect.
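+
+For example, a minimal sketch of the relevant `settings.json` entry (the rest of the
+file is omitted; the key can equally be supplied through the command line or an
+environment variable, as described in the Configuration Overview):
+
+
+{`{
+    "Monitoring.OpenTelemetry.Enabled": true
+}
+`}
+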
+
+
+## RavenDB OpenTelemetry meters
+
+Each meter listed below groups similar or related [metric instruments](../../../server/administration/monitoring/open-telemetry.mdx#the-metric-instruments).
+Each metric instrument observes some metric value.
+
+Only the most commonly used meters are enabled by default.
+This can be customized through the specified configuration keys.
+
+RavenDB exposes the following meters:
+
+* **ravendb.server.cpucredits**
+  Description: Exposes the status of CPU credits (cloud)
+  Enabled by default: _false_
+  Configuration key: [Monitoring.OpenTelemetry.Meters.Server.CPUCredits.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersservercpucreditsenabled)
+* **ravendb.server.gc**
+  Description: Exposes detailed information about the Garbage Collector
+  Enabled by default: _false_
+  Configuration key: [Monitoring.OpenTelemetry.Meters.Server.GC.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersservergcenabled)
+* **ravendb.server.general**
+  Description: Exposes general info about the cluster and its licensing
+  Enabled by default: _true_
+  Configuration key: [Monitoring.OpenTelemetry.Meters.Server.General.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersservergeneralenabled)
+* **ravendb.server.requests**
+  Description: Exposes information about requests processed by the server
+  Enabled by default: _true_
+  Configuration key: [Monitoring.OpenTelemetry.Meters.Server.Requests.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersserverrequestsenabled)
+* **ravendb.server.resources**
+  Description: Exposes detailed information about resource usage (e.g. CPU)
+  Enabled by default: _true_
+  Configuration key: [Monitoring.OpenTelemetry.Meters.Server.Resources.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersserverresourcesenabled)
+* **ravendb.server.storage**
+  Description: Exposes storage information
+  Enabled by default: _true_
+  Configuration key: [Monitoring.OpenTelemetry.Meters.Server.Storage.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersserverstorageenabled)
+* **ravendb.server.totaldatabases**
+  Description: Exposes aggregated information about the databases on the server
+  Enabled by default: _true_
+  Configuration key: [Monitoring.OpenTelemetry.Meters.Server.TotalDatabases.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersservertotaldatabasesenabled)
+
+RavenDB also supports exposing meters developed by Microsoft for AspNetCore and the .NET Runtime:
+
+* **Official AspNetCore instrumentation**
+  Description: See the official [AspNetCore documentation](https://github.com/open-telemetry/opentelemetry-dotnet-contrib/blob/main/src/OpenTelemetry.Instrumentation.AspNetCore/README.md#metrics)
+  Enabled by default: _false_
+  Configuration key: [Monitoring.OpenTelemetry.Meters.AspNetCore.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersaspnetcoreenabled)
+* **Official Runtime instrumentation**
+  Description: See the official [.NET Runtime documentation](https://github.com/open-telemetry/opentelemetry-dotnet-contrib/tree/main/src/OpenTelemetry.Instrumentation.Runtime#metrics)
+  Enabled by default: _false_
+  Configuration key: [Monitoring.OpenTelemetry.Meters.Runtime.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersruntimeenabled)
+
+
+
+## Server identification in metrics
+
+* OpenTelemetry monitoring requires a service instance ID for initialization.
+
+* The identification of the server that originated each exposed metric will be listed in the `serviceInstanceId` property within the metric data.
+ +* The server instance identification is determined by the following sequence: + + 1. **Configuration Key** + First, attempt to retrieve the [Monitoring.OpenTelemetry.ServiceInstanceId](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryserviceinstanceid) configuration key. + 2. **Public URL Hostname** + If the configuration key is Not defined, use the server's public URL hostname, provided it is available. + 3. **Node Tag** + If the public URL hostname is unavailable, attempt to use the node tag. + 4. **Initialization Failure** + If none of the above options are available, OpenTelemetry will Not be initialized. + + + +## The metric instruments + +#### `ravendb.server.cpucredits` + +| Instrument name | Description | Instrument type | +|---------------------------------------------------------|-------------------------------------------|--------------------| +| ravendb.server.cpucredits.alert_raised | CPU Credits Any Alert Raised | Gauge | +| ravendb.server.cpucredits.background.tasks.alert_raised | CPU Credits Background Tasks Alert Raised | Gauge | +| ravendb.server.cpucredits.base | CPU Credits Base | UpDownCounter | +| ravendb.server.cpucredits.consumption_current | CPU Credits Gained Per Second | UpDownCounter | +| ravendb.server.cpucredits.failover.alert_raised | CPU Credits Failover Alert Raised | Gauge | +| ravendb.server.cpucredits.max | CPU Credits Max | UpDownCounter | +| ravendb.server.cpucredits.remaining | CPU Credits Remaining | Gauge | + +#### `ravendb.server.gc` + +| Instrument name | Description | Instrument type | +|--------------------------------------------|---------------------------------------------------------------------------------------------------------------------|-------------------| +| ravendb.server.gc.compacted | Specifies if this is a compacting GC or not. | Gauge | +| ravendb.server.gc.concurrent | Specifies if this is a concurrent GC or not. | Gauge | +| ravendb.server.gc.finalizationpendingcount | Gets the number of objects ready for finalization this GC observed. | Gauge | +| ravendb.server.gc.fragmented | Gets the total fragmentation (in MB) when the last garbage collection occurred. | Gauge | +| ravendb.server.gc.gclohsize | Gets the large object heap size (in MB) after the last garbage collection of given kind occurred. | Gauge | +| ravendb.server.gc.generation | Gets the generation this GC collected. | Gauge | +| ravendb.server.gc.heapsize | Gets the total heap size (in MB) when the last garbage collection occurred. | Gauge | +| ravendb.server.gc.highmemoryloadthreshold | Gets the high memory load threshold (in MB) when the last garbage collection occurred. | Gauge | +| ravendb.server.gc.index | The index of this GC. | Gauge | +| ravendb.server.gc.memoryload | Gets the memory load (in MB) when the last garbage collection occurred. | Gauge | +| ravendb.server.gc.pausedurations1 | Gets the pause durations. First item in the array. | Gauge | +| ravendb.server.gc.pausedurations2 | Gets the pause durations. Second item in the array. | Gauge | +| ravendb.server.gc.pinnedobjectscount | Gets the number of pinned objects this GC observed. | Gauge | +| ravendb.server.gc.promoted | Gets the promoted MB for this GC. | Gauge | +| ravendb.server.gc.timepercentage | Gets the pause time percentage in the GC so far. | Gauge | +| ravendb.server.gc.totalavailablememory | Gets the total available memory (in MB) for the garbage collector to use when the last garbage collection occurred. 
| Gauge |
+| ravendb.server.gc.totalcommitted | Gets the total committed MB of the managed heap. | Gauge |
+
+#### `ravendb.server.general`
+
+| Instrument name | Description | Instrument type |
+|-------------------------------------------------------------------------------|------------------------------------|-----------------|
+| ravendb.server.general.certificate_server_certificate_expiration_left_seconds | Server certificate expiration left | Gauge |
+| ravendb.server.general.cluster.index | Cluster index | UpDownCounter |
+| ravendb.server.general.cluster.node.state | Current node state | UpDownCounter |
+| ravendb.server.general.cluster.term | Cluster term | UpDownCounter |
+| ravendb.server.general.license.cores.max | Server license max CPU cores | Gauge |
+| ravendb.server.general.license.cpu.utilized | Server license utilized CPU cores | Gauge |
+| ravendb.server.general.license.expiration_left_seconds | Server license expiration left | Gauge |
+| ravendb.server.general.license.type | Server license type | Gauge |
+
+#### `ravendb.server.resources`
+
+| Instrument name | Description | Instrument type |
+|------------------------------------------------------------------------|-----------------------------------------------------------------|-----------------|
+| ravendb.server.resources.available_memory_for_processing | Available memory for processing \(in MB\) | Gauge |
+| ravendb.server.resources.cpu.machine | Machine CPU usage in % | Gauge |
+| ravendb.server.resources.cpu.process | Process CPU usage in % | Gauge |
+| ravendb.server.resources.dirty_memory | Dirty Memory that is used by the scratch buffers in MB | Gauge |
+| ravendb.server.resources.encryption_buffers.memory_in_pool | Server encryption buffers memory in pool in MB | Gauge |
+| ravendb.server.resources.encryption_buffers.memory_in_use | Server encryption buffers memory in use in MB | Gauge |
+| ravendb.server.resources.io_wait | IO wait in % | Gauge |
+| ravendb.server.resources.low_memory_flag | Server low memory flag value | Gauge |
+| ravendb.server.resources.machine.assigned_processor_count | Number of assigned processors on the machine | UpDownCounter |
+| ravendb.server.resources.machine.processor_count | Number of processors on the machine | UpDownCounter |
+| ravendb.server.resources.managed_memory | Server managed memory size in MB | Gauge |
+| ravendb.server.resources.thread_pool.available_completion_port_threads | Number of available completion port threads in the thread pool | Gauge |
+| ravendb.server.resources.thread_pool.available_worker_threads | Number of available worker threads in the thread pool | Gauge |
+| ravendb.server.resources.total_memory | Server allocated memory in MB | Gauge |
+| ravendb.server.resources.total.swap_usage | Server total swap usage in MB | Gauge |
+| ravendb.server.resources.total.swap.size | Server total swap size in MB | Gauge |
+| ravendb.server.resources.unmanaged_memory | Server unmanaged memory size in MB | Gauge |
+| ravendb.server.resources.working_set_swap_usage | Server working set swap usage in MB | Gauge |
+
+#### `ravendb.server.requests`
+
+| Instrument name | Description | Instrument type |
+|------------------------------------------------------|-----------------------------------------------|-----------------|
+| ravendb.server.requests.requests.average_duration | Average request time in milliseconds | Gauge |
+| ravendb.server.requests.requests.concurrent_requests | Number of concurrent requests | UpDownCounter |
+| 
ravendb.server.requests.requests.per_second | Number of requests per second. | Gauge | +| ravendb.server.requests.tcp.active.connections | Number of active TCP connections | Gauge | +| ravendb.server.requests.total.requests | Total number of requests since server startup | UpDownCounter | + +#### `ravendb.server.storage` + +| Instrument name | Description | Instrument type | +|----------------------------------------------------------------|-------------------------------------------|-------------------| +| ravendb.server.storage.storage.disk.ios.read_operations | IO read operations per second | Gauge | +| ravendb.server.storage.storage.disk.ios.write_operations | IO write operations per second | Gauge | +| ravendb.server.storage.storage.disk.queue_length | Queue length | Gauge | +| ravendb.server.storage.storage.disk.read_throughput | Read throughput in kilobytes per second | Gauge | +| ravendb.server.storage.storage.disk.remaining.space | Remaining server storage disk space in MB | Gauge | +| ravendb.server.storage.storage.disk.remaining.space_percentage | Remaining server storage disk space in % | Gauge | +| ravendb.server.storage.storage.disk.write_throughput | Write throughput in kilobytes per second | Gauge | +| ravendb.server.storage.storage.total_size | Server storage total size in MB | Gauge | +| ravendb.server.storage.storage.used_size | Server storage used size in MB | Gauge | + +#### `ravendb.server.totaldatabases` + +| Instrument name | Description | Instrument type | +|-------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|-------------------| +| ravendb.server.totaldatabases.count_stale_indexes | Number of stale indexes in all loaded databases | UpDownCounter | +| ravendb.server.totaldatabases.data.written.per_second | Number of bytes written \(documents, attachments, counters, timeseries\) in all loaded databases | Gauge | +| ravendb.server.totaldatabases.database.disabled_count | Number of disabled databases | UpDownCounter | +| ravendb.server.totaldatabases.database.encrypted_count | Number of encrypted databases | UpDownCounter | +| ravendb.server.totaldatabases.database.faulted_count | Number of faulted databases | UpDownCounter | +| ravendb.server.totaldatabases.database.loaded_count | Number of loaded databases | UpDownCounter | +| ravendb.server.totaldatabases.database.node_count | Number of databases for current node | UpDownCounter | +| ravendb.server.totaldatabases.database.total_count | Number of all databases | UpDownCounter | +| ravendb.server.totaldatabases.map_reduce.index.mapped_per_second | Number of maps per second for map-reduce indexes \(one minute rate\) in all loaded databases | Gauge | +| ravendb.server.totaldatabases.map_reduce.index.reduced_per_second | Number of reduces per second for map-reduce indexes \(one minute rate\) in all loaded databases | Gauge | +| ravendb.server.totaldatabases.map.index.indexed_per_second | Number of indexed documents per second for map indexes \(one minute rate\) in all loaded databases | Gauge | +| ravendb.server.totaldatabases.number_error_indexes | Number of error indexes in all loaded databases | UpDownCounter | +| ravendb.server.totaldatabases.number_of_indexes | Number of indexes in all loaded databases | UpDownCounter | +| ravendb.server.totaldatabases.number.faulty_indexes | Number of faulty indexes in all loaded databases | UpDownCounter | +| ravendb.server.totaldatabases.writes_per_second | Number 
of writes \(documents, attachments, counters, timeseries\) in all loaded databases | Gauge | + + + +## Metrics export options + +RavenDB offers two options for exporting metrics: + + * Export data to the console for immediate metrics insight. + * Export data via the OpenTelemetry Protocol (OTLP) for integration with other observability platforms. + + + + __Console__: +* RavenDB can output telemetry data directly to the console, + providing real-time, local visibility of the OpenTelemetry metrics. + +* This is particularly useful for local development and debugging, + as it eliminates the need for integration with external monitoring systems or observability platforms. + +* To enable output to the console - + set the [Monitoring.OpenTelemetry.ConsoleExporter](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryconsoleexporter) configuration key to _true_. + + + + + __OpenTelemetry Protocol__: +* OpenTelemetry supports the **OpenTelemetry Protocol** (OTLP), + a standard wire protocol used by all OpenTelemetry SDKs. + +* This protocol allows data to be sent from metrics producers (e.g. RavenDB) to any software that supports OTLP. + The recommended software, as suggested by OpenTelemetry, is called **OpenTelemetry Collector**. + The Collector can be further configured to forward and export this data to various analysis tools. + Learn about the OpenTelemetry Collector on the official documentation site: [OTel Collector](https://opentelemetry.io/docs/collector/). + +* RavenDB supports the official OTLP by default, allowing you to export RavenDB metrics to the OpenTelemetry Collector. + +* To enable exporting metrics via the OpenTelemetry Protocol - + set the [Monitoring.OpenTelemetry.OpenTelemetryProtocol.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolenabled) configuration key to _true_. + +* By default, RavenDB does not override the OpenTelemetry Protocol exporter default values. + However, customization is available via the following configuration options: + +| Configuration key | Description | Accepted values | +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------| +| Monitoring.OpenTelemetry.OpenTelemetryProtocol
[.ExportProcessorType](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolexportprocessortype) | Export processor type | Simple / Batch | +| Monitoring.OpenTelemetry.OpenTelemetryProtocol
[.Headers](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolheaders) | Custom headers | string | +| Monitoring.OpenTelemetry.OpenTelemetryProtocol
[.Protocol](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolprotocol) | Defines the transport protocol that OpenTelemetry Protocol should use to send data. | gRPC / HttpProtobuf | +| Monitoring.OpenTelemetry.OpenTelemetryProtocol
[.Endpoint](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolendpoint) | Endpoint where OpenTelemetry Protocol should send data.
See configuration details [below](../../../server/administration/monitoring/open-telemetry.mdx#configure-endpoint). | string | +| Monitoring.OpenTelemetry.OpenTelemetryProtocol
[.Timeout](../../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocoltimeout) | Timeout | int | + + + + + +
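+
+For example, a minimal _settings.json_ sketch that enables the OTLP exporter and overrides some of these defaults
+(the protocol, endpoint, and processor type shown are placeholders - adjust them to your own collector setup):
+
+
+
+{`\{
+  ...
+  "Monitoring.OpenTelemetry.OpenTelemetryProtocol.Enabled": true,
+  "Monitoring.OpenTelemetry.OpenTelemetryProtocol.Protocol": "HttpProtobuf",
+  "Monitoring.OpenTelemetry.OpenTelemetryProtocol.Endpoint": "http://localhost:4318/v1/metrics",
+  "Monitoring.OpenTelemetry.OpenTelemetryProtocol.ExportProcessorType": "Batch"
+  ...
+\}
+`}
+
+
+
+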
__Configuring the OpenTelemetry Protocol endpoint__: +The defined endpoint must correspond to the endpoint [configured on the collector](../../../server/administration/monitoring/open-telemetry.mdx#configuring-the-opentelemetry-collector) for receiving metrics data. +The endpoint format should match the transport protocol you are using: + +
+
+* **HttpProtobuf** protocol:
+
+   * HttpProtobuf is a transport protocol used for efficiently serializing structured data over HTTP
+     using Protocol Buffers (Protobuf).
+
+   * The current official .NET implementation adheres to the OpenTelemetry specification,
+     which requires specifying the complete path to the collector endpoint when exporting metrics.
+     By default, the endpoint path for the OpenTelemetry Collector is `/v1/metrics`.
+
+   * So, for example, if you are using the default settings for the OpenTelemetry Collector
+     and utilizing **HttpProtobuf** as the transport protocol, the endpoint used would be
+     `http://localhost:4318/v1/metrics` (replace the default port 4318 with whatever port you are using).
+
+* **gRPC** protocol:
+
+   * When using the default settings for the OpenTelemetry Collector with the **gRPC** transport protocol,
+     the endpoint should be specified in the format `grpc://localhost:4317`
+     (replace the default port 4317 with whatever port you are using).
+
+   * The gRPC endpoint does not include a path like _/v1/metrics_.
+
+
+
+
+
+## Configuring the OpenTelemetry Collector
+
+* The OpenTelemetry Collector is a vendor-agnostic proxy that can receive, process, and export telemetry data (traces, metrics, logs)
+  from multiple sources to different backends. It acts as a centralized point for collecting and processing telemetry data.
+
+* In the OpenTelemetry Collector configuration, a **receiver** is a component that listens for incoming telemetry data.
+
+* Configure the receiver to either:
+  * Receive data via OTLP from RavenDB
+  * Retrieve data from RavenDB's Prometheus endpoint
+
+
+
__Receive data via OTLP__:
+* The `otlp` receiver is used for receiving telemetry data in the OpenTelemetry Protocol (OTLP) format.
+
+* Specify the endpoints where the OpenTelemetry Collector should listen for incoming telemetry data,
+  whether via gRPC or HTTP, and on which ports.
+  (4317 is the default port for gRPC, and 4318 is the default port for the HTTP protocol).
+
+* For example, define your receiver like so:
+
+
+
+{`receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: localhost:4317
+      http:
+        endpoint: localhost:4318
+`}
+
+
+
+
+
+ __Retrieve data from Prometheus__:
+* The OpenTelemetry Collector includes support for retrieving metrics from a Prometheus endpoint.
+
+* RavenDB provides a Prometheus endpoint that can be used as a data source for the OpenTelemetry Collector.
+  This endpoint provides metrics in a standardized format and integrates seamlessly without requiring you to enable OpenTelemetry in RavenDB.
+
+* Use the `prometheus_simple` receiver in your OpenTelemetry Collector configuration.
+  This setup instructs the collector to scrape metrics from a Prometheus endpoint.
+
+* Specify the address of your RavenDB server in the `endpoint` key.
+  Specify RavenDB's Prometheus endpoint path in the `metrics_path` key.
+
+* A sample configuration may look like this:
+
+
+
+{`receivers:
+  prometheus_simple:
+    endpoint: "http://localhost:8080" # Replace with your RavenDB instance address
+    metrics_path: "/admin/monitoring/v1/prometheus" # RavenDB's Prometheus endpoint path
+    collection_interval: 10s
+    tls:
+      cert_file: "D:\\\\cert.crt"
+      key_file: "D:\\\\key.key"
+      insecure: false
+      insecure_skip_verify: false
+`}
+
+
+
+* The configuration above sets up the OpenTelemetry Collector to scrape Prometheus-formatted metrics from a RavenDB server.
+  The collector will access `http://localhost:8080/admin/monitoring/v1/prometheus` to retrieve the metrics.
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/prometheus.mdx b/versioned_docs/version-7.1/server/administration/monitoring/prometheus.mdx
new file mode 100644
index 0000000000..710628ded8
--- /dev/null
+++ b/versioned_docs/version-7.1/server/administration/monitoring/prometheus.mdx
@@ -0,0 +1,239 @@
+---
+title: "Monitoring: Prometheus"
+hide_table_of_contents: true
+sidebar_label: Prometheus
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Monitoring: Prometheus
+
+
+* Prometheus is a popular monitoring tool designed to help in the inspection and
+  administration of networks, infrastructures, databases, etc.
+
+* RavenDB exposes data metrics via an HTTP endpoint in a Prometheus-compatible format,
+  allowing a Prometheus server to scrape the data from the endpoint and handle it.
+
+* A Prometheus endpoint is provided by RavenDB instances both on-premises and in the cloud.
+
+* In this page:
+  * [Prometheus](../../../server/administration/monitoring/prometheus.mdx#prometheus)
+  * [RavenDB Prometheus Endpoint](../../../server/administration/monitoring/prometheus.mdx#ravendb-prometheus-endpoint)
+  * [Omit or Include Selected Metrics](../../../server/administration/monitoring/prometheus.mdx#omit-or-include-selected-metrics)
+  * [Metrics Provided by the Prometheus Endpoint](../../../server/administration/monitoring/prometheus.mdx#metrics-provided-by-the-prometheus-endpoint)
+  * [Using the RavenDB Endpoint by a Prometheus Server](../../../server/administration/monitoring/prometheus.mdx#using-the-ravendb-endpoint-by-a-prometheus-server)
+  * [Fetching Additional RavenDB Information](../../../server/administration/monitoring/prometheus.mdx#fetching-additional-ravendb-information)
+
+
+## Prometheus
+
+Prometheus is commonly used in conjunction with services that expose numeric data via HTTP endpoints.
+A Prometheus server can be used (either manually or by an application using a dedicated library) to
+scrape the data off an endpoint, monitor and store it, and put it to use in additional ways like raising
+alerts related to it or forwarding it to analysis services.
+
+
+
+## RavenDB Prometheus Endpoint
+
+The path to the Prometheus endpoint of a RavenDB instance is: `/admin/monitoring/v1/prometheus`
+To inspect the endpoint's output using a browser, add the endpoint path to the RavenDB server's URL.
+E.g. [http://live-test.ravendb.net/admin/monitoring/v1/prometheus](http://live-test.ravendb.net/admin/monitoring/v1/prometheus)
+
+![RavenDB: Prometheus Endpoint Output](./assets/ravendb_prometheus-endpoint-output.png)
+
+* As Prometheus handles only numeric values, the endpoint outputs all values as numbers,
+  providing legends that explain what the numbers mean.
+  Metric values are also explained in the [table below](../../../server/administration/monitoring/prometheus.mdx#metrics-provided-by-the-prometheus-endpoint).
+
+## Omit or Include Selected Metrics
+
+To omit metrics related to selected topics from the data returned by the endpoint, add one or more of the
+flags listed below to the request URL. Each flag can be set to `true` to omit the topic from the results,
+or to `false` to include it.
+
+* `skipServerMetrics`
+* `skipDatabasesMetrics`
+* `skipIndexesMetrics`
+* `skipCollectionsMetrics`
+
+E.g., to skip indexing metrics use -
+http://localhost:8080/admin/monitoring/v1/prometheus?skipIndexesMetrics=true
+And to skip both indexing and server metrics use -
+http://localhost:8080/admin/monitoring/v1/prometheus?skipIndexesMetrics=true&skipServerMetrics=true
+
+## Metrics Provided by the Prometheus Endpoint
+
+Here is the list of metrics made available by the `/admin/monitoring/v1/prometheus` endpoint.
+
+| Metrics | Description |
+| - | - |
+| archived_data_processing_behavior | Archived data processing behavior
+`0`/`1`/`2`
0 => ExcludeArchived
1 => IncludeArchived
2 => ArchivedOnly | +| backup_current_number_of_running_backups | Number of currently running backups | +| backup_max_number_of_concurrent_backups | Maximum number of concurrent backups | +| certificate_server_certificate_expiration_left_seconds | Server certificate expiration left in seconds | +| cluster_current_term | Cluster term | +| cluster_index | Cluster index | +| cluster_node_state | Current node state + `0`/`1`/`2`
0 => Passive
1 => Candidate
2 => Follower
3 => LeaderElect
4 => Leader | +| collection_documents_count | Number of documents in collection | +| collection_documents_size_bytes | Size of documents | +| collection_revisions_size_bytes | Size of revisions | +| collection_tombstones_size_bytes | Size of tombstones | +| collection_total_size_bytes | Total size of collection | +| cpu_assigned_processor_count | Number of assigned processors on the machine | +| cpu_machine_io_wait | IO wait in % | +| cpu_machine_usage | Machine CPU usage in % | +| cpu_process_usage | Process CPU usage in % | +| cpu_processor_count | Number of processors on the machine | +| cpu_thread_pool_available_completion_port_threads | Number of available completion port threads in the thread pool | +| cpu_thread_pool_available_worker_threads | Number of available worker threads in the thread pool | +| database_alerts_count | Number of alerts | +| database_attachments_count | Number of attachments | +| database_documents_count | Number of documents | +| database_indexes_auto_count | Number of auto indexes | +| database_indexes_count | Number of indexes | +| database_indexes_errored_count | Number of error indexes | +| database_indexes_disabled_count | Number of disabled indexes | +| database_indexes_errors_count | Number of indexing errors | +| database_indexes_idle_count | Number of idle indexes | +| database_indexes_stale_count | Number of stale indexes | +| database_indexes_static_count | Number of static indexes | +| database_performance_hints_count | Number of performance hints | +| database_rehabs_count | Number of rehabs | +| database_replication_factor | Database replication factor | +| database_revisions_count | Number of revision documents | +| database_statistics_doc_puts_per_second | Number of document puts per second (one minute rate) | +| database_statistics_map_index_indexes_per_second | Number of indexed documents per second for map indexes (one minute rate) | +| database_statistics_map_reduce_index_mapped_per_second | Number of maps per second for map-reduce indexes (one minute rate) | +| database_statistics_map_reduce_index_reduced_per_second | Number of reduces per second for map-reduce indexes (one minute rate) | +| database_statistics_request_average_duration_seconds | Average request time in seconds | +| database_statistics_requests_count | Number of requests from database start | +| database_statistics_requests_per_second | Number of requests per second (one minute rate) | +| database_storage_documents_allocated_data_file_bytes | Documents storage allocated size | +| database_storage_documents_used_data_file_bytes | Documents storage used size | +| database_storage_indexes_allocated_data_file_bytes | Index storage allocated size | +| database_storage_indexes_used_data_file_bytes | Index storage used size | +| database_storage_io_read_operations | Disk IO Read operations | +| database_storage_io_write_operations | Disk IO Write operations | +| database_storage_queue_length | Disk Queue length | +| database_storage_read_throughput_bytes | Disk Read Throughput | +| database_storage_total_allocated_storage_file_bytes | Total storage size | +| database_storage_total_free_space_bytes | Remaining storage disk space | +| database_storage_write_throughput_bytes | Disk Write Throughput | +| database_time_since_last_backup_seconds | Last backup | +| database_unique_attachments_count | Number of unique attachments | +| database_uptime_seconds | Database up-time | +| databases_loaded_count | Number of loaded databases | +| databases_total_count | Number of all databases | 
+| index_entries_count | Number of entries in the index | +| index_errors | Number of index errors | +| index_is_invalid | Indicates if index is invalid | +| index_lock_mode | Index lock mode + `0`/`1`/`2`
0 => Unlock
1 => LockedIgnore
2 => LockedError | +| index_mapped_per_second | Number of maps per second (one minute rate) | +| index_priority | Index priority + `0`/`1`/`2`
0 => Low
1 => Normal
2 => High | +| index_reduced_per_second | Number of reduces per second (one minute rate) | +| index_state | Index state + `0`/`1`/`2`/`3`
0 => Normal
1 => Disabled
2 => Idle
3 => Error | +| index_status | Index status + `0`/`1`/`2`/`3`
0 => Running
1 => Paused
2 => Disabled
3 => Pending (for rolling indexes) | +| index_time_since_last_indexing_seconds | Time since last indexing | +| index_time_since_last_query_seconds | Time since last query | +| index_type | Index type + `0`/`1`/`2`/`3`/`4`/`5`/`6`/`7`
0 => None
1 => AutoMap
2 => AutoMapReduce
3 => Map
4 => MapReduce
5 => Faulty
6 => JavaScriptMap
7 => JavaScriptMapReduce | +| license_expiration_left_seconds | Server license expiration left | +| license_max_cores | Server license max CPU cores | +| license_type | Server license type + `-1`/`0`/`1`/`2`/`3`/`4`/`5`/`6`
-1 => Invalid
0 => None
1 => Community
2 => Reserved
3 => Professional
4 => Enterprise
5 => Developer
6 => Essential | +| license_utilized_cpu_cores | Server license utilized CPU cores | +| memory_allocated_bytes | Server allocated memory | +| memory_installed_bytes | Installed memory | +| memory_low_memory_severity | Server low memory flag value + `0`/`1`/`2`
0 => None
1 => Low
2 => ExtremelyLow |
+| memory_physical_bytes | Physical memory |
+| memory_total_dirty_bytes | Dirty memory that is used by the scratch buffers |
+| memory_total_swap_size_bytes | Server total swap size |
+| memory_total_swap_usage_bytes | Server total swap usage |
+| memory_working_set_swap_usage_bytes | Server working set swap usage |
+| network_concurrent_requests_count | Number of concurrent requests |
+| network_last_authorized_non_cluster_admin_request_time_in_sec | Server last authorized non cluster admin request time |
+| network_last_request_time_in_seconds | Server last request time |
+| network_requests_per_second | Number of requests per second (one minute rate) |
+| network_tcp_active_connections | Number of active TCP connections |
+| network_total_requests | Total number of requests since server startup |
+| server_disk_remaining_storage_space_percentage | Remaining server storage disk space in % |
+| server_disk_system_store_total_data_file_size_bytes | Server storage total size |
+| server_disk_system_store_used_data_file_size_bytes | Server storage used size |
+| server_info | Server Info |
+| server_process_id | Server process ID |
+| server_storage_io_read_operations | Disk IO Read operations |
+| server_storage_io_write_operations | Disk IO Write operations |
+| server_storage_queue_length | Disk Queue length |
+| server_storage_read_throughput_bytes | Disk Read Throughput |
+| server_storage_total_free_space_bytes | Remaining storage disk space |
+| server_storage_write_throughput_bytes | Disk Write Throughput |
+| server_uptime_seconds | Server up-time |
+
+
+
+## Using the RavenDB Endpoint by a Prometheus Server
+
+To direct a Prometheus server to the Prometheus endpoint of a RavenDB instance,
+[add an entry](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)
+to the Prometheus `yml` configuration file.
+
+* **Prometheus.yml**:
+
+
+{`- job_name: "local-raven-instance"
+  # Monitor a local, non-secure RavenDB server
+  metrics_path: "/admin/monitoring/v1/prometheus"
+  static_configs:
+    - targets:
+      - "localhost:8080"
+- job_name: "cloud-raven-instance"
+  # Monitor 3 RavenDB nodes on the cloud
+  scheme: https
+  tls_config:
+    cert_file: 'a.client.certificate.crt'
+    key_file: 'a.client.certificate.key'
+  metrics_path: "/admin/monitoring/v1/prometheus"
+  static_configs:
+    - targets:
+      - "a.cloudtest.ravendb.org"
+      - "b.cloudtest.ravendb.org"
+      - "c.cloudtest.ravendb.org"
+`}
+
+
+
+* When you run the Prometheus server, direct it to the configuration file you want it to use.
+  `./prometheus --config.file=prometheus.yml`
+
+* When the Prometheus server is running, you'll be able to query it for various RavenDB metrics.
+
+  ![Prometheus: Select Metrics](./assets/prometheus_select-metrics.png)
+
+  ![Prometheus: Metrics Graph](./assets/prometheus_metrics-graph.png)
+
+  * Use the search bar to search for relevant metrics.
+    Typing **raven** will display a list of metrics provided by the endpoint.
+
+* Metrics can also be inspected in RavenDB's endpoint output using the browser.
+  In the following screenshot, for example, we can see that the priority of one of the indexes was updated to 2 (high).
+
+  ![RavenDB Endpoint Output: Index Priority](./assets/RavenDB_changed-index-priority.png)
+
+  Providing the Prometheus server with the metric's name will display the event graphically.
+
+  ![Prometheus Server: Index Priority](./assets/Prometheus_changed-index-priority.png)
+
+## Fetching Additional RavenDB Information
+
+To retrieve additional information about RavenDB, including its version,
+HTTP and TCP URLs, and other details, query Prometheus for `ravendb_server_info`.
+
+![Additional Information](./assets/additional-info.png)
+
+
+
diff --git a/versioned_docs/version-7.1/server/administration/monitoring/telegraf.mdx b/versioned_docs/version-7.1/server/administration/monitoring/telegraf.mdx
new file mode 100644
index 0000000000..3b70128fdb
--- /dev/null
+++ b/versioned_docs/version-7.1/server/administration/monitoring/telegraf.mdx
@@ -0,0 +1,163 @@
+---
+title: "Monitoring: Telegraf Plugin"
+hide_table_of_contents: true
+sidebar_label: Telegraf Plugin
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Monitoring: Telegraf Plugin
+
+
+* The endpoints listed in this page provide a wide variety of performance metrics for a RavenDB
+  instance, including, for example, information regarding indexing, communication inside a cluster,
+  and the server's memory usage.
+
+* These metrics can be collected using the [RavenDB Telegraf Plugin](https://docs.influxdata.com/telegraf/v1.18/plugins/#ravendb)
+  and displayed as live graphs using [Grafana](https://grafana.com/).
+
+* In this page:
+  * [Telegraf](../../../server/administration/monitoring/telegraf.mdx#telegraf)
+  * [Monitoring Endpoints](../../../server/administration/monitoring/telegraf.mdx#monitoring-endpoints)
+  * [JSON Fields Returned by the Endpoints](../../../server/administration/monitoring/telegraf.mdx#json-fields-returned-by-the-endpoints)
+
+
+## Telegraf
+
+[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular data collection
+and processing agent designed to work with time series data. Version 1.18 of Telegraf has a new
+plugin for RavenDB that collects data from RavenDB's monitoring endpoints. The recommended use
+for the RavenDB plugin is to have Telegraf output to [InfluxDB](https://www.influxdata.com/products/influxdb/),
+and from there the data can be queried by [Grafana](https://grafana.com/) and displayed on your own
+data tracking dashboard. But this feature is flexible - Telegraf can output data to other destinations.
+
+## Monitoring Endpoints
+
+The monitoring endpoints output data in JSON format.
There are four endpoints: + +* `/admin/monitoring/v1/server` +* `/admin/monitoring/v1/databases` +* `/admin/monitoring/v1/indexes` +* `/admin/monitoring/v1/collections` + +## JSON Fields Returned by the Endpoints + +The following is a list of JSON fields returned by the endpoints: + +| Endpoint Suffix | Field Name | Description | +| - | - | - | +| `collections` | `collection_name` | Collection name | +| `collections` | `database_name` | Name of this collection's database | +| `collections` | `documents_count` | Number of documents in collection | +| `collections` | `documents_size_in_bytes` | Size of documents in bytes | +| `collections` | `revisions_size_in_bytes` | Size of revisions in bytes | +| `collections` | `tombstones_size_in_bytes` | Size of tombstones in bytes | +| `collections` | `total_size_in_bytes` | Total size of collection in bytes | +| `databases` | `database_id` | Database ID | +| `databases` | `database_name` | Database name | +| `databases` | `counts_alerts` | Number of alerts | +| `databases` | `counts_attachments` | Number of attachments | +| `databases` | `counts_documents` | Number of documents | +| `databases` | `counts_performance_hints` | Number of performance hints | +| `databases` | `counts_rehabs` | Number of rehabs | +| `databases` | `counts_revisions` | Number of revision documents | +| `databases` | `counts_unique_attachments` | Number of unique attachments | +| `databases` | `indexes_auto_count` | Number of auto indexes | +| `databases` | `indexes_count` | Number of indexes | +| `databases` | `indexes_disabled_count` | Number of disabled indexes | +| `databases` | `indexes_errored_count` | Number of error indexes | +| `databases` | `indexes_errors_count` | Number of indexing errors | +| `databases` | `indexes_idle_count` | Number of idle indexes | +| `databases` | `indexes_stale_count` | Number of stale indexes | +| `databases` | `indexes_static_count` | Number of static indexes | +| `databases` | `statistics_doc_puts_per_sec` | Number of document puts per second (one minute rate) | +| `databases` | `statistics_map_index_indexes_per_sec` | Number of indexed documents per second for map indexes (one minute rate) | +| `databases` | `statistics_map_reduce_index_mapped_per_sec` | Number of maps per second for map-reduce indexes (one minute rate) | +| `databases` | `statistics_map_reduce_index_reduced_per_sec` | Number of reduces per second for map-reduce indexes (one minute rate) | +| `databases` | `statistics_request_average_duration_in_ms` | Average request time in milliseconds | +| `databases` | `statistics_requests_count` | Number of requests from database start | +| `databases` | `statistics_requests_per_sec` | Number of requests per second (one minute rate) | +| `databases` | `storage_documents_allocated_data_file_in_mb` | Documents storage allocated size in MB | +| `databases` | `storage_documents_used_data_file_in_mb` | Documents storage used size in MB | +| `databases` | `storage_indexes_allocated_data_file_in_mb` | Index storage allocated size in MB | +| `databases` | `storage_indexes_used_data_file_in_mb` | Index storage used size in MB | +| `databases` | `storage_total_allocated_storage_file_in_mb` | Total storage size in MB | +| `databases` | `storage_total_free_space_in_mb` | Remaining storage disk space in MB | +| `databases` | `storage_io_read_operations` | Storage I/O Read operations
Optional, Linux only | +| `databases` | `storage_io_write_operations` | Storage I/O Write operations
Optional, Linux only | +| `databases` | `storage_read_throughput_in_kb` | Storage Read throughput in KB
Optional, Linux only | +| `databases` | `storage_write_throughput_in_kb` | Storage Write throughput in KB
Optional, Linux only | +| `databases` | `storage_queue_length` | Storage queue length
Optional, Linux only |
+| `databases` | `time_since_last_backup_in_sec` | Time since last backup |
+| `databases` | `uptime_in_sec` | Database up-time |
+| `indexes` | `entries_count` | Number of entries in the index |
+| `indexes` | `errors` | Number of index errors |
+| `indexes` | `index_name` | Index name |
+| `indexes` | `is_invalid` | Indicates if index is invalid |
+| `indexes` | `lagtime` | Indexing Lag Time |
+| `indexes` | `lock_mode` | Index lock mode |
+| `indexes` | `mapped_per_sec` | Number of maps per second (one minute rate) |
+| `indexes` | `priority` | Index priority |
+| `indexes` | `reduced_per_sec` | Number of reduces per second (one minute rate) |
+| `indexes` | `state` | Index state |
+| `indexes` | `status` | Index status |
+| `indexes` | `time_since_last_indexing_in_sec` | Time since last indexing |
+| `indexes` | `time_since_last_query_in_sec` | Time since last query |
+| `indexes` | `type` | Index type |
+| `server` | `backup_current_number_of_running_backups` | Number of backups currently running |
+| `server` | `backup_max_number_of_concurrent_backups` | Max number of backups that can run concurrently |
+| `server` | `certificate_server_certificate_expiration_left_in_sec` | Server certificate expiration left |
+| `server` | `certificate_well_known_admin_certificates` | List of well known admin certificate thumbprints |
+| `server` | `cluster_current_term` | Cluster term |
+| `server` | `cluster_id` | Cluster ID |
+| `server` | `cluster_index` | Cluster index |
+| `server` | `cluster_node_state` | Current node state |
+| `server` | `node_tag` | Current node tag |
+| `server` | `config_server_urls` | Server URLs |
+| `server` | `public_server_url` | The server's public URL |
+| `server` | `config_tcp_server_urls` | Server TCP URL |
+| `server` | `config_public_tcp_server_urls` | Server public TCP URL |
+| `server` | `cpu_assigned_processor_count` | Number of assigned processors on the machine |
+| `server` | `cpu_machine_io_wait` | IO wait in % |
+| `server` | `cpu_machine_usage` | Machine CPU usage in % |
+| `server` | `cpu_process_usage` | Process CPU usage in % |
+| `server` | `cpu_processor_count` | Number of processors on the machine |
+| `server` | `cpu_thread_pool_available_worker_threads` | Number of available worker threads in the thread pool |
+| `server` | `cpu_thread_pool_available_completion_port_threads` | Number of available completion port threads in the thread pool |
+| `server` | `databases_loaded_count` | Number of loaded databases |
+| `server` | `databases_total_count` | Number of all databases |
+| `server` | `disk_remaining_storage_space_percentage` | Remaining server storage disk space in % |
+| `server` | `disk_system_store_total_data_file_size_in_mb` | Server storage total size in MB |
+| `server` | `disk_system_store_used_data_file_size_in_mb` | Server storage used size in MB |
+| `server` | `disk_total_free_space_in_mb` | Remaining server storage disk space in MB |
+| `server` | `license_expiration_left_in_sec` | Server license expiration left |
+| `server` | `license_max_cores` | Server license max CPU cores |
+| `server` | `license_type` | Server license type |
+| `server` | `license_utilized_cpu_cores` | Server license utilized CPU cores |
+| `server` | `memory_allocated_in_mb` | Server allocated memory in MB |
+| `server` | `memory_installed_in_mb` | Installed memory in MB |
+| `server` | `memory_low_memory_severity` | Server low memory flag value |
+| `server` | `memory_physical_in_mb` | Physical memory in MB |
+| `server` | `memory_total_dirty_in_mb` | 
Dirty memory that is used by the scratch buffers in MB |
+| `server` | `memory_total_swap_size_in_mb` | Server total swap size in MB |
+| `server` | `memory_total_swap_usage_in_mb` | Server total swap usage in MB |
+| `server` | `memory_working_set_swap_usage_in_mb` | Server working set swap usage in MB |
+| `server` | `network_concurrent_requests_count` | Number of concurrent requests |
+| `server` | `network_last_authorized_non_cluster_admin_request_time_in_sec` | Server last authorized non cluster admin request time |
+| `server` | `network_last_request_time_in_sec` | Server last request time |
+| `server` | `network_requests_per_sec` | Number of requests per second (one minute rate) |
+| `server` | `network_tcp_active_connections` | Number of active TCP connections |
+| `server` | `network_total_requests` | Total number of requests since server startup |
+| `server` | `server_full_version` | Server full version |
+| `server` | `server_process_id` | Server process ID |
+| `server` | `server_version` | Server version |
+| `server` | `uptime_in_sec` | Server up-time |
+
+
+
diff --git a/versioned_docs/version-7.1/server/administration/snmp/_category_.json b/versioned_docs/version-7.1/server/administration/snmp/_category_.json
new file mode 100644
index 0000000000..4f12f7f761
--- /dev/null
+++ b/versioned_docs/version-7.1/server/administration/snmp/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 2,
+  "label": "SNMP"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-add-host.PNG b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-add-host.PNG
new file mode 100644
index 0000000000..391dceb988
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-add-host.PNG differ
diff --git a/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-create-action.PNG b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-create-action.PNG
new file mode 100644
index 0000000000..3c9b02a542
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-create-action.PNG differ
diff --git a/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-create-trigger.PNG b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-create-trigger.PNG
new file mode 100644
index 0000000000..e87848c462
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-create-trigger.PNG differ
diff --git a/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-cummunity-macro.PNG b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-cummunity-macro.PNG
new file mode 100644
index 0000000000..44e44d9d58
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-cummunity-macro.PNG differ
diff --git a/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-dashboard.PNG b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-dashboard.PNG
new file mode 100644
index 0000000000..3141647040
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-dashboard.PNG differ
diff --git a/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-link-template.PNG
b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-link-template.PNG
new file mode 100644
index 0000000000..c74ec1c7f1
Binary files /dev/null and b/versioned_docs/version-7.1/server/administration/snmp/assets/monitoring-zabbix-link-template.PNG differ
diff --git a/versioned_docs/version-7.1/server/administration/snmp/setup-zabbix.mdx b/versioned_docs/version-7.1/server/administration/snmp/setup-zabbix.mdx
new file mode 100644
index 0000000000..af85f6f667
--- /dev/null
+++ b/versioned_docs/version-7.1/server/administration/snmp/setup-zabbix.mdx
@@ -0,0 +1,96 @@
+---
+title: "How to Set Up Zabbix Monitoring"
+hide_table_of_contents: true
+sidebar_label: Zabbix Monitoring
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# How to Set Up Zabbix Monitoring
+
+RavenDB supports [SNMP](./snmp-overview.mdx), which means that with a few quick steps you can monitor your server using Zabbix.
+
+### Installation
+
+Before you set up Zabbix to monitor RavenDB, make sure you have it up and running. If you haven't done so already,
+you should [read about Zabbix](https://www.zabbix.com/documentation/3.4/start),
+[install it](https://www.zabbix.com/documentation/3.4/manual/installation/getting_zabbix),
+and [create your own user](https://www.zabbix.com/documentation/3.4/manual/quickstart/login).
+
+Once installed, log in to the front end (the web interface provided with Zabbix). You should see the Zabbix dashboard.
+
+![Figure 1. Monitoring : How to set up Zabbix monitoring: Dashboard](./assets/monitoring-zabbix-dashboard.PNG)
+
+### Importing the RavenDB Template
+
+Navigate to `Configuration`->`Templates` and click the `Import` button in the top right corner.
+Import the RavenDB template, which can be downloaded from:
+[https://github.com/ravendb/ravendb/blob/v4.0/src/Raven.Server/Monitoring/Snmp/Templates/zabbix_ravendb_template.xml](https://github.com/ravendb/ravendb/blob/v4.0/src/Raven.Server/Monitoring/Snmp/Templates/zabbix_ravendb_template.xml)
+
+### Adding a Host
+
+Navigate to `Configuration`->`Hosts` and click the `Create Host` button in the top right corner.
+This is where we define which host we will monitor. In our case it's the server that runs the RavenDB instance.
+Name your host and choose the `Database servers` group.
+Remove the default `Agent interface` and instead add an `SNMP interface`, using either an IP address or a DNS name and a port.
+Make sure you click the appropriate IP/DNS button.
+
+![Figure 2. Monitoring : How to set up Zabbix monitoring: Add host](./assets/monitoring-zabbix-add-host.PNG)
+
+Still under `Configuration`->`Hosts`, go to the next tab: `Templates` and add a link to the template we imported earlier.
+
+![Figure 3. Monitoring : How to set up Zabbix monitoring: Link to template](./assets/monitoring-zabbix-link-template.PNG)
+
+Still under `Configuration`->`Hosts`, go to the tab: `Macros` and add the \{$SNMP_COMMUNITY\} macro. Click `Save` when you're done.
+
+![Figure 4. Monitoring : How to set up Zabbix monitoring: Community macro](./assets/monitoring-zabbix-cummunity-macro.PNG)
+
+That's it! We've added our host and can start exploring.
+Navigate to `Monitoring`->`Overview` and you should now see the different metrics RavenDB exposes.
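+
+Before moving on, you can sanity-check the SNMP connection from the Zabbix server with a plain SNMP query -
+a sketch using Net-SNMP's `snmpget`, assuming the default community string `ravendb`, the default SNMP port 161,
+and a placeholder host name (substitute your own values):
+
+
+
+{`snmpget -v 2c -c ravendb my-ravendb-host:161 1.3.6.1.4.1.45751.1.1.1.3
+`}
+
+
+
+This queries the server up-time OID - the same serverUpTime metric that the trigger below relies on.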
+
+### Configuring a Trigger
+
+Let's see an example of what you can do with all these metrics.
+We will create a trigger and an action that will notify us when the server is up or down.
+Navigate to `Configuration`->`Hosts` and click the host name. Then, in the top navigation bar click on `Triggers`.
+Click on `Create trigger` in the top right corner.
+
+Let's assume you've imported the template earlier and that you named your host "RavenDB Amazing Server v7.0".
+
+Name the trigger "Server is down" and enter the following expression into the text box:
+
+
+{`\{RavenDB Amazing Server v7.0:serverUpTime.nodata(1800)\}=1
+`}
+
+
+
+Alternatively, you could click on the `Add` button on the right and use the expression constructor to set your own conditions.
+You will need to choose an item (one of the metrics we imported using the template). Choose conditions and set the parameters.
+
+
+This stage must be done after importing the template into Zabbix (described earlier). If you didn't import the template,
+you would have to manually add items in order to configure a trigger. [Read more about items](https://www.zabbix.com/documentation/3.4/manual/config/items).
+
+
+You may add a description and severity level, and make sure to check the `Enabled` checkbox. Click `Save`.
+We have created a trigger that will fire when the serverUpTime metric has no data for 1800 seconds.
+Learn more about trigger expressions [here](https://www.zabbix.com/documentation/3.4/manual/config/triggers/expression).
+
+![Figure 5. Monitoring : How to set up Zabbix monitoring: Create trigger](./assets/monitoring-zabbix-create-trigger.PNG)
+
+Now we will define an action that will be executed whenever the trigger is fired.
+Navigate to `Configuration`->`Actions` and click on the `Create action` button in the top right corner.
+Name your action and enter the default subject and/or message. You can also define a Recovery message for when the trigger condition is no longer true.
+
+If you [configure e-mail as the delivery channel for messages](https://www.zabbix.com/documentation/3.4/manual/config/notifications/media/email),
+the message you define in an action will be sent to your e-mail address every time the trigger is fired.
+
+![Figure 6. Monitoring : How to set up Zabbix monitoring: Create action](./assets/monitoring-zabbix-create-action.PNG)
+
diff --git a/versioned_docs/version-7.1/server/administration/snmp/snmp-overview.mdx b/versioned_docs/version-7.1/server/administration/snmp/snmp-overview.mdx
new file mode 100644
index 0000000000..eb974cdb48
--- /dev/null
+++ b/versioned_docs/version-7.1/server/administration/snmp/snmp-overview.mdx
@@ -0,0 +1,495 @@
+---
+title: "SNMP Support"
+hide_table_of_contents: true
+sidebar_label: SNMP Support
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# SNMP Support
+
+
+SNMP support is available for [Enterprise](../../../start/licensing/licensing-overview.mdx#enterprise) licenses only.
+
+
+
+
+
+* This page explains how to use SNMP to monitor RavenDB and what metrics can be accessed.
+ +* In this page: + * [Overview](../../../server/administration/snmp/snmp-overview.mdx#overview) + * [Enabling SNMP in RavenDB](../../../server/administration/snmp/snmp-overview.mdx#enabling-snmp-in-ravendb) + * [SNMP Configuration options](../../../server/administration/snmp/snmp-overview.mdx#snmp-configuration-options) + * [The Metrics](../../../server/administration/snmp/snmp-overview.mdx#the-metrics) + * [Access metrics via monitoring tools](../../../server/administration/snmp/snmp-overview.mdx#access-metrics-via-monitoring-tools) + * [Access metrics via SNMP agents](../../../server/administration/snmp/snmp-overview.mdx#access-metrics-via-snmp-agents) + * [Access metrics via HTTP](../../../server/administration/snmp/snmp-overview.mdx#access-metrics-via-http) + * [List of OIDs](../../../server/administration/snmp/snmp-overview.mdx#list-of-oids) + * [Server OIDs](../../../server/administration/snmp/snmp-overview.mdx#server-oids) + * [Cluster OIDs](../../../server/administration/snmp/snmp-overview.mdx#cluster-oids) + * [Database OIDs](../../../server/administration/snmp/snmp-overview.mdx#database-oids) + * [Index OIDs](../../../server/administration/snmp/snmp-overview.mdx#index-oids) + * [General OIDs](../../../server/administration/snmp/snmp-overview.mdx#general-oids) + * [Ongoing tasks OIDs](../../../server/administration/snmp/snmp-overview.mdx#ongoing-tasks-oids) + + +## Overview + +* Simple Network Management Protocol (SNMP) is an Internet-standard protocol for collecting and organizing + information about managed devices on IP networks. It is used primarily for monitoring network services. + SNMP exposes management data in the form of variables (metrics) that describe the system status and + configuration. These metrics can then be remotely queried (and, in some circumstances, manipulated) by + managing applications. + +* In RavenDB we have support for SNMP which allows monitoring tools like [Zabbix](https://www.zabbix.com), + [PRTG](https://www.paessler.com/prtg), and [Datadog](https://www.datadoghq.com/) direct access to the + internal details of RavenDB. We expose a long [list of metrics](../../../server/administration/snmp/snmp-overview.mdx#list-of-oids): CPU and memory usage, server total requests, + the loaded databases, and database-specific metrics like the number of indexed items per second, + document writes per second, storage space each database takes, and more. + +* You can still monitor what is going on with RavenDB directly from the Studio, or by using one of our + monitoring tools. However, using SNMP might be easier in some cases. As users start running large numbers + of RavenDB instances, it becomes impractical to deal with each of them individually, and using a monitoring + system that can watch many servers becomes advisable. + + + +## Enabling SNMP in RavenDB + +* To monitor RavenDB using SNMP you **must** first set the [Monitoring.Snmp.Enabled](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpenabled) configuration key to _true_. + +* To learn how to modify a configuration key, refer to the [Configuration Overview](../../../server/configuration/configuration-options.mdx) article, + which outlines all available options. + +* For example, add this key to your _settings.json_ file and restart the server. + + + +{`\{ + ... + "Monitoring.Snmp.Enabled": true + ... 
+\} +`} + + + + + +## SNMP configuration options + +There are several configurable SNMP properties in RavenDB: +##### For SNMPv1: + +* [Monitoring.Snmp.Port](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpport) + The SNMP port. + Default: `161` +* [Monitoring.Snmp.SupportedVersions](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpsupportedversions) + List of supported SNMP versions. + Default: `"V2C;V3"` + +##### For SNMPv2c: + +* [Monitoring.Snmp.Community](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpcommunity) + The community string is used as a password. + It is sent with each SNMP `GET` request and allows or denies access to the monitored device. + Default: `"ravendb"` + +##### For SNMPv3: + +* [Monitoring.Snmp.AuthenticationProtocol](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpauthenticationprotocol) + Authentication protocol. + Default: `"SHA1"` +* [Monitoring.Snmp.AuthenticationUser](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpauthenticationuser) + The user for authentication. + Default: `"ravendb"` +* [Monitoring.Snmp.AuthenticationPassword](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpauthenticationpassword) + The authentication password. + When set to `null` the community string is used instead. + Default: `null` +* [Monitoring.Snmp.PrivacyProtocol](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpprivacyprotocol) + Privacy protocol. + Default: `None` +* [Monitoring.Snmp.PrivacyPassword](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpprivacypassword) + Privacy password. + Default: `"ravendb"` + + +* See article [Monitoring Options](../../../server/configuration/monitoring-configuration.mdx) for the full list of **SNMP configuration keys**. + +* To learn how to modify a configuration key, refer to the [Configuration Overview](../../../server/configuration/configuration-options.mdx) article, + which outlines all available options. + + + + +## The Metrics + + + +#### Access metrics via monitoring tools + +* Querying the exposed metrics using a monitoring tool is typically straightforward (see this [Zabbix example](../../../server/administration/snmp/setup-zabbix.mdx)). + +* For a simplified setup, we have provided a few templates which can be found [here](https://github.com/ravendb/ravendb/tree/v4.0/src/Raven.Server/Monitoring/Snmp/Templates). + These templates include the metrics and their associated OIDs. + + + + + +#### Access metrics via SNMP agents + +* The metrics can be accessed directly using any SNMP agent such as [Net-SNMP](http://net-snmp.sourceforge.net/). + Each metric has a unique object identifier (OID) and can be accessed individually. + +* The most basic SNMP commands are `snmpget`, `snmpset` and `snmpwalk`. + For example, you can execute the following _snmpget_ commands to retrieve the server's [up-time metric](../../../server/administration/snmp/snmp-overview.mdx#13). + + ##### For SNMPv2c: + + + +{`// Request: +snmpget -v 2c -c ravendb live-test.ravendb.net 1.3.6.1.4.1.45751.1.1.1.3 + +// Result: +iso.3.6.1.4.1.45751.1.1.1.3 = Timeticks: (29543973) 3 days, 10:03:59.73 +`} + + + + * `ravendb` is the community string (set via the [Monitoring.Snmp.Community](../../../server/configuration/monitoring-configuration.mdx#monitoringsnmpcommunity) configuration key). + * `"live-test.ravendb.net"` is the host. 
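+
+  You can also walk RavenDB's entire OID subtree with `snmpwalk` to list all exposed metrics at once.
+  A sketch, starting from RavenDB's root OID (see [List of OIDs](../../../server/administration/snmp/snmp-overview.mdx#list-of-oids)):
+
+
+
+{`snmpwalk -v 2c -c ravendb live-test.ravendb.net 1.3.6.1.4.1.45751.1.1
+`}
+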
+ + ##### For SNMPv3: + + + + +{`snmpget -v 3 -l authNoPriv -u ravendb -a SHA \\ + -A ravendb live-test.ravendb.net 1.3.6.1.4.1.45751.1.1.1.3 +`} + + + + * `-l authNoPriv` - sets the security level to use authentication but no privacy. + * `-u ravendb` - sets the user for authentication purposes to "ravendb". + * `-a SHA` - sets the authentication protocol to SHA. + * `-A ravendb` - sets the authentication password to "ravendb". + + + + + +#### Access metrics via HTTP +**Access single OID value**: + +* An individual OID value can be retrieved via HTTP `GET` endpoint: + `/monitoring/snmp?oid=` + +* For example, a cURL request for the server [up-time metric](../../../server/administration/snmp/snmp-overview.mdx#13): + + + +{`// Request: +curl -X GET http://live-test.ravendb.net/monitoring/snmp?oid=1.3.6.1.4.1.45751.1.1.1.3 + +// Result: +\{ "Value" : "4.21:32:56.0700000" \} +`} + + +**Access multiple OID values**: + +* Multiple OID values can be retrieved by making either a `GET` or a `POST` request to the following HTTP endpoint: + `/monitoring/snmp/bulk` + +* For example, cURL requests for the server [managed memory](../../../server/administration/snmp/snmp-overview.mdx#167) and [unmanaged memory](../../../server/administration/snmp/snmp-overview.mdx#168) metrics: + + + +{`curl -X GET "http://live-test.ravendb.net/monitoring/snmp/bulk? \\ + oid=1.3.6.1.4.1.45751.1.1.1.6.7&oid=1.3.6.1.4.1.45751.1.1.1.6.8" +`} + + + + + +{`curl -X POST \\ + -H "Content-Type: application/json" \\ + -d '\{ "OIDs": ["1.3.6.1.4.1.45751.1.1.1.6.7", "1.3.6.1.4.1.45751.1.1.1.6.8"]\}' \\ + http://localhost:8080/monitoring/snmp/bulk +`} + + + + + +{`\{ + "Results": [ + \{ "OID": "1.3.6.1.4.1.45751.1.1.1.6.7", "Value": "410" \}, + \{ "OID": "1.3.6.1.4.1.45751.1.1.1.6.8", "Value": "4" \} + ] +\} +`} + + +
**Get all OIDs:**
+
+* You can get a list of all OIDs, along with their descriptions, via this HTTP `GET` endpoint:
+  `/monitoring/snmp/oids`
+
+* For example: [http://live-test.ravendb.net/monitoring/snmp/oids](http://live-test.ravendb.net/monitoring/snmp/oids)
+
+
+
+
+
+## List of OIDs
+
+
+
+* RavenDB's **root OID** is: **1.3.6.1.4.1.45751.1.1.**
+
+* Values represented by `X`, `D`, or `I` in the OIDs list below will be:
+  * `X`:
+    `0` - **any kind of collection**
+    `1` - **a generation-0 or generation-1 collection**
+    `2` - **a blocking generation-2 collection**
+    `3` - **a background collection** (this is always a generation-2 collection)
+  * `D` - **Database number**
+  * `I` - **Index number**
+
+
+
+
+
+| OID | Metric (Server) |
+|-----|-----------------|
+| 1.1.1 | Server URL |
+| 1.1.2 | Server Public URL |
+| 1.1.3 | Server TCP URL |
+| 1.1.4 | Server Public TCP URL |
+| 1.2.1 | Server version |
+| 1.2.2 | Server full version |
+| 1.3 | Server up-time |
+| 1.3.6.1.2.1.1.3.0 | Server up-time (global) |
+| 1.4 | Server process ID |
+| 1.5.1 | Process CPU usage in % |
+| 1.5.2 | Machine CPU usage in % |
+| 1.5.3.1 | CPU Credits Base |
+| 1.5.3.2 | CPU Credits Max |
+| 1.5.3.3 | CPU Credits Remaining |
+| 1.5.3.4 | CPU Credits Gained Per Second |
+| 1.5.3.5 | CPU Credits Background Tasks Alert Raised |
+| 1.5.3.6 | CPU Credits Failover Alert Raised |
+| 1.5.3.7 | CPU Credits Any Alert Raised |
+| 1.5.4 | IO wait in % |
+| 1.6.1 | Server allocated memory in MB |
+| 1.6.2 | Server low memory flag value |
+| 1.6.3 | Server total swap size in MB |
+| 1.6.4 | Server total swap usage in MB |
+| 1.6.5 | Server working set swap usage in MB |
+| 1.6.6 | Dirty memory used by the scratch buffers in MB |
+| 1.6.7 | Server managed memory size in MB |
+| 1.6.8 | Server unmanaged memory size in MB |
+| 1.6.9 | Server encryption buffers memory in use, in MB |
+| 1.6.10 | Server encryption buffers memory in the pool, in MB |
+| 1.6.11.`X`.2 | GC info for `X`. Specifies if this is a concurrent GC or not. |
+| 1.6.11.`X`.3 | GC info for `X`. Gets the number of objects ready for finalization this GC observed. |
+| 1.6.11.`X`.4 | GC info for `X`. Gets the total fragmentation (in MB) when the last garbage collection occurred. |
+| 1.6.11.`X`.5 | GC info for `X`. Gets the generation this GC collected. |
+| 1.6.11.`X`.6 | GC info for `X`. Gets the total heap size (in MB) when the last garbage collection occurred. |
+| 1.6.11.`X`.7 | GC info for `X`. Gets the high memory load threshold (in MB) when the last garbage collection occurred. |
+| 1.6.11.`X`.8 | GC info for `X`. The index of this GC. |
+| 1.6.11.`X`.9 | GC info for `X`. Gets the memory load (in MB) when the last garbage collection occurred. |
+| 1.6.11.`X`.10.1 | GC info for `X`. Gets the pause durations (first item in the array). |
+| 1.6.11.`X`.10.2 | GC info for `X`. Gets the pause durations (second item in the array). |
+| 1.6.11.`X`.11 | GC info for `X`. Gets the pause time percentage in the GC so far. |
+| 1.6.11.`X`.12 | GC info for `X`. Gets the number of pinned objects this GC observed. |
+| 1.6.11.`X`.13 | GC info for `X`. Gets the promoted memory (in MB) for this GC. |
+| 1.6.11.`X`.14 | GC info for `X`. Gets the total available memory (in MB) for the garbage collector to use when the last garbage collection occurred. |
+| 1.6.11.`X`.15 | GC info for `X`. Gets the total committed memory (in MB) of the managed heap. |
+| 1.6.11.`X`.16.3 | GC info for `X`. Gets the large object heap size (in MB) after the last garbage collection of the given kind occurred. |
+| 1.6.12.{0} | Monitors [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html) metrics (Unix/Linux). The description of each metric is available via the `/monitoring/snmp/oids` endpoint; see [Get all OIDs](../../../server/administration/snmp/snmp-overview.mdx#getalloids). |
+| 1.6.13 | Available memory for processing (in MB) |
+| 1.7.1 | Number of concurrent requests |
+| 1.7.2 | Total number of requests since server startup |
+| 1.7.3 | Number of requests per second (one minute rate) |
+| 1.7.3.1 | Number of requests per second (five second rate) |
+| 1.7.4 | Average request time in milliseconds |
+| 1.8 | Server last request time |
+| 1.8.1 | Server last authorized non-cluster-admin request time |
+| 1.9.1 | Server license type |
+| 1.9.2 | Server license expiration date |
+| 1.9.3 | Server license expiration left |
+| 1.9.4 | Server license utilized CPU cores |
+| 1.9.5 | Server license max CPU cores |
+| 1.10.1 | Server storage used size in MB |
+| 1.10.2 | Server storage total size in MB |
+| 1.10.3 | Remaining server storage disk space in MB |
+| 1.10.4 | Remaining server storage disk space in % |
+| 1.10.5 | IO read operations per second |
+| 1.10.6 | IO write operations per second |
+| 1.10.7 | Read throughput in kilobytes per second |
+| 1.10.8 | Write throughput in kilobytes per second |
+| 1.10.9 | Queue length |
+| 1.11.1 | Server certificate expiration date |
+| 1.11.2 | Server certificate expiration left |
+| 1.11.3 | List of well-known admin certificate thumbprints |
+| 1.11.4 | List of well-known admin certificate issuers |
+| 1.11.5 | Number of expiring certificates |
+| 1.11.6 | Number of expired certificates |
+| 1.12.1 | Number of processors on the machine |
+| 1.12.2 | Number of assigned processors on the machine |
+| 1.13.1 | Number of backups currently running |
+| 1.13.2 | Max number of backups that can run concurrently |
+| 1.14.1 | Number of available worker threads in the thread pool |
+| 1.14.2 | Number of available completion port threads in the thread pool |
+| 1.15.1 | Number of active TCP connections |
+| 1.16.1 | Indicates if any experimental features are used |
+| 1.17.1 | Value of the '/proc/sys/vm/max_map_count' parameter |
+| 1.17.2 | Number of current map files in '/proc/self/maps' |
+| 1.17.3 | Value of the '/proc/sys/kernel/threads-max' parameter |
+| 1.17.4 | Number of current threads |
+
+
+
+| OID | Metric (Cluster) |
+|-----|------------------|
+| 3.1.1 | Current node tag |
+| 3.1.2 | Current node state |
+| 3.2.1 | Cluster term |
+| 3.2.2 | Cluster index |
+| 3.2.3 | Cluster ID |
+
+
+
+| OID | Metric (Database) |
+|-----|-------------------|
+| 5.2.`D`.1.1 | Database name |
+| 5.2.`D`.1.2 | Number of indexes |
+| 5.2.`D`.1.3 | Number of stale indexes |
+| 5.2.`D`.1.4 | Number of documents |
+| 5.2.`D`.1.5 | Number of revision documents |
+| 5.2.`D`.1.6 | Number of attachments |
+| 5.2.`D`.1.7 | Number of unique attachments |
+| 5.2.`D`.1.10 | Number of alerts |
+| 5.2.`D`.1.11 | Database ID |
+| 5.2.`D`.1.12 | Database up-time |
+| 5.2.`D`.1.13 | Indicates if the database is loaded |
+| 5.2.`D`.1.14 | Number of rehabs |
+| 5.2.`D`.1.15 | Number of performance hints |
+| 5.2.`D`.1.16 | Number of indexing errors |
+| 5.2.`D`.2.1 | Documents storage allocated size in MB |
+| 5.2.`D`.2.2 | Documents storage used size in MB |
+| 5.2.`D`.2.3 | Index storage allocated size in MB |
+| 5.2.`D`.2.4 | Index storage used size in MB |
+| 5.2.`D`.2.5 | Total storage size in MB |
+| 5.2.`D`.2.6 | Remaining storage disk space in MB |
+| 5.2.`D`.2.7 | IO read operations per second |
+| 5.2.`D`.2.8 | IO write operations per second |
+| 5.2.`D`.2.9 | Read throughput in kilobytes per second |
+| 5.2.`D`.2.10 | Write throughput in
kilobytes per second | +| 5.2.`D`.2.11 | Queue length | +| 5.2.`D`.3.1 | Number of document puts per second (one minute rate) | +| 5.2.`D`.3.2 | Number of indexed documents per second for map indexes (one minute rate) | +| 5.2.`D`.3.3 | Number of maps per second for map-reduce indexes (one minute rate) | +| 5.2.`D`.3.4 | Number of reduces per second for map-reduce indexes (one minute rate) | +| 5.2.`D`.3.5 | Number of requests per second (one minute rate) | +| 5.2.`D`.3.6 | Number of requests from database start | +| 5.2.`D`.3.7 | Average request time in milliseconds | +| 5.2.`D`.5.1 | Number of indexes | +| 5.2.`D`.5.2 | Number of static indexes | +| 5.2.`D`.5.3 | Number of auto indexes | +| 5.2.`D`.5.4 | Number of idle indexes | +| 5.2.`D`.5.5 | Number of disabled indexes | +| 5.2.`D`.5.6 | Number of error indexes | +| 5.2.`D`.5.7 | Number of faulty indexes | +| 5.2.`D`.6.1 | Number of writes (documents, attachments, counters, timeseries) | +| 5.2.`D`.6.2 | Number of bytes written (documents, attachments, counters, timeseries) | + + + +| OID | Metric (Index) | +|------------------------------------------|------------------------------------------------| +| 5.2.`D`.4.`I`.1 | Indicates if index exists | +| 5.2.`D`.4.`I`.2 | Index name | +| 5.2.`D`.4.`I`.4 | Index priority | +| 5.2.`D`.4.`I`.5 | Index state | +| 5.2.`D`.4.`I`.6 | Number of index errors | +| 5.2.`D`.4.`I`.7 | Last query time | +| 5.2.`D`.4.`I`.8 | Index indexing time | +| 5.2.`D`.4.`I`.9 | Time since last query | +| 5.2.`D`.4.`I`.10 | Time since last indexing | +| 5.2.`D`.4.`I`.11 | Index lock mode | +| 5.2.`D`.4.`I`.12 | Indicates if index is invalid | +| 5.2.`D`.4.`I`.13 | Index status | +| 5.2.`D`.4.`I`.14 | Number of maps per second (one minute rate) | +| 5.2.`D`.4.`I`.15 | Number of reduces per second (one minute rate) | +| 5.2.`D`.4.`I`.16 | Index type | + + + +| OID | Metric (General) | +|----------------------------|--------------------------------------------------------------------------------------------------| +| 5.1.1 | Number of all databases | +| 5.1.2 | Number of loaded databases | +| 5.1.3 | Time since oldest backup | +| 5.1.4 | Number of disabled databases | +| 5.1.5 | Number of encrypted databases | +| 5.1.6 | Number of databases for current node | +| 5.1.7.1 | Number of indexes in all loaded databases | +| 5.1.7.2 | Number of stale indexes in all loaded databases | +| 5.1.7.3 | Number of error indexes in all loaded databases | +| 5.1.7.4 | Number of faulty indexes in all loaded databases | +| 5.1.7.5 | Number of indexing errors in all loaded databases | +| 5.1.8.1 | Number of indexed documents per second for map indexes (one minute rate) in all loaded databases | +| 5.1.8.2 | Number of maps per second for map-reduce indexes (one minute rate) in all loaded databases | +| 5.1.8.3 | Number of reduces per second for map-reduce indexes (one minute rate) in all loaded databases | +| 5.1.9.1 | Number of writes (documents, attachments, counters, timeseries) in all loaded databases | +| 5.1.9.2 | Number of bytes written (documents, attachments, counters, timeseries) in all loaded databases | +| 5.1.10 | Number of faulted databases | + + + +| OID | Metric (Ongoing tasks) | +|--------------------------------|--------------------------------------------------------------------| +| 5.1.11.1 | Number of enabled ongoing tasks for all databases | +| 5.1.11.2 | Number of active ongoing tasks for all databases | +| 5.1.11.3 | Number of enabled external replication tasks for all databases | +| 5.1.11.4 | Number of active 
external replication tasks for all databases | +| 5.1.11.5 | Number of enabled RavenDB ETL tasks for all databases | +| 5.1.11.6 | Number of active RavenDB ETL tasks for all databases | +| 5.1.11.7 | Number of enabled SQL ETL tasks for all databases | +| 5.1.11.8 | Number of active SQL ETL tasks for all databases | +| 5.1.11.9 | Number of enabled OLAP ETL tasks for all databases | +| 5.1.11.10 | Number of active OLAP ETL tasks for all databases | +| 5.1.11.11 | Number of enabled Elasticsearch ETL tasks for all databases | +| 5.1.11.12 | Number of active Elasticsearch ETL tasks for all databases | +| 5.1.11.13 | Number of enabled Queue ETL tasks for all databases | +| 5.1.11.14 | Number of active Queue ETL tasks for all databases | +| 5.1.11.15 | Number of enabled Backup tasks for all databases | +| 5.1.11.16 | Number of active Backup tasks for all databases | +| 5.1.11.17 | Number of enabled Subscription tasks for all databases | +| 5.1.11.18 | Number of active Subscription tasks for all databases | +| 5.1.11.19 | Number of enabled Pull Replication As Sink tasks for all databases | +| 5.1.11.20 | Number of active Pull Replication As Sink tasks for all databases | +| 5.1.11.21 | Number of enabled Queue Sink tasks for all databases | +| 5.1.11.22 | Number of active Queue Sink tasks for all databases | +| 5.1.11.23 | Number of enabled Snowflake ETL tasks for all databases | +| 5.1.11.24 | Number of active Snowflake ETL tasks for all databases | +| 5.1.11.25 | Number of enabled Embeddings Generation tasks for all databases | +| 5.1.11.26 | Number of active Embeddings Generation tasks for all databases | + + + diff --git a/versioned_docs/version-7.1/server/administration/statistics.mdx b/versioned_docs/version-7.1/server/administration/statistics.mdx new file mode 100644 index 0000000000..245c51e121 --- /dev/null +++ b/versioned_docs/version-7.1/server/administration/statistics.mdx @@ -0,0 +1,416 @@ +--- +title: "Administration: Statistics" +hide_table_of_contents: true +sidebar_label: Statistics +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Administration: Statistics + +## Server statistics + +Server statistics are available in the `Studio` via the [Server Dashboard](../../studio/server/server-dashboard.mdx), and can +also be retrieved by dedicated endpoints. 
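+
+Any HTTP client can query these endpoints. For instance, a minimal C# sketch for the metrics endpoint described in the next section (an unsecured server at `http://localhost:8080` and .NET 6+ implicit usings are assumed):
+
+
+
+{`using var client = new HttpClient();
+
+// Fetch the server metrics document (see the Metrics section below)
+var metricsJson = await client.GetStringAsync("http://localhost:8080/admin/metrics");
+Console.WriteLine(metricsJson);
+`}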
+ +### Metrics + + +To get server metrics use the `{serverUrl}/admin/metrics` endpoint + + +A document containing the following metrics is retrieved: + +* **Requests** + * RequestsPerSec + * ConcurrentRequestsCount +* **Docs** + * BytesPutsPerSec + * PutsPerSec +* **Attachments** + * BytesPutsPerSec + * PutsPerSec +* **MapIndexes** + * IndexedPerSec +* **MapReduceIndexes** + * MappedPerSec + * ReducedPerSec + +where each meter has the following format: + + +{`\{ + Current: 3, + Count: 217937, + MeanRate: 0.2, + OneMinuteRate: 1.2, + FiveMinuteRate: 0.2, + FifteenMinuteRate: 0.1 +\} +`} + + + +### Memory Stats + + +In order to get server memory statistics, use the `{serverUrl}/admin/debug/memory/stats` endpoint + + +A document containing the following memory statistics is retrieved: + + +{`\{ + WorkingSet: 600551424, + TotalUnmanagedAllocations: 30422368, + ManagedAllocations: 217681656, + TotalMemoryMapped: 842530816, + PhysicalMem: "4 GBytes", + FreeMem: "2.38 GBytes", + HighMemLastOneMinute: "2.38 GBytes", + LowMemLastOneMinute: "2.38 GBytes", + HighMemLastFiveMinute: "2.38 GBytes", + LowMemLastFiveMinute: "2.35 GBytes", + HighMemSinceStartup: "2.87 GBytes", + LowMemSinceStartup: "1.06 GBytes", + Humane: \{ + WorkingSet: "572.73 MBytes", + TotalUnmanagedAllocations: "29.01 MBytes", + ManagedAllocations: "207.6 MBytes", + TotalMemoryMapped: "803.5 MBytes" + \}, + Threads: [ + \{ + Name: null, + Allocations: 62016514, + HumaneAllocations: "59.14 MBytes", + Ids: [ + \{ + Id: 11682, + Allocations: 29146296, + HumaneAllocations: "27.8 MBytes", + \}, + \{ + Id: 11897, + Allocations: 18383430, + HumaneAllocations: "17.53 MBytes", + \}, + \{ + Id: 11801, + Allocations: 8290370, + HumaneAllocations: "7.91 MBytes", + \}, + \{ + Id: 11748, + Allocations: 2183082, + HumaneAllocations: "2.08 MBytes", + \} + ], + \}, + \{ + Name: "RavenDB Tasks Executer", + Allocations: 1134462, + HumaneAllocations: "1.08 MBytes", + Id: 19, + \}, + \{ + Name: "Indexing of Orders/ByCompany of test", + Allocations: 0, + HumaneAllocations: "0 Bytes", + Id: 11896, + \}, + + ... 
+ ], + Mappings : [ + \{ + Directory: "\\Databases\\Demo", + TotalDirectorySize: 134217728, + HumaneTotalDirectorySize: "128 MBytes", + Details: \{ + Raven.voron: \{ + FileSize: 134217728, + HumaneFileSize: "128 MBytes", + TotalMapped: 134217728, + HumaneTotalMapped: "128 MBytes", + Mappings: [ + \{ + Size: 134217728, + Count: 1, + \} + ], + \} + \}, + \}, + \{ + Directory: "\\Databases\\Demo\\Indexes\\Product_Search", + TotalDirectorySize: 33554432, + HumaneTotalDirectorySize: "32 MBytes", + Details: \{ + Raven.voron: \{ + FileSize: 33554432, + HumaneFileSize: "32 MBytes", + TotalMapped: 33554432, + HumaneTotalMapped: "32 MBytes", + Mappings: [ + \{ + Size: 33554432, + Count: 1, + \} + ], + \} + \}, + \}, + \{ + Directory: "\\Databases\\Demo\\Configuration", + TotalDirectorySize: 8388608, + HumaneTotalDirectorySize: "8 MBytes", + Details: \{ + Raven.voron: \{ + FileSize: 8388608, + HumaneFileSize: "8 MBytes", + TotalMapped: 8388608, + HumaneTotalMapped: "8 MBytes", + Mappings: [ + \{ + Size: 8388608, + Count: 1, + \} + ], + \} + \}, + \}, + \{ + Directory: "\\Databases\\Demo\\Indexes\\Product_Search\\Temp", + TotalDirectorySize: 131072, + HumaneTotalDirectorySize: "128 KBytes", + Details: \{ + compression.0000000000.buffers: \{ + FileSize: 65536, + HumaneFileSize: "64 KBytes", + TotalMapped: 65536, + HumaneTotalMapped: "64 KBytes", + Mappings: [ + \{ + Size: 65536, + Count: 1, + \} + ], + \}, + scratch.0000000000.buffers: \{ + FileSize: 65536, + HumaneFileSize: "64 KBytes", + TotalMapped: 65536, + HumaneTotalMapped: "64 KBytes", + Mappings: [ + \{ + Size: 65536, + Count: 1, + \} + ], + \}, + \}, + \}, + + ... + ], +\} +`} + + + +### CPU Stats + + +To get server CPU stats use the `{serverUrl}/admin/debug/cpu/stats` endpoint + + +A document containing the following information is retrieved: + + +{`\{ + CpuStats: [ + \{ + ProcessName: "Raven.Server", + ProcessorAffinity: 3, + PrivilegedProcessorTime: "00:10:49.9843750", + TotalProcessorTime: "00:57:11.9531250", + UserProcessorTime: "00:46:21.9687500", + \} + ], + ThreadPoolStats: [ + \{ + AvailableThreadPoolWorkerThreads: 32765, + AvailableThreadPoolCompletionPortThreads: 1000, + MinThreadPoolWorkerThreads: 2, + MinThreadPoolCompletionPortThreads: 2, + MaxThreadPoolWorkerThreads: 32767, + MaxThreadPoolCompletionPortThreads: 1000, + \} + ], +\} +`} + + + + + +## Database statistics + +Database statistics are available in the `Studio`, and can also be retrieved by dedicated endpoints +or via the Client API (details [here](../../client-api/operations/maintenance/get-stats.mdx)). + +### Database Statistics via the Studio + +![Figure 1. Database Statistics](./assets/general-stats.png) + +In the Studio, go to `Databases`, select a database and then go to `Stats`. 
+The following information is presented: + +* **General Stats** + * Database ID + * Database Change Vector + * Documents Count + * Indexes Count + * Conflicts Count + * Tombstones Count + * Attachments Count + * Last Document Etag + * Architecture + * Size On Disk +* **Indexes Stats** + * **AutoMap** + * Entries Count + * Map Attempts + * Map Successes + * **Map** + * Entries Count + * Map Attempts + * Map Successes + * Mapped Per Second Rate + * **MapReduce** + * Entries Count + * Map Attempts + * Map Successes** + * Mapped Per Second Rate + * Reduce Attempts + * Reduce Successes + * Reduced Per Second Rate + +### Database Statistics via Dedicated Endpoint + + +Database statistics can also be retrieved in a JSON format by using the +`{serverUrl}/databases/{databaseName}/stats` endpoint + + +A document containing the following database statistics is retrieved: + + +{`\{ + CountOfIndexes: 4, + CountOfDocuments: 1080, + CountOfRevisionDocuments: 4645, + CountOfTombstones: 0, + CountOfDocumentsConflicts: 0, + CountOfConflicts: 0, + CountOfAttachments: 17, + CountOfUniqueAttachments: 17, + DatabaseChangeVector: "A:5746-7aoBOQY250SGxlpFC/GcxQ", + DatabaseId: "7aoBOQY250SGxlpFC/GcxQ", + NumberOfTransactionMergerQueueOperations: 0, + Is64Bit: true, + Pager: "Voron.Platform.Win32.WindowsMemoryMapPager", + LastDocEtag: 5746, + LastIndexingTime: "2018-04-16T09:38:28.8303222Z", + SizeOnDisk: \{ + HumaneSize: "259 MBytes", + SizeInBytes: 271581184, + \}, + Indexes: [ + \{ + IsStale: false, + Name: "Auto/Companies/ByAddress.CountryAndSearch(Name)", + LockMode: "Unlock", + Priority: "Normal", + State: "Normal", + Type: "AutoMap", + LastIndexingTime: "2018-04-16T09:38:28.8303222Z", + \}, + \{ + IsStale: false, + Name: "Auto/Employees/ByFirstNameAndLastName", + LockMode: "Unlock", + Priority: "Normal", + State: "Idle", + Type: "AutoMap", + LastIndexingTime: "2018-04-16T09:38:28.7994098Z", + \}, + \{ + IsStale: false, + Name: "Orders/ByCompany", + LockMode: "Unlock", + Priority: "Normal", + State: "Normal", + Type: "MapReduce", + LastIndexingTime: "2018-04-16T09:38:28.7270848Z", + \}, + \{ + IsStale: false, + Name: "Product/Search", + LockMode: "Unlock", + Priority: "Normal", + State: "Normal", + Type: "Map", + LastIndexingTime: "2018-04-16T09:38:28.7761331Z", + \}, + ], +\} +`} + + + +### Database Metrics + +To get the database metrics use the `{serverUrl}/databases/{databaseName}/metrics` endpoint + + +Similarly to the server metrics endpoint, a document containing the following database metrics is retrieved: + +* **Requests** + * RequestsPerSec + * ConcurrentRequestsCount +* **Docs** + * BytesPutsPerSec + * PutsPerSec +* **Attachments** + * BytesPutsPerSec + * PutsPerSec +* **MapIndexes** + * IndexedPerSec +* **MapReduceIndexes** + * MappedPerSec + * ReducedPerSec + +where each meter has the following format: + + +{`\{ + Current: 3, + Count: 217937, + MeanRate: 0.2, + OneMinuteRate: 1.2, + FiveMinuteRate: 0.2, + FifteenMinuteRate: 0.1 +\} +`} + + + + diff --git a/versioned_docs/version-7.1/server/assets/tcp-compression-license.png b/versioned_docs/version-7.1/server/assets/tcp-compression-license.png new file mode 100644 index 0000000000..bab06ebf93 Binary files /dev/null and b/versioned_docs/version-7.1/server/assets/tcp-compression-license.png differ diff --git a/versioned_docs/version-7.1/server/clustering/_category_.json b/versioned_docs/version-7.1/server/clustering/_category_.json new file mode 100644 index 0000000000..66f61b24c4 --- /dev/null +++ 
b/versioned_docs/version-7.1/server/clustering/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 2,
+  "label": "Clustering"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/clustering/cluster-api.mdx b/versioned_docs/version-7.1/server/clustering/cluster-api.mdx
new file mode 100644
index 0000000000..5b8efe5cc2
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/cluster-api.mdx
@@ -0,0 +1,199 @@
+---
+title: "Cluster: Cluster API"
+hide_table_of_contents: true
+sidebar_label: Cluster API
+sidebar_position: 6
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Cluster: Cluster API
+
+The easiest way to manage a RavenDB cluster is through the Management Studio's [Cluster View](../../studio/cluster/cluster-view.mdx).
+All cluster operations are available through the GUI. However, sometimes you'd want to automate things.
+
+In this section we demonstrate how to perform certain cluster operations programmatically using the REST API.
+The first example shows how to make a secure call using PowerShell, cURL, and the RavenDB C# client. The rest of the examples are cURL only.
+
+- [Add node to the cluster](../../server/clustering/cluster-api.mdx#add-node-to-the-cluster)
+- [Delete node from the cluster](../../server/clustering/cluster-api.mdx#delete-node-from-the-cluster)
+- [Promote a watcher node](../../server/clustering/cluster-api.mdx#promote-a-node)
+- [Demote a watcher node](../../server/clustering/cluster-api.mdx#demote-a-node)
+- [Force elections](../../server/clustering/cluster-api.mdx#force-elections)
+- [Force timeout](../../server/clustering/cluster-api.mdx#force-timeout)
+- [Bootstrap the cluster](../../server/clustering/cluster-api.mdx#bootstrap-cluster)
+
+
+If authentication is turned on, a client certificate with either `Cluster Admin` or `Operator` [Security Clearance](../../server/security/authorization/security-clearance-and-permissions.mdx) must be supplied, depending on the endpoint.
+
+
+## Add node to the cluster
+
+Adding a node to the cluster can be done using an HTTP PUT request to the `/admin/cluster/node` endpoint with the following arguments:
+
+| Argument | Description | Required | Default |
+| - | - | - | - |
+| new-node-url | The address of the new node we want to add to the cluster | true | - |
+| new-node-tag | 1-4 uppercase Unicode letters | false | 'A'-'Z', assigned by order of addition |
+| is-watcher | Add the new node as a watcher | false | `false` |
+| max-utilized-cores | The maximum number of cores that can be assigned to the new node | false | Number of processors on the machine or the license limit (whichever is smaller) |
+
+SecurityClearance: `Cluster Admin`
+
+### C# Client
+
+* To make a secure call, the Document Store must be supplied with the client certificate ([example](../../client-api/setting-up-authentication-and-authorization.mdx#example---initializing-document-store-with-a-client-certificate)).
+
+
+
+{`store.GetRequestExecutor().HttpClient.SendAsync(new HttpRequestMessage(HttpMethod.Put,
+    "https://<server-url>/admin/cluster/node?url=<new-node-url>&tag=<new-node-tag>&watcher=<is-watcher>&maxUtilizedCores=<max-utilized-cores>"));
+`}
+
+
+
+### PowerShell
+
+
+
+{`[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
+$clientCert = Get-PfxCertificate -FilePath <path-to-pfx-cert>
+Invoke-WebRequest -Method Put -URI "https://<server-url>/admin/cluster/node?url=<new-node-url>&tag=<new-node-tag>&watcher=<is-watcher>&maxUtilizedCores=<max-utilized-cores>" -Certificate $clientCert
+`}
+
+
+
+### cURL
+
+
+
+{`curl -X PUT "https://<server-url>/admin/cluster/node?url=<new-node-url>&tag=<new-node-tag>&watcher=<is-watcher>&maxUtilizedCores=<max-utilized-cores>" --cert <certificate>
+`}
+
+
+
+
+
+## Delete node from the cluster
+
+Deleting a node from the cluster can be done using an HTTP DELETE request to the `/admin/cluster/node` endpoint with the following argument:
+
+| Argument | Description | Required | Default |
+| - | - | - | - |
+| node-tag | The tag of the node to delete | true | - |
+
+SecurityClearance: `Cluster Admin`
+
+### Example
+
+
+
+{`curl -X DELETE "https://<server-url>/admin/cluster/node?nodeTag=<node-tag>"
+`}
+
+
+
+
+
+## Promote a Node
+
+Promoting a watcher node can be done using an HTTP POST request to the `/admin/cluster/promote`
+endpoint with the following argument:
+
+| Argument | Description | Required | Default |
+| - | - | - | - |
+| node-tag | The tag of the node to promote | true | - |
+
+The POST request body should be empty.
+
+SecurityClearance: `Cluster Admin`
+
+### Example
+
+
+
+{`curl -X POST "https://<server-url>/admin/cluster/promote?nodeTag=<node-tag>" -d ''
+`}
+
+
+
+
+
+## Demote a Node
+
+Demoting a watcher node can be done using an HTTP POST request to the `/admin/cluster/demote` endpoint with the following argument:
+
+| Argument | Description | Required | Default |
+| - | - | - | - |
+| node-tag | The tag of the node to demote | true | - |
+
+The POST request body should be empty.
+
+SecurityClearance: `Cluster Admin`
+
+### Example
+
+
+
+{`curl -X POST "https://<server-url>/admin/cluster/demote?nodeTag=<node-tag>" -d ''
+`}
+
+
+
+
+
+## Force Elections
+
+Forcing elections can be done using an empty HTTP POST request to the `/admin/cluster/reelect` endpoint.
+
+SecurityClearance: `Operator`
+
+### Example
+
+
+
+{`curl -X POST "https://<server-url>/admin/cluster/reelect" -d ''
+`}
+
+
+
+
+
+## Force Timeout
+
+Forcing a timeout can be done using an empty HTTP POST request to the `/admin/cluster/timeout` endpoint.
+
+SecurityClearance: `Operator`
+
+### Example
+
+
+
+{`curl -X POST "https://<server-url>/admin/cluster/timeout" -d ''
+`}
+
+
+
+
+
+## Bootstrap Cluster
+
+Bootstrapping the cluster can be done using an empty HTTP POST request to the `/admin/cluster/bootstrap` endpoint.
+Note: This option is only available when the server is in the Passive state.
+
+SecurityClearance: `Cluster Admin`
+
+### Example
+
+
+
+{`curl -X POST "https://<server-url>/admin/cluster/bootstrap" -d ''
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/cluster-best-practice-and-configuration.mdx b/versioned_docs/version-7.1/server/clustering/cluster-best-practice-and-configuration.mdx
new file mode 100644
index 0000000000..a26a5a1a65
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/cluster-best-practice-and-configuration.mdx
@@ -0,0 +1,54 @@
+---
+title: "Cluster: Best Practices"
+hide_table_of_contents: true
+sidebar_label: Best Practice and Configuration
+sidebar_position: 5
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Cluster: Best Practices
+
+
+* In this page:
+  * [Clusters should have an odd number of at least 3 nodes](../../server/clustering/cluster-best-practice-and-configuration.mdx#clusters-should-have-an-odd-number-of-at-least-3-nodes)
+  * [Avoid different cluster configurations among the cluster's nodes](../../server/clustering/cluster-best-practice-and-configuration.mdx#avoid-different-cluster-configurations-among-the-clusters-nodes)
+
+
+
+
+### Clusters should have an odd number of at least 3 nodes
+
+We recommend setting up clusters with an odd number of nodes, equal to or greater than 3.
+
+**A single-node cluster:**
+Will not have the ability to automatically fail over to another node if it goes down.
+This means that it is not highly available.
+
+**A two-node cluster:**
+Also not recommended, since the cluster must have a consensus among the majority of its nodes to operate.
+With a two-node cluster, if one of the nodes is down or partitioned, the surviving node is not considered a 'majority'
+and thus no [Raft](../../glossary/raft-algorithm.mdx)
+command will be created, although any database on the surviving node will still be responsive to the user.
+
+**An odd number of 3 or more nodes:**
+For ACID guarantees, a majority of the nodes must agree on every [cluster-wide transaction](../../server/clustering/cluster-transactions.mdx),
+and having an odd number of nodes makes achieving a majority easier.
+
+
+
+
+
+### Avoid different cluster configurations among the cluster's nodes
+
+Configuration mismatches tend to cause interaction problems between nodes.
+
+If you must set [cluster configurations](../../server/configuration/cluster-configuration.mdx) differently on separate nodes,
+**we recommend first testing this** in a development environment to verify that each node interacts properly with the others.
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/cluster-transactions.mdx b/versioned_docs/version-7.1/server/clustering/cluster-transactions.mdx
new file mode 100644
index 0000000000..fb29eb4bfd
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/cluster-transactions.mdx
@@ -0,0 +1,201 @@
+---
+title: "Cluster: Cluster-Wide Transactions"
+hide_table_of_contents: true
+sidebar_label: Cluster Transactions
+sidebar_position: 4
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Cluster: Cluster-Wide Transactions
+
+
+* Cluster-wide transactions are a way to ensure that certain operations favor consistency over availability in the CAP theorem.
+
+* Code examples and the client API can be found [here](../../client-api/session/saving-changes.mdx#transaction-mode---cluster-wide).
+
+* In this page:
+  * [Why Cluster-Wide Transactions](../../server/clustering/cluster-transactions.mdx#why-cluster-wide-transactions)
+  * [How Cluster Transactions Work](../../server/clustering/cluster-transactions.mdx#how-cluster-transactions-work), described through the flow of an example cluster transaction request.
+  * [Cluster Transactions Properties](../../server/clustering/cluster-transactions.mdx#cluster-transactions-properties)
+  * [Concurrent Cluster-Wide and Single-Node Transactions](../../server/clustering/cluster-transactions.mdx#concurrent-cluster-wide-and-single-node-transactions)
+  * [Failure Modes in Cluster-Wide Transactions](../../server/clustering/cluster-transactions.mdx#failure-modes-in-cluster-wide-transactions)
+  * [Debug Cluster-Wide Transactions](../../server/clustering/cluster-transactions.mdx#debug-cluster-wide-transactions)
+
+
+## Why Cluster-Wide Transactions
+
+Usually, RavenDB uses the [multi-master model](../../client-api/faq/transaction-support.mdx#multi-master-model): it applies a transaction on a single node first and then asynchronously replicates the data to the other
+members of the cluster. This ensures that even in the presence of network partitions or hard failures, RavenDB can
+accept writes and safely keep them.
+
+The downside of the multi-master model is that certain error modes can cause two clients to modify the same set of documents
+on two different database nodes.
+That can cause [Conflicts](../../server/clustering/replication/replication-conflicts.mdx) and makes it hard to provide certain guarantees to the application.
+For example, consider ensuring the uniqueness of a user's email in a distributed cluster. Just checking for the existence of the email is not sufficient:
+two clients may be talking to separate database nodes, each checks that the user does not exist,
+and both end up creating what amounts to a duplicate user.
+
+To handle this (and similar) scenarios, RavenDB offers the cluster-wide transaction feature. It allows you to explicitly state that you want
+a particular interaction with the database to favor consistency over availability, ensuring that changes are applied in an
+identical manner across the cluster even in the presence of failures and network partitions.
+
+To ensure that, RavenDB requires that a cluster-wide transaction be accepted by at least a majority of the voting nodes in
+the cluster.
If that majority cannot be reached, the cluster-wide transaction will fail.
+
+Throughout the rest of this document we distinguish between single-node transactions, which are applied on a single node and then disseminated using async
+replication, and cluster-wide transactions, which are accepted by a majority of the nodes in the cluster and then applied on each of them.
+
+
+
+## How Cluster Transactions Work
+
+1. A request sent from the client via [SaveChanges()](../../client-api/session/saving-changes.mdx#transaction-mode---cluster-wide) using `TransactionMode.ClusterWide` will generate a [Raft Command](../../server/clustering/rachis/consensus-operations.mdx#implementation-details),
+   and the server will wait for a consensus on it.
+2. When consensus is achieved, each node will validate the compare-exchange values first.
+   If this fails, the entire session transaction is rolled back. Due to the nature of the [Raft consensus algorithm](../../server/clustering/rachis/what-is-rachis.mdx#what-is-raft-?),
+   the cluster-wide transaction will either _eventually_ be accepted on _all_ nodes or fail on _all_ of them.
+3. Once the validation has passed, the request is stored on the local cluster state machine of every node and is
+   processed asynchronously by the relevant database.
+4. The relevant database notices that it has pending cluster transactions and starts to execute them.
+   Since order matters, a failure at this stage will halt cluster-transaction execution until it is fixed.
+   The possible failure modes for this scenario are listed below.
+5. Every document that has been added by the cluster transaction gets the `RAFT:int64-sequential-number` [Change Vector](../../server/clustering/replication/change-vector.mdx)
+   and will have priority if a conflict arises between that document and a document from a regular transaction.
+6. After the database has executed the requested transaction, a response is returned to the client.
+   * Upon success, the client receives the transaction's [Raft Index](../../server/clustering/rachis/consensus-operations.mdx#raft-index),
+     which will be added to any future requests. Performing an operation against any other node will wait
+     for that Raft index to be applied first, ensuring the order of operations.
+7. In the background, the [Cluster Observer](../../server/clustering/distribution/cluster-observer.mdx) tracks the completed cluster transactions
+   and removes each one from the local cluster state machine only when it has been successfully committed on _all_ of the database nodes.
+
+
+
+## Cluster Transactions Properties
+
+The cluster transaction feature enables RavenDB to perform consistent cluster-wide ACID transactions.
+A cluster transaction can be composed of two optional parts:
+
+1. [Compare Exchange](../../client-api/operations/compare-exchange/overview.mdx) values, which are validated and executed by the cluster.
+
+   Compare-exchange key/value pairs can be created and managed explicitly in your code.
+   Starting from RavenDB 5.2, they can also be created and managed automatically by RavenDB.
+   Compare-exchange entries that are automatically administered by RavenDB are called **Atomic Guards**;
+   read more about them [here](../../client-api/session/cluster-transaction/atomic-guards.mdx).
+
+2. Store/Delete operations on documents, which are executed by the database nodes after the transaction has been accepted.
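+
+As a quick illustration of both parts used together, here is a minimal C# sketch (the document, compare-exchange key, `User` class, and initialized `store` are illustrative assumptions; see the client API link above for the full reference):
+
+
+
+{`using (var session = store.OpenSession(new SessionOptions
+\{
+    // Favor consistency: the transaction must be accepted by a majority of the cluster
+    TransactionMode = TransactionMode.ClusterWide
+\}))
+\{
+    // Part 1: a compare-exchange value that reserves a unique email address
+    session.Advanced.ClusterTransaction.CreateCompareExchangeValue(
+        "emails/ayende@example.com", "users/1");
+
+    // Part 2: a document PUT, executed by the database nodes after acceptance
+    session.Store(new User \{ Name = "Ayende" \}, "users/1");
+
+    // Generates a single Raft command covering the whole transaction
+    session.SaveChanges();
+\}
+`}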
+
+**Atomicity**
+Once a quorum has been achieved for the cluster transaction request via Raft and the compare-exchange values have passed the concurrency check,
+the transaction is guaranteed to be executed.
+A failure during the quorum or the concurrency check will roll back the entire session transaction,
+while a failure during the commit of the documents will halt any further cluster-transaction execution on the database
+until that failure is remedied (the failure modes for document commits are described [here](../../server/clustering/cluster-transactions.mdx#failure-modes-in-cluster-wide-transactions)).
+
+**Consistency**
+Consistency is guaranteed on the requested node. The node will complete the request only when the transaction is completed
+and the documents are persisted on the node. The response to the client contains the cluster transaction's [Raft Index](../../server/clustering/rachis/consensus-operations.mdx#raft-index).
+It will be added to any future requests in order to ensure that the node has committed that transaction before serving the client.
+
+**Durability**
+Once the transaction has been accepted, it is guaranteed to run on all the database's nodes, even in the case of system
+(or even cluster-wide) restarts or failures.
+
+
+
+## Concurrent Cluster-Wide and Single-Node Transactions
+
+### Case 1: Multiple concurrent cluster transactions
+
+Optimistic concurrency for cluster-wide transactions is handled using the compare-exchange feature.
+The transaction's compare-exchange operations are validated, and if they can't be executed because the compare-exchange values
+have changed since the transaction was initiated, the entire session transaction is aborted and an error is returned to the client.
+
+Optimistic concurrency at the document level is _not_ supported for cluster-wide transactions.
+Compare-exchange operations should be used to ensure consistency in that regard. Concurrent cluster-wide transactions are guaranteed
+to appear as if they run one at a time.
+
+Cluster-wide transactions may only contain `PUT` / `DELETE` commands. This is required to ensure that the transaction can be applied
+to each of the database nodes without regard to the current node state (note: an update of a document is effectively executed as a `PUT` command).
+
+
+If the concurrency check of the compare exchange has passed, the transaction will proceed and will be committed on all the database nodes.
+
+
+### Case 2: Concurrent cluster and non-cluster transaction
+
+When mixing cluster-wide transactions and single-node transactions, you need to be aware of the rules RavenDB uses to
+resolve conflicts between them.
+
+Documents changed by **cluster-wide transactions will always have precedence** in such a conflict and will overwrite changes
+made in a single-node transaction. It is common to use cluster-wide transactions for certain high-value operations, such as
+the creation of a new user or the sale of a product with limited inventory, and to use single-node transactions for the common case.
+
+A single-node transaction that operates on data that has been modified by a cluster-wide transaction will operate as usual; since the cluster-wide
+transaction has already been applied (either directly on the node or via [replication](../../server/clustering/replication/replication-overview.mdx)),
+the cluster-wide transaction will not be executed again.
+
+[Replication](../../../server/clustering/replication/replication-conflicts.mdx) will try to synchronize the data. In order to avoid conflicts,
+every document that was modified under the cluster transaction receives the special `RAFT:int64-sequential-number`
+[Change Vector](../../server/clustering/replication/change-vector.mdx) and the special flag `FromClusterTx`, which ensures precedence
+over a regular change vector.
+
+### Case 3: Cluster transaction with external incoming replication
+
+While replication within a single cluster is covered by the previous case, the case where two clusters are connected via
+external replication is a bit different.
+
+The logic of documents that were changed by a cluster transaction versus documents that were changed by a regular transaction stays the same.
+However, when a conflict occurs on a document that was changed by both a local cluster transaction and a remote cluster transaction,
+the local one will have precedence. Furthermore, the `FromClusterTx` flag will be removed, which means that on the next conflict the local
+document is no longer treated as modified by a cluster-wide transaction.
+
+
+
+## Failure Modes in Cluster-Wide Transactions
+
+### No majority
+
+A cluster-wide transaction can operate only within a functional cluster.
+Thus, if no consensus was acquired for the cluster transaction from the majority of the nodes, or there is currently no leader,
+the transaction will be rolled back.
+
+### Concurrency issues for compare-exchange operations
+
+Acquiring consensus doesn't by itself mean the transaction is accepted. Once consensus is acquired, each node performs a
+concurrency check on the compare-exchange values. If the concurrency check fails, the transaction will be rolled back.
+
+### Failure to apply the transaction on database nodes
+
+Once the transaction has passed the compare-exchange concurrency check, it is guaranteed to be committed.
+Any failure at this stage must be remedied.
+
+| Failure | How to fix it |
+| ----- | ----- |
+| Out of disk space | Freeing space will fix the problem and allow cluster transactions to be committed. |
+| Creation/deletion of a document with a different collection | Deleting the document from the other collection will fix the problem. |
+
+
+The execution of cluster transactions on the database will be stopped until these types of failures are fixed.
+
+
+
+
+## Debug Cluster-Wide Transactions
+
+The current state of the cluster transactions that are waiting to be completed by all of the database nodes can be found at:
+
+| URL | Type | Permission |
+| --- | --- | --- |
+|`/databases/*/admin/debug/cluster/txinfo` | `GET` | `DatabaseAdmin` |
+
+Parameters:
+
+| Name | Type | Description |
+| --- | --- | --- |
+|`from` (optional) | `long` (default: 0) | Get cluster transactions starting from this Raft index. |
+|`take` (optional) | `int` (default: `int.MaxValue`) | The number of cluster transactions to show. |
+
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/distribution/_category_.json b/versioned_docs/version-7.1/server/clustering/distribution/_category_.json
new file mode 100644
index 0000000000..e82ea21c45
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/distribution/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 3,
+  "label": "Distribution"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/clustering/distribution/assets/pinning-etl-task.png b/versioned_docs/version-7.1/server/clustering/distribution/assets/pinning-etl-task.png
new file mode 100644
index 0000000000..ba3f423d3e
Binary files /dev/null and b/versioned_docs/version-7.1/server/clustering/distribution/assets/pinning-etl-task.png differ
diff --git a/versioned_docs/version-7.1/server/clustering/distribution/cluster-observer.mdx b/versioned_docs/version-7.1/server/clustering/distribution/cluster-observer.mdx
new file mode 100644
index 0000000000..f9135f66a7
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/distribution/cluster-observer.mdx
@@ -0,0 +1,82 @@
+---
+title: "Cluster Observer"
+hide_table_of_contents: true
+sidebar_label: Cluster Observer
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Cluster Observer
+
+
+* The primary goal of the **Cluster Observer** is to monitor the health of each database in the cluster
+  and adjust its topology to maintain the desired [Replication Factor](../../../server/clustering/distribution/distributed-database.mdx#replication-factor).
+
+* The observer always runs on the [Leader](../../../server/clustering/rachis/cluster-topology.mdx#leader) node.
+
+* In this page:
+  * [Operation flow](../../../server/clustering/distribution/cluster-observer.mdx#operation-flow)
+  * [Interacting with the Cluster Observer](../../../server/clustering/distribution/cluster-observer.mdx#interacting-with-the-cluster-observer)
+
+
+## Operation flow
+
+* To maintain the Replication Factor, every newly elected [Leader](../../../server/clustering/rachis/cluster-topology.mdx#leader) starts measuring the health of each node
+  by creating dedicated maintenance TCP connections to all other nodes in the cluster.
+
+* Each node reports the current status of _all_ its databases at intervals of [500 milliseconds](../../../server/configuration/cluster-configuration.mdx#clusterworkersampleperiodinms) (by default).
+  The `Cluster Observer` consumes those reports every [1000 milliseconds](../../../server/configuration/cluster-configuration.mdx#clustersupervisorsampleperiodinms) (by default).
+
+* Upon a **node failure**, the [Dynamic Database Distribution](../../../server/clustering/distribution/distributed-database.mdx#dynamic-database-distribution) sequence
+  will take place in order to ensure that the `Replication Factor` does not change.
+
+
+
+  **For example**:
+
+  * Let us assume a five-node cluster with servers A, B, C, D, E.
+    We create a database with a replication factor of 3 and define an ETL task.
+
+  * The newly created database will be distributed automatically to three of the cluster nodes.
+    Let's assume it is distributed to B, C, and E (so the database group is [B,C,E]),
+    and the cluster decides that node C is responsible for performing the ETL task.
+
+  * If node C goes offline or becomes unreachable, the Cluster Observer detects the issue:
+    * After the duration specified in the [Cluster.TimeBeforeMovingToRehabInSec](../../../server/configuration/cluster-configuration.mdx#clustertimebeforemovingtorehabinsec) configuration,
+      the observer moves node C to rehab mode, allowing time for recovery.
+    * The ETL task fails over to another available node in the Database Group.
+
+  * If node C remains offline beyond the period specified in the [Cluster.TimeBeforeAddingReplicaInSec](../../../server/configuration/cluster-configuration.mdx#clustertimebeforeaddingreplicainsec) configuration,
+    the observer, as a last resort, adds a new node to the Database Group and begins replicating the database to it.
+
+
+
+
+
+  **Note**:
+
+  * The _Cluster Observer_ stores its information **in memory**, so when the `Leader` loses leadership,
+    the collected reports of the _Cluster Observer_ and its decision log are lost.
+
+
+
+
+
+## Interacting with the Cluster Observer
+
+You can interact with the `Cluster Observer` using the following REST API calls:
+
+| URL | Method | Query Params | Description |
+|-----|--------|--------------|-------------|
+| `/admin/cluster/observer/suspend` | POST | value=[`bool`] | Setting `false` will suspend the _Cluster Observer_ operation for the current [Leader term](../../../studio/cluster/cluster-view.mdx#cluster-nodes-states--types-flow). |
+| `/admin/cluster/observer/decisions` | GET | | Fetch the log of the recent decisions made by the Cluster Observer. |
+| `/admin/cluster/maintenance-stats` | GET | | Fetch the latest reports of the _Cluster Observer_. |
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/distribution/distributed-database.mdx b/versioned_docs/version-7.1/server/clustering/distribution/distributed-database.mdx
new file mode 100644
index 0000000000..cc736f52ee
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/distribution/distributed-database.mdx
@@ -0,0 +1,137 @@
+---
+title: "Distributed Database"
+hide_table_of_contents: true
+sidebar_label: Distributed Database
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Distributed Database
+
+
+* In RavenDB, a database can be replicated across multiple nodes, depending on its [Replication Factor](../../../server/clustering/distribution/distributed-database.mdx#replication-factor).
+
+* A node where a database resides is referred to as a `Database Node`.
+  The group of _Database Nodes_ that assembles the distributed database is called a [Database Group](../../../studio/database/settings/manage-database-group.mdx).
+
+* Unless [Sharding](../../../server/clustering/distribution/distributed-database.mdx#sharding) is employed,
+  each Database Node has a **full copy** of the database, containing **all** the database documents, and indexes them **locally**.
+  This greatly simplifies query execution, as there is no need to aggregate data from multiple nodes.
+ +* In this page: + * [The Database Record](../../../server/clustering/distribution/distributed-database.mdx#the-database-record) + * [Replication Factor](../../../server/clustering/distribution/distributed-database.mdx#replication-factor) + * [Database Topology](../../../server/clustering/distribution/distributed-database.mdx#database-topology) + * [Dynamic Database Distribution](../../../server/clustering/distribution/distributed-database.mdx#dynamic-database-distribution) + * [Sharding](../../../server/clustering/distribution/distributed-database.mdx#sharding) + + +## The Database Record + +Each database instance keeps its configuration (e.g. Index definitions, Database topology) in a [Database Record](../../../client-api/operations/server-wide/create-database.mdx) object. +Upon database creation, this object is propagated through Rachis to all nodes in the cluster. + +After that, each node updates its own `Database Record` independently upon receiving any new Raft command, +e.g. when an index has changed. + + + +## Replication Factor + +When creating a database, you can either: + +* Explicitly specify the _exact_ nodes to use for the `Database Group` +* Or, specify only the number of nodes needed and let RavenDB automatically choose the nodes + on which the database will reside. + +In either case, the number of nodes will represent the `Replication Factor`. + +Once the database is created by getting a [Consensus](../../../server/clustering/rachis/consensus-operations.mdx), +the [Cluster Observer](../../../server/clustering/distribution/cluster-observer.mdx) begins monitoring the _Database Group_ in order to maintain this Replication Factor. + + + +## Database Topology + +The `Database Topology` describes the relationships between the Database Nodes within the [Database Group](../../../studio/database/settings/manage-database-group.mdx). +Each **Database Node** can be in one of the following states: + +| State | Description | +|----------------|---------------------------------------------------------------------------------| +| **Member** | Fully updated and functional database node. | +| **Promotable** | A node that has been recently added to the group and is being updated. | +| **Rehab** | A former Member node that is assumed to be _not_ up-to-date due to a partition. | + + + +* In general, all nodes in a newly created database are in a `Member` state. +* When adding a new Database Node to an already existing database group, a `Mentor Node` is selected in order to update it. + The new node will be in a `Promotable` state until it receives _and_ indexes all the documents from the mentor node. +* Learn more in: + * [Cluster node types](../../../studio/cluster/cluster-view.mdx#cluster-nodes-types) + * [Cluster node states and types flow](../../../studio/cluster/cluster-view.mdx#cluster-nodes-states--types-flow) + + + + + +* The database topology is kept in a list that is always ordered with `Member` nodes first, + then `Rehabs` and `Promotables` are last. +* The order is important since it defines the client's order of access into the Database Group, + (see [Load balancing client requests](../../../client-api/configuration/load-balance/overview.mdx)). +* The order can be modified using the [Client-API](../../../client-api/operations/server-wide/reorder-database-members.mdx) + or via the [Studio](../../../studio/database/settings/manage-database-group.mdx#database-group-topology---actions). 
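+
+For example, reordering the group via the Client API might look like this minimal C# sketch (the database name and node tags are illustrative, and an initialized `store` is assumed):
+
+
+
+{`using System.Collections.Generic;
+using Raven.Client.ServerWide.Operations;
+
+// Make node A the preferred node, followed by B and C
+store.Maintenance.Server.Send(
+    new ReorderDatabaseMembersOperation("Northwind", new List<string> \{ "A", "B", "C" \}));
+`}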
+
+
+
+
+All `Members` have master-master [Replication](../../../server/clustering/replication/replication-overview.mdx) in order to keep the documents in sync across the nodes.
+
+
+
+
+## Dynamic Database Distribution
+
+If any of the `Database Nodes` is down or partitioned, the [Cluster Observer](../../../server/clustering/distribution/cluster-observer.mdx) will recognize it and act as follows:
+
+1. If the time defined in [TimeBeforeMovingToRehabInSec](../../../server/configuration/cluster-configuration.mdx#clustertimebeforemovingtorehabinsec) (default: 60 seconds) has passed
+   and the node is still unreachable, the node will be moved to a `Rehab` state.
+
+2. If the node remains in `Rehab` for the time defined in [TimeBeforeAddingReplicaInSec](../../../server/configuration/cluster-configuration.mdx#clustertimebeforeaddingreplicainsec) (default: 900 seconds),
+   a new database node will be automatically added to the database group to replace the `Rehab` node.
+
+3. If the `Rehab` node comes back online, it will be assigned a [Mentor Node](../../../server/clustering/distribution/highly-available-tasks.mdx#responsible-node) to update it with the recent changes.
+
+4. The first node to be up-to-date stays, while the other is deleted.
+
+
+
+The `Rehab` node is deleted only when it reconnects to the cluster,
+and only AFTER it has finished sending all the new documents it may have accumulated (while disconnected) to the other nodes in the _Database Group_.
+
+
+
+The _Dynamic Database Distribution_ feature can be toggled on and off with the following request:
+
+| URL | Method | URL Params |
+|-----|--------|------------|
+| /admin/databases/dynamic-node-distribution | `POST` | name=[`database-name`], enable=[`bool`] |
+
+
+
+## Sharding
+
+* Sharding, supported by RavenDB as an out-of-the-box solution starting with version **6.0**,
+  is the distribution of a database's content across autonomous shards.
+
+* Learn more about sharding in the dedicated [Sharding overview](../../../sharding/overview.mdx) article.
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/distribution/highly-available-tasks.mdx b/versioned_docs/version-7.1/server/clustering/distribution/highly-available-tasks.mdx
new file mode 100644
index 0000000000..637184c642
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/distribution/highly-available-tasks.mdx
@@ -0,0 +1,180 @@
+---
+title: "Highly Available Tasks"
+hide_table_of_contents: true
+sidebar_label: Highly Available Tasks
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Highly Available Tasks
+
+
+* A **RavenDB Task** can be one of the following:
+
+  * An [Ongoing Task](../../../studio/database/tasks/ongoing-tasks/general-info.mdx)
+  * Updating a `Rehab` or a `Promotable` [Database Node](../../../server/clustering/distribution/distributed-database.mdx#database-topology)
+
+* There is no single coordinator handing out tasks to specific nodes.
+  Instead, each node decides on its own whether it is the [Responsible Node](../../../server/clustering/distribution/highly-available-tasks.mdx#responsible-node) for a task.
+
+* Each node re-evaluates its responsibilities with every change made to the [Database Record](../../../client-api/operations/server-wide/create-database.mdx),
+  such as defining a new _index_, configuring or modifying an _Ongoing Task_, any _Database Topology_ change, etc.
+
+* In this page:
+  * [License](../../../server/clustering/distribution/highly-available-tasks.mdx#license)
+  * [Constraints](../../../server/clustering/distribution/highly-available-tasks.mdx#constraints)
+  * [Responsible Node](../../../server/clustering/distribution/highly-available-tasks.mdx#responsible-node)
+  * [Tasks Relocation](../../../server/clustering/distribution/highly-available-tasks.mdx#tasks-relocation)
+  * [Pinning a Task](../../../server/clustering/distribution/highly-available-tasks.mdx#pinning-a-task)
+
+## License
+
+Please see the [available license types](https://ravendb.net/buy) and check your
+[own license](http://live-test.ravendb.net/studio/index.html#about)
+to verify whether the Highly Available Tasks feature is activated in your database.
+
+* If your license **provides** highly available tasks, the responsibilities of
+  a failed cluster node will be assigned automatically to another available node.
+
+  Supported tasks include:
+  * Backup
+  * Data subscription
+  * All ETL types
+
+* If your license does **not** provide highly available tasks, the responsibilities of a failed node will be
+  resumed when the node comes back online.
+
+* The scenarios [below](../../../server/clustering/distribution/highly-available-tasks.mdx#tasks-relocation)
+  demonstrate the behavior of a system licensed for highly available tasks.
+
+
+
+## Constraints
+
+1. A task is defined per [Database Group](../../../server/clustering/distribution/distributed-database.mdx).
+
+2. A task is executed by a single `Database Node` only.
+   The Backup Task is an exception in case of a cluster partition; see [Backup Task - When Cluster or Node are Down](../../../studio/database/tasks/backup-task.mdx#when-the-cluster-or-node-are-down).
+
+3. A `Database Node` can be assigned many tasks.
+
+4. The node must be in a [Member](../../../server/clustering/distribution/distributed-database.mdx#database-topology) state in the `Database Group` in order to perform a task.
+
+5. The cluster must be in a functional state.
+
+
+## Responsible Node
+
+* The `Responsible Node` is the node that is responsible for performing a specific Ongoing Task.
+
+* Each node checks whether it is the `Responsible Node` for the task by executing a local function that is based on the
+  _unique hash value_ of the task and the current [Database Topology](../../../server/clustering/distribution/distributed-database.mdx#database-topology).
+
+* Since the `Database Topology` is _eventually consistent_ across the cluster,
+  there will be an **eventually consistent single Responsible Node**, which satisfies the above constraints.
+
+
+Learn more [here](../../../server/clustering/distribution/distributed-database.mdx#database-topology)
+about database nodes' relations and states.
+
+
+
+
+## Tasks Relocation
+
+Upon a `Database Topology` change, _all_ existing tasks will be re-evaluated and
+re-distributed among the functional nodes.
+
+
+
+**For example**:
+
+Let's assume that we have a 5-node cluster [A, B, C, D, E] with a database on [A, B, E] and a task on node B.
+
+Node B has network issues and is separated from the cluster,
+so nodes [A, C, D, E] are on one side and node [B] is on the other side.
+
+The [Cluster Observer](../../../server/clustering/distribution/cluster-observer.mdx) will note that it can't reach node B
+and issue a [Raft Command](../../../server/clustering/rachis/consensus-operations.mdx) in order to move node B to the `Rehab` state.
+
+Once this change has propagated, it will trigger a re-assessment of _all_ tasks on _all_ reachable nodes.
+In our example, the task will move to either A or E.
+
+Meanwhile, node B, which has no communication with the [Cluster Leader](../../../server/clustering/rachis/cluster-topology.mdx),
+moves itself to the `Candidate` state and removes all its tasks.
+
+
+
+
+## Pinning a Task
+
+It is sometimes preferable to **prevent** the failover of tasks to different responsible nodes.
+One example is a heavy-duty backup task, which is better left in the continuous care
+of its original node than reassigned to another node in the middle of a backup operation.
+Another example is an ETL task that transfers
+[artificial documents](../../../studio/database/indexes/create-map-reduce-index.mdx#saving-map-reduce-results-in-a-collection-(artificial-documents)).
+In this case, a reassigned task might skip some of the artificial documents that were created on
+the original node.
+
+The failover of a task to another responsible node can be prevented by **pinning the task** to a mentor node.
+
+* A pinned task will be handled only by the node it is pinned to, as long as this node is a database
+  group member.
+* If the node the task is pinned to fails, the task will **not** be executed until the node is back online.
+  When the node comes back online, the task resumes from the point of failure.
+* If a node remains offline for the period set by
+  [cluster.timebeforeaddingreplicainsec](../../../server/configuration/cluster-configuration.mdx#clustertimebeforeaddingreplicainsec),
+  the cluster observer will attempt to select an available node to replace it in the database group
+  and redistribute the failed node's tasks, including pinned ones, among database group members.
+
+A task can be pinned to a selected node via Studio or using code.
+
+#### Pinning via Studio
+
+![Pinning an ETL Task Using Studio](./assets/pinning-etl-task.png)
+
+#### Pinning using code
+
+To pin a task to the node that runs it, set the task's `PinToMentorNode` configuration
+option to `true`.
+In the following example, a RavenDB ETL task is pinned.
+
+
+
+{`AddEtlOperation<RavenConnectionString> operation = new AddEtlOperation<RavenConnectionString>(
+    new RavenEtlConfiguration
+    \{
+        ConnectionStringName = "raven-connection-string-name",
+        Name = "Employees ETL",
+        Transforms =
+        \{
+            new Transformation
+            \{
+                Name = "Script #1",
+                Collections =
+                \{
+                    "Employees"
+                \},
+                Script = @"loadToEmployees (\{
+                    Name: this.FirstName + ' ' + this.LastName,
+                    Title: this.Title
+                \});"
+            \}
+        \},
+
+        // Pin the task to prevent failover to another node
+        PinToMentorNode = true
+
+    \});
+
+AddEtlOperationResult result = store.Maintenance.Send(operation);
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/overview.mdx b/versioned_docs/version-7.1/server/clustering/overview.mdx
new file mode 100644
index 0000000000..c1b4a40c67
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/overview.mdx
@@ -0,0 +1,78 @@
+---
+title: "Cluster: Overview"
+hide_table_of_contents: true
+sidebar_label: Clustering Overview
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Cluster: Overview
+
+
+RavenDB's clustering provides redundancy and an increased availability of data that is consistent
+across a fault-tolerant, [High-Availability](https://en.wikipedia.org/wiki/High-availability_cluster) cluster.
+
+
+
+* A [RavenDB Cluster](../../glossary/ravendb-cluster.mdx) consists of one or more RavenDB server instances, which are called [Cluster Nodes](../../glossary/cluster-node.mdx).
+* Each node has a specific state and type; learn more in [Cluster Topology](../../server/clustering/rachis/cluster-topology.mdx).
+
+
+
+
+* Some actions, such as creating a new database or creating an index, require a [cluster consensus](../../server/clustering/rachis/consensus-operations.mdx) in order to occur.
+* The cluster nodes are kept in consensus by using [Rachis](../../server/clustering/rachis/what-is-rachis.mdx),
+  which is RavenDB's Raft Consensus Algorithm implementation for distributed systems.
+* The **Rachis** algorithm ensures the following:
+  * These actions are done only if the majority of the nodes in the cluster agree to them.
+  * Any such series of events (each called a [Raft Command](../../glossary/raft-command.mdx)) will be executed in the _same_ order on each node.
+
+
+
+
+* In RavenDB, the database is replicated to multiple nodes - see [Database Distribution](../../server/clustering/distribution/distributed-database.mdx).
+* A group of nodes in the cluster that contains the same database is called a [Database Group](../../studio/database/settings/manage-database-group.mdx).
+  (The number of nodes in the database group is set by the replication factor supplied when creating the database.)
+* Documents are kept in sync across the _Database Group_ nodes with [master-to-master replication](../../server/clustering/replication/replication-overview.mdx).
+* Any document-related change, such as a CRUD operation, doesn't go through Raft;
+  instead, it is automatically **replicated** to the other database instances in order to keep the data up-to-date.
+
+
+
+
+* Due to the consistency of the data, even if the majority of the cluster is down, as long as a single node is available, we can still process Reads and Writes.
+* Read requests can be spread among the cluster's nodes for better performance, as sketched below.
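+
+For illustration, read spreading can be enabled from the client by setting the read balance behavior convention; this is a minimal sketch, and the server URL and database name are placeholder assumptions:
+
+```csharp
+using Raven.Client.Documents;
+using Raven.Client.Http;
+
+var store = new DocumentStore
+{
+    Urls = new[] { "http://localhost:8080" },
+    Database = "Northwind",
+    Conventions =
+    {
+        // Spread read requests across the database-group nodes in round-robin fashion
+        ReadBalanceBehavior = ReadBalanceBehavior.RoundRobin
+    }
+};
+store.Initialize();
+```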
+
+
+
+
+* Whenever there's a [Work Task](../../server/clustering/distribution/highly-available-tasks.mdx) for a _Database Group_ to do (e.g. a Backup task),
+  the cluster will decide which node will actually be responsible for it.
+* These tasks remain operational even if the node the client is connected to is down,
+  as that node's tasks are **re-assigned** to other available nodes in the _Database Group_.
+
+
+
+
+* The cluster's health is monitored by the [Cluster Observer](../../server/clustering/distribution/cluster-observer.mdx), which checks on each node in the cluster.
+* The node state is recorded in the relevant database groups so that the cluster can maintain the database replication factor and re-distribute its work tasks if needed.
+
+
+
+
+* A cluster is limited in the maximum number of CPU cores that can be used by all of its nodes
+at a given time.
+* This limit is determined by the cluster's RavenDB license. For example, a Community licensed
+cluster can have up to 3 cores. This means that when there is just one node in the cluster, it
+can use 1-3 cores. If there are 3 nodes, each will use 1 core (since a server must be
+assigned at least 1 core).
+* The number of assigned cores is divided evenly between all the nodes of a cluster.
+* The maximum number of cores a node can use can be limited
+  [using Studio](../../studio/cluster/cluster-view.mdx#reassign-cores).
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/rachis/_category_.json b/versioned_docs/version-7.1/server/clustering/rachis/_category_.json
new file mode 100644
index 0000000000..b48a7269a2
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/rachis/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 1,
+    "label": "Rachis"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/clustering/rachis/assets/cluster-states.png b/versioned_docs/version-7.1/server/clustering/rachis/assets/cluster-states.png
new file mode 100644
index 0000000000..9f117a5914
Binary files /dev/null and b/versioned_docs/version-7.1/server/clustering/rachis/assets/cluster-states.png differ
diff --git a/versioned_docs/version-7.1/server/clustering/rachis/cluster-topology.mdx b/versioned_docs/version-7.1/server/clustering/rachis/cluster-topology.mdx
new file mode 100644
index 0000000000..5791298242
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/rachis/cluster-topology.mdx
@@ -0,0 +1,150 @@
+---
+title: "Cluster Topology"
+hide_table_of_contents: true
+sidebar_label: Cluster Topology
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Cluster Topology
+
+
+* The cluster topology is defined by the type and state of each node in the cluster and the relations between them.
+
+* Clusters are typically composed of an odd number of nodes, equal to or greater than 3; see [Clusters Best Practice](../../../server/clustering/cluster-best-practice-and-configuration.mdx).
+
+* In this page:
+  * [Fetching the topology](../../../server/clustering/rachis/cluster-topology.mdx#fetching-the-topology)
+  * [Modifying the topology](../../../server/clustering/rachis/cluster-topology.mdx#modifying-the-topology)
+  * [Nodes states and types](../../../server/clustering/rachis/cluster-topology.mdx#nodes-states-and-types)
+  * [Changing node type](../../../server/clustering/rachis/cluster-topology.mdx#changing-node-type)
+
+
+## Fetching the topology
+
+Any client with [Valid User](../../security/authorization/security-clearance-and-permissions.mdx#user) privileges can fetch the topology from the server
+by sending the following REST API call:
+
+| Action         | Method | URL               |
+|----------------|--------|-------------------|
+| Fetch Topology | `GET`  | /cluster/topology |
+
+
+
+## Modifying the topology
+
+With [Cluster Admin](../../../server/security/authorization/security-clearance-and-permissions.mdx#cluster-admin) privileges,
+the client can modify the cluster through the [Studio](../../../studio/cluster/cluster-view.mdx#cluster-view-operations),
+or by using the following REST API calls:
+
+| Action      | Method   | URL                                      |
+|-------------|----------|------------------------------------------|
+| Add Node    | `PUT`    | /admin/cluster/node?url=[`node-url`]     |
+| Remove Node | `DELETE` | /admin/cluster/node?nodeTag=[`node-tag`] |
+
+Optional parameters (for the Add Node endpoint):
+
+| Name             | Value type | Description |
+|------------------|------------|-------------|
+| tag              | `string`   | The node tag. 1-4 uppercase unicode letters.<br/>Default: 'A' - 'Z', assigned by order of addition. |
+| watcher          | `bool`     | Add the node as a [Watcher](../../../server/clustering/rachis/cluster-topology.mdx#watcher).<br/>Default: `false` |
+| maxUtilizedCores | `uint`     | The maximum number of cores that can be assigned to this node.<br/>Default: the number of processors. |
+
+See the [Cluster API page](../../../server/clustering/cluster-api.mdx) for usage examples.
+
+## Nodes states and types
+
+In Rachis, every cluster node has a [state](../../../server/clustering/rachis/cluster-topology.mdx#state) and a [type](../../../server/clustering/rachis/cluster-topology.mdx#type).
+
+
+
+##### Type
+
+The type defines the node's ability to **vote** on accepting a new Raft command in the cluster,
+and to **elect** a new leader when needed.
+
+| Node type      | Description |
+|----------------|-------------|
+| **Member**     | A fully functional voting node in the cluster. |
+| **Promotable** | A non-voting node.<br/>This is an intermediate stage until the node is promoted by the Leader to a Member. |
+| **Watcher**    | A non-voting node that is still fully managed by the cluster.<br/>Can be assigned databases and tasks. See more [below](../../../server/clustering/rachis/cluster-topology.mdx#watcher). |
+
+
+
+##### State
+
+The Rachis state defines the **current role** of the node in the cluster.
+
+| Rachis state     | Description |
+|------------------|-------------|
+| **Passive**      | Not a part of _any_ cluster.<br/><br/>Every newly created RavenDB server starts in the initial **Passive** state.<br/>A passive node can become the Leader of a single-node cluster,<br/>or it can be added to an already existing cluster.<br/><br/>A node that is removed from the topology moves back to the **Passive** state. |
+| **Candidate**    | Has no Leader and tries to get elected for leadership. |
+| **Follower**     | Has a Leader and operates normally. |
+| **Leader-Elect** | Just elected for leadership, but will take office and become the Leader only after the dummy `noop` Raft command has been successfully applied in the cluster. |
+| **Leader**       | The Leader of the cluster. See more [below](../../../server/clustering/rachis/cluster-topology.mdx#leader). |
+
+
+
+![Figure 3. States Transitions](./assets/cluster-states.png)
+
+**1.** A new server/node starts as **Passive**, meaning it is _not_ part of any cluster yet.
+**2.** When a node is added to a cluster, it immediately becomes the **Leader** if it is the only node in the cluster.
+**3.** When a node is added to a cluster and a Leader already exists, it becomes **Promotable** (on its way to becoming either a Member or a Watcher, depending on what was specified when it was added).
+**4.** The node will become a **Member** of the cluster if not specified otherwise.
+**5.** The node will become a **Watcher** if specified when adding it.
+**6.** A Member can be `Demoted` to a Watcher.
+**7.** A Watcher can be `Promoted` to a Member. It will first become Promotable, and a Member thereafter.
+**8.** A Member (a regular Member or a Leader, but not a Watcher) can become a **Candidate** when a voting process for a new Leader takes place.
+**9.** When voting is over and a new Leader is elected, one candidate becomes the Leader and the rest go back to being its Followers.
+
+
+
+
+
+##### Leader
+
+* The Leader makes sure that decisions are consistent at the cluster level, as long as a majority of the nodes are functioning and can talk to the Leader.
+
+* For example, the decision to add a database to a node will either be accepted by the entire cluster (eventually) or fail to register altogether.
+
+* [Raft Commands](../../../server/clustering/rachis/consensus-operations.mdx#raft-commands-implementation-details) can't be accepted while there is no Leader or if the Leader is down.
+
+
+
+
+
+##### Watcher
+
+Grow your RavenDB cluster by adding Watchers without suffering from large voting majorities and the latencies they can incur,
+as Watchers don't take part in majority calculations.
+
+* A Watcher can be assigned databases and tasks like a regular Member, but it is not included in the decision-making flow,
+  so cluster decisions can be made with a small majority of nodes while the actual size of the cluster can be much higher.
+
+* Any number of Watchers can be added to handle the workload.
+
+
+
+
+
+## Changing Node Type
+
+Node types can be altered on the fly by using the following REST API calls:
+
+| Action                         | Method | URL                    | Clearance    |
+|--------------------------------|--------|------------------------|--------------|
+| Promote Watcher to be a Member | `POST` | /admin/cluster/promote | ClusterAdmin |
+| Demote Member to be a Watcher  | `POST` | /admin/cluster/demote  | ClusterAdmin |
+| Force Elections                | `POST` | /admin/cluster/reelect | Operator     |
+
+See the [Cluster API page](../../../server/clustering/cluster-api.mdx) for usage examples.
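+
+As a rough sketch (not an official client API), such a call can be issued with the same raw-HTTP pattern used elsewhere in these docs; the server URL, database name, node tag `B`, and the `nodeTag` query parameter name are all assumptions:
+
+```csharp
+using System.Net.Http;
+using Raven.Client.Documents;
+
+using (var store = new DocumentStore
+{
+    Urls = new[] { "http://localhost:8080" },
+    Database = "Northwind"
+}.Initialize())
+{
+    // Promote the Watcher tagged 'B' to a Member
+    // (requires ClusterAdmin clearance; assumes an async context)
+    var promoteUrl = "http://localhost:8080/admin/cluster/promote?nodeTag=B";
+    await store.GetRequestExecutor()
+        .HttpClient
+        .SendAsync(new HttpRequestMessage(HttpMethod.Post, promoteUrl));
+}
+```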
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/rachis/consensus-operations.mdx b/versioned_docs/version-7.1/server/clustering/rachis/consensus-operations.mdx
new file mode 100644
index 0000000000..06738ebaca
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/rachis/consensus-operations.mdx
@@ -0,0 +1,85 @@
+---
+title: "Consensus Operations"
+hide_table_of_contents: true
+sidebar_label: Consensus Operations
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Consensus Operations
+
+
+* Any operation that is made at the _cluster level_ requires a consensus,
+  meaning it will either be accepted by the majority of the cluster nodes (n/2 + 1), or completely fail to register.
+
+* Such an operation is named a **Raft Command** (or Raft Log).
+  Once issued, it is propagated through [Rachis](../../../server/clustering/rachis/what-is-rachis.mdx) to all the nodes.
+  Only after the cluster's approval (a majority of nodes) will it _eventually_ be executed on all cluster nodes.
+
+* In this page:
+  * [Operations that require Consensus](../../../server/clustering/rachis/consensus-operations.mdx#operations-that-require-consensus)
+  * [Operations that do not require Consensus](../../../server/clustering/rachis/consensus-operations.mdx#operations-that-do-not-require-consensus)
+  * [Raft Commands Implementation Details](../../../server/clustering/rachis/consensus-operations.mdx#raft-commands-implementation-details)
+
+## Operations that require Consensus
+
+Since getting a consensus is an expensive operation, it is limited to the following operations only:
+
+* Creating / Deleting a database
+* Adding / Removing a node to / from a Database Group
+* Changing database settings (e.g. revisions configuration, conflict resolution)
+* Creating / Deleting indexes (static and auto indexes)
+* Configuring the [Ongoing Tasks](../../../studio/database/tasks/ongoing-tasks/general-info.mdx)
+
+See the [Raft Commands Implementation Details](../../../server/clustering/rachis/consensus-operations.mdx#raft-commands-implementation-details) below.
+
+
+
+## Operations that do not require Consensus
+
+* It is important to understand that any document-related operation **does not** require a consensus.
+  Any **document CRUD operation** or **query on an _existing_ index** is executed against a _specific node_,
+  even in the case of a cluster partition.
+
+* Since RavenDB keeps documents synchronized by [Replication](../../../server/clustering/replication/replication-overview.mdx),
+  any such operation is automatically replicated to all other nodes in the Database Group,
+  so documents are always available for _Read_, _Write_ and _Query_ even if there is no majority of nodes in the cluster.
+
+
+## Raft Commands Implementation Details
+
+### Raft Index
+
+* Every Raft command is assigned a **Raft Index**, which corresponds to the command's place in the sequential execution order.
+  For example, an operation with index 7 is executed only after _all_ operations with a smaller index have been executed.
+
+* If needed, a client with [Valid User](../../../server/security/authorization/security-clearance-and-permissions.mdx#user) privileges
+  can wait for a certain Raft command index to be executed on a specific cluster node.
+  This is done by issuing the following REST API call:
+
+  | Action | Method | URL |
+  | - | - | - |
+  | Wait for Raft Command | `GET` | /rachis/waitfor?index=[`index`] |
+
+* The request will return after the corresponding Raft command has been successfully applied, -or-
+  with a `timeout` after `Cluster.OperationTimeoutInSec` has passed (default: 15 seconds).
+
+### Raft Command Events Sequence
+
+* When a Raft command is sent, the following **sequence of events** occurs:
+
+  1. The client sends the command to a cluster node.
+  2. If the receiving node is not the Leader, it redirects the command to the Leader.
+  3. The Leader appends the command to its log and propagates the command to all other nodes.
+  4. If the Leader receives an acknowledgment from the majority of nodes, the command is actually executed.
+  5. Once the command is executed on the Leader node, it is committed to the Leader Log, and a notification is sent to the other nodes.
+     When the other nodes receive the notification, they execute the command as well.
+  6. When a non-Leader node executes the command, it is added to that node's log as well.
+  7. The client receives the Raft Index of the issued command, so it can be waited upon.
+
diff --git a/versioned_docs/version-7.1/server/clustering/rachis/what-is-rachis.mdx b/versioned_docs/version-7.1/server/clustering/rachis/what-is-rachis.mdx
new file mode 100644
index 0000000000..2233b76882
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/rachis/what-is-rachis.mdx
@@ -0,0 +1,50 @@
+---
+title: "Rachis - RavenDB's Raft Implementation"
+hide_table_of_contents: true
+sidebar_label: What Is Rachis
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Rachis - RavenDB's Raft Implementation
+
+
+* **Rachis** is RavenDB's **Raft** implementation.
+
+* Definition of Rachis: Spinal column; also the distal part of the shaft of a feather that bears the web.
+
+## What is Raft?
+
+* **Raft** is a simple and easy-to-understand distributed consensus protocol.
+
+* It allows you to execute an ordered set of operations across your entire cluster.
+  This means that you can apply a set of operations on a state machine and have the _same_ final state machine on all the cluster's nodes.
+  Any such series of events (each called a [Raft Command](../../../server/clustering/rachis/consensus-operations.mdx#raft-commands-implementation-details)) will be executed in the _same_ order on each node.
+
+* The [Leader Node](../../../server/clustering/rachis/cluster-topology.mdx#leader) accepts all the Raft Command requests for the cluster and handles committing them cluster-wide.
+  These Raft Commands are executed only if the majority of the nodes in the cluster agree to them.
+
+
+
+* Nice visualizations can be found at [The Secret Lives of Data](http://thesecretlivesofdata.com/raft/) & [raft.github.io](https://raft.github.io/).
+* The full Raft thesis can be found here: [Raft paper](http://web.stanford.edu/~ouster/cgi-bin/papers/raft-atc14).
+
+
+
+## What is Rachis?
+
+**Rachis** is RavenDB's Raft implementation, with the following added features:
+
+* Support for in-memory and persistent large multi-task state machines.
+* Reliably committing updates to a distributed set of state machines.
+* Support for voting & non-voting cluster members; see [Cluster Topology](../../../server/clustering/rachis/cluster-topology.mdx).
+* Dynamic topology: nodes can be added to and removed from the cluster on the fly.
+* Managing situations such as handling a Leader timeout and forcing a Leader to step down.
+* An ACID local log using the Voron Storage Engine.
+
diff --git a/versioned_docs/version-7.1/server/clustering/replication/_category_.json b/versioned_docs/version-7.1/server/clustering/replication/_category_.json
new file mode 100644
index 0000000000..aa6362e815
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/replication/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 2,
+    "label": "Replication"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/clustering/replication/advanced-replication.mdx b/versioned_docs/version-7.1/server/clustering/replication/advanced-replication.mdx
new file mode 100644
index 0000000000..30810e9612
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/replication/advanced-replication.mdx
@@ -0,0 +1,37 @@
+---
+title: "Advanced Replication Topics"
+hide_table_of_contents: true
+sidebar_label: Advanced Replication
+sidebar_position: 3
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Advanced Replication Topics
+
+## Replication Handshake Procedure
+
+Whenever the replication process between two databases is initiated, it first has to determine where to resume from.
+
+1. The first message is a request to establish a TCP connection of type _replication_ with a _protocol version_.
+2. The _destination_ verifies that the protocol version matches and that the request is authorized.
+3. Once the _source_ gets the OK message, it queries the _destination_ for the latest ETag the destination received from it.
+4. The _destination_ sends back a heartbeat message with both the latest `ETag` it received from the _source_ and the current [Change Vector](../../../server/clustering/replication/change-vector.mdx) of the database.
+5. That `ETag` is used as the starting point for the replication process, but the outgoing documents are then filtered by the _destination's_ current `Change Vector`,
+meaning documents with a higher `ETag` but an already-covered `Change Vector` are skipped. This is done to prevent the [Ripple Effect](../../../server/clustering/replication/advanced-replication.mdx#preventing-the-ripple-effect).
+
+## Preventing the Ripple Effect
+
+A RavenDB [Database Group](../../../server/clustering/distribution/distributed-database.mdx#distributed-database) is a fully connected graph of replication channels, meaning that if there are `n` nodes in a `Database Group` there are `n*(n-1)` replication channels.
+We want to prevent the case where inserting data into one database causes that data to propagate multiple times, through multiple paths, to all the other nodes.
+This is achieved by delaying the propagation of data that itself arrived through replication.
+
+If the sole source of incoming data is replication, the node will not replicate it onwards right away; it will wait up to `15 seconds` before sending the data.
+This allows the destination to announce its current change vector, so most of the time the data will get filtered out at the source.
+On a stable system, the steady state forms a `Spanning Tree` of replication channels: the `n-1` fastest channels available do the actual work, while the rest just send heartbeats.
+
diff --git a/versioned_docs/version-7.1/server/clustering/replication/assets/conflict-resolution-script-in-studio.jpg b/versioned_docs/version-7.1/server/clustering/replication/assets/conflict-resolution-script-in-studio.jpg
new file mode 100644
index 0000000000..29d30e7e9c
Binary files /dev/null and b/versioned_docs/version-7.1/server/clustering/replication/assets/conflict-resolution-script-in-studio.jpg differ
diff --git a/versioned_docs/version-7.1/server/clustering/replication/assets/resolve-conflicted-document-screen.jpg b/versioned_docs/version-7.1/server/clustering/replication/assets/resolve-conflicted-document-screen.jpg
new file mode 100644
index 0000000000..453fff4d1d
Binary files /dev/null and b/versioned_docs/version-7.1/server/clustering/replication/assets/resolve-conflicted-document-screen.jpg differ
diff --git a/versioned_docs/version-7.1/server/clustering/replication/assets/resolve-conflicted-document-screen2.jpg b/versioned_docs/version-7.1/server/clustering/replication/assets/resolve-conflicted-document-screen2.jpg
new file mode 100644
index 0000000000..dc688e3f68
Binary files /dev/null and b/versioned_docs/version-7.1/server/clustering/replication/assets/resolve-conflicted-document-screen2.jpg differ
diff --git a/versioned_docs/version-7.1/server/clustering/replication/change-vector.mdx b/versioned_docs/version-7.1/server/clustering/replication/change-vector.mdx
new file mode 100644
index 0000000000..b48ec6d05f
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/replication/change-vector.mdx
@@ -0,0 +1,190 @@
+---
+title: "Change Vector"
+hide_table_of_contents: true
+sidebar_label: Change Vector
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Change Vector
+
+
+* Change vectors are the RavenDB implementation of the [Vector clock](https://en.wikipedia.org/wiki/Vector_clock) concept.
+  They give us a partial order over modifications of documents in a RavenDB cluster.
+
+* Change vectors are updated every time a document is [modified](../../../server/clustering/replication/change-vector.mdx#concurrency-control--change-vectors)
+  in any way.
+
+* In this page:
+  * [What are Change Vectors Constructed From?](../../../server/clustering/replication/change-vector.mdx#what-are-change-vectors-constructed-from)
+  * [How are Change Vectors used to Determine Order?](../../../server/clustering/replication/change-vector.mdx#how-are-change-vectors-used-to-determine-order)
+  * [Concurrency Control & Change Vectors](../../../server/clustering/replication/change-vector.mdx#concurrency-control--change-vectors)
+  * [Change Vector Comparisons](../../../server/clustering/replication/change-vector.mdx#change-vector-comparisons)
+  * [Concurrency Control at the Cluster](../../../server/clustering/replication/change-vector.mdx#concurrency-control-at-the-cluster)
+  * [Database Global Change Vector](../../../server/clustering/replication/change-vector.mdx#database-global-change-vector)
+  * [After Restoring a Database from Backup](../../../server/clustering/replication/change-vector.mdx#after-restoring-a-database-from-backup)
+
+
+## What are Change Vectors Constructed From?
+
+A change vector is constructed from entries, one per database.
+It looks like this:
+`[A:1-0tIXNUeUckSe73dUR6rjrA, B:7-kSXfVRAkKEmffZpyfkd+Zw]`
+
+This change vector is constructed from two entries,
+
+`A:1-0tIXNUeUckSe73dUR6rjrA` and `B:7-kSXfVRAkKEmffZpyfkd+Zw`.
+
+Each entry has the structure `Node tag`:`ETag`-`Database ID`, so `A:1-0tIXNUeUckSe73dUR6rjrA` means that
+the document was modified on node `A`, its local `ETag` is `1`, and the database ID is `0tIXNUeUckSe73dUR6rjrA`. Entries accumulate
+as the number of database instances in the database group grows. If an instance is no longer being used, its entry can be removed
+from the change vector to save space using the [UpdateUnusedDatabasesOperation](../../../client-api/operations/maintenance/clean-change-vector.mdx).
+
+An `ETag` is a value that is scoped to a database instance on a particular node and is guaranteed to always increase. It is used internally for many purposes and is the natural
+sort order for many operations (indexing, replication, ETL, etc).
+The database ID is used for cases where the node tag is not unique, which could happen when using external replication or restoring from backup.
+
+
+
+## How are Change Vectors used to Determine Order?
+
+Given two change vectors `X` and `Y`, we say that `X` >= `Y` if, for each entry of `X`, the ETag value is greater than or equal to that of the corresponding entry in `Y`, and `Y` has no entries that `X` doesn't have.
+Likewise, we say that `X` <= `Y` if, for each entry of `X`, the ETag value is smaller than or equal to that of the corresponding entry in `Y`, and `X` has no entries that `Y` doesn't have.
+We say that `X` <> `Y` (no order, i.e. a conflict) if `X` has an entry with a higher ETag value than `Y`, and `Y` has a different entry whose ETag value is greater than in `X`.
+
+
+
+## Concurrency Control & Change Vectors
+
+RavenDB defines some simple rules to determine how to handle concurrent operations on the same document across the cluster.
+It uses the document's change vector.
+This allows RavenDB to detect concurrency issues when writing to a document and throw an exception when the document
+has been concurrently modified on different nodes during a network partition, preventing data corruption.
+
+Every document in RavenDB has a corresponding change vector.
+This change vector is updated by RavenDB every time the document is changed.
+This happens on document creation and on any modification such as `PUT`, `PATCH`, `DELETE`, or their bulk versions.
+A delete operation will also cause RavenDB to update the document change vector;
+however, at that point, the change vector will belong to
+the document tombstone (since the document itself has already been deleted).
+
+A change vector is present in the document's metadata, and each time
+a document is updated, the server will update the change vector.
+This is mostly used internally inside RavenDB for many purposes
+(conflict detection, deciding what documents a particular
+subscription has already seen, what was sent to an ETL destination, etc)
+but can also be very useful for clients.
+
+In particular, the change vector is _guaranteed_ to change whenever the document changes and can be used as part of optimistic concurrency checks. A document modification can also specify an expected change vector for the document (with an empty change vector signifying that the document does not exist). If the expected change vector does not match the server's, all operations in the
+transaction will be aborted and no changes will be applied to any of the documents modified in the transaction.
+
+
+
+## Change Vector Comparisons
+
+Conceptually, comparing two change vectors means answering the question of which change vector refers to an earlier event.
+
+The comparison is defined as follows:
+
+* Two change vectors are equal if and only if all etags are _equal_ between corresponding node and database IDs
+* Change vector A is larger than change vector B if and only if all etags are _larger or equal_ between corresponding node and database IDs, and at least one etag is strictly _larger_
+* Change vector A conflicts with change vector B if and only if at least one etag is _larger_ in A (or exists only in A) and at least one etag is _smaller_ in A (or exists only in B) between
+  corresponding node and database IDs
+
+Note that change vectors are unsortable, for two reasons:
+
+* Change vectors are unordered collections of node tag/etag tuples, so they could be sorted in multiple ways
+* Conflicting change vectors cannot be compared
+
+### Example 1
+Let us assume two change vectors, v1 = [A:8, B:10, C:34] and v2 = [A:23, B:12, C:65].
+When we compare v1 and v2, we will do three comparisons:
+
+* A --> 8 (v1) < 23 (v2)
+* B --> 10 (v1) < 12 (v2)
+* C --> 34 (v1) < 65 (v2)
+
+All corresponding etags in v2 are greater than the ones in v1. This means that v1 < v2.
+
+### Example 2
+Let us assume two change vectors, v1 = [A:18, B:12, C:51] and v2 = [A:23, B:12, C:65].
+When we compare v1 and v2, we will do three comparisons:
+
+* A --> 18 (v1) < 23 (v2)
+* B --> 12 (v1) = 12 (v2)
+* C --> 51 (v1) < 65 (v2)
+
+All corresponding etags in v2 are greater than or equal to the ones in v1. This means that v1 < v2.
+
+### Example 3
+Let us assume two change vectors, v1 = [A:18, B:12, C:65] and v2 = [A:58, B:12, C:51].
+When we compare v1 and v2, we will do three comparisons:
+
+* A --> 18 (v1) < 58 (v2)
+* B --> 12 (v1) = 12 (v2)
+* C --> 65 (v1) > 51 (v2)
+
+Etag 'A' is smaller in v1 than in v2, while etag 'C' is larger in v1 than in v2. This means that v1 conflicts with v2.
+
+
+
+## Concurrency Control at the Cluster
+
+RavenDB implements a multi-master strategy for handling database writes. This means that it will _never_ reject a valid write to a document (under the assumption that if you tried to write
+data to the database, you're probably interested in keeping it). This behavior can lead to certain edge cases.
In particular, under a network partition scenario, it is possible for two clients
+to talk to two different RavenDB nodes and update the same document, each with an optimistic concurrency check.
+
+The concurrency check is done at the _local node_ level, to ensure that we can still process writes in the case of a network partition or partial failure scenario. That can mean that two
+writes to separate servers will both succeed, even if each write specified the same original change vector, because each server did the check independently. Under such a scenario, the
+generated change vectors for the document on each server will be different, and as soon as replication between these nodes runs, the databases will detect this conflict and resolve
+it according to your conflict resolution strategy.
+
+In practice, this kind of scenario is rare, since RavenDB attempts to direct all writes to the same node for each database under normal conditions to
+ensure that optimistic concurrency checks will always happen on the same machine.
+
+
+
+## Database Global Change Vector
+
+The database global change vector has the same form as the change vectors of the data the database contains.
+The value of each _global change vector_ entry is the maximum value of that entry across all the change vectors contained in its data.
+
+E.g., a database containing the following documents:
+
+* Document `A` with _change vector_ `[A:1-0tIXNUeUckSe73dUR6rjrA, B:7-kSXfVRAkKEmffZpyfkd+Zw]`
+* Document `B` with _change vector_ `[B:3-kSXfVRAkKEmffZpyfkd+Zw, C:13-ASFfVrAllEmzzZpyrtlrGq]`
+
+Will have the following _global change vector_:
+
+* `[A:1-0tIXNUeUckSe73dUR6rjrA, B:7-kSXfVRAkKEmffZpyfkd+Zw, C:13-ASFfVrAllEmzzZpyrtlrGq]`
+
+The global change vector is used in the replication process to determine which data is already contained by the database.
+A document whose entries are all lower than or equal to the corresponding entries of the _global change vector_ is considered contained.
+
+
+
+Note that data considered contained by a database is not necessarily present in it; it may already have been overwritten by more up-to-date data.
+
+
+
+
+
+## After Restoring a Database from Backup
+
+* [Snapshot backups](../../../client-api/operations/maintenance/backup/backup-overview.mdx#snapshot) save [change vector data](../../../server/ongoing-tasks/backup-overview.mdx#backup-contents).
+* [Logical backups](../../../client-api/operations/maintenance/backup/backup-overview.mdx#logical-backup) do not save change vector data.
+  Restoring a database from a logical backup will therefore renew each document's change vector and its frequently [incrementing ETag](../../../server/clustering/replication/change-vector.mdx#what-are-change-vectors-constructed-from),
+  rather than restore the original value.
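+
+To make the comparison rules and examples above concrete, here is a minimal standalone sketch (an illustration only, not RavenDB code; it uses simplified change vectors without database IDs):
+
+```csharp
+using System;
+using System.Collections.Generic;
+using System.Linq;
+
+public enum VectorOrder { Equal, Smaller, Larger, Conflict }
+
+public static class ChangeVectorMath
+{
+    // Parse a simplified change vector such as "A:8, B:10, C:34"
+    private static Dictionary<string, long> Parse(string cv) =>
+        cv.Split(',')
+          .Select(e => e.Trim().Split(':'))
+          .ToDictionary(p => p[0], p => long.Parse(p[1]));
+
+    public static VectorOrder Compare(string a, string b)
+    {
+        var x = Parse(a);
+        var y = Parse(b);
+        bool anyLarger = false, anySmaller = false;
+
+        // A missing entry counts as etag 0, so an entry that exists
+        // on one side only makes that side larger for that node
+        foreach (var tag in x.Keys.Union(y.Keys))
+        {
+            var xEtag = x.TryGetValue(tag, out var xv) ? xv : 0;
+            var yEtag = y.TryGetValue(tag, out var yv) ? yv : 0;
+            if (xEtag > yEtag) anyLarger = true;
+            if (xEtag < yEtag) anySmaller = true;
+        }
+
+        if (anyLarger && anySmaller) return VectorOrder.Conflict; // Example 3
+        if (anyLarger) return VectorOrder.Larger;
+        if (anySmaller) return VectorOrder.Smaller;               // Examples 1 & 2
+        return VectorOrder.Equal;
+    }
+}
+```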
+
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/replication/replication-and-embedded-instance.mdx b/versioned_docs/version-7.1/server/clustering/replication/replication-and-embedded-instance.mdx
new file mode 100644
index 0000000000..1e88fe5069
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/replication/replication-and-embedded-instance.mdx
@@ -0,0 +1,158 @@
+---
+title: "Replication: Using Embedded Instance"
+hide_table_of_contents: true
+sidebar_label: Replication And Embedded Instances
+sidebar_position: 4
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Replication: Using Embedded Instance
+
+## Overview
+[Replication](../../../server/clustering/replication/replication-overview.mdx) works by using long-living TCP connections between cluster nodes, or, in the case of [External Replication](../../../server/ongoing-tasks/external-replication.mdx), between instances external to the cluster.
+Essentially, Embedded RavenDB is the same as non-embedded; the only difference is the hosting process. [Here](../../../server/embedded.mdx) you can read more in-depth about Embedded RavenDB functionality.
+
+## Configuring Replication between Embedded Instance and a Cluster
+## Examples in this page
+  * [Configuring cluster membership of embedded instance through the Studio](../../../server/clustering/replication/replication-and-embedded-instance.mdx#configuring-cluster-membership-of-embedded-instance-through-the-studio)
+  * [Programmatically configuring cluster membership of embedded instance](../../../server/clustering/replication/replication-and-embedded-instance.mdx#programmatically-configuring-cluster-membership-of-embedded-instance)
+  * [Configuring embedded instance as External Replication destination through the Studio](../../../server/clustering/replication/replication-and-embedded-instance.mdx#configuring-embedded-instance-as-external-replication-destination-through-the-studio)
+  * [Programmatically configuring embedded instance as External Replication destination](../../../server/clustering/replication/replication-and-embedded-instance.mdx#programmatically-configuring-embedded-instance-as-external-replication-destination)
+
+
+
+## Configuring Cluster Membership of Embedded Instance through the Studio
+One possibility is to configure it through [the Studio](../../../studio/cluster/setting-a-cluster.mdx#add-another-node-to-the-cluster), by adding the embedded instance to the cluster and then adding the replicated database to the relevant [database group](../../../server/clustering/distribution/distributed-database.mdx).
+The embedded instance's Studio can be opened in the following way:
+
+
+{`//this starts the embedded server
+EmbeddedServer.Instance.StartServer(new ServerOptions
+\{
+    ServerUrl = "http://localhost:8090" //if we don't specify a port, a random port will be used
+\});
+
+//this opens the Embedded RavenDB Studio in the default browser
+EmbeddedServer.Instance.OpenStudioInBrowser();
+`}
+
+
+
+## Programmatically Configuring Cluster Membership of Embedded Instance
+Another possibility is to configure it programmatically.
+
+
+
+{`//first, initialize a connection with one of the cluster nodes
+var ravenClusterNodeUrl = "http://localhost:8080";
+using (var store = new DocumentStore
+\{
+    Urls = new[] \{ravenClusterNodeUrl\},
+    Database = "Northwind"
+\})
+\{
+    store.Initialize();
+
+    //first, start the embedded server
+    //note: by default the embedded server will use a random port.
+    //      if there is a need to replicate TO the embedded server, we need to specify the port directly
+    EmbeddedServer.Instance.StartServer(new ServerOptions
+    \{
+        ServerUrl = "http://localhost:8090"
+    \});
+
+    //then, add the embedded instance to the existing cluster
+    //(programmatically, this is done via REST)
+    var embeddedServerUrl =
+        (await EmbeddedServer.Instance.GetServerUriAsync().ConfigureAwait(false)).ToString();
+    var addToClusterCommandUrl = $"\{ravenClusterNodeUrl\}/admin/cluster/node?url=\{embeddedServerUrl\}";
+    await store.GetRequestExecutor()
+        .HttpClient
+        .SendAsync(
+            new HttpRequestMessage(
+                HttpMethod.Put,
+                addToClusterCommandUrl)).ConfigureAwait(false);
+
+    var getTopologyCommand = new GetClusterTopologyCommand();
+    var embeddedStore = EmbeddedServer.Instance.GetDocumentStore("Northwind");
+    string embeddedTag;
+    using (var session = embeddedStore.OpenSession())
+    \{
+        //fetch topology info from the embedded instance, so we can get the tag assigned by the cluster
+        await embeddedStore.GetRequestExecutor()
+            .ExecuteAsync(getTopologyCommand, session.Advanced.Context)
+            .ConfigureAwait(false);
+        embeddedTag = getTopologyCommand.Result.Topology.LastNodeId;
+    \}
+
+    //this sends the command to add the database "Northwind" to its database group
+    await store.Maintenance
+        .Server
+        .SendAsync(
+            new AddDatabaseNodeOperation(databaseName: "Northwind",
+                node: embeddedTag))
+        .ConfigureAwait(false);
+\}
+`}
+
+
+
+## Configuring Embedded Instance as External Replication Destination through the Studio
+Another possibility is to configure the embedded instance as an [external replication](../../../server/ongoing-tasks/external-replication.mdx) source or destination.
+This can be done in [RavenDB Studio](../../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx).
+
+External Replication is configured one-way only, so in order to create two-way external replication, we need to configure External Replication Tasks on both RavenDB instances.
+
+
+## Programmatically Configuring Embedded Instance as External Replication Destination
+It is also possible to configure [external replication](../../../server/ongoing-tasks/external-replication.mdx) programmatically, so the embedded instance can serve as a source, a destination, or both.
+
+
+{`//first, initialize a connection with one of the cluster nodes
+var ravenClusterNodeUrl = "http://localhost:8080";
+using (var store = new DocumentStore
+\{
+    Urls = new[] \{ravenClusterNodeUrl\},
+    Database = "Northwind"
+\})
+\{
+    store.Initialize();
+
+    //first, start the embedded server
+    EmbeddedServer.Instance.StartServer(new ServerOptions
+    \{
+        ServerUrl = "http://localhost:8090",
+        AcceptEula = true
+    \});
+
+    var embeddedServerUrl =
+        (await EmbeddedServer.Instance.GetServerUriAsync().ConfigureAwait(false)).ToString();
+
+    //create the watcher definition that will be added to the existing cluster
+    var externalReplicationWatcher = new ExternalReplication(
+        database: "Northwind",
+        connectionStringName: "Embedded Northwind Instance");
+
+    //create the connection string for the embedded instance on the existing cluster
+    await store.Maintenance.SendAsync(
+        new PutConnectionStringOperation<RavenConnectionString>(new RavenConnectionString
+        \{
+            Name = externalReplicationWatcher.ConnectionStringName,
+            Database = externalReplicationWatcher.Database,
+            TopologyDiscoveryUrls = new[] \{embeddedServerUrl\} //urls to discover the topology at the destination
+        \})).ConfigureAwait(false);
+
+    //create the External Replication task from the cluster to the embedded RavenDB instance
+    await store.Maintenance.SendAsync(new UpdateExternalReplicationOperation(externalReplicationWatcher))
+        .ConfigureAwait(false);
+\}
+`}
+
+
+
diff --git a/versioned_docs/version-7.1/server/clustering/replication/replication-conflicts.mdx b/versioned_docs/version-7.1/server/clustering/replication/replication-conflicts.mdx
new file mode 100644
index 0000000000..d5e6b5051e
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/replication/replication-conflicts.mdx
@@ -0,0 +1,134 @@
+---
+title: "Replication Conflicts"
+hide_table_of_contents: true
+sidebar_label: Replication Conflicts
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Replication Conflicts
+
+## What is a Conflict?
+
+A conflict occurs when the same document is updated concurrently on two different nodes.
+This can happen because of a network split, or because of several client updates that each talked to different
+nodes faster than the information could be replicated between the nodes.
+
+In a distributed system, we can either choose to run a consensus (which requires consulting a majority on every decision)
+or accept the potential for conflicts.
+
+For document writes, RavenDB chooses to accept conflicts as a trade-off for always being able to accept writes on any node.
+
+## Conflict Detection
+Each document in a RavenDB cluster has a [Change Vector](../../../server/clustering/replication/change-vector.mdx) which is used for conflict detection.
+
+When the server receives an incoming replication batch, it compares the [Change Vector](../../../server/clustering/replication/change-vector.mdx)
+of each incoming document with the [Change Vector](../../../server/clustering/replication/change-vector.mdx) of the local document.
+
+Let's take _remote_cv_ to be the change vector of a remote document, and _local_cv_ to be the change vector of a local document.
+The [Comparison](../../../server/clustering/replication/change-vector.mdx#change-vector-comparisons) of the [Change Vectors](../../../server/clustering/replication/change-vector.mdx) may yield three possible results:
+
+* _remote_cv_ <= _local_cv_ -> Nothing to do, the local document is more up-to-date.
+* _remote_cv_ > _local_cv_ -> The remote document is more recent than the local one; replace the local document with the remote one.
+* _remote_cv_ **conflicts** with _local_cv_ -> Try to resolve the conflict.
+
+When there is a conflict between two or more document versions, RavenDB will try multiple steps to resolve it. If any of the steps succeeds, the conflict resolution ends and the resolved document is written to storage.
+
+1. If the `ResolveToLatest` flag is set (by default it is `true`), resolve the conflict to the document variant whose [Last Modified](../../../client-api/session/how-to/get-entity-last-modified.mdx) property is the latest.
+2. Check whether the document contents are identical; if so, there is no conflict, and the change vectors of the two documents are merged.
+
+The identity check applies to a [Tombstone](../../../glossary/tombstone.mdx) as well, and it will always resolve to the local one, since tombstones are always considered equal.
+
+3. Try to resolve the conflict by using a script, which is set up by configuring a [Conflict Resolver](../../../server/clustering/replication/replication-conflicts.mdx#conflict-resolution-script).
+4. If all else fails, record the conflicting document variants as "Conflicted Documents", which will have to be resolved [Manually](../../../server/clustering/replication/replication-conflicts.mdx#manually-resolving-conflicts).
+
+## Conflict Resolution Script
+A resolution script is defined per collection and has the following input arguments:
+
+| Name | Type | Description |
+| - | - | - |
+| `docs` | Array of Objects | An unsorted array of the conflicted documents, excluding the `Tombstones`. |
+| `hasTombstone` | boolean | Indicates whether there is a `Tombstone` among the conflicted documents. |
+| `resolveToTombstone` | string | Returning this value resolves the conflict to a `Tombstone`. |
+
+
+The value returned from the script can be:
+
+* An object (any object), which will be the conflict resolution.
+* `null` or simply `return;`, which will leave the conflict as is.
+* The `resolveToTombstone` string, which will resolve the conflict to a `Tombstone`.
+
+
+If the script encounters an exception, the execution is aborted and the conflict remains.
+
+
+### Configuring Conflict Resolution Using the Client
+Setting up a conflict resolution strategy from the client is done by sending the cluster-level operation [ModifyConflictSolverOperation](../../../client-api/operations/server-wide/modify-conflict-solver.mdx), which is a [Raft command](../../../glossary/raft-command.mdx).
+
+### Configuring Conflict Resolution Using the Management Studio
+Conflict resolution scripts can also be set up via the Management Studio.
+Using resolution scripts, we can implement custom logic to be executed when a conflict occurs.
+For example, given multiple conflicting variants of a Northwind _Order_ document, the following script merges the order lines so that no items are missing and the quantity ordered for each item is maximal.
+
+{`var final = docs[0];
+
+for(var i = 1; i < docs.length; i++)
+\{
+    var currentOrder = docs[i];
+    for(var j = 0; j < currentOrder.Lines.length; j++)
+    \{
+        var item = currentOrder.Lines[j];
+        var match = final.Lines
+            .find(l => l.Product == item.Product);
+        if(!match)
+        \{
+            // not in the merged Order yet, add it
+            final.Lines.push(item);
+        \}
+        else
+        \{
+            match.Quantity = Math.max(
+                item.Quantity,
+                match.Quantity);
+        \}
+    \}
+\}
+
+return final;
+`}
+
+
+
+This screenshot shows the conflict resolution screen where the above script would be entered.
+![Conflict Resolution Script in Management Studio](./assets/conflict-resolution-script-in-studio.jpg)
+
+
+
+## Manually Resolving Conflicts
+In case one or more documents are in a "conflicted" state, we need to resolve the conflict manually.
+
+This can be done in two ways:
+
+* A **PUT** operation with the "conflicted" document Id will cause the conflict to be resolved to the value from the **PUT** operation.
+* The conflict can be resolved via the Management Studio, where the document that will resolve the conflict can be picked manually from all conflicting versions. The Studio will attempt to merge the documents as much as possible; where that is not possible, the Studio will leave merge tags marking the conflicting areas.
+
+
+![Resolve Conflicted Document Screen](./assets/resolve-conflicted-document-screen.jpg)
+
+
+In case any document is conflicted and the conflict is not resolved, the conflicting document variants are stored as revisions of their original document while the document itself is removed.
+
+
+In this example, once we remove the merge tags from the document and press "Resolve and save",
+the resulting document will look like the following:
+
+![Document after resolved conflict](./assets/resolve-conflicted-document-screen2.jpg)
+
diff --git a/versioned_docs/version-7.1/server/clustering/replication/replication-overview.mdx b/versioned_docs/version-7.1/server/clustering/replication/replication-overview.mdx
new file mode 100644
index 0000000000..3f08dc1d67
--- /dev/null
+++ b/versioned_docs/version-7.1/server/clustering/replication/replication-overview.mdx
@@ -0,0 +1,219 @@
+---
+title: "Replication Overview"
+hide_table_of_contents: true
+sidebar_label: Replication Overview
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Replication Overview
+
+
+* Replication in RavenDB is the process of __transferring data__ from one database instance to another.
+
+* __Data is highly available__ as _Reads & Writes_ can be done on any cluster node.
+  _Writes_ are always accepted and are automatically replicated to the other nodes,
+  either within the database-group or to the nodes defined by a replication task
+  (see the different types below).
+
+* __Conflicts__, which may arise when the data replicates,
+  are resolved according to the [conflict resolution](../../../server/clustering/replication/replication-conflicts.mdx) policy defined on the receiving end.
+
+* In this page:
+  * [Replication types](../../../server/clustering/replication/replication-overview.mdx#replication-types)
+      * [Internal replication](../../../server/clustering/replication/replication-overview.mdx#internal-replication)
+      * [External replication](../../../server/clustering/replication/replication-overview.mdx#external-replication)
+      * [Hub/Sink replication](../../../server/clustering/replication/replication-overview.mdx#hubsink-replication)
+  * [What is replicated](../../../server/clustering/replication/replication-overview.mdx#what-is-replicated)
+  * [How replication works](../../../server/clustering/replication/replication-overview.mdx#how-replication-works)
+  * [Replication & transaction boundary](../../../server/clustering/replication/replication-overview.mdx#replication--transaction-boundary)
+
+
+## Replication types
+
+#### Internal replication
+
+* __Replicate between__: All database-group nodes.
+  __Handled by__: Automatically handled by the [Database-Group](../../../studio/database/settings/manage-database-group.mdx) nodes.
+  __Direction__: Master-to-master replication among all database-group nodes.
+  __Filtering__: No filtering is available. Data is replicated as it exists on the source.
+  __Conflicts__: Handled by the database resolution policy, which is the same for all the database instances.
+  __Delays__: There are no delays; data is replicated immediately.
+
+* __Usage__: This replication keeps the database data in sync across the database-group nodes.
+  The data is highly available as _reads & writes_ can be done on any of the nodes.
+
+* You can _write_ to any node in the database group;
+  that _write_ will be recorded and automatically replicated to all other nodes in the database-group.
+
+#### External replication
+
+* __Replicate between__: Two databases that are typically set up on different clusters.
+  __Handled by__: Handled by the ongoing [External Replication Task](../../../server/ongoing-tasks/external-replication.mdx) defined by the user.
+  __Direction__: One-way replication.
+  __Filtering__: No filtering is available. Data is replicated as it exists on the source.
+  __Conflicts__: Handled as defined by the destination database resolution policy.
+  __Delays__: Replication can be delayed as defined within the external-replication task.
+
+* __Usage__: This replication allows you to have a live database replica in another cluster,
+  which can be used as a failover target (see the sketch at the end of this section).
+
+* It is possible to define two such tasks on separate clusters that will replicate to one another.
+
+#### Hub/Sink replication
+
+* __Replicate between__: Multiple Sinks that connect to a single Hub on different clusters.
+  __Handled by__: Handled by the ongoing [Hub/Sink Replication Tasks](../../../server/ongoing-tasks/hub-sink-replication.mdx) defined by the user.
+  __Direction__: Hub to Sink only, Sink to Hub only, or both directions (as defined by the task).
+  __Filtering__: Document filtering is available when working with secure servers.
+  __Conflicts__: Handled as defined by the destination database resolution policy.
+  __Delays__: Replication can be delayed as defined within the Hub/Sink tasks.
+
+* __Usage__: Data is replicated between the Hub and all Sinks connected to that Hub.
+  The connection is always triggered by the Sink.
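+
+For example, here is a rough sketch of defining an external replication task from the Node.js client.
+The operation names used below (`PutConnectionStringOperation`, `UpdateExternalReplicationOperation`) mirror the C# client API, and the URLs, database name, and task name are hypothetical placeholders - verify the exact object shapes against your client version:
+
+
+
+{`import \{ DocumentStore, PutConnectionStringOperation,
+         UpdateExternalReplicationOperation \} from "ravendb";
+
+// Hypothetical source-cluster URL and database name
+const store = new DocumentStore(["https://a.source-cluster.example"], "Northwind");
+store.initialize();
+
+async function setupExternalReplication() \{
+    // 1. Define a connection string that points at the destination database
+    await store.maintenance.send(new PutConnectionStringOperation(\{
+        type: "Raven",
+        name: "northwind-replica-cs",
+        database: "Northwind",
+        topologyDiscoveryUrls: ["https://a.destination-cluster.example"]
+    \}));
+
+    // 2. Create the ongoing external-replication task that uses the connection string
+    await store.maintenance.send(new UpdateExternalReplicationOperation(\{
+        name: "Replicate-To-DR",
+        connectionStringName: "northwind-replica-cs",
+        delayReplicationFor: "00:10:00" // optional delay, as described above
+    \}));
+\}
+
+setupExternalReplication().catch(console.error);
+`}
+
+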
+
+
+
+## What is replicated
+
+
+__The following database-items are replicated__:
+
+* Documents
+* Revisions
+* Attachments
+* Conflicts
+* Tombstones
+* Counters
+* Time Series
+
+__Content replicated__:
+
+* The content of the replicated items is Not changed.
+* If the content needs to be transformed, consider using [ETL tasks](../../../studio/database/tasks/ongoing-tasks/ravendb-etl-task.mdx#ravendb-etl-task--vs--replication-task) that use transformation scripts.
+
+
+__The following cluster-level features are Not replicated__:
+
+* Index definitions and index data
+* Ongoing tasks definitions
+* Compare-exchange items
+* Identities
+* Conflict resolution scripts
+* __With internal replication__:
+  When you define a cluster-level behavior, e.g. create an index,
+  consistency between the database instances in the database-group is achieved by the [Raft Protocol](../../../server/clustering/rachis/what-is-rachis.mdx).
+
+* __With a replication task__:
+  Replication controls only the flow of the data without dictating how it's going to be processed on the receiving end,
+  thus different configurations can be defined on the source cluster and on the destination cluster.
+
+
+
+
+## How replication works
+
+* Each database instance holds a __TCP connection__ to each of the other database instance destinations.
+  With internal replication - the destinations are all other database-group nodes.
+  With a replication task - the destinations are the nodes defined in the task.
+
+* Whenever there is a _'write'_ on one instance,
+  it will be sent to all the other nodes in the database-group immediately over this connection.
+  If a replication task is also defined, then the data will also replicate to the destination database.
+
+* Data is sent asynchronously.
+  If a database instance is temporarily unable to replicate the data, it will still accept the write and send it later.
+
+* Each database instance has its own local __database-ETag__.
+  This Etag increases on every storage write.
+  The item that triggered the write gets the next consecutive number.
+  The order in which items are replicated is set by their __item-ETag__, from low (oldest) to high (newest).
+
+* The data is sent in batches from the source to the destination.
+  Once a batch is processed successfully on the destination side,
+  the destination records the ETag of the last item it received in that batch ( __last-accepted-ETag__ ).
+
+* The destination sends a response back to the source with that last-accepted-ETag
+  so that the source knows where to continue sending the next batch from.
+
+* In case of a replication failure, when sending the next batch, replication will resume from the item
+  that has this last-accepted-ETag, which is known from the previous successful batch.
+
+
+
+## Replication & transaction boundary
+
+
+__Transactions atomicity__
+
+* RavenDB guarantees that modifications made in the same transaction will always be replicated
+  to the destination in a __single batch__ and won't be broken into separate batches.
+
+* This is true for both the internal-replication and the replication-tasks.
+
+
+
+__Replication & cluster-wide transactions__
+
+* A [cluster-wide transaction](../../../client-api/session/cluster-transaction/overview.mdx), which is implemented by the [Raft Protocol](../../../server/clustering/rachis/consensus-operations.mdx#consensus-operations),
+  is either persisted on all database-group nodes or rolled back on all of them upon failure.
+
+* After a cluster consensus is reached, the Raft command to be executed is propagated to all nodes.
+  When the command is executed locally on a node, if the items that are persisted are of the [type that replicates](../../../server/clustering/replication/replication-overview.mdx#what-is-replicated),
+  then the node will __replicate__ them to the other nodes via the replication process.
+
+* A node that receives such replication will accept this write,
+  unless it has already committed it through a Raft command it received before.
+
+
+
+__Transaction boundaries in single-node transactions__
+
+* If there are several document modifications in the same transaction, they will be sent in the same replication
+  batch, keeping the transaction boundary on the destination as well.
+
+* However, when a document is modified in two separate transactions,
+  and replication of the __1st__ transaction has not yet occurred,
+  then that document will Not be sent when the __1st__ transaction is replicated;
+  it will be sent with the __2nd__ transaction instead (see the sketch at the end of this page).
+
+* If you care about preserving all the modifications that were made, enable revisions:
+  When a revision is created for a document, it is written as part of the same transaction as the document.
+  The revision is then replicated along with the document in the same indivisible batch.
+
+
+
+
+#### How revisions replication helps data consistency
+
+  Consider a scenario in which two documents, `Users/1` and `Users/2`, are **created in the same transaction**,
+  and then `Users/2` is **modified in a different transaction**.
+
+  * **How will `Users/1` and `Users/2` be replicated?**
+    When RavenDB creates replication batches, it keeps the
+    transaction boundary by always sending documents that were modified in the same transaction
+    **in the same batch**.
+    In our scenario, however, `Users/2` was modified after its creation; it
+    is now recognized by its Etag as part of a different transaction than
+    that of `Users/1`, and the two documents may be replicated in two different
+    batches, `Users/1` first and `Users/2` later.
+    If this happens, `Users/1` will be replicated to the destination without `Users/2`,
+    though they were created in the same transaction, causing a data inconsistency that
+    will persist until the arrival of `Users/2`.
+
+  * **The scenario will be different if revisions are enabled.**
+    In this case, the creation of `Users/1` and `Users/2` will also create revisions
+    for them both. These revisions will continue to carry the Etag given to them
+    at their creation, and will be replicated in the same batch.
+    When the batch arrives at the destination, data consistency will be kept:
+    `Users/1` will be stored, and so will the `Users/2` revision, which will become
+    a live `Users/2` document.
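+
+The following is a minimal sketch of the scenario above, using the Node.js client
+(the server URL and database name are hypothetical, and documents are stored as plain objects for brevity):
+
+
+
+{`import \{ DocumentStore \} from "ravendb";
+
+const store = new DocumentStore(["http://localhost:8080"], "Northwind");
+store.initialize();
+
+async function demo() \{
+    // Transaction 1: Users/1 and Users/2 are created together,
+    // so they share a transaction boundary and replicate in a single batch.
+    \{
+        const session = store.openSession();
+        await session.store(\{ name: "John" \}, "Users/1");
+        await session.store(\{ name: "Jane" \}, "Users/2");
+        await session.saveChanges(); // one transaction
+    \}
+
+    // Transaction 2: Users/2 is modified on its own.
+    // Its Etag now belongs to a later transaction, so it may replicate
+    // in a later batch than Users/1 - unless revisions are enabled.
+    \{
+        const session = store.openSession();
+        const user2: any = await session.load("Users/2");
+        user2.name = "Jane D.";
+        await session.saveChanges();
+    \}
+\}
+
+demo().catch(console.error);
+`}
+
+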
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/_category_.json b/versioned_docs/version-7.1/server/configuration/_category_.json
new file mode 100644
index 0000000000..9bfee48f88
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 3,
+    "label": "Configuration"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/configuration/ai-integration-configuration.mdx b/versioned_docs/version-7.1/server/configuration/ai-integration-configuration.mdx
new file mode 100644
index 0000000000..18c96fcc28
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/ai-integration-configuration.mdx
@@ -0,0 +1,62 @@
+---
+title: "Configuration: AI Integration"
+hide_table_of_contents: true
+sidebar_label: AI Integration Configuration
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: AI Integration
+
+
+* The following configuration keys apply to integrating **AI-powered embeddings generation**:
+
+  * Embeddings can be generated from your document content via [AI-powered tasks](../../ai-integration/generating-embeddings/overview.mdx) and stored in a dedicated collection in the database.
+  * When performing vector search queries, embeddings are also generated from the search term to compare against the stored vectors.
+
+* In this article:
+  * [Ai.Embeddings.MaxBatchSize](../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxbatchsize)
+  * [Ai.Embeddings.MaxConcurrentBatches](../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxconcurrentbatches)
+  * [Ai.Embeddings.MaxFallbackTimeInSec](../../server/configuration/ai-integration-configuration.mdx#aiembeddingsmaxfallbacktimeinsec)
+
+
+## Ai.Embeddings.MaxBatchSize
+
+The maximum number of documents processed in a single batch by an embeddings generation task.
+Higher values may improve throughput but can increase latency and require more resources and higher limits from the embeddings generation service.
+
+- **Type**: `int`
+- **Default**: `128`
+- **Scope**: Server-wide or per database
+
+
+
+## Ai.Embeddings.MaxConcurrentBatches
+
+The maximum number of **query embedding batches** that can be processed concurrently.
+This setting controls the degree of parallelism when sending query embedding requests to AI providers.
+Higher values may improve throughput but can increase resource usage and may trigger rate limits.
+
+- **Type**: `int`
+- **Default**: `4`
+- **Min value**: `1`
+- **Scope**: Server-wide or per database
+
+
+
+## Ai.Embeddings.MaxFallbackTimeInSec
+
+The maximum time (in seconds) the embeddings generation task remains suspended (fallback mode) following a connection failure to the embeddings generation service.
+Once this time expires, the system will retry the connection automatically.
+
+- **Type**: `int`
+- **Default**: `60 * 15`
+- **Scope**: Server-wide or per database
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/backup-configuration.mdx b/versioned_docs/version-7.1/server/configuration/backup-configuration.mdx
new file mode 100644
index 0000000000..63f164ba99
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/backup-configuration.mdx
@@ -0,0 +1,115 @@
+---
+title: "Configuration: Backup"
+hide_table_of_contents: true
+sidebar_label: Backup Options
+sidebar_position: 3
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Backup
+
+
+* Configuration options for backups (both on-premises and RavenDB Cloud).
+
+* Another relevant server configuration option can be found [here](../../server/configuration/server-configuration.mdx#servercpucreditsexhaustionbackupdelayinmin).
+
+* In this page:
+  * [Backup.TempPath](../../server/configuration/backup-configuration.mdx#backuptemppath)
+  * [Backup.LocalRootPath](../../server/configuration/backup-configuration.mdx#backuplocalrootpath)
+  * [Backup.AllowedDestinations](../../server/configuration/backup-configuration.mdx#backupalloweddestinations)
+  * [Backup.AllowedAwsRegions](../../server/configuration/backup-configuration.mdx#backupallowedawsregions)
+  * [Backup.MaxNumberOfConcurrentBackups](../../server/configuration/backup-configuration.mdx#backupmaxnumberofconcurrentbackups)
+  * [Backup.ConcurrentBackupsDelayInSec](../../server/configuration/backup-configuration.mdx#backupconcurrentbackupsdelayinsec)
+  * [Backup.LowMemoryBackupDelayInMin](../../server/configuration/backup-configuration.mdx#backuplowmemorybackupdelayinmin)
+
+
+## Backup.TempPath
+
+Use this setting to specify a different path for the temporary backup files.
+By default, backup temporary files are written under the Database directory, or under [Storage.TempPath](../../server/configuration/storage-configuration.mdx) if defined.
+Learn more about the RavenDB directory structure [here](../../server/storage/directory-structure.mdx).
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide or per database
+
+
+
+## Backup.LocalRootPath
+
+Local backups can only be created under this root path.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+
+## Backup.AllowedDestinations
+
+Semicolon-separated list of allowed backup destinations. If not specified, all destinations are allowed.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+Possible values:
+
+- `None`
+- `Local`
+- `Azure`
+- `AmazonGlacier`
+- `AmazonS3`
+- `FTP`
+
+
+
+## Backup.AllowedAwsRegions
+
+Semicolon-separated list of allowed AWS regions. If not specified, all regions are allowed.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+
+## Backup.MaxNumberOfConcurrentBackups
+
+Maximum number of concurrent backup tasks.
+
+- **Type**: `int`
+- **Default**: the number of CPU cores assigned to this server, divided by 2.
+  (By default a server with 1, 2, or 3 CPU cores can perform 1 backup at a time.
A server with 4 or 5 cores can perform 2 backups at a time, 6 or 7 cores can perform 3, and so on) +- **Scope**: Server-wide only + + + +## Backup.ConcurrentBackupsDelayInSec + +Number of seconds to delay the backup after hitting the maximum number of concurrent backups limit (see `MaxNumberOfConcurrentBackups` above). + +- **Type**: `TimeSetting` +- **TimeUnit**: `TimeUnit.Seconds` +- **Default**: `30` +- **Scope**: Server-wide only + + + +## Backup.LowMemoryBackupDelayInMin + +Number of minutes to delay the backup if the server enters a low memory state. + +- **Type**: `TimeSetting` +- **TimeUnit**: `TimeUnit.Minutes` +- **Default**: `10` +- **Scope**: Server-wide only + + + diff --git a/versioned_docs/version-7.1/server/configuration/cluster-configuration.mdx b/versioned_docs/version-7.1/server/configuration/cluster-configuration.mdx new file mode 100644 index 0000000000..b448e5b269 --- /dev/null +++ b/versioned_docs/version-7.1/server/configuration/cluster-configuration.mdx @@ -0,0 +1,290 @@ +--- +title: "Configuration: Cluster" +hide_table_of_contents: true +sidebar_label: Cluster Configuration +sidebar_position: 4 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Configuration: Cluster + + +* **Avoid using different cluster configurations across nodes:** + Configuration mismatches can lead to interaction problems between nodes. + +* If you must set different configurations for individual nodes, + we recommend testing the setup in a development environment first to ensure proper interaction between all nodes. 
+ + + + + +* In this page: + * Server-wide scope: + [Cluster.CompareExchangeExpiredDeleteFrequencyInSec](../../server/configuration/cluster-configuration.mdx#clustercompareexchangeexpireddeletefrequencyinsec) + [Cluster.CompareExchangeTombstonesCleanupIntervalInMin](../../server/configuration/cluster-configuration.mdx#clustercompareexchangetombstonescleanupintervalinmin) + [Cluster.ElectionTimeoutInMs](../../server/configuration/cluster-configuration.mdx#clusterelectiontimeoutinms) + [Cluster.HardDeleteOnReplacement](../../server/configuration/cluster-configuration.mdx#clusterharddeleteonreplacement) + [Cluster.LogHistoryMaxEntries](../../server/configuration/cluster-configuration.mdx#clusterloghistorymaxentries) + [Cluster.MaxChangeVectorDistance](../../server/configuration/cluster-configuration.mdx#clustermaxchangevectordistance) + [Cluster.MaxClusterTransactionCompareExchangeTombstoneCheckIntervalInMin](../../server/configuration/cluster-configuration.mdx#clustermaxclustertransactioncompareexchangetombstonecheckintervalinmin) + [Cluster.MaxSizeOfSingleRaftCommandInMb](../../server/configuration/cluster-configuration.mdx#clustermaxsizeofsingleraftcommandinmb) + [Cluster.MaximalAllowedClusterVersion](../../server/configuration/cluster-configuration.mdx#clustermaximalallowedclusterversion) + [Cluster.OnErrorDelayTimeInMs](../../server/configuration/cluster-configuration.mdx#clusteronerrordelaytimeinms) + [Cluster.OperationTimeoutInSec](../../server/configuration/cluster-configuration.mdx#clusteroperationtimeoutinsec) + [Cluster.ReceiveFromWorkerTimeoutInMs](../../server/configuration/cluster-configuration.mdx#clusterreceivefromworkertimeoutinms) + [Cluster.StatsStabilizationTimeInSec](../../server/configuration/cluster-configuration.mdx#clusterstatsstabilizationtimeinsec) + [Cluster.SupervisorSamplePeriodInMs](../../server/configuration/cluster-configuration.mdx#clustersupervisorsampleperiodinms) + [Cluster.TcpReceiveBufferSizeInBytes](../../server/configuration/cluster-configuration.mdx#clustertcpreceivebuffersizeinbytes) + [Cluster.TcpSendBufferSizeInBytes](../../server/configuration/cluster-configuration.mdx#clustertcpsendbuffersizeinbytes) + [Cluster.TcpTimeoutInMs](../../server/configuration/cluster-configuration.mdx#clustertcptimeoutinms) + [Cluster.TimeBeforeAddingReplicaInSec](../../server/configuration/cluster-configuration.mdx#clustertimebeforeaddingreplicainsec) + [Cluster.TimeBeforeMovingToRehabInSec](../../server/configuration/cluster-configuration.mdx#clustertimebeforemovingtorehabinsec) + [Cluster.TimeBeforeRotatingPreferredNodeInSec](../../server/configuration/cluster-configuration.mdx#clustertimebeforerotatingpreferrednodeinsec) + [Cluster.WorkerSamplePeriodInMs](../../server/configuration/cluster-configuration.mdx#clusterworkersampleperiodinms) + * Server-wide, or database scope: + [Cluster.DisableAtomicDocumentWrites](../../server/configuration/cluster-configuration.mdx#clusterdisableatomicdocumentwrites) + [Cluster.MaxClusterTransactionBatchSize](../../server/configuration/cluster-configuration.mdx#clustermaxclustertransactionbatchsize) + + +## Cluster.CompareExchangeExpiredDeleteFrequencyInSec + +Time (in seconds) between cleanup of expired compare exchange items. + +- **Type**: `int` +- **Default**: `60` +- **Scope**: Server-wide only + + + +## Cluster.CompareExchangeTombstonesCleanupIntervalInMin + +Time (in minutes) between cleanup of compare exchange tombstones. 
+
+- **Type**: `int`
+- **Default**: `10`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.ElectionTimeoutInMs
+
+Timeout (in milliseconds) within which the node expects to receive a heartbeat from the leader.
+
+- **Type**: `int`
+- **Default**: `300`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.HardDeleteOnReplacement
+
+Set hard/soft delete for a database that was removed by the observer from the cluster topology in order to maintain the replication factor.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.LogHistoryMaxEntries
+
+Maximum number of log entries to keep in the history log table.
+
+- **Type**: `int`
+- **Default**: `2048`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.MaxChangeVectorDistance
+
+Exceeding the allowed change vector distance between two nodes will move the lagged node to rehab.
+
+- **Type**: `long`
+- **Default**: `65536`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.MaxClusterTransactionCompareExchangeTombstoneCheckIntervalInMin
+
+The maximum interval (in minutes) between checks for compare exchange tombstones that are performed by the cluster-wide transaction mechanism.
+
+- **Type**: `int`
+- **Default**: `5`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.MaxSizeOfSingleRaftCommandInMb
+
+EXPERT ONLY:
+The maximum allowed size (in megabytes) for a single raft command.
+
+- **Type**: `int`
+- **Size Unit**: `Megabytes`
+- **Default**: `128`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.MaximalAllowedClusterVersion
+
+EXPERT ONLY:
+If exceeded, restrict the cluster to the specified version.
+
+- **Type**: `int?`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.OnErrorDelayTimeInMs
+
+How long the maintenance supervisor waits (in milliseconds) after receiving an exception from a worker before retrying.
+
+- **Type**: `int`
+- **Default**: `5000`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.OperationTimeoutInSec
+
+As a cluster node, how long (in seconds) to wait before timing out an operation between two cluster nodes.
+
+- **Type**: `int`
+- **Default**: `15`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.ReceiveFromWorkerTimeoutInMs
+
+How long the maintenance supervisor waits (in milliseconds) for a response from a worker before timing out.
+
+- **Type**: `int`
+- **Default**: `5000`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.StatsStabilizationTimeInSec
+
+How long to wait (in seconds) for cluster stats to stabilize after a database topology change.
+
+- **Type**: `int`
+- **Default**: `5`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.SupervisorSamplePeriodInMs
+
+How long the maintenance supervisor waits (in milliseconds) between sampling the information received from the nodes.
+
+- **Type**: `int`
+- **Default**: `1000`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.TcpReceiveBufferSizeInBytes
+
+The size (in bytes) of the TCP connection receive buffer.
+
+- **Type**: `int`
+- **Default**: `32 * 1024`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.TcpSendBufferSizeInBytes
+
+The size (in bytes) of the TCP connection send buffer.
+
+- **Type**: `int`
+- **Default**: `32 * 1024`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.TcpTimeoutInMs
+
+TCP connection read/write timeout (in milliseconds).
+
+- **Type**: `int`
+- **Default**: `15_000`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.TimeBeforeAddingReplicaInSec
+
+The time (in seconds) a database instance must be in a good and responsive state before we add a replica to match the replication factor.
+
+- **Type**: `int`
+- **Default**: `900`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.TimeBeforeMovingToRehabInSec
+
+The grace period (in seconds) we give a node before it is moved to rehab.
+
+- **Type**: `int`
+- **Default**: `60`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.TimeBeforeRotatingPreferredNodeInSec
+
+The grace period (in seconds) we give the preferred node before moving it to the end of the members list.
+
+- **Type**: `int`
+- **Default**: `5`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.WorkerSamplePeriodInMs
+
+The time (in milliseconds) between sampling database information and sending it to the maintenance supervisor.
+
+- **Type**: `int`
+- **Default**: `500`
+- **Scope**: Server-wide only
+
+
+
+## Cluster.DisableAtomicDocumentWrites
+
+EXPERT ONLY:
+Disable automatic atomic writes with cluster write transactions.
+If set to _true_, only explicitly added compare-exchange values will be considered when validating cluster-wide transactions.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide or per database
+
+
+
+## Cluster.MaxClusterTransactionBatchSize
+
+EXPERT ONLY:
+Specifies the maximum size of the cluster transaction batch to be executed on the database at once.
+
+- **Type**: `int`
+- **Default**: `256`
+- **Scope**: Server-wide or per database
+
diff --git a/versioned_docs/version-7.1/server/configuration/command-line-arguments.mdx b/versioned_docs/version-7.1/server/configuration/command-line-arguments.mdx
new file mode 100644
index 0000000000..eebbeda4ef
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/command-line-arguments.mdx
@@ -0,0 +1,62 @@
+---
+title: "Configuration: Command Line Arguments"
+hide_table_of_contents: true
+sidebar_label: Command Line Arguments
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Command Line Arguments
+
+Running RavenDB with command line options lets you set up both its behavior (e.g., running as a daemon or service) and its configuration options.
+
+## Arguments
+
+| Argument | Details | Example |
+|------------------------|:-------------------------------------|:--------------------------|
+| `-h` \| `-?` \| `--help` | Prints the command line arguments list | `Raven.Server -h` |
+| `-v` \| `--version` | Displays the version and exits | `Raven.Server -v` |
+| `--print-id` | Prints the server ID upon server start | `Raven.Server --print-id` |
+| `-n` \| `--non-interactive` | Run in non-interactive mode. After RavenDB finishes initialization and starts up, no CLI prompt will be displayed. This is useful when running as a service. CLI management is still fully available through the use of `rvn admin-channel`. Note that non-interactive mode may also be entered automatically if a prompt is not available due to OS limitations; `rvn` remains available in that case. More information about 'Running as a Service' can be found [here](../../start/installation/running-as-service.mdx). | `Raven.Server -n` |
+| `--service-name=` | Sets the service name. Only applies to RavenDB running as a Windows service | `Raven.Server --service-name=RavenDbService` |
+| `-c=` \| `--config-path=` | Sets a custom configuration file path, i.e. the `settings.json` file to be used by RavenDB | `Raven.Server -c=/home/myuser/settings.local.json` |
+| `--browser` | Attempts to open RavenDB Studio in the browser | `Raven.Server --browser` |
+| `-l` \| `--log-to-console` | Prints logs to the console (when run in non-interactive mode) | `Raven.Server -l` |
+
+## Docker
+
+
+
+If you are interested in hosting the server in a Docker container, please
+read [our dedicated article](../../start/installation/running-in-docker-container.mdx).
+
+
+
+Running a Docker instance with Docker's `-e` argument lets you pass a few configuration options to RavenDB, e.g.:
+
+
+
+{`docker run --name docker_nightly -e PublicServerUrl=http://10.0.75.2:8080 -e UNSECURED_ACCESS_ALLOWED=PublicNetwork -p 8081:8080 -p 38889:38888 ravendb/ravendb-nightly
+`}
+
+
+
+The environment variables available when running Docker are:
+
+* BindPort (8080)
+* BindTcpPort (38888)
+* ConfigPath
+* DataDir
+* PublicServerUrl
+* PublicTcpServerUrl
+* LogsMode
+* CertificatePath
+* CertificatePassword
+* Hostname
+
diff --git a/versioned_docs/version-7.1/server/configuration/configuration-options.mdx b/versioned_docs/version-7.1/server/configuration/configuration-options.mdx
new file mode 100644
index 0000000000..29d9771e16
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/configuration-options.mdx
@@ -0,0 +1,136 @@
+---
+title: "Configuration Overview"
+hide_table_of_contents: true
+sidebar_label: Configuration Overview
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration Overview
+
+
+* RavenDB comes with default settings that are configured for best results.
+  If needed, you can customize the default configuration to suit your specific needs.
+
+* Any **configuration key** can be modified by either of the following options:
+  * [Environment variables](../../server/configuration/configuration-options.mdx#environment-variables)
+  * [settings.json](../../server/configuration/configuration-options.mdx#settingsjson)
+  * [Command line arguments](../../server/configuration/configuration-options.mdx#command-line-arguments)
+  * [Database settings view](../../server/configuration/configuration-options.mdx#database-settings-view) (database scope only)
+
+
+
+## Environment variables
+
+* To set a configuration key as an environment variable:
+
+  * Add the prefix `RAVEN_` to the configuration key name
+  * Replace all period characters (`.`) with the underscore character (`_`)
+
+* The server will retrieve these environment variables and apply their values.
+
+
+To set the [Security.Certificate.Path](../../server/configuration/security-configuration.mdx#securitycertificatepath)
+configuration key using an environment variable, add the environment variable `RAVEN_Security_Certificate_Path`.
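+
+For example, to set this variable in a Linux shell (a minimal sketch using the same sample certificate path as the PowerShell example below):
+
+
+
+{`export RAVEN_Security_Certificate_Path=/config/raven-server.certificate.pfx
+`}
+
+
+
+The equivalent in Windows PowerShell: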
+
+
+{`# This sets the path to your .pfx certificate file
+$Env:RAVEN_Security_Certificate_Path = "/config/raven-server.certificate.pfx"
+`}
+
+
+
+
+
+## settings.json
+
+
+_settings.json_ configuration values **override** their matching
+[environment variables](../../server/configuration/configuration-options.mdx#environment-variables) values.
+
+
+* The `settings.json` file is created by RavenDB when running the server for the first time,
+  duplicating the `settings.default.json` file located in the same directory as the server executable.
+
+* If you want to apply configuration keys to _settings.json_ prior to running the server for
+  the first time, you can manually copy _settings.default.json_ as _settings.json_ and edit the new file.
+
+* The file is read and applied only on server startup.
+
+* To set a configuration key from _settings.json_, simply add the key and its value to the file.
+
+
+
+
+{`\{
+    "ServerUrl": "http://127.0.0.1:8080",
+    "Setup.Mode": "None",
+    "License.Path": "D:\\\\RavenDB\\\\Server\\\\license.json"
+\}
+`}
+
+
+
+
+
+Configuration options that include multiple values (like strings separated by `;`)
+can be configured using regular JSON arrays.
+To set [Security.WellKnownCertificates.Admin](../../server/configuration/security-configuration.mdx#securitywellknowncertificatesadmin),
+for example, use:
+
+
+{`\{
+    "Security.WellKnownCertificates.Admin" : [ "297430d6d2ce259772e4eccf97863a4dfe6b048c",
+                                               "e6a3b45b062d509b3382282d196efe97d5956ccb" ]
+\}
+`}
+
+
+
+
+
+## Command line arguments
+
+
+Command line arguments configuration values **override** their matching
+[environment variables](../../server/configuration/configuration-options.mdx#environment-variables)
+and [settings.json](../../server/configuration/configuration-options.mdx#settingsjson) values.
+
+
+* The server can be configured using command line arguments that are passed to the console application
+  (or while running as a daemon).
+
+* Find additional details about Command Line Arguments [here](../../server/configuration/command-line-arguments.mdx).
+
+
+
+
+{`./Raven.Server --Setup.Mode=None
+`}
+
+
+
+
+
+## Database settings view
+
+* When the server is up and running, you can modify configuration keys that are in the
+  **database scope** from Studio's [Database settings view](../../studio/database/settings/database-settings.mdx).
+
+* After modifying a database configuration key from this view, the database must be
+  [reloaded](../../studio/database/settings/database-settings.mdx#how-to-reload-the-database)
+  for the change to take effect.
+
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/core-configuration.mdx b/versioned_docs/version-7.1/server/configuration/core-configuration.mdx
new file mode 100644
index 0000000000..88c1e4b8f3
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/core-configuration.mdx
@@ -0,0 +1,304 @@
+---
+title: "Configuration: Core"
+hide_table_of_contents: true
+sidebar_label: Core Configuration
+sidebar_position: 5
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Core
+
+[settings.json](../../server/configuration/configuration-options.mdx#settingsjson) variables change your server's behavior in various ways.
+
+
+RavenDB reads `settings.json` only during startup.
+When you edit the file, restart the server to apply changes.
+
+
+## ServerUrl
+
+The URLs which the server should listen to.
+
+- **Type**: `string`
+- **Default**: `http://localhost:8080`
+- **Scope**: Server-wide only
+
+Indicates the IP addresses or host addresses with ports and protocols that the server should listen on for requests. Use `0.0.0.0` to indicate that the server should listen for requests on any IP address or hostname using the specified port and protocol. The protocol (`http://` or `https://`) must be included with each URL.
+
+Valid addresses can be localhost, domain names, or IPv4/IPv6 addresses. Ports can be specified after the address using ':' as a separator; if omitted, the default is used: *port 80* for the *http* protocol and *port 443* for the *https* protocol.
+
+
+Setting a non-loopback address with the ***HTTP*** protocol will expose the server to the network and requires security measures (HTTPS, certificates). When such an address is set, RavenDB will refuse to start unless **UnsecuredAccessAllowed** is manually set to **PublicNetwork**.
+
+
+### Examples
+
+* The server will listen to incoming requests on all the network devices available on the machine, on the specified port:
+
+
+
+{`http://0.0.0.0:8080
+`}
+
+
+
+* The server will listen on the loopback device only:
+
+
+
+{`http://localhost:8080
+`}
+
+
+
+* Server using the IPv6 loopback-only address:
+
+
+
+{`http://[0:0:0:0:0:0:0:1]:8080
+`}
+
+
+
+
+
+## ServerUrl.Tcp
+
+The TCP URLs which the server should listen to.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+Indicates the IP addresses or host addresses with ports and protocols that the server should listen on for incoming TCP connections, which are used for inter-node communication.
+Valid addresses can be localhost, domain names, or IPv4/IPv6 addresses. Ports **must be specified** after the address using ':' as a separator, or just as a number without an address.
+
+If no URL is set, the ServerUrl will be used along with a random port.
+If just a number is set, the ServerUrl will be used with the specified number as the port.
+If both the address and port are set, RavenDB will listen to the address and port specified.
+
+
+The same security considerations as for the ***ServerUrl*** option apply (see above).
+
+
+### Examples
+
+* The server will listen for TCP connections on all the network devices available on the machine:
+
+
+
+{`tcp://0.0.0.0:38888
+`}
+
+
+
+
+
+## PublicServerUrl
+
+The URL under which the server is publicly available.
+
+- **Type**: `string`
+- **Default**: `null` (local Server URL)
+- **Scope**: Server-wide only
+
+Set the URL to be accessible by clients and other nodes, regardless of which IP is used to access the server internally. This is useful when using a secured connection via an https URL, or behind a proxy server.
+
+### Examples
+
+* Use LAN proxy server address 10.0.0.1
+
+
+
+{`http://10.0.0.1:80
+`}
+
+
+
+* Use a specific https domain
+
+
+
+{`https://example.com:8080
+`}
+
+
+
+
+In the above example, `example.com` is the external domain/IP provided by the ISP, and `ServerUrl` must be specified when the server is behind a firewall, proxy, or router.
+
+
+
+
+## PublicServerUrl.Tcp
+
+The TCP URL under which the server is publicly available.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+Set the public TCP address of the server. Used for inter-node communication and access from behind a firewall, proxy, etc.
+
+### Examples
+
+
+
+{`tcp://example.com:38888
+`}
+
+
+
+
+
+## ServerUrl.Cluster
+
+When this configuration option is used to expose a server's
+internal IP address, other servers will try to communicate with
+it through this IP first. If the communication fails, they will
+fail over to its public IP/URL.
+
+Using internal IPs (rather than the public network, which requires DNS
+resolution, etc.) can **accelerate internal cluster communications**.
+
+
+Internal cluster communications can be particularly useful when the
+cluster is [sharded](../../sharding/overview.mdx), because of the amount
+of internal communication needed: for a shard to satisfy its client
+requests, it often has to communicate with all other shards. Performing
+these communications internally can provide a better user experience
+and save considerable time and money.
+
+
+- **Type**: `UriSetting`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+### Examples
+
+
+
+{`// must include http or https
+http://192.168.10.9
+`}
+
+
+
+
+
+{`// can include port number
+https://example.com:8080
+`}
+
+
+
+
+
+## ExternalIp
+
+External IP address.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+
+## RunInMemory
+
+Set whether the database should run purely in memory.
+
+- **Type**: `bool`
+- **Default**: `null`
+- **Scope**: Server-wide or per database
+
+When running in memory, RavenDB does not write to the disk. If the server is restarted, all data will be lost. This is mostly useful for testing.
+
+
+
+## DataDir
+
+Path to the data directory of RavenDB.
+
+- **Type**: `string`
+- **Default**: `Databases/{name}`
+- **Scope**: Server-wide or per database
+
+Relative paths will be based on the application base directory (where the Raven.Server executable is located).
+
+### Examples
+
+
+
+{`/home/user/databases
+`}
+
+
+
+
+
+## Setup.Mode
+
+Determines what kind of security was chosen during setup, or whether to skip the setup process on startup altogether (`None`).
+
+- **Type**: `enum`
+- **Default**: `None`
+- **Scope**: Server-wide only
+
+Possible values:
+
+- `None`: No setup process on RavenDB server startup
+- `Initial`: Start the wizard process to set up RavenDB on the first server startup
+- `LetsEncrypt`: Let RavenDB know that it needs to take care of refreshing certificates on the fly via Let's Encrypt
+- `Secured`: This value will be set internally by RavenDB
+- `Unsecured`: Run the server in unsecured mode
+
+
+
+## AcmeUrl
+
+The URLs which the server should contact when requesting certificates using the ACME protocol.
+
+- **Type**: `string`
+- **Default**: `https://acme-v01.api.letsencrypt.org/directory`
+- **Scope**: Server-wide only
+
+
+
+## ThrowIfAnyIndexCannotBeOpened
+
+Indicates whether an exception should be thrown if any index cannot be opened.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide or per database
+
+
+
+## Features.Availability
+
+This [settings.json](../../server/configuration/configuration-options.mdx#settingsjson) variable determines whether to run RavenDB with its standard
+feature set only, or to also enable experimental features.
+Some features, such as recently released ones, are considered **experimental**. They are disabled by default; you can enable
+them by setting `Features.Availability` to `Experimental`.
+ +- **Type**: `enum` +- **Default**: `Stable` +- **Scope**: Server-wide only + +Possible values: + +- `Stable`: Standard set of features +- `Experimental`: Enables experimental features + + +We'd be grateful for any feedback you send us regarding experimental features. + + + diff --git a/versioned_docs/version-7.1/server/configuration/database-configuration.mdx b/versioned_docs/version-7.1/server/configuration/database-configuration.mdx new file mode 100644 index 0000000000..1573e0d592 --- /dev/null +++ b/versioned_docs/version-7.1/server/configuration/database-configuration.mdx @@ -0,0 +1,188 @@ +--- +title: "Configuration: Database" +hide_table_of_contents: true +sidebar_label: Database Configuration +sidebar_position: 6 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Configuration: Database + + +* The following configuration keys control database behavior. + +* In this article: + * Server-wide scope: + [Databases.Compression.CompressAllCollectionsDefault](../../server/configuration/database-configuration.mdx#databasescompressioncompressallcollectionsdefault) + [Databases.Compression.CompressRevisionsDefault](../../server/configuration/database-configuration.mdx#databasescompressioncompressrevisionsdefault) + [Databases.ConcurrentLoadTimeoutInSec](../../server/configuration/database-configuration.mdx#databasesconcurrentloadtimeoutinsec) + [Databases.FrequencyToCheckForIdleInSec](../../server/configuration/database-configuration.mdx#databasesfrequencytocheckforidleinsec) + [Databases.MaxConcurrentLoads](../../server/configuration/database-configuration.mdx#databasesmaxconcurrentloads) + + * Server-wide, or database scope: + [Databases.CollectionOperationTimeoutInSec](../../server/configuration/database-configuration.mdx#databasescollectionoperationtimeoutinsec) + [Databases.DeepCleanupThresholdInMin](../../server/configuration/database-configuration.mdx#databasesdeepcleanupthresholdinmin) + [Databases.MaxIdleTimeInSec](../../server/configuration/database-configuration.mdx#databasesmaxidletimeinsec) + [Databases.OperationTimeoutInSec](../../server/configuration/database-configuration.mdx#databasesoperationtimeoutinsec) + [Databases.PulseReadTransactionLimitInMb](../../server/configuration/database-configuration.mdx#databasespulsereadtransactionlimitinmb) + [Databases.QueryOperationTimeoutInSec](../../server/configuration/database-configuration.mdx#databasesqueryoperationtimeoutinsec) + [Databases.QueryTimeoutInSec](../../server/configuration/database-configuration.mdx#databasesquerytimeoutinsec) + [Databases.RegularCleanupThresholdInMin](../../server/configuration/database-configuration.mdx#databasesregularcleanupthresholdinmin) + + +## Databases.Compression.CompressAllCollectionsDefault + +Set whether [documents compression](../../server/storage/documents-compression.mdx) is enabled by default for ALL COLLECTIONS in newly created databases. +Setting this to _false_ does not prevent you from enabling compression later, after a database is created. + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide only + + + +## Databases.Compression.CompressRevisionsDefault + +Set whether [documents compression](../../server/storage/documents-compression.mdx) is enabled by default for REVISIONS in newly created databases. 
+It may be useful to disable this option if the database is expected to run on very low-end hardware. +Setting this to _false_ does not prevent you from enabling compression later, after a database is created. + +- **Type**: `bool` +- **Default**: `true` +- **Scope**: Server-wide only + + + +## Databases.ConcurrentLoadTimeoutInSec + +The time (in seconds) to wait for a database to start loading (and become available) when the system is under load - when many different databases are being loaded concurrently. + +- **Type**: `int` +- **Default**: `60` +- **Scope**: Server-wide only + + + +## Databases.FrequencyToCheckForIdleInSec + +The interval (in seconds) at which the system checks for idle databases. + +- **Type**: `int` +- **Default**: `60` +- **Scope**: Server-wide only + + + +## Databases.MaxConcurrentLoads + +Specifies the maximum number of databases that can be loaded concurrently. + +- **Type**: `int` +- **Default**: `8` +- **Scope**: Server-wide only + + + +## Databases.CollectionOperationTimeoutInSec + +The time (in seconds) to wait before canceling certain collection operations (e.g., batch delete documents). +If the operation exceeds the specified duration, an *OperationCanceledException* is thrown. + +- **Type**: `int` +- **Default**: `300` +- **Scope**: Server-wide or per database + + + +## Databases.DeepCleanupThresholdInMin + +EXPERT ONLY. +A deep database cleanup will be performed when this number of minutes has passed since the last time work was done on the database. + +- **Type**: `int` +- **Default**: `5` +- **Scope**: Server-wide or per database + + + +## Databases.MaxIdleTimeInSec + +Sets the maximum idle time (in seconds) for a database. +After this period, an idle database will be unloaded from memory. +Consider using a lower value if memory resources are limited. + +- **Type**: `int` +- **Default**: `900` +- **Scope**: Server-wide or per database + + + +## Databases.OperationTimeoutInSec + +The time (in seconds) to wait before canceling certain operations, such as indexing terms. + +- **Type**: `int` +- **Default**: `300` +- **Scope**: Server-wide or per database + + + +## Databases.PulseReadTransactionLimitInMb + +The number of megabytes used by encryption buffers (for encrypted databases) or 32-bit mapped buffers (on 32-bit systems), +after which a read transaction is renewed to reduce memory usage during long-running operations such as backups or streaming. + +- **Type**: `int` +- **Default**: The default value is determined by the total physical memory (RAM) available on the machine: + * On 32-bit platforms, or when less than 1 GB of RAM is available: `16 MB` + * Up to 4 GB RAM: `32 MB` + * Up to 16 GB RAM: `64 MB` + * Up to 64 GB RAM: `128 MB` + * More than 64 GB RAM: `256 MB` +- **Scope**: Server-wide or per database + + + +## Databases.QueryOperationTimeoutInSec + +The time (in seconds) to wait before canceling a query-related operation (e.g., patch or delete query). +The timeout resets with each processed document, +and will only be exceeded if no document is processed within the specified period. + +- **Type**: `int` +- **Default**: `300` +- **Scope**: Server-wide or per database + + + +## Databases.QueryTimeoutInSec + +The time (in seconds) to wait before canceling a query. +Applies to both regular and streamed queries. + +If the query exceeds the specified time, an *OperationCanceledException* is thrown. +For streamed queries, the timeout is reset each time a result is pushed to the stream. 
+The timeout will be exceeded only if no result is streamed within the timeout period.
+
+- **Type**: `int`
+- **Default**: `300`
+- **Scope**: Server-wide or per database
+
+
+
+## Databases.RegularCleanupThresholdInMin
+
+EXPERT ONLY.
+A regular database cleanup will be performed when this number of minutes has passed since the database was last idle.
+
+- **Type**: `int`
+- **Default**: `10`
+- **Scope**: Server-wide or per database
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/embedded-configuration.mdx b/versioned_docs/version-7.1/server/configuration/embedded-configuration.mdx
new file mode 100644
index 0000000000..234d3e5ac8
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/embedded-configuration.mdx
@@ -0,0 +1,25 @@
+---
+title: "Configuration: Embedded Options"
+hide_table_of_contents: true
+sidebar_label: Embedded Configuration
+sidebar_position: 7
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Embedded Options
+
+## Embedded.ParentProcessId or Testing.ParentProcessId
+
+Watch the parent process ID and exit when that process exits.
+
+- **Type**: `int`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/etl-configuration.mdx b/versioned_docs/version-7.1/server/configuration/etl-configuration.mdx
new file mode 100644
index 0000000000..4153772ea1
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/etl-configuration.mdx
@@ -0,0 +1,130 @@
+---
+title: "Configuration: ETL Options"
+hide_table_of_contents: true
+sidebar_label: ETL Configuration
+sidebar_position: 8
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: ETL Options
+
+
+* In this page:
+  * [ETL.ExtractAndTransformTimeoutInSec](../../server/configuration/etl-configuration.mdx#etlextractandtransformtimeoutinsec)
+  * [ETL.MaxBatchSizeInMb](../../server/configuration/etl-configuration.mdx#etlmaxbatchsizeinmb)
+  * [ETL.MaxFallbackTimeInSec](../../server/configuration/etl-configuration.mdx#etlmaxfallbacktimeinsec)
+  * [ETL.MaxNumberOfExtractedDocuments](../../server/configuration/etl-configuration.mdx#etlmaxnumberofextracteddocuments)
+  * [ETL.MaxNumberOfExtractedItems](../../server/configuration/etl-configuration.mdx#etlmaxnumberofextracteditems)
+  * [ETL.OLAP.MaxNumberOfExtractedDocuments](../../server/configuration/etl-configuration.mdx#etlolapmaxnumberofextracteddocuments)
+  * [ETL.Queue.AzureQueueStorage.TimeToLiveInSec](../../server/configuration/etl-configuration.mdx#etlqueueazurequeuestoragetimetoliveinsec)
+  * [ETL.Queue.AzureQueueStorage.VisibilityTimeoutInSec](../../server/configuration/etl-configuration.mdx#etlqueueazurequeuestoragevisibilitytimeoutinsec)
+  * [ETL.Queue.Kafka.InitTransactionsTimeoutInSec](../../server/configuration/etl-configuration.mdx#etlqueuekafkainittransactionstimeoutinsec)
+  * [ETL.SQL.CommandTimeoutInSec](../../server/configuration/etl-configuration.mdx#etlsqlcommandtimeoutinsec)
+
+
+## ETL.ExtractAndTransformTimeoutInSec
+
+Number of seconds after which extraction and transformation will end and loading will start.
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide or per database
+
+
+
+## ETL.MaxNumberOfExtractedDocuments
+
+* Max number of extracted documents in an ETL batch.
+* If no value is set (or it is set to null), the number of extracted documents falls back to the `ETL.MaxNumberOfExtractedItems` value.
+
+- **Type**: `int`
+- **Default**: `8192`
+- **Scope**: Server-wide or per database
+
+
+
+## ETL.MaxBatchSizeInMb
+
+* Maximum size in megabytes of a batch of data (documents and attachments) that will be sent to the destination as a single batch after transformation.
+* If no value is set (or it is set to null), the size of a processed ETL batch is not limited.
+
+- **Type**: `Size`
+- **Size Unit**: `Megabytes`
+- **Default**: `64`
+- **Scope**: Server-wide or per database
+
+
+
+## ETL.MaxFallbackTimeInSec
+
+* Maximum number of seconds the ETL process will be in fallback mode after a load connection failure to a destination.
+* Fallback mode means that the process is suspended.
+
+- **Type**: `int`
+- **Default**: `900`
+- **Scope**: Server-wide or per database
+
+
+
+## ETL.MaxNumberOfExtractedItems
+
+* Max number of extracted items (documents, counters, etc.) in an ETL batch.
+* If no value is set (or it is set to null), the number of extracted items in a processed ETL batch is not limited.
+
+- **Type**: `int`
+- **Default**: `8192`
+- **Scope**: Server-wide or per database
+
+
+
+## ETL.OLAP.MaxNumberOfExtractedDocuments
+
+Max number of extracted documents in an OLAP ETL batch.
+
+- **Type**: `int`
+- **Default**: `64 * 1024`
+- **Scope**: Server-wide or per database
+
+
+
+## ETL.Queue.AzureQueueStorage.TimeToLiveInSec
+
+Lifespan of a message in the queue.
+
+- **Type**: `int`
+- **Default**: `604800` (7 days)
+- **Scope**: Server-wide or per database
+
+
+
+## ETL.Queue.AzureQueueStorage.VisibilityTimeoutInSec
+
+How long a message is hidden after being retrieved but not deleted.
+
+- **Type**: `int`
+- **Default**: `0`
+- **Scope**: Server-wide or per database
+
+
+
+## ETL.Queue.Kafka.InitTransactionsTimeoutInSec
+
+Timeout to initialize transactions for the Kafka producer.
+
+- **Type**: `int`
+- **Default**: `60`
+- **Scope**: Server-wide or per database
+
+
+
+## ETL.SQL.CommandTimeoutInSec
+
+Number of seconds after which the SQL command will time out.
+
+- **Type**: `int`
+- **Default**: `null` (use provider default)
+- **Scope**: Server-wide or per database
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/http-configuration.mdx b/versioned_docs/version-7.1/server/configuration/http-configuration.mdx
new file mode 100644
index 0000000000..a25b3fb359
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/http-configuration.mdx
@@ -0,0 +1,232 @@
+---
+title: "Configuration: HTTP"
+hide_table_of_contents: true
+sidebar_label: HTTP Configuration
+sidebar_position: 9
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: HTTP
+
+
+* RavenDB uses [Kestrel](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.server.kestrel?view=aspnetcore-8.0), which is an HTTP web server built on ASP.NET Core.
+
+* You can set Kestrel's properties via the following RavenDB configuration keys.
+
+* In this page:
+  * [Http.MinDataRateBytesPerSec](../../server/configuration/http-configuration.mdx#httpmindataratebytespersec)
+  * [Http.MinDataRateGracePeriodInSec](../../server/configuration/http-configuration.mdx#httpmindatarategraceperiodinsec)
+  * [Http.MaxRequestBufferSizeInKb](../../server/configuration/http-configuration.mdx#httpmaxrequestbuffersizeinkb)
+  * [Http.MaxRequestLineSizeInKb](../../server/configuration/http-configuration.mdx#httpmaxrequestlinesizeinkb)
+  * [Http.Http2.KeepAlivePingTimeoutInSec](../../server/configuration/http-configuration.mdx#httphttp2keepalivepingtimeoutinsec)
+  * [Http.Http2.KeepAlivePingDelayInSec](../../server/configuration/http-configuration.mdx#httphttp2keepalivepingdelayinsec)
+  * [Http.Http2.MaxStreamsPerConnection](../../server/configuration/http-configuration.mdx#httphttp2maxstreamsperconnection)
+  * [Http.UseResponseCompression](../../server/configuration/http-configuration.mdx#httpuseresponsecompression)
+  * [Http.AllowResponseCompressionOverHttps](../../server/configuration/http-configuration.mdx#httpallowresponsecompressionoverhttps)
+  * [Http.GzipResponseCompressionLevel](../../server/configuration/http-configuration.mdx#httpgzipresponsecompressionlevel)
+  * [Http.DeflateResponseCompressionLevel](../../server/configuration/http-configuration.mdx#httpdeflateresponsecompressionlevel)
+  * [Http.ZstdResponseCompressionLevel](../../server/configuration/http-configuration.mdx#httpzstdresponsecompressionlevel)
+  * [Http.StaticFilesResponseCompressionLevel](../../server/configuration/http-configuration.mdx#httpstaticfilesresponsecompressionlevel)
+  * [Http.Protocols](../../server/configuration/http-configuration.mdx#httpprotocols)
+  * [Http.AllowSynchronousIO](../../server/configuration/http-configuration.mdx#httpallowsynchronousio)
+
+
+## Http.MinDataRateBytesPerSec
+
+* Set Kestrel's minimum required data rate in bytes per second.
+
+* This option must be configured together with [Http.MinDataRateGracePeriod](../../server/configuration/http-configuration.mdx#httpmindatarategraceperiodinsec).
+
+- **Type**: `int`
+- **Default**: `null`
+- **Scope**: Server-wide only
+- **Used for setting Kestrel's properties**:
+    - [MinResponseDataRate](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.server.kestrel.core.kestrelserverlimits.minresponsedatarate?view=aspnetcore-8.0#microsoft-aspnetcore-server-kestrel-core-kestrelserverlimits-minresponsedatarate)
+    - [MinRequestBodyDataRate](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.server.kestrel.core.kestrelserverlimits.minrequestbodydatarate?view=aspnetcore-8.0)
+
+
+
+## Http.MinDataRateGracePeriodInSec
+
+* Set Kestrel's allowed request and response grace period in seconds.
+  This option must be configured together with [Http.MinDataRateBytesPerSec](../../server/configuration/http-configuration.mdx#httpmindataratebytespersec).
+
+* Kestrel checks every second if data is coming in at the specified rate in bytes/second.
+  If the rate drops below the minimum set by _MinResponseDataRate_, the connection is timed out.
+
+* The grace period _Http.MinDataRateGracePeriodInSec_ is the amount of time that Kestrel gives the client to increase its send rate up to the minimum. The rate is not checked during that time.
+  The grace period helps avoid dropping connections that are initially sending data at a slow rate due to TCP slow-start.
+
+* When set to `null`, rates are unlimited and no minimum data rate is enforced.
+
+
+## Http.MaxRequestBufferSizeInKb
+
+* Set the maximum size of Kestrel's request buffer.
+
+* If not set, or set to `null`, then the size of the request buffer is unlimited.
+- **Type**: `int`
+- **Default**: `null`
+- **Scope**: Server-wide only
+- **Used for setting Kestrel property**: [MaxRequestBufferSize](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.server.kestrel.core.kestrelserverlimits.maxrequestbuffersize?view=aspnetcore-8.0#microsoft-aspnetcore-server-kestrel-core-kestrelserverlimits-maxrequestbuffersize)
+
+
+
+## Http.MaxRequestLineSizeInKb
+
+Set the maximum allowed size for the HTTP request line.
+
+- **Type**: `int`
+- **Default**: `16`
+- **Scope**: Server-wide only
+- **Used for setting Kestrel property**: [MaxRequestLineSize](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.server.kestrel.core.kestrelserverlimits.maxrequestlinesize?view=aspnetcore-8.0#microsoft-aspnetcore-server-kestrel-core-kestrelserverlimits-maxrequestlinesize)
+
+
+
+## Http.Http2.KeepAlivePingTimeoutInSec
+
+Set Kestrel's HTTP2 keep-alive ping timeout.
+
+- **Type**: `int`
+- **Default**: `null`
+- **Scope**: Server-wide only
+- **Used for setting Kestrel property**: [KeepAlivePingTimeout](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.server.kestrel.core.http2limits.keepalivepingtimeout?view=aspnetcore-8.0#microsoft-aspnetcore-server-kestrel-core-http2limits-keepalivepingtimeout)
+
+
+
+## Http.Http2.KeepAlivePingDelayInSec
+
+Set Kestrel's HTTP2 keep-alive ping delay.
+
+- **Type**: `int`
+- **Default**: `null`
+- **Scope**: Server-wide only
+- **Used for setting Kestrel property**: [KeepAlivePingDelay](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.server.kestrel.core.http2limits.keepalivepingdelay?view=aspnetcore-8.0#microsoft-aspnetcore-server-kestrel-core-http2limits-keepalivepingdelay)
+
+
+
+## Http.Http2.MaxStreamsPerConnection
+
+* Set Kestrel's HTTP2 max streams per connection.
+
+* This limits the number of concurrent request streams per HTTP/2 connection.
+  Excess streams will be refused.
+
+* When _Http.Http2.MaxStreamsPerConnection_ is `null` or not set,
+  RavenDB assigns _int.MaxValue_ to _MaxStreamsPerConnection_.
+- **Type**: `int` +- **Default**: `null` (no limit) +- **Scope**: Server-wide only +- **Used for setting Kestrel property**: [MaxStreamsPerConnection](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.server.kestrel.core.http2limits.maxstreamsperconnection?view=aspnetcore-8.0#microsoft-aspnetcore-server-kestrel-core-http2limits-maxstreamsperconnection) + + + +## Http.UseResponseCompression + +* Set whether Raven's HTTP server should compress its responses. + +* Using compression lowers the network bandwidth usage. + However, setting to `false` is needed in order to debug or view the response via sniffer tools. +- **Type**: `bool` +- **Default**: `true` +- **Scope**: Server-wide only + + + +## Http.AllowResponseCompressionOverHttps + +* Set whether Raven's HTTP server should allow response compression to happen when HTTPS is enabled. + +* Please see http://breachattack.com/ before enabling this. +- **Type**: `bool` +- **Default**: `true` +- **Scope**: Server-wide only +- **Used for setting Kestrel property**: [EnableForHttps](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.responsecompression.responsecompressionoptions.enableforhttps?view=aspnetcore-8.0#microsoft-aspnetcore-responsecompression-responsecompressionoptions-enableforhttps) + + + +## Http.GzipResponseCompressionLevel + +Set the compression level to be used when compressing HTTP responses with GZip. + +- **Type**: `enum CompressionLevel` (`Optimal`, `Fastest`, `NoCompression`, `SmallestSize`) +- **Default**: `Fastest` +- **Scope**: Server-wide only +- **Used for setting Kestrel property**: [Level](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.responsecompression.gzipcompressionprovideroptions.level?view=aspnetcore-8.0#microsoft-aspnetcore-responsecompression-gzipcompressionprovideroptions-level) + + + +## Http.DeflateResponseCompressionLevel + +Set the compression level to be used when compressing HTTP responses with Deflate. + +- **Type**: `enum CompressionLevel` (`Optimal`, `Fastest`, `NoCompression`, `SmallestSize`) +- **Default**: `Fastest` +- **Scope**: Server-wide only +- **Used for setting Kestrel property**: [Level](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.responsecompression.gzipcompressionprovideroptions.level?view=aspnetcore-8.0#microsoft-aspnetcore-responsecompression-gzipcompressionprovideroptions-level) + + + +## Http.ZstdResponseCompressionLevel + +Set the compression level to be used when compressing HTTP responses with Zstd. + +- **Type**: `enum CompressionLevel` (`Optimal`, `Fastest`, `NoCompression`, `SmallestSize`) +- **Default**: `Fastest` +- **Scope**: Server-wide only +- **Used for setting Kestrel property**: [Level](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.responsecompression.gzipcompressionprovideroptions.level?view=aspnetcore-8.0#microsoft-aspnetcore-responsecompression-gzipcompressionprovideroptions-level) + + + +## Http.StaticFilesResponseCompressionLevel + +Set the compression level to be used when compressing static files. + +- **Type**: `enum CompressionLevel` (`Optimal`, `Fastest`, `NoCompression`, `SmallestSize`) +- **Default**: `Optimal` +- **Scope**: Server-wide only + + + +## Http.Protocols + +* Set HTTP protocols that should be supported by the server. + +* By default, the HTTP protocol is set by the constructor of class `HttpConfiguration` + (that is what is meant by the value `"DefaultValueSetInConstructor"`). 
+ +* If the platform running RavenDB is either Windows 10 or higher, Windows Server 2016 or newer, or POSIX, + the constructor sets Http.Protocols to `Http1AndHttp2`. Otherwise, it is set to `Http1`. +- **Type**: `enum HttpProtocols` ( `None`, `Http1`, `Http2`, `Http1AndHttp2`, `Http3`, `Http1AndHttp2AndHttp3`) +- **Default**: `DefaultValueSetInConstructor` +- **Scope**: Server-wide only + + + +## Http.AllowSynchronousIO + +Set a value that controls whether synchronous IO is allowed for the Request and Response. + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide only +- **Used for setting Kestrel property**: [AllowSynchronousIO](https://learn.microsoft.com/en-us/dotnet/api/microsoft.aspnetcore.server.kestrel.core.kestrelserveroptions.allowsynchronousio?view=aspnetcore-8.0#microsoft-aspnetcore-server-kestrel-core-kestrelserveroptions-allowsynchronousio) + + diff --git a/versioned_docs/version-7.1/server/configuration/indexing-configuration.mdx b/versioned_docs/version-7.1/server/configuration/indexing-configuration.mdx new file mode 100644 index 0000000000..bec58ae41f --- /dev/null +++ b/versioned_docs/version-7.1/server/configuration/indexing-configuration.mdx @@ -0,0 +1,1041 @@ +--- +title: "Configuration: Indexing" +hide_table_of_contents: true +sidebar_label: Indexing Configuration +sidebar_position: 10 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Configuration: Indexing + + +* The below **indexing configuration keys** can be modified via any of the following methods: + * As explained in the [Config overview](../../server/configuration/configuration-options.mdx) article + * Set a custom configuration per index from the [Client API](../../indexes/creating-and-deploying.mdx#customizing-configuration) + * Set a custom configuration per index from the [Studio](../../studio/database/indexes/create-map-index.mdx#configuration) + + + + + +* In this article: + * Server-wide scope: + [Indexing.CleanupIntervalInMin](../../server/configuration/indexing-configuration.mdx#indexingcleanupintervalinmin) + [Indexing.Corax.VectorSearch.MaxNumberOfThreadsForLocalEmbeddingsGeneration](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchmaxnumberofthreadsforlocalembeddingsgeneration) + [Indexing.GlobalScratchSpaceLimitInMb](../../server/configuration/indexing-configuration.mdx#indexingglobalscratchspacelimitinmb) + [Indexing.MaxNumberOfConcurrentlyRunningIndexes](../../server/configuration/indexing-configuration.mdx#indexingmaxnumberofconcurrentlyrunningindexes) + [Indexing.MaxTimeToWaitAfterFlushAndSyncWhenExceedingScratchSpaceLimitInSec](../../server/configuration/indexing-configuration.mdx#indexingmaxtimetowaitafterflushandsyncwhenexceedingscratchspacelimitinsec) + [Indexing.NuGetAllowPreReleasePackages](../../server/configuration/indexing-configuration.mdx#indexingnugetallowprereleasepackages) + [Indexing.NuGetPackageSourceUrl](../../server/configuration/indexing-configuration.mdx#indexingnugetpackagesourceurl) + [Indexing.NuGetPackagesPath](../../server/configuration/indexing-configuration.mdx#indexingnugetpackagespath) + [Indexing.QueryClauseCache.ExpirationScanFrequencyInSec](../../server/configuration/indexing-configuration.mdx#indexingqueryclausecacheexpirationscanfrequencyinsec) + 
[Indexing.QueryClauseCache.RepeatedQueriesCount](../../server/configuration/indexing-configuration.mdx#indexingqueryclausecacherepeatedqueriescount) + [Indexing.QueryClauseCache.SizeInMb](../../server/configuration/indexing-configuration.mdx#indexingqueryclausecachesizeinmb) + * Server-wide, or database scope: + [Indexing.Auto.ArchivedDataProcessingBehavior](../../server/configuration/indexing-configuration.mdx#indexingautoarchiveddataprocessingbehavior) + [Indexing.Auto.DeploymentMode](../../server/configuration/indexing-configuration.mdx#indexingautodeploymentmode) + [Indexing.Auto.SearchEngineType](../../server/configuration/indexing-configuration.mdx#indexingautosearchenginetype) + [Indexing.Disable](../../server/configuration/indexing-configuration.mdx#indexingdisable) + [Indexing.DisableQueryOptimizerGeneratedIndexes](../../server/configuration/indexing-configuration.mdx#indexingdisablequeryoptimizergeneratedindexes) + [Indexing.ErrorIndexStartupBehavior](../../server/configuration/indexing-configuration.mdx#indexingerrorindexstartupbehavior) + [Indexing.History.NumberOfRevisions](../../server/configuration/indexing-configuration.mdx#indexinghistorynumberofrevisions) + [Indexing.IndexStartupBehavior](../../server/configuration/indexing-configuration.mdx#indexingindexstartupbehavior) + [Indexing.ResetMode](../../server/configuration/indexing-configuration.mdx#indexingresetmode) + [Indexing.RunInMemory](../../server/configuration/indexing-configuration.mdx#indexingruninmemory) + [Indexing.SkipDatabaseIdValidationOnIndexOpening](../../server/configuration/indexing-configuration.mdx#indexingskipdatabaseidvalidationonindexopening) + [Indexing.Static.ArchivedDataProcessingBehavior](../../server/configuration/indexing-configuration.mdx#indexingstaticarchiveddataprocessingbehavior) + [Indexing.Static.DeploymentMode](../../server/configuration/indexing-configuration.mdx#indexingstaticdeploymentmode) + [Indexing.Static.RequireAdminToDeployJavaScriptIndexes](../../server/configuration/indexing-configuration.mdx#indexingstaticrequireadmintodeployjavascriptindexes) + [Indexing.TempPath](../../server/configuration/indexing-configuration.mdx#indexingtemppath) + [Indexing.TimeBeforeDeletionOfSupersededAutoIndexInSec](../../server/configuration/indexing-configuration.mdx#indexingtimebeforedeletionofsupersededautoindexinsec) + [Indexing.TimeToWaitBeforeDeletingAutoIndexMarkedAsIdleInHrs](../../server/configuration/indexing-configuration.mdx#indexingtimetowaitbeforedeletingautoindexmarkedasidleinhrs) + [Indexing.TimeToWaitBeforeMarkingAutoIndexAsIdleInMin](../../server/configuration/indexing-configuration.mdx#indexingtimetowaitbeforemarkingautoindexasidleinmin) + * Server-wide, or database, or per index: + [Indexing.AllowStringCompilation](../../server/configuration/indexing-configuration.mdx#indexingallowstringcompilation) + [Indexing.Analyzers.Default](../../server/configuration/indexing-configuration.mdx#indexinganalyzersdefault) + [Indexing.Analyzers.Exact.Default](../../server/configuration/indexing-configuration.mdx#indexinganalyzersexactdefault) + [Indexing.Analyzers.Search.Default](../../server/configuration/indexing-configuration.mdx#indexinganalyzerssearchdefault) + [Indexing.Corax.DocumentsLimitForCompressionDictionaryCreation](../../server/configuration/indexing-configuration.mdx#indexingcoraxdocumentslimitforcompressiondictionarycreation) + [Indexing.Corax.IncludeDocumentScore](../../server/configuration/indexing-configuration.mdx#indexingcoraxincludedocumentscore) + 
[Indexing.Corax.IncludeSpatialDistance](../../server/configuration/indexing-configuration.mdx#indexingcoraxincludespatialdistance) + [Indexing.Corax.MaxAllocationsAtDictionaryTrainingInMb](../../server/configuration/indexing-configuration.mdx#indexingcoraxmaxallocationsatdictionarytraininginmb) + [Indexing.Corax.MaxMemoizationSizeInMb](../../server/configuration/indexing-configuration.mdx#indexingcoraxmaxmemoizationsizeinmb) + [Indexing.Corax.Static.ComplexFieldIndexingBehavior](../../server/configuration/indexing-configuration.mdx#indexingcoraxstaticcomplexfieldindexingbehavior) + [Indexing.Corax.UnmanagedAllocationsBatchSizeLimitInMb](../../server/configuration/indexing-configuration.mdx#indexingcoraxunmanagedallocationsbatchsizelimitinmb) + [Indexing.Corax.VectorSearch.DefaultMinimumSimilarity](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultminimumsimilarity) + [Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForIndexing](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofcandidatesforindexing) + [Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForQuerying](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofcandidatesforquerying) + [Indexing.Corax.VectorSearch.DefaultNumberOfEdges](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchdefaultnumberofedges) + [Indexing.Corax.VectorSearch.MaximumConcurrentBatchesForHnswAcceleration](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchmaximumconcurrentbatchesforhnswacceleration) + [Indexing.Corax.VectorSearch.OrderByScoreAutomatically](../../server/configuration/indexing-configuration.mdx#indexingcoraxvectorsearchorderbyscoreautomatically) + [Indexing.Encrypted.TransactionSizeLimitInMb](../../server/configuration/indexing-configuration.mdx#indexingencryptedtransactionsizelimitinmb) + [Indexing.IndexEmptyEntries](../../server/configuration/indexing-configuration.mdx#indexingindexemptyentries) + [Indexing.IndexMissingFieldsAsNull](../../server/configuration/indexing-configuration.mdx#indexingindexmissingfieldsasnull) + [Indexing.Lucene.Analyzers.NGram.MaxGram](../../server/configuration/indexing-configuration.mdx#indexingluceneanalyzersngrammaxgram) + [Indexing.Lucene.Analyzers.NGram.MinGram](../../server/configuration/indexing-configuration.mdx#indexingluceneanalyzersngrammingram) + [Indexing.Lucene.IndexInputType](../../server/configuration/indexing-configuration.mdx#indexingluceneindexinputtype) + [Indexing.Lucene.LargeSegmentSizeToMergeInMb](../../server/configuration/indexing-configuration.mdx#indexinglucenelargesegmentsizetomergeinmb) + [Indexing.Lucene.MaximumSizePerSegmentInMb](../../server/configuration/indexing-configuration.mdx#indexinglucenemaximumsizepersegmentinmb) + [Indexing.Lucene.MaxTimeForMergesToKeepRunningInSec](../../server/configuration/indexing-configuration.mdx#indexinglucenemaxtimeformergestokeeprunninginsec) + [Indexing.Lucene.MergeFactor](../../server/configuration/indexing-configuration.mdx#indexinglucenemergefactor) + [Indexing.Lucene.NumberOfLargeSegmentsToMergeInSingleBatch](../../server/configuration/indexing-configuration.mdx#indexinglucenenumberoflargesegmentstomergeinsinglebatch) + [Indexing.Lucene.ReaderTermsIndexDivisor](../../server/configuration/indexing-configuration.mdx#indexinglucenereadertermsindexdivisor) + 
[Indexing.Lucene.UseCompoundFileInMerging](../../server/configuration/indexing-configuration.mdx#indexingluceneusecompoundfileinmerging) + [Indexing.ManagedAllocationsBatchSizeLimitInMb](../../server/configuration/indexing-configuration.mdx#indexingmanagedallocationsbatchsizelimitinmb) + [Indexing.MapBatchSize](../../server/configuration/indexing-configuration.mdx#indexingmapbatchsize) + [Indexing.MapTimeoutAfterEtagReachedInMin](../../server/configuration/indexing-configuration.mdx#indexingmaptimeoutafteretagreachedinmin) + [Indexing.MapTimeoutInSec](../../server/configuration/indexing-configuration.mdx#indexingmaptimeoutinsec) + [Indexing.MaxStepsForScript](../../server/configuration/indexing-configuration.mdx#indexingmaxstepsforscript) + [Indexing.MaxTimeForDocumentTransactionToRemainOpenInSec](../../server/configuration/indexing-configuration.mdx#indexingmaxtimefordocumenttransactiontoremainopeninsec) + [Indexing.MaxTimeToWaitAfterFlushAndSyncWhenReplacingSideBySideIndexInSec](../../server/configuration/indexing-configuration.mdx#indexingmaxtimetowaitafterflushandsyncwhenreplacingsidebysideindexinsec) + [Indexing.Metrics.Enabled](../../server/configuration/indexing-configuration.mdx#indexingmetricsenabled) + [Indexing.MinNumberOfMapAttemptsAfterWhichBatchWillBeCanceledIfRunningLowOnMemory](../../server/configuration/indexing-configuration.mdx#indexingminnumberofmapattemptsafterwhichbatchwillbecanceledifrunninglowonmemory) + [Indexing.MinimumTotalSizeOfJournalsToRunFlushAndSyncWhenReplacingSideBySideIndexInMb](../../server/configuration/indexing-configuration.mdx#indexingminimumtotalsizeofjournalstorunflushandsyncwhenreplacingsidebysideindexinmb) + [Indexing.NumberOfConcurrentStoppedBatchesIfRunningLowOnMemory](../../server/configuration/indexing-configuration.mdx#indexingnumberofconcurrentstoppedbatchesifrunninglowonmemory) + [Indexing.NumberOfLargeSegmentsToMergeInSingleBatch](../../server/configuration/indexing-configuration.mdx#indexingnumberoflargesegmentstomergeinsinglebatch) + [Indexing.OrderByScoreAutomaticallyWhenBoostingIsInvolved](../../server/configuration/indexing-configuration.mdx#indexingorderbyscoreautomaticallywhenboostingisinvolved) + [Indexing.OrderByTicksAutomaticallyWhenDatesAreInvolved](../../server/configuration/indexing-configuration.mdx#indexingorderbyticksautomaticallywhendatesareinvolved) + [Indexing.QueryClauseCache.Disabled](../../server/configuration/indexing-configuration.mdx#indexingqueryclausecachedisabled) + [Indexing.QueryClauseCache.RepeatedQueriesTimeFrameInSec](../../server/configuration/indexing-configuration.mdx#indexingqueryclausecacherepeatedqueriestimeframeinsec) + [Indexing.ScratchSpaceLimitInMb](../../server/configuration/indexing-configuration.mdx#indexingscratchspacelimitinmb) + [Indexing.Static.SearchEngineType](../../server/configuration/indexing-configuration.mdx#indexingstaticsearchenginetype) + [Indexing.Throttling.TimeIntervalInMs](../../server/configuration/indexing-configuration.mdx#indexingthrottlingtimeintervalinms) + [Indexing.TimeSinceLastQueryAfterWhichDeepCleanupCanBeExecutedInMin](../../server/configuration/indexing-configuration.mdx#indexingtimesincelastqueryafterwhichdeepcleanupcanbeexecutedinmin) + [Indexing.TransactionSizeLimitInMb](../../server/configuration/indexing-configuration.mdx#indexingtransactionsizelimitinmb) + + +## Indexing.CleanupIntervalInMin + +Time (in minutes) between auto-index cleanup. 
+
+- **Type**: `int`
+- **Default**: `10`
+- **Scope**: Server-wide only
+
+
+
+## Indexing.Corax.VectorSearch.MaxNumberOfThreadsForLocalEmbeddingsGeneration
+
+* Maximum number of threads that will be used for generating embeddings from text locally.
+
+* The default value, which is determined based on the number of processor cores,
+  is set by the constructor of class `IndexingConfiguration`:
+
+  * If the machine has 2 or fewer processor cores, only 1 thread is used.
+  * For machines with up to 8 cores, 2 threads are used.
+  * For machines with up to 16 cores, 4 threads are used.
+  * For machines with more than 16 cores, the value is set to 6 threads.
+- **Type**: `int`
+- **Default**: `DefaultValueSetInConstructor`
+- **Scope**: Server-wide only
+
+
+
+## Indexing.GlobalScratchSpaceLimitInMb
+
+* Maximum amount of scratch space in megabytes that is allowed to be used by all index storages per server.
+
+* After exceeding this limit, the indexes will complete their current indexing batches and force flush and sync storage environments.
+- **Type**: `int`
+- **Default**: `null` (no limit)
+- **Scope**: Server-wide only
+
+
+
+## Indexing.MaxNumberOfConcurrentlyRunningIndexes
+
+Set how many indexes can run concurrently on the server, to prevent overwhelming system resources and slowing down indexing.
+
+- **Type**: `int`
+- **Default**: `null` (no limit)
+- **MinValue**: 1
+- **Scope**: Server-wide only
+
+
+
+## Indexing.MaxTimeToWaitAfterFlushAndSyncWhenExceedingScratchSpaceLimitInSec
+
+Max time to wait in seconds when forcing the storage environment flush and sync after exceeding the scratch space limit.
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide only
+- **Alias:** `Indexing.MaxTimeToWaitAfterFlushAndSyncWhenExceedingScratchSpaceLimit`
+
+
+
+## Indexing.NuGetAllowPreReleasePackages
+
+Allow installation of NuGet prerelease packages.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+- **Aliases:**
+  * `Indexing.NuGetAllowPreleasePackages`
+  * `Indexing.NuGet.AllowPreReleasePackages`
+  * `Indexing.NuGet.AllowPreleasePackages`
+
+
+
+## Indexing.NuGetPackageSourceUrl
+
+Default NuGet source URL.
+
+- **Type**: `string`
+- **Default**: `https://api.nuget.org/v3/index.json`
+- **Scope**: Server-wide only
+- **Alias:** `Indexing.NuGet.PackageSourceUrl`
+
+
+
+## Indexing.NuGetPackagesPath
+
+Location of NuGet packages cache.
+
+- **Type**: `string`
+- **Default**: `Packages/NuGet`
+- **Scope**: Server-wide only
+- **Alias:** `Indexing.NuGet.PackagesPath`
+
+
+
+## Indexing.QueryClauseCache.ExpirationScanFrequencyInSec
+
+EXPERT ONLY:
+The frequency by which to scan the query clause cache for expired values.
+
+- **Type**: `int`
+- **Default**: `180`
+- **Scope**: Server-wide only
+
+
+
+## Indexing.QueryClauseCache.RepeatedQueriesCount
+
+EXPERT ONLY:
+The number of recent queries that we will keep to identify repeated queries, relevant for caching.
+
+- **Type**: `int`
+- **Default**: `512`
+- **Scope**: Server-wide only
+
+
+
+## Indexing.QueryClauseCache.SizeInMb
+
+EXPERT ONLY:
+
+* Maximum size that the query clause cache will utilize for caching partial query clauses,
+  defaulting to 10% of the system memory on 64-bit machines.
+
+* The default value, which is determined based on your platform details, is set by the constructor of class `IndexingConfiguration`.
+- **Type**: `int`
+- **Default**: `DefaultValueSetInConstructor`
+- **Scope**: Server-wide only
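+
+For example, a hypothetical `settings.json` fragment that caps concurrent indexing and total indexing scratch space server-wide (illustrative values only, not recommendations):
+
+```json
+{
+    "Indexing.MaxNumberOfConcurrentlyRunningIndexes": 4,
+    "Indexing.GlobalScratchSpaceLimitInMb": 4096
+}
+```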
+
+
+
+## Indexing.Auto.ArchivedDataProcessingBehavior
+
+The default processing behavior for archived documents in auto-indexes.
+
+- **Type**: `enum ArchivedDataProcessingBehavior`:
+  * `ExcludeArchived`: only non-archived documents are processed by the index.
+  * `IncludeArchived`: both archived and non-archived documents are processed by the index.
+  * `ArchivedOnly`: only archived documents are processed by the index.
+- **Default**: `ExcludeArchived`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.Auto.DeploymentMode
+
+Set the default deployment mode for auto indexes.
+
+- **Type**: `enum IndexDeploymentMode` (`Parallel`, `Rolling`)
+- **Default**: `Parallel`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.Auto.SearchEngineType
+
+Set the search engine to be used with auto-indexes.
+
+- **Type**: `enum SearchEngineType` (`Lucene`, `Corax`)
+- **Default**: `Lucene`
+- **Scope**: Server-wide, or per database
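+
+For example, a `settings.json` sketch that selects Corax for auto-indexes, together with [Indexing.Static.SearchEngineType](../../server/configuration/indexing-configuration.mdx#indexingstaticsearchenginetype) (described further below) for static indexes; illustrative only:
+
+```json
+{
+    "Indexing.Auto.SearchEngineType": "Corax",
+    "Indexing.Static.SearchEngineType": "Corax"
+}
+```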
+
+
+
+## Indexing.Disable
+
+Set whether to disable all indexes in the database.
+When set to `true`, all indexes in the database will be disabled.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.DisableQueryOptimizerGeneratedIndexes
+
+EXPERT ONLY:
+Disable query optimizer generated indexes (auto-indexes). Dynamic queries will not be supported.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.ErrorIndexStartupBehavior
+
+Set how faulty indexes should behave on database startup when they are loaded.
+By default they are not started.
+
+- **Type**: `enum ErrorIndexStartupBehaviorType` (`Default`, `Start`, `ResetAndStart`)
+- **Default**: `Default`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.History.NumberOfRevisions
+
+Number of index history revisions to keep per index.
+
+- **Type**: `int`
+- **Default**: `10`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.IndexStartupBehavior
+
+* Set how indexes should behave on database startup when they are loaded.
+  By default they are started immediately.
+
+* Setting this parameter can prevent slow index startup in scenarios where many indexes open and start processing concurrently, which may cause IO usage to max out system resources.
+- **Type**: `enum IndexStartupBehaviorType` (`Default`, `Immediate`, `Pause`, `Delay`)
+- **Default**: `Default`
+- **Scope**: Server-wide, or per database
+
+Optional values:
+
+- `Default`: Each index starts as soon as it is loaded.
+- `Immediate`: Same as Default.
+- `Pause`: Loads all indexes, but they are paused until manually started.
+- `Delay`: Delays starting index processes until all indexes are loaded.
+
+
+
+## Indexing.ResetMode
+
+The default mode of the index reset operation.
+
+- **Type**: `enum IndexResetMode` (`InPlace`, `SideBySide`)
+- **Default**: `InPlace`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.RunInMemory
+
+* Set if indexes should run purely in memory.
+
+* When running in memory:
+  * No index data is written to the disk, and if the server is restarted, all index data will be lost.
+  * Note that the index definition itself is kept on disk and remains unaffected by server restarts.
+  * This is mostly useful for testing or faster, non-persistent indexing.
+
+* If _Indexing.RunInMemory_ is not set explicitly,
+  then this configuration key will take the value of the core configuration key [RunInMemory](../../server/configuration/core-configuration.mdx#runinmemory).
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide, or per database
+
+Optional values:
+
+* `true` - indexing is run only in memory
+* `false` - the index data is stored on disk
+
+
+
+## Indexing.SkipDatabaseIdValidationOnIndexOpening
+
+EXPERT ONLY:
+Allow opening an index without checking whether the current Database ID matches the one for which the index was created.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.Static.ArchivedDataProcessingBehavior
+
+* Set the default processing behavior for archived documents in static indexes.
+* This setting applies only to static indexes that use _Documents_ as their data source.
+  It does not apply to indexes based on _Time Series_ or _Counters_, which default to `IncludeArchived`.
+- **Type**: `enum ArchivedDataProcessingBehavior`:
+  * `ExcludeArchived`: only non-archived documents are processed by the index.
+  * `IncludeArchived`: both archived and non-archived documents are processed by the index.
+  * `ArchivedOnly`: only archived documents are processed by the index.
+- **Default**: `ExcludeArchived`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.Static.DeploymentMode
+
+Set the default deployment mode for static indexes.
+
+- **Type**: `enum IndexDeploymentMode` (`Parallel`, `Rolling`)
+- **Default**: `Parallel`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.Static.RequireAdminToDeployJavaScriptIndexes
+
+Require database `Admin` [clearance](../../server/security/authorization/security-clearance-and-permissions.mdx) to deploy [JavaScript indexes](../../indexes/javascript-indexes.mdx).
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.TempPath
+
+* Use this setting to specify a different path for the indexes' temporary files.
+
+* By default, temporary files are created under the `Temp` directory inside the index data directory.
+  Learn more about RavenDB directory structure [here](../../server/storage/directory-structure.mdx).
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.TimeBeforeDeletionOfSupersededAutoIndexInSec
+
+Set the number of seconds to keep a superseded auto index.
+
+- **Type**: `int`
+- **Default**: `15`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.TimeToWaitBeforeDeletingAutoIndexMarkedAsIdleInHrs
+
+Set the number of hours the database should wait before deleting an auto-index that is marked as idle.
+
+- **Type**: `int`
+- **Default**: `72`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.TimeToWaitBeforeMarkingAutoIndexAsIdleInMin
+
+Set the number of minutes to wait before marking an auto index as idle.
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide, or per database
+
+
+
+## Indexing.AllowStringCompilation
+
+* When defining a [JavaScript index](../../indexes/javascript-indexes.mdx),
+  this option determines whether the JavaScript engine is allowed to compile code from strings at runtime,
+  using constructs such as `eval(...)` or `new Function(arg1, arg2, ..., functionBody)`.
+
+* A `JavaScriptException` is thrown if this option is disabled and such a construct is used.
+- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Analyzers.Default + +[Default analyzer](../../indexes/using-analyzers.mdx#ravendb) that will be used for fields. + +- **Type**: `string` +- **Default**: `LowerCaseKeywordAnalyzer` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Analyzers.Exact.Default + +[Default analyzer](../../indexes/using-analyzers.mdx#ravendb) that will be used for exact fields. + +- **Type**: `string` +- **Default**: `KeywordAnalyzer` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Analyzers.Search.Default + +[Default analyzer](../../indexes/using-analyzers.mdx#ravendb) that will be used for search fields. + +- **Type**: `string` +- **Default**: `RavenStandardAnalyzer` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.DocumentsLimitForCompressionDictionaryCreation + +Corax index compression max documents used for dictionary creation. + +- **Type**: `int` +- **Default**: `100_000` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.IncludeDocumentScore + +Include score value in the metadata when sorting by score. +Disabling this option could enhance query performance. + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.IncludeSpatialDistance + +Include spatial information in the metadata when sorting by distance. +Disabling this option could enhance query performance. + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.MaxAllocationsAtDictionaryTrainingInMb + +EXPERT ONLY: + +* The maximum amount of megabytes that we'll allocate for training indexing dictionaries. + +* The default value, which is determined based on your platform details, is set by the constructor of class `IndexingConfiguration`. +- **Type**: `int` +- **Default**: `DefaultValueSetInConstructor` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.MaxMemoizationSizeInMb + +The maximum amount of memory in megabytes that Corax can use for memoization during query processing. + +- **Type**: `int` +- **Default**: `512` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.Static.ComplexFieldIndexingBehavior + +* Set Corax's [default behavior](../../indexes/search-engine/corax.mdx#if-corax-encounters-a-complex-property-while-indexing) + when a static index is requested to index a complex JSON object. + + * `CoraxComplexFieldIndexingBehavior.Throw` - + Corax will throw a `NotSupportedInCoraxException` exception. + * `CoraxComplexFieldIndexingBehavior.Skip` - + Corax will skip indexing the complex field without throwing an exception. +- **Type**: `enum CoraxComplexFieldIndexingBehavior` (`Throw`, `Skip`) +- **Default**: `Throw` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.UnmanagedAllocationsBatchSizeLimitInMb + +* The maximum amount of unmanaged memory (in MB) that Corax can allocate during a single indexing batch. + When this limit is reached, the batch completes and indexing continues in a new batch. + +* The default value is set by the constructor of the `IndexingConfiguration` class and depends on the environment: + * If the machine is running in a 32-bit environment, + or if RavenDB is explicitly configured to use a 32-bit pager on a 64-bit system, the default is `128 MB`. 
+ * In all other cases (i.e., standard 64-bit environments), the default is `2048 MB`. +- **Type**: `int` +- **Default**: `DefaultValueSetInConstructor` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.VectorSearch.DefaultMinimumSimilarity + +* The default minimum similarity to use when making a vector search query. + +* Values can be between `0.0f` and `1.0f`. + A value closer to `1.0f` requires higher similarity between vectors, + while a value closer to `0.0f` allows for less similarity. +- **Type**: `float` +- **Default**: `0f` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForIndexing + +The default number of candidates (potential neighboring vectors) that RavenDB evaluates during vector indexing. + +- **Type**: `int` +- **Default**: `16` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.VectorSearch.DefaultNumberOfCandidatesForQuerying + +* The default maximum number of vectors that we will return from a graph search. +* The number of the resulting documents that correspond to these vectors may be: + * lower than this number - when multiple vectors originated from the same document. + * higher than this number - when the same vector is shared between multiple documents. +- **Type**: `int` +- **Default**: `16` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.VectorSearch.DefaultNumberOfEdges + +The default number of edges that will be created for a vector during vector indexing. + +- **Type**: `int` +- **Default**: `12` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.VectorSearch.MaximumConcurrentBatchesForHnswAcceleration + +EXPERT ONLY: +The maximum number of concurrent batches for HNSW distance computation acceleration. + +- **Type**: `int` +- **Default**: `512` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Corax.VectorSearch.OrderByScoreAutomatically + +Order by score automatically when `vector.search` is inside a query. + +- **Type**: `bool` +- **Default**: `true` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Encrypted.TransactionSizeLimitInMb + +* Transaction size limit in megabytes for _encrypted_ databases, after which an index will stop and complete the current batch. + +* The default value, which is determined based on your platform details, is set by the constructor of class `IndexingConfiguration`. +- **Type**: `int` +- **Default**: `DefaultValueSetInConstructor` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.IndexEmptyEntries + +* Set how the indexing process should handle documents that are missing fields. + +* When set to `true`, the indexing process will index documents even if they lack the fields that are supposed to be indexed. +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.IndexMissingFieldsAsNull + +* Set how the indexing process should handle fields that are missing. + +* When set to `true`, missing fields will be indexed with a `null` value. +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide, or per database, or per index + + + +## Indexing.Lucene.Analyzers.NGram.MaxGram + +* This configuration applies only to the Lucene indexing engine. + +* Largest n-gram to generate when NGram analyzer is used. 
+
+- **Type**: `int`
+- **Default**: `6`
+- **Scope**: Server-wide, or per database, or per index
+- **Alias:** `Indexing.Analyzers.NGram.MaxGram`
+
+
+
+## Indexing.Lucene.Analyzers.NGram.MinGram
+
+* This configuration applies only to the Lucene indexing engine.
+
+* Smallest n-gram to generate when NGram analyzer is used.
+- **Type**: `int`
+- **Default**: `2`
+- **Scope**: Server-wide, or per database, or per index
+- **Alias:** `Indexing.Analyzers.NGram.MinGram`
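+
+For example, a `settings.json` sketch that narrows the n-gram range used by the Lucene NGram analyzer (illustrative values only):
+
+```json
+{
+    "Indexing.Lucene.Analyzers.NGram.MinGram": 3,
+    "Indexing.Lucene.Analyzers.NGram.MaxGram": 5
+}
+```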
+
+
+
+## Indexing.Lucene.IndexInputType
+
+Lucene index input type.
+
+- **Type**: `enum LuceneIndexInputType` (`Standard`, `Buffered`)
+- **Default**: `Buffered`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.Lucene.LargeSegmentSizeToMergeInMb
+
+EXPERT ONLY:
+
+* This configuration applies only to the Lucene indexing engine.
+
+* The definition of a large segment in MB.
+  We won't merge more than [Indexing.Lucene.NumberOfLargeSegmentsToMergeInSingleBatch](../../server/configuration/indexing-configuration.mdx#indexinglucenenumberoflargesegmentstomergeinsinglebatch) in a single batch.
+
+* The default value, which is determined based on your platform details, is set by the constructor of class `IndexingConfiguration`.
+- **Type**: `int`
+- **Default**: `DefaultValueSetInConstructor`
+- **Scope**: Server-wide, or per database, or per index
+- **Alias:** `Indexing.LargeSegmentSizeToMergeInMb`
+
+
+
+## Indexing.Lucene.MaximumSizePerSegmentInMb
+
+EXPERT ONLY:
+
+* This configuration applies only to the Lucene indexing engine.
+
+* The maximum size in MB that we'll consider for segments merging.
+
+* The default value, which is determined based on your platform details, is set by the constructor of class `IndexingConfiguration`.
+- **Type**: `int`
+- **Default**: `DefaultValueSetInConstructor`
+- **Scope**: Server-wide, or per database, or per index
+- **Alias:** `Indexing.MaximumSizePerSegmentInMb`
+
+
+
+## Indexing.Lucene.MaxTimeForMergesToKeepRunningInSec
+
+EXPERT ONLY:
+
+* This configuration applies only to the Lucene indexing engine.
+
+* How long, in seconds, we will let merges run before we close the transaction.
+- **Type**: `int`
+- **Default**: `15`
+- **Scope**: Server-wide, or per database, or per index
+- **Alias:** `Indexing.MaxTimeForMergesToKeepRunningInSec`
+
+
+
+## Indexing.Lucene.MergeFactor
+
+EXPERT ONLY:
+
+* This configuration applies only to the Lucene indexing engine.
+
+* Set how often index segments are merged into larger ones.
+  The merge process will start when the number of segments in an index reaches this number.
+
+* With smaller values, less RAM is used while indexing, and searches on unoptimized indexes are faster, but indexing speed is slower.
+- **Type**: `int`
+- **Default**: `10`
+- **Scope**: Server-wide, or per database, or per index
+- **Alias:** `Indexing.MergeFactor`
+
+
+
+## Indexing.Lucene.NumberOfLargeSegmentsToMergeInSingleBatch
+
+EXPERT ONLY:
+
+* This configuration applies only to the Lucene indexing engine.
+
+* Number of large segments, as defined by [Indexing.Lucene.LargeSegmentSizeToMergeInMb](../../server/configuration/indexing-configuration.mdx#indexinglucenelargesegmentsizetomergeinmb), to merge in a single batch.
+- **Type**: `int`
+- **Default**: `2`
+- **Scope**: Server-wide, or per database, or per index
+- **Alias:** `Indexing.NumberOfLargeSegmentsToMergeInSingleBatch`
+
+
+
+## Indexing.Lucene.ReaderTermsIndexDivisor
+
+EXPERT ONLY:
+Control how many terms we'll keep in the cache for each field.
+Higher values reduce the memory usage at the expense of increased search time for each term.
+
+- **Type**: `int`
+- **Default**: `1`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.Lucene.UseCompoundFileInMerging
+
+EXPERT ONLY:
+
+* This configuration applies only to the Lucene indexing engine.
+
+* Use compound file in merging.
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide, or per database, or per index
+- **Alias:** `Indexing.UseCompoundFileInMerging`
+
+
+
+## Indexing.ManagedAllocationsBatchSizeLimitInMb
+
+Managed allocations limit in an indexing batch, after which the batch will complete and the index will continue in a new batch.
+
+- **Type**: `int`
+- **Default**: `2048`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.MapBatchSize
+
+Maximum number of documents to be processed by the index per indexing batch.
+
+- **Type**: `int?`
+- **Default**: `null` (no limit)
+- **MinValue**: `128`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.MapTimeoutAfterEtagReachedInMin
+
+* Number of minutes after which mapping will end even if there is more to map.
+
+* This will only be applied if we pass the last etag we saw in the collection when the batch was started.
+- **Type**: `int`
+- **Default**: `15`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.MapTimeoutInSec
+
+Number of seconds after which mapping will end even if there is more to map.
+Using the default value of `-1` will map everything possible in a single batch.
+
+- **Type**: `int`
+- **Default**: `-1`
+- **Scope**: Server-wide, or per database, or per index
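+
+For example, a `settings.json` sketch that bounds both the size and the duration of a map batch (illustrative values only):
+
+```json
+{
+    "Indexing.MapBatchSize": 65536,
+    "Indexing.MapTimeoutInSec": 120
+}
+```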
+
+
+
+## Indexing.MaxStepsForScript
+
+The maximum number of steps in the script execution of a JavaScript index.
+
+- **Type**: `int`
+- **Default**: `10_000`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.MaxTimeForDocumentTransactionToRemainOpenInSec
+
+Set for how many seconds indexing will keep a document transaction open.
+When this time is exceeded, the transaction will be closed and a new one will be opened.
+
+- **Type**: `int`
+- **Default**: `15`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.MaxTimeToWaitAfterFlushAndSyncWhenReplacingSideBySideIndexInSec
+
+Max time to wait when forcing the storage environment flush and sync when replacing a side-by-side index.
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.Metrics.Enabled
+
+Set whether indexing performance metrics will be gathered.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.MinNumberOfMapAttemptsAfterWhichBatchWillBeCanceledIfRunningLowOnMemory
+
+EXPERT ONLY:
+Set the minimum number of map attempts after which the batch will be canceled if running low on memory.
+
+- **Type**: `int`
+- **Default**: `512`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.MinimumTotalSizeOfJournalsToRunFlushAndSyncWhenReplacingSideBySideIndexInMb
+
+Minimum total size of journals, in megabytes, to run flush and sync when replacing a side-by-side index.
+
+- **Type**: `int`
+- **Default**: `512`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.NumberOfConcurrentStoppedBatchesIfRunningLowOnMemory
+
+EXPERT ONLY:
+Number of concurrent stopped batches if running low on memory.
+
+- **Type**: `int`
+- **Default**: `2`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.OrderByScoreAutomaticallyWhenBoostingIsInvolved
+
+Set whether query results will be automatically ordered by score when a boost factor is involved in the query.
+(A boost factor may be [assigned inside an index definition](../../indexes/boosting.mdx) or can be [applied at query time](../../client-api/session/querying/text-search/boost-search-results.mdx)).
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.OrderByTicksAutomaticallyWhenDatesAreInvolved
+
+Sort by ticks when a field contains dates.
+When sorting in descending order, null dates are returned at the end with this option enabled.
+
+- **Type**: `bool`
+- **Default**: `true`
+
+  **Note** that the default value for this configuration key has changed in version 6.0 from `false` to `true`.
+
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.QueryClauseCache.Disabled
+
+EXPERT ONLY:
+
+* Disable the query clause cache for a server, database, or a single index.
+
+* The default value is set by the constructor of class `IndexingConfiguration`.
+  It will be `true` if your core configuration key [Features.Availability](../../server/configuration/core-configuration.mdx#featuresavailability) is Not set to 'Experimental'.
+- **Type**: `bool`
+- **Default**: `DefaultValueSetInConstructor`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.QueryClauseCache.RepeatedQueriesTimeFrameInSec
+
+EXPERT ONLY:
+Queries that repeat within this time frame will be considered worth caching.
+
+- **Type**: `int`
+- **Default**: `300`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.ScratchSpaceLimitInMb
+
+* Amount of scratch space in megabytes that is allowed to be used by the index storage.
+
+* After exceeding this limit, the current indexing batch will complete and the index will force flush and sync the storage environment.
+- **Type**: `int`
+- **Default**: `null` (no limit)
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.Static.SearchEngineType
+
+Set the search engine to be used with static indexes.
+
+- **Type**: `enum SearchEngineType` (`Lucene`, `Corax`)
+- **Default**: `Lucene`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.Throttling.TimeIntervalInMs
+
+How long the index should delay processing after new work is detected, in milliseconds.
+
+- **Type**: `int`
+- **Default**: `null`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.TimeSinceLastQueryAfterWhichDeepCleanupCanBeExecutedInMin
+
+Set how many minutes to wait before deep cleaning an idle index.
+Deep cleanup reduces the cost of idle indexes.
+It might slow the first query after the deep cleanup; thereafter, queries return to normal performance.
+
+- **Type**: `int`
+- **Default**: `10`
+- **Scope**: Server-wide, or per database, or per index
+
+
+
+## Indexing.TransactionSizeLimitInMb
+
+Transaction size limit in megabytes after which an index will stop and complete the current batch.
+ +- **Type**: `int` +- **Default**: `null` (no limit) +- **Scope**: Server-wide, or per database, or per index + + diff --git a/versioned_docs/version-7.1/server/configuration/license-configuration.mdx b/versioned_docs/version-7.1/server/configuration/license-configuration.mdx new file mode 100644 index 0000000000..823d396655 --- /dev/null +++ b/versioned_docs/version-7.1/server/configuration/license-configuration.mdx @@ -0,0 +1,139 @@ +--- +title: "Configuration: License Options" +hide_table_of_contents: true +sidebar_label: License Configuration +sidebar_position: 11 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Configuration: License Options + +## License + +* When using this configuration key in _settings.json_ embed the license key as a **string**, e.g.: + `"License": "{ paste your license key including curly brackets here }"`. + +* When using this configuration key as an [environment variable](../../server/configuration/configuration-options.mdx#environment-variables) embed the JSON license key **object**. + +* If `License` is specified, it overrides the `License.Path` configuration. +- **Type**: `string` +- **Default**: `null` +- **Scope**: Server-wide only + + + +## License.Path + +Save the license key to a `license.json` file. +Provide the path to this file in the 'License.Path' configuration key: + + * Either the **full** path to the license file, e.g.: + `"License.Path": "D:\\RavenDB\\Server\\license.json"` + + * Or, a **relative** path to the license file from the Server folder, e.g.: + `"License.Path": "License\\license.json"` + (where 'License' folder is under the 'Server' folder) +- **Type**: `string` +- **Default**: `license.json` +- **Scope**: Server-wide only + + + +## License.Eula.Accepted + +Indicates if End-User License Agreement was accepted. + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide only + + + +## License.CanActivate + +EXPERT ONLY. +Indicates if license can be activated. + +- **Type**: `bool` +- **Default**: `true` +- **Scope**: Server-wide only + + + +## License.CanForceUpdate + +EXPERT ONLY. +Indicates if license can be updated from the License Server (api.ravendb.net). + +- **Type**: `bool` +- **Default**: `true` +- **Scope**: Server-wide only + + + +## License.CanRenewLicense / License.CanRenew + +EXPERT ONLY. +Indicates if license can be renewed from the License Server (api.ravendb.net). +Relevant only for Developer and Community licenses. + +- **Type**: `bool` +- **Default**: `true` +- **Scope**: Server-wide only + + + +## License.SkipLeasingErrorsLogging + +EXPERT ONLY. +Skip logging of lease license errors. + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide only + + + +## License.DisableAutoUpdate + +EXPERT ONLY. +Disable all updates of the license, from string, from path and from the License Server (api.ravendb.net). + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide only + + + +## License.DisableAutoUpdateFromApi + +EXPERT ONLY. +Disable automatic updates of the license from the License Server (api.ravendb.net). 
+You can still update the license by either:
+
+* Setting the [License](../../server/configuration/license-configuration.mdx#license) configuration
+* Setting the [License.Path](../../server/configuration/license-configuration.mdx#licensepath) configuration
+* Replacing the _license.json_ file on disk
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+
+## License.DisableLicenseSupportCheck
+
+EXPERT ONLY.
+Disable checking the license support options from the License Server (api.ravendb.net).
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/logs-configuration.mdx b/versioned_docs/version-7.1/server/configuration/logs-configuration.mdx
new file mode 100644
index 0000000000..9b7fb6264b
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/logs-configuration.mdx
@@ -0,0 +1,225 @@
+---
+title: "Configuration: Logs Options"
+hide_table_of_contents: true
+sidebar_label: Logs Configuration
+sidebar_position: 12
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Logs Options
+
+
+* The following configuration keys allow you to control logging behavior in a RavenDB server.
+  To learn more about RavenDB's logging, see [Logging](../../server/troubleshooting/logging.mdx).
+
+* In this page:
+  * RavenDB logging configuration keys:
+    [Logs.ArchiveAboveSizeInMb](../../server/configuration/logs-configuration.mdx#logsarchiveabovesizeinmb)
+    [Logs.ConfigPath](../../server/configuration/logs-configuration.mdx#logsconfigpath)
+    [Logs.EnableArchiveFileCompression](../../server/configuration/logs-configuration.mdx#logsenablearchivefilecompression)
+    [Logs.MaxArchiveDays](../../server/configuration/logs-configuration.mdx#logsmaxarchivedays)
+    [Logs.MaxArchiveFiles](../../server/configuration/logs-configuration.mdx#logsmaxarchivefiles)
+    [Logs.Microsoft.MinLevel](../../server/configuration/logs-configuration.mdx#logsmicrosoftminlevel)
+    [Logs.MinLevel](../../server/configuration/logs-configuration.mdx#logsminlevel)
+    [Logs.NuGet.AdditionalPackages](../../server/configuration/logs-configuration.mdx#logsnugetadditionalpackages)
+    [Logs.NuGet.AllowPreReleasePackages](../../server/configuration/logs-configuration.mdx#logsnugetallowprereleasepackages)
+    [Logs.NuGet.PackagesPath](../../server/configuration/logs-configuration.mdx#logsnugetpackagespath)
+    [Logs.NuGet.PackageSourceUrl](../../server/configuration/logs-configuration.mdx#logsnugetpackagesourceurl)
+    [Logs.Path](../../server/configuration/logs-configuration.mdx#logspath)
+    [Logs.ThrowConfigExceptions](../../server/configuration/logs-configuration.mdx#logsthrowconfigexceptions)
+  * Internal NLog configuration keys:
+    [Logs.Internal.Level](../../server/configuration/logs-configuration.mdx#logsinternallevel)
+    [Logs.Internal.LogToStandardError](../../server/configuration/logs-configuration.mdx#logsinternallogtostandarderror)
+    [Logs.Internal.LogToStandardOutput](../../server/configuration/logs-configuration.mdx#logsinternallogtostandardoutput)
+    [Logs.Internal.Path](../../server/configuration/logs-configuration.mdx#logsinternalpath)
+
+
+## Logs.ArchiveAboveSizeInMb
+
+The maximum size (in megabytes) a log file may reach before it is archived and logging is
directed to a new file. +This setting ensures that logs are stored in multiple smaller files rather than a few large ones. + +- **Type**: `int` +- **Default**: `128` +- **Min Value**: `16` +- **Scope**: Server-wide only + + + +## Logs.ConfigPath + +The path to an XML file that overrides all logging configuration parameters. +Set to `null` to apply the configuration params detailed in this section, +or provide a path to an XML configuration file whose content overrides these settings. + +- **Type**: `string` +- **Default**: `null` +- **Scope**: Server-wide only + + + +## Logs.EnableArchiveFileCompression + +Determines whether to compress archived log files. + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide only +- **Alias:** `Logs.Compress` + + + +## Logs.MaxArchiveDays + +The maximum number of days to retain an archived log file. +Set this value to the number of days after which log files will be deleted, +or set it to `null` to keep log files indefinitely. + +- **Type**: `int?` +- **Default**: `3` +- **Scope**: Server-wide only + + + +## Logs.MaxArchiveFiles + +The maximum number of archived log files to keep. +Set to `null` to keep log files indefinitely. + +- **Type**: `int?` +- **Default**: `null` +- **Min Value**: `0` +- **Scope**: Server-wide only + + + +## Logs.Microsoft.MinLevel + +The minimum logging level for Microsoft logs. + +- **Type**: `enum LogLevel` (`Trace`, `Debug`, `Info`, `Warn`, `Error`, `Fatal`, `Off`) +- **Default**: `LogLevel.Error` +- **Scope**: Server-wide only + + + +## Logs.MinLevel + +Determines the minimum logging level. +Log entries will be included starting from the specified MinLevel and higher. + +- **Type**: `enum LogLevel` (`Trace`, `Debug`, `Info`, `Warn`, `Error`, `Fatal`, `Off`) +- **Default**: `LogLevel.Info` +- **Scope**: Server-wide only + + + +## Logs.NuGet.AdditionalPackages + +A dictionary of additional NuGet packages to load during server startup for additional logging targets. +Each key represents the package name, and the corresponding value specifies the package version. + +- **Type**: `Dictionary` +- **Default**: `null` +- **Scope**: Server-wide only + + + +## Logs.NuGet.AllowPreReleasePackages + +Determines whether to allow installation of NuGet pre-release packages. + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide only + + + +## Logs.NuGet.PackagesPath + +The path where NuGet packages required by RavenDB are downloaded. + +- **Type**: `string` +- **Default**: `Packages/NuGet/Logging` +- **Scope**: Server-wide only + + + +## Logs.NuGet.PackageSourceUrl + +The default URL for the NuGet package source. + +- **Type**: `string` +- **Default**: `https://api.nuget.org/v3/index.json` +- **Scope**: Server-wide only + + + +## Logs.Path + +The path to the folder where RavenDB server log files are stored. +By default, it is the `Logs` folder under the server folder. + +- **Type**: `string` +- **Default**: `Logs` +- **Scope**: Server-wide only + + + +## Logs.ThrowConfigExceptions + +Determines whether to throw an exception if NLog detects a logging configuration error. + +- **Type**: `bool` +- **Default**: `false` +- **Scope**: Server-wide only + + + +## Logs.Internal.Level + +Determines the logging level for NLog's internal logs. + +- **Type**: `enum LogLevel` (`Trace`, `Debug`, `Info`, `Warn`, `Error`, `Fatal`, `Off`) +- **Default**: `LogLevel.Info` +- **Scope**: Server-wide only + + + +## Logs.Internal.LogToStandardError + +Determines whether to write NLog's internal logs to the standard error stream. 
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Logs.Internal.LogToStandardOutput
+
+Determines whether to write NLog's internal logs to the standard output stream.
+This can be useful when running the server to verify that it operates without issues.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Logs.Internal.Path
+
+The path to the folder where NLog's internal logs are written.
+This is useful for debugging NLog.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
diff --git a/versioned_docs/version-7.1/server/configuration/memory-configuration.mdx b/versioned_docs/version-7.1/server/configuration/memory-configuration.mdx
new file mode 100644
index 0000000000..defd4b6cc3
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/memory-configuration.mdx
@@ -0,0 +1,55 @@
+---
+title: "Configuration: Memory Options"
+hide_table_of_contents: true
+sidebar_label: Memory Configuration
+sidebar_position: 13
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Memory Options
+
+## Memory.LowMemoryLimitInMb
+
+The minimum amount of available memory RavenDB will attempt to achieve (free memory lower than this value will trigger low memory behavior). Value is in MB.
+
+- **Type**: `int`
+- **Default**: minimum of either `10% of total physical memory` or `2048`
+- **Scope**: Server-wide only
+
+
+## Memory.LowMemoryCommitLimitInMb
+
+The minimum amount of available committed memory RavenDB will attempt to achieve (free committed memory lower than this value will trigger low memory behavior). Value is in MB.
+
+- **Type**: `int`
+- **Default**: `512`
+- **Scope**: Server-wide only
+
+
+## Memory.MinimumFreeCommittedMemoryPercentage
+
+EXPERT: The minimum amount of committed memory that RavenDB will attempt to ensure remains available.
+Reducing this value too much may cause RavenDB to fail if there is not enough memory available for the operating system to handle operations.
+
+- **Type**: `float`
+- **Default**: `0.05f`
+- **Scope**: Server-wide only
+
+
+## Memory.MaxFreeCommittedMemoryToKeepInMb
+
+EXPERT: The maximum amount of committed memory that RavenDB will attempt to ensure remains available.
+Reducing this value too much may cause RavenDB to fail if there is not enough memory available for the operating system to handle operations. Value is in MB.
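+
+- **Type**: `int`
+- **Default**: `128`
+- **Scope**: Server-wide only
+
+For illustration, a minimal `settings.json` sketch tuning the low-memory keys described above (the values are examples only, not recommendations):
+
+<CodeBlock language="json">
+{`{
+    "Memory.LowMemoryLimitInMb": 1024,
+    "Memory.LowMemoryCommitLimitInMb": 512
+}`}
+</CodeBlock>
+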
diff --git a/versioned_docs/version-7.1/server/configuration/monitoring-configuration.mdx b/versioned_docs/version-7.1/server/configuration/monitoring-configuration.mdx
new file mode 100644
index 0000000000..daf532627d
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/monitoring-configuration.mdx
@@ -0,0 +1,437 @@
+---
+title: "Configuration: Monitoring Options"
+hide_table_of_contents: true
+sidebar_label: Monitoring Configuration
+sidebar_position: 14
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Monitoring Options
+
+
+* In this page:
+  * OpenTelemetry monitoring:
+    [Monitoring.OpenTelemetry.ConsoleExporter](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryconsoleexporter)
+    [Monitoring.OpenTelemetry.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryenabled)
+    [Monitoring.OpenTelemetry.Meters.AspNetCore.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersaspnetcoreenabled)
+    [Monitoring.OpenTelemetry.Meters.Runtime.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersruntimeenabled)
+    [Monitoring.OpenTelemetry.Meters.Server.CPUCredits.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersservercpucreditsenabled)
+    [Monitoring.OpenTelemetry.Meters.Server.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersserverenabled)
+    [Monitoring.OpenTelemetry.Meters.Server.GC.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersservergcenabled)
+    [Monitoring.OpenTelemetry.Meters.Server.General.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersservergeneralenabled)
+    [Monitoring.OpenTelemetry.Meters.Server.Requests.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersserverrequestsenabled)
+    [Monitoring.OpenTelemetry.Meters.Server.Resources.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersserverresourcesenabled)
+    [Monitoring.OpenTelemetry.Meters.Server.Storage.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersserverstorageenabled)
+    [Monitoring.OpenTelemetry.Meters.Server.TotalDatabases.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetrymetersservertotaldatabasesenabled)
+    [Monitoring.OpenTelemetry.OpenTelemetryProtocol.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolenabled)
+    [Monitoring.OpenTelemetry.OpenTelemetryProtocol.Endpoint](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolendpoint)
+    [Monitoring.OpenTelemetry.OpenTelemetryProtocol.ExportProcessorType](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolexportprocessortype)
+    [Monitoring.OpenTelemetry.OpenTelemetryProtocol.Headers](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolheaders)
+    [Monitoring.OpenTelemetry.OpenTelemetryProtocol.Protocol](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocolprotocol)
+    [Monitoring.OpenTelemetry.OpenTelemetryProtocol.Timeout](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryopentelemetryprotocoltimeout)
+    [Monitoring.OpenTelemetry.ServiceInstanceId](../../server/configuration/monitoring-configuration.mdx#monitoringopentelemetryserviceinstanceid)
+  * SNMP monitoring:
+    [Monitoring.Snmp.AuthenticationPassword](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpauthenticationpassword)
+    [Monitoring.Snmp.AuthenticationPassword.Secondary](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpauthenticationpasswordsecondary)
+    [Monitoring.Snmp.AuthenticationProtocol](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpauthenticationprotocol)
+    [Monitoring.Snmp.AuthenticationProtocol.Secondary](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpauthenticationprotocolsecondary)
+    [Monitoring.Snmp.AuthenticationUser](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpauthenticationuser)
+    [Monitoring.Snmp.AuthenticationUser.Secondary](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpauthenticationusersecondary)
+    [Monitoring.Snmp.Community](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpcommunity)
+    [Monitoring.Snmp.DisableTimeWindowChecks](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpdisabletimewindowchecks)
+    [Monitoring.Snmp.Enabled](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpenabled)
+    [Monitoring.Snmp.Port](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpport)
+    [Monitoring.Snmp.PrivacyPassword](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpprivacypassword)
+    [Monitoring.Snmp.PrivacyPassword.Secondary](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpprivacypasswordsecondary)
+    [Monitoring.Snmp.PrivacyProtocol](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpprivacyprotocol)
+    [Monitoring.Snmp.PrivacyProtocol.Secondary](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpprivacyprotocolsecondary)
+    [Monitoring.Snmp.SupportedVersions](../../server/configuration/monitoring-configuration.mdx#monitoringsnmpsupportedversions)
+  * Other monitoring:
+    [Monitoring.Cpu.Exec](../../server/configuration/monitoring-configuration.mdx#monitoringcpuexec)
+    [Monitoring.Cpu.Exec.Arguments](../../server/configuration/monitoring-configuration.mdx#monitoringcpuexecarguments)
+    [Monitoring.Disk.ReadStatsDebounceTimeInMs](../../server/configuration/monitoring-configuration.mdx#monitoringdiskreadstatsdebouncetimeinms)
+
+
+## Monitoring.OpenTelemetry.ConsoleExporter
+
+Indicates if metrics should be exported to the console output.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Enabled
+
+Indicates if OpenTelemetry is enabled or not.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Meters.AspNetCore.Enabled
+
+Indicates if AspNetCore metrics are enabled or not.
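+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+As an illustrative sketch, enabling OpenTelemetry together with a couple of the meters on this page could look like this in `settings.json` (values are examples only):
+
+<CodeBlock language="json">
+{`{
+    "Monitoring.OpenTelemetry.Enabled": true,
+    "Monitoring.OpenTelemetry.ConsoleExporter": true,
+    "Monitoring.OpenTelemetry.Meters.AspNetCore.Enabled": true
+}`}
+</CodeBlock>
+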
+## Monitoring.OpenTelemetry.Meters.Runtime.Enabled
+
+Indicates if Runtime metrics are enabled or not.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Meters.Server.CPUCredits.Enabled
+
+Expose metrics related to CPU credits.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Meters.Server.Enabled
+
+Indicates if RavenDB's OpenTelemetry metrics are enabled or not.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Meters.Server.GC.Enabled
+
+Expose metrics related to GC.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Meters.Server.General.Enabled
+
+Expose metrics related to general information about the cluster and its licensing.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Meters.Server.Requests.Enabled
+
+Expose metrics related to requests.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Meters.Server.Resources.Enabled
+
+Expose metrics related to resource usage.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Meters.Server.Storage.Enabled
+
+Expose metrics related to server storage.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.Meters.Server.TotalDatabases.Enabled
+
+Expose metrics related to aggregated database statistics.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.OpenTelemetryProtocol.Enabled
+
+Indicates if metrics should be exported via the OpenTelemetry protocol.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.OpenTelemetryProtocol.Endpoint
+
+The endpoint to which the OpenTelemetry Protocol exporter should send data.
+
+- **Type**: `string`
+- **Default**: `null` (internal OTLP default settings)
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.OpenTelemetryProtocol.ExportProcessorType
+
+OpenTelemetryProtocol export processor type.
+
+- **Type**: `enum ExportProcessorType` (Simple | Batch)
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.OpenTelemetryProtocol.Headers
+
+OpenTelemetryProtocol custom headers.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.OpenTelemetryProtocol.Protocol
+
+Defines the protocol that OpenTelemetryProtocol should use to send data.
+
+- **Type**: `enum OtlpExportProtocol` (Grpc | HttpProtobuf)
+- **Default**: `null` (internal OTLP default settings)
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.OpenTelemetryProtocol.Timeout
+
+OpenTelemetryProtocol timeout value.
+
+- **Type**: `int?`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Monitoring.OpenTelemetry.ServiceInstanceId
+
+* OpenTelemetry monitoring requires a service instance ID for initialization.
+  You can set the OpenTelemetry service instance ID using this configuration option.
+
+* If this configuration key is not set, RavenDB will default to using the server's public URL hostname.
+  If the public hostname is unavailable, RavenDB will use the node tag as the identifier.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.AuthenticationPassword
+
+Authentication password used for SNMP v3 authentication.
+When set to `null`, the value from 'Monitoring.Snmp.Community' is used.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.AuthenticationPassword.Secondary
+
+Authentication password used by the secondary user for SNMP v3 authentication.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.AuthenticationProtocol
+
+Authentication protocol used for SNMP v3 authentication.
+
+- **Type**: `SnmpAuthenticationProtocol`
+- **Default**: `SHA1`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.AuthenticationProtocol.Secondary
+
+Authentication protocol used by the secondary user for SNMP v3 authentication.
+
+- **Type**: `SnmpAuthenticationProtocol`
+- **Default**: `SHA1`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.AuthenticationUser
+
+Authentication user used for SNMP v3 authentication.
+
+- **Type**: `string`
+- **Default**: `"ravendb"`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.AuthenticationUser.Secondary
+
+Secondary authentication user used for SNMP v3 authentication.
+
+- **Type**: `string`
+- **Default**: `null (disabled)`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.Community
+
+Community string used for SNMP v2c authentication.
+
+- **Type**: `string`
+- **Default**: `"ravendb"`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.DisableTimeWindowChecks
+
+EXPERT ONLY.
+Disables time window checks, which are problematic for some SNMP engines.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.Enabled
+
+Indicates if the SNMP endpoint is enabled or not.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.Port
+
+The port on which the SNMP listener will be active.
+
+- **Type**: `int`
+- **Default**: `161`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.PrivacyPassword
+
+Privacy password used for SNMP v3 privacy.
+
+- **Type**: `string`
+- **Default**: `"ravendb"`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.PrivacyPassword.Secondary
+
+Privacy password used by the secondary user for SNMP v3 privacy.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.PrivacyProtocol
+
+Privacy protocol used for SNMP v3 privacy.
+
+- **Type**: `SnmpPrivacyProtocol`
+- **Default**: `SnmpPrivacyProtocol.None`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.PrivacyProtocol.Secondary
+
+Privacy protocol used by the secondary user for SNMP v3 privacy.
+
+- **Type**: `SnmpPrivacyProtocol`
+- **Default**: `SnmpPrivacyProtocol.None`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Snmp.SupportedVersions
+
+List of supported SNMP versions. Values must be semicolon-separated.
+
+- **Type**: `string[]`
+- **Default**: `V2C;V3`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Cpu.Exec
+
+A command or executable that writes machine CPU usage and process CPU usage to its standard output.
+If specified, RavenDB will use this information for monitoring CPU usage.
+Note: the write to standard output should be unbuffered to work properly.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Cpu.Exec.Arguments
+
+The command line arguments for the 'Monitoring.Cpu.Exec' command or executable.
+The arguments must be escaped for the command line.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Monitoring.Disk.ReadStatsDebounceTimeInMs
+
+The minimum interval between measurements used to calculate the disk stats.
+
+- **Type**: `TimeSetting`
+- **Default**: `1000`
+- **Scope**: Server-wide only
+
diff --git a/versioned_docs/version-7.1/server/configuration/patching-configuration.mdx b/versioned_docs/version-7.1/server/configuration/patching-configuration.mdx
new file mode 100644
index 0000000000..8a4f7316bd
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/patching-configuration.mdx
@@ -0,0 +1,60 @@
+---
+title: "Configuration: Patching"
+hide_table_of_contents: true
+sidebar_label: Patching Configuration
+sidebar_position: 15
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Patching
+
+
+* The following configuration options control the behavior of the **JavaScript engine** during patch operations.
+* Learn more about patching in:
+  * [Single document patch operations](../../client-api/operations/patching/single-document.mdx)
+  * [Set-based patch operations](../../client-api/operations/patching/set-based.mdx)
+  * [Apply patching from the Studio](../../studio/database/documents/patch-view.mdx)
+* In this article:
+  * [Patching.AllowStringCompilation](../../server/configuration/patching-configuration.mdx#patchingallowstringcompilation)
+  * [Patching.MaxStepsForScript](../../server/configuration/patching-configuration.mdx#patchingmaxstepsforscript)
+  * [Patching.StrictMode](../../server/configuration/patching-configuration.mdx#patchingstrictmode)
+
+
+## Patching.AllowStringCompilation
+
+* Determines whether the JavaScript engine is allowed to compile code from strings at runtime,
+  using constructs such as `eval(...)` or `new Function(arg1, arg2, ..., functionBody)`.
+
+* A `JavaScriptException` is thrown if this option is disabled and such a construct is used.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide or per database
+
+
+## Patching.MaxStepsForScript
+
+Specifies the maximum number of execution steps a patch script can perform.
+A `Jint.Runtime.StatementsCountOverflowException` is thrown if the script exceeds this number.
+
+- **Type**: `int`
+- **Default**: `10_000`
+- **Scope**: Server-wide or per database
+
+
+## Patching.StrictMode
+
+Enables strict mode in the JavaScript engine used during patching.
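+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide or per database
+
+For illustration, a `settings.json` sketch combining the patching keys above (the values are examples only; since these keys are database-scoped as well, they can likewise be set per database):
+
+<CodeBlock language="json">
+{`{
+    "Patching.MaxStepsForScript": 20000,
+    "Patching.AllowStringCompilation": false,
+    "Patching.StrictMode": true
+}`}
+</CodeBlock>
+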
diff --git a/versioned_docs/version-7.1/server/configuration/performance-hints-configuration.mdx b/versioned_docs/version-7.1/server/configuration/performance-hints-configuration.mdx
new file mode 100644
index 0000000000..a07e423094
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/performance-hints-configuration.mdx
@@ -0,0 +1,127 @@
+---
+title: "Configuration: Performance Hints Options"
+hide_table_of_contents: true
+sidebar_label: Performance Hints Configuration
+sidebar_position: 16
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Performance Hints Options
+
+
+* In this page:
+  * [PerformanceHints.Documents.HugeDocumentsCollectionSize](../../server/configuration/performance-hints-configuration.mdx#performancehintsdocumentshugedocumentscollectionsize)
+  * [PerformanceHints.Documents.HugeDocumentSizeInMb](../../server/configuration/performance-hints-configuration.mdx#performancehintsdocumentshugedocumentsizeinmb)
+  * [PerformanceHints.Indexing.AlertWhenSourceDocumentIncludedInOutput](../../server/configuration/performance-hints-configuration.mdx#performancehintsindexingalertwhensourcedocumentincludedinoutput)
+  * [PerformanceHints.Indexing.MaxDepthOfRecursionInLinqSelect](../../server/configuration/performance-hints-configuration.mdx#performancehintsindexingmaxdepthofrecursioninlinqselect)
+  * [PerformanceHints.Indexing.MaxIndexOutputsPerDocument](../../server/configuration/performance-hints-configuration.mdx#performancehintsindexingmaxindexoutputsperdocument)
+  * [PerformanceHints.Indexing.MaxNumberOfLoadsPerReference](../../server/configuration/performance-hints-configuration.mdx#performancehintsindexingmaxnumberofloadsperreference)
+  * [PerformanceHints.MaxNumberOfResults](../../server/configuration/performance-hints-configuration.mdx#performancehintsmaxnumberofresults)
+  * [PerformanceHints.Memory.MinSwapSizeInMb](../../server/configuration/performance-hints-configuration.mdx#performancehintsmemoryminswapsizeinmb)
+  * [PerformanceHints.TooLongRequestThresholdInSec](../../server/configuration/performance-hints-configuration.mdx#performancehintstoolongrequestthresholdinsec)
+
+
+## PerformanceHints.Documents.HugeDocumentsCollectionSize
+
+The maximum size of the huge documents collection.
+
+- **Type**: `int`
+- **Default**: `100`
+- **Scope**: Server-wide or per database
+
+
+## PerformanceHints.Documents.HugeDocumentSizeInMb
+
+The size of a document above which it is added to the huge documents collection.
+Value is in MB.
+
+- **Type**: `int`
+- **Default**: `5`
+- **Scope**: Server-wide or per database
+
+
+## PerformanceHints.Indexing.AlertWhenSourceDocumentIncludedInOutput
+
+Alert when a source document is indexed as a field.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide or per database
+
+
+## PerformanceHints.Indexing.MaxDepthOfRecursionInLinqSelect
+
+Maximum depth of recursion in a LINQ Select clause.
+
+- **Type**: `int`
+- **Default**: `32`
+- **Scope**: Server-wide or per database
+
+
+## PerformanceHints.Indexing.MaxIndexOutputsPerDocument
+
+The maximum number of index outputs per document after which we will create a performance hint.
+
+- **Type**: `int`
+- **Default**: `1024`
+- **Scope**: Server-wide or per database
+
+
+## PerformanceHints.Indexing.MaxNumberOfLoadsPerReference
+
+The maximum number of `LoadDocument()` / `LoadCompareExchangeValue()` calls per document/compare-exchange reference value,
+after which we will create a performance hint.
+
+- **Type**: `int`
+- **Default**: `1024`
+- **Scope**: Server-wide or per database
+
+
+## PerformanceHints.MaxNumberOfResults
+
+The maximum number of query results after which we will create a performance hint.
+
+- **Type**: `int`
+- **Default**: `2048`
+- **Scope**: Server-wide or per database
+
+
+## PerformanceHints.Memory.MinSwapSizeInMb
+
+* The minimum swap size (Linux only), in megabytes.
+  If the swap size is below this value, a notification will be triggered.
+
+* The default value is set by the constructor of class `PerformanceHintsConfiguration`:
+  * If the total physical memory is less than 8 GB,
+    the default value is set to 1 GB.
+  * If the total physical memory is greater than or equal to 8 GB,
+    the default value is set to the smaller value between half of the total physical memory and 8 GB.
+
+- **Type**: `int`
+- **Default**: `DefaultValueSetInConstructor`
+- **Scope**: Server-wide only
+
+
+## PerformanceHints.TooLongRequestThresholdInSec
+
+Request latency threshold before the server will issue a performance hint.
+Value is in seconds.
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide or per database
+
diff --git a/versioned_docs/version-7.1/server/configuration/query-configuration.mdx b/versioned_docs/version-7.1/server/configuration/query-configuration.mdx
new file mode 100644
index 0000000000..68c0b9d8cc
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/query-configuration.mdx
@@ -0,0 +1,25 @@
+---
+title: "Configuration: Query Options"
+hide_table_of_contents: true
+sidebar_label: Query Configuration
+sidebar_position: 17
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Query Options
+
+## Query.MaxClauseCount
+
+Maximum number of clauses that a Lucene query can process.
+
+- **Type**: `int`
+- **Default**: `null` (use Lucene default - 1024)
+- **Scope**: Server-wide only
+
diff --git a/versioned_docs/version-7.1/server/configuration/queue-sink-configuration.mdx b/versioned_docs/version-7.1/server/configuration/queue-sink-configuration.mdx
new file mode 100644
index 0000000000..cc73861176
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/queue-sink-configuration.mdx
@@ -0,0 +1,37 @@
+---
+title: "Configuration: Queue Sink"
+hide_table_of_contents: true
+sidebar_label: Queue Sink Configuration
+sidebar_position: 18
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Queue Sink
+
+## QueueSink.MaxBatchSize
+
+The maximum number of pulled messages consumed in a single batch.
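+
+- **Default**: `8192`
+- **Scope**: [Server-wide](../../server/configuration/configuration-options.mdx#settingsjson)
+  or [per database](../../studio/database/settings/database-settings.mdx#view-database-settings)
+
+As a hedged sketch, lowering the batch size server-wide in `settings.json` might look like this (the value is an example only; see also the fallback key below):
+
+<CodeBlock language="json">
+{`{
+    "QueueSink.MaxBatchSize": 4096
+}`}
+</CodeBlock>
+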
+## QueueSink.MaxFallbackTimeInSec
+
+The maximum number of seconds the queue sink process will stay in fallback
+mode (i.e., the process is suspended) after a connection failure.
+
+- **Default**: `15*60`
+- **TimeUnit**: `TimeUnit.Seconds`
+- **Scope**: [Server-wide](../../server/configuration/configuration-options.mdx#settingsjson)
+  or [per database](../../studio/database/settings/database-settings.mdx#view-database-settings)
+
diff --git a/versioned_docs/version-7.1/server/configuration/replication-configuration.mdx b/versioned_docs/version-7.1/server/configuration/replication-configuration.mdx
new file mode 100644
index 0000000000..5b633fd8bd
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/replication-configuration.mdx
@@ -0,0 +1,78 @@
+---
+title: "Configuration: Replication Options"
+hide_table_of_contents: true
+sidebar_label: Replication Configuration
+sidebar_position: 19
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Replication Options
+
+## Replication.ActiveConnectionTimeoutInSec
+
+Threshold in seconds under which an incoming replication connection is considered active.
+If an incoming connection receives messages within this time-span, a new connection coming
+from the same source will be rejected (as the existing connection is considered active).
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide or per database
+
+
+## Replication.ReplicationMinimalHeartbeatInSec
+
+Minimum time in seconds before sending another heartbeat.
+
+- **Type**: `int`
+- **Default**: `15`
+- **Scope**: Server-wide or per database
+
+
+## Replication.RetryReplicateAfterInSec
+
+This option determines how often the queue for retry attempts is updated.
+It does _not_ determine the timeout between retry attempts. To configure that,
+use `Replication.RetryMaxTimeoutInSec` below.
+
+- **Type**: `int`
+- **Default**: `15`
+- **Scope**: Server-wide or per database
+
+
+## Replication.RetryMaxTimeoutInSec
+
+Maximum timeout in seconds for successive retry attempts.
+If a replication fails, the server will retry after a timeout, and will continue to retry until it succeeds.
+The timeout value increases between each attempt, so the attempts become less frequent.
+The timeout continues to increase until it reaches this maximum value.
+
+- **Type**: `int`
+- **Default**: `300`
+- **Scope**: Server-wide or per database
+
+
+## Replication.MaxItemsCount
+
+Maximum number of items sent in a single batch during replication.
+If set to `null`, the number of items in the batch is not limited.
+
+- **Type**: `int`
+- **Default**: `16 * 1024`
+- **Scope**: Server-wide or per database
+
+
+## Replication.MaxSizeToSendInMb
+
+Maximum size in MB of a single batch sent during replication.
+If set to `null`, the size of the batch is not limited.
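+
+- **Type**: `int`
+- **Default**: `64`
+- **Scope**: Server-wide or per database
+
+For illustration, capping replication batches via `settings.json` might look like this (values are examples only, not recommendations):
+
+<CodeBlock language="json">
+{`{
+    "Replication.MaxItemsCount": 8192,
+    "Replication.MaxSizeToSendInMb": 32
+}`}
+</CodeBlock>
+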
diff --git a/versioned_docs/version-7.1/server/configuration/security-configuration.mdx b/versioned_docs/version-7.1/server/configuration/security-configuration.mdx
new file mode 100644
index 0000000000..e224c97f47
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/security-configuration.mdx
@@ -0,0 +1,510 @@
+---
+title: "Configuration: Security"
+hide_table_of_contents: true
+sidebar_label: Security Configuration
+sidebar_position: 20
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Security
+
+
+* The following configuration keys allow you to control the desired level of security in a RavenDB server.
+  To learn more about RavenDB's security features, see this [security overview](../../server/security/overview.mdx).
+
+* In this page:
+  * Security.AuditLog:
+    [Security.AuditLog.EnableArchiveFileCompression](../../server/configuration/security-configuration.mdx#securityauditlogenablearchivefilecompression)
+    [Security.AuditLog.FolderPath](../../server/configuration/security-configuration.mdx#securityauditlogfolderpath)
+    [Security.AuditLog.ArchiveAboveSizeInMb](../../server/configuration/security-configuration.mdx#securityauditlogarchiveabovesizeinmb)
+    [Security.AuditLog.MaxArchiveDays](../../server/configuration/security-configuration.mdx#securityauditlogmaxarchivedays)
+    [Security.AuditLog.MaxArchiveFiles](../../server/configuration/security-configuration.mdx#securityauditlogmaxarchivefiles)
+  * Security.Certificate:
+    [Security.Certificate.Change.Exec](../../server/configuration/security-configuration.mdx#securitycertificatechangeexec)
+    [Security.Certificate.Change.Exec.Arguments](../../server/configuration/security-configuration.mdx#securitycertificatechangeexecarguments)
+    [Security.Certificate.Exec](../../server/configuration/security-configuration.mdx#securitycertificateexec)
+    [Security.Certificate.Exec.TimeoutInSec](../../server/configuration/security-configuration.mdx#securitycertificateexectimeoutinsec)
+    [Security.Certificate.ExpiringThresholdInDays](../../server/configuration/security-configuration.mdx#securitycertificateexpiringthresholdindays)
+    [Security.Certificate.LetsEncrypt.Email](../../server/configuration/security-configuration.mdx#securitycertificateletsencryptemail)
+    [Security.Certificate.Load.Exec](../../server/configuration/security-configuration.mdx#securitycertificateloadexec)
+    [Security.Certificate.Load.Exec.Arguments](../../server/configuration/security-configuration.mdx#securitycertificateloadexecarguments)
+    [Security.Certificate.Password](../../server/configuration/security-configuration.mdx#securitycertificatepassword)
+    [Security.Certificate.Path](../../server/configuration/security-configuration.mdx#securitycertificatepath)
+    [Security.Certificate.Renew.Exec](../../server/configuration/security-configuration.mdx#securitycertificaterenewexec)
+    [Security.Certificate.Renew.Exec.Arguments](../../server/configuration/security-configuration.mdx#securitycertificaterenewexecarguments)
+    [Security.Certificate.Validation.Exec](../../server/configuration/security-configuration.mdx#securitycertificatevalidationexec)
+    [Security.Certificate.Validation.Exec.Arguments](../../server/configuration/security-configuration.mdx#securitycertificatevalidationexecarguments)
+    [Security.Certificate.Validation.Exec.TimeoutInSec](../../server/configuration/security-configuration.mdx#securitycertificatevalidationexectimeoutinsec)
+    [Security.Certificate.Validation.KeyUsages](../../server/configuration/security-configuration.mdx#securitycertificatevalidationkeyusages)
+  * Security.Csrf:
+    [Security.Csrf.AdditionalOriginHeaders](../../server/configuration/security-configuration.mdx#securitycsrfadditionaloriginheaders)
+    [Security.Csrf.Enabled](../../server/configuration/security-configuration.mdx#securitycsrfenabled)
+    [Security.Csrf.TrustedOrigins](../../server/configuration/security-configuration.mdx#securitycsrftrustedorigins)
+  * Security.MasterKey:
+    [Security.MasterKey.Exec](../../server/configuration/security-configuration.mdx#securitymasterkeyexec)
+    [Security.MasterKey.Exec.Arguments](../../server/configuration/security-configuration.mdx#securitymasterkeyexecarguments)
+    [Security.MasterKey.Exec.TimeoutInSec](../../server/configuration/security-configuration.mdx#securitymasterkeyexectimeoutinsec)
+    [Security.MasterKey.Path](../../server/configuration/security-configuration.mdx#securitymasterkeypath)
+  * Security.TwoFactor:
+    [Security.TwoFactor.DefaultSessionDurationInMin](../../server/configuration/security-configuration.mdx#securitytwofactordefaultsessiondurationinmin)
+    [Security.TwoFactor.MaxSessionDurationInMin](../../server/configuration/security-configuration.mdx#securitytwofactormaxsessiondurationinmin)
+  * Certificate and issuer validation:
+    [Security.WellKnownCertificates.Admin](../../server/configuration/security-configuration.mdx#securitywellknowncertificatesadmin)
+    [Security.WellKnownIssuerHashes.Admin](../../server/configuration/security-configuration.mdx#securitywellknownissuerhashesadmin)
+    [Security.WellKnownIssuers.Admin](../../server/configuration/security-configuration.mdx#securitywellknownissuersadmin)
+    [Security.WellKnownIssuers.Admin.ValidateCertificateNames](../../server/configuration/security-configuration.mdx#securitywellknownissuersadminvalidatecertificatenames)
+  * Other:
+    [Security.DisableHsts](../../server/configuration/security-configuration.mdx#securitydisablehsts)
+    [Security.DisableHttpsRedirection](../../server/configuration/security-configuration.mdx#securitydisablehttpsredirection)
+    [Security.DoNotConsiderMemoryLockFailureAsCatastrophicError](../../server/configuration/security-configuration.mdx#securitydonotconsidermemorylockfailureascatastrophicerror)
+    [Security.TlsCipherSuites](../../server/configuration/security-configuration.mdx#securitytlsciphersuites)
+    [Security.UnsecuredAccessAllowed](../../server/configuration/security-configuration.mdx#securityunsecuredaccessallowed)
+
+
+## Security.AuditLog.EnableArchiveFileCompression
+
+Determines whether to compress the audit log files.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+- **Alias:** `Security.AuditLog.Compress`
+
+
+## Security.AuditLog.FolderPath
+
+The folder path where RavenDB stores audit log files.
+Setting the path enables writing to the audit log.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.AuditLog.ArchiveAboveSizeInMb
+
+The largest size (in megabytes) that an audit log file may reach
+before it is archived and logging is directed to a new file.
+
+- **Type**: `Size`
+- **Default**: `128`
+- **MinValue**: `16`
+- **Scope**: Server-wide only
+
+
+## Security.AuditLog.MaxArchiveDays
+
+The maximum number of days that an archived audit log file is kept.
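+
+- **Type**: `int?`
+- **Default**: `3`
+- **Scope**: Server-wide only
+
+As an illustrative sketch, enabling the audit log with a longer retention might look like this in `settings.json` (the folder path is a placeholder; values are examples only):
+
+<CodeBlock language="json">
+{`{
+    "Security.AuditLog.FolderPath": "/var/log/ravendb/audit",
+    "Security.AuditLog.MaxArchiveDays": 30
+}`}
+</CodeBlock>
+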
+## Security.AuditLog.MaxArchiveFiles
+
+The maximum number of archived audit log files to keep.
+Set to `null` to keep archived audit log files indefinitely.
+
+- **Type**: `int?`
+- **Default**: `null`
+- **Min Value**: `0`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Change.Exec
+
+A command or executable that handles cluster certificate changes.
+This executable allows you to implement your own custom logic for persisting the new certificate on all nodes.
+
+Note: it will only be triggered if [Security.Certificate.Path](../../server/configuration/security-configuration.mdx#securitycertificatepath) is not defined.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Change.Exec.Arguments
+
+The command line arguments for the [Security.Certificate.Change.Exec](../../server/configuration/security-configuration.mdx#securitycertificatechangeexec) command or executable.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Exec
+
+Deprecated.
+Use [Security.Certificate.Load.Exec](../../server/configuration/security-configuration.mdx#securitycertificateloadexec)
+along with [Security.Certificate.Renew.Exec](../../server/configuration/security-configuration.mdx#securitycertificaterenewexec)
+and [Security.Certificate.Change.Exec](../../server/configuration/security-configuration.mdx#securitycertificatechangeexec) instead.
+
+
+## Security.Certificate.Exec.TimeoutInSec
+
+* The number of seconds to wait for the certificate executables to exit.
+* Applies to:
+  * [Security.Certificate.Load.Exec](../../server/configuration/security-configuration.mdx#securitycertificateloadexec)
+  * [Security.Certificate.Renew.Exec](../../server/configuration/security-configuration.mdx#securitycertificaterenewexec)
+  * [Security.Certificate.Change.Exec](../../server/configuration/security-configuration.mdx#securitycertificatechangeexec)
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.ExpiringThresholdInDays
+
+The number of days before certificate expiration when it will be considered _expiring_.
+
+- **Type**: `int`
+- **Default**: `14`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.LetsEncrypt.Email
+
+The email address associated with the Let's Encrypt certificate.
+Used for renewal requests.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Load.Exec
+
+* A command or executable that provides the `.pfx` cluster certificate when invoked by RavenDB.
+  If specified, RavenDB will use HTTPS/SSL for all network activities.
+
+* The [Security.Certificate.Path](../../server/configuration/security-configuration.mdx#securitycertificatepath) setting takes precedence over this executable.
+
+* Learn more in [get certificate via loader](../../server/security/authentication/certificate-configuration.mdx#with-logic-foreign-to-ravendb-or-external-certificate-storage).
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Load.Exec.Arguments
+
+The command line arguments for the [Security.Certificate.Load.Exec](../../server/configuration/security-configuration.mdx#securitycertificateloadexec) command or executable.
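+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+For illustration, wiring up an external certificate loader might look like this in `settings.json` — the script path and its arguments below are placeholders, not a RavenDB-provided tool:
+
+<CodeBlock language="json">
+{`{
+    "Security.Certificate.Load.Exec": "/usr/local/bin/load-cluster-cert.sh",
+    "Security.Certificate.Load.Exec.Arguments": "--environment production"
+}`}
+</CodeBlock>
+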
+## Security.Certificate.Password
+
+The (optional) password of the .pfx certificate file.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Path
+
+The path to the `.pfx` certificate file. If specified, RavenDB will use HTTPS/SSL for all network activities.
+Certificate setting priority order:
+1. Path
+2. Executable
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Renew.Exec
+
+* A command or executable that handles automatic renewals, providing a renewed `.pfx` cluster certificate.
+
+* The [leader node](../../server/clustering/rachis/cluster-topology.mdx#leader) will invoke this executable once every hour, and if a new certificate is received,
+  it will be sent to all other nodes.
+
+* The executable specified in [Security.Certificate.Change.Exec](../../server/configuration/security-configuration.mdx#securitycertificatechangeexec)
+  will then be used to persist the certificate across the cluster on all nodes.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Renew.Exec.Arguments
+
+The command line arguments for the [Security.Certificate.Renew.Exec](../../server/configuration/security-configuration.mdx#securitycertificaterenewexec) command or executable.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Validation.Exec
+
+EXPERT ONLY:
+
+A command or executable to validate a server authentication request.
+RavenDB will execute: `command [user-arg-1] ... [user-arg-n]`.
+
+The executable will return a case-insensitive boolean string through the standard output (e.g. true, false) indicating whether to approve the connection.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Validation.Exec.Arguments
+
+EXPERT ONLY:
+
+The optional user arguments for the [Security.Certificate.Validation.Exec](../../server/configuration/security-configuration.mdx#securitycertificatevalidationexec) command or executable.
+The arguments must be escaped for the command line.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Validation.Exec.TimeoutInSec
+
+The number of seconds to wait for the [Security.Certificate.Validation.Exec](../../server/configuration/security-configuration.mdx#securitycertificatevalidationexec) executable to exit.
+
+- **Type**: `int`
+- **Default**: `5`
+- **Scope**: Server-wide only
+
+
+## Security.Certificate.Validation.KeyUsages
+
+EXPERT ONLY:
+
+Indicates if 'KeyUsage' validation of certificates should be turned on or off.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+## Security.Csrf.AdditionalOriginHeaders
+
+Specify additional request headers that RavenDB will check for the Origin of a request.
+For example: `X-Forwarded-Host`.
+
+- **Type**: `string[]`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.Csrf.Enabled
+
+Indicates whether the Cross-Site Request Forgery (CSRF) protection is enabled in RavenDB.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+## Security.Csrf.TrustedOrigins
+
+List of trusted origins for the CSRF filter.
+Requests from these origins will be allowed without triggering CSRF checks.
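+
+- **Type**: `string[]`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+As a sketch, trusting a specific origin for CSRF purposes might look like this in `settings.json` (the origin is a placeholder; as with other array-valued keys on these pages, multiple values are commonly given semicolon-separated):
+
+<CodeBlock language="json">
+{`{
+    "Security.Csrf.Enabled": true,
+    "Security.Csrf.TrustedOrigins": "https://admin.example.com"
+}`}
+</CodeBlock>
+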
+## Security.MasterKey.Exec
+
+A command or executable that RavenDB will run to obtain a 256-bit Master Key.
+If specified, RavenDB will use this key to protect secrets.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.MasterKey.Exec.Arguments
+
+The command line arguments for the [Security.MasterKey.Exec](../../server/configuration/security-configuration.mdx#securitymasterkeyexec) command or executable.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.MasterKey.Exec.TimeoutInSec
+
+The number of seconds to wait for the Master Key executable to exit.
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide only
+
+
+## Security.MasterKey.Path
+
+The file path to a (256-bit) Master Key.
+If specified, RavenDB will use this key to protect secrets.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.TwoFactor.DefaultSessionDurationInMin
+
+The default duration of a two-factor authentication (2FA) session, in minutes.
+
+After successfully completing the 2FA process, the session will remain active for this duration before requiring re-authentication.
+
+- **Type**: `int`
+- **Default**: `120`
+- **Scope**: Server-wide only
+
+
+## Security.TwoFactor.MaxSessionDurationInMin
+
+The maximum duration of a two-factor authentication (2FA) session, in minutes.
+This duration takes precedence over the default duration setting.
+
+- **Type**: `int`
+- **Default**: `1440`
+- **Scope**: Server-wide only
+
+
+## Security.WellKnownCertificates.Admin
+
+Specify well-known certificate thumbprints that will be trusted by the server as cluster admins.
+
+- **Type**: `string[]` or `string with thumbprint values separated by ;`
+- **Example**: `"297430d6d2ce259772e4eccf97863a4dfe6b048c;e6a3b45b062d509b3382282d196efe97d5956ccb"`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.WellKnownIssuerHashes.Admin
+
+OBSOLETE.
+This is no longer supported or used.
+Use [Security.WellKnownIssuers.Admin](../../server/configuration/security-configuration.mdx#securitywellknownissuersadmin) instead.
+
+
+## Security.WellKnownIssuers.Admin
+
+Specify well-known issuer certificates in Base64 format or provide file paths to the certificate files.
+This will be used to validate a new client certificate when the issuer's certificate changes.
+
+- **Type**: `string[]` or `string with values separated by ;`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Security.WellKnownIssuers.Admin.ValidateCertificateNames
+
+Determines whether the server will validate the subject alternative names (SANs) of well-known issuer certificates against the server's domain name.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Security.DisableHsts
+
+Disable HTTP Strict Transport Security (HSTS) on the server.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Security.DisableHttpsRedirection
+
+Disable automatic redirection when listening to HTTPS.
+By default, when using port 443, RavenDB redirects all incoming HTTP traffic on port 80 to HTTPS on port 443.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
+## Security.DoNotConsiderMemoryLockFailureAsCatastrophicError
+
+EXPERT ONLY:
+
+Determines whether RavenDB will consider a memory lock error to be catastrophic.
+This is used with encrypted databases to ensure that temporary buffers are never written to disk and are locked to memory.
+
+Setting this to true is **not** recommended and should be done only after proper security analysis has been performed.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide or per database
+
+
+## Security.TlsCipherSuites
+
+EXPERT ONLY:
+
+Defines a list of supported TLS Cipher Suites.
+Values must be semicolon-separated.
+
+- **Type**: `TlsCipherSuite[]`
+- **Example**: `TLS_RSA_WITH_RC4_128_MD5;TLS_RSA_WITH_RC4_128_SHA`
+- **Default**: `null` (Operating System defaults)
+- **Scope**: Server-wide only
+
+
+## Security.UnsecuredAccessAllowed
+
+If authentication is disabled, set the address range type for which server access is unsecured
+(`None | Local | PrivateNetwork | PublicNetwork`).
+
+- **Type**: `enum UnsecuredAccessAddressRange`
+- **Default**: `Local`
+- **Scope**: Server-wide only
+
diff --git a/versioned_docs/version-7.1/server/configuration/server-configuration.mdx b/versioned_docs/version-7.1/server/configuration/server-configuration.mdx
new file mode 100644
index 0000000000..f0b54c7224
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/server-configuration.mdx
@@ -0,0 +1,98 @@
+---
+title: "Configuration: Server Options"
+hide_table_of_contents: true
+sidebar_label: Server Configuration
+sidebar_position: 21
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Server Options
+
+
+* Various configuration options for the server's behavior.
+
+* In this page:
+  * [Server.MaxTimeForTaskToWaitForDatabaseToLoadInSec](../../server/configuration/server-configuration.mdx#servermaxtimefortasktowaitfordatabasetoloadinsec)
+  * [Server.ProcessAffinityMask](../../server/configuration/server-configuration.mdx#serverprocessaffinitymask)
+  * [Server.IndexingAffinityMask](../../server/configuration/server-configuration.mdx#serverindexingaffinitymask)
+  * [Server.NumberOfUnusedCoresByIndexes](../../server/configuration/server-configuration.mdx#servernumberofunusedcoresbyindexes)
+  * [Server.CpuCredits.ExhaustionBackupDelayInMin](../../server/configuration/server-configuration.mdx#servercpucreditsexhaustionbackupdelayinmin)
+  * [Server.Tcp.Compression.Disable](../../server/configuration/server-configuration.mdx#servertcpcompressiondisable)
+
+
+## Server.MaxTimeForTaskToWaitForDatabaseToLoadInSec
+
+This setting indicates how many seconds a task (e.g., a request) will wait for the database to load
+(when it is unloaded, e.g., after a server restart).
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide only
+
+
+## Server.ProcessAffinityMask
+
+EXPERT: The process affinity mask.
+
+- **Type**: `long`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Server.IndexingAffinityMask
+
+EXPERT: The affinity mask to be used for indexing.
+Overrides the `Server.NumberOfUnusedCoresByIndexes` value.
+Should only be used if you also set `Server.ProcessAffinityMask`.
+
+- **Type**: `long`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+## Server.NumberOfUnusedCoresByIndexes
+
+EXPERT: The number of cores that will NOT be running indexing.
+Defaults to 1 core that is kept for all other tasks and will not be used for indexing.
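+
+- **Type**: `int`
+- **Default**: `1`
+- **Scope**: Server-wide only
+
+For illustration, reserving two cores for non-indexing work (leaving, say, six cores for indexing on an 8-core machine) might look like this in `settings.json` (the value is an example only):
+
+<CodeBlock language="json">
+{`{
+    "Server.NumberOfUnusedCoresByIndexes": 2
+}`}
+</CodeBlock>
+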
+## Server.CpuCredits.ExhaustionBackupDelayInMin
+
+EXPERT: When CPU credits are exhausted, backup tasks are canceled.
+This value determines how many minutes the server will wait before retrying the backup task.
+
+- **Type**: `TimeSetting`
+- **TimeUnit**: `TimeUnit.Minutes`
+- **Default**: `10`
+- **Scope**: Server-wide only
+
+If you have an enterprise license, you can access information about CPU credits
+using [SNMP](../../server/administration/snmp/snmp-overview.mdx).
+
+
+## Server.Tcp.Compression.Disable
+
+Disable TCP compression.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
diff --git a/versioned_docs/version-7.1/server/configuration/storage-configuration.mdx b/versioned_docs/version-7.1/server/configuration/storage-configuration.mdx
new file mode 100644
index 0000000000..49e2093187
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/storage-configuration.mdx
@@ -0,0 +1,170 @@
+---
+title: "Configuration: Storage Options"
+hide_table_of_contents: true
+sidebar_label: Storage Configuration
+sidebar_position: 22
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Storage Options
+
+The following configuration options allow you to configure [the storage engine](../../server/storage/storage-engine.mdx).
+
+## Storage.TempPath
+
+* Use this configuration option to customize the path for the temporary files of the following [directories](../../server/storage/directory-structure.mdx):
+  * `System`
+  * `Configuration`
+  * `Databases/{database_name}`
+
+* By default, the temporary files are created under the `Temp` folder in those directories.
+
+* When `Storage.TempPath` is configured:
+
+  * The System temporary files will be written to `"<Storage.TempPath>/System"`.
+  * The Databases temporary files will be written to `"<Storage.TempPath>/Databases/{database-name}"`.
+  * The Configuration temporary files will be written to `"<Storage.TempPath>/Databases/{database-name}/Configuration"`.
+
+* To specify a different path for the indexes temporary files go to [Indexing.TempPath](../../server/configuration/indexing-configuration.mdx#indexingtemppath).
+
+* Learn more about RavenDB directory structure [here](../../server/storage/directory-structure.mdx).
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide or per database
+
+
+## Storage.MaxConcurrentFlushes
+
+Maximum concurrent flushes.
+
+- **Type**: `int`
+- **Default**: `10`
+- **Scope**: Server-wide or per database
+
+
+## Storage.TimeToSyncAfterFlushInSec
+
+Time to sync after flush, in seconds.
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide or per database
+
+
+## Storage.NumberOfConcurrentSyncsPerPhysicalDrive
+
+Number of concurrent syncs per physical drive.
+
+- **Type**: `int`
+- **Default**: `3`
+- **Scope**: Server-wide or per database
+
+
+## Storage.CompressTxAboveSizeInKb
+
+Compress transactions above this size (value in KB).
+
+- **Type**: `int`
+- **Default**: `512`
+- **Scope**: Server-wide or per database
+
+
+## Storage.ForceUsing32BitsPager
+
+Use the 32 bits memory mapped pager even when running on 64 bits.
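+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+As an illustrative sketch, redirecting the temporary files described above to a faster drive (the path is a placeholder) might look like this in `settings.json`:
+
+<CodeBlock language="json">
+{`{
+    "Storage.TempPath": "/mnt/fast-ssd/raven-temp"
+}`}
+</CodeBlock>
+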
+## Storage.MaxScratchBufferSizeInMb
+
+Maximum size of `.buffers` files.
+
+- **Type**: `int`
+- **Default**: `256` when running on 64 bits, `32` when running on 32 bits or `Storage.ForceUsing32BitsPager` is set to `true`
+- **Scope**: Server-wide or per database
+
+
+## Storage.PrefetchBatchSizeInKb
+
+Size of the batch in kilobytes that will be requested from the OS when prefetching from disk (value in powers of 2).
+Some OSs may not honor certain values. Experts only.
+
+- **Type**: `int`
+- **Default**: `1024`
+- **Scope**: Server-wide or per database
+
+
+## Storage.PrefetchResetThresholdInGb
+
+How many gigabytes of memory should be prefetched before restarting the prefetch tracker table. Experts only.
+
+- **Type**: `int`
+- **Default**: `8`
+- **Scope**: Server-wide or per database
+
+
+## Storage.OnDirectoryInitialize.Exec
+
+A command or executable to run when creating/opening a directory (storage environment). Experts only.
+RavenDB will execute:
+
+<CodeBlock>
+{`command [user-arg-1] ... [user-arg-n]`}
+</CodeBlock>
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide or per database
+
+
+## Storage.OnDirectoryInitialize.Exec.Arguments
+
+The optional user arguments for the 'Storage.OnDirectoryInitialize.Exec' command or executable.
+The arguments must be escaped for the command line. Experts only.
+
+- **Type**: `string`
+- **Default**: `null`
+- **Scope**: Server-wide or per database
+
+
+## Storage.OnDirectoryInitialize.Exec.TimeoutInSec
+
+The number of seconds to wait for the OnDirectoryInitialize executable to exit. Experts only.
+
+- **Type**: `int`
+- **Default**: `30`
+- **Scope**: Server-wide or per database
+
+
+## Storage.EnablePrefetching
+
+Enables the memory prefetching mechanism if the OS supports it.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
diff --git a/versioned_docs/version-7.1/server/configuration/studio-configuration.mdx b/versioned_docs/version-7.1/server/configuration/studio-configuration.mdx
new file mode 100644
index 0000000000..17be23067f
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/studio-configuration.mdx
@@ -0,0 +1,25 @@
+---
+title: "Configuration: Studio Options"
+hide_table_of_contents: true
+sidebar_label: Studio Configuration
+sidebar_position: 23
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Studio Options
+
+## Studio.Path
+
+The directory in which RavenDB will search for the Studio files; defaults to the base directory.
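+
+- **Type**: `string`
+- **Default**: `null` (base directory)
+- **Scope**: Server-wide only
+
+For illustration, pointing the server at a custom Studio folder (the path is a placeholder) might look like this in `settings.json`; the same key can typically also be set through an environment variable such as `RAVEN_Studio_Path`:
+
+<CodeBlock language="json">
+{`{
+    "Studio.Path": "/opt/ravendb/studio"
+}`}
+</CodeBlock>
+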
+## Storage.EnablePrefetching
+
+Enables the memory prefetching mechanism if the OS supports it.
+
+- **Type**: `bool`
+- **Default**: `true`
+- **Scope**: Server-wide only
+
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/studio-configuration.mdx b/versioned_docs/version-7.1/server/configuration/studio-configuration.mdx
new file mode 100644
index 0000000000..17be23067f
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/studio-configuration.mdx
@@ -0,0 +1,25 @@
+---
+title: "Configuration: Studio Options"
+hide_table_of_contents: true
+sidebar_label: Studio Configuration
+sidebar_position: 23
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Studio Options
+
+## Studio.Path
+
+The directory in which RavenDB will search for the Studio files. Defaults to the base directory.
+
+- **Type**: `string`
+- **Default**: `null` (base directory)
+- **Scope**: Server-wide only
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/subscription-configuration.mdx b/versioned_docs/version-7.1/server/configuration/subscription-configuration.mdx
new file mode 100644
index 0000000000..f92aae5306
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/subscription-configuration.mdx
@@ -0,0 +1,48 @@
+---
+title: "Configuration: Subscriptions"
+hide_table_of_contents: true
+sidebar_label: Subscription Configuration
+sidebar_position: 24
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Subscriptions
+
+
+* The following configuration keys control various aspects of subscription behavior in RavenDB.
+  Learn more about subscriptions in [Data subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx).
+
+* In this article:
+  * [Subscriptions.ArchivedDataProcessingBehavior](../../server/configuration/subscription-configuration.mdx#subscriptionsarchiveddataprocessingbehavior)
+  * [Subscriptions.MaxNumberOfConcurrentConnections](../../server/configuration/subscription-configuration.mdx#subscriptionsmaxnumberofconcurrentconnections)
+
+
+
+## Subscriptions.ArchivedDataProcessingBehavior
+
+The default processing behavior for archived documents in a subscription query.
+
+- **Type**: `enum ArchivedDataProcessingBehavior`:
+  * `ExcludeArchived`: only non-archived documents are processed by the subscription query.
+  * `IncludeArchived`: both archived and non-archived documents are processed by the subscription query.
+  * `ArchivedOnly`: only archived documents are processed by the subscription query.
+- **Default**: `ExcludeArchived`
+- **Scope**: Server-wide or per database
+
+
+
+## Subscriptions.MaxNumberOfConcurrentConnections
+
+The maximum number of concurrent subscription connections allowed per database.
+
+- **Type**: `int`
+- **Default**: `1000`
+- **Scope**: Server-wide or per database
+
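+For example, a minimal sketch of overriding both keys in the server's `settings.json`
+file (the values shown are placeholder assumptions):
+
+```json
+{
+    "Subscriptions.ArchivedDataProcessingBehavior": "IncludeArchived",
+    "Subscriptions.MaxNumberOfConcurrentConnections": 2000
+}
+```
+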
diff --git a/versioned_docs/version-7.1/server/configuration/tombstone-configuration.mdx b/versioned_docs/version-7.1/server/configuration/tombstone-configuration.mdx
new file mode 100644
index 0000000000..38c67a744a
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/tombstone-configuration.mdx
@@ -0,0 +1,44 @@
+---
+title: "Configuration: Tombstone Options"
+hide_table_of_contents: true
+sidebar_label: Tombstone Configuration
+sidebar_position: 25
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Tombstone Options
+
+## Tombstones.CleanupIntervalInMin
+
+Time (in minutes) between tombstone cleanups.
+
+- **Type**: `TimeUnit.Minutes`
+- **Default**: `5`
+- **Scope**: Server-wide or per database
+
+
+## Tombstones.RetentionTimeWithReplicationHubInHrs
+
+Time (in hours) to protect tombstones from deletion if this server is defined
+as a replication hub.
+
+- **Type**: `TimeUnit.Hours`
+- **Default**: `336` (14 days)
+- **Scope**: Server-wide or per database
+
+
+## Tombstones.CleanupIntervalWithReplicationHubInMin
+
+Time (in minutes) between tombstone cleanups if this server is defined as
+a replication hub.
+
+- **Type**: `TimeUnit.Minutes`
+- **Default**: `1440` (1 day)
+- **Scope**: Server-wide or per database
+
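+For example, a minimal sketch of a `settings.json` fragment that adjusts the cleanup
+intervals and extends the retention time on a replication hub (placeholder values):
+
+```json
+{
+    "Tombstones.CleanupIntervalInMin": 10,
+    "Tombstones.RetentionTimeWithReplicationHubInHrs": 504,
+    "Tombstones.CleanupIntervalWithReplicationHubInMin": 720
+}
+```
+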
diff --git a/versioned_docs/version-7.1/server/configuration/traffic-watch-configuration.mdx b/versioned_docs/version-7.1/server/configuration/traffic-watch-configuration.mdx
new file mode 100644
index 0000000000..0d6eb246bf
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/traffic-watch-configuration.mdx
@@ -0,0 +1,140 @@
+---
+title: "Configuration: Traffic Watch Options"
+hide_table_of_contents: true
+sidebar_label: Traffic Watch Configuration
+sidebar_position: 26
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Traffic Watch Options
+
+## TrafficWatch.Mode
+
+Traffic Watch logging mode.
+
+- **Type**: `TrafficWatchMode`
+- **Default**: `Off`
+- **Scope**: Server-wide only
+
+Possible values:
+
+- `Off`
+- `ToLogFile`
+
+
+
+## TrafficWatch.Databases
+
+A semicolon-separated list of database names by which the Traffic Watch logging entities will be filtered.
+If not specified, Traffic Watch entities of all databases will be included.
+A sample list: `"test-database;another-database;the-third-database"`
+
+- **Type**: `List<string>`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+
+## TrafficWatch.StatusCodes
+
+A semicolon-separated list of response status codes by which the Traffic Watch logging entities will be filtered.
+If not specified, Traffic Watch entities with any response status code will be included.
+A sample list: `"200;500;404"`
+
+- **Type**: `List<int>`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+
+## TrafficWatch.MinimumResponseSizeInBytes
+
+Minimum response size by which the Traffic Watch logging entities will be filtered.
+
+- **Type**: `int`
+- **Default**: `0`
+- **Minimum**: `0`
+- **Scope**: Server-wide only
+
+
+
+## TrafficWatch.MinimumRequestSizeInBytes
+
+Minimum request size by which the Traffic Watch logging entities will be filtered.
+
+- **Type**: `int`
+- **Default**: `0`
+- **Minimum**: `0`
+- **Scope**: Server-wide only
+
+
+
+## TrafficWatch.MinimumDurationInMs
+
+Minimum duration by which the Traffic Watch logging entities will be filtered.
+
+- **Type**: `int`
+- **Default**: `0`
+- **Scope**: Server-wide only
+
+
+
+## TrafficWatch.HttpMethods
+
+A semicolon-separated list of request HTTP methods by which the Traffic Watch logging entities will be filtered.
+If not specified, Traffic Watch entities with any HTTP request method will be included.
+A sample list: `"GET;POST"`
+
+- **Type**: `List<string>`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
+
+## TrafficWatch.ChangeTypes
+
+A semicolon-separated list of Traffic Watch change types by which the Traffic Watch logging entities will be filtered.
+If not specified, Traffic Watch entities with any change type will be included.
+A sample list: `"Queries;Documents"`
+
+- **Type**: `List<TrafficWatchChangeType>`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+Possible values:
+
+- `None`
+- `Queries`
+- `Operations`
+- `MultiGet`
+- `BulkDocs`
+- `Index`
+- `Counters`
+- `Hilo`
+- `Subscriptions`
+- `Streams`
+- `Documents`
+- `TimeSeries`
+- `Notifications`
+- `ClusterCommands`
+
+
+
+## TrafficWatch.CertificateThumbprints
+
+A semicolon-separated list of specific client certificate thumbprints by which the Traffic Watch logging entities will be filtered.
+If not specified, Traffic Watch entities with any certificate thumbprint will be included,
+including those without any thumbprint.
+A sample list: `"0123456789ABCDEF0123456789ABCDEF01234567;FEDCBA9876543210FEDCBA9876543210FEDCBA98"`
+
+- **Type**: `List<string>`
+- **Default**: `null`
+- **Scope**: Server-wide only
+
+
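+For example, a minimal sketch of a `settings.json` fragment that enables Traffic Watch
+logging and narrows it down with a few of the filters above (placeholder values):
+
+```json
+{
+    "TrafficWatch.Mode": "ToLogFile",
+    "TrafficWatch.Databases": "test-database;another-database",
+    "TrafficWatch.HttpMethods": "GET;POST",
+    "TrafficWatch.MinimumDurationInMs": 50
+}
+```
+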
diff --git a/versioned_docs/version-7.1/server/configuration/transaction-merger-configuration.mdx b/versioned_docs/version-7.1/server/configuration/transaction-merger-configuration.mdx
new file mode 100644
index 0000000000..b0bed6735a
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/transaction-merger-configuration.mdx
@@ -0,0 +1,45 @@
+---
+title: "Configuration: Transaction Merger Options"
+hide_table_of_contents: true
+sidebar_label: Transaction Merger Configuration
+sidebar_position: 27
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Transaction Merger Options
+
+## TransactionMerger.MaxTimeToWaitForPreviousTxInMs
+
+EXPERT: Time to wait (in milliseconds) for the previous async commit before checking the transaction size.
+
+- **Type**: `int`
+- **Default**: `0`
+- **Scope**: Server-wide or per database
+
+
+
+## TransactionMerger.MaxTimeToWaitForPreviousTxBeforeRejectingInMs
+
+EXPERT: Time to wait (in milliseconds) for the previous async commit transaction before rejecting the request due to long-duration IO.
+
+- **Type**: `int`
+- **Default**: `5000`
+- **Scope**: Server-wide or per database
+
+
+
+## TransactionMerger.MaxTxSizeInMb
+
+EXPERT: Maximum size (in MB) for the merged transaction.
+
+- **Type**: `int`
+- **Default**: `4` (MB) when running on 32 bits; when running on 64 bits, the minimum of `10% of total physical memory` and `512` (MB)
+- **Scope**: Server-wide or per database
+
+
diff --git a/versioned_docs/version-7.1/server/configuration/updates-configuration.mdx b/versioned_docs/version-7.1/server/configuration/updates-configuration.mdx
new file mode 100644
index 0000000000..ffb492520e
--- /dev/null
+++ b/versioned_docs/version-7.1/server/configuration/updates-configuration.mdx
@@ -0,0 +1,41 @@
+---
+title: "Configuration: Update Options"
+hide_table_of_contents: true
+sidebar_label: Updates Configuration
+sidebar_position: 28
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Configuration: Update Options
+
+## Updates.Channel
+
+Indicates what release channel should be used to perform latest version checks.
+
+- **Type**: `string`
+- **Default**: `Patch`
+- **Scope**: Server-wide only
+
+Possible values:
+
+- `Stable`
+- `Patch`
+- `Dev`
+
+
+
+## Updates.BackgroundChecks.Disable
+
+Disable background latest version checks.
+
+- **Type**: `bool`
+- **Default**: `false`
+- **Scope**: Server-wide only
+
+
diff --git a/versioned_docs/version-7.1/server/embedded.mdx b/versioned_docs/version-7.1/server/embedded.mdx
new file mode 100644
index 0000000000..228f832256
--- /dev/null
+++ b/versioned_docs/version-7.1/server/embedded.mdx
@@ -0,0 +1,36 @@
+---
+title: "Server: Running an Embedded Instance"
+hide_table_of_contents: true
+sidebar_label: Embedded
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import EmbeddedCsharp from './_embedded-csharp.mdx';
+import EmbeddedJava from './_embedded-java.mdx';
+
+export const supportedLanguages = ["csharp", "java"];
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/extensions/_category_.json b/versioned_docs/version-7.1/server/extensions/_category_.json
new file mode 100644
index 0000000000..f822f59631
--- /dev/null
+++ b/versioned_docs/version-7.1/server/extensions/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 4,
+  "label": "Extensions"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/extensions/_expiration-csharp.mdx b/versioned_docs/version-7.1/server/extensions/_expiration-csharp.mdx
new file mode 100644
index 0000000000..4aec97e3b1
--- /dev/null
+++ b/versioned_docs/version-7.1/server/extensions/_expiration-csharp.mdx
@@ -0,0 +1,135 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Documents can be given a future expiration time at which they'll be automatically deleted.
+* The Expiration feature deletes documents set for expiration when their time has passed.
+* You can enable or disable the expiration feature while the database is already live with data.
+
+* In this page:
+  * [Expiration feature usages](../../server/extensions/expiration.mdx#expiration-feature-usages)
+  * [Configuring the expiration feature](../../server/extensions/expiration.mdx#configuring-the-expiration-feature)
+    * [Configure expiration settings using the client API](../../server/extensions/expiration.mdx#configure-expiration-settings-using-the-client-api)
+  * [Setting the document expiration time](../../server/extensions/expiration.mdx#setting-the-document-expiration-time)
+  * [Eventual consistency considerations](../../server/extensions/expiration.mdx#eventual-consistency-considerations)
+
+## Expiration feature usages
+
+Use the Expiration feature when data is needed only for a given time period.
+For example:
+
+  * Shopping cart data that is kept only for a certain time period
+  * Email links that need to be expired after a few hours
+  * A web application's login session details
+  * Cache data from an SQL server
+
+
+
+## Configuring the expiration feature
+
+Document expiration settings can be changed via Studio or the API.
+It is possible to:
+
+* Enable or disable the deletion of expired documents.
+  Default value: **Disable** the deletion of expired documents.
+* Determine how often RavenDB will look for expired documents and delete them.
+  Default value: **60 seconds**
+* Set the maximal number of documents that RavenDB is allowed to delete per interval.
+  Default value: **All expired documents**
+
+[Learn how to configure expiration settings via Studio](../../studio/database/settings/document-expiration.mdx)
+
+### Configure expiration settings using the client API
+
+Modify the expiration settings using the client API by setting an `ExpirationConfiguration`
+object and sending it to RavenDB using a `ConfigureExpirationOperation` operation.
+
+#### Example:
+
+
+
+{`await store.Maintenance.SendAsync(new ConfigureExpirationOperation(new ExpirationConfiguration
+\{
+    Disabled = false,
+    DeleteFrequencyInSec = 60,
+    MaxItemsToProcess = 1000
+\}));
+`}
+
+
+
+#### `ExpirationConfiguration`
+
+
+
+{`public class ExpirationConfiguration
+\{
+    // Set 'Disabled' to false to enable the deletion of expired items
+    public bool Disabled \{ get; set; \}
+
+    // How frequently to delete expired items
+    public long? DeleteFrequencyInSec \{ get; set; \}
+
+    // How many items to delete (per batch)
+    public long? MaxItemsToProcess \{ get; set; \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **Disabled** | `bool` | If `true`, deleting expired documents is disabled for the entire database.<br/>Default: `true` |
+| **DeleteFrequencyInSec** | `long?` | Determines how often (in seconds) the expiration feature looks for expired documents and deletes them.<br/>Default: `60` |
+| **MaxItemsToProcess** | `long?` | Determines the maximal number of documents the feature is allowed to delete in one run. |
+
+
+
+## Setting the document expiration time
+
+* To set a document expiration time, add an `@expires` property to the document's
+  `@metadata`, with the designated expiration time as its value.
+  Set the time in **UTC** format, not local time. E.g. -
+  **"@expires": "2025-04-22T08:00:00.0000000Z"**
+
+  Metadata properties starting with `@` are for internal RavenDB usage only.
+  Do _not_ use the metadata `@expires` property for any other purpose than
+  scheduling a document's expiration time for the built-in expiration feature.
+
+* If and when the expiration feature is enabled, it will process all documents
+  carrying the `@expires` flag and automatically delete each document
+  [by its expiration time](../../server/extensions/expiration.mdx#eventual-consistency-considerations).
+* To set the document expiration time from the client, use the following code:
+
+
+{`DateTime expiry = DateTime.UtcNow.AddMinutes(5);
+using (IAsyncDocumentSession session = store.OpenAsyncSession())
+\{
+    await session.StoreAsync(user);
+    session.Advanced.GetMetadataFor(user)[Constants.Documents.Metadata.Expires] = expiry;
+    await session.SaveChangesAsync();
+\}
+`}
+
+
+
+
+
+## Eventual consistency considerations
+
+* Internally, RavenDB tracks all documents carrying the `@expires` flag even if the
+  expiration feature is disabled. This way, once the expiration feature is enabled, expired
+  documents can be processed without delay.
+* Once a document expires, it may take up to the _delete frequency interval_ (60 seconds by default)
+  until it is actually deleted.
+* Deletion may be further delayed if `MaxItemsToProcess` is set, limiting the number
+  of documents that RavenDB is allowed to delete each time the expiration feature is invoked.
+* Expired documents are _not_ filtered out during `load`, `query`, or indexing, so be aware that
+  as long as an expired document hasn't been actually deleted it may still be included in the results.
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/extensions/_refresh-csharp.mdx b/versioned_docs/version-7.1/server/extensions/_refresh-csharp.mdx
new file mode 100644
index 0000000000..851e395d08
--- /dev/null
+++ b/versioned_docs/version-7.1/server/extensions/_refresh-csharp.mdx
@@ -0,0 +1,138 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* The Refresh feature increments a document's [change vector](../../server/clustering/replication/change-vector.mdx),
+  triggering its re-indexing as well as other features that react to document updates.
+
+* Refresh is scheduled using the `@refresh` flag in a document's [metadata](../../client-api/session/how-to/get-and-modify-entity-metadata.mdx).
+
+* In this page:
+  * [Overview](../../server/extensions/refresh.mdx#overview)
+  * [Examples](../../server/extensions/refresh.mdx#examples)
+  * [Syntax](../../server/extensions/refresh.mdx#syntax)
+  * [Configure from Studio](../../server/extensions/refresh.mdx#configure-from-studio)
+
+
+
+## Overview
+
+* To set a document refresh time, add a `@refresh` property to the document's
+  `@metadata`, with the designated refresh time as its value.
+  Set the time in **UTC** format, not local time, e.g. -
+  **"@refresh": "2025-04-22T08:00:00.0000000Z"**
+
+  Metadata properties starting with `@` are for internal RavenDB usage only.
+  Do _not_ use the metadata `@refresh` property for any other purpose than
+  scheduling a document's refresh time for the built-in refresh feature.
+
+
+* This will cause the document to refresh **only once**.
+  When the refresh operation takes place, it will also remove the `@refresh` property from the document.
+
+  The exact time at which the refresh takes place is determined by:
+
+  1. The refresh time value set for the `@refresh` property.
+  2. The way you set the [Refresh Configuration](../../server/extensions/refresh.mdx#syntax),
+     including:
+     - The interval by which the server refreshes documents (set by default to 60 seconds).
+     - The way you set **maximal items to process**, potentially limiting the number
+       of documents that RavenDB is allowed to refresh each time the refresh feature is invoked.
+
+
+
+* Refreshing a document causes its [change vector](../../server/clustering/replication/change-vector.mdx)
+  to increment the same way it would after any other kind of update to the document.
+  This triggers any features that react to document updating, including but not limited to:
+  - Re-indexing of the document by indexes that cover it
+  - [Replication](../../server/ongoing-tasks/external-replication.mdx),
+    [Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx),
+    and [ETL](../../server/ongoing-tasks/etl/basics.mdx) triggering
+  - Creation of a [document revision](../../document-extensions/revisions/overview.mdx)
+
+
+
+## Examples
+
+#### Example I
+
+How to set refresh configuration for a database:
+
+
+
+{`var refreshConfig = new RefreshConfiguration \{
+    Disabled = false,
+    RefreshFrequencyInSec = 300,
+    MaxItemsToProcess = 1000
+\};
+
+var result = documentStore.Maintenance.Send(new ConfigureRefreshOperation(refreshConfig));
+`}
+
+
+
+This activates document refreshing and sets the interval to 5 minutes.
+
+
+#### Example II
+
+How to set a document to refresh 1 hour from now:
+
+
+
+{`using (var session = documentStore.OpenSession())
+\{
+    var document = session.Load<User>("users/1-A");
+
+    session.Advanced.GetMetadataFor(document)["@refresh"] = DateTime.UtcNow.AddHours(1);
+
+    session.SaveChanges();
+\}
+`}
+
+
+
+
+
+## Syntax
+
+To activate and/or configure document refreshing, send the server a
+`RefreshConfiguration` object using the `ConfigureRefreshOperation` operation.
+
+#### `RefreshConfiguration`
+
+
+
+{`public class RefreshConfiguration
+\{
+    // Set 'Disabled' to false to enable the refresh feature
+    public bool Disabled \{ get; set; \}
+
+    // How frequently to process documents with a @refresh flag
+    public long? RefreshFrequencyInSec \{ get; set; \}
+
+    // How many items to refresh (each time the refresh task is invoked)
+    public long? MaxItemsToProcess \{ get; set; \}
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **Disabled** | `bool` | If `true`, refreshing documents is disabled for the entire database.<br/>Default: `true` |
+| **RefreshFrequencyInSec** | `long?` | Determines how often (in seconds) the server processes documents that need to be refreshed.<br/>Default: `60` |
+| **MaxItemsToProcess** | `long?` | Determines the maximal number of documents the feature is allowed to refresh in one run. |
+
+
+
+## Configure from Studio
+
+Alternatively, document refreshing can also be configured via Studio, under **Settings > Document Refresh**.
+
+![NoSQL DB Server - Document Refresh](./assets/StudioRefresh.png)
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/extensions/assets/StudioRefresh.png b/versioned_docs/version-7.1/server/extensions/assets/StudioRefresh.png
new file mode 100644
index 0000000000..28bcb6c95b
Binary files /dev/null and b/versioned_docs/version-7.1/server/extensions/assets/StudioRefresh.png differ
diff --git a/versioned_docs/version-7.1/server/extensions/expiration.mdx b/versioned_docs/version-7.1/server/extensions/expiration.mdx
new file mode 100644
index 0000000000..bf300c9e66
--- /dev/null
+++ b/versioned_docs/version-7.1/server/extensions/expiration.mdx
@@ -0,0 +1,27 @@
+---
+title: "Document Expiration"
+hide_table_of_contents: true
+sidebar_label: Document Expiration
+sidebar_position: 0
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import ExpirationCsharp from './_expiration-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/extensions/refresh.mdx b/versioned_docs/version-7.1/server/extensions/refresh.mdx
new file mode 100644
index 0000000000..5a26d4afee
--- /dev/null
+++ b/versioned_docs/version-7.1/server/extensions/refresh.mdx
@@ -0,0 +1,30 @@
+---
+title: "Document Refresh"
+hide_table_of_contents: true
+sidebar_label: Document Refresh
+sidebar_position: 1
+---
+
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+import RefreshCsharp from './_refresh-csharp.mdx';
+
+export const supportedLanguages = ["csharp"];
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/kb/_category_.json b/versioned_docs/version-7.1/server/kb/_category_.json
new file mode 100644
index 0000000000..58e4b9f38d
--- /dev/null
+++ b/versioned_docs/version-7.1/server/kb/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 9,
+ "label": Knowledge Base, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/server/kb/_document-identifier-generation-csharp.mdx b/versioned_docs/version-7.1/server/kb/_document-identifier-generation-csharp.mdx new file mode 100644 index 0000000000..ad5efcd410 --- /dev/null +++ b/versioned_docs/version-7.1/server/kb/_document-identifier-generation-csharp.mdx @@ -0,0 +1,531 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import ContentFrame from '@site/src/components/ContentFrame'; +import Panel from '@site/src/components/Panel'; + + + +* A document identifier (document ID) is a unique string associated with a document. + It is globally unique in the scope of the database - no two documents in the same database can have the same ID. + +* This article focuses on the different **Document ID Types** available in RavenDB and when to use each one. + Additional explanation is available in [Working with document identifiers](../../client-api/document-identifiers/working-with-document-identifiers.mdx). + To create a document from the Studio, see [Create new document](../../studio/database/documents/create-new-document.mdx#create-new-document). + +* In this article: + * [Overview](../../server/kb/document-identifier-generation.mdx#overview) + * [ID types](../../server/kb/document-identifier-generation.mdx#id-types) + * [ID structure](../../server/kb/document-identifier-generation.mdx#id-structure) + * [ID limitations](../../server/kb/document-identifier-generation.mdx#id-limitations) + * [Document IDs:](../../server/kb/document-identifier-generation.mdx#document-ids) + * [Semantic ID](../../server/kb/document-identifier-generation.mdx#semantic-id) + * [GUID](../../server/kb/document-identifier-generation.mdx#guid) + * [Server-side ID](../../server/kb/document-identifier-generation.mdx#server-side-id) + * [Identity ID](../../server/kb/document-identifier-generation.mdx#identity-id) + * [HiLo algorithm ID](../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id) + * [Artificial document ID](../../server/kb/document-identifier-generation.mdx#artificial-document-id) + * [Customizing the separator character](../../server/kb/document-identifier-generation.mdx#customizing-the-separator-character) + + + + + + +### ID types + +RavenDB supports several document ID types, where the ID string can be generated in different ways: + +* **Defined by the user**: + You explicitly specify the document ID. + * [Semantic ID](../../server/kb/document-identifier-generation.mdx#semantic-id) + +* **Generated by the server**: + The server generates the document ID based on the ID string format you provide when creating the document. + * [GUID](../../server/kb/document-identifier-generation.mdx#guid) + * [Server-side ID](../../server/kb/document-identifier-generation.mdx#server-side-id) + * [Identity ID](../../server/kb/document-identifier-generation.mdx#identity-id) + +* **Generated by the client (from a range provided by the server)**: + The server assigns a range of IDs to the client upon request. + The client then uses that range to generate document IDs locally within the session. + * [HiLo algorithm ID](../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id) + +* **Generated from a Map-Reduce index output**: + The ID is generated by the server when saving map-reduce results as artificial documents. 
+ * [Artificial document ID](../../server/kb/document-identifier-generation.mdx#artificial-document-id) + + + + +### ID Structure + +Document IDs typically consist of three parts: +the collection prefix, a slash (_'/'_) as the default separator, and a unique suffix. +For example: `users/123-A` (HiLo ID), or `users/000000000001-A` (Server-side ID). + +This structure is common but not mandatory: + * RavenDB does not require the ID to include a collection prefix. + * The default slash separator (`/`) can be **customized** for [HiLo IDs](../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id) + and [Identity IDs](../../server/kb/document-identifier-generation.mdx#identity-id). => see how below.... + + + + +### ID Limitations + +The following limitations apply to document IDs: + +* Maximum length: `512` bytes (in UTF-8) +* Document IDs cannot end with the following reserved characters: + * `/` - reserved for [Server-side ID generation](../../server/kb/document-identifier-generation.mdx#server-side-id) + * `|` - reserved for [Identity ID generation](../../server/kb/document-identifier-generation.mdx#identity-id) + + + + +## Document IDs: + + + +* **Generated by**: + The user + +* **Description**: + * The **semantic ID** is assigned by _you_ when creating the document (using the Client API or from the Studio), + and not generated by the server. It’s therefore your responsibility to ensure that each ID is unique. + * Creating a new document with an existing semantic ID will _overwrite_ the existing document. + +* **When to use**: + Use a semantic ID when you want the document’s identifier to convey meaning, + to reflect what the document represents. + +* **Example**: + * Documents that use an _email_ address as a unique identifier in the _Users_ collection: + * `users/ayende@ayende.com` + * `users/john@john.doe` + This makes the ID both globally unique and instantly meaningful. + It is clear which user the document represents. + * IDs that describe the document’s contents: + * `accounts/591-192/txs/2025-11-12` + Implying that the document holds all the transactions for account 591-192 on November 12th, 2025. + * `support-tickets/INV-88411` + A support ticket related to invoice 88411. + +--- + + + +```csharp +using (var session = store.OpenSession()) +{ + // Specify the semantic ID in the entity's Id property + var user = new User { Name = "John", Id = "users/john@john.doe" }; + session.Store(user); + + session.SaveChanges(); + // The document will be saved with the ID you specified: "users/john@john.doe" +} +``` + + +```csharp +using (var session = store.OpenSession()) +{ + var user = new User { Name = "John" }; + // Specify the semantic ID when calling Store() + session.Store(user, "users/john@john.doe"); + + session.SaveChanges(); + // The document will be saved with the ID you specified: "users/john@john.doe" +} +``` + + + + + + +* **Generated by**: + The server + +* **Description**: + * If you don’t specify a document ID when creating the document, + the server will generate a **globally unique identifier** (GUID) for the new document. + * Although this is a simple way to generate a document ID, GUIDs are not human-friendly + and make debugging or troubleshooting more difficult. This approach is generally less recommended. + +* **When to use**: + Use this only when you don’t care about the document ID and don’t need to trace it in logs, tools, or support. 
+ +* **Example**: + A GUID as a document ID: + `50bbe329-6258-4634-be24-2f013d7174cd` + +--- + + + +```csharp +using (var session = store.OpenSession()) +{ + // Set the entity's Id to string.Empty + var user = new User { Name = "John", Id = string.Empty }; + session.Store(user); + + session.SaveChanges(); + // The server will generate a GUID-based ID, e.g. "50bbe329-6258-4634-be24-2f013d7174cd" +} +``` + + +```csharp +using (var session = store.OpenSession()) +{ + var user = new User { Name = "John" }; + // Specify string.Empty when calling Store() + session.Store(user, string.Empty); + + session.SaveChanges(); + // The server will generate a GUID-based ID, e.g. "50bbe329-6258-4634-be24-2f013d7174cd" +} +``` + + + + + + +* **Generated by**: + The server + +* **Description**: + * When you create a document and provide an ID string that ends with a _slash_ (`/`), + the server will generate the full document ID for you. + * The server handling the request increments its [last document etag](../../glossary/etag.mdx) and appends it, + along with the **server's node tag**, to the string you provided. + * Since the _etag_ reflects any document change (add, update, delete), + the generated server-side IDs are always increasing but not guaranteed to be sequential. + +* **When to use**: + * Use the server-side ID when you don't care about the exact ID given to a newly created document. + * Recommended when creating many documents (e.g., during bulk insert), + as this method has the least overhead and requires minimal work from the server. + +* **Example**: + * On a server running on node 'A': + * Creating the first document with `users/` => results in document ID: `users/0000000000000000001-A` + * Creating a second document with `users/` => results in document ID: `users/0000000000000000002-A` + * On a server running on node 'B': + * Creating a third document with `users/` => may result in: `users/0000000000000000034-B` + * Note: node tag 'B' was appended to the ID generated because the request was handled by node 'B'. + Since each server has its own local _etag_, the numeric part of the ID is _not_ necessarily sequential (or unique) across the nodes + in the database group, as can happen when documents are created in parallel on multiple nodes during network partitions or failover. + +* **Note**: + If you _manually_ generate a document ID with a pattern that matches the server-side generated IDs, + RavenDB will _not_ check for that and will overwrite the existing document. + The leading zeros in server-side generated IDs help reduce the risk of such accidental collisions. + +--- + + + +```csharp +using (var session = store.OpenSession()) +{ + // Set the entity's Id to "users/" + var user = new User { Name = "John", Id = "users/" }; + session.Store(user); + + session.SaveChanges(); + // The server will generate a server-side ID, e.g. "users/0000000000000000001-A" +} +``` + + +```csharp +using (var session = store.OpenSession()) +{ + var user = new User { Name = "John" }; + // Specify "users/" when calling Store() + session.Store(user, "users/"); + + session.SaveChanges(); + // The server will generate a server-side ID, e.g. "users/0000000000000000001-A" +} +``` + + + + + + +* **Generated by**: + The server + +* **Description**: + * When you create a document and provide an ID string that ends with a _pipe_ symbol (`|`), + the server will generate an **identity** ID. + * The server replaces the _pipe_ with a separator character (_slash_ (`/`) by default) and appends + an always-incrementing number. 
+ * Unlike the server-side ID, identity numbers are guaranteed to be **globally unique** across all nodes + in the database group. + +* **When to use**: + Use an identity ID only if you truly need document IDs that are incrementing. + For example, when generating invoice numbers or to meet legal or business requirements. + + Using an identity guarantees that IDs will increment, but doesn't guarantee that the sequence will be gapless. + Gaps may occur if documents are deleted or if a transaction fails after incrementing the counter. + For example: `companies/1`, `companies/2`, `companies/4`... + + +* **Example**: + * On a server running on node 'A': + * Creating the first document with `users|` => results in document ID: `users/1` + * Creating a second document with `users|` => results in document ID: `users/2` + * On a server running on node 'B': + * Creating a third document with `users|` => results in document ID: `users/3` + +* **Note**: + * Identity ID generation comes with a real cost. + In a cluster, where the database is replicated across multiple nodes, + the nodes must coordinate to ensure the same identity ID isn’t generated on two nodes. + This coordination requires network round-trips. + + * Moreover, if the server cannot reach the majority of nodes in the database group, + saving the document will fail because the next identity value cannot be generated. + + * All other ID generation methods continue to work even when the server is disconnected from the cluster. + So unless you specifically require incremental IDs, it’s better to use a different ID generation strategy. + +* **Customizing the separator character**: + The separator character used in the identity IDs can be customized. + Learn more in [Customizing the separator character](../../server/kb/document-identifier-generation.mdx#customizing-the-separator-character). + +* **Customizing the identity number**: + The numeric part of an identity ID is an always-incrementing value managed by the server. + You can modify the latest identity number used for a given prefix (typically a collection name) in the following ways. + The server will base the next generated identity ID on the updated value you provide. + * **From the Client API**: + Use the [NextIdentityForOperation](../../client-api/operations/maintenance/identities/increment-next-identity.mdx) to increment the latest identity number. + Use the [SeedIdentityForOperation](../../client-api/operations/maintenance/identities/seed-identity.mdx) to explicitly set its starting value. + * **From the Studio**: + Go to the [Identities view](../../studio/database/documents/identities-view.mdx) to view or edit the latest identity number for any prefix. + +--- + + + +```csharp +using (var session = store.OpenSession()) +{ + // Set the entity's Id to "users|" + var user = new User { Name = "John", Id = "users|" }; + session.Store(user); + + session.SaveChanges(); + // The server will generate an identity ID, e.g. "users/1" +} +``` + + +```csharp +using (var session = store.OpenSession()) +{ + var user = new User { Name = "John" }; + // Specify "users|" when calling Store() + session.Store(user, "users|"); + + session.SaveChanges(); + // The server will generate an identity ID, e.g. "users/1" +} +``` + + + + + + +* **Generated by**: + The client (from a range provided by the server) + +* **Description**: + * The HiLo algorithm allows generating document IDs on the **client side**. 
+ * The client requests a range of IDs from the server, + and the server ensures that this range is reserved exclusively for that client. + * Different clients receive different, non-overlapping ranges. + * The client can then safely generate IDs locally within the given range, + without further coordination with the server. + * For a more detailed explanation, see [HiLo Algorithm](../../client-api/document-identifiers/hilo-algorithm.mdx). + +* **When to use**: + Use HiLo when you want to create a document and immediately use its ID within the same transaction, + without needing an additional server call to fetch the ID. + +* **Example**: + `people/128-A`, `people/129-B` + +* **Customizing the separator character**: + The separator character used in the HiLo document IDs can be customized. + Learn more in [Customizing the separator character](../../server/kb/document-identifier-generation.mdx#customizing-the-separator-character). + +--- + + + +```csharp +using (var session = store.OpenSession()) +{ + // Do not set the Id property of the entity + var user = new User { Name = "John" }; + + // Pass only the entity to Store(), without specifying an ID + session.Store(user); + + // The ID is already available here because the client holds a reserved range from the server + var documentId = user.Id; + + session.SaveChanges(); + // The document will be saved with the ID assigned by the session, e.g. "users/1-A" +} +``` + + + + + + +* **Generated by**: + The server + +* **Description**: + * The output of a Map-Reduce index can be saved as artificial documents in a new collection. + * Their IDs are generated automatically by the server. + Each ID consists of a prefix, which is the name of the output collection you specify, + followed by a hash of the _reduce_ key that you cannot control. + * For a more detailed explanation, see [Artificial Documents](../../indexes/map-reduce-indexes.mdx#reduce-results-as-artificial-documents). + +* **When to use**: + Use artificial documents when you need to further process Map-Reduce index results, for example: + * Creating a recursive Map-Reduce index over the resulting artificial documents. + * Running ETL tasks or Subscriptions on the resulting artificial documents collection for further processing. + +* **Example**: + `MonthlyProductSales/1377/0576973199715021` + + + + + +The separator character used in the [Identity](../../server/kb/document-identifier-generation.mdx#identity-id) +and [HiLo](../../server/kb/document-identifier-generation.mdx#hilo-algorithm-id) document IDs can be customized. +By default, the separator is a _slash_ (`/`), but this can be changed to any other character, except _pipe_ (`|`). + +There are several ways to customize the separator. +It can be configured globally for all databases (server-wide), or per database, overriding the global setting. + +--- + +### From the Studio: + +* Configure the separator character globally in: [Client configuration (server-wide)](../../studio/server/client-configuration.mdx). +* Override the global setting for a specific database in: [Client configuration (per database)](../../studio/database/settings/client-configuration-per-database.mdx). +* This will apply to both **Identity** & **HiLo** IDs. + +--- + +### From the Client API - using operations: + +* Set the separator globally using the [PutServerWideClientConfigurationOperation](../../client-api/operations/server-wide/configuration/put-serverwide-client-configuration.mdx). 
+* Override the global setting for a specific database using the [PutClientConfigurationOperation](../../client-api/operations/maintenance/configuration/put-client-configuration.mdx).
+* This will apply to both **Identity** & **HiLo** IDs.
+
+
+```csharp
+// For example, set the separator character for a specific database
+
+var store = new DocumentStore
+{
+    Urls = new[] { "http://localhost:8080" },
+    Database = "SampleDB"
+}.Initialize();
+
+// Customize the separator character to '#' instead of the default '/'
+// using the 'PutClientConfigurationOperation' operation
+store.Maintenance.Send(new PutClientConfigurationOperation(
+    new ClientConfiguration { IdentityPartsSeparator = '#' }));
+
+using (var session = store.OpenSession())
+{
+    // Create document - HiLo ID
+    // =========================
+    var user1 = new User() { Name = "John" };
+    session.Store(user1);
+
+    // The session assigns the id immediately
+    var id = user1.Id; // "users#1-A"
+
+    session.SaveChanges();
+    // The document is saved with ID: "users#1-A"
+
+    // Create document - Identity ID
+    // =============================
+    var user2 = new User() { Name = "Jane" };
+    session.Store(user2, "users|");
+
+    session.SaveChanges();
+    // The document is saved with ID: "users#1"
+}
+```
+
+
+---
+
+### From the Client API - using conventions:
+
+* For HiLo IDs, you can also set the separator character using the [IdentityPartsSeparator convention](../../client-api/configuration/conventions.mdx#identitypartsseparator)
+  on the _DocumentStore_ during initialization.
+* Note: Any separator configured later via an operation or from the Studio will override this convention.
+* This **applies only to HiLo IDs** and has no effect on Identity IDs.
+
+
+```csharp
+// Set the separator character for HiLo ID via conventions
+var store = new DocumentStore
+{
+    Urls = new[] { "http://localhost:8080" },
+    Database = "SampleDB",
+    Conventions = new DocumentConventions
+    {
+        IdentityPartsSeparator = '$',
+        // ...
set any other conventions as needed + } +}.Initialize(); + +using (var session = store.OpenSession()) +{ + // Create document - HiLo ID + // ========================= + var user1 = new User() { Name = "John" }; + session.Store(user1); + + // The session assigns the id immediately + var id = user1.Id; // "users$1-A" + + session.SaveChanges(); + // The document is saved with ID: "users$1-A" + + // Create document - Identity ID + // ============================= + var user2 = new User() { Name = "Jane" }; + session.Store(user2, "users|"); + + session.SaveChanges(); + // The document is saved with ID: "users/1" (uses default separator '/') +} +``` + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/server/kb/assets/NumberTypesprecisions.png b/versioned_docs/version-7.1/server/kb/assets/NumberTypesprecisions.png new file mode 100644 index 0000000000..5b7b5149e1 Binary files /dev/null and b/versioned_docs/version-7.1/server/kb/assets/NumberTypesprecisions.png differ diff --git a/versioned_docs/version-7.1/server/kb/document-identifier-generation.mdx b/versioned_docs/version-7.1/server/kb/document-identifier-generation.mdx new file mode 100644 index 0000000000..c3536cff00 --- /dev/null +++ b/versioned_docs/version-7.1/server/kb/document-identifier-generation.mdx @@ -0,0 +1,20 @@ +--- +title: "Document Identifier Generation" +hide_table_of_contents: true +sidebar_label: "Document Identifier Generation" +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DocumentIDsCsharp from './_document-identifier-generation-csharp.mdx'; + +export const supportedLanguages = ["csharp"]; + + + + + + + diff --git a/versioned_docs/version-7.1/server/kb/javascript-engine.mdx b/versioned_docs/version-7.1/server/kb/javascript-engine.mdx new file mode 100644 index 0000000000..9a032a4a4b --- /dev/null +++ b/versioned_docs/version-7.1/server/kb/javascript-engine.mdx @@ -0,0 +1,166 @@ +--- +title: "Knowledge Base: JavaScript Engine" +hide_table_of_contents: true +sidebar_label: JavaScript Engine +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Knowledge Base: JavaScript Engine + + + +* RavenDB integrates **JavaScript scripting** across various features, including: + * [RQL projections](../../indexes/querying/projections.mdx) + * [Subscriptions](../../client-api/data-subscriptions/creation/examples.mdx#create-subscription-with-filtering-and-projection) + * [ETL](../../server/ongoing-tasks/etl/basics.mdx) + * [Smuggler (data import/export)](../../client-api/smuggler/what-is-smuggler.mdx#transformscript) + * [Single](../../client-api/operations/patching/single-document.mdx) or [Set based](../../client-api/operations/patching/set-based.mdx) document patches + * [Time Series](../../document-extensions/timeseries/client-api/javascript-support.mdx) and + [Incremental Time Series](../../document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx) + * [Embeddings generation tasks](../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#chunking-methods-and-tokens) + +* To execute JavaScript code, + RavenDB uses [Jint](https://github.com/sebastienros/jint), an open 
source JavaScript interpreter supporting ECMAScript 5.1. + +* In this page: + * [How RavenDB uses Jint](../../server/kb/javascript-engine.mdx#how-ravendb-uses-jint) + * [Predefined JavaScript functions](../../server/kb/javascript-engine.mdx#predefined-javascript-functions) + + + +## How RavenDB uses Jint + +* **Execution context**: + Jint executes a JavaScript function on a single document at a time, with each execution running in isolation. + Its processing context is limited to a single document, with no persistent execution state - + even in patch operations, where it might appear to maintain continuity. + +* **Performance considerations**: + Since initializing the Jint engine is resource-intensive, + RavenDB caches Jint instances based on user-defined scripts to reuse them and enhance performance. + +* **Execution limitations**: + * RavenDB limits the amount of statements that can be performed for each document processing. + The default value is **10,000** and it can be set using the [Patching.MaxStepsForScript](../../server/configuration/patching-configuration.mdx#patchingmaxstepsforscript) configuration. + * RavenDB limits the amount of cached Jint engines. + The default value is **2,048** and it can be set using the [Patching.MaxNumberOfCachedScripts](../../server/configuration/patching-configuration.mdx#patchingmaxstepsforscript) configuration. + * Recursive calls within scripts are limited to a depth of **64**, a constant value that cannot be modified. + +## Predefined JavaScript functions + +In addition to Jint's ECMAScript 5.1 implementation, +RavenDB provides the following set of predefined functions: + +#### **Document operations**: + +| Method Signature | Return type | Description | +|-------------------------------------------------------------|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **id(document)** | `string` | Returns the ID of the specified document[[ex]](../../client-api/operations/patching/set-based.mdx#updating-by-document-id). | +| **load(documentId)** | `object` | Returns the document with the given ID.
Used in [patching](../../client-api/operations/patching/single-document.mdx#loading-documents-in-a-script) or [ETL scripts](../../server/ongoing-tasks/etl/basics.mdx#transform). | +| **load(documentId, collectionName)** | `object` | Returns the document with the given ID.
Used in [JavaScript indexes](../../indexes/javascript-indexes.mdx). | +| **loadPath(document, pathString)** | `object` | Returns document(s) based on IDs found within the specified `pathString` in the given document.
The `pathString` can be in a simple _Foo.Bar_ form, in which case a single document is returned. A path like _Foo.Bars[].Buzz_ can return an array of documents. | +| **getMetadata(document)** | `object` | Returns the metadata of the specified document, including properties like `ChangeVector`, `ID`, and `LastModified`. | +| **lastModified(document)** | `number` | Returns the number of milliseconds elapsed since the last modification time (UTC) of the specified document. | +| **include(documentId)** | `Task` | Used in RQL [queries](../../client-api/session/querying/what-is-rql.mdx) to include the document with the specified ID with the results. | +| **put(documentId, document, [optional]changeVectorString)** | `Task` | Creates or updates a document with the specified ID.
Learn about the different document identifiers in
[Document identifier generation](../../server/kb/document-identifier-generation.mdx)[[ex]](../../client-api/operations/patching/single-document.mdx#add-document).
This function can also clone an existing document.
Note: attachments & counters will not be included in the clone[[ex]](../../client-api/operations/patching/single-document.mdx#clone-document). Used in patching. | +| **del(documentId)** | `void` | Deletes the document with the specified ID.
Used in [patching](../../client-api/operations/patching/set-based.mdx#updating-a-collection-name). | +| **archived.archiveAt(document, dateString)** | `void` | Schedules the specified document to be archived at the specified `dateString`.
Used in [patching](../../data-archival/schedule-document-archiving.mdx#schedule-multiple-documents-for-archiving---from-the-client-api). | +| **archived.unarchive(document)** | `void` | Unarchives the specified document.
Used in [patching](../../data-archival/unarchiving-documents.mdx#unarchive-all-documents-in-a-collection---from-the-client-api). | + +#### **Counter operations**: + +| Method Signature | Return type | Description | +|------------------------------------------------------|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **counter(documentId, counterName)** | `number` | Returns the value of the specified counter for the given document ID<sup>[[ex]](../../client-api/operations/patching/single-document.mdx#get-counter)</sup>. | +| **counter(document, counterName)** | `number` | Returns the value of the specified counter for the given document<sup>[[ex]](../../client-api/operations/patching/single-document.mdx#get-counter)</sup>. | +| **incrementCounter(documentId, counterName, value)** | `void` | Increments the specified counter for the given document ID.
If the counter does not exist, it is implicitly created with the provided `value`. Counter values can be negative, allowing both increment and decrement operations<sup>[[ex]](../../client-api/operations/patching/single-document.mdx#increment-counter)</sup>. | +| **incrementCounter(document, counterName, value)** | `void` | Increments the specified counter for the given document.
If the counter does not exist, it is implicitly created with the provided `value`. Counter values can be negative, allowing both increment and decrement operations<sup>[[ex]](../../client-api/operations/patching/single-document.mdx#increment-counter)</sup>. | +| **deleteCounter(documentId, counterName)** | `void` | Delete the specified counter from the given document ID<sup>[[ex]](../../client-api/operations/patching/single-document.mdx#delete-counter)</sup>. | +| **deleteCounter(document, counterName)** | `void` | Delete the specified counter from the given document<sup>[[ex]](../../client-api/operations/patching/single-document.mdx#delete-counter)</sup>. | +| **counterRaw(documentId, counterName)** | `object` | Returns a dictionary containing the counter value for each database node. The overall counter value is the sum of all node values. | +| **counterRaw(document, counterName)** | `object` | Returns a dictionary containing the counter value for each database node. The overall counter value is the sum of all node values. | + +#### **Time series operations**: + +* Learn more in: + * [Time series: JavaScript support](../../document-extensions/timeseries/client-api/javascript-support.mdx) + * [Incremental Time Series: JavaScript Support ](../../document-extensions/timeseries/incremental-time-series/client-api/javascript-support.mdx) + +| Method Signature | Return type | Description | +|------------------------------------------------|--------------|----------------------------------------------------------------------------------------------------------------------------------| +| **timeseries (doc, name)** | `object` | Returns a handle to the specified time series associated with the document, enabling operations such as append, delete, and get. | +| **timeseries.append (timestamp, values)** | `void` | Appends a new entry to the time series at the specified timestamp with the provided values. | +| **timeseries.append (timestamp, values, tag)** | `void` | Appends a new entry to the time series at the specified timestamp with the provided values and an optional tag. | +| **timeseries.delete (from, to)** | `void` | Deletes a range of entries from a time series. | +| **timeseries.get (from, to)** | `object[]` | Retrieves a range of time series entries. | +| **timeseries.increment(timestamp, values)** | `void` | Increments the values of an incremental time series entry at the specified timestamp. | +| **timeseries.increment(values)** | `void` | Increments the values of an incremental time series entry at the current time. | +| **timeseries.increment(timestamp, value)** | `void` | Increments a single value of an incremental time series entry at the specified timestamp. | +| **timeseries.increment(value)** | `void` | Increments a single value of an incremental time series entry at the current time. | + +#### **Compare-exchange**: + +| Method Signature | Return type | Description | +|---------------------------------|--------------|-------------------------------------------------------------------------------------------------------------------------------------| +| **cmpxchg(compareExchangeKey)** | `object` | Returns the value stored in a [Compare Exchange](../../client-api/operations/compare-exchange/overview.mdx) item for the specified key. 
| + +#### **String manipulation**: + +| Method Signature | Return type | Description | +|---------------------------------------------------------|--------------|----------------------------------------------------------------------------------------------------------------------------------------| +| **String.prototype.startsWith(searchString, position)** | `boolean` | Returns _true_ if the specified string starts with `searchString` at the given `position`. `position` is optional and defaults to `0`. | +| **String.prototype.endsWith(searchString, position)** | `boolean` | Returns _true_ if the specified string end with `searchString` at the given `position`. `position` is optional and defaults to `0`. | +| **String.prototype.padStart(targetLength, padString)** | `string` | Pads the string from the start with `padString`
(or whitespace by default) until it reaches `targetLength`. | +| **String.prototype.padEnd(targetLength, padString)** | `string` | Pads the string from the end with `padString`
(or whitespace by default) until it reaches `targetLength`. | +| **String.prototype.format(arg1, arg2, arg3 ...)** | `string` | Formats the string by replacing occurrences of `{[number]}` with the corresponding argument based on a zero-based index. | +| **startsWith(inputString, prefix)** | `boolean` | Returns _true_ if `inputString` starts with the specified `prefix`. | +| **endsWith(inputString, suffix)** | `boolean` | Returns _true_ if `inputString` ends with the specified `suffix`. | +| **regex(inputString, regex)** | `boolean` | Returns _true_ if `inputString` matches the specified `regex` pattern. | + +#### **Arrays & objects**: + +| Method Signature | Return type | Description | +|------------------------------------------------------|-----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Array.prototype.find(function callback)** | Array's element | Returns the first element in the array for which the `callback` function returns _true_. | +| **Object.map(input, function mapFunction, context)** | `Array` | Returns an array containing the results of `mapFunction` applied to all properties of `input` (or items, if input is an array). The `mapFunction` signature is `function(itemValue, itemKey)`. | + +#### **Mathematical operations**: + +| Method Signature | Return type | Description | +|---------------------------|--------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Raven_Min(num1, num2)** | `number` | Returns the smaller of `num1` and `num2`. If both params are of the same type (both numbers or both strings), a standard comparison is performed.
If they are of mixed types (one number and one string), the string is parsed as a double for comparison.
`LazyNumberValue` params resulting from the `scalarToRawString` method are not supported. |
+| **Raven_Max(num1, num2)** | `number` | Returns the larger of `num1` and `num2`. If both params are of the same type (both numbers or both strings), a standard comparison is performed.
If they are of mixed types (one number and one string), the string is parsed as a double for comparison.
`LazyNumberValue` params resulting from the `scalarToRawString` method are not supported. |
+
+#### **Conversion operations**:
+
+| Method Signature | Return type | Description |
+|------------------------------------------------|-----------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **scalarToRawString(document, lambdaToField)** | Raw field value.
`LazyStringValue` for strings,
`LazyNumberValue` for floating point numbers. | Returns the raw representation of a field. Useful for handling numbers that exceed the numeric or accuracy range of `double` (See [Numbers in Jint](../../server/kb/numbers-in-ravendb.mdx#numbers-in-javascript-engine)), or for optimizing memory consumption when projecting large string values.
The returned value is immutable. | +| **convertJsTimeToTimeSpanString(ticksNumber)** | `string` | Returns a human-readable `TimeSpan` representation of the specified `ticksNumber`. | + +#### **Generating embeddings**: + +* Learn more in: [Chunking methods and tokens](../../ai-integration/generating-embeddings/embeddings-generation-task.mdx#chunking-methods-and-tokens). + +| Method Signature | Return | Description | +|--------------------------------------------------------|------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **embeddings.generate(object)** | `null` | This method is designed to process a JSON object where each key represents a document field, and the value is a text-splitting method to process that field's content. | +| **text.split(text, maxTokensPerChunk)** | `string[]` | Splits a plain text string into multiple chunks based on the specified maximum token count. | +| **text.splitLines(text, maxTokensPerChunk)** | `string[]` | Splits a plain text string into individual lines based on line breaks and whitespace while ensuring that each line does not exceed the specified maximum token limit. | +| **text.splitParagraphs(lines, maxTokensPerChunk)** | `string[]` | Combines consecutive lines to form paragraphs while ensuring each paragraph is as complete as possible without exceeding the specified token limit. | +| **markdown.splitLines(text, maxTokensPerChunk)** | `string[]` | Splits markdown content into individual lines at line breaks while ensuring that each line remains within the specified token limit. | +| **markdown.splitParagraphs(lines, maxTokensPerChunk)** | `string[]` | Groups lines into coherent paragraphs at designated paragraph breaks while ensuring each paragraph remains within the specified token limit. | +| **html.strip(htmlText, maxTokensPerChunk)** | `string[]` | Removes HTML tags from the content and splits the resulting plain text into chunks based on a specified token limit. | + +#### **Debugging**: + +| Method Signature | Return | Description | +|-------------------------------------------------|----------|--------------------------------------------------------------------------------------------------------------------------------------------| +| **output(message)** or **console.log(message)** | `void` | Prints message to the debug output.
Used for debugging [single document patches](../../client-api/operations/patching/single-document.mdx). |
diff --git a/versioned_docs/version-7.1/server/kb/linux-setting-limits.mdx b/versioned_docs/version-7.1/server/kb/linux-setting-limits.mdx
new file mode 100644
index 0000000000..7a31ecb145
--- /dev/null
+++ b/versioned_docs/version-7.1/server/kb/linux-setting-limits.mdx
@@ -0,0 +1,33 @@
+---
+title: "Linux: Setting limits"
+hide_table_of_contents: true
+sidebar_label: "Linux: Setting limits"
+sidebar_position: 3
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Linux: Setting limits
+Linux security limits may degrade RavenDB performance even when physical resources allow more, and with an encrypted database they can even prevent normal operation.
+Debugging may also be affected (e.g., core dump creation).
+
+To set these limits persistently, edit `/etc/security/limits.conf` with the recommended values:
+```
+* soft core unlimited
+* hard core unlimited
+* soft nofile 131070
+* hard nofile 131070
+* soft nproc 131070
+* hard nproc 131070
+* soft memlock 1000
+* hard memlock 1000
+```
+
+Opening a larger port range can help the RavenDB machine process a larger number of parallel requests.
+E.g., this can be achieved by running ```sysctl -w net.ipv4.ip_local_port_range="10000 65535"```
+or by adding ```net.ipv4.ip_local_port_range=1024 65535``` to `/etc/sysctl.conf`.
diff --git a/versioned_docs/version-7.1/server/kb/linux-setting-memlock.mdx b/versioned_docs/version-7.1/server/kb/linux-setting-memlock.mdx
new file mode 100644
index 0000000000..e362a22c39
--- /dev/null
+++ b/versioned_docs/version-7.1/server/kb/linux-setting-memlock.mdx
@@ -0,0 +1,30 @@
+---
+title: "Linux: Setting memlock when using an encrypted database"
+hide_table_of_contents: true
+sidebar_label: "Linux: Setting `memlock` when using an encrypted database"
+sidebar_position: 4
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Linux: Setting memlock when using an encrypted database
+An encrypted database makes extensive use of the libsodium library, which requires high locked-memory limits.
+`memlock` refers to memory that will not be paged out; its limit can be viewed using `ulimit -l`.
+The `memlock` limit can be modified for a running session with `prlimit`:
+
+For example, for a 1MB limit:
+```
+prlimit -p pid --memlock 1MB:1MB
+```
+
+Persistent settings can be achieved by adding the following to `/etc/security/limits.conf`:
+```
+* soft memlock 1000
+* hard memlock 1000
+```
+
diff --git a/versioned_docs/version-7.1/server/kb/numbers-in-ravendb.mdx b/versioned_docs/version-7.1/server/kb/numbers-in-ravendb.mdx
new file mode 100644
index 0000000000..7d4cdf8f9b
--- /dev/null
+++ b/versioned_docs/version-7.1/server/kb/numbers-in-ravendb.mdx
@@ -0,0 +1,167 @@
+---
+title: "Knowledge Base: Numbers in RavenDB"
+hide_table_of_contents: true
+sidebar_label: Numbers in RavenDB
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Knowledge Base: Numbers in RavenDB
+
+
+* Real numbers can be infinitely big and infinitely precise; in computer science we can only "model" them with a limited degree of accuracy and range.
+  RavenDB is no exception and has its own boundaries of accuracy and range, which cover the most common cases of any business application.
+
+* This article is intended for users who know that they are going to use uncommon numeric ranges.
+  It covers the way the RavenDB Server approaches numeric values and what level of range and accuracy can be expected from each of its mechanisms.
+
+* RavenDB client support for numbers depends on the platform and the chosen deserialization mechanism.
+
+In this page:
+
+* [About numbers](../../server/kb/numbers-in-ravendb.mdx#about-numbers)
+* [Numbers in documents](../../server/kb/numbers-in-ravendb.mdx#numbers-in-documents)
+* [Numbers in JavaScript](../../server/kb/numbers-in-ravendb.mdx#numbers-in-javascript-engine)
+* [Numbers in indexes and queries](../../server/kb/numbers-in-ravendb.mdx#numbers-in-indexes-and-queries)
+* [Numbers in studio](../../server/kb/numbers-in-ravendb.mdx#numbers-in-management-studio)
+* [Numbers in client API](../../server/kb/numbers-in-ravendb.mdx#numbers-in-client-api)
+
+
+
+## About numbers
+
+Although real numbers have no limits in size or precision, in computing there are limitations.
+The simplest type of number known to computers is the integer. RavenDB fully supports integers of the `int` type, in the range [-2,147,483,648, 2,147,483,647].
+The simplest type of fraction known to computers is the floating point number. RavenDB fully supports double-precision floating point numbers, with 15-16 digits of precision and an approximate range of [±5.0 × 10^(−324), ±1.7 × 10^308].
+
+RavenDB supports storing numbers in the range of the `double` type described above. RavenDB supports storing numbers at any precision, but its indexing and JavaScript mechanisms are limited to the 16-digit precision of `double` numbers.
+To better understand the terms precision and range, see the following diagram, comparing the range and precision of 3 common .NET types: `long`, `double` and `decimal`:
+
+![precision and range in numeric types](./assets/NumberTypesprecisions.png)
+
+Numbers bigger than the double-precision maximum will be rejected by the server. Mechanisms supporting only double-precision numbers will by default truncate the number to a `double`, losing precision.
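+For instance, any JavaScript engine silently rounds an integer that has more significant digits than a `double` can hold - a minimal sketch of plain JavaScript behavior (not RavenDB-specific):
+
+```js
+// 19 significant digits - more than a double can represent exactly
+var distance = 1234567890123456789;
+
+// The engine keeps only the nearest representable double,
+// so the value reads back with the trailing digits lost:
+console.log(distance); // 1234567890123456800
+```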
+Please follow the next paragraphs to learn more about those limitations and possible workarounds.
+
+
+Please note that the max and min values of `long` are beyond the accuracy range of `double`. Therefore, `long` should be used with care, and it is recommended to avoid using it for global maximum or minimum notations.
+
+
+
+
+
+Examples in this page are based on the `InterstellarTrip` entity, describing an intergalactic journey of a brave pioneer:
+
+
+{`public class InterstellarTrip
+\{
+    public class Segment
+    \{
+        public string SourcePlanet \{ get; set; \}
+        public string DestinationPlanet \{ get; set; \}
+        public decimal DistanceInKilometers \{ get; set; \}
+    \}
+    public string TripName \{ get; set; \}
+    public List<Segment> TripTrack \{ get; set; \}
+    public decimal TotalDistance \{ get; set; \}
+    public decimal GasBill \{ get; set; \}
+\}
+`}
+
+
+
+
+## Numbers in documents
+
+Numbers in documents are represented by either:
+
+ * `long` for integers in the `long` range (-9,223,372,036,854,775,808 to 9,223,372,036,854,775,807)
+ * `LazyNumberValue` for all the other numbers, including other integers and floating point numbers. `LazyNumberValue` wraps a string representation of a number.
+
+The RavenDB server will accept documents with numbers in the `double` range at any precision.
+
+
+## Numbers in JavaScript engine
+
+RavenDB uses JavaScript in many mechanisms: projections in queries, subscriptions, ETL processes and more.
+The only number type supported by JavaScript is the double-precision floating point number, and `Jint`, the JavaScript engine RavenDB uses, is no exception.
+The precision of any number outside the precision range of `double` will be truncated to the number of digits a `double` can hold.
+RavenDB provides a way to receive the original value, before the cast to `double`. The only limitation is that it won't be possible to treat it as a number; instead, you will receive a string representation of the value.
+The way to do that is by using the `scalarToRawString` extension method, for example:
+
+
+
+{`From InterstellarTrips as trip
+Where trip.TripName = $name
+Select \{
+    Name: trip.TripName,
+    TotalDistance: scalarToRawString(trip, x => x.TotalDistance),
+    GasBill: scalarToRawString(trip, x => x.GasBill)
+\}
+`}
+
+
+
+
+
+## Numbers in indexes and queries
+
+RavenDB's indexes support either integers in the `long` range or fractions in the `double` range.
+Integers outside of the `long` boundaries will be treated as `double`, and therefore their precision will be truncated to `double`'s.
+Because of that, a truncated number's indexed value won't be equal to the original value, and queries may not return the expected results.
+To overcome that, it is recommended to treat those values as strings. The implication is that it will be possible to perform only string-related queries, not numeric ones.
+
+
+To handle numbers that exceed the precision of `long` or `double` in static indexes, index the string representation of the value. This uses the raw value rather than the truncated one.
+
+
+{`Map = interstellarTrips => from trip in interstellarTrips
+                           select new
+                           \{
+                               TotalDistance = trip.TotalDistance.ToString()
+                           \};
+`}
+
+
+
+
+Queries work as expected with integers in the `long` range and fractions in the `double` range and precision.
+To query numbers outside of that range, query the field using the string representation of the value, whether using an index or a collection query.
+
+
+{`From InterstellarTrips as trip
+Where trip.TripName = $name
+Select \{
+    Name: trip.TripName,
+    TotalDistance: scalarToRawString(trip, x => x.TotalDistance),
+    GasBill: scalarToRawString(trip, x => x.GasBill)
+\}
+`}
+
+
+
+Note that alphanumeric sorting of the string representation of numbers is accurate only for integers. Alphanumeric sorting of fractions and numbers in exponent notation will not analyze the value as a number.
+
+
+
+
+## Numbers in management studio
+
+The Management Studio treats documents as JavaScript objects; therefore, it treats their numbers as JavaScript numbers, which are always of type `double`.
+Note that editing documents with numeric data outside the precision range of `double` will truncate those numbers to a proper `double`, unintentionally modifying those fields.
+
+
+## Numbers in client API
+
+Numbers in RavenDB clients depend on the limitations of the platforms and the serialization/deserialization mechanisms. See the articles for the desired languages:
+
+[Number (de)serialization](../../client-api/configuration/serialization.mdx#working-with-numbers)
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/_category_.json b/versioned_docs/version-7.1/server/ongoing-tasks/_category_.json
new file mode 100644
index 0000000000..a56a405450
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 7,
+  "label": "Ongoing Tasks"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/backup-overview.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/backup-overview.mdx
new file mode 100644
index 0000000000..f77f233f58
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/backup-overview.mdx
@@ -0,0 +1,197 @@
+---
+title: "Backup Overview"
+hide_table_of_contents: true
+sidebar_label: Backup Overview
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Backup Overview
+
+
+* Maintaining a proper backup routine ensures that you will be able to restore your data to its state at chosen points in time.
+  Use this overview as an introduction to backing up and restoring your databases.
+
+* The two principal reasons for backing up your database are -
+  * **Securing data** in case catastrophe strikes.
+  * **Freezing data at chosen points in time** to retain access to it in various stages of its existence/development.
+
+* RavenDB's Backup is an **Ongoing task**.
+  * Routinely backing up your data is a fundamental aspect of your database maintenance.
+    Backup is therefore provided not as a one-time operation, but as an [ongoing task](../../studio/database/tasks/ongoing-tasks/general-info.mdx)
+    that runs in the background.
+    It is configured once and then executed periodically according to the defined schedule.
+
+* You can create and configure backup tasks using the [Client API](../../client-api/operations/maintenance/backup/backup-overview.mdx), or
+  in [Studio](../../studio/database/tasks/backup-task.mdx) by navigating from **Tasks** -> **Backups** -> **Create a Periodic Backup**.
+
+* On a [sharded](../../sharding/overview.mdx) database, a single backup task
+  is defined by the user for all shards, and RavenDB automatically defines
+  sub-tasks that create backups per shard.
+ Read about backups on a sharded database [in the section dedicated to it](../../sharding/backup-and-restore/backup.mdx). + +* In this page: + * [Backup Type](../../server/ongoing-tasks/backup-overview.mdx#backup-type) + * [Backup Contents](../../server/ongoing-tasks/backup-overview.mdx#backup-contents) + * [Backup Scope: Full or Incremental](../../server/ongoing-tasks/backup-overview.mdx#backup-scope:-full-or-incremental) + * [Backup Name and Folder Structure](../../server/ongoing-tasks/backup-overview.mdx#backup-name-and-folder-structure) + * [Encryption](../../server/ongoing-tasks/backup-overview.mdx#encryption) + * [Compression](../../server/ongoing-tasks/backup-overview.mdx#compression) + * [Retention Policy](../../server/ongoing-tasks/backup-overview.mdx#retention-policy) + * [Restoration Procedure](../../server/ongoing-tasks/backup-overview.mdx#restoration-procedure) + +## Backup Type + +There are two backup types: [Logical-backup](../../client-api/operations/maintenance/backup/backup-overview.mdx#logical-backup) (or simply "Backup") and [Snapshot](../../client-api/operations/maintenance/backup/backup-overview.mdx#snapshot). + +#### Logical Backup + A logical backup is a compressed JSON dump of database contents, including documents, index definitions, and [additional data](../../server/ongoing-tasks/backup-overview.mdx#backup-contents) + that can be stored in [full](../../server/ongoing-tasks/backup-overview.mdx#full-backup) and [incremental](../../server/ongoing-tasks/backup-overview.mdx#incremental-backup) backups. + + * After a database restore, the dataset is re-indexed by the backed-up index definitions. + This can be time-consuming in large datasets. +#### Snapshot + A snapshot is a binary image of the database contents, full indexes, and [additional data](../../server/ongoing-tasks/backup-overview.mdx#backup-contents) + at a given point in time. + + * Restoration time is reduced because no re-indexing is needed, but more data is transferred during backups. + * [Incremental Snapshot backups](../../server/ongoing-tasks/backup-overview.mdx#backup-scope:-full-or-incremental) + (the changes made since the last backup) **do not incrementally update indexes or change vector data** after the initial complete Snapshot image. + + Snapshots are only available for _Enterprise subscribers_. + + + + +## Backup Contents + +Backed-up data includes both database-level and cluster-level contents, as detailed below. 
+
+| Database-level data | [Logical Backup](../../server/ongoing-tasks/backup-overview.mdx#logical-backup) | [Snapshot](../../server/ongoing-tasks/backup-overview.mdx#snapshot) |
+|---- | - | - |
+| [Documents & Revisions](https://ravendb.net/learn/inside-ravendb-book/reader/4.0/3-document-modeling) | ✔ | ✔ |
+| [Attachments](../../document-extensions/attachments/what-are-attachments.mdx) | ✔ | ✔ |
+| [Counters](../../document-extensions/counters/overview.mdx) | ✔ | ✔ |
+| [Time-Series](../../document-extensions/timeseries/overview.mdx) | ✔ | ✔ |
+| [Change Vector data](../../server/clustering/replication/change-vector.mdx#after-restoring-a-database-from-backup) | Change Vectors are not preserved; they are recreated from scratch upon restore | ✔ |
+| [Tombstones](../../glossary/tombstone.mdx) | ✔ | ✔ |
+| [Conflicts](../../server/clustering/replication/replication-conflicts.mdx) | ✔ | ✔ |
+
+
+| Cluster-level data | [Logical Backup](../../server/ongoing-tasks/backup-overview.mdx#logical-backup) | [Snapshot](../../server/ongoing-tasks/backup-overview.mdx#snapshot) |
+|---- | - | - |
+| [Database Record](../../studio/database/settings/database-record.mdx) | ✔ | ✔ |
+| [Compare-exchange values](../../client-api/operations/compare-exchange/overview.mdx) | ✔ | ✔ |
+| [Identities](../../client-api/document-identifiers/working-with-document-identifiers.mdx#identities) | ✔ | ✔ |
+| [Indexes](../../indexes/creating-and-deploying.mdx) | Index definitions are saved and used to rebuild indexes during database restoration | ✔ |
+| [Ongoing Tasks configuration](../../studio/database/tasks/ongoing-tasks/general-info.mdx) | ✔ | ✔ |
+| [Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx) | ✔ | ✔ |
+
+
+
+## Backup Scope: Full or Incremental
+
+You can set the Backup task to create either **full** or **incremental** backups during its periodic executions.
+
+#### **Full Backup**
+  A full backup contains **all** current database contents and configuration.
+
+  * The creation of a full-backup file normally **takes longer** and **requires more storage space** than the creation of an incremental-backup file.
+
+#### **Incremental Backup**
+  An incremental backup contains only **the difference** between the current database data and the last backed-up data.
+
+  * An incremental-backup file is normally **faster to create** and **smaller** than a full-backup file.
+  * When an incremental-backup task is executed, it checks for the existence of a previous backup file.
+    If such a file doesn't exist, the first backup created will be a full backup.
+    Subsequent backups will be incremental.
+
+* **A Typical Configuration**
+  A typical configuration would include quick incremental-backup runs that "fill the gaps" between full backups.
+  * For example -
+    a **full-backup** task is set to run **every 12 hours**,
+    and an **incremental-backup** task to run **every 30 minutes**.
+
+
+
+## Backup Name and Folder Structure
+
+#### Naming
+
+Backup folders and files are **named automatically**.
+
+* Their names are constructed from:
+  Current Date and Time
+  Backed-up Database Name
+  Owner-Node Tag
+  Backup Type ("backup" or "snapshot")
+  Backup Scope ("full-backup" or "incremental-backup")
+
+* For example:
+  * `2018-12-26-16-17.ravendb-Products-A-backup` is the name automatically given to a backup _folder_.
+ * "**2018-12-26-16-17**" - Backup Date and time + * "**Products**" - Backed-up Database name + * "**A**" - Executing node's tag + * "**backup**" - Backup type (backup/snapshot) + * `2018-12-26-16-17.ravendb-full-backup` is the name automatically given to the backup _file_ inside this folder. + * "**full-backup**" - For a full backup; an incremental backup's name will state "incremental-backup". +#### Folder Structure + +A typical backup folder holds a single full-backup file and a list of incremental-backup files that supplement it. +Each incremental backup file contains only the delta from its predecessor backup file. + +* For example - + 2018-12-26-09-00.ravendb-full-backup + 2018-12-26-12-00.ravendb-incremental-backup + 2018-12-26-15-00.ravendb-incremental-backup + 2018-12-26-18-00.ravendb-incremental-backup + + + +## Encryption + +Stored backup data can be [Encrypted](../../client-api/operations/maintenance/backup/encrypted-backup.mdx) or Unencrypted. + + + +## Compression + +* A backup always consists of a single compressed file. + It is so for all backup formats: Full "logical" backup dumps, Snapshot images, and the Incremental backups that supplement both. +* Data is compressed using [gzip](https://www.gzip.org/). + + + +## Retention Policy + +By default, backups are not deleted. The backup retention policy sets a retention period, +at the end of which backups are deleted. Deletion occurs during the next scheduled backup task +after the end of the retention period. + +* Set retention policy [via API](../../client-api/operations/maintenance/backup/backup-overview.mdx#backup-retention-policy). +* Set retention policy [via Studio](../../studio/database/tasks/backup-task.mdx#retention-policy). + + + +## Restoration Procedure + +To restore a database - + +* [Provide RavenDB](../../client-api/operations/maintenance/backup/restore.mdx#restoring-a-database:-configuration-and-execution) + with the path to the backup folder. +* To [restore an encrypted database](../../client-api/operations/maintenance/backup/encrypted-backup.mdx#restoring-an-encrypted-backup) + you must provide the encryption key. +* RavenDB will search the backup folder and restore the full-backup found in it. +* RavenDB will then restore the incremental-backups one by one, up to and including the last one. + You can set `LastFileNameToRestore` to + [stop restoration](../../client-api/operations/maintenance/backup/restore.mdx#optional-settings) + at a specific backup file. 
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/_category_.json b/versioned_docs/version-7.1/server/ongoing-tasks/etl/_category_.json
new file mode 100644
index 0000000000..5368f151db
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 3,
+  "label": "ETL"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/raven-etl-setup.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/raven-etl-setup.png
new file mode 100644
index 0000000000..33729282af
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/raven-etl-setup.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-advanced_01.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-advanced_01.png
new file mode 100644
index 0000000000..1f3d7e6052
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-advanced_01.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-advanced_02.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-advanced_02.png
new file mode 100644
index 0000000000..a8cc49cf19
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-advanced_02.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-setup.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-setup.png
new file mode 100644
index 0000000000..91ed6219a7
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-setup.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-tables.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-tables.png
new file mode 100644
index 0000000000..86b1cfa046
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake-etl-tables.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake_etl_new_task.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake_etl_new_task.png
new file mode 100644
index 0000000000..b3f83b6241
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/snowflake_etl_new_task.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/sql-etl-setup.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/sql-etl-setup.png
new file mode 100644
index 0000000000..cc80f61a12
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/sql-etl-setup.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/sql-etl-tables.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/sql-etl-tables.png
new file mode 100644
index 0000000000..e03177fc35
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/sql-etl-tables.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/test-raven.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/test-raven.png
new file mode 100644
index 0000000000..c43254f18a
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/test-raven.png differ
diff --git
a/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/test-sql.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/test-sql.png
new file mode 100644
index 0000000000..924049c64f
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/assets/test-sql.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/basics.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/basics.mdx
new file mode 100644
index 0000000000..06a4323144
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/basics.mdx
@@ -0,0 +1,269 @@
+---
+title: "Ongoing Tasks: ETL Basics"
+hide_table_of_contents: true
+sidebar_label: ETL Basics
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Ongoing Tasks: ETL Basics
+
+
+* **ETL (Extract, Transform & Load)** is a three-stage RavenDB process that transfers data from a RavenDB database to an external target.
+  The data can be filtered and transformed along the way.
+
+* The external target can be:
+  * Another RavenDB database instance (outside of the [Database Group](../../../studio/database/settings/manage-database-group.mdx))
+  * A relational database
+  * Elasticsearch
+  * OLAP (Online Analytical Processing)
+  * A message broker such as Apache Kafka, RabbitMQ, or Azure Queue Storage
+
+* ETL can be used on [sharded](../../../sharding/etl.mdx) and non-sharded databases alike.
+  Learn more about how ETL works on a sharded database [here](../../../sharding/etl.mdx).
+
+* In this page:
+  * [Why use ETL](../../../server/ongoing-tasks/etl/basics.mdx#why-use-etl)
+  * [Defining ETL Tasks](../../../server/ongoing-tasks/etl/basics.mdx#defining-etl-tasks)
+  * [ETL Stages:](../../../server/ongoing-tasks/etl/basics.mdx#etl-stages)
+    * [Extract](../../../server/ongoing-tasks/etl/basics.mdx#extract)
+    * [Transform](../../../server/ongoing-tasks/etl/basics.mdx#transform)
+    * [Load](../../../server/ongoing-tasks/etl/basics.mdx#load)
+  * [Troubleshooting](../../../server/ongoing-tasks/etl/basics.mdx#troubleshooting)
+
+## Why use ETL
+
+* **Share relevant data**
+  Send data in a well-defined format to match specific requirements, ensuring only relevant data is transmitted
+  (e.g., sending data to an existing reporting solution).
+
+* **Protect your data - Share partial data**
+  Limit access to sensitive data. Details that should remain private can be filtered out, so that only partial data is shared.
+
+* **Reduce system calls**
+  Distribute data across related services within your system architecture, allowing each service to access its _own copy_ of the data without cross-service calls
+  (e.g., sharing a product catalog among multiple stores).
+
+* **Transform the data**
+  * Modify the content being sent as needed with JavaScript code.
+  * Multiple documents can be sent from a single source document.
+  * Data can be transformed to match the target destination's model.
+
+* **Aggregate your data**
+  Data sent from multiple locations can be aggregated in a central server
+  (e.g., aggregating sales data from point-of-sale systems for centralized calculations).
+
+
+
+## Defining ETL Tasks
+
+* The following ETL tasks can be defined:
+  * [RavenDB ETL](../../../server/ongoing-tasks/etl/raven.mdx) - send data to another _RavenDB database_
+  * [SQL ETL](../../../server/ongoing-tasks/etl/sql.mdx) - send data to an _SQL database_
+  * [Snowflake ETL](../../../server/ongoing-tasks/etl/snowflake.mdx) - send data to a _Snowflake warehouse_
+  * [OLAP ETL](../../../server/ongoing-tasks/etl/olap.mdx) - send data to an _OLAP destination_
+  * [Elasticsearch ETL](../../../server/ongoing-tasks/etl/elasticsearch.mdx) - send data to an _Elasticsearch destination_
+  * [Kafka ETL](../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx) - send data to a _Kafka message broker_
+  * [RabbitMQ ETL](../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx) - send data to a _RabbitMQ exchange_
+  * [Azure Queue Storage ETL](../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx) - send data to an _Azure Queue Storage message queue_
+  * [Amazon SQS ETL](../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx) - send data to an _Amazon SQS message queue_
+
+* All ETL tasks can be defined from the Client API or from the [Studio](../../../studio/database/tasks/ongoing-tasks/general-info.mdx).
+
+* The destination address and access options are set using a pre-defined **connection string**, simplifying deployment across different environments.
+  For example, with RavenDB ETL, multiple URLs can be configured in the connection string since the target database can reside on multiple nodes within the Database Group of the destination cluster.
+  If one of the destination nodes is unavailable, RavenDB automatically executes the ETL process against another node specified in the connection string.
+  Learn more in the [Connection Strings](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx) article.
+
+
+
+## ETL Stages
+
+ETL's three stages are:
+
+* [Extract](../../../server/ongoing-tasks/etl/basics.mdx#extract) - Extract the documents from the database
+* [Transform](../../../server/ongoing-tasks/etl/basics.mdx#transform) - Transform & filter the document data according to the supplied script (optional)
+* [Load](../../../server/ongoing-tasks/etl/basics.mdx#load) - Load (write) the transformed data into the target destination
+### Extract
+
+The ETL process starts with retrieving the documents from the database.
+You can choose which documents will be processed by the next two stages (Transform and Load).
+
+The possible options are:
+
+* Documents from a single collection
+* Documents from multiple collections
+* All documents
+### Transform
+
+* This stage transforms and filters the extracted documents according to a provided script.
+  Any transformation can be done so that only relevant data is shared.
+  The script is written in JavaScript and its input is a document.
+
+* A task can be provided with multiple transformation scripts.
+  Different scripts run in separate processes, allowing multiple scripts to run in parallel.
+
+* You can apply any transformation and send only the data you are interested in sharing.
+  The following is an example of a RavenDB ETL script processing documents from the "Employees" collection:
+
+
+
+{`var managerName = null;
+
+if (this.ReportsTo !== null)
+\{
+    var manager = load(this.ReportsTo);
+    managerName = manager.FirstName + " " + manager.LastName;
+\}
+
+// Load the object to a target destination by the name of "EmployeesWithManager"
+loadToEmployeesWithManager(\{
+    Name: this.FirstName + " " + this.LastName,
+    Title: this.Title,
+    BornOn: new Date(this.Birthday).getFullYear(),
+    Manager: managerName
+\});
+`}
+
+
+
+
+#### Syntax
+
+In addition to the ECMAScript 5.1 API,
+RavenDB introduces the following functions and members that can be used in the transformation script:
+
+| Member / Function | Type | Description |
+|-------------------|----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `this` | object | The current document (with metadata) |
+| `id(document)` | function | Returns the document ID |
+| `load(id)` | function | Load another document.
This will increase the maximum number of allowed steps in a script.
**Note**:
Changes made to the other _loaded_ document will Not trigger the ETL process. | + +Specific ETL functions: + +| Function / Member | Type | Description | +|-----------------------------------------|----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `loadTo` | function | Load an object to the specified target.
This command has several syntax options,
see details [below](../../../server/ongoing-tasks/etl/basics.mdx#themethod).
**Note:** An object will only be sent to the destination if the `loadTo` method is called. |
+| **Attachments:** | | |
+| `loadAttachment(name)` | function | Load an attachment of the current document. |
+| `hasAttachment(name)` | function | Check if an attachment with the given name exists for the current document. |
+| `getAttachments()` | function | Get a collection of attachment details for the current document. Each item has the following properties:
`Name`, `Hash`, `ContentType`, `Size`. |
+| `<doc>.addAttachment([name,] attachmentRef)` | function | Add an attachment to a transformed document (`<doc>`) that will be sent to a target.
For details specific to Raven ETL, refer to this [section](../../../server/ongoing-tasks/etl/raven.mdx#attachments). | + +
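+
+For illustration, the attachment functions above can be combined in a transformation script like the following minimal sketch (the `Employees` collection and the `photo` attachment name are assumptions made for this example):
+
+```js
+var employee = {
+    Name: this.FirstName + " " + this.LastName
+};
+
+// Copy the source document's "photo" attachment, if it exists,
+// to the transformed document under the name "picture"
+if (hasAttachment("photo")) {
+    employee.addAttachment("picture", loadAttachment("photo"));
+}
+
+loadToEmployees(employee);
+```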
+
+
+
+
+#### The `loadTo` method
+
+
+An object will only be sent to the destination if the `loadTo` method is called.
+
+
+To specify which target to load the data into, use either of the following overloads in your script.
+The two methods are equivalent, offering alternative syntax.
+
+* **`loadTo<TargetName>(obj, {attributes})`**
+  * Here the target is specified as part of the function name.
+  * The _<TargetName>_ in this syntax is Not a variable and cannot be used as one,
+    it is simply a string literal of the target's name.
+
+* **`loadTo('TargetName', obj, {attributes})`**
+  * Here the target is passed as an argument to the method.
+  * Separating the target name from the `loadTo` function name makes it possible to include symbols like `'-'` and `'.'` in target names.
+    This is not possible when the `loadTo<TargetName>` syntax is used, because including special characters in the name of a JavaScript function makes it invalid.
+  * This syntax may vary for some ETL types.
+    Find the accurate syntax for each ETL type in the type's specific documentation.
+For each ETL type, the target must be:
+
+  * RavenDB ETL: a _collection_ name
+  * SQL ETL: a _table_ name
+  * OLAP ETL: a _folder_ name
+  * Elasticsearch ETL: an _index_ name
+  * Kafka ETL: a _topic_ name
+  * RabbitMQ ETL: an _exchange_ name
+  * Azure Queue Storage ETL: a _queue_ name
+
+
+
+
+
+#### Batch processing
+
+Documents are extracted and transformed by the ETL process in batches.
+The number of documents processed depends on the following configuration limits:
+
+* [`ETL.ExtractAndTransformTimeoutInSec`](../../../server/configuration/etl-configuration.mdx#etlextractandtransformtimeoutinsec) (default: 30 sec)
+  Time-frame for the extraction and transformation stages (in seconds), after which the loading stage will start.
+
+* [`ETL.MaxNumberOfExtractedDocuments`](../../../server/configuration/etl-configuration.mdx#etlmaxnumberofextracteddocuments) (default: 8192)
+  Maximum number of extracted documents in an ETL batch.
+
+* [`ETL.MaxNumberOfExtractedItems`](../../../server/configuration/etl-configuration.mdx#etlmaxnumberofextracteditems) (default: 8192)
+  Maximum number of extracted items (documents, counters) in an ETL batch.
+
+* [`ETL.MaxBatchSizeInMb`](../../../server/configuration/etl-configuration.mdx#etlmaxbatchsizeinmb) (default: 64 MB)
+  Maximum size of an ETL batch in MB.
+
+### Load
+
+* Loading the results to the target destination is the last stage.
+
+* In contrast to [Replication](../../../server/clustering/replication/replication-overview.mdx),
+  ETL is a push-only process that _writes_ data to the destination whenever documents from the relevant collections are changed. **Existing entries on the target will always be overwritten**.
+
+* Updates are implemented by executing consecutive DELETEs and INSERTs.
+  When a document is modified, the delete command is sent before the new data is inserted, and both are processed under the same transaction on the destination side.
+  This applies to all ETL types, with two exceptions:
+  * In RavenDB ETL, when documents are loaded to **the same** collection there is no need to send a DELETE, because the document on the other side has the same identifier and will simply be updated.
+  * In SQL ETL, you can configure the task to use inserts only, which is a viable option for append-only systems.
+
+
+
+**Securing ETL Processes for Encrypted Databases**:
+
+If your RavenDB database is encrypted, then by default you must not send data in an ETL process over a non-encrypted channel.
+This means that the connection to the target must be secured:
+
+- In RavenDB ETL, the URL of a destination server has to use HTTPS
+  (a server certificate of the source server needs to be registered as a client certificate on the destination server).
+- In SQL ETL, a connection string to an SQL database must specify an encrypted connection (the exact option is specific to each SQL engine).
+
+This validation can be turned off by selecting the _Allow ETL on a non-encrypted communication channel_ option in the Studio,
+or by setting `AllowEtlOnNonEncryptedChannel` if the task is defined using the Client API.
+Please note that in such cases, your data encrypted at rest _won't_ be protected in transit.
+
+
+
+
+
+## Troubleshooting
+
+ETL errors and warnings are [logged to files](../../../server/troubleshooting/logging.mdx) and displayed in the notification center panel.
+You will be notified if any of the following events happen:
+
+- Connection error to the target
+- The JS script is invalid
+- Transformation error
+- Load error
+- Slow SQL was detected
+
+
+**Fallback Mode**:
+If the ETL process cannot proceed with the load stage (e.g. it can't connect to the destination), then it enters fallback mode.
+Fallback mode means suspending the process and retrying it periodically.
+The fallback time starts from 5 seconds and is doubled on every consecutive error, according to the time passed since the last error,
+but it never exceeds the [`ETL.MaxFallbackTimeInSec`](../../../server/configuration/etl-configuration.mdx#etlmaxfallbacktimeinsec) configuration value (default: 900 sec).
+
+Once the process is in fallback mode, the _Reconnect_ state is shown in the Studio.
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/elasticsearch.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/elasticsearch.mdx
new file mode 100644
index 0000000000..c72314a6d3
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/elasticsearch.mdx
@@ -0,0 +1,394 @@
+---
+title: "Ongoing Tasks: Elasticsearch ETL"
+hide_table_of_contents: true
+sidebar_label: Elasticsearch ETL
+sidebar_position: 5
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Ongoing Tasks: Elasticsearch ETL
+
+
+* An **Elasticsearch** [ETL](../../../server/ongoing-tasks/etl/basics.mdx) task creates an ETL process
+  from selected collections in a RavenDB database to Elasticsearch destinations.
+
+* You can define an Elasticsearch ETL task using [Studio](../../../studio/database/tasks/ongoing-tasks/elasticsearch-etl-task.mdx)
+  or your [client](../../../client-api/operations/maintenance/etl/add-etl.mdx#example---add-elasticsearch-etl-task).
+
+* In this page:
+  * [Elasticsearch ETL](../../../server/ongoing-tasks/etl/elasticsearch.mdx#elasticsearch-etl)
+  * [Transformation Script](../../../server/ongoing-tasks/etl/elasticsearch.mdx#transformation-script)
+    * [Alternative Syntax](../../../server/ongoing-tasks/etl/elasticsearch.mdx#alternative-syntax)
+  * [Data Delivery](../../../server/ongoing-tasks/etl/elasticsearch.mdx#data-delivery)
+    * [What is Transferred](../../../server/ongoing-tasks/etl/elasticsearch.mdx#what-is-transferred)
+    * [Document Identifiers](../../../server/ongoing-tasks/etl/elasticsearch.mdx#document-identifiers)
+    * [Transactions](../../../server/ongoing-tasks/etl/elasticsearch.mdx#transactions)
+    * [Insert Only Mode](../../../server/ongoing-tasks/etl/elasticsearch.mdx#insert-only-mode)
+  * [Elasticsearch Index Definition](../../../server/ongoing-tasks/etl/elasticsearch.mdx#elasticsearch-index-definition)
+  * [Client API](../../../server/ongoing-tasks/etl/elasticsearch.mdx#client-api)
+    * [Add an Elasticsearch ETL Task](../../../server/ongoing-tasks/etl/elasticsearch.mdx#add-an-elasticsearch-etl-task)
+    * [Add an Elasticsearch Connection String](../../../server/ongoing-tasks/etl/elasticsearch.mdx#add-an-elasticsearch-connection-string)
+  * [Supported Elasticsearch Versions](../../../server/ongoing-tasks/etl/elasticsearch.mdx#supported-elasticsearch-versions)
+
+
+## Elasticsearch ETL
+
+* The following steps are required when creating an Elasticsearch ETL task:
+  * Define a [connection string](../../../server/ongoing-tasks/etl/elasticsearch.mdx#add-an-elasticsearch-connection-string) which includes:
+    * URLs to Elasticsearch nodes.
+    * The authentication method required by the Elasticsearch nodes.
+  * **Define the Elasticsearch Indexes**
+    * Indexes are used by Elasticsearch to store and locate documents.
+    * The ETL task will send new documents to the specified Elasticsearch indexes.
+    * If not otherwise specified, existing Elasticsearch documents will be removed before adding new documents.
+    * A [document identifier](../../../server/ongoing-tasks/etl/elasticsearch.mdx#document-identifiers)
+      field property is defined per document, and is used by the delete command to locate the matching documents.
+  * **Define Transformation Scripts**.
+    The transformation script determines which RavenDB documents will be transferred,
+    to which Elasticsearch Indexes, and in what form.
+
+* For a thorough step-by-step explanation:
+  * Learn [here](../../../server/ongoing-tasks/etl/elasticsearch.mdx#add-an-elasticsearch-etl-task)
+    how to define an Elasticsearch ETL task using **code**.
+  * Learn [here](../../../studio/database/tasks/ongoing-tasks/elasticsearch-etl-task.mdx)
+    how to define an Elasticsearch ETL task using **Studio**.
+
+
+
+## Transformation Script
+
+* The structure and syntax of an Elasticsearch ETL transformation script are similar to
+  those of all other ETL types ([RavenDB ETL](../../../server/ongoing-tasks/etl/raven.mdx),
+  [SQL ETL](../../../server/ongoing-tasks/etl/sql.mdx), and
+  [OLAP ETL](../../../server/ongoing-tasks/etl/olap.mdx)) scripts.
+  The script defines which documents will be _Extracted_ from the database,
+  _Transforms_ the retrieved data, and _Loads_ it to the Elasticsearch destination.
+  Learn about ETL transformation scripts [here](../../../server/ongoing-tasks/etl/basics.mdx#transform).
+
+* The script **Loads** data to the Elasticsearch destination using the
+  [loadTo\\<Target\\>(obj)](../../../server/ongoing-tasks/etl/basics.mdx#transform) command.
* `Target` is the name of the Elasticsearch index to which the data is transferred.
+    * **In the task settings**:
+      Define Elasticsearch Index names using only lower-case characters (as required by Elasticsearch).
+      E.g., `orders`.
+    * **In the transformation script**:
+      The target can be defined using both upper and lower-case characters.
+      The task will transform the index name to all lower-case characters before sending it to Elasticsearch.
+      E.g., use either `loadToOrders` or `loadToorders`.
+  * `obj` is an object defined by the script that will be loaded to Elasticsearch.
+    It determines the shape and contents of the document that will be created on the Elasticsearch Index.
+    E.g., the following script defines the `orderData` object and loads it to the `orders` index:
+
+
+{`var orderData = \{ DocId: id(this),
+                   OrderLinesCount: this.Lines.length,
+                   TotalCost: 0 \};
+
+loadToOrders(orderData);
+`}
+
+
+### Alternative Syntax
+
+The target index name can be passed to the `loadTo` command separately, as a string argument,
+using this syntax: `loadTo('Target', obj)`
+
+* **Example**:
+  The following two calls to `loadTo` are equivalent.
+  `loadToOrders(obj);`
+  `loadTo('Orders', obj);`
+
+
+
+  * The target name `'Orders'` in this syntax is **not** a variable and **cannot** be used as one:
+    it is simply a string literal of the target's name.
+  * Separating the target name from the `loadTo` command makes it possible to include symbols like
+    `-` and `.` in target names. This is not possible when the standard `loadToOrders` syntax is
+    used, because including special characters in the name of a JS function makes it invalid.
+
+
+
+
+
+## Data Delivery
+### What is Transferred
+
+An Elasticsearch ETL task transfers **documents only**.
+Document extensions like attachments, counters, or time series will not be transferred.
+### Document Identifiers
+
+* When Elasticsearch stores RavenDB documents, it provides each of them
+  with an automatically generated ID.
+* RavenDB needs to delete and replace documents, but cannot do this
+  using Elasticsearch's arbitrarily generated IDs.
+  Instead, one of the transferred document's properties is used as the ID.
+* The identifier must be a property that the transformation script passes to Elasticsearch.
+  To achieve this:
+  * Add a dedicated property to the transferred data structure in your script
+    that will hold the original RavenDB document ID.
+    The property's Name can be any name of your choice.
+    The property's Value must be: `id(this)`
+  * E.g., the **DocId** property below is used to hold the RavenDB document ID in the transferred document.
+
+
+{`var orderData = \{
+    DocId: id(this), // document ID property
+    OrderLinesCount: this.Lines.length,
+    TotalCost: 0
+\};
+
+loadToOrders(orderData);
+`}
+
+* In addition to specifying this document property in the script, it must be defined for the ETL task:
+  * Either set `DocumentIdProperty` through code (see [code sample](../../../server/ongoing-tasks/etl/elasticsearch.mdx#add-an-elasticsearch-etl-task)),
+  * or set the [Document ID Property Name](../../../studio/database/tasks/ongoing-tasks/elasticsearch-etl-task.mdx#elasticsearch-indexes) field via Studio.
+### Transactions
+
+The task delivers the data to the Elasticsearch destination in one or two calls per index.
+
+1. [_delete_by_query](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html):
+   An optional command to delete existing versions of RavenDB documents from Elasticsearch
+   before appending new ones.
+
+
+{`POST orders/_delete_by_query?refresh=true
+\{"query":\{"terms":\{"DocID":["orders/1-a"]\}\}\}
+`}
+
+2. [_bulk](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html):
+   Append RavenDB documents to the Elasticsearch destination.
+
+
+{`POST orders/_bulk?refresh=wait_for
+\{"index":\{"_id":null\}\}
+\{"OrderLinesCount":3,"TotalCost":0,"DocID":"orders/1-a"\}
+`}
+
+### Insert Only Mode
+
+You can enable the task's **Insert Only** mode using [code](../../../server/ongoing-tasks/etl/elasticsearch.mdx#add-an-elasticsearch-etl-task)
+or via [Studio](../../../studio/database/tasks/ongoing-tasks/elasticsearch-etl-task.mdx#elasticsearch-indexes),
+to **omit** _delete_by_query commands and so refrain from deleting documents before the transfer.
+
+  Enabling **Insert Only** can boost the task's performance when there is no need to delete documents before loading them.
+
+
+  Be aware that enabling Insert Only mode will append documents to Elasticsearch whenever they
+  are modified on RavenDB, without removing existing documents. If document versions that are not
+  needed accumulate and storage space is a concern, keep Insert Only disabled.
+
+
+
+
+## Elasticsearch Index Definition
+
+* When the Elasticsearch ETL task runs for the very first time, it will create any Elasticsearch index defined in
+  the task that doesn't exist yet.
+
+* When the index is created, the document property that holds the RavenDB document ID will be defined
+  as a non-analyzed field, with type
+  [keyword](https://www.elastic.co/guide/en/elasticsearch/reference/7.15/keyword.html) to avoid having
+  full-text search on it.
+  This way the RavenDB document identifiers won't be analyzed, and the task will be able to `_delete_by_query` using an exact match on those IDs.
+  I.e.:
+
+
+{`PUT /newIndexName
+\{
+  "mappings": \{
+    "properties": \{
+      "DocId": \{ // the DocumentIdProperty
+        "type": "keyword"
+      \}
+    \}
+  \}
+\}
+`}
+
+
+If you choose to create the Elasticsearch Index on your own (before running the
+Elasticsearch ETL task), you must define the `DocumentIdProperty` **type** property
+as **"keyword"** in your index definition.
+
+
+
+
+## Client API
+
+### Add an Elasticsearch ETL Task
+
+* To define an Elasticsearch ETL task through the client, use the
+  [AddEtlOperation](../../../client-api/operations/maintenance/etl/add-etl.mdx) API method
+  as shown below.
+  Pass it an `ElasticSearchEtlConfiguration` instance with -
+  * The name of a defined **Connection String**.
+    You can define a connection string
+    [using code](../../../server/ongoing-tasks/etl/elasticsearch.mdx#add-an-elasticsearch-connection-string)
+    or via [Studio](../../../studio/database/tasks/ongoing-tasks/elasticsearch-etl-task.mdx#define-the-elasticsearch-etl-task).
+  * A list of **Elasticsearch Indexes**.
+  * A list of **Transformation Scripts**.
+
+**Code Sample**:
+
+
+{`// Create an Elasticsearch ETL task
+AddEtlOperation<ElasticSearchConnectionString> operation = new AddEtlOperation<ElasticSearchConnectionString>(
+new ElasticSearchEtlConfiguration()
+\{
+    ConnectionStringName = elasticSearchConnectionString.Name, // Connection String name
+    Name = "ElasticsearchEtlTask", // ETL Task name
+
+    ElasticIndexes =
+    \{
+        // Define Elasticsearch Indexes
+        new ElasticSearchIndex \{
+            // Elasticsearch Index name
+            IndexName = "orders",
+            // The Elasticsearch document property that will contain
+            // the source RavenDB document id.
+            // Make sure this property is also defined inside the
+            // transform script.
+            DocumentIdProperty = "DocId",
+            InsertOnlyMode = false \},
+        new ElasticSearchIndex \{ IndexName = "lines",
+            DocumentIdProperty = "OrderLinesCount",
+            // If true, don't send _delete_by_query before appending docs
+            InsertOnlyMode = true
+        \}
+    \},
+    Transforms =
+    \{ // Transformation script configuration
+        new Transformation()
+        \{
+            // RavenDB collections that the script uses
+            Collections = \{ "Orders" \},
+
+            Script = @"var orderData = \{
+                           DocId: id(this),
+                           OrderLinesCount: this.Lines.length,
+                           TotalCost: 0
+                       \};
+
+                       // Write the \`orderData\` as a document to the Elasticsearch 'orders' index
+                       loadToOrders(orderData);",
+
+            // Transformation script Name
+            Name = "TransformIDsAndLinesCount"
+        \}
+    \}
+\});
+
+store.Maintenance.Send(operation);
+`}
+
+
+
+
+* `ElasticSearchEtlConfiguration`
+
+    | Property | Type | Description |
+    |:-------------|:-------------|:-------------|
+    | **Name** | `string` | ETL Task Name |
+    | **ConnectionStringName** | `string` | The name of the connection string used by this task |
+    | **ElasticIndexes** | `List<ElasticSearchIndex>` | A list of Elasticsearch indexes |
+    | **Transforms** | `List<Transformation>` | A list of transformation scripts |
+
+* `ElasticSearchIndex`
+
+    | Property | Type | Description |
+    |:-------------|:-------------|:-------------|
+    | **IndexName** | `string` | Elasticsearch Index name.<br/>Name indexes **using lower-case characters only**, e.g. `orders`. |
+    | **DocumentIdProperty** | `string` | The [document ID property](../../../server/ongoing-tasks/etl/elasticsearch.mdx#document-identifiers) defined on the transferred document object inside the transformation script. |
+    | **InsertOnlyMode** | `bool` | `true` - Do not delete existing documents before appending new ones.<br/>`false` - Delete existing document versions before appending documents. |
+
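+As a complementary illustration, the sketch below (the index and task names are hypothetical, not part
+of the official samples) defines an index whose name contains a `-`, which therefore must be loaded
+with the alternative `loadTo('Target', obj)` syntax described above rather than with a
+`loadTo<Target>` function name:
+
+
+{`// A sketch only - "orders-archive" is a hypothetical index name.
+// Because the name contains '-', the script uses loadTo('orders-archive', obj).
+AddEtlOperation<ElasticSearchConnectionString> operation = new AddEtlOperation<ElasticSearchConnectionString>(
+new ElasticSearchEtlConfiguration()
+\{
+    ConnectionStringName = "ElasticConStr",
+    Name = "ElasticsearchEtlTaskForDashedIndex",
+    ElasticIndexes =
+    \{
+        new ElasticSearchIndex \{ IndexName = "orders-archive", DocumentIdProperty = "DocId" \}
+    \},
+    Transforms =
+    \{
+        new Transformation()
+        \{
+            Collections = \{ "Orders" \},
+            Name = "LoadToDashedIndex",
+            Script = @"loadTo('orders-archive', \{ DocId: id(this) \});"
+        \}
+    \}
+\});
+
+store.Maintenance.Send(operation);
+`}
+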
+
+### Add an Elasticsearch Connection String
+
+* An Elasticsearch connection string includes a list of **Elasticsearch destination URLs**,
+  and determines the **Authentication Method** required to access them.
+   * Omit the Authentication property if the Elasticsearch destination requires no authentication.
+   * Add a connection string as shown below.
+
+**Code Sample**:
+
+
+{`// Create a Connection String to Elasticsearch
+var elasticSearchConnectionString = new ElasticSearchConnectionString
+\{
+    // Connection String Name
+    Name = "ElasticConStr",
+    // Elasticsearch Nodes URLs
+    Nodes = new[] \{ "http://localhost:9200" \},
+    // Authentication Method
+    Authentication = new Raven.Client.Documents.Operations.ETL.ElasticSearch.Authentication
+    \{
+        Basic = new BasicAuthentication
+        \{
+            Username = "John",
+            Password = "32n4j5kp8"
+        \}
+    \}
+\};
+
+store.Maintenance.Send(new PutConnectionStringOperation<ElasticSearchConnectionString>(elasticSearchConnectionString));
+`}
+
+
+
+
+* `ElasticSearchConnectionString`
+
+    | Property | Type | Description |
+    |:-------------|:-------------|:-------------|
+    | **Name** | `string` | Connection string name |
+    | **Nodes** | `string[]` | A list of URLs to Elasticsearch destinations |
+    | **Authentication** | `Authentication` | Optional authentication method<br/>(Do not use when no authentication is required) |
+
+* `Authentication` (Authentication methods)
+
+    | Property | Type | Description |
+    |:-------------|:-------------|:-------------|
+    | **Basic** | `BasicAuthentication` | Authenticate connection by **username** and **password** |
+    | **ApiKey** | `ApiKeyAuthentication` | Authenticate connection by an **API key** |
+    | **Certificate** | `CertificateAuthentication` | Authenticate connection by **certificate** |
+
+* `BasicAuthentication` (Authenticate transfers by **username** and **password**)
+
+    | Property | Type |
+    |:-------------|:-------------|
+    | **Username** | `string` |
+    | **Password** | `string` |
+
+* `ApiKeyAuthentication` (Authenticate transfers by an **API key**)
+
+    | Property | Type |
+    |:-------------|:-------------|
+    | **ApiKeyId** | `string` |
+    | **ApiKey** | `string` |
+
+* `CertificateAuthentication` (Authenticate transfers by **certificate**)
+
+    | Property | Type | Description |
+    |:-------------|:-------------|:-------------|
+    | **CertificatesBase64** | `string[]` | A list of valid certificate strings |
+
+
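+For instance, a minimal sketch of a connection string that authenticates with an **API key**
+rather than with basic credentials might look as follows (the name and key values are placeholders,
+not values from this article):
+
+
+{`// Create a Connection String that authenticates using an API key
+var apiKeyConnectionString = new ElasticSearchConnectionString
+\{
+    Name = "ElasticApiKeyConStr",                 // placeholder connection string name
+    Nodes = new[] \{ "http://localhost:9200" \},
+    Authentication = new Raven.Client.Documents.Operations.ETL.ElasticSearch.Authentication
+    \{
+        ApiKey = new ApiKeyAuthentication
+        \{
+            ApiKeyId = "myApiKeyId",              // placeholder API key ID
+            ApiKey = "myApiKey"                   // placeholder API key
+        \}
+    \}
+\};
+
+store.Maintenance.Send(new PutConnectionStringOperation<ElasticSearchConnectionString>(apiKeyConnectionString));
+`}
+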
+
+
+
+## Supported Elasticsearch Versions
+RavenDB supports **Elasticsearch Server version 7 and up**.
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/olap.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/olap.mdx
new file mode 100644
index 0000000000..7c9870bd2e
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/olap.mdx
@@ -0,0 +1,440 @@
+---
+title: "Ongoing Tasks: OLAP ETL"
+hide_table_of_contents: true
+sidebar_label: OLAP ETL
+sidebar_position: 3
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Ongoing Tasks: OLAP ETL
+
+
+* The **OLAP ETL task** creates an ETL process from a RavenDB database to a variety of destinations that
+are especially useful for conducting OLAP. These destinations currently include:
+  * [Amazon S3](https://aws.amazon.com/s3/)
+  * [Amazon Glacier](https://aws.amazon.com/glacier/)
+  * [Microsoft Azure](https://azure.microsoft.com/)
+  * [Google Cloud Platform](https://cloud.google.com/)
+  * File Transfer Protocol
+  * Local storage
+
+* The data is encoded in the [Apache Parquet format](https://parquet.apache.org/docs),
+an alternative to CSV that is much faster to query. Unlike CSV, Parquet groups the data according to its
+column (by field) instead of by row (by document).
+
+* In this page:
+   * [Client API](../../../server/ongoing-tasks/etl/olap.mdx#client-api)
+   * [Transform Script](../../../server/ongoing-tasks/etl/olap.mdx#transform-script)
+      * [Alternative Syntax](../../../server/ongoing-tasks/etl/olap.mdx#alternative-syntax)
+   * [Athena Examples](../../../server/ongoing-tasks/etl/olap.mdx#athena-examples)
+
+
+## Client API
+
+Creating an OLAP ETL task through the client is very similar to creating a RavenDB or SQL ETL task.
+All ETL types use [the `AddEtlOperation`](../../../client-api/operations/maintenance/etl/add-etl.mdx). For
+OLAP, you will need an `OlapEtlConfiguration`, which itself needs an `OlapConnectionString`. Their
+configuration options are listed below.
+
+This is an example of a basic OLAP ETL creation operation:
+
+
+
+{`AddEtlOperation<OlapConnectionString> operation = new AddEtlOperation<OlapConnectionString>(
+    new OlapEtlConfiguration
+    \{
+        ConnectionStringName = "olap-connection-string-name",
+        Name = "Orders ETL",
+        Transforms =
+        \{
+            new Transformation
+            \{
+                Name = "Script #1",
+                Collections =
+                \{
+                    "Orders"
+                \},
+                Script = @"var orderDate = new Date(this.OrderedAt);
+                           var year = orderDate.getFullYear();
+                           var month = orderDate.getMonth();
+                           var key = new Date(year, month);
+
+                           loadToOrders(key, \{
+                               Company : this.Company,
+                               ShipVia : this.ShipVia
+                           \})"
+            \}
+        \}
+    \});
+
+AddEtlOperationResult result = store.Maintenance.Send(operation);
+`}
+
+
+
+#### `OlapEtlConfiguration`
+
+| Property | Type | Description |
+| - | - | - |
+| `RunFrequency` | `string` | Takes a [cron expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm) which determines how often the server will execute the ETL process. |
+| `CustomPartitionValue` | `string` | A value that can be used as a partition name in multiple scripts. See [below](../../../server/ongoing-tasks/etl/olap.mdx#the-custom-partition-value). |
+| `OlapTables` | `List<OlapEtlTable>` | List of naming configurations for individual tables. See more details below. |
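+To illustrate how these options fit together, here is a minimal hypothetical sketch that sets a run
+frequency and a custom partition value (the cron expression, names, and values below are placeholders,
+not part of the official samples):
+
+
+{`var config = new OlapEtlConfiguration
+\{
+    ConnectionStringName = "olap-connection-string-name",
+    Name = "Orders ETL",
+    // Placeholder cron expression: run the ETL process once every hour
+    RunFrequency = "0 * * * *",
+    // Placeholder value, referenced in transform scripts as $customPartitionValue
+    CustomPartitionValue = "task-A",
+    Transforms = \{ /* transformation scripts, as in the example above */ \}
+\};
+
+store.Maintenance.Send(new AddEtlOperation<OlapConnectionString>(config));
+`}
+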
+
+#### `OlapConnectionString`
+
+The OLAP connection string contains the configurations for each destination of the ETL task.
+
+| Property | Description |
+| - | - |
+| `LocalSettings` | Settings for storing the data locally. |
+| `S3Settings` | Settings for an AWS S3 bucket. |
+| `GlacierSettings` | Settings for an AWS Glacier vault. |
+| `AzureSettings` | Settings for Azure. |
+| `GoogleCloudSettings` | Settings for Google Cloud Platform. |
+| `FTPSettings` | Settings for File Transfer Protocol. |
+
+
+
+#### ETL destination settings
+
+This is the list of the different settings objects that the `OlapConnectionString` object can contain.
+
+#### `LocalSettings`
+
+| Property | Type | Description |
+| - | - | - |
+| `FolderPath` | `string` | Path to a local folder. If this property is not set, the data is saved to the location specified in [the setting `Storage.TempPath`](../../../server/configuration/storage-configuration.mdx#storagetemppath). If _that_ setting has no value, the data is instead saved to the location specified in [the setting `CoreConfiguration.DataDirectory`](../../../server/configuration/core-configuration.mdx#datadir). |
+
+#### `FtpSettings`
+
+| Property | Type | Description |
+| - | - | - |
+| `Url` | `string` | The FTP URL |
+| `Port` | `int` | The FTP port |
+| `UserName` | `string` | The username used for authentication |
+| `Password` | `string` | Authentication password |
+| `CertificateFileName` | `string` | The name of your local certificate file |
+| `CertificateAsBase64` | `string` | The certificate in base 64 format |
+
+#### `S3Settings`
+
+| Property | Type | Description |
+| - | - | - |
+| `AwsAccessKey` | `string` | The access key for the AWS server |
+| `AwsSecretKey` | `string` | The secret key for the AWS server |
+| `AwsSessionToken` | `string` | AWS session token |
+| `AwsRegionName` | `string` | The AWS server region |
+| `BucketName` | `string` | The name of the S3 bucket that is the destination for this ETL |
+| `CustomServerUrl` | `string` | The custom URL to the S3 bucket, if you have one |
+| `RemoteFolderName` | `string` | Name of the destination folder within the S3 bucket |
+| `ForcePathStyle` | `bool` | Change the default S3 bucket path convention on a custom S3 server |
+
+#### `GlacierSettings`
+
+| Property | Type | Description |
+| - | - | - |
+| `AwsAccessKey` | `string` | The access key for the AWS server |
+| `AwsSecretKey` | `string` | The secret key for the AWS server |
+| `AwsSessionToken` | `string` | AWS session token |
+| `AwsRegionName` | `string` | The AWS server region |
+| `VaultName` | `string` | The name of your AWS Glacier vault |
+| `RemoteFolderName` | `string` | Name of the destination folder within the Glacier vault |
+
+#### `AzureSettings`
+
+| Property | Type | Description |
+| - | - | - |
+| `StorageContainer` | `string` | Microsoft Azure Storage container name |
+| `RemoteFolderName` | `string` | Path to the remote Azure folder |
+| `AccountName` | `string` | The name of your Azure account |
+| `AccountKey` | `string` | Your Azure account key |
+| `SasToken` | `string` | Your SAS token for authentication |
+
+#### `GoogleCloudSettings`
+
+| Property | Type | Description |
+| - | - | - |
+| `BucketName` | `string` | Google Cloud Storage bucket name |
+| `RemoteFolderName` | `string` | Path to the remote bucket folder |
+| `GoogleCredentialsJson` | `string` | Authentication credentials to your Google Cloud Storage |
+
+
+
+#### `OlapEtlTable`
+
+Optional, more detailed naming configuration per table.
+
+| Property | Type | Description |
+| - | - | - |
+| `TableName` | `string` | The name of the table. This should usually be the name of the source collection. |
+| `DocumentIdColumn` | `string` | A name for the id column of the table. Default: "_id" |
+
+#### ETL Run Frequency
+
+Unlike other ETL tasks, OLAP ETL operates only in batches at regular intervals, rather than triggering a
+new round every time a document updates.
+If a document has been updated since the last ETL run (even if the updated data has not actually been loaded yet),
+the document versions are distinguished by `_lastModifiedTime`, the value of the `last-modified` field in a document's
+metadata, in Unix time. This field appears as another column in the destination tables.
+
+
+
+## Transform Script
+
+Transformation scripts are similar to those in the RavenDB ETL and SQL ETL tasks - see more about this in
+[ETL Basics](../../../server/ongoing-tasks/etl/basics.mdx#transform). The major difference is that the data output
+by the ETL task can be divided into folders and child folders called _partitions_. Querying the data usually involves scanning
+the entire folder, so there is an efficiency advantage to dividing the data into more folders.
+
+#### The `key` Parameter
+
+As with other ETL tasks, the method that loads an entry to its destination is `loadTo()`,
+but unlike the other ETL tasks, the method takes two parameters: the entry itself, and an additional 'key'.
+This `key` determines how many partitions there are and what their names are.
+
+
+
+{`loadTo(key, object)
+`}
+
+
+
+The method's name determines the name of the parent folder that the method outputs to. If you want to output
+data to a folder called "Sales", use the method `loadToSales()`. The `key` parameter determines the names of
+one or more layers of child folders that contain the actual destination table.
+
+The actual value that you pass as the `key` for `loadTo()` is one of two methods:
+
+* `partitionBy()` - creates one or more child folders (one inside the other).
+* `noPartition()` - creates no child folders.
+
+The child folders created by OLAP ETL are considered a sort of 'virtual column' of the destination table.
+This just means that all child folder names have this format: `[virtual column name]=[partition value]`,
+i.e. two strings separated by an `=`. The default virtual column name is `_partition`.
+
+`partitionBy()` can take one or more folder names in the following ways:
+
+* **`partitionBy(key)`** - takes a partition value and uses the default virtual column
+name `_partition`. The partition value can be a string, number, date, etc.
+* **`partitionBy(['name', key])`** - takes a virtual column name and a partition value as an array of size two.
+* **`partitionBy(['name1', key1], ['name2', key2], ... )`** - takes multiple arrays of size two, each with a virtual
+column name and a partition value. Each pair represents a child folder of the preceding pair.
+
+Here are examples of possible values for `partitionBy()`, and the resulting folder names:
+
+
+
+{`loadToMyFolder(
+    partitionBy('one'),
+    object
+)
+//Loads the data to /MyFolder/_partition=one/
+
+loadToMyFolder(
+    partitionBy(['month', 'August']),
+    object
+)
+//Loads the data to /MyFolder/month=August/
+
+loadToMyFolder(
+    partitionBy(['month', 'August'], ['day', '22'], ['hour', '17']),
+    object
+)
+//Loads the data to /MyFolder/month=August/day=22/hour=17
+
+loadToMyFolder(
+    partitionBy(this.Company),
+    object
+)
+// Loads the data to e.g. /MyFolder/_partition=Apple
+
+loadToMyFolder(
+    partitionBy(['month', new Date(this.OrderedAt).getMonth()]),
+    obj
+)
+//Loads the data to e.g. /MyFolder/month=8
+`}
+
+
+
+### Alternative Syntax
+
+The target folder name can be passed to the `loadTo` command separately, as a string argument,
+using this syntax: `loadTo('folder_name', key, object)`
+
+* **Example**:
+  The following two calls to `loadTo` are equivalent.
+  `loadToOrders(key, object)`
+  `loadTo('Orders', key, object)`
+
+
+
+  * The target name `'Orders'` in this syntax is **not** a variable and **cannot** be used as one:
+    it is simply a string literal of the target's name.
+  * Separating the target name from the `loadTo` command makes it possible to include symbols like
+    `-` and `.` in target names. This is not possible when the standard `loadToOrders` syntax is
+    used because including special characters in the name of a JS function makes it invalid.
+
+
+#### The Custom Partition Value
+
+The custom partition value is a string value that can be set in the
+[`OlapEtlConfiguration` object](../../../server/ongoing-tasks/etl/olap.mdx#olapetlconfiguration). This value can be
+referenced in the transform script as `$customPartitionValue`. This setting gives you another way
+to distinguish data from different ETL tasks that use the same transform script.
+
+Suppose you want to create multiple OLAP ETL tasks that all use the same transform script and
+connection string. All the tasks will output to the same destination folders, but suppose you
+want to be able to indicate which data came from which task. The custom partition value gives
+you a simple way to achieve this: all the tasks can run the same script, and each script can
+output the data to a destination folder with the name determined by that task's custom partition
+value setting.
+
+
+
+{`partitionBy(['source_ETL_task', $customPartitionValue])
+`}
+
+
+
+In the case of multiple partitions, the custom partition value can be used more than once, and it
+can appear anywhere in the folder structure.
+
+#### Script Example
+
+
+
+{`//Define the object that will be added to the table
+var orderData = \{
+    Company : this.Company,
+    RequireAt : new Date(this.RequireAt),
+    ItemCount: this.Lines.length
+\};
+
+//Create the partition names
+var orderDate = new Date(this.OrderedAt);
+var year = orderDate.getFullYear();
+var month = orderDate.getMonth();
+
+//Load to the folder: /OrderData/Year=<year>/Month=<month>/
+loadToOrderData(partitionBy(['Year', year], ['Month', month]), orderData);
+`}
+
+
+
+
+
+## Athena Examples
+
+Athena is a SQL query engine in the AWS environment that can both read directly from S3 buckets and
+output to S3 buckets.
+
+Here are a few examples of queries you can run in Athena. But first, you need to configure the
+destination for your query results: go to settings, and under "query result location" input the path
+to your preferred bucket. [Read more here](https://docs.aws.amazon.com/athena/latest/ug/querying.html#query-results-specify-location-console).
+
+Create a `monthly_sales` table from Parquet data stored in S3:
+
+
+{`CREATE EXTERNAL TABLE mydatabase.monthly_sales (
+  \`_id\` string,
+  \`Qty\` int,
+  \`Product\` string,
+  \`Cost\` int,
+  \`_lastModifiedTime\` int
+)
+PARTITIONED BY (\`dt\` string)
+STORED AS parquet
+LOCATION 's3://ravendb-test/olap/tryouts/data/Sales'
+`}
+
+
+
+Load all partitions:
+
+
+{`MSCK REPAIR TABLE monthly_sales
+`}
+
+
+
+Select everything in the table:
+
+
+{`select *
+from monthly_sales
+`}
+
+
+
+Select specific fields:
+
+
+{`select _id orderId, qty quantity, product, cost
+from monthly_sales
+`}
+
+
+
+Filter based on product name:
+
+
+{`select *
+from monthly_sales
+where product = 'Products/2'
+`}
+
+
+
+Filter based on date (this is where partitioning adds efficiency - only the relevant folders are scanned):
+
+
+{`select *
+from monthly_sales
+where dt >= '2020-01-01' and dt <= '2020-02-01'
+`}
+
+
+
+From all items sold, select the maximum cost (price) per *order*:
+
+
+{`select _id orderId, max(cost) cost
+from monthly_sales
+group by _id
+`}
+
+
+
+Query for the most recent version in an append-only table:
+e.g., select everything in the table and, in case of duplicates (multiple rows with the same id),
+take only the most recent version (the one with the highest `_lastModifiedTime`):
+
+
+{`SELECT DISTINCT o.*
+FROM monthly_orders o
+INNER JOIN
+    (SELECT _id,
+        MAX(_lastModifiedTime) AS latest
+    FROM monthly_orders
+    GROUP BY _id) oo
+    ON o._id = oo._id
+        AND o._lastModifiedTime = oo.latest
+`}
+
+
+
+#### Apache Parquet
+
+Parquet is an open-source, column-oriented file format. Like [ORC](https://orc.apache.org/), columns are stored together
+instead of rows being stored together (the same fields from multiple documents, rather than
+whole documents). This makes queries more efficient.
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/_category_.json b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/_category_.json
new file mode 100644
index 0000000000..f8f71f84fa
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 6,
+  "label": "Queue ETL"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx
new file mode 100644
index 0000000000..6561929a03
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx
@@ -0,0 +1,500 @@
+---
+title: "Queue ETL: Amazon SQS"
+hide_table_of_contents: true
+sidebar_label: Amazon SQS ETL
+sidebar_position: 4
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Queue ETL: Amazon SQS
+
+
+* Amazon **SQS** (**S**imple **Q**ueue **S**ervice) is a distributed Message
+  Queue service (like Azure Queue Storage and others) that is widely used
+  for its scalability, durability, availability, and queueing methods:
+   * **Standard queueing** for enormous throughput.
+   * **FIFO queueing** to control delivery order and prevent message duplication.
+
+* Create an **Amazon SQS ETL Task** to:
+   * **Extract** data from a RavenDB database,
+   * **Transform** the data using one or more custom scripts,
+   * and **Load** the resulting JSON object to an SQS destination
+     in [CloudEvents messages](https://cloudevents.io) format.
+
+
+This article focuses on the creation of an Amazon SQS ETL task using the Client API.
+To define an Amazon SQS ETL task from Studio, see [Studio: Amazon SQS ETL Task](../../../../studio/database/tasks/ongoing-tasks/amazon-sqs-etl.mdx).
+For an **overview of Queue ETL tasks**, see [Queue ETL tasks overview](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx).
+
+
+* In this page:
+   * [RavenDB ETL and Amazon SQS](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#ravendb-etl-and-amazon-sqs)
+   * [Queue methods](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#queue-methods)
+      * [Standard queueing](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#standard-queueing)
+      * [FIFO queueing](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#fifo-queueing)
+      * [Caution: ETL message size -vs- Queue message size](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#caution-etl-message-size--vs--queue-message-size)
+   * [Add an Amazon SQS connection string](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#add-an-amazon-sqs-connection-string)
+      * [Authentication methods](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#authentication-methods)
+      * [Example](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#example)
+      * [Syntax](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#syntax)
+   * [Add an Amazon SQS ETL task](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#add-an-amazon-sqs-etl-task)
+      * [Example: Add SQS ETL task](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#example-add-sqs-etl-task)
+      * [Delete processed documents](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#delete-processed-documents)
+      * [Syntax](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#syntax-1)
+   * [The transformation script](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#the-transformation-script)
+      * [The loadTo method](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#the-loadto-method)
+
+
+## RavenDB ETL and Amazon SQS
+
+* Utilizing SQS ETL tasks allows RavenDB to take the role of an event producer in an Amazon SQS
+  architecture, leveraging RavenDB's feature set and SQS's powerful message distribution capabilities.
+
+* The loading of RavenDB messages to an SQS queue can automatically _trigger AWS
+  [Lambda Functions](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html)_,
+  enabling economical processing and powerful workflows.
+
+  Enqueueing RavenDB messages using SQS can also be _integrated with other AWS services_
+  such as [Amazon SNS](https://aws.amazon.com/sns/) to distribute message-related notifications
+  and [Step Functions](https://aws.amazon.com/step-functions/) to manage and visualize your workflows.
+
+
+Read more about Amazon SQS in the platform's [official documentation](https://docs.aws.amazon.com/sqs/).
+
+
+
+
+## Queue methods
+
+The data that ETL tasks handle is carefully selected and tailored for specific user needs.
+Selecting which queue type Amazon SQS should use must likewise take into account the specific
+nature of the transferred data.
+#### Standard queueing
+
+Standard queueing offers an extremely high transfer rate, but lacks the ability to ensure
+that messages arrive in the same order they were sent, or to prevent their duplication.
+
+
+Use standard queueing when quick delivery takes precedence over message order and
+distinctness, or when the recipient can make up for them.
+
+#### FIFO queueing
+
+FIFO queueing controls delivery order using a First-In-First-Out queue and ensures
+the delivery of each message exactly once, in exchange for a much slower transfer rate
+than that of the standard queueing method.
+
+To load messages to a FIFO queue, add `.fifo` to the queue name while calling the
+transformation script's [loadTo method](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#the-loadto-method):
+
+
+{`Script = @"// Create an orderData object
+           // ==========================
+           var orderData = \{
+               Id: id(this), // property with RavenDB document ID
+               OrderLinesCount: this.Lines.length,
+               TotalCost: 0
+           \};
+
+           for (var i = 0; i < this.Lines.length; i++) \{
+               var line = this.Lines[i];
+               var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount);
+               orderData.TotalCost += cost;
+           \}
+
+           // Load the object to the FIFO 'Orders' queue on the SQS destination
+           // =================================================================
+           loadTo('orders.fifo', orderData, \{
+               Id: id(this),
+               Type: 'com.github.users',
+               Source: '/registrations/direct-signup'
+           \});"
+`}
+
+
+
+* **Deduplication**:
+  FIFO queues automatically prevent duplicate messages within a _deduplication interval_.
+  The default interval is 5 minutes.
+  Deduplication is achieved by giving each message a _Message Deduplication ID_
+  with a unique **Change Vector**.
+  If the change vector is longer than 128 characters (Amazon's restriction for
+  a message deduplication ID), the task will truncate the change vector and append a `-{9-digit hash}` suffix.
+
+* **Message Grouping**:
+  Messages with the same _Message Group ID_ are processed in order.
+  Each group is handled independently, allowing parallel processing across different message groups.
+
+
+  Use this method when throughput is not as important as the order and uniqueness of
+  arriving messages.
+
+#### Caution: ETL message size -vs- Queue message size
+
+
+Please **be aware** that the maximum size of an SQS queue message is `64 KB`, while the
+maximum size of an ETL message to the queue is `256 KB`.
+The significance of this difference is that when a maximum-size ETL message arrives
+at its destination queue, it may be charged for not 1 but 4 queue messages.
+
+
+
+
+## Add an Amazon SQS connection string
+
+Prior to setting up the ETL task, define a connection string that the task will use to access your SQS destination.
+The connection string includes the authorization credentials required to connect.
+
+#### Authentication methods
+
+The authorization method that the ETL task uses to access the SQS target is determined
+by properties of the connection string it uses, as shown in the example below.
+
+#### Example
+
+
+
+{`// Prepare the connection string:
+// ==============================
+var conStr = new QueueConnectionString
+\{
+    // Provide a name for this connection string
+    Name = "mySqsConStr",
+
+    // Set the broker type
+    BrokerType = QueueBrokerType.AmazonSqs,
+
+    AmazonSqsConnectionSettings = new AmazonSqsConnectionSettings()
+    \{
+        // Define whether to use a password or not.
+        // Set to \`true\` to authorize a dedicated machine that requires no password.
+        // You can only use this option in self-hosted mode.
+        Passwordless = false,
+
+        // SQS destination authorization parameters
+        Basic = new Basic
+        \{
+            AccessKey = SqsAccessKey,
+            SecretKey = SqsSecretKey,
+            RegionName = SqsRegionName
+        \},
+    \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation:
+// =======================================================================================
+var res = store.Maintenance.Send(
+    new PutConnectionStringOperation<QueueConnectionString>(conStr));
+`}
+
+
+
+* **Passwordless**
+  Defines whether to use a password or not.
+  Set this property to `true` if the target machine is pre-authorized.
+  This authorization method can only be used in self-hosted mode.
+
+* **Basic**
+  Defines these authorization properties:
+   * `AccessKey`
+   * `SecretKey`
+   * `RegionName`
+
+#### Syntax
+
+
+
+{`public class AmazonSqsConnectionSettings
+\{
+    public Basic Basic \{ get; set; \}
+    public bool Passwordless \{ get; set; \}
+\}
+
+public class Basic
+\{
+    public string AccessKey \{ get; set; \}
+    public string SecretKey \{ get; set; \}
+    public string RegionName \{ get; set; \}
+\}
+`}
+
+
+
+{`public class QueueConnectionString : ConnectionString
+\{
+    // Set to QueueBrokerType.AmazonSqs for an SQS connection string
+    public QueueBrokerType BrokerType \{ get; set; \}
+
+    // Configure this when setting a connection string for Kafka
+    public KafkaConnectionSettings KafkaConnectionSettings \{ get; set; \}
+
+    // Configure this when setting a connection string for RabbitMQ
+    public RabbitMqConnectionSettings RabbitMqConnectionSettings \{ get; set; \}
+
+    // Configure this when setting a connection string for Azure Queue Storage
+    public AzureQueueStorageConnectionSettings AzureQueueStorageConnectionSettings \{ get; set; \}
+
+    // Configure this when setting a connection string for Amazon SQS
+    public AmazonSqsConnectionSettings AmazonSqsConnectionSettings \{ get; set; \}
+\}
+`}
+
+
+
+{`public enum QueueBrokerType
+\{
+    None,
+    Kafka,
+    RabbitMq,
+    AzureQueueStorage,
+    AmazonSqs
+\}
+`}
+
+
+
+
+## Add an Amazon SQS ETL task
+
+#### Example: Add SQS ETL task
+
+* In this example, the Amazon SQS ETL task will:
+   * Extract source documents from the "Orders" collection in RavenDB.
+   * Process each "Order" document using a defined script that creates a new `orderData` object.
+   * Load the `orderData` object to the "OrdersQueue" queue on an SQS destination.
+* For more details about the script and the `loadTo` method, see the [transformation script](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#the-transformation-script) section below.
+
+
+
+{`// Define a transformation script for the task:
+// ============================================
+Transformation transformation = new Transformation
+\{
+    // Define the input collections
+    Collections = \{ "Orders" \},
+    ApplyToAllDocuments = false,
+
+    // The transformation script
+    Name = "scriptName",
+    Script = @"// Create an orderData object
+               // ==========================
+               var orderData = \{
+                   Id: id(this),
+                   OrderLinesCount: this.Lines.length,
+                   TotalCost: 0
+               \};
+
+               // Update the orderData's TotalCost field
+               // ======================================
+               for (var i = 0; i < this.Lines.length; i++) \{
+                   var line = this.Lines[i];
+                   var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount);
+                   orderData.TotalCost += cost;
+               \}
+
+               // Load the object to the 'OrdersQueue' queue on the SQS destination
+               // =================================================================
+               loadToOrdersQueue(orderData, \{
+                   Id: id(this),
+                   Type: 'com.example.promotions',
+                   Source: '/promotion-campaigns/summer-sale'
+               \});"
+\};
+
+// Define the SQS ETL task
+// =======================
+var etlTask = new QueueEtlConfiguration()
+\{
+    BrokerType = QueueBrokerType.AmazonSqs,
+
+    Name = "myAmazonSqsEtlTaskName",
+    ConnectionStringName = "myAmazonSqsConStr",
+
+    Transforms = \{ transformation \},
+
+    // Set to false to allow task failover to another node if current one is down
+    PinToMentorNode = false
+\};
+
+// Deploy (send) the task to the server via the AddEtlOperation:
+// =============================================================
+store.Maintenance.Send(new AddEtlOperation<QueueConnectionString>(etlTask));
+`}
+
+
+#### Delete processed documents
+
+* It is possible to **delete** documents from a RavenDB database once they have been processed by the Queue ETL task.
+* To do this, set the optional `Queues` property in the ETL configuration with the list of SQS queues for which
+  processed documents are to be deleted.
+
+
+
+{`var etlTask = new QueueEtlConfiguration()
+\{
+    BrokerType = QueueBrokerType.AmazonSqs,
+
+    Name = "myAmazonSqsEtlTaskName",
+    ConnectionStringName = "myAmazonSqsConStr",
+
+    Transforms = \{ transformation \},
+
+    // Define whether to delete documents from RavenDB after they are sent to the target queue
+    Queues = new List<EtlQueue>()
+    \{
+        new()
+        \{
+            // The name of the SQS queue
+            Name = "OrdersQueue",
+
+            // When set to 'true',
+            // documents that were processed by the transformation script will be deleted
+            // from RavenDB after the message is loaded to the target queue
+            DeleteProcessedDocuments = true
+        \}
+    \}
+\};
+
+store.Maintenance.Send(new AddEtlOperation<QueueConnectionString>(etlTask));
+`}
+
+
+#### Syntax
+
+
+
+{`public class QueueEtlConfiguration
+\{
+    // Set to QueueBrokerType.AmazonSqs to define an SQS Queue ETL task
+    public QueueBrokerType BrokerType \{ get; set; \}
+    // The ETL task name
+    public string Name \{ get; set; \}
+    // The registered connection string name
+    public string ConnectionStringName \{ get; set; \}
+    // List of transformation scripts
+    public List<Transformation> Transforms \{ get; set; \}
+    // Optional configuration per queue
+    public List<EtlQueue> Queues \{ get; set; \}
+    // Set to 'false' to allow task failover to another node if current one is down
+    public bool PinToMentorNode \{ get; set; \}
+\}
+
+public class Transformation
+\{
+    // The script name
+    public string Name \{ get; set; \}
+    // The source RavenDB collections that serve as the input for the script
+    public List<string> Collections \{ get; set; \}
+    // Set whether to apply the script on all collections
+    public bool ApplyToAllDocuments \{ get; set; \}
+    // The script itself
+    public string Script \{ get; set; \}
+\}
+
+public class EtlQueue
+\{
+    // The SQS queue name
+    public string Name \{ get; set; \}
+    // Delete processed documents when set to 'true'
+    public bool DeleteProcessedDocuments \{ get; set; \}
+\}
+`}
+
+
+
+
+## The transformation script
+
+The [basic characteristics](../../../../server/ongoing-tasks/etl/basics.mdx) of an Amazon SQS ETL script
+are similar to those of other ETL types.
+The script defines what data to **extract** from the source document, how to **transform** this data,
+and which SQS queue to **load** the data to.
+
+#### The loadTo method
+
+To specify which SQS queue to load the data to, use either of the following methods in your script.
+The two methods are equivalent, offering alternative syntax:
+
+* **`loadTo<QueueName>(obj, {attributes})`**
+
+
+{`loadToOrdersQueue(orderData, \{
+    Id: id(this),
+    Type: 'com.example.promotions',
+    Source: '/promotion-campaigns/summer-sale'
+\});
+`}
+
+
+   * Here the target is specified as part of the function name.
+   * The target _<QueueName>_ in this syntax is **not** a variable and cannot be used as one;
+     it is simply a string literal of the target's name.
+
+* **`loadTo('QueueName', obj, {attributes})`**
+
+
+{`loadTo('OrdersQueue', orderData, \{
+    Id: id(this),
+    Type: 'com.example.promotions',
+    Source: '/promotion-campaigns/summer-sale'
+\});
+`}
+
+
+   * Here the target is passed as an argument to the method.
+   * Separating the target name from the `loadTo` command makes it possible to include symbols
+     like `'-'` and `'.'` in target names.
+     This is not possible when the `loadTo<QueueName>` syntax is used because including special
+     characters in the name of a JavaScript function makes it invalid.
+ * To deliver messages to a [FIFO queue](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx#fifo-queueing), + use this format and add `.fifo` to the queue name. + `loadTo('QueueName.fifo', obj, {attributes})` + + | Parameter | Type | Description | + |----------------|--------|----------------------------------------------------------------------------------------------------------------------------------| + | **QueueName** | string | The name of the SQS Queue | + | **obj** | object | The object to transfer | + | **attributes** | object | An object with optional & required [CloudEvents attributes](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#cloudevents) | + +For example, the following two calls, which load data to "OrdersQueue", are equivalent: + +* `loadToOrdersQueue(obj, {attributes})` +* `loadTo('OrdersQueue', obj, {attributes})` +The following is a sample script that processes documents from the Orders collection: + + + +{`// Create an orderData object +// ========================== +var orderData = \{ + Id: id(this), + OrderLinesCount: this.Lines.length, + TotalCost: 0 +\}; + +// Update the orderData's TotalCost field +// ====================================== +for (var i = 0; i < this.Lines.length; i++) \{ + var line = this.Lines[i]; + var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount); + orderData.TotalCost += cost; +\} + +// Load the object to the "OrdersQueue" queue on the SQS destination +// ================================================================= +loadToOrdersQueue(orderData, \{ + Id: id(this), + Type: 'com.example.promotions', + Source: '/promotion-campaigns/summer-sale' +\}) +`} + + + + + diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/assets/overview_stats.png b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/assets/overview_stats.png new file mode 100644 index 0000000000..df41727416 Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/assets/overview_stats.png differ diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/azure-queue.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/azure-queue.mdx new file mode 100644 index 0000000000..de3da08a4a --- /dev/null +++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/azure-queue.mdx @@ -0,0 +1,393 @@ +--- +title: "Queue ETL: Azure Queue Storage" +hide_table_of_contents: true +sidebar_label: Azure Queue Storage ETL +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Queue ETL: Azure Queue Storage + + +* Azure Queue Storage is a Microsoft Azure service that allows for the storage and retrieval of large numbers of messages, + enabling communication between applications by allowing them to asynchronously send and receive messages. + Each message in a queue can be up to 64 KB in size, and a queue can contain millions of messages, + providing a robust and scalable solution for data processing. 
+ +* Create an **Azure Queue Storage ETL Task** to: + * Extract data from a RavenDB database + * Transform the data using one or more custom scripts + * Load the resulting JSON object to an Azure Queue destination as a CloudEvents message + +* Utilizing this task allows RavenDB to act as an event producer in an Azure Queue architecture. + +* [Azure Functions](https://learn.microsoft.com/en-us/azure/azure-functions/functions-overview?pivots=programming-language-csharp) + can be triggered to consume and process messages that are sent to Azure queues, + enabling powerful and flexible workflows. + The message visibility period and life span in the Queue can be customized through these [ETL configuration options](../../../../server/configuration/etl-configuration.mdx#etlqueueazurequeuestoragetimetoliveinsec). + +* Read more about Azure Queue Storage in the platform's [official documentation](https://learn.microsoft.com/en-us/azure/storage/queues/storage-queues-introduction). +* This article focuses on how to create an Azure Queue Storage ETL task using the Client API. + To define an Azure Queue Storage ETL task from the Studio, see [Studio: Azure Queue Storage ETL Task](../../../../studio/database/tasks/ongoing-tasks/azure-queue-storage-etl.mdx). + For an **overview of Queue ETL tasks**, see [Queue ETL tasks overview](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx). + +* In this page: + * [Add an Azure Queue Storage connection string](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#add-an-azure-queue-storage-connection-string) + * [Authentication methods](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#authentication-methods) + * [Example](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#example) + * [Syntax](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#syntax) + * [Add an Azure Queue Storage ETL task](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#add-an-azure-queue-storage-etl-task) + * [Example](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#example-basic) + * [Delete processed documents](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#delete-processed-documents) + * [Syntax](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#syntax-1) + * [The transformation script](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#the-transformation-script) + * [The loadTo method](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#the-loadto-method) + + +## Add an Azure Queue Storage connection string + +Prior to setting up the ETL task, define a connection string that the task will use to access your Azure account. +The connection string includes the authorization credentials required to connect. +#### Authentication methods: +There are three authentication methods available: + +* **Connection string** + * Provide a single string that includes all the options required to connect to your Azure account. + Learn more about Azure Storage connection strings [here](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string). + * Note: the following connection string parameters are mandatory: + * `AccountName` + * `AccountKey` + * `DefaultEndpointsProtocol` + * `QueueEndpoint` (when using http protocol) +* **Entra ID** + * Use the Entra ID authorization method to achieve enhanced security by leveraging Microsoft Entra’s robust identity solutions. 
+   * This approach minimizes the risks associated with exposed credentials commonly found in connection strings and enables
+     more granular control through [Role-Based Access Controls](https://learn.microsoft.com/en-us/azure/role-based-access-control/).
+
+* **Passwordless**
+   * This authorization method requires the machine to be pre-authorized and can only be used in self-hosted mode.
+   * Passwordless authorization works only when the account on the machine is assigned the Storage Account Queue Data Contributor role; the Contributor role alone is inadequate.
+
+#### Example:
+
+
+
+{`// Prepare the connection string:
+// ==============================
+var conStr = new QueueConnectionString
+\{
+    // Provide a name for this connection string
+    Name = "myAzureQueueConStr",
+
+    // Set the broker type
+    BrokerType = QueueBrokerType.AzureQueueStorage,
+
+    // In this example we provide a simple string for the connection string
+    AzureQueueStorageConnectionSettings = new AzureQueueStorageConnectionSettings()
+    \{
+        ConnectionString = @"DefaultEndpointsProtocol=https;
+                             AccountName=myAccountName;
+                             AccountKey=myAccountKey;
+                             EndpointSuffix=core.windows.net"
+    \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation:
+// =======================================================================================
+var res = store.Maintenance.Send(
+    new PutConnectionStringOperation<QueueConnectionString>(conStr));
+`}
+
+
+#### Syntax:
+
+
+
+{`public class QueueConnectionString : ConnectionString
+\{
+    // Set the broker type to QueueBrokerType.AzureQueueStorage
+    // for an Azure Queue Storage connection string
+    public QueueBrokerType BrokerType \{ get; set; \}
+
+    // Configure this when setting a connection string for Kafka
+    public KafkaConnectionSettings KafkaConnectionSettings \{ get; set; \}
+
+    // Configure this when setting a connection string for RabbitMQ
+    public RabbitMqConnectionSettings RabbitMqConnectionSettings \{ get; set; \}
+
+    // Configure this when setting a connection string for Azure Queue Storage
+    public AzureQueueStorageConnectionSettings AzureQueueStorageConnectionSettings \{ get; set; \}
+\}
+`}
+
+
+
+{`public enum QueueBrokerType
+\{
+    None,
+    Kafka,
+    RabbitMq,
+    AzureQueueStorage
+\}
+`}
+
+
+
+{`public class AzureQueueStorageConnectionSettings
+\{
+    public EntraId EntraId \{ get; set; \}
+    public string ConnectionString \{ get; set; \}
+    public Passwordless Passwordless \{ get; set; \}
+\}
+
+public class EntraId
+\{
+    public string StorageAccountName \{ get; set; \}
+    public string TenantId \{ get; set; \}
+    public string ClientId \{ get; set; \}
+    public string ClientSecret \{ get; set; \}
+\}
+
+public class Passwordless
+\{
+    public string StorageAccountName \{ get; set; \}
+\}
+`}
+
+
+
+
+## Add an Azure Queue Storage ETL task
+
+
+
+ **Example**:
+
+* In this example, the Azure Queue Storage ETL task will:
+   * Extract source documents from the "Orders" collection in RavenDB.
+   * Process each "Order" document using a defined script that creates a new `orderData` object.
+   * Load the `orderData` object to the "OrdersQueue" in an Azure Queue Storage.
+* For more details about the script and the `loadTo` method, see the [transformation script](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx#the-transformation-script) section below.
+
+
+
+{`// Define a transformation script for the task:
+// ============================================
+Transformation transformation = new Transformation
+\{
+    // Define the input collections
+    Collections = \{ "Orders" \},
+    ApplyToAllDocuments = false,
+
+    // The transformation script
+    Name = "scriptName",
+    Script = @"// Create an orderData object
+               // ==========================
+               var orderData = \{
+                   Id: id(this),
+                   OrderLinesCount: this.Lines.length,
+                   TotalCost: 0
+               \};
+
+               // Update the orderData's TotalCost field
+               // ======================================
+               for (var i = 0; i < this.Lines.length; i++) \{
+                   var line = this.Lines[i];
+                   var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount);
+                   orderData.TotalCost += cost;
+               \}
+
+               // Load the object to the 'OrdersQueue' in Azure
+               // =============================================
+               loadToOrdersQueue(orderData, \{
+                   Id: id(this),
+                   Type: 'com.example.promotions',
+                   Source: '/promotion-campaigns/summer-sale'
+               \});"
+\};
+
+// Define the Azure Queue Storage ETL task:
+// ========================================
+var etlTask = new QueueEtlConfiguration()
+\{
+    BrokerType = QueueBrokerType.AzureQueueStorage,
+
+    Name = "myAzureQueueEtlTaskName",
+    ConnectionStringName = "myAzureQueueConStr",
+
+    Transforms = \{ transformation \},
+
+    // Set to false to allow task failover to another node if current one is down
+    PinToMentorNode = false
+\};
+
+// Deploy (send) the task to the server via the AddEtlOperation:
+// =============================================================
+store.Maintenance.Send(new AddEtlOperation<QueueConnectionString>(etlTask));
+`}
+
+
+
+
+ **Delete processed documents**:
+
+* You have the option to delete documents from your RavenDB database once they have been processed by the Queue ETL task.
+
+* Set the optional `Queues` property in your ETL configuration with the list of Azure queues for which processed documents should be deleted.
+
+
+
+{`var etlTask = new QueueEtlConfiguration()
+\{
+    BrokerType = QueueBrokerType.AzureQueueStorage,
+
+    Name = "myAzureQueueEtlTaskName",
+    ConnectionStringName = "myAzureQueueConStr",
+
+    Transforms = \{ transformation \},
+
+    // Define whether to delete documents from RavenDB after they are sent to the target queue
+    Queues = new List<EtlQueue>()
+    \{
+        new()
+        \{
+            // The name of the Azure queue
+            Name = "OrdersQueue",
+
+            // When set to 'true',
+            // documents that were processed by the transformation script will be deleted
+            // from RavenDB after the message is loaded to the "OrdersQueue" in Azure.
+            DeleteProcessedDocuments = true
+        \}
+    \}
+\};
+
+store.Maintenance.Send(new AddEtlOperation<QueueConnectionString>(etlTask));
+`}
+
+
+
+#### Syntax
+
+
+
+{`public class QueueEtlConfiguration
+\{
+    // Set to QueueBrokerType.AzureQueueStorage to define an Azure Queue Storage ETL task
+    public QueueBrokerType BrokerType \{ get; set; \}
+    // The ETL task name
+    public string Name \{ get; set; \}
+    // The registered connection string name
+    public string ConnectionStringName \{ get; set; \}
+    // List of transformation scripts
+    public List<Transformation> Transforms \{ get; set; \}
+    // Optional configuration per queue
+    public List<EtlQueue> Queues \{ get; set; \}
+    // Set to 'false' to allow task failover to another node if current one is down
+    public bool PinToMentorNode \{ get; set; \}
+\}
+
+public class Transformation
+\{
+    // The script name
+    public string Name \{ get; set; \}
+    // The source RavenDB collections that serve as the input for the script
+    public List<string> Collections \{ get; set; \}
+    // Set whether to apply the script on all collections
+    public bool ApplyToAllDocuments \{ get; set; \}
+    // The script itself
+    public string Script \{ get; set; \}
+\}
+
+public class EtlQueue
+\{
+    // The Azure queue name
+    public string Name \{ get; set; \}
+    // Delete processed documents when set to 'true'
+    public bool DeleteProcessedDocuments \{ get; set; \}
+\}
+`}
+
+
+
+
+## The transformation script
+
+The [basic characteristics](../../../../server/ongoing-tasks/etl/basics.mdx) of an Azure Queue Storage ETL script are similar to those of other ETL types.
+The script defines what data to **extract** from the source document, how to **transform** this data,
+and which Azure Queue to **load** it to.
+
+#### The loadTo method
+
+To specify which Azure queue to load the data into, use either of the following methods in your script.
+The two methods are equivalent, offering alternative syntax:
+
+* **`loadTo<QueueName>(obj, {attributes})`**
+   * Here the target is specified as part of the function name.
+   * The target _<QueueName>_ in this syntax is **not** a variable and cannot be used as one;
+     it is simply a string literal of the target's name.
+
+* **`loadTo('QueueName', obj, {attributes})`**
+   * Here the target is passed as an argument to the method.
+   * Separating the target name from the `loadTo` command makes it possible to include symbols like `'-'` and `'.'` in target names.
+     This is not possible when the `loadTo<QueueName>` syntax is used because including special characters in the name of a JavaScript function makes it invalid.
+ + | Parameter | Type | Description | + |----------------|--------|----------------------------------------------------------------------------------------------------------------------------------| + | **QueueName** | string | The name of the Azure Queue | + | **obj** | object | The object to transfer | + | **attributes** | object | An object with optional & required [CloudEvents attributes](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#cloudevents) | + +For example, the following two calls, which load data to "OrdersQueue", are equivalent: + +* `loadToOrdersQueue(obj, {attributes})` +* `loadTo('OrdersQueue', obj, {attributes})` +The following is a sample script that processes documents from the Orders collection: + + + +{`// Create an orderData object +// ========================== +var orderData = \{ + Id: id(this), + OrderLinesCount: this.Lines.length, + TotalCost: 0 +\}; + +// Update the orderData's TotalCost field +// ====================================== +for (var i = 0; i < this.Lines.length; i++) \{ + var line = this.Lines[i]; + var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount); + orderData.TotalCost += cost; +\} + +// Load the object to the "OrdersQueue" in Azure +// ============================================= +loadToOrdersQueue(orderData, \{ + Id: id(this), + Type: 'com.example.promotions', + Source: '/promotion-campaigns/summer-sale' +\}) +`} + + + + + +Note: +The queue name defined in the transform script must follow the set of rules outlined in: +[Naming Queues and Metadata](https://learn.microsoft.com/en-us/rest/api/storageservices/naming-queues-and-metadata#queue-names ). + + + + + diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/kafka.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/kafka.mdx new file mode 100644 index 0000000000..ed3b7741fb --- /dev/null +++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/kafka.mdx @@ -0,0 +1,346 @@ +--- +title: "Queue ETL: Apache Kafka" +hide_table_of_contents: true +sidebar_label: Kafka ETL +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Queue ETL: Apache Kafka + + +* Apache Kafka is a distributed, high-performance, transactional messaging platform that remains performant + as the number of messages it needs to process increases and the number of events it needs to stream climbs to the big-data zone. + +* Create a **Kafka ETL Task** to: + * Extract data from a RavenDB database + * Transform the data using one or more custom scripts + * Load the resulting JSON object to a Kafka destination as a CloudEvents message + +* Utilizing this task allows RavenDB to act as an event producer in a Kafka architecture. + +* Read more about Kafka clusters, brokers, topics, partitions, and other related subjects, + in the platform's [official documentation](https://kafka.apache.org/documentation/#gettingStarted). +* This article focuses on how to create a Kafka ETL task using the Client API. + To define a Kafka ETL task from the Studio, see [Studio: Kafka ETL Task](../../../../studio/database/tasks/ongoing-tasks/kafka-etl-task.mdx). + For an **overview of Queue ETL tasks**, see [Queue ETL tasks overview](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx). 
+
+* In this page:
+   * [Add a Kafka connection string](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#add-a-kafka-connection-string)
+      * [Example](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#example)
+      * [Syntax](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#syntax)
+   * [Add a Kafka ETL task](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#add-a-kafka-etl-task)
+      * [Example - basic](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#example-basic)
+      * [Example - delete processed documents](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#delete-processed-documents)
+      * [Syntax](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#syntax-1)
+   * [The transformation script](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#the-transformation-script)
+      * [The loadTo method](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#the-loadto-method)
+
+
+## Add a Kafka connection string
+
+Before setting up the ETL task, define a connection string that the task will use to connect to the message broker's bootstrap servers.
+
+#### Example
+
+
+
+{`// Prepare the connection string:
+// ==============================
+var conStr = new QueueConnectionString
+\{
+    // Provide a name for this connection string
+    Name = "myKafkaConStr",
+
+    // Set the broker type
+    BrokerType = QueueBrokerType.Kafka,
+
+    // Configure the connection details
+    KafkaConnectionSettings = new KafkaConnectionSettings()
+        \{ BootstrapServers = "localhost:9092" \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation:
+// =======================================================================================
+var res = store.Maintenance.Send(
+    new PutConnectionStringOperation<QueueConnectionString>(conStr));
+`}
+
+
+#### Syntax
+
+
+
+{`public class QueueConnectionString : ConnectionString
+\{
+    // Set the broker type to QueueBrokerType.Kafka for a Kafka connection string
+    public QueueBrokerType BrokerType \{ get; set; \}
+
+    // Configure this when setting a connection string for Kafka
+    public KafkaConnectionSettings KafkaConnectionSettings \{ get; set; \}
+
+    // Configure this when setting a connection string for RabbitMQ
+    public RabbitMqConnectionSettings RabbitMqConnectionSettings \{ get; set; \}
+
+    // Configure this when setting a connection string for Azure Queue Storage
+    public AzureQueueStorageConnectionSettings AzureQueueStorageConnectionSettings \{ get; set; \}
+\}
+`}
+
+
+
+{`public enum QueueBrokerType
+\{
+    None,
+    Kafka,
+    RabbitMq,
+    AzureQueueStorage
+\}
+`}
+
+
+
+{`public class KafkaConnectionSettings
+\{
+    // A string containing comma-separated "host:port" URLs of Kafka brokers
+    public string BootstrapServers \{ get; set; \}
+
+    // Various configuration options
+    public Dictionary<string, string> ConnectionOptions \{ get; set; \}
+
+    public bool UseRavenCertificate \{ get; set; \}
+\}
+`}
+
+
+
+
+## Add a Kafka ETL task
+
+
+
+ **Example - basic**:
+
+* In this example, the Kafka ETL task will:
+   * Extract source documents from the "Orders" collection in RavenDB.
+   * Process each "Order" document using a defined script that creates a new `orderData` object.
+   * Load the `orderData` object to the "OrdersTopic" in a Kafka broker.
+* For more details about the script and the `loadTo` method, see the [transformation script](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#the-transformation-script) section below.
+
+
+
+{`// Define a transformation script for the task:
+// ============================================
+Transformation transformation = new Transformation
+\{
+    // Define the input collections
+    Collections = \{ "Orders" \},
+    ApplyToAllDocuments = false,
+
+    // The transformation script
+    Name = "scriptName",
+    Script = @"// Create an orderData object
+               // ==========================
+               var orderData = \{
+                   Id: id(this),
+                   OrderLinesCount: this.Lines.length,
+                   TotalCost: 0
+               \};
+
+               // Update the orderData's TotalCost field
+               // ======================================
+               for (var i = 0; i < this.Lines.length; i++) \{
+                   var line = this.Lines[i];
+                   var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount);
+                   orderData.TotalCost += cost;
+               \}
+
+               // Load the object to the 'OrdersTopic' in Kafka
+               // =============================================
+               loadToOrdersTopic(orderData, \{
+                   Id: id(this),
+                   PartitionKey: id(this),
+                   Type: 'com.example.promotions',
+                   Source: '/promotion-campaigns/summer-sale'
+               \});"
+\};
+
+// Define the Kafka ETL task:
+// ==========================
+var etlTask = new QueueEtlConfiguration()
+\{
+    BrokerType = QueueBrokerType.Kafka,
+
+    Name = "myKafkaEtlTaskName",
+    ConnectionStringName = "myKafkaConStr",
+
+    Transforms = \{ transformation \},
+
+    // Set to false to allow task failover to another node if the current one is down
+    PinToMentorNode = false
+\};
+
+// Deploy (send) the task to the server via the AddEtlOperation:
+// =============================================================
+store.Maintenance.Send(new AddEtlOperation<QueueConnectionString>(etlTask));
+`}
+
+
+
+
+
+
+  **Example - delete processed documents**:
+* You have the option to delete documents from your RavenDB database once they have been processed by the Queue ETL task.
+
+* Set the optional `Queues` property in your ETL configuration with the list of Kafka topics for which processed documents should be deleted.
+
+
+
+{`var etlTask = new QueueEtlConfiguration()
+\{
+    BrokerType = QueueBrokerType.Kafka,
+
+    Name = "myKafkaEtlTaskName",
+    ConnectionStringName = "myKafkaConStr",
+
+    Transforms = \{ transformation \},
+
+    // Define whether to delete documents from RavenDB after they are sent to the target topic
+    Queues = new List<EtlQueue>()
+    \{
+        new()
+        \{
+            // The name of the Kafka topic
+            Name = "OrdersTopic",
+
+            // When set to 'true',
+            // documents that were processed by the transformation script will be deleted
+            // from RavenDB after the message is loaded to the "OrdersTopic" in Kafka.
+            DeleteProcessedDocuments = true
+        \}
+    \}
+\};
+
+store.Maintenance.Send(new AddEtlOperation<QueueConnectionString>(etlTask));
+`}
+
+
+
+
+#### Syntax
+
+
+
+{`public class QueueEtlConfiguration
+\{
+    // Set to QueueBrokerType.Kafka to define a Kafka ETL task
+    public QueueBrokerType BrokerType \{ get; set; \}
+    // The ETL task name
+    public string Name \{ get; set; \}
+    // The registered connection string name
+    public string ConnectionStringName \{ get; set; \}
+    // List of transformation scripts
+    public List<Transformation> Transforms \{ get; set; \}
+    // Optional configuration per queue
+    public List<EtlQueue> Queues \{ get; set; \}
+    // Set to 'false' to allow task failover to another node if the current one is down
+    public bool PinToMentorNode \{ get; set; \}
+\}
+
+public class Transformation
+\{
+    // The script name
+    public string Name \{ get; set; \}
+    // The source RavenDB collections that serve as the input for the script
+    public List<string> Collections \{ get; set; \}
+    // Set whether to apply the script on all collections
+    public bool ApplyToAllDocuments \{ get; set; \}
+    // The script itself
+    public string Script \{ get; set; \}
+\}
+
+public class EtlQueue
+\{
+    // The Kafka topic name
+    public string Name \{ get; set; \}
+    // Delete processed documents when set to 'true'
+    public bool DeleteProcessedDocuments \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## The transformation script
+
+The [basic characteristics](../../../../server/ongoing-tasks/etl/basics.mdx) of a Kafka ETL script are similar to those of other ETL types.
+The script defines what data to **extract** from the source document, how to **transform** this data,
+and which Kafka Topic to **load** it to.
+#### The loadTo method
+
+To specify which Kafka topic to load the data into, use either of the following methods in your script.
+The two methods are equivalent, offering alternative syntax:
+
+* **`loadTo<TopicName>(obj, {attributes})`**
+  * Here the target is specified as part of the function name.
+  * The target _<TopicName>_ in this syntax is **not** a variable and cannot be used as one;
+    it is simply a string literal of the target's name.
+
+* **`loadTo('TopicName', obj, {attributes})`**
+  * Here the target is passed as an argument to the method.
+  * Separating the target name from the `loadTo` command makes it possible to include symbols like `'-'` and `'.'` in target names.
+    This is not possible when the `loadTo<TopicName>` syntax is used because including special characters in the name of a JavaScript function makes it invalid.
+
+| Parameter      | Type   | Description |
+|----------------|--------|--------------------------------------------------------------------------------------------------------------------------------|
+| **TopicName**  | string | The name of the Kafka topic |
+| **obj**        | object | The object to transfer |
+| **attributes** | object | An object with optional & required [CloudEvents attributes](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#cloudevents) |
+
+For example, the following two calls, which load data to "OrdersTopic", are equivalent:
+
+* `loadToOrdersTopic(obj, {attributes})`
+* `loadTo('OrdersTopic', obj, {attributes})`
+
+The following is a sample script that processes documents from the Orders collection:
+
+
+
+{`// Create an orderData object
+// ==========================
+var orderData = \{
+    Id: id(this),
+    OrderLinesCount: this.Lines.length,
+    TotalCost: 0
+\};
+
+// Update the orderData's TotalCost field
+// ======================================
+for (var i = 0; i < this.Lines.length; i++) \{
+    var line = this.Lines[i];
+    var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount);
+    orderData.TotalCost += cost;
+\}
+
+// Load the object to the "OrdersTopic" in Kafka
+// =============================================
+loadToOrdersTopic(orderData, \{
+    Id: id(this),
+    PartitionKey: id(this),
+    Type: 'com.example.promotions',
+    Source: '/promotion-campaigns/summer-sale'
+\})
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/overview.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/overview.mdx
new file mode 100644
index 0000000000..3a92aa2477
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/overview.mdx
@@ -0,0 +1,133 @@
+---
+title: "Queue ETL Overview"
+hide_table_of_contents: true
+sidebar_label: Overview
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Queue ETL Overview
+
+
+* Message brokers are high-throughput, distributed messaging services that host data they receive
+  from **producer** applications and serve it to **consumer** clients via FIFO data queues.
+
+* RavenDB can operate as a _Producer_ within this architecture to the following message brokers:
+  * **Apache Kafka**
+  * **RabbitMQ**
+  * **Azure Queue Storage**
+  * **Amazon SQS**
+
+* This functionality is achieved by defining [Queue ETL tasks](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#queue-etl-tasks) within a RavenDB database.
+
+* RavenDB can also function as a _Consumer_.
+  To learn about RavenDB's role as a _Consumer_, please refer to the [Queue Sink section](../../../../server/ongoing-tasks/queue-sink/overview.mdx).
+
+* In this page:
+  * [Queue ETL tasks](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#queue-etl-tasks)
+  * [Data delivery](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#data-delivery)
+    * [What is transferred](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#what-is-transferred)
+    * [How are messages produced and consumed](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#how-are-messages-produced-and-consumed)
+    * [Idempotence and message duplication](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#idempotence-and-message-duplication)
+  * [CloudEvents](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#cloudevents)
+  * [Task statistics](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#task-statistics)
+
+
+## Queue ETL tasks
+
+RavenDB produces messages to broker queues via the following Queue ETL tasks:
+
+* **Kafka ETL Task**
+  You can define a Kafka ETL Task from [Studio](../../../../studio/database/tasks/ongoing-tasks/kafka-etl-task.mdx)
+  or using the [Client API](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx).
+* **RabbitMQ ETL Task**
+  You can define a RabbitMQ ETL Task from [Studio](../../../../studio/database/tasks/ongoing-tasks/rabbitmq-etl-task.mdx)
+  or using the [Client API](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx).
+* **Azure Queue Storage ETL Task**
+  You can define an Azure Queue Storage ETL Task from [Studio](../../../../studio/database/tasks/ongoing-tasks/azure-queue-storage-etl.mdx)
+  or using the [Client API](../../../../server/ongoing-tasks/etl/queue-etl/azure-queue.mdx).
+* **Amazon SQS ETL Task**
+  You can define an Amazon SQS ETL Task from [Studio](../../../../studio/database/tasks/ongoing-tasks/amazon-sqs-etl.mdx)
+  or using the [Client API](../../../../server/ongoing-tasks/etl/queue-etl/amazon-sqs.mdx).
+
+The above ETL tasks:
+
+* **Extract** selected data from RavenDB documents from specified collections.
+* **Transform** the data to new JSON objects.
+* Wrap the JSON objects as [CloudEvents messages](https://cloudevents.io)
+  and **Load** them to the designated message broker.
+
+
+
+## Data delivery
+
+#### What is transferred:
+
+* **Documents only**
+  A Queue ETL task transfers documents only.
+  Document extensions like attachments, counters, or time series will not be transferred.
+* **CloudEvents messages**
+  JSON objects produced by the task's transformation script are wrapped
+  and delivered as [CloudEvents Messages](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#cloudevents).
+
+#### How are messages produced and consumed:
+
+* The Queue ETL task will send the messages it produces to the target using a **connection string**,
+  which specifies the destination and credentials required to authorize the connection.
+  Find the specific syntax for defining a connection string per task in each task's documentation.
+* Each message will be added to the tail of its assigned queue according to the transformation script.
+  As earlier messages are processed, it will advance to the head of the queue, becoming available for consumers.
+* RavenDB publishes messages to the designated brokers using [transactions and batches](../../../../server/ongoing-tasks/etl/basics.mdx#batch-processing),
+  creating a batch of messages and opening a transaction to the destination queue for the batch.
+
+#### Idempotence and message duplication:
+
+* RavenDB is an **idempotent producer**, which typically does not send duplicate messages to queues.
+* However, it is possible that duplicate messages will be sent to the broker. + For example: + Different nodes of a RavenDB cluster are regarded as different producers by the broker. + If the node responsible for the ETL task fails while sending a batch of messages, + the new responsible node may resend messages that were already received by the broker. +* Therefore, if processing each message only once is important to the consumer, + it is **the consumer's responsibility** to verify the uniqueness of each consumed message. + + + +## CloudEvents + +* After preparing a JSON object that needs to be sent to a message broker, + the ETL task wraps it as a CloudEvents message using the [CloudEvents Library](https://cloudevents.io). + +* To do that, the JSON object is provided with additional [required attributes](https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#required-attributes), + added as headers to the message, including: + + | Attribute | Type | Description | Default Value | + |-----------------|----------|-----------------------------------------------------------------------------------------------------------|------------------------------------------------------| + | **id** | `string` | [Event identifier](https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#id) | The document Change Vector | + | **type** | `string` | [Event type](https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#type) | "ravendb.etl.put" | + | **source** | `string` | [Event context](https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#source-1) | `//` | + +* The optional 'partitionkey' attribute can also be added. + Currently, it is only implemented by [Kafka ETL](../../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx). + + | Optional Attribute | Type | Description | Default Value | + |----------------------|------------|----------------------------------------------------------------------------------------------------------------------------------------------|------------------| + | **partitionkey** | `string` | [Events relationship/grouping definition](https://github.com/cloudevents/spec/blob/main/cloudevents/extensions/partitioning.md#partitionkey) | The document ID | + + + +## Task statistics + +Use the Studio [Ongoing tasks stats](../../../../studio/database/stats/ongoing-tasks-stats/overview.mdx) view +to see various statistics related to data extraction, transformation, and loading to the target broker. + +![Queue Brokers Stats](./assets/overview_stats.png) + + + + diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx new file mode 100644 index 0000000000..1ce37d2b68 --- /dev/null +++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx @@ -0,0 +1,364 @@ +--- +title: "Queue ETL: RabbitMQ" +hide_table_of_contents: true +sidebar_label: RabbitMQ ETL +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Queue ETL: RabbitMQ + + +* RabbitMQ exchanges are designed to disperse data to multiple queues, + creating a flexible data channeling system that can easily handle complex message streaming scenarios. 
+
+* Create a **RabbitMQ ETL Task** to:
+  * Extract data from a RavenDB database
+  * Transform the data using one or more custom scripts
+  * Load the resulting JSON object to a RabbitMQ destination as a CloudEvents message
+
+* Utilizing this task allows RavenDB to act as an event producer in a RabbitMQ architecture.
+
+* Read more about RabbitMQ in the platform's [official documentation](https://www.rabbitmq.com/).
+* This article focuses on how to create a RabbitMQ ETL task using the Client API.
+  To define a RabbitMQ ETL task from Studio, see [Studio: RabbitMQ ETL Task](../../../../studio/database/tasks/ongoing-tasks/rabbitmq-etl-task.mdx).
+  For an **overview of Queue ETL tasks**, see [Queue ETL tasks overview](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx).
+
+* In this page:
+  * [Add a RabbitMQ connection string](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#add-a-rabbitmq-connection-string)
+    * [Example](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#example)
+    * [Syntax](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#syntax)
+  * [Add a RabbitMQ ETL task](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#add-a-rabbitmq-etl-task)
+    * [Example - basic](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#example-basic)
+    * [Example - delete processed documents](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#delete-processed-documents)
+    * [Syntax](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#syntax-1)
+  * [The transformation script](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#the-transformation-script)
+    * [The loadTo method](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#the-loadto-method)
+    * [Available method overloads](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#available-method-overloads)
+
+
+## Add a RabbitMQ connection string
+
+Before setting up the ETL task, define a connection string that the task will use to connect to RabbitMQ.
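+
+The connection string value is a single AMQP URI. As a rough sketch (assuming the standard amqp/amqps
+URI scheme supported by the underlying RabbitMQ client; all values below are placeholders):
+
+
+
+{`amqp://user:password@host:port/vhost     <- plain connection
+amqps://user:password@host:port/vhost    <- TLS-secured connection
+`}
+
+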
+#### Example
+
+
+
+{`// Prepare the connection string:
+// ==============================
+var conStr = new QueueConnectionString
+\{
+    // Provide a name for this connection string
+    Name = "myRabbitMqConStr",
+
+    // Set the broker type
+    BrokerType = QueueBrokerType.RabbitMq,
+
+    // Configure the connection details
+    RabbitMqConnectionSettings = new RabbitMqConnectionSettings()
+        \{ ConnectionString = "amqp://guest:guest@localhost:49154" \}
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation:
+// =======================================================================================
+var res = store.Maintenance.Send(
+    new PutConnectionStringOperation<QueueConnectionString>(conStr));
+`}
+
+
+#### Syntax
+
+
+
+{`public class QueueConnectionString : ConnectionString
+\{
+    // Set the broker type to QueueBrokerType.RabbitMq for a RabbitMQ connection string
+    public QueueBrokerType BrokerType \{ get; set; \}
+
+    // Configure this when setting a connection string for Kafka
+    public KafkaConnectionSettings KafkaConnectionSettings \{ get; set; \}
+
+    // Configure this when setting a connection string for RabbitMQ
+    public RabbitMqConnectionSettings RabbitMqConnectionSettings \{ get; set; \}
+
+    // Configure this when setting a connection string for Azure Queue Storage
+    public AzureQueueStorageConnectionSettings AzureQueueStorageConnectionSettings \{ get; set; \}
+\}
+`}
+
+
+
+
+{`public enum QueueBrokerType
+\{
+    None,
+    Kafka,
+    RabbitMq,
+    AzureQueueStorage
+\}
+`}
+
+
+
+
+{`public sealed class RabbitMqConnectionSettings
+\{
+    // A single string that specifies the RabbitMQ exchange connection details
+    public string ConnectionString \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## Add a RabbitMQ ETL task
+
+
+
+  **Example - basic**:
+* In this example, the RabbitMQ ETL Task will -
+  * Extract source documents from the "Orders" collection in RavenDB.
+  * Process each "Order" document using a defined script that creates a new `orderData` object.
+  * Load the `orderData` object to the "OrdersExchange" in a RabbitMQ broker.
+* For more details about the script and the `loadTo` method overloads, see the [transformation script](../../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#the-transformation-script) section below.
+
+
+
+{`// Define a transformation script for the task:
+// ============================================
+Transformation transformation = new Transformation
+\{
+    // Define the input collections
+    Collections = \{ "Orders" \},
+    ApplyToAllDocuments = false,
+
+    // The transformation script
+    Name = "scriptName",
+    Script = @"// Create an orderData object
+               // ==========================
+               var orderData = \{
+                   Id: id(this),
+                   OrderLinesCount: this.Lines.length,
+                   TotalCost: 0
+               \};
+
+               // Update the orderData's TotalCost field
+               // ======================================
+               for (var i = 0; i < this.Lines.length; i++) \{
+                   var line = this.Lines[i];
+                   var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount);
+                   orderData.TotalCost += cost;
+               \}
+
+               // Load the object to the 'OrdersExchange' in RabbitMQ
+               // ===================================================
+               loadToOrdersExchange(orderData, \`routingKey\`, \{
+                   Id: id(this),
+                   Type: 'com.example.promotions',
+                   Source: '/promotion-campaigns/summer-sale'
+               \});"
+\};
+
+// Define the RabbitMQ ETL task:
+// =============================
+var etlTask = new QueueEtlConfiguration()
+\{
+    BrokerType = QueueBrokerType.RabbitMq,
+
+    Name = "myRabbitMqEtlTaskName",
+    ConnectionStringName = "myRabbitMqConStr",
+
+    Transforms = \{ transformation \},
+
+    // Set to false to have the RabbitMQ client library declare the queue if it does not exist
+    SkipAutomaticQueueDeclaration = false,
+
+    // Set to false to allow task failover to another node if the current one is down
+    PinToMentorNode = false
+\};
+
+// Deploy (send) the task to the server via the AddEtlOperation:
+// =============================================================
+store.Maintenance.Send(new AddEtlOperation<QueueConnectionString>(etlTask));
+`}
+
+
+
+
+
+
+  **Example - delete processed documents**:
+* You have the option to delete documents from your RavenDB database once they have been processed by the Queue ETL task.
+
+* Set the optional `Queues` property in your ETL configuration with the list of RabbitMQ queues for which processed documents should be deleted.
+
+
+
+{`var etlTask = new QueueEtlConfiguration()
+\{
+    BrokerType = QueueBrokerType.RabbitMq,
+
+    Name = "myRabbitMqEtlTaskName",
+    ConnectionStringName = "myRabbitMqConStr",
+
+    Transforms = \{ transformation \},
+
+    // Define whether to delete documents from RavenDB after they are sent to RabbitMQ
+    Queues = new List<EtlQueue>()
+    \{
+        new()
+        \{
+            // The name of the target queue
+            Name = "OrdersQueue",
+
+            // When set to 'true',
+            // documents that were processed by the transformation script will be deleted
+            // from RavenDB after the message is loaded to the "OrdersQueue" in RabbitMQ.
+            DeleteProcessedDocuments = true
+        \}
+    \}
+\};
+
+store.Maintenance.Send(new AddEtlOperation<QueueConnectionString>(etlTask));
+`}
+
+
+
+
+#### Syntax
+
+
+
+{`public class QueueEtlConfiguration
+\{
+    // Set to QueueBrokerType.RabbitMq to define a RabbitMQ ETL task
+    public QueueBrokerType BrokerType \{ get; set; \}
+    // The ETL task name
+    public string Name \{ get; set; \}
+    // The registered connection string name
+    public string ConnectionStringName \{ get; set; \}
+    // List of transformation scripts
+    public List<Transformation> Transforms \{ get; set; \}
+    // Optional configuration per queue
+    public List<EtlQueue> Queues \{ get; set; \}
+    // Set to 'false' to allow task failover to another node if the current one is down
+    public bool PinToMentorNode \{ get; set; \}
+
+    // Set to 'false' to have the RabbitMQ client library declare the queue if it does not exist.
+    // Set to 'true' to skip automatic queue declaration,
+    // use this option when you prefer to define Exchanges, Queues & Bindings manually.
+    public bool SkipAutomaticQueueDeclaration \{ get; set; \}
+\}
+
+public class Transformation
+\{
+    // The script name
+    public string Name \{ get; set; \}
+    // The source RavenDB collections that serve as the input for the script
+    public List<string> Collections \{ get; set; \}
+    // Set whether to apply the script on all collections
+    public bool ApplyToAllDocuments \{ get; set; \}
+    // The script itself
+    public string Script \{ get; set; \}
+\}
+
+public class EtlQueue
+\{
+    // The RabbitMQ target queue name
+    public string Name \{ get; set; \}
+    // Delete processed documents when set to 'true'
+    public bool DeleteProcessedDocuments \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## The transformation script
+
+The [basic characteristics](../../../../server/ongoing-tasks/etl/basics.mdx) of a RabbitMQ ETL script are similar to those of other ETL types.
+The script defines what data to **extract** from the source document, how to **transform** this data,
+and which RabbitMQ Exchange to **load** it to.
+#### The loadTo method
+
+To specify which RabbitMQ Exchange to load the data into, use either of the following methods in your script.
+The two methods are equivalent, offering alternative syntax:
+
+* **`loadTo<ExchangeName>(obj, 'routingKey', {attributes})`**
+  * Here the target is specified as part of the function name.
+  * The target _<ExchangeName>_ in this syntax is **not** a variable and cannot be used as one;
+    it is simply a string literal of the target's name.
+
+* **`loadTo('ExchangeName', obj, 'routingKey', {attributes})`**
+  * Here the target is passed as an argument to the method.
+  * Separating the target name from the `loadTo` command makes it possible to include symbols like `'-'` and `'.'` in target names.
+    This is not possible when the `loadTo<ExchangeName>` syntax is used because including special characters in the name of a JavaScript function makes it invalid.
+
+| Parameter        | Type    | Description |
+|------------------|---------|----------------------------------------------------------------------------------------------------------------------------|
+| **ExchangeName** | string  | The name of the RabbitMQ exchange. |
+| **obj**          | object  | The object to transfer. |
+| **routingKey**   | string  | The RabbitMQ exchange evaluates this attribute to determine how to route the message to queues based on the exchange type. |
+| **attributes**   | object  | An object with [CloudEvents attributes](../../../../server/ongoing-tasks/etl/queue-etl/overview.mdx#cloudevents). |
+
+For example, the following two calls, which load data to the Orders exchange, are equivalent:
+
+* `loadToOrdersExchange(obj, 'users', {attributes})`
+* `loadTo('OrdersExchange', obj, 'users', {attributes})`
+#### Available method overloads
+
+* `loadTo('', obj, 'routingKey', {attributes})`
+  When replacing the exchange name with an empty string,
+  the message will be routed using the routingKey via the default exchange, which is predefined by the broker.
+
+* `loadTo<ExchangeName>(obj)`
+  `loadTo<ExchangeName>(obj, {attributes})`
+  When omitting the routingKey, message delivery will depend on the exchange type.
+
+* `loadTo<ExchangeName>(obj, 'routingKey')`
+  When omitting the attributes, default attribute values will be assigned.
+
+If no exchange is defined in the RabbitMQ platform, RavenDB will create a default exchange of the **Fanout** type.
+In this case, all routing keys will be ignored, and messages will be distributed to all bound queues.
+
+The following is a sample script that processes documents from the Orders collection:
+
+
+
+{`// Create an orderData object
+// ==========================
+var orderData = \{
+    Id: id(this),
+    OrderLinesCount: this.Lines.length,
+    TotalCost: 0
+\};
+
+// Update the orderData's TotalCost field
+// ======================================
+for (var i = 0; i < this.Lines.length; i++) \{
+    var line = this.Lines[i];
+    var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount);
+    orderData.TotalCost += cost;
+\}
+
+// Load the object to "OrdersExchange" in RabbitMQ
+// ===============================================
+loadToOrdersExchange(orderData, 'users-queue', \{
+    Id: id(this),
+    Type: 'com.example.promotions',
+    Source: '/promotion-campaigns/summer-sale'
+\})
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/raven.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/raven.mdx
new file mode 100644
index 0000000000..3cacce4456
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/raven.mdx
@@ -0,0 +1,821 @@
+---
+title: "Ongoing Tasks: RavenDB ETL"
+hide_table_of_contents: true
+sidebar_label: RavenDB ETL
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Ongoing Tasks: RavenDB ETL
+
+
+* **RavenDB ETL Task** creates an [ETL](../../../server/ongoing-tasks/etl/basics.mdx)
+  process for a given database when the destination is another RavenDB database.
+
+* The script is executed per document whenever the document is created, modified, and/or deleted.
+
+* It can be defined in code or using the [Studio](../../../studio/database/tasks/ongoing-tasks/ravendb-etl-task.mdx).
+
+* One RavenDB ETL task can have multiple transformation scripts, and each script can load to a different collection.
+
+* Each script can be defined on the source database to trigger ETL from a single collection,
+  multiple selected collections, or be applied to **all** documents regardless of the associated collection(s).
+
+* For the destination cluster to trust the source, you must [pass the .pfx certificate from the source to the destination cluster](../../../server/security/authentication/certificate-management.mdx#enabling-communication-between-servers:-importing-and-exporting-certificates)
+  if you are running a secure server.
+
+* In this page:
+  * [Transformation Script Options](../../../server/ongoing-tasks/etl/raven.mdx#transformation-script-options)
+  * [Empty Script](../../../server/ongoing-tasks/etl/raven.mdx#empty-script)
+  * [Attachments](../../../server/ongoing-tasks/etl/raven.mdx#attachments)
+  * [Counters](../../../server/ongoing-tasks/etl/raven.mdx#counters)
+  * [Time Series](../../../server/ongoing-tasks/etl/raven.mdx#time-series)
+  * [Revisions](../../../server/ongoing-tasks/etl/raven.mdx#revisions)
+  * [Deletions](../../../server/ongoing-tasks/etl/raven.mdx#deletions)
+
+
+![Figure 1. Configure RavenDB ETL task](./assets/raven-etl-setup.png)
+
+## Transformation Script Options
+
+* [Loading Documents](../../../server/ongoing-tasks/etl/raven.mdx#loading-documents)
+* [Alternative Syntax](../../../server/ongoing-tasks/etl/raven.mdx#alternative-syntax)
+* [Documents Identifiers](../../../server/ongoing-tasks/etl/raven.mdx#documents-identifiers)
+* [Filtering](../../../server/ongoing-tasks/etl/raven.mdx#filtering)
+* [Loading Data from Other Documents](../../../server/ongoing-tasks/etl/raven.mdx#loading-data-from-other-documents)
+* [Accessing Metadata](../../../server/ongoing-tasks/etl/raven.mdx#accessing-metadata)
+* [Creating Multiple Documents from a Single Document](../../../server/ongoing-tasks/etl/raven.mdx#creating-multiple-documents-from-a-single-document)
+### Loading Documents
+
+* To load data to the destination database, you must call the `loadTo<CollectionName>()` method and pass it a JS object.
+
+* Indicating the collection name as part of the load method name is a convention.
+
+* The objects passed to the `loadTo` method will be sent to the target database in the last stage - `Load`.
+
+* All results created in a single ETL run will be sent in a single batch and processed transactionally in the destination.
+
+  * For example, if you want to write data to the `Employees` collection, you need to call the following method in the script body:
+
+
+{`loadToEmployees(\{ ... \});
+`}
+
+
+
+* The method parameter must be a JS object. You can create it as follows:
+
+
+{`loadToEmployees(\{
+    Name: this.FirstName + " " + this.LastName
+\});
+`}
+
+
+
+* Or simply transform the current document object and pass it:
+
+
+{`this.Name = this.FirstName + " " + this.LastName;
+
+delete this.Address;
+delete this.FirstName;
+delete this.LastName;
+
+loadToEmployees(this);
+`}
+
+
+
+#### Example: loadTo Method
+
+The following is an example of a RavenDB ETL script processing documents from the `Employees` collection:
+
+
+
+{`var managerName = null;
+
+if (this.ReportsTo !== null)
+\{
+    var manager = load(this.ReportsTo);
+    managerName = manager.FirstName + " " + manager.LastName;
+\}
+
+// load documents to \`Employees\` collection in the destination
+loadToEmployees(\{
+    // the loaded documents will have these fields:
+    Name: this.FirstName + " " + this.LastName,
+    Title: this.Title,
+    BornOn: new Date(this.Birthday).getFullYear(),
+    Manager: managerName
+\});
+`}
+
+
+
+### Alternative Syntax
+
+The target collection name can be passed to the `loadTo` command separately, as a string argument,
+using this syntax: `loadTo('Target', obj)`
+
+* **Example**:
+  The following two calls to `loadTo` are equivalent.
+  `loadToEmployees(this);`
+  `loadTo('Employees', this);`
+
+
+
+  * The target name `'Employees'` in this syntax is **not** a variable and **cannot** be used as one:
+    it is simply a string literal of the target's name.
+  * Separating the target name from the `loadTo` command makes it possible to include symbols like
+    `-` and `.` in target names. This is not possible when the standard `loadToEmployees` syntax is
+    used because including special characters in the name of a JS function makes it invalid.
+
+
+### Documents Identifiers
+
+The documents generated in the destination database are given an ID according to the collection name specified in the `loadTo` method.
+
+**If the specified destination collection is the _same_ as the source**
+then the document is loaded to the _same_ collection and the original identifier is preserved.
+
+  * For example, the following ETL script defined in the `Employees` collection will keep the same identifiers in the target database:
+
+
+{`// original ID will be preserved
+loadToEmployees(\{ ... \});
+`}
+
+
+
+**If the 'loadTo' method indicates a _different_ destination collection**, e.g. `People`,
+  then the `Employees` documents will get new identifiers that combine the original ID and the new collection name in the destination database.
+
+  This forces us to load new documents with incremented IDs instead of overwriting the fields in existing documents.
+
+  By default, RavenDB deletes the old document version in the destination.
+  This can be changed by configuring the [deletions behavior](../../../server/ongoing-tasks/etl/raven.mdx#deletions).
+
+  RavenDB has to create a new, updated document in the destination with an [incremented server-made identity](../../../client-api/document-identifiers/working-with-document-identifiers.mdx#server-side-generated-ids).
+
+  * For example, if the source collection is `Employees` while the destination collection is `People`:
+
+
+{`// a new document with a new, incremented identifier will be generated in destination
+// by default, the old version will be deleted
+loadToPeople(\{ ... \});
+`}
+
+
+
+* In addition, ETL appends the symbol `/` to the requested ID so that the target database will [generate identifiers on its side](../../../client-api/document-identifiers/working-with-document-identifiers.mdx#server-side-generated-ids).
+  As a result, documents in the `People` collection in the target database will have identifiers such as: `employees/1-A/people/00000000000000000024-A`.
+### Filtering
+
+Documents can be filtered from the ETL by calling the `loadTo` method only for documents that match some condition:
+
+
+
+{`if (this.Active) \{
+    // load only active users
+    loadToEmployees(\{ ... \});
+\}
+`}
+
+
+### Loading Data from Other Documents
+
+The `load` method loads a document with the specified ID into the script context so it can be transformed.
+
+
+
+{`// this.ReportsTo has some document ID
+var manager = load(this.ReportsTo);
+`}
+
+
+### Accessing Metadata
+
+The metadata can be accessed in the following way:
+
+
+
+{`var value = this['@metadata']['custom-metadata-key'];
+`}
+
+
+### Creating Multiple Documents from a Single Document
+
+The `loadTo` method can be called multiple times in a single script.
+  That allows you to split a single source document into multiple documents on the destination database:
+
+
+
+{`// documents will be created in \`Addresses\` collection
+loadToAddresses(\{
+    City: this.Address.City,
+    Country: this.Address.Country,
+    Address: this.Address.Line1
+\});
+
+delete this.Address;
+
+// documents will be created in the \`Employees\` collection
+loadToEmployees(this);
+`}
+
+
+
+
+
+## Empty Script
+
+* An ETL task can be created with an empty script.
+* The documents will be transferred _without_ any modifications to the _same_ collection as the source document.
+
+
+## Attachments
+
+* Attachments are sent automatically when you send a _full_ collection to the destination using an _empty_ script.
+* If you use a script, you can indicate that an attachment should also be sent by using dedicated functions:
+
+  - `loadAttachment(name)` returns a reference to an attachment that is meant to be passed to `addAttachment()`
+  - `<doc>.addAttachment([name,] attachmentRef)` adds an attachment to a document that will be sent in the process, `<doc>` is a reference returned by `loadTo<CollectionName>()`
+* [Sending attachments together with documents](../../../server/ongoing-tasks/etl/raven.mdx#sending-attachments-together-with-documents)
+* [Changing attachment name](../../../server/ongoing-tasks/etl/raven.mdx#changing-attachment-name)
+* [Loading non-existent attachment](../../../server/ongoing-tasks/etl/raven.mdx#loading-non-existent-attachment)
+* [Accessing attachments from metadata](../../../server/ongoing-tasks/etl/raven.mdx#accessing-attachments-from-metadata)
+### Sending attachments together with documents
+
+* An attachment is sent along with a transformed document if this is explicitly defined in the script by using the `addAttachment()` method. By default, the attachment name is preserved.
+* The script below sends _all_ attachments of the current document by taking advantage of the `getAttachments()` function, loads each of them during transformation, and adds them to
+  a document that will be sent to the 'Users' collection on the destination database.
+
+
+{`var doc = loadToUsers(this);
+
+var attachments = getAttachments();
+
+for (var i = 0; i < attachments.length; i++) \{
+    doc.addAttachment(loadAttachment(attachments[i].Name));
+\}
+`}
+
+
+### Changing attachment name
+
+* If `addAttachment()` is called with two arguments, the first one can indicate a new name for an attachment. In the example below, the attachment `photo`
+  will be sent and stored under the `picture` name.
+* To check whether an attachment exists, use the `hasAttachment()` function.
+
+
+{`var employee = loadToEmployees(\{
+    Name: this.FirstName + " " + this.LastName
+\});
+
+if (hasAttachment('photo')) \{
+    employee.addAttachment('picture', loadAttachment('photo'));
+\}
+`}
+
+
+### Loading non-existent attachment
+
+The `loadAttachment()` function returns `null` if a document doesn't have an attachment with the given name. Passing such a reference to `addAttachment()` is a no-op, and no error will be thrown.
+### Accessing attachments from metadata
+
+The collection of attachments of the currently transformed document can be accessed either by the `getAttachments()` helper function or directly from the document metadata:
+
+
+
+{`var attachments = this['@metadata']['@attachments'];
+`}
+
+
+
+
+
+## Counters
+
+* Counters are sent automatically when you send a _full_ collection to the destination using an _empty_ script.
+* If a script is defined, RavenDB doesn't send counters by default.
+* To indicate that a counter should also be sent, define the relevant [behavior function](../../../server/ongoing-tasks/etl/raven.mdx#counter-behavior-function) in the script;
+  it is called whenever a counter of a processed document is modified (e.g. by an increment operation). If the relevant function doesn't exist, the counter isn't loaded.
+* The reason that counters require special functions is that incrementing a counter _doesn't_ modify the change vector of the related document, so the document _isn't_ processed
+  by ETL on a change in the counter.
+* Another option for sending a counter is to explicitly add it to a loaded document in the script.
+* [Counter behavior function](../../../server/ongoing-tasks/etl/raven.mdx#counter-behavior-function)
+* [Adding counter explicitly in a script](../../../server/ongoing-tasks/etl/raven.mdx#adding-counter-explicitly-in-a-script)
+### Counter behavior function
+
+* Every time a counter of a document from a collection that the ETL script is defined on is modified, the behavior function is called to check
+  whether the counter should be loaded to the destination database.
+
+
+
+The counter behavior function can be defined _only_ for counters of documents from collections that are ETLed to _the same_ collections, e.g.:
+a script is defined on the `Products` collection and it loads documents to the `Products` collection in a destination database using the `loadToProducts()` method.
+
+
+
+The function is defined in the script and should have the following signature:
+
+
+
+{`function loadCountersOf<CollectionName>Behavior(docId, counterName) \{
+    return [true | false];
+\}
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **docId** | `string` | The identifier of the document whose counter was modified. |
+| **<CollectionName>** | `string` | The collection that the ETL script is working on. |
+| **counterName** | `string` | The name of the modified counter for that doc. |
+
+| Return | Description |
+| - | - |
+| **bool** | If the function returns `true` then a change value is propagated to a destination. |
+
+#### Example: Modifying a Counter Named "downloads"
+
+The following script is defined on the `Products` collection:
+
+
+
+{`if (this.Category == 'software') \{
+    loadToProducts(\{
+        ProductName: this.Name
+    \});
+\}
+
+function loadCountersOfProductsBehavior(docId, counterName) \{
+    var doc = load(docId);
+
+    if (doc.Category == 'software' && counterName == 'downloads')
+        return true;
+\}
+`}
+
+
+### Adding counter explicitly in a script
+
+Counter behavior functions typically handle counters of documents
+  that are loaded to the same collection. If a transformation script for the `Employees`
+  collection specifies that they are loaded to the `People` collection in a target database,
+  then due to the document ID generation strategy of the ETL process (see [Documents Identifiers](../../../server/ongoing-tasks/etl/raven.mdx#documents-identifiers)),
+  the counters won't be sent because the final ID of a loaded document isn't known on the source side.
+
+  You can use special functions in the script code to deal with counters on documents that are loaded into different collections:
+
+
+
+{`var person = loadToPeople(\{ Name: this.Name + ' ' + this.LastName \});
+
+person.addCounter(loadCounter('likes'));
+`}
+
+
+
+* The above example indicates that the `likes` counter will be sent together with a document. It uses the following functions to accomplish that:
+  - `loadCounter(name)` returns a reference to a counter that is meant to be passed to `addCounter()`
+  - `<doc>.addCounter(counterRef)` adds a counter to a document that will be sent in the process, `<doc>` is a reference returned by `loadTo<CollectionName>()`
+
+
+
+  As the transformation script is run on a document update, counters added explicitly (`addCounter()`) will be loaded along with documents _only_ if the document is changed.
+  This means that an incremented counter value won't be sent until the document is modified and the ETL process runs the transformation for it.
+
+
+
+
+
+Counters sent by the ETL process always _override_ the existing value on the destination. ETL doesn't send an `increment` counter command;
+it **sets the value using a** `put` command.
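+
+As a minimal sketch, a catch-all behavior function for the `Products` example above (an assumption
+made purely for illustration) would simply return `true`, propagating every counter change:
+
+
+
+{`// Illustrative sketch: load every modified counter of processed Products documents
+function loadCountersOfProductsBehavior(docId, counterName) \{
+    return true;
+\}
+`}
+
+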
+
+
+
+
+## Time Series
+
+* If the transformation script is empty, time series are transferred along with their
+documents by default.
+* When the script is not empty, ETL can be set for time series via:
+  * [Time Series Load Behavior Function](../../../server/ongoing-tasks/etl/raven.mdx#time-series-load-behavior-function)
+  * [Adding Time Series to Documents](../../../server/ongoing-tasks/etl/raven.mdx#adding-time-series-to-documents)
+### Time Series Load Behavior Function
+
+* The time-series behavior function is defined in the script to set the conditions under
+which time-series data is loaded.
+* The load behavior function evaluates each [time-series segment](../../../document-extensions/timeseries/design.mdx#segmentation)
+and decides whether to load it to the destination database. ETL only updates the data
+that has changed: if only one time-series entry is modified, only the segment that
+entry belongs to is evaluated.
+* Changes to time-series trigger ETL on both the time-series itself and on the document
+it extends.
+* The function returns either a boolean or an object with two `Date` values that
+specify the range of time-series entries to load.
+* The time-series behavior function can _only_ be applied to time-series whose source
+collection and target collection have the same name. Loading a time-series from an
+Employees collection on the server-side to a Users collection at the target database
+is not possible using the load behavior function.
+* The function should be defined with the following signature:
+
+
+{`function loadTimeSeriesOf<CollectionName>Behavior(docId, timeSeriesName) \{
+    return [ true | false | <span of time> ];
+\}
+//"span of time" refers to this type: \{ string?: from, string?: to \}
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **\<collection name>** | A part of the function's name | Determines which collection's documents this behavior function applies to. A function named `loadTimeSeriesOfEmployeesBehavior` will apply to all time-series in the collection `Employees` |
+| **docId** | `string` | This parameter is used inside the function to refer to the documents' ID |
+| **timeSeriesName** | `string` | This parameter is used inside the function to refer to the time series' name |
+
+| Return Value | Description |
+| - | - |
+| `true` | If the behavior function returns `true`, the given time series segment is loaded. |
+| `false` | The given time series segment is not loaded |
+| **\<span of time>** | An object with two optional `Date` values: `from` and `to`. If this is the return value, the script loads the time series entries between these two times. If you leave `from` or `to` undefined they default to the start or end of the time series respectively. |
+
+#### Example
+
+The following script is defined in the `Companies` collection. The behavior function loads
+each document in the collection into the script context using `load(docId)`, then filters
+by the document's `Address.Country` property as well as the time series' name. This
+sends only stock price data for French companies.
+
+
+{`loadToCompanies(this);
+
+function loadTimeSeriesOfCompaniesBehavior(docId, timeSeriesName) \{
+    var company = load(docId);
+
+    if (company.Address.Country == 'France' && timeSeriesName == 'StockPrices')
+        return true;
+\}
+`}
+
+
+### Adding Time Series to Documents
+
+* Time series can be loaded into the script context using `loadTimeSeries()`.
+* Once a time series is loaded into the script, it can be added to a document using
+`addTimeSeries()`.
+
+
+{`var employee = loadToEmployees(\{
+    Name: this.Name + ' ' + this.LastName
+\});
+
+employee.addTimeSeries(loadTimeSeries('StockPrices'));
+`}
+
+
+
+
+When using `addTimeSeries`, `addAttachment`, and/or `addCounter`, ETL deletes and
+replaces the existing documents at the destination database, including all time
+series, counters, and attachments.
+
+
+
+Since the transformation script is run on document update, time series added to
+documents using `addTimeSeries()` will be loaded _only_ when the document they
+extend has changed.
+
+#### Filtering by start and end date
+
+Both the behavior function and `loadTimeSeries()` accept a start and end date as
+second and third parameters. If these are set, only time-series data within this
+time span is loaded to the destination database.
+
+
+
+{`company.addTimeSeries(loadTimeSeries('StockPrices', new Date(2020, 3, 26), new Date(2020, 3, 28)));
+`}
+
+
+
+
+
+{`function loadTimeSeriesOfUsersBehavior(doc, ts)
+\{
+    return \{
+        from: new Date(2020, 3, 26),
+        to: new Date(2020, 3, 28)
+    \};
+\};
+`}
+
+
+
+
+
+## Revisions
+
+Revisions are _not_ sent by the ETL process.
+
+However, if revisions are configured on the destination database, then when the target document is overwritten by the ETL process a revision will be created as expected.
+
+
+## Deletions
+
+Upon source document modifications, ETL is set to delete and replace the destination documents by default.
+
+If you want to control the way deletions are handled in the destination database,
+you can change the default settings with the configurable functions described in this section.
+
+* [Why documents are deleted by default](../../../server/ongoing-tasks/etl/raven.mdx#deletions-why-documents-are-deleted-by-default-in-the-destination-database)
+* [When destination collections are different](../../../server/ongoing-tasks/etl/raven.mdx#when-destination-collections-are-different)
+* [Collection specific function](../../../server/ongoing-tasks/etl/raven.mdx#deletions-collection-specific-function)
+* [Generic function](../../../server/ongoing-tasks/etl/raven.mdx#deletions-generic-function)
+* [Filtering deletions in the destination database](../../../server/ongoing-tasks/etl/raven.mdx#deletions-filtering-deletions-in-the-destination-database)
+* [Deletions Example: ETL script with deletion behavior defined](../../../server/ongoing-tasks/etl/raven.mdx#deletions-example-etl-script-with-deletion-behavior-defined)
+
+## Deletions: Why documents are deleted by default in the destination database
+
+### Preventing duplication
+
+To prevent duplication, we delete the documents in the destination by default before loading the
+updated documents that replace the deleted ones.
+If the document is deleted in the source, RavenDB also deletes it in the destination by default.
+
+Some developers prefer to control the deletes so that, for example, a delete in the source will not cause a delete in the destination,
+or to preserve a history of the document in the destination.
+
+The functions in this section were created to allow developers this control.
+
+### When destination collections are different
+
+**If we ETL to a different collection than the source**,
+the source isn't aware of the new IDs created.
+This forces us to load new documents with incremented IDs instead of overwriting the fields in existing documents.
+RavenDB has to create a new, updated document in the destination with an [incremented server-made identity](../../../server/ongoing-tasks/etl/raven.mdx#documents-identifiers).
+You can then choose if you want to delete the old version in the destination by selecting +`return false` in the transform script function [deleteDocumentsBehavior](../../../server/ongoing-tasks/etl/raven.mdx#deletions-generic-function). + +* Each updated version of the document gets a [server generated ID](../../../client-api/document-identifiers/working-with-document-identifiers.mdx#server-side-generated-ids) + in which the number at the end is incremented with each version. + + For example: + `"...profile/0000000000000000019-B"` will become `".../profile/0000000000000000020-B"` + The word before the number is the collection name and the letter after the number is the node. + In this case, the document's collection is "Profile", which is in a database in node "B", + and which has been updated via ETL 20 times. + +* If the ETL is defined to load the documents to more than one collection, + by default it will delete, and if it's not deleted in the source, it will replace all of the documents with the same prefix. + + For example: + Document `employees/1-A` is processed by ETL and put into the `People` and `Sales` collections with IDs: + `employees/1-A/people/0000000000000000001-A` and `employees/1-A/sales/0000000000000000001-A`. + Deletion or modification of the `employees/1-A` document on the source side triggers sending a command that deletes **all** documents + having the following prefix in their ID: `employees/1-A`. + +**If we load a document to the same collection as the source**, +then the ID is preserved and no special approach is needed. Deletion in the source results in +sending a single delete command in the destination for a given ID. +If documents are updated and not deleted in the source, they will simply be updated in the destination with no change to the destination document ID. +Deletions can be controlled by defining deletion behavior functions in the ETL script. + +* See a [sample ETL script with deletion behavior defined](../../../server/ongoing-tasks/etl/raven.mdx#deletions-example-etl-script-with-deletion-behavior-defined). + +## Deletions: Collection specific function + +### Syntax + + + +{`function deleteDocumentsOfBehavior(docId, deleted) \{ + if (deleted == false) + return +\} +`} + + + +`` needs to be substituted by a real collection name that the ETL script is working on (same convention as for [loadTo](../../../server/ongoing-tasks/etl/raven.mdx#transformation-script-options) +method). +e.g. `function deleteDocumentsOfOrdersBehavior(docId, deleted) {return false;}` + +| Parameter | Type | Description | Notes | +|-------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------| +| **docId** | `string` | The identifier of a deleted document. | | +| **deleted** | `bool` | If you don't include the `deleted` parameter, RavenDB will execute the function without checking if the document was deleted from the source database.
If you include `deleted`, RavenDB will check if the document was indeed deleted or just updated. | Optional | + +| Return Value | Description | +| - | - | +| **true** | The document will be deleted from the destination database. | +| **false** | The document will not be deleted from the destination database. | + +### Example - Collection Specific Deletion Behavior Function + +To define deletion handling when the source and destination collections are the same, use the following sample. +If you ETL to a different collection than the source, there is a different deletion behavior in the destination. + + + +{`function deleteDocumentsOfproductsHistoryBehavior(docId, deleted) \{ + // If any document in the specified source collection is modified but is not deleted, + // then the ETL will not send a delete command to the destination. + if (deleted === false) + return false; + // If the source document was deleted, the destination will also be deleted + else return true; +\} +`} + + + +Leaving out the `deleted` parameter will not allow you to check if the source document was deleted and will +trigger the command regardless of whether the source document was deleted. + +## Deletions: Generic function + +There is also a generic function to control deletion on different collections. + +### Syntax + + + +{`function deleteDocumentsBehavior(docId, collection, deleted) \{ + + if (collection === "string" && deleted === ) + return ; +\} +`} + + + +| Parameter | Type | Description | +|---------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **docId** | `string` | The identifier of a deleted document. | +| **collection** | `string` | The name of a collection that ETL is working on. | +| **deleted** | `bool` | Optional and therefore doesn't affect existing code.
If you don't include the `deleted` parameter, RavenDB will execute the function without checking if the document was deleted from the source database.
If you include `deleted`, RavenDB will check if the document was indeed deleted or just updated. | + +| Return Value | Description | +| - | - | +| **true** | The document will be deleted from the destination database. | +| **false** | The document will not be deleted from the destination database. | + + +Leaving out the `deleted` parameter will not allow you to check if the source document was deleted and will +trigger the command regardless of whether the source document was deleted. + + +### Example - Generic deletions behavior function + +[If the source and destination collection names are different](../../../server/ongoing-tasks/etl/raven.mdx#when-destination-collections-are-different) +and deletions behavior is set to false, +each document change will load a new document with an incremented document identity, thus saving a history. + +If the collection name of the destination is the same as the source, the ETL will simply update the destination document +without needing to change the ID. + + + +{`function deleteDocumentsBehavior(docId, collection, deleted) \{ + // If any document in the specified source collection is modified but is not deleted, + // then the ETL will not send a delete command to the destination collection "Products". + // (If collection names were different, the old document versions would remain + // and a new version would be stored with an incremented ID, thus saving a history of document versions.) + if (collection === "Products" && deleted === false) + return false; + // If the source document was deleted, delete the entire set of versions from the destination. + else return true; +\} +`} + + + +## Deletions: Filtering deletions in the destination database + +You can further specify the desired deletion behavior by adding filters. + +By the time an ETL process runs a delete behavior function, the original document is already deleted from the source. +It is no longer available. +You may want the ETL to set up an archive of documents that were deleted from the source, +or save a part of deleted documents in a separate document for later use. + +Following are three examples of ways to save documents for later use when they are deleted from the source database: + +#### Filtering out all deletions: + + + +{`loadToUsers(this); + +function deleteDocumentsOfUsersBehavior(docId) \{ + return false; +\} +`} + + + +#### Storing deletion info in an additional document: + +When you delete a document you can store a deletion marker document that will prevent propagating the deletion by ETL. + + * In the below example if the auxiliary document `LocalOnlyDeletions/{docId}` exists then we skip this deletion during ETL. + The auxiliary document can be created to protect certain documents from deletion in the destination database. + * You can add `@expires` tag to the metadata when storing the marker document, so it would be automatically cleaned up after a certain time + by [the expiration extension](../../../server/extensions/expiration.mdx#setting-the-document-expiration-time). + + +{`loadToUsers(this); + +function deleteDocumentsOfUsersBehavior(docId) \{ + var localOnlyDeletion = load('LocalOnlyDeletions/' + docId); + + return !localOnlyDeletion; +\} +`} + + + +#### When ETL is set on the entire database, but you want to filter deletions by certain collections: + +If you define ETL for all documents, regardless of the collection they belong to, then the +generic function can filter deletions by collection name. 
+ + + +{`function deleteDocumentsBehavior(docId, collection, deleted) \{ + return 'Users' != collection; +\} +`} + + + +| Parameter | Type | Description | +|----------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **docId** | `string` | The identifier of a deleted document. | +| **collection** | `string` | The name of a collection. | +| **deleted** | `bool` | Optional and therefore doesn't affect existing code.
If you don't include the `deleted` parameter, RavenDB will execute the function without checking if the document was deleted from the source database.
If you include `deleted`, RavenDB will check if the document was indeed deleted or just updated.
If the function returns `true` and the document was deleted in the source database, it will also be deleted from the destination database. |
+
+| Return | Description |
+| - | - |
+| **bool** | If the returned value is `true`, the document will be deleted. |
+
+
+## Deletions Example: ETL script with deletion behavior defined
+
+The following example checks whether the source document was deleted or just updated before
+loading the transformed document. This function can be used whether the destination collection is the same as the source or different.
+
+> [In this example, the source and destination collection names are different](../../../server/ongoing-tasks/etl/raven.mdx#when-destination-collections-are-different)
+and the deletion behavior function returns false if the source document is deleted,
+so source deletions won't delete the destination documents, thus saving a history of documents even if they're deleted in the source.
+
+> If the source isn't deleted, the ETL process will delete the old version and load the new version with an incremented ID.
+
+Conversely, if the function returns `false` every time a document is updated but not deleted,
+the destination will save a history of document versions,
+with an incremented auto-generated ID for each version.
+
+For this example, the fields `SupplierOrderLink` and `SupplierPhone` are added to test documents in the source database.
+
+
+
+{`// Define ETL to destination collection "productsHistory".
+// The script updates only the item's name, a link for ordering the product, and the supplier's phone number.
+loadToproductsHistory(\{
+    Name: this.Name + "updated data..",
+    SupplierOrderLink: this.SupplierOrderLink + "updated data..",
+    SupplierPhone: this.SupplierPhone + "updated data.."
+\});
+
+function deleteDocumentsBehavior(docId, collection, deleted) \{
+    // Prevents document deletions from destination collection "productsHistory" if the source document is deleted.
+    if (collection === "productsHistory" && deleted === true)
+        return false;
+    // If the source document information is updated (NOT deleted),
+    // and the source and destination collection names are different, like in this example,
+    // the script will delete then replace the destination document
+    // (with an incremented ID) to keep it current.
+    if (collection === "productsHistory" && deleted === false)
+        return true;
+\}
+`}
+
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/snowflake.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/snowflake.mdx
new file mode 100644
index 0000000000..398606cc77
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/snowflake.mdx
@@ -0,0 +1,334 @@
+---
+title: "Ongoing Tasks: Snowflake ETL"
+hide_table_of_contents: true
+sidebar_label: Snowflake ETL
+sidebar_position: 4
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Ongoing Tasks: Snowflake ETL
+
+
+* Snowflake is a widely used cloud-based [Data Warehouse](https://docs.snowflake.com/en/user-guide/warehouses)
+  implementation, designed to gather data from a variety of sources and consolidate
+  it in a central repository where it can be efficiently managed, distributed, and analyzed
+  using BI (Business Intelligence) tools.
+
+* Data can be easily transferred from a RavenDB database to a Snowflake warehouse via user-defined Snowflake
+  [ETL](../../../server/ongoing-tasks/etl/basics.mdx) tasks that continuously monitor the origin RavenDB database,
+  detect data items as they are added or modified, and update the warehouse on the fly.
+
+* Snowflake ETL is enabled with a RavenDB [Enterprise License](https://ravendb.net/buy).
+
+* In this page:
+  * [Creating a Task](../../../server/ongoing-tasks/etl/snowflake.mdx#creating-a-task)
+  * [Snowflake warehouse setup](../../../server/ongoing-tasks/etl/snowflake.mdx#snowflake-warehouse-setup)
+  * [Snowflake Tables](../../../server/ongoing-tasks/etl/snowflake.mdx#snowflake-tables)
+  * [Performance improvement suggestions](../../../server/ongoing-tasks/etl/snowflake.mdx#performance-improvement-suggestions)
+  * [Transformation Scripts](../../../server/ongoing-tasks/etl/snowflake.mdx#transformation-scripts)
+  * [`loadTo` Method](../../../server/ongoing-tasks/etl/snowflake.mdx#method)
+  * [Alternative Syntax](../../../server/ongoing-tasks/etl/snowflake.mdx#alternative-syntax)
+  * [Filtering](../../../server/ongoing-tasks/etl/snowflake.mdx#filtering)
+  * [Loading Other Documents](../../../server/ongoing-tasks/etl/snowflake.mdx#loading-other-documents)
+  * [Accessing Metadata](../../../server/ongoing-tasks/etl/snowflake.mdx#accessing-metadata)
+  * [Loading to Multiple Tables](../../../server/ongoing-tasks/etl/snowflake.mdx#loading-to-multiple-tables)
+  * [Document Extensions](../../../server/ongoing-tasks/etl/snowflake.mdx#document-extensions)
+  * [Transaction Processing](../../../server/ongoing-tasks/etl/snowflake.mdx#transaction-processing)
+  * [Advanced Options](../../../server/ongoing-tasks/etl/snowflake.mdx#advanced-options)
+
+
+## Creating a Task
+
+A Snowflake ETL task can be created using **Code** or via **Studio**.
+
+* To create the task using **Code**:
+  * Define a [Snowflake Connection String](https://github.com/snowflakedb/snowflake-connector-net/blob/master/doc/Connecting.md)
+    and register it using the [PutConnectionStringOperation](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#putconnectionstringoperation)
+    operation.
+    Find an example [Here](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-snowflake-connection-string).
+  * Define an **ETL Task**, associate it with your connection string, and run it using the
+    [AddEtlOperation](../../../client-api/operations/maintenance/etl/add-etl.mdx#add-etl-operation) operation.
+    Find an example [Here](../../../client-api/operations/maintenance/etl/add-etl.mdx#add-snowflake-etl-task).
+
+* To create the task using **Studio**:
+
+  Find [Here](../../../studio/database/tasks/ongoing-tasks/snowflake-etl-task.mdx)
+  a **detailed look** at the creation of a Snowflake ETL task using Studio.
+
+  In short:
+
+  * Open the Studio `Settings -> Ongoing Tasks` view and click the **Snowflake ETL** option.
+
+    ![Add New Snowflake Task](./assets/snowflake_etl_new_task.png)
+
+  * Use the New Snowflake ETL view to define and save the new task.
+
+    ![Define Snowflake Task](./assets/snowflake-etl-setup.png)
+
+
+
+
+## Snowflake warehouse setup
+
+Before running the ETL task, create the **destination tables** in the warehouse database
+that RavenDB records are to be transferred to.
+
+
+
+## Snowflake Tables
+
+Use the **Snowflake Tables** section to select which tables the ETL process is to transfer data to.
+For each table, specify a column to be used as the document ID column.
+RavenDB will fill the document ID column with the source document IDs and use them to handle updates and deletions.
+
+
+The column does **not** have to be the table's primary key.
+
+
+![Define Snowflake Tables](./assets/snowflake-etl-tables.png)
+
+
+### Performance improvement suggestions
+
+* **Insert data directly**:
+  The ETL process performs document updates by issuing DELETE and INSERT statements
+  to the relational database. If your system is _append-only_, you can boost the
+  ETL process performance by setting Insert Only Mode to insert the data directly,
+  without running a set of DELETE statements first.
+  * Using code (take a look [here](../../../client-api/operations/maintenance/etl/add-etl.mdx#add-snowflake-etl-task)
+    to see the Snowflake table definition in its context):
+
+
+{`new SnowflakeEtlTable
+\{
+    TableName = "Orders",
+    DocumentIdColumn = "Id",
+
+    // Set Insert Only Mode
+    InsertOnlyMode = true
+\},
+`}
+
+
+  * Using Studio: see [Define a Snowflake ETL Task](../../../studio/database/tasks/ongoing-tasks/snowflake-etl-task.mdx#define-a-snowflake-etl-task).
+
+* **SQL table indexes**:
+  To improve performance, you can define indexes on the SQL tables, at least for the column
+  used to hold the document ID.
+
+
+
+
+## Transformation Scripts
+
+An ETL task can apply multiple transformation scripts.
+Each script is defined per collection and **cannot** be empty.
+
+## `loadTo` Method
+
+A transformation script is executed per document once the document is created or modified.
+To load data to the destination table, call the `loadTo()` method and pass it a user-defined JavaScript object.
+
+The name of the table the document is inserted into is indicated as part of the `loadTo` method name.
+E.g., to write data into the database's `OrderLines` table, use the following call in the script:
+
+
+
+{`loadToOrderLines(\{ ... \});
+`}
+
+
+
+You must pass the method a JavaScript object, e.g. -
+
+
+
+{`loadToOrderLines(\{
+    OrderId: id(this),
+    Qty: line.Quantity,
+    Product: line.Product,
+    Cost: line.PricePerUnit
+\});
+`}
+
+
+### Alternative Syntax
+
+The target table name can be passed to the `loadTo` command separately, as a string argument, using the following syntax:
+`loadTo('Target', obj)`
+
+E.g., the following two calls to `loadTo` are equivalent.
+`loadToEmployees(this);`
+`loadTo('Employees', this);`
+
+
+
+ * The target name `'Employees'` in this syntax is **not** a variable and **cannot** be used as one;
+   it is simply a string literal of the target's name.
+ * Separating the target name from the `loadTo` command makes it possible to include symbols like
+   `-` and `.` in target names. This is not possible when the standard `loadToEmployees` syntax is
+   used because including special characters in the name of a JS function makes it invalid.
+
+### Filtering
+
+To filter documents out of the transfer or include them in it, apply logic of your choosing when calling `loadTo`.
+E.g.,
+
+
+
+{`if (this.ShipTo.Country === 'USA') \{
+    // load only orders shipped to USA
+    loadToOrders(\{ ... \});
+\}
+`}
+
+
+### Loading Other Documents
+
+Use the `load` method to load a document with a specified ID during script execution.
+
+
+
+{`var company = load(this.Company);
+`}
+
+
+### Accessing Metadata
+
+Access metadata as follows:
+
+
+
+{`var value = this['@metadata']['custom-metadata-key'];
+`}
+
+
+### Loading to Multiple Tables
+
+The `loadTo` method can be called as many times as needed by a single script.
+The following script demonstrates this by looping through the `Lines` array of an `Order` document,
+using consecutive `loadTo` calls to store each line's properties in the `OrderLines` database table.
+The process is concluded with a final `loadTo` call, storing the lines' total cost in the `Orders` table.
+
+
+
+{`var orderData = \{
+    Id: id(this),
+    OrderLinesCount: this.Lines.length,
+    TotalCost: 0
+\};
+
+for (var i = 0; i < this.Lines.length; i++) \{
+    var line = this.Lines[i];
+    orderData.TotalCost += line.PricePerUnit * line.Quantity;
+    loadToOrderLines(\{
+        OrderId: id(this),
+        Qty: line.Quantity,
+        Product: line.Product,
+        Cost: line.PricePerUnit
+    \});
+\}
+orderData.TotalCost = Math.round(orderData.TotalCost * 100) / 100;
+
+loadToOrders(orderData);
+`}
+
+
+### Document Extensions
+
+[Counters](../../../document-extensions/counters/overview.mdx), [Time series](../../../document-extensions/timeseries/overview.mdx),
+and [Revisions](../../../document-extensions/revisions/overview.mdx) are not supported by Snowflake ETL.
+
+#### Loading Attachments
+
+To store binary data that is kept as RavenDB attachments, use the `loadAttachment()` method.
+For example, if you have the following _Attachments_ table:
+
+
+
+{`CREATE TABLE [dbo].[Attachments]
+(
+    [Id] int identity primary key,
+    [OrderId] [nvarchar](50) NOT NULL,
+    [AttachmentName] [nvarchar](50) NULL,
+    [Data] [varbinary](max) NULL
+)
+`}
+
+
+
+then you can define the following script to load the document's attachments:
+
+
+
+{`var attachments = this['@metadata']['@attachments'];
+
+for (var i = 0; i < attachments.length; i++) \{
+    var attachment = \{
+        OrderId: id(this),
+        AttachmentName: attachments[i].Name,
+        Data: loadAttachment(attachments[i].Name)
+    \};
+    loadToAttachments(attachment);
+\}
+`}
+
+
+
+Attachments can also be accessed using the `getAttachments()` helper function rather than grabbing them from metadata.
+To check whether an attachment exists, use the `hasAttachment(name)` function.
+
+
+
+## Transaction Processing
+
+* All records created in a single ETL run, one per `loadTo` call, are sent in a single batch and processed
+  as part of the same transaction.
+
+* The ETL task will issue an SQL `INSERT` statement for each document it loads to the warehouse database.
+  The transaction is handled as an atomic unit, and the inserted documents will be stored in the warehouse
+  database using a `COMMIT` statement **only when the transaction completes**.
+
+  If the ETL task is interrupted while the transaction is underway, e.g. due to a server failover or because
+  the task was restarted for some other reason, the transaction will be rolled back and the ETL task will
+  process the interrupted batch from scratch, starting right after the last `COMMIT`.
+  This may cause a delay for users of the destination database, who will see incoming documents only once
+  a transaction completes and its documents are stored in the database.
+
+
+
+
+## Advanced Options
+
+#### Command timeout:
+You can set the number of seconds after which an SQL command will time out.
+Default: `null` (use provider default)
+
+* **Set using Code**:
+
+
+{`var snowflakeEtlConfig = new SnowflakeEtlConfiguration
+\{
+    Name = "task-name",
+    ConnectionStringName = "snowflake-connection-string-name",
+
+    // Set Command Timeout
+    CommandTimeout = 5
+\};
+`}
+
+
+
+* **Set via Studio**:
+
+  ![Advanced options](./assets/snowflake-etl-advanced_01.png)
+
+  ![Command timeout](./assets/snowflake-etl-advanced_02.png)
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/sql.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/sql.mdx
new file mode 100644
index 0000000000..d4a27dd62a
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/sql.mdx
@@ -0,0 +1,449 @@
+---
+title: "Ongoing Tasks: SQL ETL"
+hide_table_of_contents: true
+sidebar_label: SQL ETL
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Ongoing Tasks: SQL ETL
+
+
+* **SQL ETL** is a task that creates an [ETL process](../../../server/ongoing-tasks/etl/basics.mdx) where data from a RavenDB database is extracted, transformed, and loaded into a relational database as the destination.
+
+* In this page:
+  * [Supported relational databases](../../../server/ongoing-tasks/etl/sql.mdx#supported-relational-databases)
+  * [Creating the SQL ETL task](../../../server/ongoing-tasks/etl/sql.mdx#creating-the-sql-etl-task)
+  * [Configuring the SQL tables](../../../server/ongoing-tasks/etl/sql.mdx#configuring-the-sql-tables)
+  * [Transformation scripts](../../../server/ongoing-tasks/etl/sql.mdx#transformation-scripts)
+  * [The `loadTo` method](../../../server/ongoing-tasks/etl/sql.mdx#themethod)
+  * [Loading to multiple tables](../../../server/ongoing-tasks/etl/sql.mdx#loading-to-multiple-tables)
+  * [Loading related documents](../../../server/ongoing-tasks/etl/sql.mdx#loading-related-documents)
+  * [Loading attachments](../../../server/ongoing-tasks/etl/sql.mdx#loading-attachments)
+  * [Loading to VARCHAR and NVARCHAR columns](../../../server/ongoing-tasks/etl/sql.mdx#loading-to-varchar-and-nvarchar-columns)
+  * [Loading to specific column types](../../../server/ongoing-tasks/etl/sql.mdx#loading-to-specific-column-types)
+  * [Filtering](../../../server/ongoing-tasks/etl/sql.mdx#filtering)
+  * [Accessing the metadata](../../../server/ongoing-tasks/etl/sql.mdx#accessing-the-metadata)
+  * [Document extensions](../../../server/ongoing-tasks/etl/sql.mdx#document-extensions)
+  * [Advanced options](../../../server/ongoing-tasks/etl/sql.mdx#advanced-options)
+  * [Transaction processing](../../../server/ongoing-tasks/etl/sql.mdx#transaction-processing)
+  * [Creating the SQL ETL task from the Client API](../../../server/ongoing-tasks/etl/sql.mdx#creating-the-sql-etl-task-from-the-client-api)
+
+
+## Supported relational databases
+
+* RavenDB supports ETL processes to the following relational databases:
+  * Microsoft SQL Server
+  * PostgreSQL
+  * MySQL
+  * Oracle
+
+* You must specify the provider type for the target relational database when setting up the
+  [SQL connection string](../../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-an-sql-connection-string).
+
+* Before starting with SQL ETL, you need to create tables in the target relational database.
These tables will serve as the destinations for records generated by the ETL scripts.
+
+
+
+## Creating the SQL ETL task
+
+To create an SQL ETL task using the Client API, see [Creating the SQL ETL task from the Client API](../../../server/ongoing-tasks/etl/sql.mdx#creating-the-sql-etl-task-from-the-client-api).
+To create an SQL ETL task from the Studio, open `Tasks -> Ongoing Tasks`.
+
+![Configure SQL ETL task](./assets/sql-etl-setup.png)
+
+
+
+## Configuring the SQL tables
+
+Define the target tables where the SQL ETL task will load data.
+
+![Define SQL tables](./assets/sql-etl-tables.png)
+
+#### Document ID Column
+
+* For each table, you must specify a column that will store the document ID.
+  RavenDB will populate this column with the source document ID, enabling the handling of document updates and deletions.
+
+* Note that the specified column does not need to be the primary key of the table.
+
+* For performance reasons, you should define indexes on the SQL tables on the relational database side,
+  at least on the column used to store the document ID.
+
+#### Insert only
+
+* The SQL ETL process updates documents in the relational database using DELETE and INSERT statements.
+
+* If your system is _append-only_, you can enable the "Insert Only" toggle to instruct RavenDB to insert data without executing DELETE statements beforehand.
+  This can provide a significant performance boost for systems of this kind.
+
+
+
+## Transformation scripts
+
+The [basic characteristics](../../../server/ongoing-tasks/etl/basics.mdx) of an SQL ETL script are similar to those of other ETL types.
+The script defines what data to **extract** from the source document, how to **transform** this data,
+and which SQL table to **load** it to.
+
+A single SQL ETL task can have multiple transformation scripts.
+The script is defined per collection, and it cannot be empty.
+The script is executed per document from the source collection once the document is created or modified.
+
+### The `loadTo` method
+
+To specify which SQL table to load the data into, use either of the following methods in your script.
+The two methods are equivalent, offering alternative syntax:
+
+* **`loadTo<TableName>(obj)`**
+  * Here the target table is specified as part of the function name.
+  * The target _<TableName>_ in this syntax is **not** a variable and cannot be used as one;
+    it is simply a string literal of the target's name.
+
+* **`loadTo('TableName', obj)`**
+  * Here the target table is passed as an argument to the method.
+  * Separating the table name from the `loadTo` command makes it possible to include symbols like `'-'` and `'.'` in table names.
+    This is not possible when the `loadTo<TableName>` syntax is used because including special characters in the name of a JavaScript function makes it invalid.
+
+  | Parameter | Type | Description |
+  |-----------------|--------|--------------------------------------|
+  | **TableName** | string | The name of the target SQL table |
+  | **obj** | object | The object to transfer |
+
+For example, the following two calls, which load data to "OrdersTable", are equivalent:
+
+* `loadToOrdersTable(obj)`
+* `loadTo('OrdersTable', obj)`
+
+### Loading to multiple tables
+
+The `loadTo` method can be called multiple times in a single script.
+This allows you, for example, to split a single `Order` document with its `Lines` collection into two tables, inserting multiple rows:
+
+The following is a sample script that processes documents from the Orders collection:
+
+
+
+{`// Create an orderData object
+// ==========================
+var orderData = \{
+    Id: id(this),
+    OrderLinesCount: this.Lines.length,
+    TotalCost: 0
+\};
+
+// Update the orderData's TotalCost field
+// ======================================
+for (var i = 0; i < this.Lines.length; i++) \{
+    var line = this.Lines[i];
+    var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount);
+    orderData.TotalCost += cost;
+
+    // Load the object to SQL table 'OrderLines'
+    // =========================================
+    loadToOrderLines(\{
+        OrderId: id(this),
+        Qty: line.Quantity,
+        Product: line.Product,
+        Cost: line.PricePerUnit
+    \});
+\}
+
+orderData.TotalCost = Math.round(orderData.TotalCost * 100) / 100;
+
+// Load to SQL table 'Orders'
+// ==========================
+loadToOrders(orderData);
+`}
+
+
+### Loading related documents
+
+Use the `load` method to load a related document with the specified ID during script execution.
+
+
+
+{`var company = load(this.Company);
+`}
+
+
+### Loading attachments
+
+You can store binary data that is kept as attachments in RavenDB using the `loadAttachment()` method.
+For example, if you have the following _Attachments_ table:
+
+
+
+{`CREATE TABLE [dbo].[Attachments]
+(
+    [Id] int identity primary key,
+    [OrderId] [nvarchar](50) NOT NULL,
+    [AttachmentName] [nvarchar](50) NULL,
+    [Data] [varbinary](max) NULL
+)
+`}
+
+
+
+then you can define the following script that loads the document's attachments:
+
+
+
+{`var attachments = this['@metadata']['@attachments'];
+
+for (var i = 0; i < attachments.length; i++) \{
+    var attachment = \{
+        OrderId: id(this),
+        AttachmentName: attachments[i].Name,
+        Data: loadAttachment(attachments[i].Name)
+    \};
+    loadToAttachments(attachment);
+\}
+`}
+
+
+
+* Attachments can also be accessed using the `getAttachments()` helper function
+  (instead of grabbing them from metadata).
+* The existence of an attachment can be checked with the `hasAttachment(name)` function.
+
+### Loading to VARCHAR and NVARCHAR columns
+
+Two additional functions are designed specifically for working with VARCHAR and NVARCHAR types:
+
+| Function | Description |
+|-------------------------------|------------------------------------------------------------------------------------|
+| `varchar(value, size = 50)` | Defines the parameter type as VARCHAR, with the option to specify its size
(default is 50 if not provided). | +| `nvarchar(value, size = 50)` | Defines the parameter type as NVARCHAR, with the option to specify its size
(default is 50 if not specified). |
+
+
+
+{`var names = this.Name.split(' ');
+
+loadToUsers(
+\{
+    FirstName: varchar(names[0], 30),
+    LastName: nvarchar(names[1]),
+\});
+`}
+
+
+### Loading to specific column types
+
+The SQL type of the target column can be explicitly specified in the SQL ETL script.
+This is done by defining the `Type` and the `Value` properties for the data being loaded.
+
+ * **Type**:
+   The type specifies the SQL column type the value is loaded to.
+   The type should correspond to the data types used in the target relational database.
+
+   Supported enums for `Type` include:
+   * _SqlDbType_ - see [Microsoft SQL Server](https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql)
+   * _NpgsqlDbType_ - see [PostgreSQL](https://www.npgsql.org/doc/api/NpgsqlTypes.NpgsqlDbType.html)
+   * _MySqlDbType_ - see [MySQL Data Types](https://dev.mysql.com/doc/refman/8.4/en/data-types.html)
+   * _OracleDbType_ - see [Oracle Data Types](https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/Data-Types.html)
+
+   Some databases allow combining enum values using `|`.
+   For example, using `Array | Double` for the Type is valid for PostgreSQL.
+
+   If no type is specified, the column type will be detected automatically.
+
+ * **Value**:
+   The value contains the actual data to be loaded into the column.
+
+
+
+{`var orderData = \{
+    Id: id(this),
+    OrderLinesCount: this.OrderLines.length,
+    Quantities: \{
+        // Specify the Type and Value for 'Quantities':
+        // ============================================
+        Type: 'Array | Double',
+        Value: this.OrderLines.map(function(l) \{return l.Quantity;\})
+    \},
+    Products: \{
+        // Specify the Type and Value for 'Products':
+        // ==========================================
+        Type: 'Array | Text',
+        Value: this.OrderLines.map(function(l) \{return l.Product;\})
+    \},
+\};
+
+// Load the data into the 'Orders' table
+loadToOrders(orderData);
+`}
+
+
+### Filtering
+
+To filter some documents out from the ETL, simply omit the `loadTo` call:
+
+
+
+{`if (this.ShipTo.Country === 'USA') \{
+    // Load only orders shipped to USA
+    loadToOrders(\{ ... \});
+\}
+`}
+
+
+### Accessing the metadata
+
+You can access the metadata in the following way:
+
+
+
+{`var value = this['@metadata']['custom-metadata-key'];
+`}
+
+
+### Document extensions
+
+The SQL ETL task does not support sending [Counters](../../../document-extensions/counters/overview.mdx),
+[Time series](../../../document-extensions/timeseries/overview.mdx), or [Revisions](../../../document-extensions/revisions/overview.mdx).
+
+
+
+## Advanced options
+
+* **Command timeout**
+  The number of seconds after which an SQL command will time out.
+  It overrides the value defined in the [ETL.SQL.CommandTimeoutInSec](../../../server/configuration/etl-configuration.mdx#etlsqlcommandtimeoutinsec) configuration key.
+  Default: `null` (use provider default).
+* **Parameterized deletes**
+  Control whether DELETE statements generated during the ETL process use parameterized SQL queries,
+  rather than embedding values directly in the query.
+  Default: `true`.
+* **Table quotation**
+  Control whether table names in the generated SQL statements are enclosed in quotation marks.
+  Default: `true`.
+* **Force recompile query**
+  Control whether to force the SQL Server to recompile the query statement using `OPTION(RECOMPILE)`.
+  Default: `false`.
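+
+As a rough sketch (not part of the original samples; the values are illustrative), these options correspond to
+properties of the `SqlEtlConfiguration` class used in the Client API section below:
+
+
+
+{`var configuration = new SqlEtlConfiguration
+\{
+    Name = "mySqlEtlTaskName",
+    ConnectionStringName = "sql-connection-string-name",
+
+    // Advanced options (illustrative values):
+    CommandTimeout = 90,          // seconds; null falls back to the provider default
+    ParameterizeDeletes = true,   // use parameterized DELETE statements
+    QuoteTables = true,           // enclose table names in quotation marks
+    ForceQueryRecompile = false   // add OPTION(RECOMPILE) to generated statements (SQL Server)
+\};
+`}
+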
+
+
+
+## Transaction processing
+
+All records created in a single ETL run, one for each `loadTo` call, are sent in a single batch and processed within the same transaction.
+
+
+
+## Creating the SQL ETL task from the Client API
+
+
+
+{`// Define a connection string to a SQL database destination
+// ========================================================
+var sqlConStr = new SqlConnectionString
+\{
+    Name = "sql-connection-string-name",
+
+    // Define destination factory name
+    FactoryName = "MySql.Data.MySqlClient",
+
+    // Define the destination database
+    // May also need to define authentication and encryption parameters
+    // By default, encrypted databases are sent over encrypted channels
+    ConnectionString = "host=127.0.0.1;user=root;database=Northwind"
+\};
+
+// Deploy (send) the connection string to the server via the PutConnectionStringOperation
+// ======================================================================================
+var putConnectionStringOp = new PutConnectionStringOperation<SqlConnectionString>(sqlConStr);
+PutConnectionStringResult connectionStringResult = store.Maintenance.Send(putConnectionStringOp);
+`}
+
+
+
+
+{`// Define the SQL ETL task configuration
+// =====================================
+var sqlConfiguration = new SqlEtlConfiguration()
+\{
+    Name = "mySqlEtlTaskName",
+    ConnectionStringName = "sql-connection-string-name",
+
+    SqlTables =
+    \{
+        new SqlEtlTable
+        \{
+            TableName = "Orders", DocumentIdColumn = "Id", InsertOnlyMode = false
+        \},
+        new SqlEtlTable
+        \{
+            TableName = "OrderLines", DocumentIdColumn = "OrderId", InsertOnlyMode = false
+        \},
+    \},
+
+    Transforms =
+    \{
+        new Transformation()
+        \{
+            Name = "scriptName",
+            Collections = \{ "Orders" \},
+
+            Script = @"
+                var orderData = \{
+                    Id: id(this),
+                    OrderLinesCount: this.Lines.length,
+                    TotalCost: 0
+                \};
+
+                for (var i = 0; i < this.Lines.length; i++) \{
+                    var line = this.Lines[i];
+                    var cost = (line.Quantity * line.PricePerUnit) * ( 1 - line.Discount);
+                    orderData.TotalCost += cost;
+
+                    loadToOrderLines(\{
+                        OrderId: id(this),
+                        Qty: line.Quantity,
+                        Product: line.Product,
+                        Cost: line.PricePerUnit
+                    \});
+                \}
+
+                orderData.TotalCost = Math.round(orderData.TotalCost * 100) / 100;
+                loadToOrders(orderData);
+            ",
+
+            ApplyToAllDocuments = false
+        \}
+    \}
+\};
+
+// Deploy the SQL ETL task to the server
+// =====================================
+var addSqlEtlOperation = new AddEtlOperation<SqlConnectionString>(sqlConfiguration);
+store.Maintenance.Send(addSqlEtlOperation);
+`}
+
+
+`SqlEtlConfiguration`:
+
+| Property | Type | Description |
+|--------------------------|-------------------------|------------------------------------------------------------------------------------------------|
+| **Name** | `string` | The SQL ETL task name. |
+| **ConnectionStringName** | `string` | The registered connection string name. |
+| **SqlTables** | `List<SqlEtlTable>` | A list of SQL tables that the scripts will load data to. |
+| **Transforms** | `List<Transformation>` | Your transformation scripts. |
+| **QuoteTables** | `bool` | Control whether table names in the generated SQL statements are enclosed in quotation marks.
Default is `true`. | +| **ParameterizeDeletes** | `bool` | Control whether DELETE statements generated during the ETL process use parameterized SQL queries.
Default is `true`. |
+| **ForceQueryRecompile** | `bool` | Set to `true` to force the SQL Server to recompile the query statement using `OPTION(RECOMPILE)`.
Default is `false`. |
+| **CommandTimeout** | `int?` | Number of seconds after which the SQL command will time out. |
+
+`SqlEtlTable`:
+
+| Property | Type | Description |
+|----------------------|----------|----------------------------------------------------------------------------------------------------------|
+| **TableName** | `string` | The table name your script will load data to. |
+| **DocumentIdColumn** | `string` | The column in the destination table that will store the document IDs. |
+| **InsertOnlyMode** | `bool` | When set to `true`, RavenDB will insert data directly without executing DELETE statements beforehand.
Default is `false`. |
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/etl/test-scripts.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/etl/test-scripts.mdx
new file mode 100644
index 0000000000..5805ec33ed
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/etl/test-scripts.mdx
@@ -0,0 +1,68 @@
+---
+title: "Ongoing Tasks: Testing ETL Scripts"
+hide_table_of_contents: true
+sidebar_label: Testing Scripts
+sidebar_position: 3
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Ongoing Tasks: Testing ETL Scripts
+
+
+* ETL transformations can be tested using the `Test script` option. Edit your script in the Studio to enter test mode.
+
+* In this page:
+  * [Testing Raven ETL Scripts](../../../server/ongoing-tasks/etl/test-scripts.mdx#testing-raven-etl-scripts)
+  * [Testing SQL ETL Scripts](../../../server/ongoing-tasks/etl/test-scripts.mdx#testing-sql-etl-scripts)
+
+
+
+## Testing Raven ETL Scripts
+
+![Figure 1. Test RavenDB ETL script](./assets/test-raven.png)
+
+
+
+* In order to test your transformation script, you need to:
+
+  1. Enter the ID of a document that will be used.
+  2. Choose the test mode:
+     - `Document put / update` - use to see script results when the document is created or modified.
+     - `Document delete` - use to see script results when the document is deleted (note that [delete behavior functions](../../../server/ongoing-tasks/etl/raven.mdx#deletions) can be called then).
+  3. Click the `Test` button.
+
+
+  After executing the script in test mode you'll see the following tabs:
+
+  4. `Document Preview` displays the original document used in the test.
+  5. `Test Results` presents the list of commands that will be sent to the destination database.
+  6. `Debug output` contains all debug output created by calls to the `output()` function in the script body. The function accepts a string parameter.
+
+
+
+
+
+## Testing SQL ETL Scripts
+
+![Figure 2. Test SQL ETL script](./assets/test-sql.png)
+
+
+
+* Testing SQL transformations requires the same steps as testing Raven ETL, although there is one additional option available, `Execute and rollback the test transaction`:
+
+  - unchecked (default): no SQL statements are executed; only the generated SQL statements are shown,
+  - checked: all generated SQL statements are executed against the target SQL database, and the transaction is rolled back when done.
+
+* The test results displayed in `Test Results` are the SQL statements that would be sent to the relational database. Depending on the `Execute and rollback the test transaction` option, they
+will be parameterized (option checked) or values will be inserted directly into the statements (option unchecked).
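+
+For reference, the entries shown in the `Debug output` tab (in either test mode) come from `output()` calls
+placed in the transformation script itself. A minimal sketch (the script content here is illustrative, not a documented sample):
+
+
+
+{`// Log a line to the Debug output tab for every processed document
+output('Processing document: ' + id(this));
+
+loadToOrders(\{ Total: this.Total \});
+`}
+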
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/external-replication.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/external-replication.mdx
new file mode 100644
index 0000000000..9cba866038
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/external-replication.mdx
@@ -0,0 +1,214 @@
+---
+title: "Server: Ongoing Tasks: External Replication"
+hide_table_of_contents: true
+sidebar_label: External Replication
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Server: Ongoing Tasks: External Replication
+
+
+* Schedule an **External Replication Task** to have a _live_ replica of your data in another database:
+  * In a separate RavenDB cluster [on local machines](../../start/getting-started.mdx) or [a cloud instance](/cloud/cloud-overview),
+    which can be used as a failover if the source cluster is down.
+  * In the same cluster if you want a live copy that won't be a client failover target.
+
+* "Live" means that the replica is up to date at all times.
+  Any changes in the source database will be reflected in the replica once they occur.
+
+* This ongoing task replicates **one-way**, from the source to the destination.
+  For additional functionality such as filtering and two-way replication, consider [Hub/Sink Replication](../../server/ongoing-tasks/hub-sink-replication.mdx).
+
+* To replicate between two separate, secure RavenDB servers,
+  you need to [pass a client certificate](../../server/ongoing-tasks/external-replication.mdx#step-by-step-guide) from the source server to the destination.
+
+* The External Replication task **does _not_ create a backup** of your data and indexes.
See more in [Backup -vs- Replication](../../studio/database/tasks/backup-task.mdx#backup-task--vs--replication-task)
+
+* In this page:
+  * [General Information about External Replication Task](../../server/ongoing-tasks/external-replication.mdx#general-information-about-external-replication-task)
+  * [Code Sample](../../server/ongoing-tasks/external-replication.mdx#code-sample)
+  * [Step-by-Step Guide](../../server/ongoing-tasks/external-replication.mdx#step-by-step-guide)
+  * [Definition](../../server/ongoing-tasks/external-replication.mdx#definition)
+  * [Offline Behavior](../../server/ongoing-tasks/external-replication.mdx#offline-behavior)
+  * [Delayed Replication](../../server/ongoing-tasks/external-replication.mdx#delayed-replication)
+
+
+
+## General Information about External Replication Task
+
+**What is being replicated:**
+
+* All database documents and related data:
+  * [Attachments](../../document-extensions/attachments/what-are-attachments.mdx)
+  * [Revisions](../../document-extensions/revisions/overview.mdx)
+  * [Counters](../../document-extensions/counters/overview.mdx)
+  * [Time Series](../../document-extensions/timeseries/overview.mdx)
+
+**What is _not_ being replicated:**
+
+* Server and cluster level features:
+  * [Indexes](../../indexes/creating-and-deploying.mdx)
+  * [Conflict resolution scripts](../../server/clustering/replication/replication-conflicts.mdx#conflict-resolution-script)
+  * [Compare-Exchange](../../client-api/operations/compare-exchange/overview.mdx)
+  * [Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx)
+  * [Identities](../../server/kb/document-identifier-generation.mdx#identity-id)
+  * Ongoing tasks
+    * [ETL](../../server/ongoing-tasks/etl/basics.mdx)
+    * [Backup](../../studio/database/tasks/backup-task.mdx)
+    * [Hub/Sink Replication](../../studio/database/tasks/ongoing-tasks/hub-sink-replication/overview.mdx)
+
+
+
+  **Why are cluster-level features not replicated?**
+
+  RavenDB is designed with a cluster-level data ownership model to prevent conflicts between clusters,
+  especially in scenarios where ACID transactions are critical.
+
+  This approach ensures that certain features, such as policies, configurations, and ongoing tasks,
+  remain specific to each cluster, avoiding potential inconsistencies.
+
+  To explore this concept further, refer to the [Data Ownership in a Distributed System](https://ayende.com/blog/196769-B/data-ownership-in-a-distributed-system) blog post.
+
+
+**Conflicts:**
+
+* Two databases that have an External Replication task defined between them will detect and resolve document
+  [conflicts](../../server/clustering/replication/replication-conflicts.mdx) according to each database's conflict resolution policy.
+* It is recommended to have the same [policy configuration](../../server/clustering/replication/replication-conflicts.mdx#configuring-conflict-resolution-using-the-client)
+  on both the source and the target databases.
+
+**Sharding Support:**
+
+External replication is supported by both [sharded](../../sharding/overview.mdx) and non-sharded databases.
+Learn more about the way external replication works on a sharded database [here](../../sharding/external-replication.mdx).
+
+
+
+## Code Sample
+
+The required elements of an External Replication task are:
+
+* The `UpdateExternalReplicationOperation()` method.
+* The destination server needs the [certificate from the source server](../../server/security/authentication/certificate-management.mdx#enabling-communication-between-servers:-importing-and-exporting-certificates)
+  so that it will trust the source.
+* The [connection string](../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#add-a-raven-connection-string)
+  with the destination server URL and any other details needed to access the destination server.
+* The following properties in the `ExternalReplication` object:
+  * **ConnectionStringName**
+    The connection string name.
+  * **Name**
+    The External Replication task name.
+
+
+
+{`public class ExternalReplication
+\{
+    public TimeSpan DelayReplicationFor \{ get; set; \}
+    public string Name \{ get; set; \}
+    public string ConnectionStringName \{ get; set; \}
+    public string MentorNode \{ get; set; \}
+\}
+`}
+
+
+
+Optional elements include the following properties in the `ExternalReplication` object:
+
+* **MentorNode**
+  The preferred responsible node in the source server.
+* **DelayReplicationFor**
+  The amount of time to delay replication.
+  The following sample shows a 30-minute delay. The delay can also be set in days, hours, and seconds.
+
+
+
+{`// Define external replication with a mentor node and delay timespan
+await sourceStore.Maintenance.SendAsync(
+    new UpdateExternalReplicationOperation(new ExternalReplication
+\{
+    ConnectionStringName = connectionStrName,
+    Name = "task-name",
+    MentorNode = "B",
+    DelayReplicationFor = TimeSpan.FromMinutes(30)
+\}));
+`}
+
+
+
+`ExternalReplication` properties:
+
+
+
+{`public class ExternalReplication
+\{
+    public TimeSpan DelayReplicationFor \{ get; set; \}
+    public string Name \{ get; set; \}
+    public string ConnectionStringName \{ get; set; \}
+    public string MentorNode \{ get; set; \}
+\}
+`}
+
+
+
+
+
+## Step-by-Step Guide
+
+To create an external replication task via the RavenDB Studio, see the [Step-by-Step Guide](../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx#step-by-step-guide).
+
+
+
+## Definition
+
+To learn how to define an external replication task via code, see the [code sample](../../server/ongoing-tasks/external-replication.mdx#code-sample).
+
+You can also configure external replication tasks [via RavenDB Studio](../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx#definition).
+
+
+
+
+## Offline Behavior
+
+* **When the source cluster is down** (and there is no leader):
+
+  * Creating a _new_ Ongoing Task is a Cluster-Wide operation;
+    thus, a new Ongoing External Replication Task ***cannot*** be scheduled.
+
+  * If an External Replication Task was _already_ defined and active when the cluster went down,
+    then the task will _not_ be active and no replication will take place.
+
+* **When the node responsible for the external replication task is down:**
+
+  * If the responsible node for the External Replication Task is down,
+    then another node from the Database Group will take ownership of the task so that the external replica stays up to date.
+
+* **When the destination node is down:**
+
+  * The external replication will wait until the destination is reachable again and proceed from where it left off.
+
+  * If there is a cluster on the other side, and the URL addresses of the destination database group nodes are listed in the connection string,
+    then when the destination node is down, the replication task will simply start transferring data to one of the other nodes specified.
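+
+As a sketch of that last point, a Raven connection string can list several destination-node URLs in
+`TopologyDiscoveryUrls`, letting the task fail over between them (all names and URLs below are illustrative):
+
+
+
+{`await store.Maintenance.SendAsync(new PutConnectionStringOperation<RavenConnectionString>(
+    new RavenConnectionString
+    \{
+        Name = "target-cluster-connection-string",
+        Database = "TargetDB",
+        // With several destination URLs listed, the task can switch to another
+        // destination node when the current one becomes unreachable:
+        TopologyDiscoveryUrls = new[] \{ "https://b1.example.com", "https://b2.example.com" \}
+    \}));
+`}
+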
+
+
+
+## Delayed Replication
+
+In RavenDB we introduced a new kind of replication, _delayed replication_. It replicates data that is
+delayed by `X` amount of time.
+_Delayed replication_ works just like normal replication, but instead of sending data immediately,
+it waits `X` amount of time.
+Having a delayed instance of a database allows you to "go back in time" and undo contamination of your data
+caused by a faulty patch script or other human errors.
+While you can and should always use backups for those cases, having a live database makes it quick to fail over,
+preventing business losses while you repair the faulty databases.
+
+* To set delayed replication, see "3. **Set Replication Delay Time**" in the [definition instructions](../../studio/database/tasks/ongoing-tasks/external-replication-task.mdx#definition).
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/hub-sink-replication.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/hub-sink-replication.mdx
new file mode 100644
index 0000000000..422adbb597
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/hub-sink-replication.mdx
@@ -0,0 +1,371 @@
+---
+title: "Hub/Sink Replication"
+hide_table_of_contents: true
+sidebar_label: Hub/Sink Replication
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Hub/Sink Replication
+
+
+Hub/Sink replication is used to maintain a live replica of a database,
+or a chosen part of it, through a secure connection between ongoing Hub
+and Sink replication tasks.
+
+RavenDB instances distributed onboard mobile library buses, for example,
+can collect data locally (e.g. bus GPS coordinates and book returns and
+borrows), and replicate it via local Sink tasks to the central library's
+Hub whenever they are online.
+
+
+
+* Learn more about **Hub/Sink replication** [Here](../../studio/database/tasks/ongoing-tasks/hub-sink-replication/overview.mdx).
+* You can use the Studio to define
+  [Hub](../../studio/database/tasks/ongoing-tasks/hub-sink-replication/replication-hub-task.mdx)
+  and [Sink](../../studio/database/tasks/ongoing-tasks/hub-sink-replication/replication-sink-task.mdx)
+  tasks.
+
+
+
+* In this page:
+  * [What is and is not replicated?](../../server/ongoing-tasks/hub-sink-replication.mdx#what-is-and-is-not-replicated?)
+  * [Defining Replication Tasks](../../server/ongoing-tasks/hub-sink-replication.mdx#defining-replication-tasks)
+  * [Defining a Replication Hub](../../server/ongoing-tasks/hub-sink-replication.mdx#defining-a-replication-hub)
+  * [Defining a Hub Access](../../server/ongoing-tasks/hub-sink-replication.mdx#defining-a-hub-access)
+  * [Defining a Replication Sink](../../server/ongoing-tasks/hub-sink-replication.mdx#defining-a-replication-sink)
+  * [Defining a Connection String](../../server/ongoing-tasks/hub-sink-replication.mdx#defining-a-connection-string)
+  * [Usage Sample](../../server/ongoing-tasks/hub-sink-replication.mdx#usage-sample)
+  * [Failover](../../server/ongoing-tasks/hub-sink-replication.mdx#failover)
+  * [Backward Compatibility](../../server/ongoing-tasks/hub-sink-replication.mdx#backward-compatibility)
+
+## What is and is not replicated?
+
+
+
+After the tasks are defined, **changed** documents whose replication is allowed by
+both the Hub and the Sink filters will be replicated.
+
+If you want the entire database to be replicated in the destination, you can [import the database](../../studio/database/tasks/import-data/import-from-ravendb.mdx)
+into the destination.
+
+After the data is in the destination server, setting up a hub/sink replication ongoing task will keep the two databases up to date.
+
+
+
+**What is being replicated:**
+
+ * All database documents and related data:
+   * [Attachments](../../document-extensions/attachments/what-are-attachments.mdx)
+   * [Revisions](../../document-extensions/revisions/overview.mdx)
+   * [Counters](../../document-extensions/counters/overview.mdx)
+   * [Time Series](../../document-extensions/timeseries/overview.mdx)
+
+**What is _not_ being replicated:**
+
+ * Server and cluster level features:
+   * [Indexes](../../indexes/creating-and-deploying.mdx)
+   * [Conflict resolver definitions](../../server/clustering/replication/replication-conflicts.mdx#conflict-resolution-script)
+   * [Compare-Exchange](../../client-api/operations/compare-exchange/overview.mdx)
+   * [Subscriptions](../../client-api/data-subscriptions/what-are-data-subscriptions.mdx)
+   * [Identities](../../server/kb/document-identifier-generation.mdx#identity-id)
+   * Ongoing tasks
+     * [ETL](../../server/ongoing-tasks/etl/basics.mdx)
+     * [Backup](../../studio/database/tasks/backup-task.mdx)
+     * [Hub/Sink Replication](../../studio/database/tasks/ongoing-tasks/hub-sink-replication/overview.mdx)
+
+
+To provide an architecture that prevents conflicts between clusters, especially when ACID transactions are important,
+RavenDB is designed so that data ownership is at the cluster level.
+To learn more, see [Data Ownership in a Distributed System](https://ayende.com/blog/196769-B/data-ownership-in-a-distributed-system).
+
+It is also best to ensure that each cluster defines policies, configurations, and ongoing tasks that are relevant for it.
+
+
+
+
+
+## Defining Replication Tasks
+
+To start replication via Hub and Sink tasks, you need to define -
+
+1. **A Hub task**
+2. **Hub Access(es)**
+   * Multiple Sink tasks can connect to a Hub using each Access you define for it.
+   * Each Access has an associated certificate that is used by the Sink to
+     authenticate with the Hub. This certificate is used to identify the specific
+     Access and the relevant filters for the connection.
+3. **Sink task(s)**
+4. **Filtering**
+   * You can enable or disable *replication filtering*, and specify the paths
+     of documents whose replication is allowed.
+   * Allowed paths are defined separately for the Hub and for the Sink.
+   * You can filter incoming and outgoing replication by defining separate lists of allowed paths
+     for *incoming* and *outgoing* documents.
+   * Only documents that are allowed by both the hub and sink filters will be replicated.
+
+## Defining a Replication Hub
+
+Use `PutPullReplicationAsHubOperation` to register a new Hub task,
+and configure it using a `PullReplicationDefinition` class.
+ + + +{`await store.Maintenance.SendAsync(new PutPullReplicationAsHubOperation + (new PullReplicationDefinition \{ + Name = "Hub1_Bidirectional", + Mode = PullReplicationMode.SinkToHub | PullReplicationMode.HubToSink, + WithFiltering = true + \})); +`} + + + +* **`PutPullReplicationAsHubOperation` definition** + + +{`public PutPullReplicationAsHubOperation(string name) +public PutPullReplicationAsHubOperation(PullReplicationDefinition pullReplicationDefinition) +`} + + + +* **`PullReplicationDefinition` parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `DelayReplicationFor` | `TimeSpan` | Amount of time to wait before starting replication | + | `Disabled` | `bool` | Disable task or leave it enabled | + | `MentorNode` | `string` | Preferred Mentor Node | + | `Mode` | `PullReplicationMode` | Data Direction (HubToSink, SinkToHub, or Both) | + | `Name` | `string` | Task Name | + | `TaskId` | `long` | Task ID | + | `WithFiltering` | `bool` | Allow Replication Filtering | +### Defining a Hub Access + +Use `RegisterReplicationHubAccessOperation` to define a Hub Access, +and configure it using a `ReplicationHubAccess` class. + + + +{`await store.Maintenance.SendAsync(new RegisterReplicationHubAccessOperation + ("Hub1_Bidirectional", new ReplicationHubAccess \{ + Name = "Access1", + AllowedSinkToHubPaths = new[] + \{ + "products/*", + \}, + AllowedHubToSinkPaths = new[] + \{ + "products/*", + \}, + CertificateBase64 = Convert.ToBase64String(pullCert.Export(X509ContentType.Cert)) + \})); +`} + + + +* **`RegisterReplicationHubAccessOperation` definition** + + +{`public RegisterReplicationHubAccessOperation(string hubName, ReplicationHubAccess access) +`} + + + +* **`ReplicationHubAccess` parameters** + + | Parameters | Type | Description | + |:-------------|:-------------|:-------------| + | `Name` | `string` | Task Name | + | `CertificateBase64` | `string` | Task Certificate | + | `AllowedHubToSinkPaths` | `string[]` | Allowed paths from Hub to Sink | + | `AllowedSinkToHubPaths` | `string[]` | Allowed paths from Sink to Hub | + + +To **Remove** an existing Access, use `UnregisterReplicationHubAccessOperation`. + +* **`UnregisterReplicationHubAccessOperation` definition**: + + +{`public UnregisterReplicationHubAccessOperation(string hubName, string thumbprint) +`} + + + + + +## Defining a Replication Sink + +Use `UpdatePullReplicationAsSinkOperation` to define a Sink task, +and configure it using a `PullReplicationAsSink` class. 
+
+
+
+{`await store.Maintenance.SendAsync(new UpdatePullReplicationAsSinkOperation
+    (new PullReplicationAsSink \{
+        ConnectionStringName = dbName + "_ConStr",
+        Mode = PullReplicationMode.SinkToHub | PullReplicationMode.HubToSink,
+        CertificateWithPrivateKey = Convert.ToBase64String(pullCert.Export(X509ContentType.Pfx)),
+        HubName = "Bidirectional",
+        AllowedHubToSinkPaths = new[]
+        \{
+            "employees/8-A"
+        \},
+        AllowedSinkToHubPaths = new[]
+        \{
+            "employees/8-A"
+        \}
+    \}));
+`}
+
+
+
+* **`UpdatePullReplicationAsSinkOperation` definition**
+
+
+{`public UpdatePullReplicationAsSinkOperation(PullReplicationAsSink pullReplication)
+`}
+
+
+
+* **`PullReplicationAsSink` parameters**
+
+  | Parameters | Type | Description |
+  |:-------------|:-------------|:-------------|
+  | `Mode` | `PullReplicationMode` | Data Direction (HubToSink, SinkToHub, or Both) |
+  | `AllowedHubToSinkPaths` | `string[]` | Allowed paths from Hub to Sink |
+  | `AllowedSinkToHubPaths` | `string[]` | Allowed paths from Sink to Hub |
+  | `CertificateWithPrivateKey` | `string` | A certificate with the Sink's Private key |
+  | `CertificatePassword` | `string` | Certificate Password |
+  | `AccessName` | `string` | Access Name to connect to |
+  | `HubName` | `string` | Hub Name to connect to |
+
+### Defining a Connection String
+
+The Sink needs a connection string to locate the Hub task it is to use.
+
+Use `PutConnectionStringOperation` to define a connection string,
+and configure it using a `RavenConnectionString` class.
+
+
+
+{`await storeA.Maintenance.SendAsync(
+    new PutConnectionStringOperation<RavenConnectionString>(new RavenConnectionString
+    \{
+        Database = dbNameB,
+        Name = dbName + "_ConStr",
+        TopologyDiscoveryUrls = store.Urls
+    \}));
+`}
+
+
+
+Learn about Connection Strings [here](../../client-api/operations/maintenance/connection-strings/add-connection-string.mdx#operations-how-to-add-a-connection-string).
+
+
+
+
+## Usage Sample
+
+
+
+{`// Issue a certificate
+var pullCert = new X509Certificate2("/path/to/cert.pfx",
+    (string)null, X509KeyStorageFlags.Exportable);
+
+// Define a Hub task
+await store.Maintenance.SendAsync(new PutPullReplicationAsHubOperation(
+    new PullReplicationDefinition
+    \{
+        Name = "Hub1_SinkToHub_Filtered",
+        Mode = PullReplicationMode.SinkToHub | PullReplicationMode.HubToSink,
+        WithFiltering = true
+    \}));
+
+// Define Hub access
+await store.Maintenance.SendAsync(new RegisterReplicationHubAccessOperation(
+    "Hub1_SinkToHub_Filtered", new ReplicationHubAccess
+    \{
+        Name = "Access1",
+        AllowedSinkToHubPaths = new[]
+        \{
+            "products/*",
+            "orders/*"
+        \},
+
+        // The public portion of the certificate, in base 64
+        CertificateBase64 = Convert.ToBase64String(pullCert.Export(X509ContentType.Cert))
+    \}));
+
+// Define a Connection String
+await store.Maintenance.SendAsync(
+    new PutConnectionStringOperation<RavenConnectionString>(new RavenConnectionString
+    \{
+        Database = dbNameB,
+        Name = dbNameB + "_ConStr",
+        TopologyDiscoveryUrls = store.Urls
+    \}));
+
+// Define a Sink task
+await store.Maintenance.SendAsync(
+    new UpdatePullReplicationAsSinkOperation(new PullReplicationAsSink
+    \{
+        ConnectionStringName = dbNameB + "_ConStr",
+        Mode = PullReplicationMode.SinkToHub,
+        CertificateWithPrivateKey = Convert.ToBase64String(pullCert.Export(X509ContentType.Pfx)),
+        HubName = "Hub1_SinkToHub_Filtered"
+    \}));
+`}
+
+
+
+
+
+## Failover
+
+Since the Sink task always initiates the replication, it is
+also the Sink's responsibility to reconnect on network failure.
+### Hub Failure
+
+As part of the connection handshake, the Sink fetches an ordered list
+of nodes from the Hub cluster. If a preferred node is defined (by explicitly
+selecting a mentor node), it will be at the top of this list.
+The Sink will try to connect to the first node in the list, and proceed
+down the list with every failed attempt.
+If the connection fails with all nodes, the Sink will request the list again.
+
+### Sink Failure
+
+If the failure occurs on the Sink node, the Sink cluster will
+select a different node for the job.
+
+
+
+## Backward Compatibility
+
+RavenDB versions that precede 5.1 support **Pull Replication**, which allows
+you to define *Hub and Sink* tasks and replicate data from Hub to Sink.
+
+In RavenDB 5.1 and on, *Pull Replication* is replaced and enhanced by
+*Hub/Sink Replication*, which provides everything *Pull Replication* does
+and adds *Sink to Hub* replication and *Replication Filtering*.
+
+* Pull Replication tasks defined on a RavenDB version earlier than 5.1
+  **will remain operative** when you upgrade to version 5.1 and on.
+
+* A Hub or a Sink task that runs on a RavenDB version earlier than
+  5.1 **can** connect to a Hub or a Sink defined on RavenDB 5.1 and on.
+  You do **not** need to upgrade the task's instance to keep the task operative.
+
+
+Upgrade RavenDB from a version earlier than 5.1 if you want to use the
+features added by *Hub/Sink Replication*, i.e. Sink-to-Hub replication and
+Replication Filtering.
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/_category_.json b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/_category_.json
new file mode 100644
index 0000000000..2c2547a545
--- /dev/null
+++ b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/_category_.json
@@ -0,0 +1,4 @@
+{
+    "position": 4,
+    "label": "Queue Sink"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/info-hub.png b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/info-hub.png
new file mode 100644
index 0000000000..3682a6d7e8
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/info-hub.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/overview_ongoing-tasks.png b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/overview_ongoing-tasks.png
new file mode 100644
index 0000000000..277fb6a976
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/overview_ongoing-tasks.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/overview_stats.png b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/overview_stats.png
new file mode 100644
index 0000000000..711e96535b
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/overview_stats.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/overview_task-selection.png b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/overview_task-selection.png
new file mode 100644
index 0000000000..167215bc88
Binary files /dev/null and b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/assets/overview_task-selection.png differ
diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx
new file mode 100644
index
0000000000..32ad3207f8 --- /dev/null +++ b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx @@ -0,0 +1,314 @@ +--- +title: "Queue Sink: Apache Kafka" +hide_table_of_contents: true +sidebar_label: Kafka Queue Sink +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Queue Sink: Apache Kafka + + +* **Apache Kafka** is a distributed, high-performance, transactional messaging + platform that remains performant as the number of messages it processes + increases and the volume of events it streams grows to big-data scale. + +* RavenDB can harness the advantages presented by message brokers like Kafka + both as a producer (by [running ETL tasks](../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx)) + and as a **consumer** (using a sink task to consume enqueued messages). + +* To use RavenDB as a consumer, define an ongoing Sink task that will read batches + of enqueued JSON formatted messages from Kafka topics, construct documents using + user-defined scripts, and store the documents in RavenDB collections. + +* In this page: + * [The Queue Sink Task](../../../server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx#the-queue-sink-task) + * [Client API](../../../server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx#client-api) + * [Add a Kafka Connection String](../../../server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx#add-a-kafka-connection-string) + * [Add a Kafka Sink Task](../../../server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx#add-a-kafka-sink-task) + * [Configuration Options](../../../server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx#configuration-options) + +## The Queue Sink Task + +Users of RavenDB 6.0 and on can create an ongoing Sink task that connects to +a Kafka broker, retrieves enqueued messages from selected Kafka topics, +runs a user-defined script to manipulate data and construct documents, and +potentially stores the created documents in RavenDB collections. +#### Connecting a Kafka broker + +In the message broker architecture, RavenDB sinks take the role of data consumers. +A sink connects to a Kafka broker using a connection string and retrieves messages +from the broker's topics. + +Read [below](../../../server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx#add-a-kafka-connection-string) +about adding a connection string via API. +Read [here](../../../studio/database/tasks/ongoing-tasks/kafka-queue-sink.mdx#define-a-kafka-sink-task) +about adding a connection string using Studio. + + +Like all ongoing tasks, a sink task is operated by a +[responsible node](../../../server/clustering/distribution/highly-available-tasks.mdx#responsible-node). +When the responsibility for the task is moved from one node to another, +e.g. from node `A` to node `B` as a result of node `A` downtime: + +* The consumer task will maintain the same consumer group id it had on the original node. +* Kafka brokers may **cease serving the sink task for some time** as the Kafka consumer + group rebalances (adapting to the leaving of one node and the joining of another, among + other changes). + + +#### Retrieving enqueued messages from selected Kafka topics + +When a message is sent to a Kafka broker by a producer, it is pushed to
As preceding messages are pulled, the message advances +up the queue until it reaches its head and can be consumed by RavenDB's sink. +#### Running user-defined scripts + +A sink task's script is a JavaScript segment. Its basic role is to retrieve +selected Kafka messages or message properties, and construct documents that +will then be stored in RavenDB. + +The script can simply store the whole message as a document, as in this +segment: + + +{`// Add a \`@collection\` metadata property to store the document in +// this collection, or leave it unset to store the document in @empty. +this['@metadata']['@collection'] = 'Orders'; +// Store the message as is, using its Id property as its RavenDB Id as well. +put(this.Id.toString(), this) +`} + + + +But the script can also retrieve some information from the read message +and construct a new document that doesn't resemble the original message. +Scripts often apply two sections: a section that creates a JSON object +that defines the document's structure and contents, and a second section +that stores the document. + +E.g., for Kafka messages of this format - + + +{`\{ + "Id" : 13, + "FirstName" : "John", + "LastName" : "Doe" +\} +`} + + + +We can create this script - + + +{`var item = \{ + Id : this.Id, + FirstName : this.FirstName, + LastName : this.LastName, + FullName : this.FirstName + ' ' + this.LastName, + "@metadata" : \{ + "@collection" : "Users" + \} +\}; + +// Use .toString() to pass the Id as a string even if Kafka provides it as a number +put(this.Id.toString(), item) +`} + + + +The script can also apply various other JavaScript commands, including +`load` to load a RavenDB document (e.g. to construct a document that +includes data from the retrieved message and complementary data from +existing RavenDB documents), `del` to remove existing RavenDB documents, +and [many others](../../../server/kb/javascript-engine.mdx#predefined-javascript-functions). +#### Storing documents in RavenDB collections + +The sink task consumes batches of queued messages and stores them in RavenDB +in a transactional manner, processing either the entire batch or none of it. + +Some script processing errors are allowed; when such an error occurs, RavenDB +will skip the affected message, record the event in the logs, and alert the +user in Studio, but **continue processing the batch**. + + +Once a batch is consumed, the task confirms it by calling `kafkaConsumer.Commit()`. + +Note that the number of documents included in a batch is +[configurable](../../../server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx#configuration-options). + + +Producers may enqueue +[multiple instances](../../../server/ongoing-tasks/etl/queue-etl/kafka.mdx#idempotence-and-message-duplication) +of the same document. +If processing each message only once is important to the consumer, +it is **the consumer's responsibility** to verify the uniqueness of +each consumed message. + +Note that as long as the **Id** property of Kafka messages is preserved +(so duplicate messages share an Id), the script's `put(ID, { ... })` +command will overwrite a previous document with the same Id and only +one copy of it will remain. + + + + +## Client API + +#### Add a Kafka Connection String + +Prior to defining a Kafka sink task, add a **Kafka connection string** +that the task will use to connect to the message broker's bootstrap servers. + +To create the connection string: + +* Create a `QueueConnectionString` instance with the connection string configuration.
+ Pass it to the `PutConnectionStringOperation` store operation to add the connection string. + + `QueueConnectionString`: + + +{`// Add Kafka connection string +var res = store.Maintenance.Send( + new PutConnectionStringOperation<QueueConnectionString>( + new QueueConnectionString + \{ + Name = "KafkaConStr", + BrokerType = QueueBrokerType.Kafka, + KafkaConnectionSettings = new KafkaConnectionSettings() + \{ BootstrapServers = "localhost:9092" \} + \})); +`} + + + + `QueueBrokerType`: + + +{`public enum QueueBrokerType +\{ + None, + Kafka, + RabbitMq +\} +`} + + + + | Property | Type | Description | + |:-------------|:-------------|:-------------| + | **Name** | `string` | Connection string name | + | **BrokerType** | `QueueBrokerType` | Set to `QueueBrokerType.Kafka` for a Kafka connection string | + | **KafkaConnectionSettings** | `KafkaConnectionSettings` | The Kafka connection settings; `BootstrapServers` is a comma-separated list of `host:port` URLs to Kafka brokers | +#### Add a Kafka Sink Task + +To create the Sink task: + +* Create `QueueSinkScript` instances to define scripts with which the + task can process retrieved messages, apply JavaScript commands, construct + documents and store them in RavenDB. + + +{`// Define a Sink script +QueueSinkScript queueSinkScript = new QueueSinkScript +\{ + // Script name + Name = "orders", + // A list of Kafka topics to connect + Queues = new List<string>() \{ "orders" \}, + // Apply this script + Script = @"this['@metadata']['@collection'] = 'Orders'; + put(this.Id.toString(), this)" +\}; +`} + + + +* Prepare a `QueueSinkConfiguration` object with the sink task configuration. + + `QueueSinkConfiguration` properties: + + | Property | Type | Description | + |:-------------|:-------------|:-------------| + | **Name** | `string` | The sink task name | + | **ConnectionStringName** | `string` | The registered connection string name | + | **BrokerType** | `QueueBrokerType` | Set to `QueueBrokerType.Kafka` to define a Kafka sink task | + | **Scripts** | `List<QueueSinkScript>` | A list of scripts | + +* Pass this object to the `AddQueueSinkOperation` store operation to add the Sink task.
+ + `QueueSinkScript` properties: + + | Property | Type | Description | + |:-------------|:-------------|:-------------| + | **Name** | `string` | Script name | + | **Queues** | `List<string>` | A list of Kafka topics to consume messages from | + | **Script** | `string` | The script contents | + +**Code Sample**: + + +{`// Add Kafka connection string +var res = store.Maintenance.Send( + new PutConnectionStringOperation<QueueConnectionString>( + new QueueConnectionString + \{ + Name = "KafkaConStr", + BrokerType = QueueBrokerType.Kafka, + KafkaConnectionSettings = new KafkaConnectionSettings() + \{ BootstrapServers = "localhost:9092" \} + \})); + +// Define a Sink script +QueueSinkScript queueSinkScript = new QueueSinkScript +\{ + // Script name + Name = "orders", + // A list of Kafka topics to connect + Queues = new List<string>() \{ "orders" \}, + // Apply this script + Script = @"this['@metadata']['@collection'] = 'Orders'; + put(this.Id.toString(), this)" +\}; + +// Define a Kafka configuration +var config = new QueueSinkConfiguration() +\{ + // Sink name + Name = "KafkaSinkTaskName", + // The connection string to connect the broker with + ConnectionStringName = "KafkaConStr", + // What queue broker is this task using + BrokerType = QueueBrokerType.Kafka, + // The list of scripts to run + Scripts = \{ queueSinkScript \} +\}; + +AddQueueSinkOperationResult addQueueSinkOperationResult = + store.Maintenance.Send(new AddQueueSinkOperation(config)); +`} + + + + + +## Configuration Options + +Use these configuration options to gain more control over queue sink tasks. + +* [QueueSink.MaxBatchSize](../../../server/configuration/queue-sink-configuration.mdx#queuesinkmaxbatchsize) + The maximum number of pulled messages consumed in a single batch. +* [QueueSink.MaxFallbackTimeInSec](../../../server/configuration/queue-sink-configuration.mdx#queuesinkmaxfallbacktimeinsec) + The maximum number of seconds the Queue Sink process will be in a fallback + mode (i.e. suspending the process) after a connection failure. + + + + diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/overview.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/overview.mdx new file mode 100644 index 0000000000..d8d59e5882 --- /dev/null +++ b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/overview.mdx @@ -0,0 +1,110 @@ +--- +title: "Ongoing Tasks: Queue Sink Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Ongoing Tasks: Queue Sink Overview + + +* Message brokers are high-throughput, distributed messaging services that + host data they receive from **producer** applications and serve it to + **consumer** clients via FIFO data queues. +* RavenDB 5.4 and on can function as a _Producer_ in this architecture. + RavenDB 6.0 and on can also function as a _Consumer_. + + This overview and the other pages in the Queue Sink section explain + **only** RavenDB's role as a _Consumer_ through the implementation of + a sink connector. + To learn about RavenDB's role as a _Producer_, please refer to the + [Queue ETL section](../../../server/ongoing-tasks/etl/queue-etl/overview.mdx).
+ +* RavenDB can run an ongoing Sink task that reads JSON formatted messages + from broker queues, applies a user-defined script that can, among other things, + construct documents from the retrieved messages, and potentially stores + the constructed documents in RavenDB's database. +* Supported broker queues currently include **Apache Kafka** and **RabbitMQ**. + + +Using RavenDB as a message broker sink can benefit users who want to combine +Kafka or RabbitMQ's immense capability to collect and stream data with RavenDB's +ability to process this data and reveal and exploit its value. + + +* In this page: + * [Supported Message Brokers](../../../server/ongoing-tasks/queue-sink/overview.mdx#supported-message-brokers) + * [Task Statistics](../../../server/ongoing-tasks/queue-sink/overview.mdx#task-statistics) + * [Licensing](../../../server/ongoing-tasks/queue-sink/overview.mdx#licensing) + + +## Supported Message Brokers + +Queue brokers currently supported by RavenDB include **Apache Kafka** and **RabbitMQ**. + +![Ongoing Tasks](./assets/overview_ongoing-tasks.png) + +1. **Ongoing Tasks** + Click to open the ongoing tasks view. +2. **Add a Database Task** + Click to create a new ongoing task. +3. **Info Hub** + Click for usage and licensing assistance. + + ![Info Hub](./assets/info-hub.png) +![Define Queue Sink Task](./assets/overview_task-selection.png) + +1. **Kafka Sink** + Click to define a Kafka Queue Sink task. +2. **RabbitMQ Sink** + Click to define a RabbitMQ Queue Sink task. + + + +## Task Statistics + +Use Studio's [ongoing tasks stats](../../../studio/database/stats/ongoing-tasks-stats/overview.mdx) +view to see transfer statistics. + +![Queue Brokers Stats](./assets/overview_stats.png) + +1. **Kafka sink task statistics** + All statistics related to the sink task. + Click the bars to expand or collapse statistics. + Hover over bar sections to display statistics. +2. **RabbitMQ sink task statistics** +3. **Sink statistics** + * Total duration + The time it took to get a batch of documents (in ms) + * Currently allocated + Memory allocated for the task (in MB) + * Number of processed messages + The number of messages that were recognized and processed + * Number of read messages + The number of messages that were actually transferred to the database + * Successfully processed + Has this batch of messages been fully processed (yes/no) +4. **Queue readings** + The duration of reading from queues (in ms) +5. **Script processing** + The duration of script processing (in ms) + + + +## Licensing + +Queue Sink is available with an **Enterprise** license. + + +Learn more about licensing [here](../../../start/licensing/licensing-overview.mdx).
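Beyond the Studio views above, sink tasks can also be inspected and paused programmatically. The following is a minimal, hedged sketch: the task name `KafkaSinkTaskName` is hypothetical, and it assumes a RavenDB 6.0+ client where `OngoingTaskType.QueueSink` is available.

```csharp
using Raven.Client.Documents.Operations.OngoingTasks;

// Fetch the sink task's info by name ("KafkaSinkTaskName" is hypothetical)
var taskInfo = store.Maintenance.Send(
    new GetOngoingTaskInfoOperation("KafkaSinkTaskName", OngoingTaskType.QueueSink));

// Pause the task, e.g. while investigating a misbehaving script...
store.Maintenance.Send(
    new ToggleOngoingTaskStateOperation(taskInfo.TaskId, OngoingTaskType.QueueSink, disable: true));

// ...and resume it afterwards
store.Maintenance.Send(
    new ToggleOngoingTaskStateOperation(taskInfo.TaskId, OngoingTaskType.QueueSink, disable: false));
```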
+ + + + diff --git a/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx new file mode 100644 index 0000000000..e1723495f0 --- /dev/null +++ b/versioned_docs/version-7.1/server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx @@ -0,0 +1,301 @@ +--- +title: "Queue Sink: RabbitMQ" +hide_table_of_contents: true +sidebar_label: RabbitMQ Queue Sink +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Queue Sink: RabbitMQ + + +* **RabbitMQ** brokers are designed to disperse data to multiple queues, + making for a flexible data channeling system that can easily handle complex + message streaming scenarios. + +* RavenDB can harness the advantages presented by RabbitMQ brokers both as + a producer (by [running ETL tasks](../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx)) + and as a **consumer** (using a sink task to consume enqueued messages). + +* To use RavenDB as a consumer, define an ongoing Sink task that will read + batches of JSON formatted messages from RabbitMQ queues, construct documents + using user-defined scripts, and store the documents in RavenDB collections. + +* In this page: + * [The RabbitMQ Sink Task](../../../server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx#the-rabbitmq-sink-task) + * [Client API](../../../server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx#client-api) + * [Add a RabbitMQ Connection String](../../../server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx#add-a-rabbitmq-connection-string) + * [Add a RabbitMQ Sink Task](../../../server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx#add-a-rabbitmq-sink-task) + * [Configuration Options](../../../server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx#configuration-options) + +## The RabbitMQ Sink Task + +Users of RavenDB 6.0 and on can create an ongoing Sink task that connects to +a RabbitMQ broker, retrieves messages from selected queues, runs a user-defined +script to manipulate data and construct documents, and potentially stores the +created documents in RavenDB collections. +#### Connecting a RabbitMQ broker + +In the message broker architecture, RavenDB sinks take the role of data consumers. +A sink connects to a RabbitMQ broker using a connection string and retrieves messages +from the broker's queues. + +Read [below](../../../server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx#add-a-rabbitmq-connection-string) +about adding a connection string via API. +Read [here](../../../studio/database/tasks/ongoing-tasks/rabbitmq-queue-sink.mdx#define-a-rabbitmq-sink-task) +about adding a connection string using Studio. +#### Retrieving messages from RabbitMQ queues + +When a message is sent to a RabbitMQ broker by a producer, it is pushed to +the tail of a queue. As preceding messages are pulled, the message advances +up the queue until it reaches its head and can be consumed by RavenDB's sink. +#### Running user-defined scripts + +A sink task's script is a JavaScript segment. Its basic role is to retrieve +selected RabbitMQ messages or message properties, and construct documents that +will then be stored in RavenDB.
+ +The script can simply store the whole message as a document, as in this +segment: + + +{`// Add a \`@collection\` metadata property to store the document in +// this collection, or leave it unset to store the document in @empty. +this['@metadata']['@collection'] = 'Orders'; +// Store the message as is, using its Id property as its RavenDB Id as well. +put(this.Id.toString(), this) +`} + + + +But the script can also retrieve some information from the read message +and construct a new document that doesn't resemble the original message. +Scripts often apply two sections: a section that creates a JSON object +that defines the document's structure and contents, and a second section +that stores the document. + +E.g., for RabbitMQ messages of this format - + + +{`\{ + "Id" : 13, + "FirstName" : "John", + "LastName" : "Doe" +\} +`} + + + +We can create this script - + + +{`var item = \{ + Id : this.Id, + FirstName : this.FirstName, + LastName : this.LastName, + FullName : this.FirstName + ' ' + this.LastName, + "@metadata" : \{ + "@collection" : "Users" + \} +\}; + +// Use .toString() to pass the Id as a string even if RabbitMQ provides it as a number +put(this.Id.toString(), item) +`} + + + +The script can also apply various other JavaScript commands, including +`load` to load a RavenDB document (e.g. to construct a document that +includes data from the retrieved message and complementary data from +existing RavenDB documents), `del` to remove existing RavenDB documents, +and [many others](../../../server/kb/javascript-engine.mdx#predefined-javascript-functions). +#### Storing documents in RavenDB collections + +The sink task consumes batches of queued messages and stores them in RavenDB +in a transactional manner, processing either the entire batch or none of it. + +Some script processing errors are allowed; when such an error occurs, RavenDB +will skip the affected message, record the event in the logs, and alert the +user in Studio, but **continue processing the batch**. + + +Once a batch is consumed, the task confirms it by sending `_channel.BasicAck`. + +Note that the number of documents included in a batch is +[configurable](../../../server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx#configuration-options). + + +Producers may enqueue +[multiple instances](../../../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx#message-duplication) +of the same document. +If processing each message only once is important to the consumer, +it is **the consumer's responsibility** to verify the uniqueness of +each consumed message. + +Note that as long as the **Id** property of RabbitMQ messages is preserved +(so duplicate messages share an Id), the script's `put(ID, { ... })` command +will overwrite a previous document with the same Id and only one copy of +it will remain. + + + + +## Client API + +#### Add a RabbitMQ Connection String + +Prior to defining a RabbitMQ sink task, add a **RabbitMQ connection string** +that the task will use to connect to the message broker. + +To create the connection string: + +* Create a `QueueConnectionString` instance with the connection string configuration. + Pass it to the `PutConnectionStringOperation` store operation to add the connection string.
+ + `QueueConnectionString`: + + +{`// Add RabbitMQ connection string +var res = store.Maintenance.Send( + new PutConnectionStringOperation<QueueConnectionString>( + new QueueConnectionString + \{ + Name = "RabbitMqConStr", + BrokerType = QueueBrokerType.RabbitMq, + RabbitMqConnectionSettings = new RabbitMqConnectionSettings() + \{ ConnectionString = "amqp://guest:guest@localhost:5672/" \} + \})); +`} + + + + `QueueBrokerType`: + + +{`public enum QueueBrokerType +\{ + None, + Kafka, + RabbitMq +\} +`} + + + + | Property | Type | Description | + |:-------------|:-------------|:-------------| + | **Name** | `string` | Connection string name | + | **BrokerType** | `QueueBrokerType` | Set to `QueueBrokerType.RabbitMq` for a RabbitMQ connection string | + | **RabbitMqConnectionSettings** | `RabbitMqConnectionSettings` | The RabbitMQ connection settings; `ConnectionString` holds the broker's connection details | +#### Add a RabbitMQ Sink Task + +To create the Sink task: + +* Create `QueueSinkScript` instances to define scripts with which the + task can process retrieved messages, apply JavaScript commands, construct + documents and store them in RavenDB. + + +{`// Define a Sink script +QueueSinkScript queueSinkScript = new QueueSinkScript +\{ + // Script name + Name = "orders", + // A list of RabbitMQ queues to connect + Queues = new List<string>() \{ "orders" \}, + // Apply this script + Script = @"this['@metadata']['@collection'] = 'Orders'; + put(this.Id.toString(), this)" +\}; +`} + + + +* Prepare a `QueueSinkConfiguration` object with the sink task configuration. + + `QueueSinkConfiguration` properties: + + | Property | Type | Description | + |:-------------|:-------------|:-------------| + | **Name** | `string` | The sink task name | + | **ConnectionStringName** | `string` | The registered connection string name | + | **BrokerType** | `QueueBrokerType` | Set to `QueueBrokerType.RabbitMq` to define a RabbitMQ sink task | + | **Scripts** | `List<QueueSinkScript>` | A list of scripts | + +* Pass this object to the `AddQueueSinkOperation` store operation to add the Sink task.
+ + `QueueSinkScript` properties: + + | Property | Type | Description | + |:-------------|:-------------|:-------------| + | **Name** | `string` | Script name | + | **Queues** | `List<string>` | A list of RabbitMQ queues to consume messages from | + | **Script** | `string` | The script contents | + +**Code Sample**: + + +{`// Add RabbitMQ connection string +var res = store.Maintenance.Send( + new PutConnectionStringOperation<QueueConnectionString>( + new QueueConnectionString + \{ + Name = "RabbitMqConStr", + BrokerType = QueueBrokerType.RabbitMq, + RabbitMqConnectionSettings = new RabbitMqConnectionSettings() + \{ ConnectionString = "amqp://guest:guest@localhost:5672/" \} + \})); + +// Define a Sink script +QueueSinkScript queueSinkScript = new QueueSinkScript +\{ + // Script name + Name = "orders", + // A list of RabbitMQ queues to connect + Queues = new List<string>() \{ "orders" \}, + // Apply this script + Script = @"this['@metadata']['@collection'] = 'Orders'; + put(this.Id.toString(), this)" +\}; + +// Define a RabbitMQ configuration +var config = new QueueSinkConfiguration() +\{ + // Sink name + Name = "RabbitMqSinkTaskName", + // The connection string to connect the broker with + ConnectionStringName = "RabbitMqConStr", + // What queue broker is this task using + BrokerType = QueueBrokerType.RabbitMq, + // The list of scripts to run + Scripts = \{ queueSinkScript \} +\}; + +AddQueueSinkOperationResult addQueueSinkOperationResult = + store.Maintenance.Send(new AddQueueSinkOperation(config)); +`} + + + + + +## Configuration Options + +Use these configuration options to gain more control over queue sink tasks. + +* [QueueSink.MaxBatchSize](../../../server/configuration/queue-sink-configuration.mdx#queuesinkmaxbatchsize) + The maximum number of pulled messages consumed in a single batch. +* [QueueSink.MaxFallbackTimeInSec](../../../server/configuration/queue-sink-configuration.mdx#queuesinkmaxfallbacktimeinsec) + The maximum number of seconds the Queue Sink process will be in a fallback + mode (i.e. suspending the process) after a connection failure.
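As a hedged sketch of how these keys might be applied per database (assuming they are database-scoped, which the configuration reference can confirm; the database name and values below are illustrative), configuration keys can be set through `DatabaseRecord.Settings` when creating a database:

```csharp
using Raven.Client.ServerWide;
using Raven.Client.ServerWide.Operations;

// Create a database with custom queue sink limits
// ("SinkDemo" and both values are illustrative assumptions)
var record = new DatabaseRecord("SinkDemo");
record.Settings["QueueSink.MaxBatchSize"] = "1000";        // messages consumed per batch
record.Settings["QueueSink.MaxFallbackTimeInSec"] = "60";  // back-off ceiling after connection failures
store.Maintenance.Server.Send(new CreateDatabaseOperation(record));
```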
+ + + + diff --git a/versioned_docs/version-7.1/server/security/_category_.json b/versioned_docs/version-7.1/server/security/_category_.json new file mode 100644 index 0000000000..57f1316781 --- /dev/null +++ b/versioned_docs/version-7.1/server/security/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 5, + "label": "Security" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/server/security/assets/1.png b/versioned_docs/version-7.1/server/security/assets/1.png new file mode 100644 index 0000000000..9f3a872987 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/assets/1.png differ diff --git a/versioned_docs/version-7.1/server/security/assets/2.png b/versioned_docs/version-7.1/server/security/assets/2.png new file mode 100644 index 0000000000..234be76994 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/assets/2.png differ diff --git a/versioned_docs/version-7.1/server/security/assets/3.png b/versioned_docs/version-7.1/server/security/assets/3.png new file mode 100644 index 0000000000..91fe217f03 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/assets/3.png differ diff --git a/versioned_docs/version-7.1/server/security/assets/import-certificate.png b/versioned_docs/version-7.1/server/security/assets/import-certificate.png new file mode 100644 index 0000000000..75476f7c99 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/assets/import-certificate.png differ diff --git a/versioned_docs/version-7.1/server/security/audit-log/_category_.json b/versioned_docs/version-7.1/server/security/audit-log/_category_.json new file mode 100644 index 0000000000..eed789d6cc --- /dev/null +++ b/versioned_docs/version-7.1/server/security/audit-log/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 4, + "label": "Audit Log" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/server/security/audit-log/audit-log-overview.mdx b/versioned_docs/version-7.1/server/security/audit-log/audit-log-overview.mdx new file mode 100644 index 0000000000..65845aa148 --- /dev/null +++ b/versioned_docs/version-7.1/server/security/audit-log/audit-log-overview.mdx @@ -0,0 +1,121 @@ +--- +title: "Audit Log" +hide_table_of_contents: true +sidebar_label: Audit Log +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Audit Log + + +* [Authorization](../../../server/security/authorization/security-clearance-and-permissions.mdx) controls who can access RavenDB and what operations they can perform. + +* In addition, RavenDB provides an optional **Audit Log** to track who connects to the system and when. + Audit logs are available only when using a secure server. + +* Audit entries are recorded for operations at the database level. + See the full list of what is being logged below.
+ +* In this page: + * [Enabling the audit log](../../../server/security/audit-log/audit-log-overview.mdx#enabling-the-audit-log) + * [What is being logged](../../../server/security/audit-log/audit-log-overview.mdx#what-is-being-logged) + * [Things to consider](../../../server/security/audit-log/audit-log-overview.mdx#things-to-consider) + + +## Enabling the audit log + +* **To enable writing to the audit log**, set the following configuration key: + * [Security.AuditLog.FolderPath](../../../server/configuration/security-configuration.mdx#securityauditlogfolderpath) - set the path to a folder where RavenDB will store the audit logs. + +* In addition, the following configurations are available: + * [Security.AuditLog.ArchiveAboveSizeInMb](../../../server/configuration/security-configuration.mdx#securityauditlogarchiveabovesizeinmb) + The maximum size an audit log file may reach before it is archived and logging is directed to a new file. + * [Security.AuditLog.EnableArchiveFileCompression](../../../server/configuration/security-configuration.mdx#securityauditlogenablearchivefilecompression) - Determines whether to compress the audit log files. + * [Security.AuditLog.MaxArchiveDays](../../../server/configuration/security-configuration.mdx#securityauditlogmaxarchivedays) - The maximum number of days that an archived audit log file is kept. + * [Security.AuditLog.MaxArchiveFiles](../../../server/configuration/security-configuration.mdx#securityauditlogmaxarchivefiles) - The maximum number of archived audit log files to keep. + * [Logs.ArchiveAboveSizeInMb](../../../server/configuration/logs-configuration.mdx#logsarchiveabovesizeinmb) - A new log file is created when this limit is reached (or daily). + +* Learn how to set configuration keys in this [configuration overview](../../../server/configuration/configuration-options.mdx). + + + +## What is being logged + +* Once the audit log is enabled, the following action items will be logged: + * **Connecting to RavenDB**: + Every time a connection is made to RavenDB + Every time a connection to RavenDB is closed + When a connection is rejected by RavenDB as invalid + Adding a certificate + including what privileges it was granted + Deleting a certificate + Opening a 2FA session + Failing to open a 2FA session + * **Cluster**: + Adding a node to the cluster + Removing a node from the cluster + * **Database**: + Creating or deleting a database + Modifying the database topology + Modifying the database record + Exporting or importing a database + * **Indexes**: + Creating an index + Deleting an index + Resetting an index + * **Analyzers and sorters**: + Adding or deleting an analyzer + Adding or deleting a sorter + * **Admin script**: + Executing an admin JS script + * **Integrations**: + Setting or deleting a user from PostgreSQL protocol credentials
+ * **Connection strings**: + Adding or deleting a connection string + * **Queries**: + Deleting documents via patching + Streaming query results from @all_docs + * **Revisions**: + Deleting revisions + Modifying revisions settings + Modifying revisions bin cleaner settings + * **Ongoing tasks**: + Adding or updating an ETL task + Adding or updating a Kafka Sink or a RabbitMQ Sink task + Adding or updating an External Replication task + Adding or updating a Replication Hub or a Replication Sink task + Deleting any ongoing task + Toggling ongoing task state + * **Backups**: + Adding a manual (one-time) backup task + Adding, updating, or deleting a periodic backup task + Delaying the backup operation + + + +## Things to consider + +* **Audit log processing**: + RavenDB only writes to the audit logs without any additional processing. + The audit entries can be loaded into centralized audit and analysis systems using dedicated tools. + +* **Audit logs are local**: + It is important to note that the audit logs are local. + For instance, if a database resides on node **C** and is removed by a command issued from node **B**, + the corresponding audit entry will be recorded in the audit log of node **B**, not in that of node **C**. + +* **Connection logging**: + RavenDB records connections in the audit log, not individual requests. + Logging contains the time of the TCP connection, the certificate being used, and the level of access granted to that certificate at the time of the connection. + This is done for performance and manageability; otherwise, the audit logs would become excessively large and difficult to manage. + With HTTP 1.1, a single TCP connection is utilized for multiple requests. + If you require more detailed logs at the level of individual HTTP requests, you can use a proxy in front of RavenDB to log the appropriate requests as they are made.
+ + + diff --git a/versioned_docs/version-7.1/server/security/authentication/_category_.json b/versioned_docs/version-7.1/server/security/authentication/_category_.json new file mode 100644 index 0000000000..c14de1b41c --- /dev/null +++ b/versioned_docs/version-7.1/server/security/authentication/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 1, + "label": "Authentication" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/cert-enhanced-key-usage.png b/versioned_docs/version-7.1/server/security/authentication/assets/cert-enhanced-key-usage.png new file mode 100644 index 0000000000..3f782c73d4 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/cert-enhanced-key-usage.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/cert-key-usage.png b/versioned_docs/version-7.1/server/security/authentication/assets/cert-key-usage.png new file mode 100644 index 0000000000..143aefb209 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/cert-key-usage.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/client-cert.png b/versioned_docs/version-7.1/server/security/authentication/assets/client-cert.png new file mode 100644 index 0000000000..7a3c0034f7 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/client-cert.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/client-certificate-button-options.png b/versioned_docs/version-7.1/server/security/authentication/assets/client-certificate-button-options.png new file mode 100644 index 0000000000..ed018814c0 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/client-certificate-button-options.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/edit.png b/versioned_docs/version-7.1/server/security/authentication/assets/edit.png new file mode 100644 index 0000000000..d1876e5a78 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/edit.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/export-server-certificates.png b/versioned_docs/version-7.1/server/security/authentication/assets/export-server-certificates.png new file mode 100644 index 0000000000..b029f8ce2c Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/export-server-certificates.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/export_cluster_certificates.png b/versioned_docs/version-7.1/server/security/authentication/assets/export_cluster_certificates.png new file mode 100644 index 0000000000..c30b07a6fa Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/export_cluster_certificates.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/generate.png b/versioned_docs/version-7.1/server/security/authentication/assets/generate.png new file mode 100644 index 0000000000..021bdcb70c Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/generate.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/importing-and-exporting-certificate.png
b/versioned_docs/version-7.1/server/security/authentication/assets/importing-and-exporting-certificate.png new file mode 100644 index 0000000000..e936d295fa Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/importing-and-exporting-certificate.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/registered.png b/versioned_docs/version-7.1/server/security/authentication/assets/registered.png new file mode 100644 index 0000000000..f5c72c94bc Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/registered.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/renew_server_certificate.png b/versioned_docs/version-7.1/server/security/authentication/assets/renew_server_certificate.png new file mode 100644 index 0000000000..e26270f2a3 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/renew_server_certificate.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/server-certificates-button-options.png b/versioned_docs/version-7.1/server/security/authentication/assets/server-certificates-button-options.png new file mode 100644 index 0000000000..7d82778e8a Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/server-certificates-button-options.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/set-client-certificate-password.png b/versioned_docs/version-7.1/server/security/authentication/assets/set-client-certificate-password.png new file mode 100644 index 0000000000..5939d81dea Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/set-client-certificate-password.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/studio-certificates-overview.png b/versioned_docs/version-7.1/server/security/authentication/assets/studio-certificates-overview.png new file mode 100644 index 0000000000..dbc3b24ed8 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/studio-certificates-overview.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/upload-client-certificate.png b/versioned_docs/version-7.1/server/security/authentication/assets/upload-client-certificate.png new file mode 100644 index 0000000000..da88edd677 Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/upload-client-certificate.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/assets/upload.png b/versioned_docs/version-7.1/server/security/authentication/assets/upload.png new file mode 100644 index 0000000000..eceacd3e6f Binary files /dev/null and b/versioned_docs/version-7.1/server/security/authentication/assets/upload.png differ diff --git a/versioned_docs/version-7.1/server/security/authentication/certificate-configuration.mdx b/versioned_docs/version-7.1/server/security/authentication/certificate-configuration.mdx new file mode 100644 index 0000000000..2c5ccccfab --- /dev/null +++ b/versioned_docs/version-7.1/server/security/authentication/certificate-configuration.mdx @@ -0,0 +1,259 @@ +--- +title: "Authentication: Manual Certificate Configuration" +hide_table_of_contents: true +sidebar_label: Manual Certificate Configuration +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from 
'@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Authentication: Manual Certificate Configuration + +This article explains how to set up authentication **manually** by storing your certificate locally, externally, or with logic you create that is foreign to RavenDB. + +Please also take a look at the automated [Setup Wizard](../../../start/installation/setup-wizard.mdx), which lets you set up +authentication much more easily and quickly, with automatic certificate renewals. + + * The Setup Wizard can process **certificates that you provide**. + * Or the Wizard can give you a free, highly secure **certificate via [Let's Encrypt](../../../server/security/authentication/lets-encrypt-certificates.mdx)**. + * Certificates set up with the Setup Wizard **together** with Let's Encrypt are renewed **automatically** by default. + + If you choose manual setup and/or to provide your own certificate, **you are responsible for its periodic renewal**. + +* In this page: + * [Prerequisites](../../../server/security/authentication/certificate-configuration.mdx#prerequisites) + * [Standard Manual Setup With Certificate Stored Locally](../../../server/security/authentication/certificate-configuration.mdx#standard-manual-setup-with-certificate-stored-locally) + * [Certificate Requirements](../../../server/security/authentication/certificate-configuration.mdx#certificate-requirements) + * [Certificate Location](../../../server/security/authentication/certificate-configuration.mdx#certificate-location) + * [With Logic Foreign to RavenDB or External Certificate Storage](../../../server/security/authentication/certificate-configuration.mdx#with-logic-foreign-to-ravendb-or-external-certificate-storage) + * [Step-by-step Guide to Installing Certificate](../../../server/security/authentication/certificate-configuration.mdx#step-by-step-guide-to-installing-certificate) + + + +## Prerequisites + +To enable authentication, either `Security.Certificate.Path` or `Security.Certificate.Load.Exec` must be set in [settings.json](../../configuration/configuration-options.mdx#json). + Please note that `Security.Certificate.Load.Exec` has replaced the old `Security.Certificate.Exec` as of 4.2 - [see FAQ](../../../server/security/common-errors-and-faq.mdx#automatic-cluster-certificate-renewal-following-migration-to-42). +#### Setting up Client Certificates +When the server is manually set up with a server certificate for the first time, there are no client certificates registered in the server yet. +The first action an administrator will take is to [generate/register a new client certificate](../../../server/security/authentication/client-certificate-usage.mdx). +Find detailed instructions for the process [below](../../../server/security/authentication/certificate-configuration.mdx#step-by-step-guide-to-installing-certificate). + +You can set up various client certificates with different security clearance levels and database permissions. +See [Certificate Management](../../../server/security/authentication/certificate-management.mdx) for more about permissions.
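Client certificates can also be generated through the client API rather than Studio or the CLI. The following is a minimal sketch, assuming the document store already authenticates with a trusted admin certificate; the certificate name `hr-app` and the database name `HR` are hypothetical:

```csharp
using System.Collections.Generic;
using System.IO;
using Raven.Client.ServerWide.Operations.Certificates;

// Ask the server to generate a new client certificate
// (the store itself must already authenticate with an admin certificate)
var certificate = store.Maintenance.Server.Send(
    new CreateClientCertificateOperation(
        name: "hr-app",                              // hypothetical certificate name
        permissions: new Dictionary<string, DatabaseAccess>
        {
            ["HR"] = DatabaseAccess.ReadWrite        // hypothetical database
        },
        clearance: SecurityClearance.ValidUser));

// RawData holds an archive with the generated certificate files
File.WriteAllBytes("hr-app-certificate.zip", certificate.RawData);
```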
+ + + + + +### Standard Manual Setup With Certificate Stored Locally + +#### Certificate Requirements + +RavenDB will accept `.pfx` server certificates that contain the private key, are not expired, +and include a basic (`Key Usage`) field and an enhanced (`Enhanced Key Usage`) field. + +- `Key Usage` + Permissions granted by this field: **Digital Signature** + + ![Key Usage](./assets/cert-key-usage.png) + +- `Enhanced Key Usage` + Permissions granted by this field: **Server Authentication** + + An `Enhanced Key Usage` field must include this OID: + **1.3.6.1.5.5.7.3.1** - Server Authentication + + ![Enhanced Key Usage](./assets/cert-enhanced-key-usage.png) + + + +* Certificates created during setup using [Let's Encrypt](../../../server/security/authentication/lets-encrypt-certificates.mdx) + are already provided with the above fields and OIDs. +* User-created certificates must be provided with these properties for RavenDB to accept and be able to use them. + + +#### Certificate Location + +A [settings.json](../../configuration/configuration-options.mdx#settingsjson) file must reside in each node's `Server` folder +and define the server and certificate settings. The server will retrieve this file and use its settings on startup. +Read more about RavenDB configuration options [here](../../configuration/configuration-options.mdx). + +* **ServerUrl** + When setting up securely, you must also set the `ServerUrl` configuration option to an **HTTPS** address. + In manual setup, we recommend configuring a permanent port instead of a random one. In the example below, the port is set to 8080. + For a list of IPs and ports already in use on your machine, run `netstat -a` in the command line. + +* **Setup.Mode** + Set to "None" if you want a manual setup. If you want to use the [Setup Wizard](../../../start/installation/setup-wizard.mdx), set to "Initial" + or simply run the `run.ps1` file in your server package via PowerShell. + +* **DataDir** + Configure the directory on each machine where the databases will be located. + +* **Path to Certificate** + The standard way to enable authentication is to set `Security.Certificate.Path` in the settings.json file with the path to your `.pfx` server certificate. + You may also supply a certificate password using `Security.Certificate.Password`, but this is optional. + +For example, this is a typical [settings.json](../../configuration/configuration-options.mdx#json) for a manual setup: + + + +{`\{ + "ServerUrl": "https://rvn-srv-1:8080", + "Setup.Mode": "None", + "DataDir": "/home/RavenData", + "Security.Certificate": \{ + "Path": "/home/secrets/server.pfx", + "Password": "s3cr7t p@$$w0rd" + \} +\} +`} + + + + + + + +### With Logic Foreign to RavenDB or External Certificate Storage + +The second way to enable authentication is to set `Security.Certificate.Load.Exec`. + +This option is useful when you want to protect your certificate (private key) with other solutions such as "Azure Key Vault", "HashiCorp Vault" +or even Hardware-Based Protection. RavenDB will invoke a process you specify, so you can write your own scripts / mini-programs and +apply the logic that you need. +This creates a clean separation between RavenDB and the secret store in use. + +RavenDB expects to get the raw binary representation (byte array) of the .pfx certificate through the standard output. 
+ +Let's look at an example - + +To use `Security.Certificate.Load.Exec` with a PowerShell script, the [settings.json](../../configuration/configuration-options.mdx#json) +must be stored in each node's `Server` folder and will look something like this: + + + +{`\{ + "ServerUrl": "https://rvn-srv-1:8080", + "Setup.Mode": "None", + "DataDir": "RavenData", + "Security.Certificate.Load.Exec": "powershell", + "Security.Certificate.Load.Exec.Arguments": "C:\\\\secrets\\\\give_me_cert.ps1 90F4BC16CA5E5CB535A6CD8DD78CBD3E88FC6FEA" +\} +`} + + + +A sample PowerShell script called `give_me_cert.ps1` that matches the `settings.json` configuration: + + + +{`try +\{ + $thumbprint = $args[0] + $cert = gci "cert:\\CurrentUser\\my\\$thumbprint" + $exportedCertBinary = $cert.Export("Pfx") + $stdout = [System.Console]::OpenStandardOutput() + $stdout.Write($exportedCertBinary, 0, $exportedCertBinary.Length) +\} +catch +\{ + write-error $_.Exception + exit 3 +\} +`} + + + + + + +In all secure configurations, the `ServerUrl` must contain the same domain name that is used in the certificate (under the CN or SAN properties). + + + + + + +### Step-by-Step Guide to Installing Certificate + +1. Set up the file infrastructure and download the server. + - Create a [user account](https://ravendb.net/buy). You should get an email with your license key. + - [Download](https://ravendb.net/download) the RavenDB...zip server package. + - Extract the .zip into the folders on each machine where the server nodes will permanently live. + - Store the server certificate in your desired location with secure permissions. + +2. In each node `Server` folder, create the `settings.json` file which you will configure like the [examples](../../../server/security/authentication/certificate-configuration.mdx#standard-manual-setup-with-certificate-stored-locally) above. + * You can do this by going into the node `Server` folder > right-click > **New** > **Text Document** > name it `settings.json` instead of + ...txt > click **Yes** > open it and begin configuring. + * Place the `settings.json` inside each node's `Server` folder because when you run the server, RavenDB is programmed to find the settings there. + +3. Configure the `settings.json` file in each node `Server` folder. + - Set the `ServerUrl`. Make sure to use `https` and that it matches the domain established in your certificate. + - Set `Setup.Mode` to `None` to deactivate the RavenDB Setup Wizard. + - Set `DataDir` to the desired database storage folder on each machine. + - Set the `Security.Certificate.Path` to the **.pfx** that you placed in each server folder if the certificate is stored with [RavenDB logic on local machines](../../../server/security/authentication/certificate-configuration.mdx#standard-manual-setup-with-certificate-stored-locally) + or `Security.Certificate.Load.Exec` if using [external location or logic](../../../server/security/authentication/certificate-configuration.mdx#with-logic-foreign-to-ravendb-or-external-certificate-storage). + - Make sure that the certificate .path or .load script leads to the correct certificate location. `.Path` should look something like this: + `"Security.Certificate.Path": "C:/Windows/MyDomainName/A/Server/ravendb.domain.com.pfx"` + See [.json example for a .Path situation](../../../server/security/authentication/certificate-configuration.mdx#standard-manual-setup-with-certificate-stored-locally) above. + - Setting a **password** on the certificate is optional.
See [settings.json example](../../../server/security/authentication/certificate-configuration.mdx#standard-manual-setup-with-certificate-stored-locally) above. + +4. Right-click and run the `run.ps1` (or `run.sh` in Linux) in the extracted server package. In Windows, it runs in PowerShell as a default. + - If you don't yet have a client certificate installed, it will start up and launch a browser window that should give an error message about a missing client certificate. + Setting up the client certificate is covered in the next two steps. + - If there is a previously existing client certificate on the machine, the browser will ask which certificate to use. + Until you set up the correct client certificate for this server (in the next two steps), it probably won't work and will give an error message. + This is because your browser will likely save your choice in the cache if you aren't in 'incognito' mode. + - It's best to **first do the next two steps before selecting a client certificate** in your browser. + +5. The PowerShell CLI window will be running the server terminal. The last line should read `ravendb>`. + In the CLI, run the [generateClientCert](../../../server/administration/cli.mdx#generateclientcert) command to generate a client certificate. + - The following is the generic form of the RavenDB CLI command. + + +{`ravendb> generateClientCert <certificate name> <output folder> <validity in months> [optional password] +`} + + + - In the following example the certificate will be named RavenDBClient, will be stored at C:\Users\administrator\Documents, will be valid for 60 months, and will have no password. + If a password is required, add it to the end of the command. + + +{`ravendb> generateClientCert RavenDBClient C:\\Users\\administrator\\Documents 60 +`} + + + - A few seconds after running this command, a `.zip` file will be saved to the output folder that you defined. + +6. Extract the contents of the generated .zip file into the folders where your nodes live. + - Install the client certificate into the OS by double-clicking the `admin.client.certificate...pfx` file and completing the OS Certificate Import Wizard. + * **To install the client certificate without a password**, you can use the default settings by pressing **Next** all the way through. + In most cases, this is sufficient. + * **To set a password on the client certificate**, do so in the Import Wizard. You'll need to use that password every time you [work with the certificate](../../../server/security/authentication/certificate-management.mdx). + + ![Set client certificate password](./assets/set-client-certificate-password.png) + +7. Quit and restart the server with the `run.ps1` script. Select the certificate in the popup and click "OK". + The [RavenDB Studio](../../../studio/overview.mdx) should now open. + - In the PowerShell window type `quit` to close down the server for the next important step of setting it up as an OS service. + +8. To set up as an OS service, run PowerShell as an administrator and navigate to the root `Server` folder where the `settings.json` is located. + Copy and paste the following command: `.\rvn.exe windows-service register --service-name RavenDB`. + It will set up the cluster as an OS service, which [will launch the server automatically](../../../start/installation/running-as-service.mdx) every time the machine starts, + but will fail to start if the Local Service account doesn't have access to all the required resources. + - Open the "Services" manager for Windows. Make sure that the "RavenDB" service is there and that the Startup Type is "Automatic". + +9.
Now the service should run whenever the machine starts and the Studio should be accessible by the user with the client certificate. + - See [Certificate Management](../../../server/security/authentication/certificate-management.mdx) for an easy way to generate various client certificates with customizable permissions. + + + + diff --git a/versioned_docs/version-7.1/server/security/authentication/certificate-management.mdx b/versioned_docs/version-7.1/server/security/authentication/certificate-management.mdx new file mode 100644 index 0000000000..456ba7b637 --- /dev/null +++ b/versioned_docs/version-7.1/server/security/authentication/certificate-management.mdx @@ -0,0 +1,393 @@ +--- +title: "Authentication: Certificate Management" +hide_table_of_contents: true +sidebar_label: Certificate Management +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Authentication: Certificate Management + + +* Once authentication is set up, it's the administrator's responsibility to issue and manage client certificates. + Each client certificate can be configured to specify which databases the certificate can access and its authorization clearance level. + +* Read about the logic and rationale behind RavenDB's client certificates in [The RavenDB Security Authorization Approach](../../../server/security/authentication/certificate-management.mdx#the-ravendb-security-authorization-approach) to ensure that you configure them appropriately. + +* See the API article for integrating a client certificate into an application via the [document store](../../../client-api/creating-document-store.mdx). 
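For a quick illustration of that integration, here is a minimal sketch; the server URL echoes the earlier `settings.json` examples, while the database name, .pfx path, and password are hypothetical:

```csharp
using System.Security.Cryptography.X509Certificates;
using Raven.Client.Documents;

// A client application authenticates by attaching its client certificate
// to the document store (database name, path, and password are hypothetical)
using var store = new DocumentStore
{
    Urls = new[] { "https://rvn-srv-1:8080" },
    Database = "HR",
    Certificate = new X509Certificate2(
        "/home/secrets/hr-app.client.certificate.pfx",
        "s3cr7t p@$$w0rd")   // omit the password argument if the .pfx has none
};
store.Initialize();
```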
+ +In this page: + +* [Studio Certificate Management View](../../../server/security/authentication/certificate-management.mdx#studio-certificates-management-view) +* [The RavenDB Security Authorization Approach](../../../server/security/authentication/certificate-management.mdx#the-ravendb-security-authorization-approach) + * [Authorization Levels in Client Certificates](../../../server/security/authentication/certificate-management.mdx#authorization-levels-in-client-certificates) + * [Partial Access to Database](../../../server/security/authentication/certificate-management.mdx#partial-access-to-database) +* [Create and Configure Certificates](../../../server/security/authentication/certificate-management.mdx#create-and-configure-certificates) + * [List of Registered Certificates](../../../server/security/authentication/certificate-management.mdx#list-of-registered-certificates) + * [Generate Client Certificate](../../../server/security/authentication/certificate-management.mdx#generate-client-certificate) + * [Edit Certificate](../../../server/security/authentication/certificate-management.mdx#edit-certificate) +* [Enabling Communication Between Servers: Importing and Exporting Certificates](../../../server/security/authentication/certificate-management.mdx#enabling-communication-between-servers:-importing-and-exporting-certificates) + * [Export Server Certificates](../../../server/security/authentication/certificate-management.mdx#export-server-certificates) + * [Upload an Existing Certificate](../../../server/security/authentication/certificate-management.mdx#upload-an-existing-certificate) + * [Certificate Collections](../../../server/security/authentication/certificate-management.mdx#certificate-collections) + * [Private Keys](../../../server/security/authentication/certificate-management.mdx#private-keys) + * [Client Certificate Chain of Trust](../../../server/security/authentication/certificate-management.mdx#client-certificate-chain-of-trust) + + +## Studio Certificates Management View + +![Figure 1. Studio Certificates Management View](./assets/studio-certificates-overview.png) + +1. Click the **Manage Server** tab. +2. Select **Certificates**. +3. **Client certificate** + ![Client Certificate Button Options](./assets/client-certificate-button-options.png) + * **Generate client certificate** + [Create and configure](../../../server/security/authentication/certificate-management.mdx#generate-client-certificate) a new client certificate. + * **Upload client certificate** + [Import a client certificate](../../../server/security/authentication/certificate-management.mdx#upload-an-existing-certificate) that was exported from another server so that they can communicate. +4. **Server certificates** + ![Server Certificates Button Options](./assets/server-certificates-button-options.png) + * **Export server certificates** + [Download server certificates](../../../server/security/authentication/certificate-management.mdx#export-server-certificates) so that you can import them into another server. + * **Replace server certificates** + [Replace server certificates](../../../server/security/authentication/certificate-renewal-and-rotation.mdx) by uploading another `.pfx` certificate. +5. Status of the current server certificate. + * Click [Renew now](../../../server/security/authentication/certificate-renewal-and-rotation.mdx) to renew the server certificate and extend its expiration period.
+ If you did not set up your server with the RavenDB Installation Wizard and Let's Encrypt then **you are responsible** for renewing your certificates periodically. +6. Status of current client certificates active in this server. You can remove or [edit client certificates](../../../server/security/authentication/certificate-management.mdx#edit-certificate), + including configuring database permissions and [authorization (security clearance) levels](../../../server/security/authorization/security-clearance-and-permissions.mdx#authorization-security-clearance-and-permissions) here. + + + +Client certificates are managed by RavenDB directly and not by any [Public Key Infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure). + + + + + +## The RavenDB Security Authorization Approach + +In general, RavenDB assumes that an application will implement its own logic and business rules, limiting +itself to protecting the data from unauthorized access. Applications operate on behalf of developers, and as such, they are in a better position than RavenDB to determine what is +allowed. This is why access levels of RavenDB databases in each cluster are highly customizable by [cluster admins](../../../server/security/authorization/security-clearance-and-permissions.mdx#cluster-admin) +and [operators](../../../server/security/authorization/security-clearance-and-permissions.mdx#operator). + +### Authorization Levels in Client Certificates +The security system in RavenDB does not make assumptions about which types of users should access which type of database. The concept of a user +does not really exist within RavenDB in this manner. Instead, cluster admins or operators create various client certificates and configure each one with a custom set of permissions. + + +* [Cluster Admin](../../../server/security/authorization/security-clearance-and-permissions.mdx#cluster-admin) + Full administrative access to clusters and databases within. +* [Operator](../../../server/security/authorization/security-clearance-and-permissions.mdx#operator) + Admin access to databases, but not to modify the cluster. +* [User](../../../server/security/authorization/security-clearance-and-permissions.mdx#user) + Lowest level of privileges. + * "User" certificates are configured to specify which databases people can access with each certificate. + * ["User" authorization levels](../../../server/security/authentication/certificate-management.mdx#setting-user-access-levels) are also configured per database. + +In most cases, users do not access RavenDB directly. Aside from admins and developers during the development process, all access to +the data inside RavenDB is expected to be done through your applications. A security mechanism on a per-user basis is not practical in complex systems +because each user (employee or customer) may need different access levels to different portions of the data. +Also, the same application usually needs to access the same data on behalf of different types of users with different levels of access. + +**Most organizations have fairly complex architectures.** In most systems, the access level +and operations allowed are never simple enough to be able to express them as an Access Control List. They are highly dependent on business rules and +processes, the state of the system, etc. + +**How can authorization levels efficiently handle complex systems?** By customizing access via client certificates.
For example, an employee may request a vacation day, but the employee +is not permitted to approve their own vacation. The HR manager, on the other hand, may approve the vacation. + +From the point of view of RavenDB, the act of editing a vacation request document or approving it looks very much the same: it's a simple document edit. +The way that a typical business system looks at those operations is often much more complicated. Perhaps the HR manager is given a client certificate with read/write permission to edit documents on the HR database, +while other employees have a certificate with read-only access. Thus, they must request changes through the HR staff. Also, the HR staff should have read/write access to the HR database, +but not to development-oriented data, whereas developers might have read/write or admin permissions to relevant databases. +Meanwhile, customers likely have a certificate with read/write access to their user account, but read-only access to the catalog. + +Thus, RavenDB is designed so that **each client certificate has specific and customizable permissions for various databases as well as configurable authorization levels**. +### Full Access to Database +RavenDB expects the applications and systems using it to utilize the security infrastructure it provides to prevent unauthorized access, such as a different +application trying to access the HR database. However, once access is granted, the access is complete. + +RavenDB security infrastructure operates at the level of the entire database. If you are granted access to a database, you can access +any and all documents in that database unless protection is explicitly configured. +### Partial Access to Database + +There are two approaches to giving partial access to a database: + + * [Using ETL for selective, one-way data transfer](../../../server/security/authentication/certificate-management.mdx#using-etl-for-selective-one-way-data-transfer) + * [Setting "User" Access Levels](../../../server/security/authentication/certificate-management.mdx#setting-user-access-levels) + +#### *Using ETL for selective, one-way data transfer* + +Some developers need to provide partial access to a database that also contains sensitive data. One approach is to set up an [Extract, Transform, Load (ETL) task](../../../studio/database/tasks/ongoing-tasks/ravendb-etl-task.mdx): + +1. [Create](../../../studio/database/create-new-database/general-flow.mdx) a dedicated database that the public will be able to access. +2. [Generate a client certificate](../../../server/security/authentication/certificate-management.mdx#generate-client-certificate) with "User" security clearance so that + you can configure it to give access only to the dedicated, public-facing database. +3. If the dedicated database is on a different cluster than the source database (if both databases are on the same cluster, this step is unnecessary; skip to step 4): + * [Export (download) server certificates](../../../server/security/authentication/certificate-management.mdx#export-server-certificates) from the **destination server**. + * [Upload the .pfx certificate](../../../server/security/authentication/certificate-management.mdx#upload-an-existing-certificate) to the **source server** to enable the two to connect. + * While uploading, configure the certificate to give access to the target source database. +4. Then set up an [ETL](../../../server/ongoing-tasks/etl/raven.mdx) task from the source database to the exposed destination database.
+ * Set up a JavaScript [Transform script](../../../server/ongoing-tasks/etl/basics.mdx#transform) in the ETL to automatically filter the information passed from the source to the destination database. + * After entering the script code, you can click the blue button to [test the script](../../../server/ongoing-tasks/etl/test-scripts.mdx) before saving the ETL. Once you click the red **Save** button, + the ETL task begins to work. It will transform and add the data to the dedicated database. +5. Check the dedicated database to make sure that the transform script did what you want it to do. + This database should contain only the filtered information and is ready to be exposed to the public. + + + +With this approach, you can choose exactly what is exposed, including redacting personal information, hiding details, etc. Because the ETL process is unidirectional, +this also protects the source data from modifications made to the new database. On the other hand, ETLs are ongoing tasks, so changes made to data +in the source database will be reflected automatically in the destination database. + +Together, ETL and dedicated databases can be used for fine-grained filtration, but that tends to be the exception rather than the rule. + + + +#### *Setting "User" Access Levels* + +You can also control access by giving a client certificate a [User](../../../server/security/authorization/security-clearance-and-permissions.mdx#user) security clearance. +With this clearance, you can set a different access level for each database. The three "User" access levels are: + +* User [Admin](../../../server/security/authorization/security-clearance-and-permissions.mdx#user) +* [Read/Write](../../../server/security/authorization/security-clearance-and-permissions.mdx#section-1) +* [Read-Only](../../../server/security/authorization/security-clearance-and-permissions.mdx#section-2) + * Learn more about the [Read-Only access level here](../../../studio/server/certificates/read-only-access-level.mdx). + + + +This approach is similar to the HR Manager and Customers certificates in the example given [above](../../../server/security/authentication/certificate-management.mdx#authorization-levels-in-client-certificates). +It enables developers to control access levels by configuring client certificates. + + + +* To learn how to configure each client certificate's database permissions and authorization levels via the RavenDB Studio GUI, see [Create and Configure Certificates](../../../server/security/authentication/certificate-management.mdx#create-and-configure-certificates). +* To learn how to configure client certificates via CLI, see [Authentication: Client Certificate Usage](../../../server/security/authentication/client-certificate-usage.mdx#authentication-client-certificate-usage). + + + + + + +## Create and Configure Certificates + +### List of Registered Certificates + +In the image below, the client certificates (HR, localcluster.client.certificate, Project Managers) have different **security clearance** and **database permissions** configurations. +This is done to give admins the ability to protect the contents of their databases by customizing permissions.
+ +For example, if an application user should have read/write but not admin access to a certain database, while project managers should have operator permissions on all databases, +you can grant different [access levels](../../../server/security/authorization/security-clearance-and-permissions.mdx#authorization-security-clearance-and-permissions) +by using different client certificates, each with its own set of permissions. + +![Figure 2. Status of Registered Certificates](./assets/registered.png) + +Each client certificate contains the following: + +1. **Name** + Client certificate name. +2. **Thumbprint** + Unique key for each certificate. +3. **Security Clearance** + [Authorization level](../../../server/security/authorization/security-clearance-and-permissions.mdx#authorization-security-clearance-and-permissions) + that determines the types of actions that can be done with this certificate. +4. **Expiration date** + Client certificates are given 5-year expiration periods by default. +5. **Allowed Databases** + The databases in this cluster that this client certificate has access to. +6. **Edit Certificate** + Configure which databases it can access (applicable for User-level) and its authorization clearance level. +7. **Delete Certificate** +### Generate Client Certificate + +Using this view, you can generate client certificates directly via RavenDB. +Newly generated certificates will be added to the list of registered certificates. + +![Figure 3. Generate Client Certificate](./assets/generate.png) + +When generating a certificate, you must complete the following fields: + +1. Click **Client certificate** and select **Generate client certificate**. +2. **Name** + Enter a name for this certificate. For future clarity, consider naming each certificate after the role that it will enable in your system (Full Stack Development, HR, Customer, Unregistered Guest, etc.) +3. **Security Clearance** + Set authorization level for this certificate. Read about [Security Clearance](../../../server/security/authorization/security-clearance-and-permissions.mdx#authorization-security-clearance-and-permissions) + to choose the appropriate level. +4. **Certificate Passphrase** + (Optional) Set a password for this certificate. +5. **Expire in** + Set validity period. +6. **Database Permissions** + Configure allowed databases and ["User" access level](../../../server/security/authorization/security-clearance-and-permissions.mdx#user) for each database. + Relevant for "User" authorization level. "Cluster Admin" and "Operator" have access to all databases. + + + + +This information is used by RavenDB internally and is not stored in the certificate itself. + + + + + +Expiration for client certificates is set to 5 years by default. + + +### Edit Certificate + +To edit existing certificates: + +![Figure 4. Edit Certificate](./assets/edit.png) + +1. **Edit** + Click the edit button to configure this certificate. +2. **Name** + Enter a name for this certificate. For future clarity, consider naming each certificate after the role that it will enable in your system + (Full Stack Development, HR, Customer, Unregistered Guest, etc.) +3. **Security Clearance** + Set authorization level for this certificate. Read about [Security Clearance](../../../server/security/authorization/security-clearance-and-permissions.mdx#authorization-security-clearance-and-permissions) + to choose the appropriate level. +4. **Thumbprint** + Click the button to copy the unique code assigned to this certificate. +5.
**Database Permissions** + Configure allowed databases and ["User" access level](../../../server/security/authorization/security-clearance-and-permissions.mdx#user) for each database. + Relevant for "User" authorization level. "Cluster Admin" and "Operator" have access to all databases. + + + +This information is used by RavenDB internally and is not stored in the certificate itself. + + + + + +Expiration for client certificates is set to 5 years by default. + + + + + + +## Enabling Communication Between Servers: Importing and Exporting Certificates + +There are various situations where developers need to create a database with partial access to another server's data. +For example, a source server may contain sensitive information that should not be exposed to the public, but also contain databases that need to be exposed with limited access. +The following section explains how to give configurable access that enables communication between servers. + + + +To connect two secure databases, the destination cluster needs to trust the source cluster. +To achieve that you need to: + +a. **Export** ([download](../../../server/security/authentication/certificate-management.mdx#export-server-certificates)) the server certificate **from the source server**. +b. **Upload** ([import](../../../server/security/authentication/certificate-management.mdx#upload-an-existing-certificate)) the downloaded certificate **into the destination server**. + + + +![Importing and Exporting Certificates](./assets/importing-and-exporting-certificate.png) + + 1. Click **Manage Server** and select **Certificates** to access the Studio - Certificates Management screen. + 2. Click **Server certificates** in the source server. + ![Server Certificates Button Options](./assets/server-certificates-button-options.png) + * **Export server certificates** + [Download server certificates](../../../server/security/authentication/certificate-management.mdx#export-server-certificates) + so that you can import them into another server. + 3. Click **Client certificate** in the destination server. + ![Client Certificate Button Options](./assets/client-certificate-button-options.png) + * **Upload client certificate** + [Import a client certificate](../../../server/security/authentication/certificate-management.mdx#upload-an-existing-certificate) + that was exported from another server so that the two can communicate. +### Export Server Certificates + +![Figure 5. Export Server Certificates](./assets/export-server-certificates.png) + +This option allows you to export the server certificate as a .pfx file. In the case of a cluster which contains several different server certificates, +a .pfx [collection](../../../server/security/authentication/certificate-management.mdx#certificate-collections) will be exported. +### Upload an Existing Certificate + +Click the **Client certificate** button, select **Upload client certificate** and you will see the following window. + +![Figure 6. Upload Existing Certificate](./assets/upload.png) + +When uploading an existing certificate .pfx file, you must configure the certificate by completing the following fields: + +1. **Name** + Enter a name for this certificate. For future clarity, consider naming each certificate after the role that it will enable in your system (Full Stack Development, HR, Customer, Unregistered Guest, etc.) +2. **Security Clearance** + Set authorization level for this certificate.
Read about [Security Clearance](../../../server/security/authorization/security-clearance-and-permissions.mdx#authorization-security-clearance-and-permissions) to choose the appropriate level. +3. **Certificate file** + Upload the `.pfx` certificate file from the destination server installation folder. +4. **Certificate Passphrase** + (Optional) Set a password for this certificate. +5. **Database permissions** + Select databases and permission levels for this certificate. + If you choose *User* security clearance, you can give access to specific databases on the server and configure [User](../../../server/security/authorization/security-clearance-and-permissions.mdx#user) authorization levels for this certificate. + +The uploaded certificate will be added to the list of registered client certificates on this server. + + + +This information is used by RavenDB internally and is not stored in the certificate itself. + + + + + +Expiration for client certificates is set to 5 years by default. + + +### Certificate Collections + +`.pfx` files may contain a single certificate or a collection of certificates. + +When uploading a `.pfx` file with multiple certificates, RavenDB will add all of the certificates to the list of registered certificates as one entry +and will allow access to all these certificates explicitly by their thumbprint. +### Generating Client Certificates Via Command Line Interface + +* RavenDB provides an intuitive certificates management GUI in the Studio. + +* All of the operations which are described above are also available via the Command Line Interface (CLI). + - Be sure to configure the `SecurityClearance` for each client certificate because the default is [cluster admin](../../../server/security/authorization/security-clearance-and-permissions.mdx#cluster-admin) which has full access. + - There are CLI-based means to [generate](../../../server/security/authentication/client-certificate-usage.mdx#example-i---using-the-ravendb-cli) and [configure client certificates in Windows](../../../server/security/authentication/client-certificate-usage.mdx#example-ii---using-powershell-and-wget-in-windows). + - [Linux](../../../server/security/authentication/client-certificate-usage.mdx#example-iii--using-curl-in-linux) developers can use this cURL command sample. +### Private Keys + +It's important to note that RavenDB does _not_ keep track of the certificate's private key. Whether you generate a client certificate +via RavenDB or upload an existing client certificate, the private key is not retained. If a certificate is lost, you'll +need to create a new certificate, assign the same permissions, and distribute the certificate again. + + +If two different RavenDB clusters are communicating securely, and the source cluster has its certificate renewed, the destination cluster could +still trust this new certificate - provided that the new certificate is signed with the same private key as the original, and was issued by the +same certificate authority. This is accomplished using a [public key pinning hash](../../../server/security/authentication/certificate-renewal-and-rotation.mdx#implicit-trust-by-public-key-pinning-hash). + +### Client Certificate Chain of Trust + +As mentioned above, RavenDB generates client certificates by signing them using the server certificate. +A typical server certificate doesn't allow it to act as an Intermediate Certificate Authority that signs other certificates. +This is the case with Let's Encrypt certificates.
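+If you want to see this signing relationship on a certificate of your own, OpenSSL can print the issuer directly. A minimal sketch, assuming you've exported the generated client certificate as a `.pfx` file (the file names are illustrative):
+
+{`# Extract the certificate part of the .pfx:
+openssl pkcs12 -in hr.client.certificate.pfx -clcerts -nokeys -out hr.client.pem
+
+# For a client certificate generated by RavenDB, the issuer printed here
+# is the server certificate that signed it:
+openssl x509 -in hr.client.pem -noout -subject -issuer
+`}
+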
+ +The left side of the following screenshot shows a newly generated client certificate, signed by a Let's Encrypt server certificate. +You cannot see the full chain of trust because the OS (Windows) doesn't have knowledge of the server certificate. + +If you wish to view the full chain, add the server certificate to the OS trusted store. This step is **not necessary** for RavenDB +and is explained here only to show how to view the full chain in Windows. The right side of the screenshot shows the full chain. + + +![Figure 7. Client Certificate Chain](./assets/client-cert.png) + +Because client certificates are managed by RavenDB directly and not through any PKI infrastructure, **this is perfectly acceptable**. +Authenticating a client certificate is done explicitly by looking for the thumbprint in the registered certificates list in the server +and not by validating the chain of trust. + + + diff --git a/versioned_docs/version-7.1/server/security/authentication/certificate-renewal-and-rotation.mdx b/versioned_docs/version-7.1/server/security/authentication/certificate-renewal-and-rotation.mdx new file mode 100644 index 0000000000..bd21e4fef3 --- /dev/null +++ b/versioned_docs/version-7.1/server/security/authentication/certificate-renewal-and-rotation.mdx @@ -0,0 +1,109 @@ +--- +title: "Authentication: Certificate Renewal & Rotation" +hide_table_of_contents: true +sidebar_label: Certificate Renewal & Rotation +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Authentication: Certificate Renewal & Rotation + +X.509 certificates have expiration dates and must be renewed periodically. + +When using the Setup Wizard to obtain a Let's Encrypt certificate, you don't have to worry about this. Read about [Automatic Let's Encrypt Renewals in RavenDB](../../../server/security/authentication/lets-encrypt-certificates.mdx). + +If you provided your own certificate to RavenDB, it is **your responsibility** to renew it. + +Once you have a new valid certificate for your server/cluster you need to make RavenDB use it instead of the currently loaded certificate. Replacing a certificate in the cluster is a distributed operation which requires all the nodes to confirm the replacement. The actual update will happen when all nodes of the cluster confirm the replacement or when there are 3 days left until expiration. + +You can also ignore these limits and replace the certificates immediately, but beware of this option: nodes which didn't confirm the replacement will not be able to re-join the cluster and will have to be set up manually. This means the new certificate will have to be placed manually in that node. + +To manually replace the server certificate you can either edit [settings.json](../../configuration/configuration-options.mdx#json) with a new certificate path and restart the server, or you can overwrite the existing certificate file and the server will pick it up within one hour without requiring a restart. + + +The new certificate must contain all of the cluster domain names in the CN or SAN properties of the certificate. Otherwise you will get an authentication error because SSL/TLS requires the domain in the certificate to match the actual domain being used.
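+Before starting a replacement, you can confirm which domain names the new certificate actually covers. A minimal sketch using OpenSSL (the `-ext` option requires OpenSSL 1.1.1 or later; the file names are illustrative):
+
+{`# Convert the new .pfx to PEM so openssl x509 can read it:
+openssl pkcs12 -in new.certificate.pfx -clcerts -nokeys -out new.certificate.pem
+
+# Print the Common Name and the Subject Alternative Names - every cluster
+# domain name must appear in one of these:
+openssl x509 -in new.certificate.pem -noout -subject -ext subjectAltName
+`}
+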
+ + +## Replace the Cluster Certificate Using the Studio + +Access the certificate view, click on `Cluster certificate` -> `Replace cluster certificate` and upload the new certificate PFX file. + +This will start the certificate replacement process. + +When running as a cluster, the replacement process is a distributed operation. It involves sending the new certificate to all nodes, and requires all nodes to confirm receipt and replacement of the certificate. + +Only when all nodes have confirmed will the cluster start using the new certificate. + +If a node is not responding during the replacement, the operation will not complete until one of the following happens: + +* The node comes back online. It should pick up the replacement command and join the replacement process automatically. + +* There are only 3 days left until the certificate expires. In this case, the cluster will complete the operation without the node which is down. When bringing that node up, the certificate must be replaced manually. + +* `Replace immediately` is chosen. In this case, the cluster will complete the operation without the node which is down. When bringing that node up, the certificate must be replaced manually. + +During the process you will receive alerts in the Studio and in the logs indicating the status of the operation and any errors if they occur. The alerts are displayed for each node independently. + +## Replace the Cluster Certificate Using PowerShell + +Here is a little example of using the REST API directly with PowerShell to replace the cluster certificate: + +Note the optional password parameter indicated in the second line as ``. If the certificate you are replacing +has no password, leave the parameter blank. + + + +{`[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 + +$clientCert = Get-PfxCertificate -FilePath C:\\path\\to\\client\\cert\\admin.client.certificate.raven.pfx + +$newCert = get-content 'C:\\path\\to\\server\\cert\\new.certificate.pfx' -Encoding Byte + +$newCertBase64 = [System.Convert]::ToBase64String($newCert) + +$payload = @\{ + Name = "MyNewCert"; + Certificate = $newCertBase64; +\} | ConvertTo-Json + +$response = Invoke-WebRequest https://b.raven.development.run:8080/admin/certificates/replace-cluster-cert -Certificate $clientCert -Method POST -Body $payload -ContentType "application/json" +`} + + + +## Implicit Trust by Public Key Pinning Hash + +As mentioned, RavenDB could only trust certificates which were explicitly registered in the cluster. Now, consider the following scenario: + +Two secured RavenDB clusters with ETL or External Replication defined between the two. The destination cluster trusts (explicitly) the source cluster's certificate. +Once the source cluster renewed its certificate, the destination cluster would no longer trust it because the thumbprint had changed. +In such a case, the admin had to go to the destination and manually tell it to trust the new source certificate. + +The problem was addressed in RavenDB 4.2 where Implicit Trust by `Public Key Pinning Hash` was introduced. If the source cluster renews its certificate by +using **the same private key**, the new certificate will have the same `Public Key Pinning Hash`, and the destination cluster will be able to trust the new certificate. It will also be registered (implicitly) for future connections. + + +For security reasons, this feature will only work if the new certificate was issued by the same certificate authority as the original certificate.
+ + +When using the RavenDB Let's Encrypt solution, all of the renewals and certificate signing are handled for you (using the same private key). This means you don't need to do **anything**; the whole process is transparent. +But if you provide your own certificate, then in order to use this feature you must use the same issuer and sign the certificate with the same private key as the one you are renewing. + +This feature means that you can drastically reduce the amount of work that an admin has to do in a multi-cluster topology and leaves you with a system that you set up once and just keeps working. + + +To find out the public key pinning hash of your server certificate, take a look at this [C# code](https://github.com/ravendb/ravendb/blob/v4.2/src/Raven.Server/Utils/CertificateUtils.cs#L314) or issue the following command against your running server using OpenSSL: + + +{`openssl s_client -servername my.ravendb.cluster.com -connect my.ravendb.cluster.com:443 | openssl x509 -pubkey -noout | openssl rsa -pubin -outform der | openssl dgst -sha256 -binary | openssl enc -base64 +`} + + + + diff --git a/versioned_docs/version-7.1/server/security/authentication/client-certificate-usage.mdx b/versioned_docs/version-7.1/server/security/authentication/client-certificate-usage.mdx new file mode 100644 index 0000000000..e2b1e0f8ee --- /dev/null +++ b/versioned_docs/version-7.1/server/security/authentication/client-certificate-usage.mdx @@ -0,0 +1,141 @@ +--- +title: "Authentication: Client Certificate Usage" +hide_table_of_contents: true +sidebar_label: Client Certificate Usage +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Authentication: Client Certificate Usage + +* If you have already securely set up a server certificate, you'll need to use client certificates to connect to a RavenDB server. + +* You can read about the logic and rationale behind RavenDB's client certificates in [The RavenDB Security Authorization Approach](../../../server/security/authentication/certificate-management.mdx#the-ravendb-security-authorization-approach) to ensure that you configure them appropriately. + +## Obtaining Your First Admin Client Certificate + +When RavenDB is running with a server certificate for the first time, there are no client certificates registered in the server yet. The first action an administrator will take is to generate/register an admin client certificate. + + +This operation is only required when doing a **manual** secured setup. If you are using the automated [Setup Wizard](../../../start/installation/setup-wizard.mdx), an admin client certificate will be generated for you as part of the wizard. + + +### Example I - Using the RavenDB CLI + +If you have access to the server, the simplest way is to use the RavenDB CLI: + + + +{`ravendb> generateClientCert [password] +`} + + + +This will generate a new certificate with a `Cluster Admin` Security Clearance. + +If you wish to use your own client certificate, you can have RavenDB trust it: + + + +{`ravendb> trustClientCert [password] +`} + + + +The certificate will be registered as a trusted certificate with a `Cluster Admin` Security Clearance. + +### Example II - Using PowerShell and Wget in Windows + +You can use a client to make an HTTP request to the server.
At this point you only have a **server certificate** and you will use it (acting as the client certificate). + +Assume we started the server with the following [settings.json](../../configuration/configuration-options.mdx#json): + + + +{`\{ + "ServerUrl": "https://rvn-srv-1:8080", + "Setup.Mode": "None", + "DataDir": "c:/RavenData", + "Security.Certificate.Path": "c:/secrets/server.pfx", + "Security.Certificate.Password": "s3cr7t p@$$w0rd" +\} +`} + + + +We can use wget to request a `Cluster Admin` certificate. This will be the payload of the POST request: + + + +{`\{ + "Name": "cluster.admin.client.certificate", + "SecurityClearance": "ClusterAdmin", + "Password": "p@$$w0rd" +\} +`} + + + +First, load the server certificate: + + + +{`$cert = Get-PfxCertificate -FilePath c:/secrets/server.pfx +`} + + + +Then make the request: + + + +{`wget -UseBasicParsing -Method POST -Certificate $cert -OutFile "cluster.admin.cert.zip" -Body '\{"Name": "cluster.admin.client.certificate","SecurityClearance": "ClusterAdmin","Password": "p@$$w0rd"\}' -ContentType application/json "https://rvn-srv-1:8080/admin/certificates" +`} + + + +### Example III : Using cURL in Linux + +At this point you only have a **server certificate** and you will use it (acting as the client certificate). +First, we will convert the .pfx certificate to .pem: + + +{`openssl pkcs12 -in cluster.server.certificate.example.pfx -out server.pem -clcerts +`} + + + + +You must provide a password when creating the .pem file; cURL will only accept a password-protected certificate. + + +Then make the request: + + +{`curl -X POST -H "Content-Type: application/json" -d '\{"Name": "cluster.admin.client.certificate","SecurityClearance": "ClusterAdmin","Password": "p@$$w0rd"\}' -o cluster.admin.cert.zip https://rvn-srv-1:8080/admin/certificates --cert /home/secrets/server.pem:pem_password +`} + + + +## Using Client Certificates + +Once you have the admin client certificate, you can access the server/cluster by using the Studio, +the Client API or any other client. + + +[Read Here](../../../studio/overview.mdx#accessing-studio-after-setup) +about gaining management access to RavenDB after setup. + + +It is recommended to generate additional certificates with reduced access rights for applications +and users. +Wiring a certificate in the RavenDB Client is described in the +[setting up authentication and authorization](../../../client-api/setting-up-authentication-and-authorization.mdx) +section of the Client API. + diff --git a/versioned_docs/version-7.1/server/security/authentication/lets-encrypt-certificates.mdx b/versioned_docs/version-7.1/server/security/authentication/lets-encrypt-certificates.mdx new file mode 100644 index 0000000000..6615f88c0f --- /dev/null +++ b/versioned_docs/version-7.1/server/security/authentication/lets-encrypt-certificates.mdx @@ -0,0 +1,113 @@ +--- +title: "Authentication: Let's Encrypt Certificates" +hide_table_of_contents: true +sidebar_label: Let's Encrypt Certificates +sidebar_position: 4 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Authentication: Let's Encrypt Certificates + +RavenDB 4.x uses X.509 certificates for authentication and authorization and has **built-in support** for [Let's Encrypt](https://letsencrypt.org/).
+ +## Obtain a Let's Encrypt Certificate + +The [Setup Wizard Walkthrough](../../../start/installation/setup-wizard.mdx) explains how to obtain a free Let's Encrypt certificate for your server or cluster. + +It's a wildcard certificate, so if you choose the domain `example` during the wizard (with the community license), the generated certificate will have the common name (CN) `*.example.ravendb.community`. + +## Automatic Renewal for Let's Encrypt certificates obtained via RavenDB + +Let's Encrypt certificates have a [90-day lifetime policy](https://letsencrypt.org/2015/11/09/why-90-days.html). + +In RavenDB, you don't need to worry about renewals. RavenDB takes care of this for you. + +When there are 30 days left until expiration, RavenDB will initiate the certificate renewal and replacement process. The actual request to Let's Encrypt will happen on the next Saturday. + +Once the renewed certificate is obtained, [it will be replaced](../../../server/security/authentication/certificate-renewal-and-rotation.mdx) in all the nodes of the cluster without needing to shut down any server. + + +Automatic certificate renewal is available only if you obtained your certificate using the Setup Wizard and got your free RavenDB domain. Self-obtained certificates will not renew automatically, even if issued by Let's Encrypt. + + +When running as a cluster, the replacement process is a distributed operation. It involves sending the new certificate to all nodes, and requires all nodes to confirm that they have received and replaced the certificate. + +Only when all nodes have confirmed will the cluster start using this new certificate. + +If a node is not responding during the replacement, the operation will not complete until one of the following happens: + +* The node comes back online. It should pick up the replacement command and join the replacement process automatically. + +* There are only 3 days left until the certificate expires. In this case, the cluster will complete the operation without the node which is down. **When bringing that node up, the certificate must be replaced manually.** + +During the process you will receive alerts in the Studio and in the logs indicating the status of the operation and any errors if they occur. The alerts are displayed for each node independently. + +## Automatic Renewal for self-obtained certificates + +When you set up RavenDB with your own Let's Encrypt certificate, the renewal mechanism will not work because RavenDB doesn't control your domain and cannot pass the Let's Encrypt challenge that proves ownership of a domain. +However, you can (quite easily) enable automatic renewals for your Let's Encrypt certificate via [Certbot](https://certbot.eff.org/). + +Certbot is not available in Windows, but you can use a C# client called [Certes](https://github.com/fszlin/certes/), or [other similar projects](https://letsencrypt.org/docs/client-options/) that automate the certificate process. + +First, install and configure Certbot on your machine. Here's a [nice tutorial](https://medium.com/prog-code/lets-encrypt-wildcard-certificate-configuration-with-aws-route-53-9c15adb936a7) to get you started. +You should also download the appropriate DNS plugin for Certbot. This example uses [Amazon's Route53](https://certbot-dns-route53.readthedocs.io/en/stable/), but [many other services](https://certbot.eff.org/docs/using.html#dns-plugins) are supported. + +Set the credentials for your DNS service.
In Route53 it's done by creating a user with an [IAM policy](https://certbot-dns-route53.readthedocs.io/en/stable/#sample-aws-policy-json) to allow changing DNS records. The credentials can then be set in the server as environment variables or via the AWS config file at `~/.aws/config`. + +Now that Certbot is ready, you can create an executable script that will run the certbot command whenever RavenDB runs it; this way the certificate will keep renewing itself. + +When using the `Security.Certificate.Load.Exec` option, RavenDB expects to get the raw binary representation (byte array) of the .pfx certificate through +the standard output. [See this example](../../../server/security/authentication/certificate-configuration.mdx) of how to write a file to standard output in +PowerShell. + +Here's a little script, `certificate.sh`, that demonstrates this feature. It renews the certificate or creates it on the first run, uses `openssl` to convert the received file to .pfx and writes it to the standard output for RavenDB to consume. + + +{`certbot -d '*.test.ravendb.cloud' certonly --config-dir ~/.certbot/config --logs-dir ~/.certbot/logs --work-dir ~/.certbot/work --dns-route53 --dns-route53-propagation-seconds 30 --non-interactive --agree-tos -m name@mail.com > /dev/null 2>&1 +openssl pkcs12 -inkey ~/.certbot/config/live/test.ravendb.cloud/privkey.pem -in ~/.certbot/config/live/test.ravendb.cloud/cert.pem -export -out ./cert.pfx -passout pass: +cat -u ./cert.pfx +`} + + + + +Use unbuffered I/O (the -u flag) when writing the certificate to the standard output; otherwise RavenDB might get a partial file and fail to load the certificate. + + +To enable the script, add the following to settings.json: + + + +{`"Security.Certificate.Load.Exec": "/bin/bash", +"Security.Certificate.Load.Exec.Arguments": "certificate.sh" +`} + + + + +If two different RavenDB clusters are communicating securely, and the source cluster has its certificate renewed, the destination cluster could +still trust this new certificate - provided that the new certificate is signed with the same private key as the original, and was issued by the +same certificate authority. This is accomplished using a [public key pinning hash](../../../server/security/authentication/certificate-renewal-and-rotation.mdx#implicit-trust-by-public-key-pinning-hash). + + +## Manual Renewal + +When using RavenDB's Let's Encrypt support, you can initiate the renewal process manually by going to the certificate view in the Studio and clicking +`Renew` on the server certificate. This will trigger the same certificate replacement process which was described [above](../../../server/security/authentication/lets-encrypt-certificates.mdx#automatic-renewal-for-lets-encrypt-certificates-obtained-via-ravendb). + +If a node is down and you click `Renew`, the cluster will complete the operation without the node that is down. **When bringing that node up, the +certificate must be replaced manually.** + + +## Updating DNS records + +Updating DNS records for your domain can be achieved by running the Setup Wizard again or by using a dedicated page at the RavenDB website. + +You can easily edit the DNS records which are associated with your license using the [Customers Portal](https://customers.ravendb.net).
+ diff --git a/versioned_docs/version-7.1/server/security/authentication/solve-cluster-certificate-renewal-issue.mdx b/versioned_docs/version-7.1/server/security/authentication/solve-cluster-certificate-renewal-issue.mdx new file mode 100644 index 0000000000..67fae408e1 --- /dev/null +++ b/versioned_docs/version-7.1/server/security/authentication/solve-cluster-certificate-renewal-issue.mdx @@ -0,0 +1,145 @@ +--- +title: "Let's Encrypt: Solve Certificate Renewal Issue" +hide_table_of_contents: true +sidebar_label: Solve Certificate Renewal Issue +sidebar_position: 5 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Let's Encrypt: Solve Certificate Renewal Issue + +* If you have External Replication or ETL to another cluster, or if you use + your own Let's Encrypt certificates as client certificates, the next certificate + renewal may cause permission issues that need to be handled manually. + +* Find [whether this issue is relevant for you](../../../server/security/authentication/solve-cluster-certificate-renewal-issue.mdx#current-situation) + and if so, [how to handle it](../../../server/security/authentication/solve-cluster-certificate-renewal-issue.mdx#solutions). + +* **In this page**: + * [Errors Relevant To This Issue](../../../server/security/authentication/solve-cluster-certificate-renewal-issue.mdx#errors-relevant-to-this-issue) + * [Background And Current Situation](../../../server/security/authentication/solve-cluster-certificate-renewal-issue.mdx#background-and-current-situation) + * [Solutions](../../../server/security/authentication/solve-cluster-certificate-renewal-issue.mdx#solutions) + +## Errors Relevant To This Issue + +If your current cluster certificate is operative, you will encounter +the errors we discuss here only when an attempt is made to renew the certificate. + +When the cluster certificate is renewed, authentication errors will appear +as **Studio alerts** and/or **responses to client requests**. + +* The errors will also appear in the logs. + ``` + Raven.Server.Documents.ETL.Providers.Raven.RavenEtl, Failed to load transformed data for 'ETL Script', EXCEPTION: Raven.Client.Exceptions.Security.AuthorizationException: Forbidden access to db1@https://a.some.one.ravendb.cloud, does not have permission to access it or is unknown. Method: GET, Request: https://a.some.one.ravendb.cloud/topology?name=db1&first-topology-update + {"Type":"InvalidAuth","Message":"The supplied client certificate 'CN=\*.some.one.ravendb.cloud(Thumbprint: A632C1DBD145B2102CB70D254B7EC1C813444766)' is unknown to the server but has a known Public Key Pinning Hash. Will not use it to authenticate because the issuer is unknown. To fix this, the admin can register the pinning hash of the \*issuer\* certificate: 'jQJTbIh0grw0/1TkHSumWb+Fs0Ggogr621gT3PvPKG0=' in the 'Security.WellKnownIssuer.Admin' configuration entry."} + ``` + + The Audit Log will contain the following entry: + ``` + Information, AuthenticateCertificate, Audit, Connection from with certificate 'CN=*.some.one.ravendb.cloud (A632C1DBD145B2102CB70D254B7EC1C813444766)' which is not registered in the cluster. 
+ Tried to allow the connection implicitly based on the client certificate's Public Key Pinning Hash but the client certificate was signed by an unknown issuer - closing the connection. + To fix this, the admin can register the pinning hash of the *issuer* certificate: 'jQJTbIh0grw0/1TkHSumWb+Fs0Ggogr621gT3PvPKG0=' in the 'Security.WellKnownIssuers.Admin' configuration entry. + Alternatively, the admin can register the actual certificate ( 'A632C1DBD145B2102CB70D254B7EC1C813444766') explicitly in the cluster. + ``` + +## Background and Current Situation + +### Background + +* **Original Problem** + RavenDB versions preceding 4.2 presented the following administration problem when + ETL or External Replication was defined between two secured RavenDB clusters: + The destination cluster trusts (explicitly) the source cluster's certificate. + Once the source cluster renewed its certificate, the destination cluster would + no longer trust it because the thumbprint had changed. + To solve this, the admin had to access the destination cluster and manually specify + that it can trust the new source certificate. + +* **Original Solution** + To solve this problem, we introduced [Implicit Trust](../../../server/security/authentication/certificate-renewal-and-rotation.mdx#implicit-trust-by-public-key-pinning-hash) + by using the certificate's **Public Key Pinning Hash**. + Now, if the source cluster renews its certificate **by using the same private key and + issuer**, the new certificate will have the same Public Key Pinning Hash, and the + destination cluster will be able to trust the new certificate. + It will also be registered (implicitly) for future connections. + +* **What you gain** + Using this feature means that you can drastically reduce the amount of work + an admin has to perform in a multi-cluster topology and gain a system that + is set up once and just keeps working, **as long as you keep using the same + private key and issuer**. +### Current Situation + +In September 2020, Let's Encrypt changed their certificate issuer to support full ECDSA +certificate chains. You can [read about it here](https://letsencrypt.org/2020/09/17/new-root-and-intermediates.html). + +As a result, RavenDB users that use the Public Key Pinning Hash solution need to +take a few manual steps. + +This is relevant for you: + +1. If you use RavenDB ETL or perform External Replication between two + RavenDB clusters. + * If you use RavenDB Cloud where certificates are renewed automatically. + * If you used our Setup Wizard with Let's Encrypt, and certificates are + renewed automatically. + * If you used your own Let's Encrypt **cluster** certificate, and you + renew it yourself with the same private key. +2. If you created your own Let's Encrypt **client** certificates, and you renew + them using the same private key. + + + +## Solutions + +### Solution 1: + +Register the pinning hash of the old issuer in the destination cluster. In settings.json, +on every node of the destination cluster add the following: + + + +{`"Security.WellKnownIssuers.Admin": "jQJTbIh0grw0/1TkHSumWb+Fs0Ggogr621gT3PvPKG0=" +`} + + + +The hash in the example is the Public Key Pinning Hash of the Let's Encrypt X3 intermediate +certificate authority which was previously used to sign Let's Encrypt certificates. + +This solution is stronger: it ensures a smooth, transparent transition when the source cluster +certificate renews itself and the issuer actually changes. The advantage here is that +there is no downtime.
+ + The disadvantage is that an admin needs to access the machines themselves. They need to edit +the settings.json file on all nodes and restart the RavenDB service (node-by-node for no downtime). +### Solution 2: + +1. In the case of ETL or External Replication, go to the source cluster and renew the Let's Encrypt + cluster certificate. Then export it and register it in the destination cluster. + + * Click `Renew` on the server certificate in one of the nodes of the source cluster. + ![Figure 1. Renew Server Certificate](./assets/renew_server_certificate.png) + * Export the new cluster certificate: + ![Figure 2. Export Cluster Certificate](./assets/export_cluster_certificates.png) + * Use the destination cluster's Studio to upload the exported pfx file as a client certificate. + ![Figure 3. Upload Client Certificate](./assets/upload-client-certificate.png) + + +2. In case you are using your own Let's Encrypt client certificates, simply renew them + (on your own) and re-register them in the certificate view in the Studio ([Upload client + certificate](../../../studio/database/tasks/import-data/import-from-ravendb#step-#1:-prepare-servers-for-the-import-process-(secure-4.x-servers-only))). + +This solution is easier and doesn't require access to the machines themselves, just an admin +certificate. The disadvantage is a short downtime in the service, because you must renew the +certificate first, and only then can you export it and upload it to the destination cluster. +This can cause some delay in the ETL or Replication service. + + + diff --git a/versioned_docs/version-7.1/server/security/authorization/_category_.json b/versioned_docs/version-7.1/server/security/authorization/_category_.json new file mode 100644 index 0000000000..a95c045d8e --- /dev/null +++ b/versioned_docs/version-7.1/server/security/authorization/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 2, + "label": "Authorization" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/server/security/authorization/security-clearance-and-permissions.mdx b/versioned_docs/version-7.1/server/security/authorization/security-clearance-and-permissions.mdx new file mode 100644 index 0000000000..bc21982f24 --- /dev/null +++ b/versioned_docs/version-7.1/server/security/authorization/security-clearance-and-permissions.mdx @@ -0,0 +1,141 @@ +--- +title: "Authorization: Security Clearance and Permissions" +hide_table_of_contents: true +sidebar_label: Security Clearance and Permissions +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Authorization: Security Clearance and Permissions + +* X.509 certificates are used for authentication - validating that users are who they say they are. + Once a connection is authenticated, RavenDB uses the certificate for authorization as well. + +* Each certificate is associated with a security clearance and access permissions per database. + +* It is the administrator's responsibility to generate client certificates and assign permissions. + Read more in the [Certificate Management](../authentication/certificate-management.mdx) page. + +* A client certificate's security clearance can be one of the following: Cluster Admin, Operator, User.
+ +* In this page: + * [Cluster Admin](../../../server/security/authorization/security-clearance-and-permissions.mdx#cluster-admin) + * [Operator](../../../server/security/authorization/security-clearance-and-permissions.mdx#operator) + * [User](../../../server/security/authorization/security-clearance-and-permissions.mdx#user) + * [Admin](../../../server/security/authorization/security-clearance-and-permissions.mdx#section) + * [Read/Write](../../../server/security/authorization/security-clearance-and-permissions.mdx#section-1) + * [Read Only](../../../server/security/authorization/security-clearance-and-permissions.mdx#section-2) + +## Cluster Admin + +`Cluster Admin` is the highest security clearance. There are no restrictions. A `Cluster Admin` certificate has admin permissions to all databases. It also has the ability to modify the cluster itself. + +The following operations are allowed **only** for `Cluster Admin` certificates: + +- All cluster operations +- Manage `Cluster Admin` certificates +- Replace and renew server certificates +- Use the Admin JS Console +- Activate or update the license +- Get SNMP used OIDs + + + +## Operator + +A client certificate with an `Operator` security clearance has admin access to all databases +but is unable to modify the cluster. It cannot perform operations such as +add/remove/promote/demote nodes from the cluster. This is useful in a hosted solution +(such as **RavenDB Cloud**). If you are running on your own machines, you'll typically ignore +that level in favor of `Cluster Admin` or `User`. + +The following operations are allowed for **both** `Operator` and `Cluster Admin` certificates and are not allowed for `User` certificates: + +- Operations on databases (put, delete, enable, disable) +- Manage `Operator` and `User` certificates +- Enable and disable an ongoing task +- Define External Replication +- Create and delete RavenDB ETL and SQL ETL +- Migrate databases +- View cluster observer logs +- View admin logs +- Gather local and cluster debug info (process, memory, cpu, threads) +- Use smuggler +- Use the traffic watch +- Put cluster-wide client configuration (Max number of requests per session, Read balance behavior) +- Get the database record +- Manage database groups in the cluster +- Restore databases from backup +- Perform database and index compaction +- Get server metrics (request/sec, indexed/sec, batch size, etc...) +- Get remote server build info + + + +## User + +A client certificate with a `User` security clearance cannot perform any admin operations at the cluster level. +Unlike the other clearance levels, a `User` client certificate can grant different access levels to different databases. +These access levels are, from highest to lowest: + +* **Admin** +* **Read/Write** +* **Read Only** + +If no access level is defined for a particular database, the certificate doesn't grant access to that database at all. 
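+As a rough sketch of how per-database permissions look when requesting a certificate over the REST API: the endpoint and the `Name`/`SecurityClearance` fields follow the examples in [Client Certificate Usage](../authentication/client-certificate-usage.mdx), while the `ValidUser` clearance value, the `Permissions` map, and all names and URLs here are assumptions to verify against your server version:
+
+{`# Request a "User"-clearance certificate limited to two databases (names illustrative):
+curl -X POST -H "Content-Type: application/json" -d '\{"Name": "app.client.certificate","SecurityClearance": "ValidUser","Permissions": \{"HR": "ReadWrite","PublicCatalog": "Read"\}\}' -o app.client.cert.zip https://rvn-srv-1:8080/admin/certificates --cert /home/secrets/admin.client.pem:pem_password
+`}
+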
+### `Admin` + +The following operations are permitted at the `Admin` access level but not for `Read/Write` or `Read Only`: + +- Operations on indexes (put, delete, start, stop, enable and disable) +- Solve replication conflicts +- Configure revisions and delete revision documents +- Define expiration +- Create backups and define periodic backups +- Operations on connection strings (put, get, delete) +- Put client configuration for the database (Max number of requests per session, Read balance behavior) +- Get transaction info +- Perform SQL migration +### `Read/Write` + +A `User` certificate with a `Read/Write` access level can perform all operations **except** for those listed above in the 'Admin' and 'Operator' sections. + + * [JavaScript static indexes](../../../indexes/javascript-indexes.mdx) are permitted by default with Read/Write User certificates. + To configure a server or database so that only Admin certificates will be able to deploy JavaScript static indexes, + set [Indexing.Static.RequireAdminToDeployJavaScriptIndexes](../../../server/configuration/indexing-configuration.mdx#indexingstaticrequireadmintodeployjavascriptindexes) + to `true`. +### `Read Only` + +The `ReadOnly` access level **allows** clients to: + +- Read data from a database, but not to write or modify data. +- Be subscription workers to consume data subscriptions. +- Query the databases that are configured in the client certificate. + + [An Auto-index](../../../indexes/creating-and-deploying.mdx#auto-indexes) + is built if there is no existing index that satisfies a query. + + + + +The following operations are **forbidden**: + +- Creating documents or modifying existing documents +- Changing any configurations or settings +- Creating or modifying [ongoing tasks](../../../studio/database/tasks/ongoing-tasks/general-info.mdx) +- Defining [static indexes](../../../indexes/creating-and-deploying.mdx#static-indexes) (the database will create +[auto-indexes](../../../indexes/creating-and-deploying.mdx#auto-indexes) if there is no existing index that satisfies a query). + + + + +Learn more about the `Read Only` access level [here](../../../studio/server/certificates/read-only-access-level.mdx). + + + diff --git a/versioned_docs/version-7.1/server/security/common-errors-and-faq.mdx b/versioned_docs/version-7.1/server/security/common-errors-and-faq.mdx new file mode 100644 index 0000000000..0ffef0069f --- /dev/null +++ b/versioned_docs/version-7.1/server/security/common-errors-and-faq.mdx @@ -0,0 +1,519 @@ +--- +title: "Security: Common Errors & FAQ" +hide_table_of_contents: true +sidebar_label: Common Errors & FAQ +sidebar_position: 6 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Security: Common Errors & FAQ + + +* This article explains some of the common security configuration errors and how to handle them.
+
+* In this page:
+  * [Setup Wizard Issues](../../server/security/common-errors-and-faq.mdx#setup-wizard-issues)
+  * [Changing Configurations and Renewals Issues](../../server/security/common-errors-and-faq.mdx#changing-configurations-and-renewals-issues)
+  * [Authentication Issues](../../server/security/common-errors-and-faq.mdx#authentication-issues)
+  * [Encryption Issues](../../server/security/common-errors-and-faq.mdx#encryption-issues)
+
+
+
+## Setup Wizard Issues
+
+* [Server cannot bind to the provided private IP address](../../server/security/common-errors-and-faq.mdx#server-cannot-bind-to-the-provided-private-ip-address)
+* [Ports are blocked by the firewall](../../server/security/common-errors-and-faq.mdx#ports-are-blocked-by-the-firewall)
+* [DNS is cached locally](../../server/security/common-errors-and-faq.mdx#dns-is-cached-locally)
+* [Long DNS propagation time](../../server/security/common-errors-and-faq.mdx#long-dns-propagation-time)
+* [If I already have the Zip file, can I avoid repeating the setup process?](../../server/security/common-errors-and-faq.mdx#if-i-already-have-the-zip-file-can-i-avoid-repeating-the-setup-process)
+### Server cannot bind to the provided private IP address
+
+If the IP/port is not accessible on your machine, you'll get the following error.
+
+
+
+{`System.InvalidOperationException: Setting up RavenDB in Let's Encrypt security mode failed. --->
+System.InvalidOperationException: Validation failed. --->
+System.InvalidOperationException: Failed to simulate running the server with the supplied settings using: https://a.example.ravendb.community:4433 --->
+System.InvalidOperationException: Failed to start webhost on node 'A'. The specified ip address might not be reachable due to network issues.
+It can happen if the ip is external (behind a firewall, docker). If this is the case, try going back to the previous screen and add the same ip as an external ip.
+Settings file: D:\\temp\\RavenDB-4.0.0-windows-x64\\Server\\settings.json.
+IP addresses: 10.0.0.65:4433.
+---> Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal.Networking.UvException: Error -4092 EACCES permission denied
+`}
+
+
+
+This can happen for one of two reasons:
+
+1. Your private IP address is not reachable inside the machine, or you provided the wrong IP/port.
+2. You are running behind a firewall (VM, Docker, etc.) and accidentally provided the external IP address during setup.
+
+Make sure you provide the private IP address in the "IP Address / Hostname" field as seen in [this example](../../start/installation/setup-wizard.mdx#example-iii---behind-a-firewall).
+### Ports are blocked by the firewall
+
+When configuring a VM in Azure, [AWS](../../start/installation/setup-examples/aws-windows-vm.mdx), or any other provider, you should define firewall rules to allow both the **HTTP** and **TCP** ports you have chosen during setup.
+This should be done both inside the VM operating system **and** in the web dashboard or management console.
+
+If ports are blocked, you'll get the following error.
+
+
+{`Setting up RavenDB in Let's Encrypt security mode failed.
+System.InvalidOperationException: Setting up RavenDB in Let's Encrypt security mode failed. --->
+System.InvalidOperationException: Validation failed. --->
+System.InvalidOperationException: Failed to simulate running the server with the supplied settings using: https://a.example.development.run:443 --->
+System.InvalidOperationException: Client failed to contact webhost listening to 'https://a.example.development.run:443'.
+Are you blocked by a firewall? Make sure the port is open.
+Settings file: D:\\RavenDB-4.0.0-windows-x64\\Server\\settings.json.
+IP addresses: 10.0.1.4:443.
+`}
+
+
+### DNS is cached locally
+
+Most networks cache DNS records. In some environments you can get an error such as this:
+
+
+
+{`Setting up RavenDB in Let's Encrypt security mode failed.
+System.InvalidOperationException: Setting up RavenDB in Let's Encrypt security mode failed. --->
+System.InvalidOperationException: Validation failed. --->
+System.InvalidOperationException: Failed to simulate running the server with the supplied settings using: https://a.onenode.development.run --->
+System.InvalidOperationException: Tried to resolve 'a.onenode.development.run' locally but got an outdated result.
+Expected to get these ips: 127.0.0.1 while the actual result was: 10.0.0.65
+If we try resolving through google's api (https://dns.google.com), it works well.
+Try to clear your local/network DNS cache or wait a few minutes and try again.
+Another temporary solution is to configure your local network connection to use google's DNS server (8.8.8.8).
+`}
+
+
+
+This error probably means that the DNS is cached. You can wait a few minutes or reset the network DNS cache,
+but in many cases, the easiest solution is to [temporarily switch your DNS server to 8.8.8.8](https://developers.google.com/speed/public-dns/docs/using).
+You can then click the `Try Again` button to restart the validation process of the Setup Wizard.
+### Long DNS propagation time
+
+If you are trying to modify existing DNS records, for example running the Setup Wizard again for the same domain name, you may encounter errors such as this:
+
+
+
+{`Setting up RavenDB in Let's Encrypt security mode failed.
+
+System.InvalidOperationException: Setting up RavenDB in Let's Encrypt security mode failed. --->
+System.InvalidOperationException: Validation failed. --->
+System.InvalidOperationException: Failed to simulate running the server with the supplied settings using: https://a.example.development.run --->
+System.InvalidOperationException: Tried to resolve 'a.example.development.run' using google's api (https://dns.google.com).
+Expected to get these ips: 127.0.0.1 while google's actual result was: 10.0.0.65
+Please wait a while until DNS propagation is finished and try again. If you are trying to update existing DNS records,
+it might take hours to update because of DNS caching. If the issue persists, contact RavenDB's support.
+`}
+
+
+
+If this happens, there is nothing you can do except wait for DNS propagation. When it's updated on dns.google.com, click the `Try Again` button.
+You can keep track of your RavenDB clusters and their associated DNS records at the [Customers Portal](https://customers.ravendb.net).
+### If I already have the Zip file, can I avoid repeating the setup process?
+
+Yes.
+You can use the Zip file to re-install or deploy the server/cluster elsewhere.
+Download a fresh copy of RavenDB and run the setup wizard. Then choose `Continue Cluster Setup` and select node A.
+This will use the existing Zip file and the same configuration and certificate which were previously chosen.
+When building a cluster, repeat this step with nodes B, C, and so on.
+
+
+
+
+## Changing Configurations and Renewals Issues
+
+* [After installing with Let's Encrypt, can I change the DNS records?](../../server/security/common-errors-and-faq.mdx#after-installing-with-lets-encrypt-can-i-change-the-dns-records)
+* [Can I change the (private) IP address RavenDB binds to?](../../server/security/common-errors-and-faq.mdx#can-i-change-the-private-ip-address-ravendb-binds-to)
+* [The Let's Encrypt certificate is about to expire but doesn't renew automatically](../../server/security/common-errors-and-faq.mdx#the-lets-encrypt-certificate-is-about-to-expire-but-doesnt-renew-automatically)
+* [What should I do when my license expires?](../../server/security/common-errors-and-faq.mdx#what-should-i-do-when-my-license-expires)
+* [Let's Encrypt certificate permission errors after renewal](../../server/security/common-errors-and-faq.mdx#lets-encrypt-certificate-permission-errors-after-renewal)
+### After installing with Let's Encrypt, can I change the DNS records?
+
+Yes.
+
+1. The [Customers Portal](https://customers.ravendb.net) allows you to easily edit DNS records that are associated with your license.
+2. You can run the setup wizard again.
+
+If you supply different IP addresses, the wizard will update the DNS records of your domain.
+If you use a new domain, or if you add/remove nodes in the new configuration, the wizard will also fetch a new certificate.
+### Can I change the (private) IP address RavenDB binds to?
+
+Yes.
+Open the [settings.json](../configuration/configuration-options.mdx#json) file located in the RavenDB Server installation folder,
+change the `ServerUrl` setting and restart the server.
+### The Let's Encrypt certificate is about to expire but doesn't renew automatically
+
+If you are getting the following error, you must update the RavenDB server.
+
+
+
+{`Failed to update certificate from Lets Encrypt, EXCEPTION: System.InvalidOperationException:
+Your license is associated with the following domains: ravendb.community but the PublicServerUrl
+configuration setting is: Raven.Server.Config.Settings.UriSetting.There is a mismatch, therefore
+cannot automatically renew the Lets Encrypt certificate. Please contact support.
+`}
+
+
+
+If it's not the same error as above, please open [settings.json](../configuration/configuration-options.mdx#json) in your Server installation
+and make sure you have all of the fields defined properly. Take a look at the following example:
+
+
+
+{`\{
+  "DataDir": "RavenData",
+  "License.Eula.Accepted": true,
+  "Security.Certificate.LetsEncrypt.Email": "your-email@example.com",
+  "Setup.Mode": "LetsEncrypt",
+  "Security.Certificate.Path": "cluster.server.certificate.aws.pfx",
+  "ServerUrl": "https://172.31.30.163",
+  "ServerUrl.Tcp": "tcp://172.31.30.163:38888",
+  "ExternalIp": "35.130.249.162",
+  "PublicServerUrl": "https://a.aws.development.run",
+  "PublicServerUrl.Tcp": "tcp://a.aws.development.run:38888"
+\}
+`}
+
+
+
+Things to check:
+
+* **"Setup.Mode" must be "LetsEncrypt"**
+  The automatic renewal process only works if you acquired your certificate through the RavenDB setup wizard and used LetsEncrypt.
+  If you did not set up your cluster with the setup wizard and with LetsEncrypt, you are responsible for renewing your certificate periodically.
+  * To enable RavenDB's automatic certificate renewal, set up a new cluster with the setup wizard, create parallel databases,
+    reconfigure the [document store](../../client-api/creating-document-store.mdx) to connect to the new databases,
+    and [import the data](../../studio/database/tasks/import-data/import-from-ravendb.mdx).
+* **Security.Certificate.LetsEncrypt.Email**
+  must be identical to the e-mail which is associated with your license.
+* **PublicServerUrl and PublicServerUrl.Tcp**
+  must contain the same domain that was chosen during the setup wizard and is associated with your license.
+* **ExternalIp**
+  should be defined only if you are running behind a firewall (cloud VM, Docker, etc.).
+
+* If all of this looks right, the certificate still doesn't renew automatically, and there are no alerts telling you what's wrong,
+  you can contact support.
+  * Make sure to supply the server logs with your ticket. When running in a cluster, please provide the logs from all nodes.
+  * If your logs are turned off, open `Manage Server`->`Admin Logs` in the Studio, and keep them open while you click the `Renew` button in the certificate view.
+### What should I do when my license expires?
+
+* When your license expires, the Studio is blocked.
+  Client API operations and other RavenDB features will continue to work.
+  However, any usage of expired RavenDB licenses is outside the license agreement
+  and doesn't comply with the [EULA terms](https://ravendb.net/terms).
+
+* __Renew your license__ as described in this [Renew License](../../start/licensing/renew-license.mdx) tutorial.
+### Let's Encrypt certificate permission errors after renewal
+
+If you have External Replication or ETL to another cluster,
+or if you use your own Let's Encrypt certificates as client certificates,
+the next certificate renewal may cause permission issues that need to be handled manually.
+
+Learn how to handle this issue [here](../../server/security/authentication/solve-cluster-certificate-renewal-issue.mdx).
+ + + +## Authentication Issues + +* [Authentication Error Occurred using Edge](../../server/security/common-errors-and-faq.mdx#authentication-error-occurred-using-edge) +* [Authentication Error Occurred using Chrome](../../server/security/common-errors-and-faq.mdx#authentication-error-occurred-using-chrome) +* [RavenDB is running as a service in Windows and Chrome doesn't use the client certificate from the OS store](../../server/security/common-errors-and-faq.mdx#ravendb-is-running-as-a-service-in-windows-and-chrome-doesnt-use-the-client-certificate-from-the-os-store) +* [Authentication Error Occurred in Firefox](../../server/security/common-errors-and-faq.mdx#authentication-error-occurred-in-firefox) +* [Cannot Import the Client Certificate to Firefox](../../server/security/common-errors-and-faq.mdx#cannot-import-the-client-certificate-to-firefox) +* [Getting the full error using PowerShell](../../server/security/common-errors-and-faq.mdx#getting-the-full-error-using-powershell) +* [Not using TLS](../../server/security/common-errors-and-faq.mdx#not-using-tls) +* [How to regain access to a server when you have physical access but no client certificate](../../server/security/common-errors-and-faq.mdx#how-to-regain-access-to-a-server-when-you-have-physical-access-but-no-client-certificate) +* [Certificate is not recognized when setting up on Azure App Services](../../server/security/common-errors-and-faq.mdx#certificate-is-not-recognized-when-setting-up-on-azure-app-services) +* [Automatic cluster certificate renewal following migration to 4.2](../../server/security/common-errors-and-faq.mdx#automatic-cluster-certificate-renewal-following-migration-to-42) +### Authentication Error Occurred using Edge + +You cannot access Studio using Edge, though during +[setup](../../start/installation/setup-wizard.mdx#configuring-the-server-addresses) you checked +the "Automatically register the admin client certificate in this (local) OS" checkbox +and the setup wizard ended successfully. + +![Figure 1. Authentication Error](./assets/1.png) + + + +{`There were problems authenticating the request: +This server requires client certificate for authentication, but none was provided by the client. +`} + + + +1. Try closing **all instances** of the browser and restarting it, + or opening an incognito tab and pasting the server URL into the address bar. +2. If clearing the cache didn't help, manually register the client certificate in the OS store. + * Under Windows: + Double-click the .pfx certificate. + Repeat clicking `next` for the default settings or provide your own settings. + * Under Linux: + Import the certificate directly to the browser. +3. If the browser has presented several certificates and you selected the wrong one, you can - + * Remove the certificate from the browser's cache and reinstall the .pfx certificate as described above + * Or open an Incognito tab and paste the server URL into the address bar. +4. In case none of the above works, you can use your own certificate and have RavenDB trust it. + You can use any client certificate that works under your OS and browser, even if it wasn't generated by RavenDB. + See [trusting an existing certificate](../../server/administration/cli.mdx#trustclientcert). +#### If your browser runs under Windows 7 or Windows Server 2008 or older: + +The first thing to try would be installing the **ADMIN** certificate to the OS +where your server is running, closing **all instances** of the browser and restarting it. 
+
+If the issue persists, please also visit the
+[Trusted Issuers List](https://support.microsoft.com/en-us/topic/failed-tls-connection-between-unified-communications-peers-generates-an-schannel-warning-9079a7df-1756-bf4d-20c7-42981a50f8df)
+and follow method 3 (**Configure Schannel to no longer send the list of trusted root certification authorities during the TLS/SSL handshake process**)
+to set the following registry entry to false:
+
+
+
+{`HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\SecurityProviders\\SCHANNEL
+Value name: SendTrustedIssuerList
+Value type: REG_DWORD
+Value data: 0 (False)
+`}
+
+
+### Authentication Error Occurred using Chrome
+
+You cannot access Studio using Chrome, even though during
+[setup](../../start/installation/setup-wizard.mdx#configuring-the-server-addresses) you checked
+the "Automatically register the admin client certificate in this (local) OS" checkbox
+and the setup wizard ended successfully.
+
+![Figure 2. Authentication Error](./assets/1.png)
+
+
+
+{`There were problems authenticating the request:
+This server requires client certificate for authentication, but none was provided by the client.
+`}
+
+
+
+1. Try closing **all instances** of the browser and restarting it,
+   or open an incognito tab (Ctrl+Shift+N) and paste the server URL into the address bar.
+2. If clearing the cache didn't help, manually register the client certificate.
+   * Chrome versions **earlier than 105** look for certificates registered **with the OS**.
+     Windows users can register a certificate with the OS by double-clicking its .pfx file and repeatedly clicking `next`
+     for the default settings (or providing custom settings).
+     Linux users can import the certificate directly to the browser.
+   * Chrome versions **105 and later** look for certificates registered with the browser's [root store](https://blog.chromium.org/2022/09/announcing-launch-of-chrome-root-program.html).
+     A failure to locate the certificate may be the result of registering it with the OS rather than with the browser.
+
+     This failure typically occurs when [self-signed certificates](../../server/security/authentication/certificate-configuration.mdx)
+     are used rather than Let's Encrypt certificates issued during setup, since
+     Let's Encrypt certificates are automatically installed in the Chrome root store.
+
+     To import a certificate to Chrome's root store, use the browser's settings:
+     **Settings** > **Privacy and Security** > **Security** > **Manage device certificates**
+     When a "Certificates" window opens, click **Import** and select your PFX certificate.
+     ![Import Certificate](./assets/import-certificate.png)
+3. If the browser has presented several certificates and you selected the wrong one, you can -
+   * Either remove the certificate from the browser (Settings -> Privacy and security -> Security > Manage device certificates)
+     and reinstall the .pfx certificate as described above,
+   * Or open an incognito tab (Ctrl+Shift+N) and paste the server URL into the address bar.
+4. In case none of the above works, you can use your own certificate and have RavenDB trust it.
+   You can use any client certificate that works under your OS and browser, even if it wasn't generated by RavenDB.
+   See [trusting an existing certificate](../../server/administration/cli.mdx#trustclientcert).
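+
+As a sketch of that last option, a self-signed client certificate can be produced with .NET's `CertificateRequest` API. The subject name, validity period, and password below are placeholders:
+
+```csharp
+using System;
+using System.IO;
+using System.Security.Cryptography;
+using System.Security.Cryptography.X509Certificates;
+
+using var rsa = RSA.Create(2048);
+var request = new CertificateRequest(
+    "CN=my-client", rsa, HashAlgorithmName.SHA256, RSASignaturePadding.Pkcs1);
+
+// Mark the certificate for client authentication (EKU 1.3.6.1.5.5.7.3.2).
+request.CertificateExtensions.Add(new X509EnhancedKeyUsageExtension(
+    new OidCollection { new Oid("1.3.6.1.5.5.7.3.2") }, critical: false));
+
+using var cert = request.CreateSelfSigned(
+    DateTimeOffset.UtcNow, DateTimeOffset.UtcNow.AddYears(1));
+
+// Export as a password-protected .pfx, then register it with RavenDB
+// using the trustClientCert CLI command mentioned above.
+File.WriteAllBytes("my-client.pfx", cert.Export(X509ContentType.Pfx, "password"));
+```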
+### RavenDB is running as a service in Windows and Chrome doesn't use the client certificate from the OS store
+
+Your RavenDB service may run under a certain user, for which the certificate was installed, while you
+are currently using a different user for which no certificate was installed.
+Or you may have registered the certificate with the OS, but are using a Chrome version higher than 105 that
+looks for the certificate not in the OS store but in the Chrome root store.
+
+To solve these issues:
+Log in as the user that the service runs under, and install or import the PFX certificate
+[as described above](../../server/security/common-errors-and-faq.mdx#authentication-error-occurred-using-chrome).
+### Authentication Error Occurred in Firefox
+
+You cannot access the Studio using Firefox even though you have finished the setup wizard successfully and you also checked the box saying "Automatically register the admin client certificate in this (local) OS".
+
+![Figure 3. Authentication Error](./assets/1.png)
+
+
+
+{`There were problems authenticating the request:
+This server requires client certificate for authentication, but none was provided by the client.
+`}
+
+
+
+Firefox doesn't use the OS certificate store like Chrome or Edge. Please import the certificate manually (In Firefox, "Settings" -> "Privacy and Security" -> scroll down to Security and click "View Certificates" -> "Import").
+Then close **all instances** of the browser and restart it.
+
+![Figure 4. Firefox Import](./assets/2.png)
+### Cannot Import the Client Certificate to Firefox
+
+You're trying to import the client certificate received from RavenDB to Firefox but get the following error:
+
+![Figure 5. Unknown Reasons](./assets/3.png)
+
+
+
+{`The PKCS#12 operation failed for unknown reasons.
+`}
+
+
+
+Firefox fails to import a certificate that is not password protected.
+To overcome this issue, use the RavenDB CLI to [generate a password protected certificate](../../server/administration/cli.mdx#generateclientcert).
+You can also add a password to the current certificate by using OpenSSL or by importing it to the OS store and exporting it back with a password.
+
+Firefox **sometimes** fails to import a perfectly good certificate for no apparent reason and without a proper error message.
+
+You can try to generate a new password-protected certificate using the RavenDB CLI and import that instead.
+
+If that didn't help, you can use any other client certificate you have that works with Firefox (even if it wasn't generated by RavenDB) and have RavenDB trust it. See [trusting an existing certificate](../../server/administration/cli.mdx#trustclientcert).
+
+You can also generate your own self-signed client certificate by using OpenSSL or PowerShell.
+
+This is a known issue which has been reported many times to Mozilla.
+
+Some references:
+
+[Bugzilla: #1049435](https://bugzilla.mozilla.org/show_bug.cgi?id=1049435)
+[Bugzilla: #458161](https://bugzilla.mozilla.org/show_bug.cgi?id=458161)
+[mozilla.dev.tech.crypto issue](https://groups.google.com/forum/?fromgroups=#!topic/mozilla.dev.tech.crypto/RiIeY-R5Q4Y)
+### Getting the full error using PowerShell
+
+You can use PowerShell to make requests using the REST API.
+
+If you are having trouble using certificates, take a look at this example which prints the full error (replace the server URL and the `/certificates/whoami` endpoint with yours).
+
+
+
+{`[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
+$cert = Get-PfxCertificate -FilePath C:\\secrets\\admin.client.certificate.example.pfx
+
+try \{
+    $response = Invoke-WebRequest https://a.example.development.run:8080/certificates/whoami -Certificate $cert
+\}
+catch \{
+    if ($_.Exception.Response -ne $null) \{
+        Write-Host $_.Exception.Message
+
+        $stream = $_.Exception.Response.GetResponseStream()
+        $reader = New-Object System.IO.StreamReader($stream)
+        Write-Host $reader.ReadToEnd()
+    \}
+    Write-Error $_.Exception
+\}
+`}
+
+
+### Not using TLS
+
+The RavenDB clients use TLS 1.2 by default. If you use other clients, please make sure they use TLS version 1.2 or 1.3.
+
+
+
+
+{`{
+  "Url":"/admin/secrets/generate",
+  "Type":"Raven.Client.Exceptions.Security.InsufficientTransportLayerProtectionException",
+  "Message":"RavenDB requires clients to connect using TLS 1.2, but the client used: 'Tls'.",
+  "Error":"Raven.Client.Exceptions.Security.InsufficientTransportLayerProtectionException: RavenDB requires clients to connect using TLS 1.2, but the client used: 'Tls'.
+   at Raven.Server.RavenServer.AuthenticateConnection.ThrowException() in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\RavenServer.cs:line 570
+   at Raven.Server.Routing.RequestRouter.TryAuthorize(RouteInformation route, HttpContext context, DocumentDatabase database) in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\Routing\\\\RequestRouter.cs:line 168
+   at Raven.Server.Routing.RequestRouter.d__6.MoveNext() in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\Routing\\\\RequestRouter.cs:line 89
+--- End of stack trace from previous location where exception was thrown ---
+   at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw()
+   at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
+   at System.Runtime.CompilerServices.TaskAwaiter\`1.GetResult()
+   at System.Runtime.CompilerServices.ValueTaskAwaiter\`1.GetResult()
+   at Raven.Server.RavenServerStartup.d__11.MoveNext() in C:\\\\Builds\\\\RavenDB-Stable-4.0\\\\src\\\\Raven.Server\\\\RavenServerStartup.cs:line 159"
+}
+`}
+
+
+
+
+In PowerShell, it can be solved like this:
+
+
+
+{`[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
+`}
+
+
+### How to regain access to a server when you have physical access but no client certificate
+
+An admin client certificate can be generated through the [RavenDB CLI](../../server/administration/cli.mdx#generateclientcert).
+If RavenDB runs as a console application, the CLI is just there. When running as a service, please use the `rvn admin-channel`.
+Use either the [generateClientCert](../../server/administration/cli.mdx#generateclientcert) command, or (if you already have a certificate) the [trustClientCert](../../server/administration/cli.mdx#trustclientcert) command.
+
+Another way to gain access for an existing certificate is to add the [Security.WellKnownCertificates.Admin](../../server/configuration/security-configuration.mdx#securitywellknowncertificatesadmin) configuration to `settings.json` with your existing certificate's thumbprint.
+In this case, a server restart is required.
+### Certificate is not recognized when setting up on Azure App Services
+
+You may have gotten an error message like:
+
+```
+The credentials supplied to the package were not recognized (The SSL connection could not be established, see inner exception.)
+``` + +1) In the app settings of your Azure App Services application, add the `WEBSITE_LOAD_USER_PROFILE = 1` option. + +2) Another solution is to use the `MachineKeySet` flag during certificate creation: + + + +{`[DocumentStore].Certificate = new X509Certificate2("[path to your pfx file]", + (string)null, X509KeyStorageFlags.MachineKeySet); +`} + + +### Automatic cluster certificate renewal following migration to 4.2 + +`Security.Certificate.Exec` was deprecated in 4.2 and replaced by `Security.Certificate.Load.Exec`. You can use your old `Security.Certificate.Exec` +executable by simply moving it to this new path. The settings `Security.Certificate.Renew.Exec` and `Security.Certificate.Change.Exec` have been added +for automatically persisting the certificate to the whole cluster. If you have your own mechanism for doing this, or are using a single node cluster, +you still need to place empty scripts in the `Security.Certificate.Renew.Exec` and `Security.Certificate.Change.Exec` paths or an exception will be +thrown. + + + +## Encryption Issues + +### Insufficient Memory Exception + + + +{`Memory exception occurred: System.InsufficientMemoryException: +Failed to increase the min working set size so we can lock 4,294,967,296 for +D:\\stackoverflow\\RavenData\\Databases\\SO\\Indexes\\Auto_Questions_ByBody\\Temp\\compression.0000000000.buffers. +With encrypted databases we lock some memory in order to avoid leaking secrets to disk. +Treating this as a catastrophic error and aborting the current operation. +`} + + + +When encryption is turned on, RavenDB locks memory in order to avoid leaking secrets to disk. Read more [here](../../server/security/encryption/encryption-at-rest.mdx#locking-memory). + +By default, RavenDB treats this error as catastrophic and will not continue the operation. +You can change this behavior but it's not recommended and should be done only after a proper security analysis is performed, see the [Security Configuration Section](../../server/configuration/security-configuration.mdx#securitydonotconsidermemorylockfailureascatastrophicerror). + +If such a catastrophic error occurs in **Windows**, RavenDB will try to recover automatically by increasing the size of the minimum working set and retrying the operation. +In **Linux**, it is the admin's responsibility to configure higher limits manually using: + + +{`sudo prlimit --pid [process-id] --memlock=[new-limit-in-bytes] +`} + + + +To figure out what the new limit should be, look at the exception thrown by RavenDB, which includes this size. 
+
+
+
diff --git a/versioned_docs/version-7.1/server/security/encryption/_category_.json b/versioned_docs/version-7.1/server/security/encryption/_category_.json
new file mode 100644
index 0000000000..6b33b8e106
--- /dev/null
+++ b/versioned_docs/version-7.1/server/security/encryption/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 3,
+  "label": "Encryption"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/server/security/encryption/assets/1.png b/versioned_docs/version-7.1/server/security/encryption/assets/1.png
new file mode 100644
index 0000000000..e4ed1e22bc
Binary files /dev/null and b/versioned_docs/version-7.1/server/security/encryption/assets/1.png differ
diff --git a/versioned_docs/version-7.1/server/security/encryption/database-encryption.mdx b/versioned_docs/version-7.1/server/security/encryption/database-encryption.mdx
new file mode 100644
index 0000000000..433982cc7c
--- /dev/null
+++ b/versioned_docs/version-7.1/server/security/encryption/database-encryption.mdx
@@ -0,0 +1,156 @@
+---
+title: "Encryption: Database Encryption"
+hide_table_of_contents: true
+sidebar_label: Database Encryption
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Encryption: Database Encryption
+
+In RavenDB you can create encrypted databases. Each encrypted database will have its own secret key which is used to encrypt and decrypt data.
+
+## Creating An Encrypted Database Using The Studio
+
+When [creating an encrypted database using the Studio](../../../studio/database/create-new-database/encrypted.mdx), you will receive a secret key which will
+allow you to recover the encrypted data in case of a disaster, and when restoring from backup. During normal operations there is no need to supply the secret key to RavenDB.
+See [Secret Key Management](../../../server/security/encryption/secret-key-management.mdx) for more information.
+
+![Figure 1. Secret Key](./assets/1.png)
+
+
+Download, print, or copy and save the secret key in a safe place. It will NOT be available again!
+
+
+## Creating An Encrypted Database Using The REST API And The Client API
+
+Before creating the database, a secret key must be generated. Generating and storing secret keys is restricted to `Operator` or `ClusterAdmin` Security Clearances.
+RavenDB uses a [cryptographically secure pseudo-random number generator](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) and
+it is recommended that you use it. If you must use your own secret key, please make sure it is 256 bits long and cryptographically secure.
+
+You must use a client certificate to make the request because the server is using authentication.
+
+## Windows Example
+
+Load the client certificate in PowerShell:
+
+
+{`$cert = Get-PfxCertificate -FilePath C:\\secrets\\admin.client.certificate.example.pfx
+`}
+
+
+
+Make sure to use TLS 1.2 (or 1.3):
+
+
+{`[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
+`}
+
+
+
+Ask RavenDB to generate a key for you:
+
+
+{`$response = Invoke-WebRequest https://your-server-url/admin/secrets/generate -Certificate $cert
+`}
+
+
+
+Then send the key to the RavenDB server on which the database will be created.
Note that the database doesn't exist yet, but you will still need to supply its name. Make the following POST request to assign the secret key to a specific database: + + +{`$payload = [System.Text.Encoding]::ASCII.GetString($response.Content) +Invoke-WebRequest https://your-server-url/admin/secrets?name=MyEncryptedDatabase -Certificate $cert -Method POST -Body $payload +`} + + + +Finally, create the encrypted database using the Client API: + + + +{`store.Maintenance.Server.Send(new CreateDatabaseOperation(new DatabaseRecord("MyEncryptedDatabase") +{ + Encrypted = true +})); +`} + + + + +{`await store.Maintenance.Server.SendAsync(new CreateDatabaseOperation(new DatabaseRecord("MyEncryptedDatabase") +{ + Encrypted = true +})); +`} + + + + +## Linux Example + +When generating a client certificate using RavenDB, you will receive a Zip file containing an admin client certificate (.pfx, .crt, .key). + +First we will create a .pem certificate file from the .crt and .key files: + + +{`cat admin.client.certificate.example.crt admin.client.certificate.example.key > clientCert.pem +`} + + + +Ask RavenDB to generate a key for you: + + +{`key=$(curl --cert clientCert.pem https://your-server-url/admin/secrets/generate) +`} + + + +Then send the key to the RavenDB server on which the database will be created. Note that the database doesn't exist yet, but you will still need to supply its name. Make the following POST request to assign the secret key to a specific database: + + +{`curl -X POST -H "Content-Type: text/plain" --data $key --cert clientCert.pem https://your-server-url/admin/secrets?name=MyEncryptedDatabase +`} + + + +Finally, create the encrypted database using the Client API: + + + +{`store.Maintenance.Server.Send(new CreateDatabaseOperation(new DatabaseRecord("MyEncryptedDatabase") +{ + Encrypted = true +})); +`} + + + + +{`await store.Maintenance.Server.SendAsync(new CreateDatabaseOperation(new DatabaseRecord("MyEncryptedDatabase") +{ + Encrypted = true +})); +`} + + + + +## Remarks + +Database encryption must be enabled when creating the database. If you wish to use encryption in an existing database, it must be exported and then imported back into a new encrypted database. + + +Indexing is most efficient when it is performed in the largest transactions possible. However, using encryption is very memory intensive, and if memory +runs out before the transaction completes, the entire transaction will fail. To avoid this, you can limit the size of indexing batches in encrypted +databases using [Indexing.Encrypted.TransactionSizeLimitInMb](../../../server/configuration/indexing-configuration.mdx#indexingencryptedtransactionsizelimitinmb). 
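+
+As a side note, the same REST flow shown above for PowerShell and curl can also be scripted from .NET with `HttpClient`. This is a sketch only; the server URL, database name, and certificate path are placeholders, and the endpoints are the ones used in the examples above:
+
+```csharp
+using System.Net.Http;
+using System.Security.Cryptography.X509Certificates;
+
+var handler = new HttpClientHandler();
+handler.ClientCertificates.Add(
+    new X509Certificate2(@"C:\secrets\admin.client.certificate.example.pfx"));
+using var client = new HttpClient(handler);
+
+// Ask RavenDB to generate a secret key.
+var key = await client.GetStringAsync("https://your-server-url/admin/secrets/generate");
+
+// Assign the key to the database name before the database is created.
+var response = await client.PostAsync(
+    "https://your-server-url/admin/secrets?name=MyEncryptedDatabase",
+    new StringContent(key));
+response.EnsureSuccessStatusCode();
+```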
+ + + diff --git a/versioned_docs/version-7.1/server/security/encryption/encryption-at-rest.mdx b/versioned_docs/version-7.1/server/security/encryption/encryption-at-rest.mdx new file mode 100644 index 0000000000..37d13e8e13 --- /dev/null +++ b/versioned_docs/version-7.1/server/security/encryption/encryption-at-rest.mdx @@ -0,0 +1,89 @@ +--- +title: "Encryption: Encryption At Rest" +hide_table_of_contents: true +sidebar_label: Encryption at Rest +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Encryption: Encryption At Rest + +Encryption at rest is implemented at the storage layer, using Daniel J. Bernstein's [`XChaCha20-Poly1305`](https://libsodium.gitbook.io/doc/secret-key_cryptography/aead/chacha20-poly1305/xchacha20-poly1305_construction) +authenticated encryption algorithm. + +## What Does it Mean? + +In [Voron](../../../server/storage/storage-engine.mdx), the storage engine behind RavenDB, data is stored in memory mapped files. This includes documents, indexes, attachments and transactions which are written to the journal. + +If your disk is stolen or lost, an attacker will have full access to the raw data files and without encryption turned on the data can be read with very little effort. + +On the other hand, when encryption is turned on the raw data is encrypted and unreadable without possession of the secret key. + +In RavenDB, encryption is done at the lowest possible layer, the storage engine. It is fully transparent to other levels of the server, making it super easy to use. + +## How Does it Work? + +As long as the database is idle and there are no requests to serve, everything is kept encrypted in the data files. + +Once a request is made, RavenDB will start a transaction (either read or write) and decrypt just the necessary data into memory. Then it will serve the +request, and when the transaction is finished, modified pages are encrypted and written back to the datafile. The data is +[locked](../../../server/security/encryption/encryption-at-rest.mdx#locking-memory) during the transaction by default, and it is zeroed when the transaction completes. + + +1. RavenDB makes sure that **no data is written to disk as plain text**. It will always be encrypted. +2. Indexed fields (the actual terms and values being indexed) will reside in memory as plain text. +3. Data of the current transaction will reside in memory as plain text and only for the duration of the transaction. When the transaction ends, the used memory is safely zeroed. +4. Loading documents from the database (using the Studio, the Client API, REST API) means that they will be decrypted to plain text on the server and then sent to the client (securely) by HTTPS. Once the data is received on the client side it is no longer encrypted. RavenDB does not provide encryption on the client side. + + + +Indexing is most efficient when it is performed in the largest transactions possible. However, using encryption can be very memory intensive, and if memory +runs out before the transaction completes, the entire transaction will fail. 
To avoid this, you can limit the size of indexing batches in encrypted +databases using [Indexing.Encrypted.TransactionSizeLimitInMb](../../../server/configuration/indexing-configuration.mdx#indexingencryptedtransactionsizelimitinmb). +The default limit is 64 MB. + + + +Due to the overhead of the encryption algorithm, performance can be slightly decreased. However, it doesn't affect the ACID properties of RavenDB which remains both transactional and secure. + + +## Locking Memory + +RavenDB uses memory-mapped files to keep its data. During normal operations, a process's memory regions may be paged by the OS to a file on disk when RAM has become scarce. + +With encrypted databases, we must ensure that plaintext is never written to disk. +Most of the memory-mapped files used by RavenDB are always encrypted so even if the OS decides to page out a part of a file, it will be written to disk encrypted. + +However, the memory-mapped files used for **special temporary buffers** (compression, recovery, etc.) are the exception and are not encrypted since they only reside in memory. +We lock the memory regions used by these buffers in order to avoid leaking secrets to disk. This means that if we run out of memory, the OS is not allowed to page these buffers to disk. + +The downside to this approach is that if we run out of physical RAM RavenDB won't be able to lock memory and will abort the current operation. +You can change this behavior but it's not recommended and should be done only after a proper security analysis is performed, see the [Security Configuration Section](../../../server/configuration/security-configuration.mdx#securitydonotconsidermemorylockfailureascatastrophicerror). + +If such a catastrophic error occurs in **Windows**, RavenDB will try to recover automatically by increasing the size of the minimum working set and retrying +the operation. RavenDB's ability to do this may be limited by the ['increase a process working set' policy setting](https://docs.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/increase-a-process-working-set), +so an admin may need to modify it. +In **Linux**, it is the admin's responsibility to configure higher limits manually using: + + +{`sudo prlimit --pid [process-id] --memlock=[new-limit-in-bytes] +`} + + + +To figure out what the new limit should be, look at the exception thrown by RavenDB, which includes this size. + +## What about Encryption in Transit? + +To enable encryption in RavenDB, the user must first [enable authentication](../../../server/security/authentication/certificate-configuration.mdx) and HTTPS (by providing a certificate). + +Enabling Authentication and HTTPS (using the TLS protocol version 1.2 or 1.3) provides privacy and integrity of the data in transit. It protects against man-in-the-middle attacks, eavesdropping, and tampering of the communication. + +Using the encryption feature together with HTTPS provides assurance that your data is safe both at rest and in transit. 
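+
+On the client side, enabling this encrypted channel amounts to supplying a client certificate to the document store. A minimal sketch, assuming placeholder values for the URL, database name, and certificate path:
+
+```csharp
+using System.Security.Cryptography.X509Certificates;
+using Raven.Client.Documents;
+
+using var store = new DocumentStore
+{
+    Urls = new[] { "https://a.example.development.run" },
+    Database = "MyEncryptedDatabase",
+    // The client certificate authenticates the client, and the
+    // connection itself is protected by TLS 1.2/1.3 (HTTPS).
+    Certificate = new X509Certificate2(@"C:\secrets\client.pfx")
+};
+store.Initialize();
+```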
+ diff --git a/versioned_docs/version-7.1/server/security/encryption/secret-key-management.mdx b/versioned_docs/version-7.1/server/security/encryption/secret-key-management.mdx new file mode 100644 index 0000000000..e32b637b26 --- /dev/null +++ b/versioned_docs/version-7.1/server/security/encryption/secret-key-management.mdx @@ -0,0 +1,207 @@ +--- +title: "Encryption: Secret Key Management" +hide_table_of_contents: true +sidebar_label: Secret Key Management +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Encryption: Secret Key Management + + +One of the challenges in cryptosystems is "secret protection" - how to protect the encryption key. +If the key is stored in plain text then any user that can access the key can access the encrypted data. +If the key is to be encrypted, another key is needed, and so on. + +In RavenDB this can be handled in one of two ways: + +1. [Providing a master key to RavenDB](../../../server/security/encryption/secret-key-management.mdx#providing-a-master-key-to-ravendb) +2. [Relying on the OS protection methods](../../../server/security/encryption/secret-key-management.mdx#relying-on-the-os-protection-methods) + + +## Providing a Master Key to RavenDB + +If a master key is provided, RavenDB will use it to encrypt the secret keys of encrypted databases. + +You can provide a master key by setting `Security.MasterKey.Exec` and `Security.MasterKey.Exec.Arguments` in +[settings.json](../../configuration/configuration-options.mdx#json). RavenDB will invoke a process you specify, so +you can write your own scripts / mini programs and apply whatever logic you need. It creates a clean separation +between RavenDB and the secret store in use. This option is useful when you want to protect your master key +with other solutions such as "Azure Key Vault", "HashiCorp Vault" or even Hardware-Based Protection. + +RavenDB expects to get a cryptographically secure 256-bit key through the standard output. + +For example, the following C# Console Application (GiveMeKey.cs) will generate a random key and write it to the +standard output. Obviously this is just an example and your executable should supply the same key every time it +is invoked. + + + + +{`using System; +using System.Security.Cryptography; + +namespace GiveMeKey +{ + class Program + { + static void Main(string[] args) + { + var buffer = new byte[256 / 8]; + using (var cryptoRandom = new RNGCryptoServiceProvider()) + { + cryptoRandom.GetBytes(buffer); + } + var stream = Console.OpenStandardOutput(); + stream.Write(buffer, 0, buffer.Length); + } + } +} +`} + + + + +And [settings.json](../../configuration/configuration-options.mdx#json) can look like this: + + + +{`\{ + "ServerUrl": "https://rvn-srv-1:8080", + "Setup.Mode": "None", + "DataDir": "RavenData", + "Security.Certificate.Path": "your-server-cert.pfx", + "Security.MasterKey.Exec": "C:\\\\secrets\\\\GiveMeKey.exe" +\} +`} + + + +Another way to provide a master key is to use a file containing the raw key bytes. In that case, set +`Security.MasterKey.Path` in [settings.json](../../configuration/configuration-options.mdx#json) with the file path. +RavenDB expects a cryptographically secure 256-bit key. 
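+
+For example, here is a hedged one-off sketch for generating such a key file with .NET. The output path is a placeholder, and the resulting file should be guarded with strict permissions:
+
+```csharp
+using System.IO;
+using System.Security.Cryptography;
+
+// Generate a cryptographically secure 256-bit (32-byte) master key and
+// write the raw bytes to the file that Security.MasterKey.Path will
+// point to in settings.json.
+var key = new byte[256 / 8];
+using (var rng = RandomNumberGenerator.Create())
+{
+    rng.GetBytes(key);
+}
+File.WriteAllBytes(@"C:\secrets\master.key", key);
+```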
+
+
+
+## Relying on the OS Protection Methods
+
+If a master key is not provided by the user, RavenDB will use the following default behavior:
+
+In **Windows**, secret keys are encrypted and stored using the [Data Protection API (DPAPI)](https://docs.microsoft.com/en-us/previous-versions/ms995355(v=msdn.10)), which means they can only be retrieved by the user who stored them.
+
+In **Unix**, RavenDB will generate a random master key and store it in the user's home folder with read/write
+permissions (octal 1600) only for the user who stored it. Then, RavenDB will use this master key to encrypt the
+secret keys of encrypted databases.
+
+### Changing/Resetting a Windows User Password
+
+This section is relevant only to [Server Store encryption](../../../server/security/encryption/server-store-encryption.mdx)
+and only if you chose to rely on the **Windows** protection methods.
+
+Windows uses the **user password** to encrypt secrets in [DPAPI](https://docs.microsoft.com/en-us/previous-versions/ms995355(v=msdn.10)).
+When a Windows password is **changed**, the following actions are taken:
+
+- DPAPI receives notification from Winlogon during a password change operation.
+- DPAPI decrypts all the secrets that were encrypted with the user's old passwords.
+- DPAPI re-encrypts all the secrets with the user's new password.
+
+Changing a password this way is supported and RavenDB is not affected.
+
+On the other hand, if the password was **reset** (either by you or by the administrator), secrets **cannot be decrypted anymore**.
+
+If you still need to reset the password for some reason, please follow these steps to ensure that secret
+keys which are protected with DPAPI aren't lost.
+
+Navigate to the RavenDB application folder where you can find the `rvn` tool.
+Run the following get-key command for **every** encrypted database (including `System` if it's encrypted):
+
+
+
+{`./rvn offline-operation get-key [path-to-database-folder]
+`}
+
+
+
+The output is the plaintext key which is not protected and not tied to a user.
+
+Now reset the Windows password.
+
+Then, run the following put-key command for **every** encrypted database. Supply the path of the
+database folder and the key you just got (using get-key):
+
+
+
+{`./rvn offline-operation put-key [path-to-database-folder] [key]
+`}
+
+
+
+This operation takes the key and protects it with the new Windows user password.
+After doing this for all databases you can run the server and continue working.
+
+### Using the Admin JS Console
+
+
+Do not use the console unless you're sure of what you're doing. Running a
+script in the Admin Console could crash your server, cause data loss, or
+do other irreversible harm.
+
+
+The server's Admin Console is [found in the Studio](../../../studio/server/debug/admin-js-console.mdx).
+You can use it to access and change your master key. This method is useful for
+changing the key when you change your **Windows** user account.
+
+On the console page, select a database or the server. If you select a database,
+the master key is read only, and can be accessed with this script:
+
+
+
+{`return database.MasterKey
+`}
+
+
+
+The master key for a given database can be modified with the script:
+
+
+
+{`server.ServerStore.PutSecretKey(base64, name, overwrite)
+`}
+
+
+
+| Parameter | Type | Description |
+| - | - | - |
+| **base64** | `string` | The new master key for the database |
+| **name** | `string` | The name of the database for which to change the key |
+| **overwrite** | `bool` | Whether to overwrite an existing key. If this is false and the database has a master key, an exception will be thrown. Be sure that this is what you want to do. |
+
+#### Changing the Windows User
+
+To change your Windows user without losing access to your database, use the scripts described
+above to:
+
+1. Retrieve your current key
+2. Switch your Windows user
+3. Inject the key back into your database
+
+
+
+{`return database.MasterKey
+
+// Save the returned key
+// Change Windows user
+
+server.ServerStore.PutSecretKey([base64-key], [database-name], true)
+`}
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/server/security/encryption/server-store-encryption.mdx b/versioned_docs/version-7.1/server/security/encryption/server-store-encryption.mdx
new file mode 100644
index 0000000000..ac5ab3a2e2
--- /dev/null
+++ b/versioned_docs/version-7.1/server/security/encryption/server-store-encryption.mdx
@@ -0,0 +1,120 @@
+---
+title: "Encryption: Server Store Encryption"
+hide_table_of_contents: true
+sidebar_label: Server Store Encryption
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Encryption: Server Store Encryption
+
+The Server Store is an internal special database (sometimes called the `System` database) which is **not encrypted by default**.
+It is used by RavenDB to hold server-wide information and includes details such as the cluster state machine, database records,
+compare-exchange values, client certificate definitions (without the private key), etc.
+
+In particular, the Server Store holds the encryption keys of all the encrypted databases on this server. The encryption
+keys are stored encrypted and the admin has the choice of how that encryption would be handled.
+See [Secret Key Management](../../../server/security/encryption/secret-key-management.mdx) for more details on this topic.
+
+## Enabling Server Store Encryption
+
+This is an offline operation which should be performed only once using the [rvn tool](../../../server/administration/cli.mdx).
+It is recommended to do this at the very start, as part of the initial cluster setup, right after the server was launched for the first time.
+Server Store encryption is also possible at later times, even after creating databases and storing documents.
+
+To encrypt the Server Store, make sure the server is not running. Then navigate to the RavenDB application folder where you can find the `rvn` tool.
+Run the following command and supply the path of the `System` folder:
+
+
+
+{`./rvn offline-operation encrypt [path-to-system-folder]
+`}
+
+
+
+This operation encrypts the data and saves the encryption key to the same directory.
+**The key file (secret.key.encrypted) is protected using RavenDB's secret protection policy**. Read about
+[Secret Key Management](../../../server/security/encryption/secret-key-management.mdx) to learn about secret protection in RavenDB.
+Once encrypted, the server will only work for the current OS user or the current Master Key (whichever method was chosen to protect secrets).
+Snapshots of an encrypted Server Store can only be restored by the same OS user or the same Master Key which was used during snapshot backup.
+
+From this point on, the Server Store is encrypted and adheres to the same principles of database encryption described
+[here](../../../server/security/encryption/encryption-at-rest.mdx#how-does-it-work). You still need to explicitly set up each database as encrypted,
+and each database will use a different encryption key. In order to restore snapshot backups you'll have to store the encryption key for each
+database in a safe location.
+
+## Disabling Server Store Encryption
+
+To decrypt the Server Store, make sure the server is not running. Then navigate to the RavenDB application folder where you can find the `rvn` tool.
+Run the following command and supply the path of the `System` folder:
+
+
+
+{`./rvn offline-operation decrypt [path-to-system-folder]
+`}
+
+
+
+The decryption is done using the key file (secret.key.encrypted) which was originally created when the Server Store encryption was enabled.
+From this point on, the Server Store is not encrypted anymore and the key file is deleted.
+
+## Backup and Restore an Encrypted Server Store
+
+Because of RavenDB's Secret Protection, the encrypted data is tied to a specific machine/user or to a supplied master key.
+The following instructions **assume that no changes were made** to the OS user or the master key between backup and restore.
+If any of them changed, move to the next section.
+
+Navigate to the RavenDB application folder where you can find the `rvn` tool. Run the following command and supply the path of the `System` folder:
+
+
+
+{`./rvn offline-operation get-key [path-to-system-folder]
+`}
+
+
+
+The output is the plaintext key which is not protected and not tied to any user or master key. This key allows you to move the snapshot to any environment.
+Save the `System` folder in a safe place. This is your snapshot backup. If you decide to separate the key file from the backup data (recommended), you should
+make sure to return the key file to the folder before performing a restore.
+
+To restore a Server Store from the snapshot backup, first shut down the server.
+Delete the current `System` folder and replace it with the backed-up folder (don't forget to rename it to `System`).
+Then restart the server.
+
+## Moving the encrypted Server Store to a new machine or a different OS user
+
+Because of RavenDB's Secret Protection, the encrypted data is tied to a specific machine/user or to a supplied master key.
+Moving the server to a new machine or switching the OS user requires the admin to perform an additional offline operation.
+
+Navigate to the RavenDB application folder where you can find the `rvn` tool. Run the following command and supply the path of the `System` folder:
+
+
+
+{`./rvn offline-operation get-key [path-to-system-folder]
+`}
+
+
+
+The output is the plaintext key which is not protected and not tied to any user or master key. This key allows you to move the snapshot to any environment.
+
+
+On the new machine, copy the `System` folder to the new location.
+Run the following command and supply the path of the `System` folder and the key you just got (using get-key):
+
+
+
+{`./rvn offline-operation put-key [path-to-system-folder] [key]
+`}
+
+
+
+This operation takes the key and protects it for the new OS user or new master key.
+The protected key is then saved as a file in the same folder (secret.key.encrypted).
+Now, you can run the server, which will use this new protected key and you can work with the restored data.
+
diff --git a/versioned_docs/version-7.1/server/security/fiddler-usage-with-secured-database.mdx b/versioned_docs/version-7.1/server/security/fiddler-usage-with-secured-database.mdx
new file mode 100644
index 0000000000..68ac59f260
--- /dev/null
+++ b/versioned_docs/version-7.1/server/security/fiddler-usage-with-secured-database.mdx
@@ -0,0 +1,70 @@
+---
+title: "Fiddler Usage With Secured Database"
+hide_table_of_contents: true
+sidebar_label: Fiddler Usage With Secured Database
+sidebar_position: 5
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Fiddler Usage With Secured Database
+
+To use Fiddler with a secured database, you need to configure Fiddler so that it can respond to requests and issue requests on your behalf.
+
+## Respond to Requests Requiring a Client Certificate
+
+First, you need to create a .CER file:
+
+1. Open Manage User Certificates.
+
+2. Right-click the certificate in the Personal Certificates Store.
+
+3. Click All Tasks > Export.
+
+Then you need to specify the .CER file for Fiddler to return for a given session.
+
+You have two options to do so:
+
+1. Add a FiddlerScript rule to the OnBeforeRequest function:
+
+
+{`oSession["https-Client-Certificate"] = "C:\\\\test\\\\someCert.cer";
+`}
+
+
+2. Place your .CER file in '%USERPROFILE%\My Documents\Fiddler2\ClientCertificate.cer' (the name must be ClientCertificate.cer)
+
+
+If you do this, your client certificate is exposed through Fiddler.
+
+
+## Accepting the Response on the Client
+
+* Option 1: Configure the Windows client to trust the Fiddler Root Certificate
+
+  1. Click Tools > Fiddler Options > HTTPS.
+
+  2. Click the Decrypt HTTPS Traffic box.
+
+  3. Next to `Trust the Fiddler Root certificate?`, click Yes.
+
+  4. After `Do you want to install this certificate?`, click Yes.
+
+
+If you do this, Windows will automatically trust any certificate issued by this CA. This is a security risk!
+
+
+* Option 2: Have the client ignore certificate validation
+
+  In the application, you should set:
+
+  Raven.Client.Http.RequestExecutor.RemoteCertificateValidationCallback += (sender, cert, chain, errors) => true;
+
+
+If you do this and forget to remove it from your code, your client will accept any response!
+
diff --git a/versioned_docs/version-7.1/server/security/overview.mdx b/versioned_docs/version-7.1/server/security/overview.mdx
new file mode 100644
index 0000000000..e97abf765d
--- /dev/null
+++ b/versioned_docs/version-7.1/server/security/overview.mdx
@@ -0,0 +1,78 @@
+---
+title: "Security: Overview"
+hide_table_of_contents: true
+sidebar_label: Overview
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Security: Overview
+
+In the security section, we will review the security features in RavenDB and explain how to manage your secured server or cluster.
+
+## Authentication
+
+RavenDB uses X.509 certificate-based authentication.
+X.509 certificates are standardized, secure, and widely used in many applications. They allow you to use TLS/SSL and HTTPS, which keeps your communications encrypted and secure.
+ +The idea of authentication in RavenDB is based on the fact that the server holds a server certificate, which is either signed by a trusted SSL Certificate Authority or self-signed. The server certificate is used by an administrator to generate client certificates with assigned permissions. Client certificates can be used for authentication, and authorization is granted according to the assigned permissions. + + +RavenDB **does not** rely on PKI infrastructure to trust certificates and takes a stricter approach. It allows access only to client certificates that are explicitly registered in the RavenDB server **or** to certificates that have the same issuer and Public Key Pinning Hash as a certificate that is already trusted. +Registering a certificate means one of the following: + +* The certificate was generated by the server. +* The certificate was registered explicitly by an administrator. +* The certificate was registered implicitly for having the same [Public Key Pinning Hash](../../server/security/authentication/certificate-renewal-and-rotation.mdx#implicit-trust-by-public-key-pinning-hash) as a registered certificate. + +In any case, the certificate must appear in the certificates view in the Studio. Visit the [Certificate Management](../../server/security/authentication/certificate-management.mdx) section for more information. + + +In the Studio, administrators can use the [Certificates View](../../server/security/authentication/certificate-configuration.mdx) to easily manage their certificates. It can be used to generate client certificates, register existing client certificates, import and export server certificates, rename certificates, assign permissions, and more. + +**Read more:** + +- [Manual Certificate Configuration](../../server/security/authentication/certificate-configuration.mdx) +- [Certificate Management](../../server/security/authentication/certificate-management.mdx) +- [Client Certificate Usage](../../server/security/authentication/client-certificate-usage.mdx) +- [Certificate Renewal & Rotation](../../server/security/authentication/certificate-renewal-and-rotation.mdx) +- [Let's Encrypt Certificates](../../server/security/authentication/lets-encrypt-certificates.mdx) +- [Common Errors & FAQ](../../server/security/common-errors-and-faq.mdx) + + + +## Authorization + +Authorization in RavenDB is based on the same X.509 certificates. + +Every client certificate is associated with a security clearance and access permissions per database. + +**Read more:** + +- [Security Clearance & Permissions](../../server/security/authorization/security-clearance-and-permissions.mdx) + + + +## Encryption + +RavenDB offers full database encryption using [libsodium](https://download.libsodium.org/doc/), a well-known, battle-tested encryption library. + +Encryption is implemented at the storage level, with XChaCha20-Poly1305 authenticated encryption using 256-bit keys. + +When database encryption is on, all the features of a database are automatically encrypted - documents, indexes, and every piece of data that is written to disk.
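+ +For illustration, enabling encryption when creating a database from the Client API is a matter of flagging the database record. The following is a minimal sketch; the database name is a placeholder, and it assumes the encryption secret key was already registered on the server (see [Secret Key Management](../../server/security/encryption/secret-key-management.mdx)): + + + +{`// Create a database with encryption enabled +// (assumes a secret key was already distributed to the relevant nodes) +var databaseRecord = new DatabaseRecord("EncryptedDB") +\{ +    Encrypted = true +\}; +store.Maintenance.Server.Send(new CreateDatabaseOperation(databaseRecord)); +`} + +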
+ +**Read more:** + +- [Encryption at Rest](../../server/security/encryption/encryption-at-rest.mdx) +- [Database Encryption](../../server/security/encryption/database-encryption.mdx) +- [Server Store Encryption](../../server/security/encryption/server-store-encryption.mdx) +- [Secret Key Management](../../server/security/encryption/secret-key-management.mdx) + + + diff --git a/versioned_docs/version-7.1/server/storage/_category_.json b/versioned_docs/version-7.1/server/storage/_category_.json new file mode 100644 index 0000000000..69b91a6bf9 --- /dev/null +++ b/versioned_docs/version-7.1/server/storage/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 6, + "label": "Storage" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/server/storage/_documents-compression-csharp.mdx b/versioned_docs/version-7.1/server/storage/_documents-compression-csharp.mdx new file mode 100644 index 0000000000..7a66dcbc50 --- /dev/null +++ b/versioned_docs/version-7.1/server/storage/_documents-compression-csharp.mdx @@ -0,0 +1,171 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The **Documents Compression** feature employs the [Zstd compression algorithm](https://github.com/facebook/zstd) + to achieve more efficient data storage with constantly improving compression ratios. + +* Documents compression can be set for all collections, selected collections, and revisions. + Default compression settings are [configurable](../../server/configuration/database-configuration.mdx#databasescompressioncompressallcollectionsdefault). + +* When turned on, compression will be applied to: + * **New documents**: + * A new document that is saved will be compressed. + * **Existing documents**: + * Existing documents that are modified and saved will be compressed. + * Existing documents that are Not modified will only be compressed when executing the + [compact database operation](../../client-api/operations/server-wide/compact-database.mdx#compaction-triggers-compression). + +* Compression can be set from the [Studio](../../studio/database/settings/documents-compression.mdx), + or by updating the database record from the Client API, see below. + +* In this page: + * [Overview](../../server/storage/documents-compression.mdx#overview) + * [Compression -vs- Compaction](../../server/storage/documents-compression.mdx#compression--vs--compaction) + * [Set compression for all collections](../../server/storage/documents-compression.mdx#set-compression-for-all-collections) + * [Set compression for selected collections](../../server/storage/documents-compression.mdx#set-compression-for-selected-collections) + * [Syntax](../../server/storage/documents-compression.mdx#syntax) + + +## Overview + +* As a document database, RavenDB has a schema-less nature that presents many advantages; + however, it requires us to manage the data structure on a per-document basis. + In extreme cases, the majority of the data you store is the documents' structure. + +* The [Zstd compression algorithm](https://github.com/facebook/zstd) is used to learn your data model, identify common patterns, + and create dictionaries that represent the redundant structural data across documents in a collection. + +* The algorithm is trained by each compression operation and continuously improves its compression ratio + to maintain the most efficient compression model. + In many datasets, this can reduce the storage space by more than 50%.
+ +* Compression and decompression are fully transparent to the user. + Reading and querying large compressed datasets is usually as fast as reading and querying + their uncompressed versions because the compressed data is loaded much faster. + +* Compression is Not applied to attachments, counters, and time series data, + only to the content of documents and revisions. + +* Detailed information about the database's physical storage is visible in the [Storage Report view](../../studio/database/stats/storage-report.mdx). + + + +## Compression -vs- Compaction + +* The following table summarizes the differences between Compression and Compaction: + +| **Compression** | | +| - | - | +| Action: | Reduce storage space using the `Zstd` compression algorithm | +| Items that can be compressed: | **-** Documents in collections that are configured for compression
**-** Revisions for all collections | +| Triggered by: | The server | +| Triggered when: | Compression feature is configured,
**and** when either of the following occurs for the configured collections:
   **-** Storing new documents
   **-** Modifying & saving existing documents
   **-** Compact operation is triggered, existing documents will be compressed | + +| **Compaction** | | +| - | - | +| Action: | Remove empty gaps on disk that still occupy space after deletes | +| Items that can be compacted: | Documents and/or indexes on the specified database | +| Triggered by: | Client API code | +| Triggered when: | Explicitly calling [compact database operation](../../client-api/operations/server-wide/compact-database.mdx) | + + + +## Set compression for all collections + + + + +{`// Compression is configured by setting the database record + +// Retrieve the database record +var dbrecord = store.Maintenance.Server.Send(new GetDatabaseRecordOperation(store.Database)); + +// Set compression on ALL collections +dbrecord.DocumentsCompression.CompressAllCollections = true; + +// Update the database record +store.Maintenance.Server.Send(new UpdateDatabaseOperation(dbrecord, dbrecord.Etag)); +`} + + + + +{`// Compression is configured by setting the database record + +// Retrieve the database record +var dbrecord = await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database)); + +// Set compression on ALL collections +dbrecord.DocumentsCompression.CompressAllCollections = true; + +// Update the database record +await store.Maintenance.Server.SendAsync(new UpdateDatabaseOperation(dbrecord, dbrecord.Etag)); +`} + + + + + + +## Set compression for selected collections + + + + +{`// Retrieve the database record +var dbrecord = store.Maintenance.Server.Send(new GetDatabaseRecordOperation(store.Database)); + +// Turn on compression for specific collections +dbrecord.DocumentsCompression.Collections = new[] { "Orders", "Employees" }; + +// Turn off compression for all revisions, on all collections +dbrecord.DocumentsCompression.CompressRevisions = false; + +// Update the database record +store.Maintenance.Server.Send(new UpdateDatabaseOperation(dbrecord, dbrecord.Etag)); +`} + + + + +{`// Retrieve the database record +var dbrecord = await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database)); + +// Turn on compression for specific collection +dbrecord.DocumentsCompression.Collections = new[] { "Orders", "Employees" }; + +// Turn off compression for all revisions, on all collections +dbrecord.DocumentsCompression.CompressRevisions = false; + +// Update the database record +await store.Maintenance.Server.SendAsync(new UpdateDatabaseOperation(dbrecord, dbrecord.Etag)); +`} + + + + + + +## Syntax + +* Documents compression is configured using the `DocumentsCompressionConfiguration` class in the database record. + + + +{`public class DocumentsCompressionConfiguration +\{ + public string[] Collections \{ get; set; \} + public bool CompressRevisions \{ get; set; \} + public bool CompressAllCollections \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/server/storage/_documents-compression-nodejs.mdx b/versioned_docs/version-7.1/server/storage/_documents-compression-nodejs.mdx new file mode 100644 index 0000000000..055db2273d --- /dev/null +++ b/versioned_docs/version-7.1/server/storage/_documents-compression-nodejs.mdx @@ -0,0 +1,137 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* The **Documents Compression** feature employs the [Zstd compression algorithm](https://github.com/facebook/zstd) + to achieve more efficient data storage with constantly improving compression ratios. 
+ +* Documents compression can be set for all collections, selected collections, and revisions. + Default compression settings are [configurable](../../server/configuration/database-configuration.mdx#databasescompressioncompressallcollectionsdefault). + +* When turned on, compression will be applied to: + * **New documents**: + * A new document that is saved will be compressed. + * **Existing documents**: + * Existing documents that are modified and saved will be compressed. + * Existing documents that are Not modified will only be compressed when executing the + [compact database operation](../../client-api/operations/server-wide/compact-database.mdx#compaction-triggers-compression). + +* Compression can be set from the [Studio](../../studio/database/settings/documents-compression.mdx), + or by updating the database record from the Client API, see below. + +* In this page: + * [Overview](../../server/storage/documents-compression.mdx#overview) + * [Compression -vs- Compaction](../../server/storage/documents-compression.mdx#compression--vs--compaction) + * [Set compression for all collections](../../server/storage/documents-compression.mdx#set-compression-for-all-collections) + * [Set compression for selected collections](../../server/storage/documents-compression.mdx#set-compression-for-selected-collections) + * [Syntax](../../server/storage/documents-compression.mdx#syntax) + + +## Overview + +* As a document database, RavenDB has a schema-less nature that presents many advantages; + however, it requires us to manage the data structure on a per-document basis. + In extreme cases, the majority of the data you store is the documents' structure. + +* The [Zstd compression algorithm](https://github.com/facebook/zstd) is used to learn your data model, identify common patterns, + and create dictionaries that represent the redundant structural data across documents in a collection. + +* The algorithm is trained by each compression operation and continuously improves its compression ratio + to maintain the most efficient compression model. + In many datasets, this can reduce the storage space by more than 50%. + +* Compression and decompression are fully transparent to the user. + Reading and querying large compressed datasets is usually as fast as reading and querying + their uncompressed versions because the compressed data is loaded much faster. + +* Compression is Not applied to attachments, counters, and time series data, + only to the content of documents and revisions. + +* Detailed information about the database's physical storage is visible in the [Storage Report view](../../studio/database/stats/storage-report.mdx). + + + +## Compression -vs- Compaction + +* The following table summarizes the differences between Compression and Compaction: + +| **Compression** | | +| - | - | +| Action: | Reduce storage space using the `Zstd` compression algorithm | +| Items that can be compressed: | **-** Documents in collections that are configured for compression
**-** Revisions for all collections | +| Triggered by: | The server | +| Triggered when: | Compression feature is configured,
**and** when either of the following occurs for the configured collections:
   **-** Storing new documents
   **-** Modifying & saving existing documents
   **-** Compact operation is triggered, existing documents will be compressed | + +| **Compaction** | | +| - | - | +| Action: | Remove empty gaps on disk that still occupy space after deletes | +| Items that can be compacted: | Documents and/or indexes on the specified database | +| Triggered by: | Client API code | +| Triggered when: | Explicitly calling [compact database operation](../../client-api/operations/server-wide/compact-database.mdx) | + + + +## Set compression for all collections + + + +{`// Compression is configured by setting the database record + +// Retrieve the database record +const dbrecord = await store.maintenance.server.send(new GetDatabaseRecordOperation(store.database)); + +// Set compression on ALL collections +dbrecord.documentsCompression.compressAllCollections = true; + +// Update the database record +await store.maintenance.server.send(new UpdateDatabaseOperation(dbrecord, dbrecord.etag)); +`} + + + + + +## Set compression for selected collections + + + +{`// Retrieve the database record +const dbrecord = await store.maintenance.server.send(new GetDatabaseRecordOperation(store.database)); + +// Turn on compression for specific collections +// Turn off compression for all revisions, on all collections +dbrecord.documentsCompression = \{ + collections: ["Orders", "Employees"], + compressRevisions: false +\}; + +// Update the database record +await store.maintenance.server.send(new UpdateDatabaseOperation(dbrecord, dbrecord.etag)); +`} + + + + + +## Syntax + +* Documents compression is configured using the following object in the database record: + + + +{`// The documentsCompression object +\{ + collections; // string[], List of collections to compress + compressRevisions; // boolean, set to true to compress revisions + compressAllCollections; // boolean, set to true to compress all collections +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/server/storage/customizing-raven-data-files-locations.mdx b/versioned_docs/version-7.1/server/storage/customizing-raven-data-files-locations.mdx new file mode 100644 index 0000000000..c8949c437f --- /dev/null +++ b/versioned_docs/version-7.1/server/storage/customizing-raven-data-files-locations.mdx @@ -0,0 +1,280 @@ +--- +title: "Customizing Data Files Locations" +hide_table_of_contents: true +sidebar_label: Customizing Data Files Locations +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Customizing Data Files Locations + + +* The structure of [RavenDB directories](../../server/storage/directory-structure.mdx) **cannot** be changed. However: + * The path for temporary files can be customized. + * Data files can be stored in different locations, + by defining junction points (Windows) or mount points (Linux). + * A script can be used to automate the different location definitions.
+ +* In this page: + * [Why store data on different devices](../../server/storage/customizing-raven-data-files-locations.mdx#why-store-data-on-different-devices) + * [Configuring temp files location](../../server/storage/customizing-raven-data-files-locations.mdx#configuring-temp-files-location) + * [Configuring data files location](../../server/storage/customizing-raven-data-files-locations.mdx#configuring-data-files-location) + * [Automate storage definitions](../../server/storage/customizing-raven-data-files-locations.mdx#automate-storage-definitions) + * [Script example - Basic usage](../../server/storage/customizing-raven-data-files-locations.mdx#script-example---basic-usage) + * [Script example - Point Indexes to different location](../../server/storage/customizing-raven-data-files-locations.mdx#script-example---point-indexes-to-different-location) + + +## Why store data on different devices + +* A file or directory can be redirected to a different storage location according to its speed, durability, etc. + +* This allows organizing the files based on their usage pattern and the performance of the different devices you own. + E.g., slow devices may cause a slowdown when reading from _Raven.voron_. + +* [Voron components](../../server/storage/directory-structure.mdx#voron-storage-environment) operate concurrently. + Storing them in different locations can help avoid traffic jams and achieve better concurrency. + + + +## Configuring temp files location + +* __System temporary files__ + By default, temporary files containing cluster data and server-wide data are written to the `Temp` folder under the System directory. + Customize the path for these files by setting the [Storage.TempPath](../../server/configuration/storage-configuration.mdx#storagetemppath) configuration option. + The temporary files will be written to the combined path `"<Storage.TempPath>/System"`. + +* __Databases temporary files__ + By default, all databases' temporary files are written to the `Temp` folder under each Database directory. + Customize the path for these files by setting the [Storage.TempPath](../../server/configuration/storage-configuration.mdx#storagetemppath) configuration option. + The temporary files will be written to `"<Storage.TempPath>/Databases/{database-name}"`. + +* __Configuration temporary files__ + By default, the configuration temporary files are written to the `Temp` folder under the Configuration directory per database. + Customize the path for these files by setting the [Storage.TempPath](../../server/configuration/storage-configuration.mdx#storagetemppath) configuration option. + The temporary files will be written to `"<Storage.TempPath>/Databases/{database-name}/Configuration"`. + +* __Indexes temporary files__ + By default, all indexes' temporary files are written to the `Temp` folder under each Index directory. + Customize the path for these files by setting the [Indexing.TempPath](../../server/configuration/indexing-configuration.mdx#indexingtemppath) configuration option. + +* __Backup temporary files__ + By default, backup temporary files are written under the `Database` directory or under `Storage.TempPath` if defined. + Customize the path for these files by setting the [Backup.TempPath](../../server/configuration/backup-configuration.mdx#backuptemppath) configuration option. + + + +Refer to the [Configuration overview](../../server/configuration/configuration-options.mdx) article to learn how to set the configuration keys.
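+ +For example, here is a minimal `settings.json` sketch that sets these keys. The paths below are placeholders only - point them at the appropriate devices on your machine: + + + +{`\{ + "Storage.TempPath": "E:\\\\RavenTemp", + "Indexing.TempPath": "F:\\\\RavenIndexTemp", + "Backup.TempPath": "G:\\\\RavenBackupTemp" +\} +`} + +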
+ + + + + +## Configuring data files location + +* You can store RavenDB data files in any directory of your choice. + This is done by defining __junction points__ (Windows) or __mount points__ (Linux). + +* If data already exists in the directory, and you want to define the junction / mount point, + you need to create a backup of the data first and copy it back into the directory after executing the command. + +* The database must be offline when moving the database folder itself to a new location. + + + +__Example - Moving Journals__ + +A common practice is to store the journals on a very fast drive to achieve better write performance. +The following command will point the `Journals` directory of the _Northwind_ database to a path on a different drive. + + + +{`# Windows: +C:\\\\RavenDB\\\\Server\\\\RavenData\\\\Databases\\\\Northwind>mklink /J Journals E:\\\\Journals\\\\Northwind +`} + + + + + +{`# Linux: +ln -s /mnt/FastDrive/Databases/Northwind/Journals ~/RavenDB/Server/RavenData/Databases/Northwind/Journals +`} + + + + + + +__Example - Moving Indexes__ + +To store the data of all indexes of the _Northwind_ database in the custom location, +you can use the following command to point the `Indexes` directory to a new location: + + + +{`# Windows: +C:\\\\RavenDB\\\\Server\\\\RavenData\\\\Databases\\\\Northwind>mklink /J Indexes D:\\\\Indexes\\\\Northwind +`} + + + + + +{`# Linux: +ln -s /mnt/FastDrive/Databases/Northwind/Indexes ~/RavenDB/Server/RavenData/Databases/Northwind/Indexes +`} + + + + + + +## Automate storage definitions + +* To help automate the process, we have added the [on directory initialize](../../server/configuration/storage-configuration.mdx#storageondirectoryinitializeexec) configuration option. + Whenever RavenDB __creates a directory__, it will invoke the process that is defined within that configuration. + +* The process is called just before the directory is created. + This allows you to create a script with your own logic, defining junction/mount points as needed. +* RavenDB will invoke the process with the following params: + + * Params passed by the user: + * User arguments - optional params, set in the [optional user arguments](../../server/configuration/storage-configuration.mdx#storageondirectoryinitializeexecarguments) configuration option + + * Params passed by RavenDB: + * The environment type (System, Database, Index, Configuration, Compaction) + * The database name + * Path of the `DataDir` directory + * Path of the `Temp` directory + * Path of the `Journals` directory + +#### Script example - Basic usage + +The following is a very simple PowerShell script example, provided here only to show basic usage. +The script does Not modify any file locations; it only prints the values of the script params into a text file. + + + +{`# script.ps1 + +param([string]$userArg, [string]$type, [string]$name, [string]$dataPath, [string]$tempPath, [string]$journalPath) +Add-Content $userArg "$type $name $dataPath $tempPath $journalPath +" +exit 0 +`} + + + +The output file _outFile.txt_ is supplied as a user argument.
+Add the script path and its user arguments to the _settings.json_ file as follows: + + + +{`\{ + "Setup.Mode": "None", + "ServerUrl": "http://127.0.0.1:8080", + "License.Eula.Accepted": true, + "Storage.OnDirectoryInitialize.Exec" :"powershell", + "Storage.OnDirectoryInitialize.Exec.Arguments" :"c:\\\\scripts\\\\script.ps1 c:\\\\scripts\\\\outFile.txt" +\} +`} + + + +When launching the server and creating the Northwind database with the Northwind sample data, the script is invoked every time a directory is created. +Each line in _outFile.txt_ shows the values passed to the script when it was called. + + + +{`\{ +System System C:\\\\RavenDB\\\\Server\\\\System C:\\\\RavenDB\\\\Server\\\\System\\\\Temp C:\\\\RavenDB\\\\Server\\\\System\\\\Journals + +Configuration Northwind C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Configuration C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Configuration\\\\Temp C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Configuration\\\\Journals + +Database Northwind C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Temp C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Journals + +Index Northwind C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Indexes\\\\Orders_ByCompany C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Indexes\\\\Orders_ByCompany\\\\Temp C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Indexes\\\\Orders_ByCompany\\\\Journals + +Index Northwind C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Indexes\\\\Product_Search C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Indexes\\\\Product_Search\\\\Temp C:\\\\RavenDB\\\\Server\\\\Databases\\\\Northwind\\\\Indexes\\\\Product_Search\\\\Journals + +// ... + more lines per index folder created +\} +`} + + + + + + +#### Script example - Point Indexes to different location + +The following bash script example will point the Indexes data to a new location. + +If the environment type is other than 'Database', the script exits. +Otherwise, it creates a soft link for the `Indexes` directory. + +After the script is run, the Indexes link will reside under the Database directory (see [Directory Structure](../../server/storage/directory-structure.mdx)). +The link will point to the new location set by the variable $INDEXES_TARGET_DIR_NAME, where all indexes' data will be written. + + + +{`#!/bin/bash +# bash ./your-script USER_ARGS Database DB_NAME BASE_PATH TEMP_PATH JOURNALS_PATH + +# Use directory names as defined on your machine +RDB_DATA_DIR="/var/lib/ravendb/data" +INDEXES_TARGET_DIR="/mnt/ravendb-indexes" + +DIR_TYPE="$1" +DB_NAME="$2" + +if [ "$DIR_TYPE" != 'Database' ]; then +exit 0 +fi + +INDEXES_SOURCE_DIR_NAME="$\{RDB_DATA_DIR\}/Databases/$\{DB_NAME\}/Indexes" +INDEXES_TARGET_DIR_NAME="$\{INDEXES_TARGET_DIR\}/$\{DB_NAME\}/Indexes" + + +if [ -d "$INDEXES_SOURCE_DIR_NAME" ] && [ ! -L "$INDEXES_SOURCE_DIR_NAME" ]; then +# If the Indexes directory already exists, exit - manual handling is needed +echo "FATAL: Directory $INDEXES_SOURCE_DIR_NAME already exists."
+exit 1 +fi + +if [ -L "$INDEXES_SOURCE_DIR_NAME" ]; then +exit 0 +fi + +mkdir -p "$INDEXES_TARGET_DIR_NAME" + +ln -s "$INDEXES_TARGET_DIR_NAME" "$INDEXES_SOURCE_DIR_NAME" +`} + + + +Add the script to the _settings.json_ file as follows: + + + +{`\{ +"Setup.Mode": "None", +"ServerUrl": "http://127.0.0.1:8080", +"License.Eula.Accepted": true, +"Storage.OnDirectoryInitialize.Exec" :"bash", +"Storage.OnDirectoryInitialize.Exec.Arguments" :"/scripts/your-script.sh" +\} +`} + + + + + + + diff --git a/versioned_docs/version-7.1/server/storage/directory-structure.mdx b/versioned_docs/version-7.1/server/storage/directory-structure.mdx new file mode 100644 index 0000000000..9e107e9c21 --- /dev/null +++ b/versioned_docs/version-7.1/server/storage/directory-structure.mdx @@ -0,0 +1,99 @@ +--- +title: "Storage: Directory Structure" +hide_table_of_contents: true +sidebar_label: Directory Structure +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Storage: Directory Structure + + +* RavenDB keeps all data under the location specified by the [DataDir](../../server/configuration/core-configuration.mdx#datadir) configuration value. + +* In this page: + * [On-disk Data Structure](../../server/storage/directory-structure.mdx#on-disk-data-structure) + * [Voron Storage Environment](../../server/storage/directory-structure.mdx#voron-storage-environment) + +## On-disk Data Structure + +* The on-disk structure of RavenDB data directories is as follows: + + + +* __<data-dir>__ + * `Databases` + * __<database-name>__ + * `Configuration` + * `Journals` + * `Temp` + * Raven.voron + * `Indexes` + * __<index-name>__ + * `Journals` + * `Temp` + * Raven.voron + * __...more indexes__ + * `Journals` + * `Temp` + * Raven.voron + * __...more databases__ + * `System` + * `Journals` + * `Temp` + * Raven.voron + + + +* The main <data-dir> contains: + + * **Databases folder** + Contains subdirectories with data per database. + * **System folder** + Stores cluster data & server-wide data such as shared resources needed by all cluster nodes, + e.g. the Database Record. + +* The System folder, the Database folders, and their inner folders (the Configuration folder and each Index folder) are each a separate **Voron storage environment**. + + + +## Voron Storage Environment + +* Each Voron storage environment is composed of: + + +__Temp Folder__ + +* Holds temporary scratch & compression <em>*.buffers</em> files. +* These are small memory-mapped files that keep separate data versions for concurrently running transactions. +* Data modified by a transaction is copied into the scratch space - modifications are made on a copy of the data. +* Compression files are used to compress the transaction data just before writing it to the journal. +* When a transaction is written from these files to the journal file, it is considered committed. + + + +__Journals Folder__ + +* Contains Write-Ahead Journal files (WAJ) that are used in the hot path of a transaction commit. +* From the journals, transactions will be flushed to the Voron data file where they are persisted. +* Entries written to these files use unbuffered sequential writes and direct I/O to ensure a direct write that bypasses all caches.
+* Writes to the journals happen immediately, have high priority, and take precedence over writes to the Voron data file. +* Data is cleared from the journals once it is successfully stored in the Voron data file. +* The journal's transactions take part in database recovery - their info is used to recover the database up to the same point it was at before the failure. + + + +__Raven.voron file__ + +* This file contains the persisted data on disk. +* It is a memory-mapped data file using buffered writes with random reads and writes. + + + + diff --git a/versioned_docs/version-7.1/server/storage/documents-compression.mdx b/versioned_docs/version-7.1/server/storage/documents-compression.mdx new file mode 100644 index 0000000000..269d7cd7cd --- /dev/null +++ b/versioned_docs/version-7.1/server/storage/documents-compression.mdx @@ -0,0 +1,41 @@ +--- +title: "Documents Compression" +hide_table_of_contents: true +sidebar_label: Documents Compression +sidebar_position: 3 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import DocumentsCompressionCsharp from './_documents-compression-csharp.mdx'; +import DocumentsCompressionNodejs from './_documents-compression-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/server/storage/storage-engine.mdx b/versioned_docs/version-7.1/server/storage/storage-engine.mdx new file mode 100644 index 0000000000..9b89393c0a --- /dev/null +++ b/versioned_docs/version-7.1/server/storage/storage-engine.mdx @@ -0,0 +1,105 @@ +--- +title: "Storage Engine - Voron" +hide_table_of_contents: true +sidebar_label: Storage Engine +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Storage Engine - Voron + + +* RavenDB uses an in-house managed storage engine called Voron to persist your data (documents, indexes, and configuration). + It is a high-performance storage engine designed and optimized for RavenDB's needs. + +* Voron employs the following structures to efficiently organize data on persistent storage: + * **B+Tree** - Supports variable-size keys and values. + * **Fixed-Size B+Tree** - Uses `Int64` keys and fixed-size values (defined at creation time). + This structure enables various optimizations. + * **Raw Data Section** – Allows storage of raw data (e.g., document content) and provides an identifier for O(1) data access. + * **Table** – Combines Raw Data Sections with any number of indexes, which internally are implemented as regular or + Fixed-Size B+Trees. + +* In this page: + * [Transaction support](../../server/storage/storage-engine.mdx#transaction-support) + * [Single write model](../../server/storage/storage-engine.mdx#single-write-model) + * [Memory-mapped files](../../server/storage/storage-engine.mdx#memory-mapped-files) + * [Requirements](../../server/storage/storage-engine.mdx#requirements) + * [Limitations](../../server/storage/storage-engine.mdx#limitations) + + +## Transaction support + +Voron is a fully transactional storage engine that ensures atomicity and durability using a Write-Ahead Journal (WAJ).
+All modifications made within a transaction are first written to a journal file (using unbuffered I/O and write-through) before being applied to the main data file and synced to disk. +The journals act as a transaction log, preserving the durability property of ACID transactions. +Applying the journaled transactions to the data file occurs in the background. + +In the event of an ungraceful server shutdown, the journals serve as the recovery source. +If a process stops unexpectedly, leaving modifications not yet applied to the data file, +Voron recovers the database state during the next database startup by replaying the transactions stored in the journal files. + +Because journals are flushed and synced to disk before a transaction commit completes, changes are guaranteed to survive process or system crashes. +Each transaction is written to the journal as part of its commit, and a successful response is returned _only_ if the commit completes successfully. +This ensures that transactions can be easily recovered if necessary. + +Multi-Version Concurrency Control (MVCC) is implemented using scratch files, +which are temporary files that maintain concurrent versions of the data for active transactions. + +Each transaction operates on a snapshot of the database, ensuring that write transactions do not modify the data being accessed by other transactions. +Snapshot isolation for concurrent transactions is maintained using Page Translation Tables. + + + +## Single write model + +Voron supports only a single write process (but there can be multiple read processes). +Having only a single write transaction simplifies the handling of writes. + +To achieve high performance, RavenDB implements transaction merging on top of Voron's single write model. +This approach provides a tremendous performance boost, particularly in high-load scenarios. + +Additionally, Voron includes support for asynchronous transaction commits. +These commits must meet specific requirements to seamlessly integrate with RavenDB's transaction merging mechanism. +The actual transaction lock handoff and early lock release are managed at a higher layer, where more detailed system information is available. + + + +## Memory-mapped files + +Voron is based on memory-mapped files. + + + +Since RavenDB 4.0, Voron has no limits when running in 32-bit mode. The issue of running out of address space when mapping files into memory +has been addressed by providing a dedicated pager (the component responsible for mapping) for 32-bit environments.
+ + + + + +## Requirements + +The storage hardware / file system must support: + +* fsync +* `[Windows]` UNBUFFERED_IO / WRITE_THROUGH +* `[Windows]` [Hotfix for Windows 7 and Windows Server 2008 R2](https://support.microsoft.com/en-us/help/2731284/33-dos-error-code-when-memory-memory-mapped-files-are-cleaned-by-using) +* `[Posix]` O_DIRECT + + + +## Limitations + +- The key size must be less than 2025 bytes in UTF8 + + + diff --git a/versioned_docs/version-7.1/server/tcp-compression.mdx b/versioned_docs/version-7.1/server/tcp-compression.mdx new file mode 100644 index 0000000000..fc815367b6 --- /dev/null +++ b/versioned_docs/version-7.1/server/tcp-compression.mdx @@ -0,0 +1,95 @@ +--- +title: "TCP Compression" +hide_table_of_contents: true +sidebar_label: TCP Compression +sidebar_position: 10 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# TCP Compression + + +* With **TCP Compression** enabled, internal cluster data transfers + are compressed, including nodes' + [database replication](../server/clustering/replication/replication-overview.mdx) + and the data submitted by + [Data Subscriptions](../client-api/data-subscriptions/what-are-data-subscriptions.mdx) + to their workers. + +* Especially on the cloud, the significant reduction in the amount of transferred + data translates to a significant reduction of costs. + + +* In this page: + * [TCP Compression](../server/tcp-compression.mdx#tcp-compression) + * [Compression Algorithm and Ratio](../server/tcp-compression.mdx#compression-algorithm-and-ratio) + * [Enabling TCP Compression](../server/tcp-compression.mdx#enabling-tcp-compression) + * [Disabling TCP Compression](../server/tcp-compression.mdx#disabling-tcp-compression) + + +## TCP Compression + +RavenDB offers a variety of compression tools, including the compression +of [stored documents](../server/storage/documents-compression.mdx) and +[backups](../server/ongoing-tasks/backup-overview.mdx#compression). + +With the **TCP compression** feature enabled, data **in transit** is compressed as well. + +* RavenDB will compress data before [replicating](../server/clustering/replication/replication-overview.mdx) + it from one cluster node to another. + Replication makes a large portion of a cluster's traffic, and compressing + replicated data will minimize the traffic volume and expedite data delivery. + +* [Data Subscriptions](../client-api/data-subscriptions/what-are-data-subscriptions.mdx) + will also transfer compressed data to their workers. + Data subscriptions are used to automate documents processing, + and may transfer large quantities of documents on a regular basis. + Compressing the data they submit can reduce the traffic volume + and the costs of such automated routines considerably. + +## Compression Algorithm and Ratio + +* TCP Compression uses the [Zstandard compression algorithm](https://en.wikipedia.org/wiki/Zstandard), + continuously learning your data model to create dictionaries that represent the redundant + structural data across transferred documents. + +* Compression ratio tends to ascend as the size of the transferred data grows, + and may **top 85%** for big data transfers. 
+ +## Enabling TCP Compression + +Your server's [license](../start/licensing/licensing-overview.mdx) type determines whether +TCP compression, among other features, is activated. +No additional configuration is needed to enable this feature. + +![License (Studio View)](./assets/tcp-compression-license.png) +### Disabling TCP Compression + +* TCP Compression can be disabled by a client, using the client API + DocumentStore `DocumentConventions.DisableTcpCompression ` convention. + + +{`using (var store = new DocumentStore()) +\{ + var DocumentConventions = new DocumentConventions + \{ + // Disable TCP Compression + DisableTcpCompression = true + \}; +\} +`} + + + +* TCP Compression can also be disabled server-wide, using the + server [Server.Tcp.Compression.Disable](../server/configuration/server-configuration.mdx#servertcpcompressiondisable) configuration. + Learn how to change database settings using Studio [here](../studio/database/settings/database-settings.mdx). + + + diff --git a/versioned_docs/version-7.1/server/troubleshooting/_category_.json b/versioned_docs/version-7.1/server/troubleshooting/_category_.json new file mode 100644 index 0000000000..771e2baaa7 --- /dev/null +++ b/versioned_docs/version-7.1/server/troubleshooting/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 8, + "label": Troubleshooting, +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/admin-logs-view.png b/versioned_docs/version-7.1/server/troubleshooting/assets/admin-logs-view.png new file mode 100644 index 0000000000..1e824180f6 Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/admin-logs-view.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/create-debug-package.png b/versioned_docs/version-7.1/server/troubleshooting/assets/create-debug-package.png new file mode 100644 index 0000000000..f5fb636e7b Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/create-debug-package.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/download-logs-1.png b/versioned_docs/version-7.1/server/troubleshooting/assets/download-logs-1.png new file mode 100644 index 0000000000..718a7c5d3c Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/download-logs-1.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/download-logs-2.png b/versioned_docs/version-7.1/server/troubleshooting/assets/download-logs-2.png new file mode 100644 index 0000000000..383c2bc41b Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/download-logs-2.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/microsoft-logs.png b/versioned_docs/version-7.1/server/troubleshooting/assets/microsoft-logs.png new file mode 100644 index 0000000000..1c135fa2c2 Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/microsoft-logs.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/save-to-settings-json.png b/versioned_docs/version-7.1/server/troubleshooting/assets/save-to-settings-json.png new file mode 100644 index 0000000000..79cf545fd5 Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/save-to-settings-json.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/server-logs.png b/versioned_docs/version-7.1/server/troubleshooting/assets/server-logs.png new 
file mode 100644 index 0000000000..34602195a2 Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/server-logs.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/settings-logs-on-disk.png b/versioned_docs/version-7.1/server/troubleshooting/assets/settings-logs-on-disk.png new file mode 100644 index 0000000000..fcc56d2ece Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/settings-logs-on-disk.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/traffic-watch-logs.png b/versioned_docs/version-7.1/server/troubleshooting/assets/traffic-watch-logs.png new file mode 100644 index 0000000000..c1dd31646e Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/traffic-watch-logs.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/zip-1.png b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-1.png new file mode 100644 index 0000000000..b72cd26c7a Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-1.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/zip-2.png b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-2.png new file mode 100644 index 0000000000..6aee372cce Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-2.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/zip-3.png b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-3.png new file mode 100644 index 0000000000..3963e2c14f Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-3.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/zip-4.png b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-4.png new file mode 100644 index 0000000000..90b0cb8b86 Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-4.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/zip-5.png b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-5.png new file mode 100644 index 0000000000..679623958c Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-5.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/assets/zip-6.png b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-6.png new file mode 100644 index 0000000000..6bbf78b38f Binary files /dev/null and b/versioned_docs/version-7.1/server/troubleshooting/assets/zip-6.png differ diff --git a/versioned_docs/version-7.1/server/troubleshooting/collect-info.mdx b/versioned_docs/version-7.1/server/troubleshooting/collect-info.mdx new file mode 100644 index 0000000000..e397ecab9b --- /dev/null +++ b/versioned_docs/version-7.1/server/troubleshooting/collect-info.mdx @@ -0,0 +1,200 @@ +--- +title: "Collecting Information on Incidents for Support" +hide_table_of_contents: true +sidebar_label: Collect Info for Support +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Collecting Information on Incidents for Support + + + +* When encountering incidents or issues with RavenDB, it is essential 
to gather as much information as possible + to help the support team diagnose and resolve the problem effectively. + +* Follow the steps in this article for collecting the relevant information for RavenDB support. + +* In this article: + + * [Provide incident description](../../server/troubleshooting/collect-info.mdx#provide-incident-description) + * [Create debug package](../../server/troubleshooting/collect-info.mdx#create-debug-package) + * [Enable logs for ongoing issues](../../server/troubleshooting/collect-info.mdx#enable-logs-for-ongoing-issues) + * [Download logs](../../server/troubleshooting/collect-info.mdx#download-logs) + * [Reproduce scenario](../../server/troubleshooting/collect-info.mdx#reproduce-scenario) + * [Create failing test](../../server/troubleshooting/collect-info.mdx#create-failing-test) + + + +## Provide incident description + +* __Description__: + * Provide a detailed description of the incident you are experiencing. + * Include any error messages, warnings, or unexpected behavior you have encountered. + +* __Exceptions__: + * If applicable, attach the __full exception stack trace__ including the error message as plain text. + * Specify the origin of the exception + (e.g. from RavenDB Studio, from the client, from server logs, etc.). + +* __Versions__: + * Specify the RavenDB Server, Studio, and the client __versions__ that you are using. + + + +## Create debug package + +![Create Debug Package](./assets/create-debug-package.png) + + + +1. Navigate to __Manage Server > Gather Debug Info__ + +2. Select the data source(s) to retrieve. It is recommended to check all options. + +3. Select all databases or choose specific databases to gather information from. + +4. Select whether to create the package for the entire cluster or the current server. + +5. Click 'Download'. + A zip file containing the debug package will be downloaded. + + + + + +__If the Studio is unavailable__: + +* Try to download the debug package by issuing an HTTP GET request to the following endpoint: + `{SERVER_URL}/admin/debug/info-package`. + +* Execute this request for each node in the cluster, replacing `{SERVER_URL}` with the relevant node's URL. + + + + + +__Before sending the debug package zip file__, perform the following checks: + + * Verify that the zip file can be successfully extracted. + * Verify that the content is similar to the following sample images. + +![Zip file contents 1](./assets/zip-1.png) +![Zip file contents 2](./assets/zip-2.png) +![Zip file contents 3](./assets/zip-3.png) +![Zip file contents 4](./assets/zip-4.png) + + + + + + +## Enable logs for ongoing issues + +If the issue you encounter is still **ongoing**, enable the following logs on disk (if they are not already enabled) +before downloading the existing log files, as described below. + +![Admin logs view](./assets/admin-logs-view.png) + +1. Navigate to **Manage Server > Admin Logs** + +2. Click **Settings** in the "Logs on disk" section. + The following Settings dialog will open: + +![Settings logs on disk](./assets/settings-logs-on-disk.png) +### Server logs: + ![Server logs](./assets/server-logs.png) +### Microsoft logs: + ![Microsoft logs](./assets/microsoft-logs.png) +### Traffic watch logs: + ![Traffic watch logs 1](./assets/traffic-watch-logs.png) + + +Be aware that all log settings will reset to their default values after a server restart. 
+To preserve your settings after a restart, you can either: + + * In each log dialog, save the settings to the `settings.json` file by checking the checkbox in the UI: + (Scroll down in the Traffic watch logs dialog to see this option) + ![Save to settings.json](./assets/save-to-settings-json.png) + + * Or set the relevant configuration key, as described in this [configuration overview](../../server/configuration/configuration-options.mdx): + * For **Server logs**: set the [Logs.MinLevel](../../server/configuration/logs-configuration.mdx#logsminlevel) configuration key. + * For **Microsoft logs**: set the [Logs.Microsoft.MinLevel](../../server/configuration/logs-configuration.mdx#logsmicrosoftminlevel) configuration key. + * For **Traffic watch logs**: set the [TrafficWatch.Mode](../../server/configuration/traffic-watch-configuration.mdx#trafficwatchmode) configuration key. + + + + +## Download logs + +Perform the following for each node in your cluster: + +![download logs](./assets/download-logs-1.png) +![download logs](./assets/download-logs-2.png) + + + +1. Navigate to __Manage Server > Admin Logs__ and click 'Download Logs' in the "Logs on disk" section. + +2. Either check 'Use minimum' to retrieve logs information from the time the server was started, + or enter a specific (local) time. + +3. Either check 'Use maximum' to retrieve logs information up to the current time, + or enter a specific (local) time. + +4. Click 'Download'. + A zip file containing the logs will be downloaded. + + + + + +__If the Studio is unavailable__, or if the logs downloaded via the Studio appear problematic, +then copy the log files directly from the disk to another location to ensure that you keep them, +avoiding potential loss due to the retention configuration. + +* The location of the log files is determined by the [Logs.Path](../../server/configuration/logs-configuration.mdx#logspath) configuration. + +* Logs deletion is controlled by the following configurations: + * [Logs.MaxArchiveDays](../../server/configuration/logs-configuration.mdx#logsmaxarchivedays) + * [Logs.MaxArchiveFiles ](../../server/configuration/logs-configuration.mdx#logsmaxarchivefiles) + + + + + +__Before sending the log files__, perform the following checks: + +* Verify that the zip files can be successfully extracted. +* Confirm that the logs correspond to the time of the incident. +* Verify that the content is similar to the following sample images. + +![Zip file contents 5](./assets/zip-5.png) +![Zip file contents 6](./assets/zip-6.png) + + + + + +## Reproduce scenario + +* If the incident is over and you can reproduce it, then first verify logging level is set to 'Information'. + +* See how to enable the logs in [Enable logs](../../server/troubleshooting/collect-info.mdx#enable-logs-for-ongoing-issues). + + + +## Create failing test + +* If possible, it is advised to create a unit test that showcases the failure in your client code. + +* Refer to [Writing your unit test](../../start/test-driver.mdx) to learn how to use __RavenDB's TestDriver__. 
+ + + diff --git a/versioned_docs/version-7.1/server/troubleshooting/debug-routes.mdx b/versioned_docs/version-7.1/server/troubleshooting/debug-routes.mdx new file mode 100644 index 0000000000..5c4d10ef10 --- /dev/null +++ b/versioned_docs/version-7.1/server/troubleshooting/debug-routes.mdx @@ -0,0 +1,324 @@ +--- +title: "Troubleshooting: Debug Endpoints" +hide_table_of_contents: true +sidebar_label: Debug Endpoints +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Troubleshooting: Debug Endpoints + + +* This page contains a comprehensive list of endpoints for debugging and testing. + +* In this page: + * [Debug Endpoints](../../server/troubleshooting/debug-routes.mdx#debug-endpoints) + * [Production Endpoints](../../server/troubleshooting/debug-routes.mdx#production-endpoints) + + +For the endpoints that begin with `/databases/*/`, replace `*` with the name of your database. + + + +## Debug Endpoints + +| Endpoint | Method | Parameters | Description | Remarks | +|-|-|-|-|-| +| /admin/debug/cluster-info-package | GET | | Returns whole cluster information package as zip format | | +| /admin/debug/cpu/stats | GET | | Returns RavenDB's processor usage and thread pool status | | +| /admin/debug/info-package | GET | | Save debug package information for later analysis | | +| /admin/debug/memory/low-mem-log | GET | | Low memory events report | | +| /admin/debug/memory/smaps | GET | | Returns all of RavenDB's process mappings including shared/private clean/dirty memory allocations | Available only on Linux | +| /admin/debug/memory/stats | GET | | Full report of memory usage including un/managed usage by thread and free memory status | | +| /admin/debug/node/engine-logs | GET | | Rachis logs | | +| /admin/debug/node/ping | GET |
  • `url` (Optional)
  • `node` (Optional)
Specifies the node by url or node tag | Test the ability to reach the server | | +| /admin/debug/node/remote-connections | GET | | Returns connections' details of members and watchers in the cluster | | +| /admin/debug/node/state-change-history | GET | | List the node's state transition history in the cluster | | +| /admin/debug/proc/meminfo | GET | | Return /proc/<RavenDB ProcNum>/meminfo | Available only on Linux | +| /admin/debug/proc/stats | GET | | Return /proc/<RavenDB ProcNum>/stats | Available only on Linux | +| /admin/debug/proc/status | GET | | Return /proc/<RavenDB ProcNum>/status | Available only on Linux | +| /admin/debug/threads/runaway | GET | | List all threads and their names, sorted by duration | | +| /build/version | GET | | Returns product build number, major version, commit hash and full version number | | +| /databases/*/admin/debug/cluster/txinfo | GET |
  • `from` (Optional)
    Number of results to skip
  • `take` (Optional)
    Number of results to take
| List the incomplete [cluster transaction commands](../clustering/cluster-transactions.mdx#cluster--cluster-wide-transactions) | | +| /databases/*/admin/debug/txinfo | GET | | List | | +| /databases/*/debug/documents/huge | GET | | List IDs of documents which exceed `PerformanceHints.`
`Documents.`
`HugeDocumentSizeInMb` setting | | +| /databases/*/debug/identities | GET | | | | +| /databases/*/debug/info-package | GET | | Save debug package information for later analysis | | +| /databases/*/debug/io-metrics | GET | | Get current IO metrics: "Writes, Flush, Sync" for each storage environment | | +| /databases/*/debug/perf-metrics | GET | | Get current IO metrics: "Transactions" for each storage environment | | +| /databases/*/debug/queries/cache/list | GET | | | | +| /databases/*/debug/queries/running | GET | | | | +| /databases/*/debug/script-runners | GET | | | | +| /databases/*/debug/storage/all-environments/report | GET | | | | +| /databases/*/debug/storage/report | GET | | | | +| /databases/*/indexes | GET | | | | +| /databases/*/indexes/errors | GET | | | | +| /databases/*/indexes/stats | GET | | | | +| /databases/*/replication/debug/incoming-last-activity-time | GET | | | | +| /databases/*/replication/debug/incoming-rejection-info | GET | | | | +| /databases/*/replication/debug/outgoing-failures | GET | | | | +| /databases/*/replication/debug/outgoing-reconnect-queue | GET | | | | +| /databases/*/stats | GET | | | | +| /debug/server-id | GET | | | | + + + +## Production Endpoints + +| Endpoint | Method | Parameters | Description | Remarks | +|-------------------------------------------------------|-------------------------------------|-------------|--------------|----------| +| / | GET | | | | +| /admin/certificates | POST, PUT, DELETE, GET | | | | +| /admin/certificates/cluster-domains | GET | | | | +| /admin/certificates/edit | POST | | | | +| /admin/certificates/export | GET | | | | +| /admin/certificates/letsencrypt/force-renew | OPTIONS, POST | | | | +| /admin/certificates/letsencrypt/renewal-date | GET | | | | +| /admin/certificates/mode | GET | | | | +| /admin/certificates/refresh | OPTIONS, POST | | | | +| /admin/certificates/replace-cluster-cert | OPTIONS, POST | | | | +| /admin/certificates/replacement/reset | POST | | | | +| /admin/certificates/replacement/status | GET | | | | +| /admin/cluster/bootstrap | POST | | | | +| /admin/cluster/demote | OPTIONS, POST | | | | +| /admin/cluster/log | GET | | | | +| /admin/cluster/maintenance-stats | GET | | | | +| /admin/cluster/node | OPTIONS, PUT, DELETE | | | | +| /admin/cluster/observer/decisions | GET, OPTIONS | | | | +| /admin/cluster/observer/suspend | POST, OPTIONS | | | | +| /admin/cluster/promote | OPTIONS, POST | | | | +| /admin/cluster/reelect | OPTIONS, POST | | | | +| /admin/cluster/timeout | OPTIONS, POST | | | | +| /admin/compact | POST | | | | +| /admin/configuration/client | PUT | | | | +| /admin/configuration/studio | PUT | | | | +| /admin/console | POST | | | | +| /admin/databases | GET, PUT, DELETE | | | | +| /admin/databases/disable | POST | | | | +| /admin/databases/dynamic-node-distribution | POST | | | | +| /admin/databases/enable | POST | | | | +| /admin/databases/node | PUT | | | | +| /admin/databases/promote | POST | | | | +| /admin/databases/reorder | POST | | | | +| /admin/debug/remote-cluster-info-package | GET | | | | +| /admin/debug/script-runners | GET | | | | +| /admin/license/activate | POST | | | | +| /admin/license/eula/accept | POST | | | | +| /admin/license/forceUpdate | POST | | | | +| /admin/license/set-limit | OPTIONS, POST | | | | +| /admin/logs/configuration | GET, POST | | | | +| /admin/logs/watch | GET | | | | +| /admin/memory/gc | GET | | | | +| /admin/metrics | GET | | | | +| /admin/migrate | POST | | | | +| /admin/migrate/offline | POST | | | | +| /admin/operations/kill | POST 
| | | | +| /admin/operations/next-operation-id | GET | | | | +| /admin/rachis/send | POST | | | | +| /admin/remote-server/build/version | GET | | | | +| /admin/replication/conflicts/solver | POST | | | | +| /admin/restore/database | POST | | | | +| /admin/restore/points | POST | | | | +| /admin/secrets | GET, POST | | | | +| /admin/secrets/distribute | POST | | | | +| /admin/secrets/generate | GET | | | | +| /admin/studio-tasks/full-data-directory | GET | | | | +| /admin/studio-tasks/offline-migration-test | GET | | | | +| /admin/test-connection | POST | | | | +| /admin/test/delay | GET | | | | +| /admin/test/empty-message | GET | | | | +| /admin/test/sized-message | GET | | | | +| /admin/traffic-watch | GET | | | | +| /auth-error.html | GET | | | | +| /build/version/updates | POST | | | | +| /certificates/whoami | GET | | | | +| /cluster/node-info | GET | | | | +| /cluster/topology | GET | | | | +| /configuration/client | GET | | | | +| /configuration/studio | GET | | | | +| /databases | GET | | | | +| /databases/*/admin/backup/database | OPTIONS, POST | | | | +| /databases/*/admin/configuration/client | PUT | | | | +| /databases/*/admin/configuration/studio | PUT | | | | +| /databases/*/admin/connection-strings | DELETE, GET, PUT | | | | +| /databases/*/admin/etl | RESET, PUT | | | | +| /databases/*/admin/etl/raven/test | POST | | | | +| /databases/*/admin/etl/sql/test | POST | | | | +| /databases/*/admin/etl/sql/test-connection | POST | | | | +| /databases/*/admin/expiration/config | POST | | | | +| /databases/*/admin/indexes | PUT | | | | +| /databases/*/admin/indexes/disable | POST | | | | +| /databases/*/admin/indexes/enable | POST | | | | +| /databases/*/admin/indexes/start | POST | | | | +| /databases/*/admin/indexes/stop | POST | | | | +| /databases/*/admin/periodic-backup | POST | | | | +| /databases/*/admin/periodic-backup/config | GET | | | | +| /databases/*/admin/periodic-backup/test-credentials | POST | | | | +| /databases/*/admin/revisions | DELETE | | | | +| /databases/*/admin/revisions/config | POST | | | | +| /databases/*/admin/smuggler/import | GET | | | | +| /databases/*/admin/smuggler/import-dir | GET | | | | +| /databases/*/admin/smuggler/import-s3-dir | GET | | | | +| /databases/*/admin/smuggler/migrate | POST | | | | +| /databases/*/admin/smuggler/migrate/ravendb | POST | | | | +| /databases/*/admin/sql-migration/import | POST | | | | +| /databases/*/admin/sql-migration/schema | POST | | | | +| /databases/*/admin/sql-migration/test | POST | | | | +| /databases/*/admin/tasks | DELETE | | | | +| /databases/*/admin/tasks/external-replication | POST | | | | +| /databases/*/admin/tasks/state | POST | | | | +| /databases/*/admin/transactions-mode | GET | | | | +| /databases/*/admin/transactions/start-recording | POST | | | | +| /databases/*/admin/transactions/stop-recording | POST | | | | +| /databases/*/attachments | HEAD, GET, POST, PUT, DELETE | | | | +| /databases/*/bulk_docs | POST | | | | +| /databases/*/bulk_insert | POST | | | | +| /databases/*/changes | GET, DELETE | | | | +| /databases/*/changes/debug | GET | | | | +| /databases/*/cmpxchg | GET, PUT, DELETE | | | | +| /databases/*/collections/docs | GET | | | | +| /databases/*/collections/stats | GET | | | | +| /databases/*/configuration/client | GET | | | | +| /databases/*/configuration/studio | GET | | | | +| /databases/*/counters | GET, POST | | | | +| /databases/*/debug/attachments/hash | GET | | | | +| /databases/*/debug/attachments/metadata | GET | | | | +| /databases/*/debug/documents/export-all-ids | GET 
| | | | +| /databases/*/debug/documents/get-revisions | GET | | | | +| /databases/*/debug/io-metrics/live | GET | | | | +| /databases/*/debug/queries/kill | POST | | | | +| /databases/*/debug/storage/btree-structure | GET | | | | +| /databases/*/debug/storage/environment/report | GET | | | | +| /databases/*/debug/storage/fst-structure | GET | | | | +| /databases/*/docs | HEAD, GET, POST, DELETE, PUT, PATCH | | | | +| /databases/*/docs/class | GET | | | | +| /databases/*/docs/size | GET | | | | +| /databases/*/etl/debug/stats | GET | | | | +| /databases/*/etl/performance | GET | | | | +| /databases/*/etl/stats | GET | | | | +| /databases/*/expiration/config | GET | | | | +| /databases/*/hilo/next | GET | | | | +| /databases/*/hilo/return | PUT | | | | +| /databases/*/identity/next | POST | | | | +| /databases/*/identity/seed | POST | | | | +| /databases/*/index/open-faulty-index | POST | | | | +| /databases/*/indexes | RESET, DELETE, PUT | | | | +| /databases/*/indexes/c-sharp-index-definition | GET | | | | +| /databases/*/indexes/debug | GET | | | | +| /databases/*/indexes/has-changed | POST | | | | +| /databases/*/indexes/performance | GET | | | | +| /databases/*/indexes/performance/live | GET | | | | +| /databases/*/indexes/progress | GET | | | | +| /databases/*/indexes/replace | POST | | | | +| /databases/*/indexes/set-lock | POST | | | | +| /databases/*/indexes/set-priority | POST | | | | +| /databases/*/indexes/source | GET | | | | +| /databases/*/indexes/staleness | GET | | | | +| /databases/*/indexes/status | GET | | | | +| /databases/*/indexes/suggest-index-merge | GET | | | | +| /databases/*/indexes/terms | GET | | | | +| /databases/*/indexes/total-time | GET | | | | +| /databases/*/indexes/try | POST | | | | +| /databases/*/info/tcp | GET | | | | +| /databases/*/metrics | GET | | | | +| /databases/*/metrics/bytes | GET | | | | +| /databases/*/metrics/puts | GET | | | | +| /databases/*/migrate/get-migrated-server-urls | GET | | | | +| /databases/*/multi_get | POST | | | | +| /databases/*/notification-center/dismiss | POST | | | | +| /databases/*/notification-center/postpone | POST | | | | +| /databases/*/notification-center/watch | GET | | | | +| /databases/*/operations | GET | | | | +| /databases/*/operations/kill | POST | | | | +| /databases/*/operations/next-operation-id | GET | | | | +| /databases/*/operations/state | GET | | | | +| /databases/*/queries | POST, GET, DELETE, PATCH | | | | +| /databases/*/queries/test | PATCH | | | | +| /databases/*/replication/active-connections | GET | | | | +| /databases/*/replication/conflicts | GET | | | | +| /databases/*/replication/conflicts/solver | GET | | | | +| /databases/*/replication/performance | GET | | | | +| /databases/*/replication/performance/live | GET | | | | +| /databases/*/replication/pulses/live | GET | | | | +| /databases/*/replication/tombstones | GET | | | | +| /databases/*/revisions | GET | | | | +| /databases/*/revisions/bin | GET | | | | +| /databases/*/revisions/config | GET | | | | +| /databases/*/revisions/resolved | GET | | | | +| /databases/*/smuggler/export | POST | | | | +| /databases/*/smuggler/import | POST | | | | +| /databases/*/smuggler/import/csv | POST | | | | +| /databases/*/smuggler/validate-options | POST | | | | +| /databases/*/stats/detailed | GET | | | | +| /databases/*/streams/docs | GET | | | | +| /databases/*/streams/queries | HEAD, GET, POST | | | | +| /databases/*/studio-tasks/suggest-conflict-resolution | GET | | | | +| /databases/*/studio/collections/docs | DELETE | | | | +| 
/databases/*/studio/collections/fields | GET | | | | +| /databases/*/studio/collections/preview | GET | | | | +| /databases/*/studio/footer/stats | GET | | | | +| /databases/*/studio/index-fields | POST | | | | +| /databases/*/studio/index-type | POST | | | | +| /databases/*/studio/sample-data | POST | | | | +| /databases/*/studio/sample-data/classes | GET | | | | +| /databases/*/subscription-tasks | DELETE | | | | +| /databases/*/subscription-tasks/state | POST | | | | +| /databases/*/subscriptions | PUT, DELETE, GET | | | | +| /databases/*/subscriptions/connection-details | GET | | | | +| /databases/*/subscriptions/drop | POST | | | | +| /databases/*/subscriptions/state | GET | | | | +| /databases/*/subscriptions/try | POST | | | | +| /databases/*/task | GET | | | | +| /databases/*/tasks | GET | | | | +| /databases/*/tcp | GET, DELETE | | | | +| /databases/*/transactions/replay | POST | | | | +| /debug/is-loaded | GET | | | | +| /debug/routes | GET | | | | +| /eula/$ | GET | | | | +| /eula/index.html | GET | | | | +| /favicon.ico | GET | | | | +| /info/tcp | GET | | | | +| /license/eula | GET | | | | +| /license/status | GET | | | | +| /license/support | GET | | | | +| /monitoring/snmp | GET | | | | +| /monitoring/snmp/oids | GET | | | | +| /operations/state | GET | | | | +| /periodic-backup | GET | | | | +| /periodic-backup/next-backup-occurrence | GET | | | | +| /periodic-backup/status | GET | | | | +| /rachis/waitfor | Get | | | | +| /server-dashboard/watch | GET | | | | +| /server/notification-center/dismiss | POST | | | | +| /server/notification-center/postpone | POST | | | | +| /server/notification-center/watch | GET | | | | +| /setup/alive | OPTIONS, GET | | | | +| /setup/continue | POST | | | | +| /setup/continue/extract | POST | | | | +| /setup/dns-n-cert | POST | | | | +| /setup/finish | POST | | | | +| /setup/hosts | POST | | | | +| /setup/ips | GET | | | | +| /setup/letsencrypt | POST | | | | +| /setup/letsencrypt/agreement | GET | | | | +| /setup/parameters | GET | | | | +| /setup/populate-ips | POST | | | | +| /setup/secured | POST | | | | +| /setup/unsecured | POST | | | | +| /setup/user-domains | POST | | | | +| /studio-tasks/format | POST | | | | +| /studio-tasks/is-valid-name | GET | | | | +| /studio/$ | GET | | | | +| /studio/feedback | POST | | | | +| /studio/index.html | GET | | | | +| /topology | GET | | | | +| /wizard/$ | GET | | | | +| /wizard/index.html | GET | | | | + + diff --git a/versioned_docs/version-7.1/server/troubleshooting/logging.mdx b/versioned_docs/version-7.1/server/troubleshooting/logging.mdx new file mode 100644 index 0000000000..58112e93e7 --- /dev/null +++ b/versioned_docs/version-7.1/server/troubleshooting/logging.mdx @@ -0,0 +1,428 @@ +--- +title: "Logging" +hide_table_of_contents: true +sidebar_label: Logging +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Logging + + +* **Multiple logging levels** + RavenDB's flexible logging system allows you to pick the minimal logging + level most suitable for your needs from a + [scale of 6 logging levels](../../server/troubleshooting/logging.mdx#available-logging-levels). 
+ + The _Trace_ logging level, for example, will output all events, transactions, + and database changes, producing logs that can be processed for auditing + and accounting, while _Warning-level_ log entries will include only warnings + and errors to keep the log's clarity and remain easy to evaluate and possibly + escalate even when the database grows bigger and more complex. + +* **Numerous logging destinations** + Starting with RavenDB version `7.0` RavenDB outputs all log data through + [NLog](https://nlog-project.org), a widely used `.NET` logging framework + capable of streaming logged data to various destinations using a large + number of available [NLog plugins](https://nlog-project.org/config/). + +* **High performance** + Logging is asynchronous and is handled by a thread dedicated to the + execution of I/O operations, minimizing its effect on server performance. + +* In this page: + * [Available logging destinations](../../server/troubleshooting/logging.mdx#available-logging-destinations) + * [Logging configuration](../../server/troubleshooting/logging.mdx#logging-configuration) + * [Available logging levels](../../server/troubleshooting/logging.mdx#available-logging-levels) + * [Customizing logging level after Migration](../../server/troubleshooting/logging.mdx#customize-after-migration) + * [Default values](../../server/troubleshooting/logging.mdx#default-values) + * [CLI customization: immediate temporary changes](../../server/troubleshooting/logging.mdx#cli-customization:-immediate-temporary-changes) + * [Configuring and using NLog](../../server/troubleshooting/logging.mdx#configuring-and-using-nlog) + * [Configure RavenDB to use an external NLog configuration file](../../server/troubleshooting/logging.mdx#use-external-config-file) + * [Install NLog plugins that RavenDB would log data through](../../server/troubleshooting/logging.mdx#install-nlog-plugins) + * [Set your NLog configuration file](../../server/troubleshooting/logging.mdx#set-nlog-config-file) + * [An available template](../../server/troubleshooting/logging.mdx#an-available-template) + * [Mandatory `logger` definitions](../../server/troubleshooting/logging.mdx#mandatory--definitions) + * [Your own loggers](../../server/troubleshooting/logging.mdx#your-own-loggers) + * [Studio: Admin Logs](../../server/troubleshooting/logging.mdx#studio:-admin-logs) + + +## Available logging destinations + +RavenDB versions up to `6.2` output log data to files on the server machine, and can +optionally stream it to the server console. + +From version `7.0` on, RavenDB incorporates the [NLog](https://nlog-project.org) +logging framework. The logging process has hardly changed, but the integration with +NLog now allows RavenDB to log more versatile data to many additional destinations +via [NLog plugins](https://nlog-project.org/config/). +Available logging destinations include, among others, log aggregators like +[Grafana Loki](https://grafana.com/oss/loki/) and [Splunk](https://en.wikipedia.org/wiki/Splunk), +filtering and error handling tools, and a long list of applications that NLog +is integrated with. + + + +## Logging configuration + +* By default, RavenDB uses **internal configuration keys** to adjust logging. + Using this method, you can only output data to log files and to your console. + * Up to version `6.2`, this is the only available way to customize logging. 
+ * [List of logging configuration keys](../../server/configuration/logs-configuration.mdx) + * [How to modify configuration keys](../../server/configuration/configuration-options.mdx) + +* If you want to utilize **NLog plugins** so you can output log data to additional + destinations, you must use an **external NLog configuration** file. + * Changing logging settings using an external configuration file is possible + from version `7.0` on, as part of NLog's integration with RavenDB. + * Once an external configuration file is applied, its settings override all + the values set by internal configuration keys. + * [Using an external configuration file](../../server/troubleshooting/logging.mdx#configuring-and-using-nlog) + +* To **determine whether to use an NLog configuration file or internal configuration keys**, + set the [Logs.ConfigPath](../../server/configuration/logs-configuration.mdx#logsconfigpath) + configuration key with - + - a [path](../../server/troubleshooting/logging.mdx#use-external-config-file) + to the configuration file you want to apply + - or `null` to continue using internal configuration keys. + + +* Permanent changes in the logging configuration, through either configuration keys + or an external configuration file, are _applied by restarting the server_. + +* It is also possible to apply _temporary changes_ without restarting the server, + using [the CLI](../../server/troubleshooting/logging.mdx#cli-customization-immediate-temporary-changes). + +* The _scope_ of all logging settings is _server-wide_, applying to all databases. + + + + + +## Available logging levels + +The logging levels offered by RavenDB have changed from versions prior to `7.0` +to newer versions. +#### Logging Modes + +RavenDB versions up to `6.2` support proprietary **logging modes**. + +| Available logging Mode | Description | +| ------------- | ----------- | +| `Operations` | High-level info for operational use | +| `Information` | Low-level debug info | +| `None` | Logging is disabled | +#### Logging Levels + +From version `7.0` on, RavenDB's **logging levels** are NLog-compliant. + +| Available logging Level | Description | +| ------------- | ----------- | +| `Trace` | A highly informative level that's mostly used only during development | +| `Debug` | Debug information reported by internal application processes | +| `Info` | Information related to application progress, events lifespan, etc. | +| `Warn` | Warnings returned by failed validations, mild, recoverable failures, etc. | +| `Error` | Error reports of functionality failures, caught exceptions, etc. | +| `Fatal` | Fatal failures | +| `Off` | Logging is disabled | + +
+ + +When migrating from an older version to `7.0` or higher, RavenDB **is** capable +of understanding the old version's _logging mode_ configuration and translating +it to the equivalent NLog level. + +| **Logging mode (RavenDB ≤ `6.2`)** | **Equivalent NLog level (RavenDB ≥ `7.0`)** | +| :----------: | :----: | +| Operations | Info | +| Information | Debug | + +
+ +Logging will therefore continue uninterrupted; there is no rush to modify the +logging level right after migration. + +You **will** need to modify these settings, however, if you want to use NLog features. + +* To use an NLog logging level like `Warn`, for example, you will need to modify + `settings.json` accordingly. + + + +{`... +"Logs.MinLevel": "Warn", +... +`} + + + + +* And to output log data to a destination like a log aggregation tool through an + NLog plugin, you will need to start using an + [NLog configuration file](../../server/troubleshooting/logging.mdx#configuring-and-using-nlog), + and subsequently modify any NLog settings you want to change, including the logging + level, in this file. + + + +{` +... + +... + +`} + + + + +
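+ +As a rough illustration only (this is standard NLog rule syntax; `logfile` is a placeholder target name, not taken from RavenDB's template), raising the minimal level to `Warn` inside the configuration file's rules might look like: + + +{`<rules> + <!-- Route Warn-and-above records to a target named "logfile" --> + <logger name="*" minlevel="Warn" writeTo="logfile" /> +</rules> +`} + +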
+ + + +## Default values + +* **Default logging level** + The default minimal logging level is `LogLevel.Info`. + - To use a different level, set the + [Logs.MinLevel](../../server/configuration/logs-configuration.mdx#logsminlevel) configuration key. +* **Default destination** + Log entries are written by default to log files in the **Logs** folder on the server machine. + - To store log files in a different path, set the + [Logs.Path](../../server/configuration/logs-configuration.mdx#logspath) configuration key. + - [Learn how to log to additional destinations](../../server/troubleshooting/logging.mdx#configuring-and-using-nlog) +* **Default time standard** + The default time standard used for log entries is `UTC`. + Changing this configuration is now possible only via the NLog configuration file. + + + +{`layout="$\{longdate:universalTime=true\}..." +`} + + + +* **NLog configuration file defaults** + The **default values** given to settings in the + [NLog configuration file template](../../server/troubleshooting/logging.mdx#an-available-template) + are **identical** to those given to internal configuration keys. + - [The list of logging configuration keys and their default values](../../server/configuration/logs-configuration.mdx) + + + +## CLI customization: immediate temporary changes + +Logging settings can also be customized via [CLI](../../server/administration/cli.mdx), +using the [log](../../server/administration/cli.mdx#log) command. +Unlike the permanent customization methods described above (via internal configuration +keys or an external NLog file), which require a server restart to take effect, changes +made using CLI commands take **immediate effect**. However, they will also be +overridden when the server is restarted and the permanent configuration is reloaded from +[settings.json](../../server/configuration/configuration-options.mdx#settingsjson) +or the NLog configuration file. + +Use this syntax to customize logging via CLI: + + +{`log [on|off] [http-] [info|debug] [no-console] +`} + + + +#### Example: +To temporarily change the logging level to `debug`, issue this command in the server console: + + +{`ravendb> log debug +`} + + + + + +## Configuring and using NLog + +To use NLog, you need to: + +1. [Configure RavenDB to use an external NLog configuration file](../../server/troubleshooting/logging.mdx#use-external-config-file) +2. [Install NLog plugins that RavenDB would log data through](../../server/troubleshooting/logging.mdx#install-nlog-plugins) +3. [Set your NLog configuration file](../../server/troubleshooting/logging.mdx#set-nlog-config-file) + +
+ + +NLog options are customized using an NLog configuration file. +Direct RavenDB to the location of your configuration file using the +[Logs.ConfigPath](../../server/configuration/logs-configuration.mdx#logsconfigpath) +configuration key with the file's path as a value. +#### Example: +To use a configuration file named `NLog.config` that resides in the RavenDB server +folder, add this line to `settings.json`: + + +{`... +"Logs.ConfigPath": "NLog.config", +... +`} + + + + + +* Learn to set configuration keys [here](../../server/configuration/configuration-options.mdx). +* Learn how to create and modify a configuration file [below](../../server/troubleshooting/logging.mdx#set-nlog-config-file). +* Be aware that once a configuration file is used, the logging settings included in it + will _override_ all [internal configuration keys](../../server/configuration/logs-configuration.mdx) + settings. + + + + + +NLog's biggest advantage lies in its [plugins library](https://nlog-project.org/config/), +through which applications like RavenDB can stream log data to a variety of destinations. +NLog plugins are available as NuGet packages, which we can easily instruct RavenDB to load +and run during startup. +We do this by defining the plugin's NuGet package name as a property of the +[Logs.NuGet.AdditionalPackages](../../server/configuration/logs-configuration.mdx#logsnugetadditionalpackages) +configuration key, with the plugin version we want to use as a value. +#### Example +To load a [Grafana Loki](https://grafana.com/oss/loki/) plugin, for example, +add an `NLog.Targets.Loki` property to RavenDB's `Logs.NuGet.AdditionalPackages` +configuration key, with the plugin version you want to use as its value. + +`settings.json`: + + +{`... +"Logs.NuGet.AdditionalPackages": \{ "NLog.Targets.Loki": "3.3.0" \}, +... +`} + + + + + +* [Logs.NuGet.PackagesPath](../../server/configuration/logs-configuration.mdx#logsnugetpackagespath) + Use this key to select the path to which NuGet packages are downloaded. +* [Logs.NuGet.PackageSourceUrl](../../server/configuration/logs-configuration.mdx#logsnugetpackagesourceurl) + Use this key to set the default location from which NuGet packages are downloaded. +* [Logs.NuGet.AllowPreReleasePackages](../../server/configuration/logs-configuration.mdx#logsnugetallowprereleasepackages) + Use this key to determine whether RavenDB is allowed to use pre-release versions of NuGet packages. + + + + + +Follow the procedure below to create and modify an NLog configuration file. +When you're done, restart the server to apply the new settings. +#### An available template: +We've created an NLog configuration file **template** for your convenience; it is +available in the RavenDB **server** folder under the name `NLog.template.config`. +You can copy the template and modify it to your needs, or use your own configuration +file, as you prefer. +#### Mandatory `logger` definitions: +Whether you base your configuration file on our template or use your own file, make +sure it includes the four _loggers_ defined in the template file's **rules** section. +These definitions are mandatory, and failing to include them will generate an exception. + + +{` + + + + + + +`} + + +#### Your own loggers: +The **Raven_Default** logger, for example, directs log records to **Raven_Default_Target**. +Looking at the **Raven_Default_Target** definition (also included in the template file) we +can see that this target outputs log data to log files in the server's **Logs** folder.
+ + +{` + + +`} + + + +To output log data through an NLog plugin (rather than into log files) you can either +leave the default logger as is and modify its target, or add your own logger and target. + +To utilize a pre-installed `Loki` plugin, for example, you can - + +Add a new logger: + + +{` +`} + + + +And add a new "loki" target for this logger, which specifies the logging properties +for this destination. + + +{` +... + + + + + +For a complete guide to integrating RavenDB 7.0 logs with **Grafana Cloud** using **NLog and Loki**, +see [this article](https://ravendb.net/articles/sending-your-ravendb-7-0-logs-to-grafana-cloud). + + + + + + + +## Studio: Admin Logs + +Another way to view debug (or any other level) logs without having to restart +the server is Studio's [Admin Logs view](../../studio/server/debug/admin-logs.mdx). + + + diff --git a/versioned_docs/version-7.1/server/troubleshooting/voron-recovery-tool.mdx b/versioned_docs/version-7.1/server/troubleshooting/voron-recovery-tool.mdx new file mode 100644 index 0000000000..f97c3cafc5 --- /dev/null +++ b/versioned_docs/version-7.1/server/troubleshooting/voron-recovery-tool.mdx @@ -0,0 +1,82 @@ +--- +title: "Troubleshooting: Voron Recovery Tool" +hide_table_of_contents: true +sidebar_label: Voron Recovery Tool +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Troubleshooting: Voron Recovery Tool + +The Voron recovery tool is designed to extract your data even in the worst corruption state imaginable. + +## How to run it + +For Windows, the syntax is: + + +{`Voron.Recovery.exe recover +`} + + + +For Linux, the syntax is: + + +{`dotnet ./Voron.Recovery.dll recover +`} + + + +The process may take some time to run, depending on the storage speed and the recovered database size. + +* The tool will create: + * **recovery-2-Documents.ravendump**, an export file containing all documents and attachments. + * **recovery-3-Revisions.ravendump**, an export file containing revisioned documents. + * **recovery-4-Conflicts.ravendump**, an export file containing conflicted documents. + * If logging is enabled, a final report containing information about the amount of recovered and faulty data will be produced. + For detailed information, invoke the tool with `--LoggingMode Information`. + For a summary and errors, invoke the tool with `--LoggingMode Operations`. + +All the files above have the standard RavenDB export format and can be imported into RavenDB. +Recovery-1- was removed and is reserved for later use. + + + +* Recovery of encrypted data files is not supported at the moment. +* The export may contain deleted documents/attachments since they still reside in the file and we cannot automatically tell the latest version (under the assumption that the file is corrupted). + + + + + +## Additional flags + +`--OutputFileName`: overrides the default output file name. + +`--PageSizeInKB`: overrides the expected Voron page size of 8KB. It should never be used unless instructed by the support team. + +`--InitialContextSizeInMB`: overrides the starting size of memory used by the recovery tool; the default is 4KByte. + +`--InitialContextLongLivedSizeInKB`: overrides the starting size of memory used by the recovery tool for long-lived objects; the default is 4KByte.
+ +`--ProgressIntervalInSec`: overrides the time interval at which the recovery tool refreshes the report in the console. + +`--DisableCopyOnWriteMode`: disables copy-on-write mode. Use this option when recovery of the journals has failed, most likely because they are corrupted. In such a case, the Voron engine will throw an error indicating journal corruption, and this will stop the recovery process. + +The data file should be backed up before using this option. + + +`--LoggingMode`: controls the logging level; the valid values are `Operations` and `Information`. + +Logging is not enabled by default; to get any output report, invoke the tool with this option. + + + + diff --git a/versioned_docs/version-7.1/sharding/_category_.json b/versioned_docs/version-7.1/sharding/_category_.json new file mode 100644 index 0000000000..677f1e31cc --- /dev/null +++ b/versioned_docs/version-7.1/sharding/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 13, + "label": "Sharding" +} diff --git a/versioned_docs/version-7.1/sharding/administration/_category_.json b/versioned_docs/version-7.1/sharding/administration/_category_.json new file mode 100644 index 0000000000..e26e7deb6b --- /dev/null +++ b/versioned_docs/version-7.1/sharding/administration/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 1, + "label": "Administration" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/sharding/administration/anchoring-documents.mdx b/versioned_docs/version-7.1/sharding/administration/anchoring-documents.mdx new file mode 100644 index 0000000000..f8edccbd77 --- /dev/null +++ b/versioned_docs/version-7.1/sharding/administration/anchoring-documents.mdx @@ -0,0 +1,141 @@ +--- +title: "Anchoring Documents to a Bucket" +hide_table_of_contents: true +sidebar_label: Anchoring Documents +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Anchoring Documents to a Bucket + + +* By default, RavenDB selects the bucket for storing a document based on a [hash code](../../sharding/overview.mdx#buckets-population) derived from the entire document ID. + To give you more control over the document's bucket selection, RavenDB allows you to **anchor documents to a bucket**, + which ensures that multiple documents **share the same bucket**. + +* Making documents share a bucket is useful when, for example, the documents are related and are expected to be frequently loaded in the same transaction. + +* To gain control over which specific shard a document will reside in, see [sharding by prefix](../../sharding/administration/sharding-by-prefix.mdx). + +* In this page: + * [Overview](../../sharding/administration/anchoring-documents.mdx#overview) + * [Anchor multiple documents to the same bucket as a specific document](../../sharding/administration/anchoring-documents.mdx#anchor-multiple-documents-to-the-same-bucket-as-a-specific-document) + * [Anchor multiple documents using a common suffix](../../sharding/administration/anchoring-documents.mdx#anchor-multiple-documents-using-a-common-suffix) + * [Examples](../../sharding/administration/anchoring-documents.mdx#examples) + + +## Overview + +* Anchoring documents to a bucket is done by appending a suffix to the document ID.
+ You cannot explicitly select a bucket by number; instead, the bucket is determined based on the suffix used. + The suffix is composed of the `$` symbol + your choice of `suffix-text`. + +* RavenDB will run the hashing algorithm only over the ID part following the `$` symbol to determine which bucket the document will be placed in. + If a document ID contains multiple `$` symbols, only the suffix following the **last `$`** will be used to calculate the bucket number. + +* Documents whose IDs end with the **same suffix**, will **share the same bucket** (and therefore a shard). + + +Avoid anchoring too many documents to the same bucket to prevent creating an oversized bucket +that cannot be split and resharded if needed to balance the database. + + + + +## Anchor multiple documents to the same bucket as a specific document + +* To store a document in the same bucket as another specific document, + use the following format for the new document's ID: + <`new document ID`> + `$` + <`ID of the document you want to anchor it with`> + The new anchored document will be stored in the same bucket as that other document. + +* For example, creating a document with the following ID: <`Users/70`> + `$` + <`Users/4`> + will place document `Users/70$Users/4` in the same bucket as `Users/4`. + + !["Anchored Documents"](./assets/anchored-documents.png '"Users/70$Users/4" is stored in the same bucket as "Users/4"') + +* An anchored document is accessible only by its full name. + E.g., `Users/70` and `Users/70$Users/4` are two different documents, + where `Users/70$Users/4` is anchored to `Users/4` and `Users/70` is not. + +* It is possible to anchor multiple documents to the same document. + E.g., naming three documents `Users/70$Users/4`, `Users/71$Users/4`, and `Users/72$Users/4` + will make the database store these documents in the same bucket as `Users/4`. + + + +## Anchor multiple documents using a common suffix + +* It is possible to anchor multiple documents to the same bucket using an arbitrary suffix that does not correspond + to an existing document. + +* E.g., `Users/1$foo` and `Users/2$foo` will be stored in the same bucket. + + + +## Examples + +#### Example 1 + +Explicitly store a document with another document's name as a suffix, to keep both documents in the same bucket. +In this case, keep an invoice in the same bucket as its order. + + + +{`// The invoice will be stored with the order ID as a suffix +session.Store(invoice, invoice.Id + "$" + order.Id); +session.SaveChanges(); +`} + + +#### Example 2 + +Define and use a naming convention for invoices. +Whenever an invoice is stored, the $ symbol and an order ID are automatically added to the invoice's ID +to assure that invoices and orders are kept in the same bucket. 
+ + + +{`// Store an invoice document in the same bucket as its order document + +// Define a naming convention for invoices +// When an invoice is stored, the $ symbol and an order ID will be added to the invoice ID +var conventions = new DocumentConventions(); +conventions.RegisterAsyncIdConvention(async (dbName, r) => +\{ + var id = await conventions.AsyncDocumentIdGenerator(dbName, r); + return $"\{id\}$\{r.OrderId\}"; +\}); + +// Deploy the naming convention defined above +using (var store = new DocumentStore() +\{ + Urls = new[] \{ "http://127.0.0.1:8080" \}, + Database = "Products", + Conventions = conventions +\}.Initialize()) +\{ + using (var session = store.OpenSession()) + \{ + var order = new Order(); + session.Store(order); + + // The invoice will be stored with the order ID as a suffix + var invoice = new Invoice \{ OrderId = order.Id \}; + session.Store(invoice); + + session.SaveChanges(); + \} +\} +`} + + + + + diff --git a/versioned_docs/version-7.1/sharding/administration/api-admin.mdx b/versioned_docs/version-7.1/sharding/administration/api-admin.mdx new file mode 100644 index 0000000000..900d0dbef2 --- /dev/null +++ b/versioned_docs/version-7.1/sharding/administration/api-admin.mdx @@ -0,0 +1,352 @@ +--- +title: "Sharding: API Administration" +hide_table_of_contents: true +sidebar_label: API Administration +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding: API Administration + + +* This page explains how to manage a sharded database using RavenDB's API. +* Learn [here](../../sharding/administration/studio-admin.mdx) how to manage + a sharded database using Studio. + +* In this page: + * [Creating a Sharded Database](../../sharding/administration/api-admin.mdx#creating-a-sharded-database) + * [Orchestrator Administration](../../sharding/administration/api-admin.mdx#orchestrator-administration) + * [Adding an Orchestrator](../../sharding/administration/api-admin.mdx#adding-an-orchestrator) + * [Removing an Orchestrator](../../sharding/administration/api-admin.mdx#removing-an-orchestrator) + * [Shard Administration](../../sharding/administration/api-admin.mdx#shard-administration) + * [Adding a Shard](../../sharding/administration/api-admin.mdx#adding-a-shard) + * [Adding a Shard Replica](../../sharding/administration/api-admin.mdx#adding-a-shard-replica) + * [Promoting a Shard Replica](../../sharding/administration/api-admin.mdx#promoting-a-shard-replica) + * [Removing a Shard](../../sharding/administration/api-admin.mdx#removing-a-shard) + + +## Creating a Sharded Database + +To create a sharded database: + +* Use [CreateDatabaseOperation](../../client-api/operations/server-wide/create-database.mdx) + to create the database. +* Define `ShardingConfiguration` in the database record. + * The initial configuration can define just the database topologies for as many shards + as needed. + * Orchestrators and shards can be added and removed later on, after the database is created. 
+ +### `ShardingConfiguration` + + +{`public class ShardingConfiguration +\{ + // Orchestrator configuration + public OrchestratorConfiguration Orchestrator; + + // A database topology per shard dictionary + public Dictionary Shards; + + // Buckets distribution between the shards (filled by RavenDB) + public List BucketRanges = new List(); + + // Buckets that are currently being resharded (filled by RavenDB) + public Dictionary BucketMigrations; +\} +`} + + + +### Example + + +{`DatabaseRecord dbRecord = new DatabaseRecord("sampleDB"); + +dbRecord.Sharding = new ShardingConfiguration +\{ + Shards = new Dictionary() + \{ + \{ 0, new DatabaseTopology() \}, // Shard #0 database topology + \{ 1, new DatabaseTopology() \}, // Shard #1 database topology + \{ 2, new DatabaseTopology() \} // Shard #2 database topology + \} +\}; + +store.Maintenance.Server.Send(new CreateDatabaseOperation(dbRecord)); +`} + + + + + +## Orchestrator Administration + +Before granting a cluster node orchestrator functionality, we should +make sure that the node is up to the task, with no other tasks contesting +the orchestrator for system resources. E.g., it may be better to use +nodes that host no shards as orchestrators. + +## Adding an Orchestrator + +* To add an orchestrator, pass the database name and the node to be added + as orchestrator to the `AddNodeToOrchestratorTopologyOperation` operation. + + +{`public AddNodeToOrchestratorTopologyOperation(string databaseName, string node = null) +`} + + + +* Parameters: + + | Parameter | Type | Description | + |:-------------:|:-------------:|-------------| + | `databaseName` | string | Database Name | + | `node` | string | Node tag for the node to be made orchestrator | + +* Return value: `ModifyOrchestratorTopologyResult` + + +{`public class ModifyOrchestratorTopologyResult +\{ + public string Name; // Database Name + public OrchestratorTopology OrchestratorTopology; // Database Topology + public long RaftCommandIndex; +\} +`} + + + +## Removing an Orchestrator + +* To stop a node from functioning as an orchestrator, pass the database name + and the node tag to the `RemoveNodeFromOrchestratorTopologyOperation` + operation. + + +{`public RemoveNodeFromOrchestratorTopologyOperation(string databaseName, string node) +`} + + + +* Parameters: + + | Parameter | Type | Description | + |:-------------:|:-------------:|-------------| + | `databaseName` | string | Database Name | + | `node` | string | The node to be removed as orchestrator | + +* Return value: `ModifyOrchestratorTopologyResult` + + +{`public class ModifyOrchestratorTopologyResult +\{ + public string Name; // Database Name + public OrchestratorTopology OrchestratorTopology; // Database Topology + public long RaftCommandIndex; +\} +`} + + + + + +## Shard Administration + +## Adding a Shard + +* To add a new shard, use one of the `AddDatabaseShardOperation` operation overloads. + + +{`public AddDatabaseShardOperation(string databaseName, int? shardNumber = null) +`} + + + + +{`public AddDatabaseShardOperation(string databaseName, string[] nodes, int? shardNumber = null) +`} + + + + +{`public AddDatabaseShardOperation(string databaseName, int? replicationFactor, int? shardNumber = null) +`} + + + +* Parameters: + + | Parameter | Type | Description | + |:-------------:|:-------------:|-------------| + | `databaseName` | string | Database Name | + | `shardNumber` | int? | Shard number
If a shard number is not explicitly provided, the shard number will be the biggest existing shard number + 1 | + | `replicationFactor` | int? | The new shard's replication factor (**see comment below**) | + | `nodes` | string[] | A list of nodes to replicate the shard to.
If provided, the replication factor will be set by the number of nodes. | + + + `replicationFactor`, the new shard's replication factor, is determined as follows: + + * If `replicationFactor` is **not** provided explicitly, and a list of nodes is provided, + the replication factor will be set by the number of nodes. + * If **neither** `replicationFactor` **nor** a nodes list is provided, the replication factor + will be set as that of the first shard. + * If **both** `replicationFactor` and a nodes list are provided: + * If there are **fewer** nodes than set by `replicationFactor`, the new shard will be replicated + on these nodes. + * If there are **more** nodes than set by `replicationFactor`, only as many replicas as + set by `replicationFactor` will be created. + + + + +* Return value: `AddDatabaseShardResult` + + +{`public class AddDatabaseShardResult +\{ + public string Name \{ get; set; \} + public int ShardNumber \{ get; set; \} + public DatabaseTopology ShardTopology \{ get; set; \} + public long RaftCommandIndex \{ get; set; \} +\} +`} + + + +## Adding a Shard Replica + +* To add a replica to an existing shard, pass the database name and a shard + number to the `AddDatabaseNodeOperation` operation. + + + The replication factor is updated automatically as replicas are added; + there is no need to update it explicitly. + + + + +{`public AddDatabaseNodeOperation(string databaseName, int shardNumber, string node = null) +`} + + + +* Parameters: + + | Parameter | Type | Description | + |:-------------:|:-------------:|-------------| + | `databaseName` | string | Database Name | + | `shardNumber` | int | Shard number | + | `node` | string | The node that the replica will be set on (optional).
If not provided, RavenDB will select an available node. | + +* Return value: `DatabasePutResult` + + +{`public class DatabasePutResult +\{ + public long RaftCommandIndex \{ get; set; \} + + public string Name \{ get; set; \} + public DatabaseTopology Topology \{ get; set; \} + public List NodesAddedTo \{ get; set; \} + + public bool ShardsDefined \{ get; set; \} +\} +`} + + + + +## Promoting a Shard Replica + +* Shard replicas can be [promoted](../../client-api/operations/server-wide/promote-database-node.mdx) + as non-sharded databases can. + + To promote a shard, pass the database name, shard number and + node tag to the `PromoteDatabaseNodeOperation` operation. + This will help locate the exact shard instance we want to + promote, leading to the database, then to the specific shard, + and finally to the specific replica of that shard. + + +{`public PromoteDatabaseNodeOperation(string databaseName, int shardNumber, string node) +`} + + + +* Parameters: + + | Parameter | Type | Description | + |:-------------:|:-------------:|-------------| + | `databaseName` | string | Database Name | + | `shardNumber` | int | Shard number | + | `node` | string | Node tag | + +* Return value: `DatabasePutResult` + + +{`public class DatabasePutResult +\{ + public long RaftCommandIndex \{ get; set; \} + + public string Name \{ get; set; \} + public DatabaseTopology Topology \{ get; set; \} + public List NodesAddedTo \{ get; set; \} + + public bool ShardsDefined \{ get; set; \} +\} +`} + + + + +## Removing a Shard + +* A shard is removed when all its replicas have been deleted. +* RavenDB will remove a shard only after verifying that its database + is empty. If any buckets remain in the database the operation will + be aborted. +* To remove a shard use the designated `DeleteDatabasesOperation` overload. + + +{`public DeleteDatabasesOperation( + string databaseName, + int shardNumber, + bool hardDelete, + string fromNode, + TimeSpan? timeToWaitForConfirmation = null) +`} + + + +* Parameters: + + | Parameter | Type | Description | + |:-------------:|:-------------:|-------------| + | `databaseName` | string | Database Name | + | `shardNumber` | int | Shard number: number of the shard replica to be removed | + | `hardDelete` | bool | If `true`, Hard Delete: stop replication to this node and **delete the replica's database**.
If `false`, Soft Delete: stop replication to this node but do not delete the replica's database. | + | `fromNode` | string | The node we want to remove the replica from | + | `timeToWaitForConfirmation` | TimeSpan? | | + +* Return value: `DeleteDatabaseResult` + + +{`public class DeleteDatabaseResult +\{ + public long RaftCommandIndex \{ get; set; \} + public string[] PendingDeletes \{ get; set; \} +\} +`} + + + + + + diff --git a/versioned_docs/version-7.1/sharding/administration/assets/anchored-documents.png b/versioned_docs/version-7.1/sharding/administration/assets/anchored-documents.png new file mode 100644 index 0000000000..2ce773f602 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/anchored-documents.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/assigned-bucket-ranges.png b/versioned_docs/version-7.1/sharding/administration/assets/assigned-bucket-ranges.png new file mode 100644 index 0000000000..0651ccc6cb Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/assigned-bucket-ranges.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/create-new-database.png b/versioned_docs/version-7.1/sharding/administration/assets/create-new-database.png new file mode 100644 index 0000000000..e71ad6f94c Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/create-new-database.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-orchestrator-01.png b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-orchestrator-01.png new file mode 100644 index 0000000000..f373eed686 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-orchestrator-01.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-orchestrator-02.png b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-orchestrator-02.png new file mode 100644 index 0000000000..2fe60e52ba Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-orchestrator-02.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-shard-replica-01.png b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-shard-replica-01.png new file mode 100644 index 0000000000..6416f8cf89 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-shard-replica-01.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-shard-replica-02.png b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-shard-replica-02.png new file mode 100644 index 0000000000..c56365f904 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-shard-replica-02.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-shard.png b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-shard.png new file mode 100644 index 0000000000..67fcf35a1e Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/database-group_add-shard.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/database-group_remove-orchestrator.png 
b/versioned_docs/version-7.1/sharding/administration/assets/database-group_remove-orchestrator.png new file mode 100644 index 0000000000..ea1ee5ff9c Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/database-group_remove-orchestrator.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/database-group_remove-shard-replica-01.png b/versioned_docs/version-7.1/sharding/administration/assets/database-group_remove-shard-replica-01.png new file mode 100644 index 0000000000..e621377997 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/database-group_remove-shard-replica-01.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/database-group_remove-shard-replica-02.png b/versioned_docs/version-7.1/sharding/administration/assets/database-group_remove-shard-replica-02.png new file mode 100644 index 0000000000..a6998a7e5d Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/database-group_remove-shard-replica-02.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/define-prefixes.png b/versioned_docs/version-7.1/sharding/administration/assets/define-prefixes.png new file mode 100644 index 0000000000..8f2016c250 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/define-prefixes.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/document-list-view.png b/versioned_docs/version-7.1/sharding/administration/assets/document-list-view.png new file mode 100644 index 0000000000..af2da2dd16 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/document-list-view.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/enable-sharding.png b/versioned_docs/version-7.1/sharding/administration/assets/enable-sharding.png new file mode 100644 index 0000000000..e2f6dfa92b Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/enable-sharding.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_create-new-database.png b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_create-new-database.png new file mode 100644 index 0000000000..4fb7bbcc9a Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_create-new-database.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-group.png b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-group.png new file mode 100644 index 0000000000..d0d4684393 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-group.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_01.png b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_01.png new file mode 100644 index 0000000000..dd2c7b4fad Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_01.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_02_select-shard.png b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_02_select-shard.png new file mode 100644 index 0000000000..1ef189da21 Binary files /dev/null and 
b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_02_select-shard.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_03_storage-report.png b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_03_storage-report.png new file mode 100644 index 0000000000..c37d42f332 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_03_storage-report.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_04_docs-view.png b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_04_docs-view.png new file mode 100644 index 0000000000..4f262e67f0 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_04_docs-view.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_expanded-details.png b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_expanded-details.png new file mode 100644 index 0000000000..72e6294d00 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_database-view_expanded-details.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_new-database.png b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_new-database.png new file mode 100644 index 0000000000..4b41eb030d Binary files /dev/null and b/versioned_docs/version-7.1/sharding/administration/assets/studio-admin_new-database.png differ diff --git a/versioned_docs/version-7.1/sharding/administration/sharding-by-prefix.mdx b/versioned_docs/version-7.1/sharding/administration/sharding-by-prefix.mdx new file mode 100644 index 0000000000..d1539691c6 --- /dev/null +++ b/versioned_docs/version-7.1/sharding/administration/sharding-by-prefix.mdx @@ -0,0 +1,786 @@ +--- +title: "Sharding by prefix" +hide_table_of_contents: true +sidebar_label: Sharding by Prefix +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding by prefix + + +* The default technique of distributing data across shards uses a hashing algorithm based on document IDs + to [populate buckets](../../sharding/overview.mdx#buckets-population) that are assigned to shards. While this method is effective in distributing data evenly, + it may not meet specific data partitioning needs, as it doesn't allow you to fully control which shard a document will reside in. + +* **Sharding by prefix**, also known as **prefixed sharding**, allows you to assign data to specific shards + by explicitly specifying on which shard a document should reside based on its ID prefix. + +* With prefixed sharding, you only control the shard where a document is stored. + You cannot control the specific bucket within that shard where the document will reside. + This can be partially addressed by [anchoring documents](../../sharding/administration/anchoring-documents.mdx) to a bucket. + Learn more [below](../../sharding/administration/sharding-by-prefix.mdx#prefixed-sharding-vs-anchoring-documents). 
+
+* In this page:
+  * [Why use prefixed sharding](../../sharding/administration/sharding-by-prefix.mdx#why-use-prefixed-sharding)
+  * [Overview](../../sharding/administration/sharding-by-prefix.mdx#overview)
+  * [Bucket management](../../sharding/administration/sharding-by-prefix.mdx#bucket-management)
+  * [Adding prefixes via Studio](../../sharding/administration/sharding-by-prefix.mdx#adding-prefixes-via-studio)
+  * [Adding prefixes via Client API](../../sharding/administration/sharding-by-prefix.mdx#adding-prefixes-via-client-api)
+    * [Add prefixes when creating a database](../../sharding/administration/sharding-by-prefix.mdx#add-prefixes-when-creating-a-database)
+    * [Add prefixes after database creation](../../sharding/administration/sharding-by-prefix.mdx#add-prefixes-after-database-creation)
+  * [Removing prefixes](../../sharding/administration/sharding-by-prefix.mdx#removing-prefixes)
+  * [Updating shard configurations for prefixes](../../sharding/administration/sharding-by-prefix.mdx#updating-shard-configurations-for-prefixes)
+  * [Querying selected shards by prefix](../../sharding/administration/sharding-by-prefix.mdx#querying-selected-shards-by-prefix)
+  * [Prefixed sharding vs Anchoring documents](../../sharding/administration/sharding-by-prefix.mdx#prefixed-sharding-vs-anchoring-documents)
+
+## Why use prefixed sharding
+
+**Control over data placement**:
+Prefixed sharding gives you customized data distribution by letting you assign data to specific shards.
+This enables data partitioning that is tailored to your business logic and simplifies data management.
+
+**Geographical data grouping**:
+For applications serving users from different regions, storing data on a shard in a region near the end user is beneficial, as it can improve access speed and reduce latency.
+Additionally, laws and regulations may mandate that data be stored within specific geographical boundaries.
+Business requirements might also necessitate keeping data close to the audience to enhance query performance and user experience.
+
+**Optimized query performance**:
+Prefixed sharding eliminates the need to query all shards in the cluster by allowing queries to target only the shards that contain the relevant documents.
+Grouping data on the same shard enhances query performance and reduces latency, particularly for regional operations.
+
+**Scalability**:
+As your system grows and you add more servers, prefixed sharding simplifies managing increased data volumes and traffic.
+You can add servers and shards as needed, allowing for controlled scaling that maintains both performance and reliability.
+
+**Overall performance**:
+By grouping related data on the same shard, prefixed sharding optimizes data storage and access patterns,
+reduces network latency for region-specific operations, and enhances overall system performance in distributed database environments.
+
+## Overview
+
+**Configure shards**:
+
+* To store documents on a specific shard, define the target shard for each document ID prefix that you add.
+  Documents with IDs matching any of the defined prefixes will be routed to the corresponding shard.
+  Learn how to [add prefixes](../../sharding/administration/sharding-by-prefix.mdx#adding-prefixes-via-client-api) below.
+
+* For example, you can define that all documents with an ID starting with `users/us/` will be stored in shard **#0**.
+  Consequently, the following sample documents will be stored in shard **#0**:
+  * _users/us/1_
+  * _users/us/2_
+  * _users/us/washington_
+  * _users/us/california/company/department_
+
+**Configure multiple shards**:
+
+* You can assign multiple shards to the same prefix.
+
+* For example, both shard **#1** and shard **#2** can be assigned to prefix `users/us/`.
+  In this case, any document with that prefix will be stored in either shard **#1** or shard **#2**.
+
+**Prefix rules**:
+
+* The maximum number of prefixes that can be defined is 4096.
+
+* The prefix string that you define must end with either the `/` character or with `-`,
+  e.g., `users/us/` or `users/us-`.
+
+* Prefixes are case-insensitive.
+  RavenDB will treat `users/us/` and `users/US/` as equivalent document prefixes.
+  Therefore, documents such as "_users/us/1_" and "_users/US/2_" will be routed according to the same rule.
+
+* RavenDB prioritizes the most specific prefix over more general ones.
+  For example, if you assign `users/us/` to shard **#0** and `users/us/florida/` to shard **#1**, then:
+  * Document "_users/us/123_" will be stored in shard **#0**.
+  * Document "_users/us/florida/123_" will be stored in shard **#1**, even though it also matches the `users/us/` prefix.
+
+## Bucket management
+
+**When you define a sharded database**:
+RavenDB reserves 1,048,576 buckets for the entire database. Each shard is assigned a range of buckets from this set.
+Any document added to the database is processed through a hashing algorithm, which determines the bucket number where the document will reside.
+The initial bucket distribution for a sharded database with 3 shards will be:
+
+  * Buckets assigned to shard **#0**: `[0 .. 349,524]`
+  * Buckets assigned to shard **#1**: `[349,525 .. 699,049]`
+  * Buckets assigned to shard **#2**: `[699,050 .. 1,048,575]`
+
+**When you configure prefixes for sharding**:
+RavenDB will reserve an additional range of 1,048,576 buckets for each prefix you add, on top of the buckets already reserved.
+So now, if we add prefixes **`users/us/`** for shard #0 and **`users/asia/`** for shard #1, we get:
+
+  * Additional buckets assigned to shard **#0**: `[1,048,576 .. 2,097,151]` for documents with prefix `users/us/`
+  * Additional buckets assigned to shard **#1**: `[2,097,152 .. 3,145,727]` for documents with prefix `users/asia/`
+
+When creating a new document with an ID that matches any of the defined prefixes,
+the hashing algorithm is applied to the document ID, but the resulting bucket number is limited to the set of buckets reserved for that prefix,
+thereby routing the document to be stored in the chosen shard.
+
+When creating a new document with an ID that does not match any predefined prefix,
+the resulting hashed bucket number can fall into any of the 3 shards.
+
+The reserved bucket ranges are visible in the database record in the Studio.
+Navigate to **Settings > Database Record** and expand the "Sharding > BucketRanges" property:
+
+!["Bucket ranges"](./assets/assigned-bucket-ranges.png 'Bucket ranges across the shards')
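+
+To make the bucket arithmetic above concrete, here is a conceptual sketch of how a bucket number could be derived.
+This is not RavenDB's actual implementation (the server uses its own internal hashing algorithm); it only
+illustrates how a prefixed document is confined to the bucket range reserved for its prefix:
+
+{`// Conceptual sketch only - RavenDB's real hash function differs.
+const int BucketsPerRange = 1_048_576;
+
+static int GetBucket(string docId, int? prefixIndex = null)
+{
+    // Hash the lower-cased document ID (prefixes and IDs are case-insensitive)
+    uint hash = 2166136261;
+    foreach (char c in docId.ToLowerInvariant())
+        hash = (hash ^ c) * 16777619u;   // FNV-1a, used here for illustration only
+
+    int bucketInRange = (int)(hash % BucketsPerRange);
+
+    // No matching prefix: the bucket falls in the default range [0 .. 1,048,575]
+    if (prefixIndex == null)
+        return bucketInRange;
+
+    // Matching prefix: shift the bucket into the extra range reserved for that prefix,
+    // e.g. the first prefix ('users/us/') maps to [1,048,576 .. 2,097,151]
+    return (prefixIndex.Value + 1) * BucketsPerRange + bucketInRange;
+}
+`}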
+
+## Adding prefixes via Studio
+
+You can define prefixes when creating a sharded database via the Studio.
+
+!["Create a new database"](./assets/create-new-database.png 'Create a new database')
+
+1. From the database list view, click **New database** to create a new database.
+2. Enter a name for the new database.
+3. Click **Next** to proceed to the sharding settings on the next screen.
+
+In this example, we define a sharded database with 3 shards, each having a replication factor of 2:
+
+!["Enable sharding"](./assets/enable-sharding.png 'Enable sharding')
+
+1. Turn on the **Enable Sharding** toggle.
+2. Set the number of shards you want.
+3. Turn on the **Add prefixes for shards** toggle.
+4. Set the replication factor and other options as desired, and then click **Next** to proceed to define the prefixes.
+
+Add the prefixes and specify their destination shards:
+
+!["Define prefixes"](./assets/define-prefixes.png 'Define prefixes')
+
+1. Enter a prefix. The prefix string must end with either `/` or `-`.
+2. Select the target shard. Multiple shards can be selected for the same prefix.
+3. Click **Add prefix** to add additional prefixes.
+4. Click **Quick Create** to complete the process using the default settings for the remaining configurations and create the new database.
+   Or, click **Next** to configure additional settings.
+
+New documents will be stored in the matching shards:
+
+!["Document list view"](./assets/document-list-view.png 'Documents are stored in the requested shards')
+
+1. Documents with prefix `users/us/` are stored in shard **#0**.
+2. Documents with prefix `users/asia/` are stored in shard **#1**.
+3. Documents with an ID that does not match any prefix will be stored on any of the 3 shards.
+   E.g., document `users/uk/london/clients/1` is stored on shard **#2** since no matching prefix was defined.
+
+## Adding prefixes via Client API
+
+Using the Client API, you can add prefixes when creating the database or after database creation:
+
+##### Add prefixes when creating a database
+
+{`// Define the database record:
+// ===========================
+var databaseRecord = new DatabaseRecord
+{
+    // Provide a name for the new database
+    DatabaseName = "SampleDB",
+
+    // Set the sharding topology configuration
+    // Here each shard will have a replication factor of 2 nodes
+    Sharding = new ShardingConfiguration
+    {
+        Shards = new Dictionary<int, DatabaseTopology>
+        {
+            [0] = new() { Members = new List<string> { "A", "B" } },
+            [1] = new() { Members = new List<string> { "A", "C" } },
+            [2] = new() { Members = new List<string> { "C", "B" } }
+        }
+    }
+};
+
+// Define prefixes and their target shard/s:
+// =========================================
+databaseRecord.Sharding.Prefixed =
+[
+    new PrefixedShardingSetting
+    {
+        Prefix = "users/us/",
+        // Assign a SINGLE shard for the prefix
+        Shards = [0]
+    },
+    new PrefixedShardingSetting
+    {
+        Prefix = "users/asia/",
+        // Can assign MULTIPLE shards for a prefix
+        Shards = [1, 2]
+    }
+];
+
+// Deploy the new database to the server:
+// ======================================
+store.Maintenance.Server.Send(new CreateDatabaseOperation(databaseRecord));
+
+// You can verify the sharding configuration that has been created:
+// ================================================================
+var record = store.Maintenance.Server.Send(new GetDatabaseRecordOperation(store.Database));
+
+var shardingConfiguration = record.Sharding;
+var numberOfShards = shardingConfiguration.Shards.Count;     // 3
+var numberOfPrefixes = shardingConfiguration.Prefixed.Count; // 2
+`}
+
+{`// Define the database record:
+// ===========================
+var databaseRecord = new DatabaseRecord
+{
+    // Provide a name for the new database
+    DatabaseName = "SampleDB",
+
+    // Set the sharding topology configuration
+    // Here each shard will have a replication factor of 2 nodes
+    Sharding = new ShardingConfiguration
+    {
+        Shards = new Dictionary<int, DatabaseTopology>
+        {
+            [0] = new() { Members = new List<string> { "A", "B" } },
+            [1] = new() { Members = new List<string> { "A", "C" } },
+            [2] = new() { Members = new List<string> { "C", "B" } }
+        }
+    }
+};
+
+// Define prefixes and their target shard/s:
+// =========================================
+databaseRecord.Sharding.Prefixed =
+[
+    new PrefixedShardingSetting
+    {
+        Prefix = "users/us/",
+        // Assign a SINGLE shard for the prefix
+        Shards = [0]
+    },
+    new PrefixedShardingSetting
+    {
+        Prefix = "users/asia/",
+        // Can assign MULTIPLE shards for a prefix
+        Shards = [1, 2]
+    }
+];
+
+// Deploy the new database to the server:
+// ======================================
+await store.Maintenance.Server.SendAsync(new CreateDatabaseOperation(databaseRecord));
+
+// You can verify the sharding configuration that has been created:
+// ================================================================
+var record = await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database));
+
+var shardingConfiguration = record.Sharding;
+var numberOfShards = shardingConfiguration.Shards.Count;     // 3
+var numberOfPrefixes = shardingConfiguration.Prefixed.Count; // 2
+`}
+
+##### Add prefixes after database creation
+
+* Use `AddPrefixedShardingSettingOperation` to add a prefix to your sharding configuration after the database has been created.
+
+* In this case, you can only add prefixes that do not match any existing document IDs in the database.
+  An exception will be thrown if a document with an ID that starts with the new prefix exists in the database.
+
+{`// Define the prefix to add and its target shard/s
+var shardingSetting = new PrefixedShardingSetting
+{
+    Prefix = "users/eu/",
+    Shards = [2]
+    // Can assign multiple shards, e.g.: Shards = [2, 3]
+};
+
+// Define the add operation:
+var addPrefixOp = new AddPrefixedShardingSettingOperation(shardingSetting);
+
+// Execute the operation by passing it to Maintenance.Send
+store.Maintenance.Send(addPrefixOp);
+`}
+
+{`// Define the prefix to add and its target shard/s
+var shardingSetting = new PrefixedShardingSetting
+{
+    Prefix = "users/eu/",
+    Shards = [2]
+    // Can assign multiple shards, e.g.: Shards = [2, 3]
+};
+
+// Define the add operation:
+var addPrefixOp = new AddPrefixedShardingSettingOperation(shardingSetting);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+await store.Maintenance.SendAsync(addPrefixOp);
+`}
+
+## Removing prefixes
+
+* Use `DeletePrefixedShardingSettingOperation` to remove a prefix from your sharding configuration.
+
+* You can only delete prefixes that do not match any existing document IDs in the database.
+  An exception will be thrown if a document with an ID that starts with the specified prefix exists in the database.
+
+{`// Define the delete prefix operation,
+// passing the prefix string
+var deletePrefixOp = new DeletePrefixedShardingSettingOperation("users/eu/");
+
+// Execute the operation by passing it to Maintenance.Send
+store.Maintenance.Send(deletePrefixOp);
+`}
+
+{`// Define the delete prefix operation,
+// passing the prefix string
+var deletePrefixOp = new DeletePrefixedShardingSettingOperation("users/eu/");
+
+// Execute the operation by passing it to Maintenance.SendAsync
+await store.Maintenance.SendAsync(deletePrefixOp);
+`}
+
+## Updating shard configurations for prefixes
+
+* Use `UpdatePrefixedShardingSettingOperation` to modify the shards assigned to an existing prefix.
+
+* Unlike when defining prefixes for the first time,
+  the following rules must be observed when updating an existing prefix configuration:
+
+  * **When adding a shard to an existing prefix configuration**,
+    RavenDB does not automatically reallocate buckets to the newly added shard.
+    Therefore, after assigning a new shard to a prefix, manual bucket [re-sharding](../../sharding/resharding.mdx) is required.
+    You must manually move some buckets initially reserved for this prefix from the existing shards to the new shard;
+    otherwise, documents matching the prefix will not be stored on the added shard.
+
+  * **When removing a shard from an existing prefix configuration**,
+    you must first manually move the buckets from the removed shard to the other shards that are assigned to this prefix.
+
+* In the example below, in addition to shard **#2** that was configured in the [previous](../../sharding/administration/sharding-by-prefix.mdx#add-prefixes-after-database-creation) example,
+  we are adding shard **#0** as a destination for documents with prefix `users/eu/`.
+
+{`// Define the shards configuration you wish to update for the specified prefix
+var shardingSetting = new PrefixedShardingSetting
+{
+    Prefix = "users/eu/",
+    // Adding shard #0 to the previous prefix configuration
+    Shards = [0, 2]
+};
+
+// Define the update operation:
+var updatePrefixOp = new UpdatePrefixedShardingSettingOperation(shardingSetting);
+
+// Execute the operation by passing it to Maintenance.Send
+store.Maintenance.Send(updatePrefixOp);
+`}
+
+{`// Define the shards configuration you wish to update for the specified prefix
+var shardingSetting = new PrefixedShardingSetting
+{
+    Prefix = "users/eu/",
+    // Adding shard #0 to the previous prefix configuration
+    Shards = [0, 2]
+};
+
+// Define the update operation:
+var updatePrefixOp = new UpdatePrefixedShardingSettingOperation(shardingSetting);
+
+// Execute the operation by passing it to Maintenance.SendAsync
+await store.Maintenance.SendAsync(updatePrefixOp);
+`}
+
+## Querying selected shards by prefix
+
+* Storing documents on specific shards allows you to query only those shards directly,
+  avoiding unnecessary trips to other shards by the orchestrator.
+
+* Use the `ShardContext` method together with `ByPrefix` or `ByPrefixes` to specify which shard/s to query.
+  Only the shard/s assigned to the specified prefix/es will be queried.
+  Note: An exception will be thrown if the specified prefix is not already defined in the database.
+
+* The provided prefix does not need to match the prefix of any specific document you are querying;
+  it is just used to determine which shard/s to query.
+  (Remember that a single prefix can be assigned to multiple shards.)
+
+* You can designate which shard/s to query by combining selection by prefix
+  with [selecting a shard by document ID](../../sharding/querying.mdx#querying-selected-shards).
+
+* See the following examples:
+
+**Query a selected shard by prefix**:
+
+{`// Query for 'User' documents from shard/s assigned to a specific prefix:
+// ======================================================================
+var userDocs = session.Query<User>()
+    // Call 'ShardContext' to select which shard/s to query
+    // RavenDB will query only the shard/s assigned to prefix 'users/'
+    .Customize(x => x.ShardContext(s => s.ByPrefix("users/")))
+    // The query predicate
+    .Where(x => x.Name == "Joe")
+    .ToList();
+
+// Variable 'userDocs' will include all documents of type 'User'
+// that match the query predicate and reside on the shard/s assigned to prefix 'users/'.
+
+// Query for 'Company' documents from shard/s assigned to a specific prefix:
+// =========================================================================
+var companyDocs = session.Query<Company>()
+    // This example shows that the prefix doesn't need to match the document type queried
+    .Customize(x => x.ShardContext(s => s.ByPrefix("users/")))
+    .Where(x => x.Address.Country == "US")
+    .ToList();
+
+// Variable 'companyDocs' will include all documents of type 'Company'
+// that match the query predicate and reside on the shard/s assigned to prefix 'users/'.
+
+// Query for ALL documents from shard/s assigned to a specific prefix:
+// ===================================================================
+var allDocs = session.Query<object>() // query with <object>
+    .Customize(x => x.ShardContext(s => s.ByPrefix("users/")))
+    .ToList();
+
+// Variable 'allDocs' will include ALL documents that reside on
+// the shard/s assigned to prefix 'users/'.
+`}
+
+{`// Query for 'User' documents from shard/s assigned to a specific prefix:
+// ======================================================================
+var userDocs = await asyncSession.Query<User>()
+    .Customize(x => x.ShardContext(s => s.ByPrefix("users/")))
+    .Where(x => x.Name == "Joe")
+    .ToListAsync();
+
+// Query for 'Company' documents from shard/s assigned to a specific prefix:
+// =========================================================================
+var companyDocs = await asyncSession.Query<Company>()
+    .Customize(x => x.ShardContext(s => s.ByPrefix("users/")))
+    .Where(x => x.Address.Country == "US")
+    .ToListAsync();
+
+// Query for ALL documents from shard/s assigned to a specific prefix:
+// ===================================================================
+var allDocs = await asyncSession.Query<object>()
+    .Customize(x => x.ShardContext(s => s.ByPrefix("users/")))
+    .ToListAsync();
+`}
+
+{`// Query for 'User' documents from shard/s assigned to a specific prefix:
+// ======================================================================
+var userDocs = session.Advanced.DocumentQuery<User>()
+    .ShardContext(s => s.ByPrefix("users/"))
+    .WhereEquals(x => x.Name, "Joe")
+    .ToList();
+
+// Query for 'Company' documents from shard/s assigned to a specific prefix:
+// =========================================================================
+var companyDocs = session.Advanced.DocumentQuery<Company>()
+    .ShardContext(s => s.ByPrefix("users/"))
+    .WhereEquals(x => x.Address.Country, "US")
+    .ToList();
+
+// Query for ALL documents from shard/s assigned to a specific prefix:
+// ===================================================================
+var allDocs = session.Advanced.DocumentQuery<object>()
+    .ShardContext(s => s.ByPrefix("users/"))
+    .ToList();
+`}
+
+{`// Query for 'User' documents from shard/s assigned to a specific prefix:
+// ======================================================================
+from "Users"
+where Name == "Joe"
+{ "__shardContext" : { "Prefixes": ["users/"] }}
+
+// Query for 'Company' documents from shard/s assigned to a specific prefix:
+// =========================================================================
+from "Companies"
+where Address.Country == "US"
+{ "__shardContext" : { "Prefixes": ["users/"] }}
+
+// Query for ALL documents from shard/s assigned to a specific prefix:
+// ===================================================================
+from @all_docs
+{ "__shardContext" : { "Prefixes": ["users/"] }}
+`}
+
+**Query selected shards by prefix**:
+
+{`// Query for 'User' documents from shard/s assigned to the specified prefixes:
+// ===========================================================================
+var userDocs = session.Query<User>()
+    // Call 'ShardContext' to select which shard/s to query
+    // RavenDB will query only the shard/s assigned to prefixes 'users/us/' or 'users/asia/'
+    .Customize(x => x.ShardContext(s => s.ByPrefixes(["users/us/", "users/asia/"])))
+    // The query predicate
+    .Where(x => x.Name == "Joe")
+    .ToList();
+
+// Variable 'userDocs' will include all documents of type 'User'
+// that match the query predicate and reside on the shard/s
+// assigned to prefix 'users/us/' or prefix 'users/asia/'.
+
+// Query for 'Company' documents from shard/s assigned to the specified prefixes:
+// ==============================================================================
+var companyDocs = session.Query<Company>()
+    // This example shows that the prefixes don't need to match the document type queried
+    .Customize(x => x.ShardContext(s => s.ByPrefixes(["users/us/", "users/asia/"])))
+    .Where(x => x.Address.Country == "US")
+    .ToList();
+
+// Variable 'companyDocs' will include all documents of type 'Company'
+// that match the query predicate and reside on the shard/s
+// assigned to prefix 'users/us/' or prefix 'users/asia/'.
+
+// Query for ALL documents from shard/s assigned to the specified prefixes:
+// ========================================================================
+var allDocs = session.Query<object>() // query with <object>
+    .Customize(x => x.ShardContext(s => s.ByPrefixes(["users/us/", "users/asia/"])))
+    .ToList();
+
+// Variable 'allDocs' will include all documents that reside on the shard/s
+// assigned to prefix 'users/us/' or prefix 'users/asia/'.
+`}
+
+{`// Query for 'User' documents from shard/s assigned to the specified prefixes:
+// ===========================================================================
+var userDocs = await asyncSession.Query<User>()
+    .Customize(x => x.ShardContext(s => s.ByPrefixes(["users/us/", "users/asia/"])))
+    .Where(x => x.Name == "Joe")
+    .ToListAsync();
+
+// Query for 'Company' documents from shard/s assigned to the specified prefixes:
+// ==============================================================================
+var companyDocs = await asyncSession.Query<Company>()
+    .Customize(x => x.ShardContext(s => s.ByPrefixes(["users/us/", "users/asia/"])))
+    .Where(x => x.Address.Country == "US")
+    .ToListAsync();
+
+// Query for ALL documents from shard/s assigned to the specified prefixes:
+// ========================================================================
+var allDocs = await asyncSession.Query<object>()
+    .Customize(x => x.ShardContext(s => s.ByPrefixes(["users/us/", "users/asia/"])))
+    .ToListAsync();
+`}
+
+{`// Query for 'User' documents from shard/s assigned to the specified prefixes:
+// ===========================================================================
+var userDocs = session.Advanced.DocumentQuery<User>()
+    .ShardContext(s => s.ByPrefixes(["users/us/", "users/asia/"]))
+    .WhereEquals(x => x.Name, "Joe")
+    .ToList();
+
+// Query for 'Company' documents from shard/s assigned to the specified prefixes:
+// ==============================================================================
+var companyDocs = session.Advanced.DocumentQuery<Company>()
+    .ShardContext(s => s.ByPrefixes(["users/us/", "users/asia/"]))
+    .WhereEquals(x => x.Address.Country, "US")
+    .ToList();
+
+// Query for ALL documents from shard/s assigned to the specified prefixes:
+// ========================================================================
+var allDocs = session.Advanced.DocumentQuery<object>()
+    .ShardContext(s => s.ByPrefixes(["users/us/", "users/asia/"]))
+    .ToList();
+`}
+
+{`// Query for 'User' documents from shard/s assigned to the specified prefixes:
+// ===========================================================================
+from "Users"
+where Name == "Joe"
+{ "__shardContext": { "Prefixes": ["users/us/", "users/asia/"] }}
+
+// Query for 'Company' documents from shard/s assigned to the specified prefixes:
+// ==============================================================================
+from "Companies"
+where Address.Country == "US"
+{ "__shardContext": { "Prefixes": ["users/us/", "users/asia/"] }}
+
+// Query for ALL documents from shard/s assigned to the specified prefixes:
+// ========================================================================
+from @all_docs
+{ "__shardContext": { "Prefixes": ["users/us/", "users/asia/"] }}
+`}
+
+**Query selected shard by prefix and by document ID**:
+
+{`// Query for 'Company' documents from shard/s assigned to a prefix & by document ID:
+// =================================================================================
+var companyDocs = session.Query<Company>()
+    .Customize(x => x.ShardContext(s =>
+        s.ByPrefix("users/us/").ByDocumentId("companies/1")))
+    .Where(x => x.Address.Country == "US")
+    .ToList();
+
+// Variable 'companyDocs' will include all documents of type 'Company'
+// that match the query predicate and reside on:
+// * the shard/s assigned to prefix 'users/us/'
+// * or the shard containing document 'companies/1'.
+`}
+
+{`// Query for 'Company' documents from shard/s assigned to a prefix & by document ID:
+// =================================================================================
+var companyDocs = await asyncSession.Query<Company>()
+    .Customize(x => x.ShardContext(s =>
+        s.ByPrefix("users/us/").ByDocumentId("companies/1")))
+    .Where(x => x.Address.Country == "US")
+    .ToListAsync();
+`}
+
+{`// Query for 'Company' documents from shard/s assigned to a prefix & by document ID:
+// =================================================================================
+var companyDocs = session.Advanced.DocumentQuery<Company>()
+    .ShardContext(s =>
+        s.ByPrefix("users/us/").ByDocumentId("companies/1"))
+    .WhereEquals(x => x.Address.Country, "US")
+    .ToList();
+`}
+
+{`// Query for 'Company' documents from shard/s assigned to a prefix & by document ID:
+// =================================================================================
+from "Companies"
+where Address.Country == "US"
+{ "__shardContext": { "DocumentIds": ["companies/1"], "Prefixes": ["users/us/"] }}
+`}
+
+## Prefixed sharding vs Anchoring documents
+
+**Anchoring documents**:
+With [anchoring documents](../../sharding/administration/anchoring-documents.mdx), you can ensure that designated documents will be stored in the same bucket
+(and therefore the same shard), but you cannot specify which bucket that will be.
+So documents can be grouped within the same shard, but the exact shard cannot be controlled.
+
+**Prefixed sharding**:
+With prefixed sharding, you control which shard a document is stored in.
+However, you cannot specify the exact bucket within that shard.
+
+**Applying both methods**:
+When both methods are applied, prefixed sharding takes precedence and overrides document anchoring.
+For example:
+
+Given:
+
+  * Using prefixed sharding, you assign shard **#0** to store all documents with prefix `users/us/`.
+  * Your database already includes a document with ID `companies/456`, stored in shard **#1**.
+
+Now:
+
+  * Using the anchoring documents technique, you create document `users/us/123$companies/456`,
+    with the intention that both `users/us/123` and `companies/456` will reside in the same bucket.
+  * In which shard will this new document be stored?
+
+The result:
+
+  * Since prefixed sharding is prioritized, the new document will be stored in shard **#0**,
+    even though the suffix suggests it should be stored in shard **#1**, where the Companies document resides.
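+
+For illustration, here is a minimal sketch of creating the anchored document from the example above.
+It assumes an already-initialized `store` document store and a `User` entity class, both hypothetical here:
+
+{`using (var session = store.OpenSession())
+{
+    // The part after '$' would normally anchor this document
+    // to the bucket of 'companies/456' (shard #1).
+    // Because the ID also matches the configured prefix 'users/us/',
+    // prefixed sharding takes precedence and the document lands in shard #0.
+    session.Store(new User(), "users/us/123$companies/456");
+    session.SaveChanges();
+}
+`}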
+
+
diff --git a/versioned_docs/version-7.1/sharding/administration/studio-admin.mdx b/versioned_docs/version-7.1/sharding/administration/studio-admin.mdx
new file mode 100644
index 0000000000..9b8eae6515
--- /dev/null
+++ b/versioned_docs/version-7.1/sharding/administration/studio-admin.mdx
@@ -0,0 +1,179 @@
+---
+title: "Sharding: Studio Administration"
+hide_table_of_contents: true
+sidebar_label: Studio Administration
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Sharding: Studio Administration
+
+* This page explains how to create and manage a sharded database using the Studio.
+* Learn [here](../../sharding/administration/api-admin.mdx) how to manage
+  a sharded database using API commands.
+
+* In this page:
+  * [Creating a sharded database](../../sharding/administration/studio-admin.mdx#creating-a-sharded-database)
+  * [Database View](../../sharding/administration/studio-admin.mdx#database-view)
+  * [Database Group](../../sharding/administration/studio-admin.mdx#database-group)
+
+## Creating a sharded database
+
+![Create New Database](./assets/studio-admin_create-new-database.png)
+
+1. **Database View**
+   Click to create, view, or edit database properties.
+2. **Create new database**
+   This button is available when no database has been created yet.
+   Click it to create a new database.
+3. **New database**
+   Click to create a new database.
+
+Initiating the creation of a new database as shown above will open
+the following view:
+
+![New Database Settings](./assets/studio-admin_new-database.png)
+
+1. **Name**
+   Enter the database name.
+2. **Replication factor**
+   Set the number of replicas the database will have.
+   If set to 1, there will be no replication.
+3. **Enable sharding**
+   Click to enable or disable sharding on this database.
+4. **Number of shards**
+   Set the number of shards the database will comprise.
+5. **Set nodes layout manually**
+   The layout determines which nodes host each shard, and whether
+   nodes can function as [orchestrators](../../sharding/overview.mdx#client-server-communication).
+   Enable this option to set the layout manually.
+   Disable it to let RavenDB set the layout for you.
+6. **Orchestrator nodes**
+   Select which nodes can function as orchestrators.
+7. **Shards replication**
+   Select the nodes that host shard replicas.
+   The number of nodes available for each shard is determined
+   by the "Replication factor" box at the top.
+8. **Create**
+   Click to create the database.
+
+## Database View
+
+After creating a database as explained above, the database will
+be available here:
+
+![Database View](./assets/studio-admin_database-view_01.png)
+
+1. **Expand/Collapse distribution details**
+   Click to display or hide per-shard database details.
+
+   ![Expanded Details](./assets/studio-admin_database-view_expanded-details.png)
+
+2. **Encryption status**
+   An informative icon that indicates whether the database is encrypted.
+
+3. **Shard storage report**
+   Click to view the shard's storage usage.
+
+   ![Select Shard](./assets/studio-admin_database-view_02_select-shard.png)
+
+   Select the shard number and its replica node.
+   The storage report view will open.
+
+   ![Storage Report](./assets/studio-admin_database-view_03_storage-report.png)
+
+4. **Documents view**
+   Click to view and edit documents.
+
+   ![Documents View](./assets/studio-admin_database-view_04_docs-view.png)
+
+5. **Indexes view**
+   Click to open the [List of Indexes](../../studio/database/indexes/indexes-list-view.mdx) view.
+6. **Backups view**
+   Click to manage [backup tasks](../../studio/database/tasks/backup-task.mdx) and restore existing backups.
+7. **Manage group**
+   Click to manage the database group ([see below](../../sharding/administration/studio-admin.mdx#database-group)).
+
+## Database Group
+
+The Database Group view allows you to appoint and dismiss
+[orchestrators](../../sharding/overview.mdx#client-server-communication)
+and add or remove [shards](../../sharding/overview.mdx#shards) and
+shard [replicas](../../sharding/overview.mdx#shard-replication).
+
+![Database Group](./assets/studio-admin_database-group.png)
+
+1. **Add Shard**
+   Click to add a shard.
+
+   ![Add Shard](./assets/database-group_add-shard.png)
+
+   Set the shard's replication factor and click **Add shard** (or **Cancel**).
+   The new shard will be added to the database group view.
+
+2. **Manage Orchestrators**
+   Add orchestrator functionality to cluster nodes, or remove it from them.
+
+   * **Add Orchestrator**
+     This option will be available only if there are still nodes that
+     haven't been assigned orchestrator functionality.
+
+     ![Add Orchestrator 1](./assets/database-group_add-orchestrator-01.png)
+
+     Click **Add node** to add an orchestrator.
+
+     ![Add Orchestrator 2](./assets/database-group_add-orchestrator-02.png)
+
+     Select an available node for the orchestrator and click **Add orchestrator** (or **Cancel**).
+
+   * **Remove Orchestrator**
+
+     ![Remove Orchestrator](./assets/database-group_remove-orchestrator.png)
+
+     Click **Remove** to remove the orchestrator functionality from this node.
+
+3. **Manage Shards**
+
+   * **Add shard replica**
+     This option will be available only if there are still available
+     nodes for replicas of the selected shard.
+
+     ![Add Shard Replica 1](./assets/database-group_add-shard-replica-01.png)
+
+     Click **Add node** to add a shard replica.
+
+     ![Add Shard Replica 2](./assets/database-group_add-shard-replica-02.png)
+
+     Select a node for the replica.
+     If you want, you can also set a mentor node that will replicate
+     the data to the new replica.
+     Click **Add node** to create the new replica (or **Cancel**).
+
+   * **Remove shard replica**
+
+     ![Remove Shard Replica 1](./assets/database-group_remove-shard-replica-01.png)
+
+     Click **Delete from group** to remove the shard replica from the database group.
+
+     ![Remove Shard Replica 2](./assets/database-group_remove-shard-replica-02.png)
+
+     Click **Soft Delete** to stop replication to this node but keep the database files on it.
+     Click **Hard Delete** to stop replication to this node and **delete database files** from it.
+
+     Removing a shard is done by removing all its replicas.
+     Please be careful not to remove files that have no backup and may still be needed.
+ + + + diff --git a/versioned_docs/version-7.1/sharding/assets/external-replication_non-sharded-to-sharded.png b/versioned_docs/version-7.1/sharding/assets/external-replication_non-sharded-to-sharded.png new file mode 100644 index 0000000000..c74e48be0e Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/external-replication_non-sharded-to-sharded.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/external-replication_sharded-to-sharded.png b/versioned_docs/version-7.1/sharding/assets/external-replication_sharded-to-sharded.png new file mode 100644 index 0000000000..db198cfbc0 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/external-replication_sharded-to-sharded.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/overview_buckets-allocation.png b/versioned_docs/version-7.1/sharding/assets/overview_buckets-allocation.png new file mode 100644 index 0000000000..033af80b8f Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/overview_buckets-allocation.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/overview_buckets-population.png b/versioned_docs/version-7.1/sharding/assets/overview_buckets-population.png new file mode 100644 index 0000000000..4db3520761 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/overview_buckets-population.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/overview_document-view.png b/versioned_docs/version-7.1/sharding/assets/overview_document-view.png new file mode 100644 index 0000000000..caf29242b2 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/overview_document-view.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/overview_sharding-replication-factor.png b/versioned_docs/version-7.1/sharding/assets/overview_sharding-replication-factor.png new file mode 100644 index 0000000000..d4c75072f3 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/overview_sharding-replication-factor.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/querying_timing.png b/versioned_docs/version-7.1/sharding/assets/querying_timing.png new file mode 100644 index 0000000000..3d51c77298 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/querying_timing.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_buckets-report.png b/versioned_docs/version-7.1/sharding/assets/resharding_buckets-report.png new file mode 100644 index 0000000000..260b91100a Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_buckets-report.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_confirm-resharding.png b/versioned_docs/version-7.1/sharding/assets/resharding_confirm-resharding.png new file mode 100644 index 0000000000..eaaa71712a Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_confirm-resharding.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_database-record.png b/versioned_docs/version-7.1/sharding/assets/resharding_database-record.png new file mode 100644 index 0000000000..7efed7f0e8 Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_database-record.png differ diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_diving-into-bucket-01.png b/versioned_docs/version-7.1/sharding/assets/resharding_diving-into-bucket-01.png new file mode 100644 index 0000000000..058e28b747 
Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_diving-into-bucket-01.png differ
diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_diving-into-bucket-05.png b/versioned_docs/version-7.1/sharding/assets/resharding_diving-into-bucket-05.png
new file mode 100644
index 0000000000..b8f40558d6
Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_diving-into-bucket-05.png differ
diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_diving-into-bucket-06.png b/versioned_docs/version-7.1/sharding/assets/resharding_diving-into-bucket-06.png
new file mode 100644
index 0000000000..2682b6334a
Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_diving-into-bucket-06.png differ
diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_finished-resharding.png b/versioned_docs/version-7.1/sharding/assets/resharding_finished-resharding.png
new file mode 100644
index 0000000000..cc526b35a5
Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_finished-resharding.png differ
diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_over-two-shards.png b/versioned_docs/version-7.1/sharding/assets/resharding_over-two-shards.png
new file mode 100644
index 0000000000..05f5d045e5
Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_over-two-shards.png differ
diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_post-resharding.png b/versioned_docs/version-7.1/sharding/assets/resharding_post-resharding.png
new file mode 100644
index 0000000000..4cec049a07
Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_post-resharding.png differ
diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_sharded-database.png b/versioned_docs/version-7.1/sharding/assets/resharding_sharded-database.png
new file mode 100644
index 0000000000..0a79df4100
Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_sharded-database.png differ
diff --git a/versioned_docs/version-7.1/sharding/assets/resharding_stats.png b/versioned_docs/version-7.1/sharding/assets/resharding_stats.png
new file mode 100644
index 0000000000..518a53b0f3
Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/resharding_stats.png differ
diff --git a/versioned_docs/version-7.1/sharding/assets/subscriptions.png b/versioned_docs/version-7.1/sharding/assets/subscriptions.png
new file mode 100644
index 0000000000..ec8f9052fe
Binary files /dev/null and b/versioned_docs/version-7.1/sharding/assets/subscriptions.png differ
diff --git a/versioned_docs/version-7.1/sharding/backup-and-restore/_category_.json b/versioned_docs/version-7.1/sharding/backup-and-restore/_category_.json
new file mode 100644
index 0000000000..a01202f066
--- /dev/null
+++ b/versioned_docs/version-7.1/sharding/backup-and-restore/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 7,
+  "label": "Backup and Restore"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/sharding/backup-and-restore/backup.mdx b/versioned_docs/version-7.1/sharding/backup-and-restore/backup.mdx
new file mode 100644
index 0000000000..e6839b61b5
--- /dev/null
+++ b/versioned_docs/version-7.1/sharding/backup-and-restore/backup.mdx
@@ -0,0 +1,263 @@
+---
+title: "Sharding: Backup"
+hide_table_of_contents: true
+sidebar_label: Backup
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Sharding: Backup
+
+* Sharded databases are backed up using user-defined periodic
+  [backup tasks](../../server/ongoing-tasks/backup-overview.mdx).
+
+* Shards can store backup files **locally** (each shard using its
+  own node machine storage) and/or **remotely** (all shards sending
+  backup files to a common remote destination like an AWS S3 bucket).
+
+* Both [Full](../../server/ongoing-tasks/backup-overview.mdx#full-backup)
+  and [Incremental](../../server/ongoing-tasks/backup-overview.mdx#incremental-backup)
+  backups can be created for a sharded database.
+
+* A [logical](../../server/ongoing-tasks/backup-overview.mdx#logical-backup)
+  backup **can** be created for a sharded database and restored into either
+  a sharded or a non-sharded database.
+
+* A [snapshot](../../server/ongoing-tasks/backup-overview.mdx#snapshot)
+  backup **cannot** be created for a sharded database.
+
+* A manual [one-time](../../studio/database/tasks/backup-task.mdx#manually-creating-one-time-backups)
+  backup **can** be created for a sharded database.
+
+* In this page:
+  * [Backup](../../sharding/backup-and-restore/backup.mdx#backup)
+  * [Sharded and Non-Sharded Backup Tasks](../../sharding/backup-and-restore/backup.mdx#sharded-and-non-sharded-backup-tasks)
+  * [Backup Storage: Local and Remote](../../sharding/backup-and-restore/backup.mdx#backup-storage-local-and-remote)
+  * [Backup Files Extension and Structure](../../sharding/backup-and-restore/backup.mdx#backup-files-extension-and-structure)
+  * [Backup Type](../../sharding/backup-and-restore/backup.mdx#backup-type)
+  * [Backup Scope](../../sharding/backup-and-restore/backup.mdx#backup-scope)
+  * [Naming Convention](../../sharding/backup-and-restore/backup.mdx#naming-convention)
+  * [Server-Wide Backup](../../sharding/backup-and-restore/backup.mdx#server-wide-backup)
+  * [Example](../../sharding/backup-and-restore/backup.mdx#example)
+  * [Backup Options Summary](../../sharding/backup-and-restore/backup.mdx#backup-options-summary)
+
+## Backup
+
+## Sharded and Non-Sharded Backup Tasks
+
+From a user's perspective, backing up a sharded database is done by
+defining and running **a single backup task**, just as with a
+non-sharded database.
+
+Behind the scenes, though, each shard backs up its own slice of the
+database independently of the other shards.
+
+Distributing the backup responsibility among the shards allows
+RavenDB to speed up the backup process and keep backup files at a
+manageable size, no matter how large the overall database is.
+
+### Non-Sharded DB Backup Tasks
+
+* A complete replica of the database is kept by each cluster node.
+* Any node can therefore be made
+  [responsible](../../server/clustering/distribution/highly-available-tasks.mdx#responsible-node)
+  for backups by the cluster.
+* The responsible node runs the backup task periodically to create
+  a backup of the entire database.
+
+### Sharded DB Backup Tasks
+
+* Each shard hosts a unique part of the database, so no single node
+  can create a backup of the entire database.
+* After a user defines a backup task, RavenDB automatically creates
+  one backup task per shard, based on the user-defined task.
+  No additional actions are required from the user.
+* Each shard appoints [one of its nodes](../../sharding/overview.mdx#shard-replication)
+  to be responsible for the execution of the shard's backup task.
+* Each shard backup task can store the shard's backup
+  locally (on the shard machine) and/or remotely (on one
+  or more cloud destinations).
+* A backup task can store backups on multiple destinations,
+  e.g., locally, on an S3 bucket, and on an Azure blob.
+* To [restore](../../sharding/backup-and-restore/restore.mdx)
+  the entire database, the restore process is provided with
+  the locations of the backup folders used by all shards.
+* When restoring the database, the user doesn't have to restore
+  all shard backups. It is possible, for example, to restore only
+  one of the shards. Using this flexibility, a sharded database
+  can easily be split into several databases.
+
+## Backup Storage: Local and Remote
+
+Backup files can be stored locally and remotely.
+Find a code example [here](../../sharding/backup-and-restore/backup.mdx#example).
+
+* **Local Backup**
+  A shard's backup task may keep backup data locally,
+  using the node's local storage.
+
+  [Restoring](../../sharding/backup-and-restore/restore.mdx#section-2)
+  backup files that were stored locally requires the user to provide
+  the restore process with the location of the backup folder on each
+  shard's node.
+
+* **Remote Location**
+  Backups can also be kept remotely. All shards will transfer
+  the backup files to a common location, using one of the currently
+  supported platforms:
+  * Azure Blob Storage
+  * Amazon S3 Storage
+  * Google Cloud Platform
+
+  [Restoring](../../sharding/backup-and-restore/restore.mdx#section-2)
+  backup files that were stored remotely requires the user to provide
+  the restore process with each shard's backup folder location.
+
+## Backup Files Extension and Structure
+
+Backup files use the same internal structure as the `.ravendbdump`
+files that [Studio](../../studio/database/tasks/export-database.mdx)
+and [Smuggler](../../client-api/smuggler/what-is-smuggler.mdx)
+create when **exporting** data.
+It is therefore possible to not only [restore](../../sharding/backup-and-restore/restore.mdx)
+but also **import** backup files using [Studio](../../studio/database/tasks/import-data/import-data-file.mdx)
+and [Smuggler](../../client-api/smuggler/what-is-smuggler.mdx#import).
+Read more about this feature [here](../../sharding/import-and-export.mdx#import).
+
+Backed-up data includes both
+[database-level and cluster-level content](../../server/ongoing-tasks/backup-overview.mdx#backup-contents).
+
+## Backup Type
+
+A shard backup task can create a
+[Logical backup](../../server/ongoing-tasks/backup-overview.mdx#logical-backup)
+only.
+
+A [Snapshot](../../server/ongoing-tasks/backup-overview.mdx#snapshot)
+backup **cannot** be created for a sharded database.
+
+`Logical` backups created for a sharded database can be restored into
+both sharded and non-sharded databases.
+
+## Backup Scope
+
+A shard backup task can create
+a [Full backup](../../server/ongoing-tasks/backup-overview.mdx#full-backup)
+with the entire content of the shard, or an
+[Incremental Backup](../../server/ongoing-tasks/backup-overview.mdx#incremental-backup)
+with just the difference between the current database data and the last backed-up data.
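+
+For a rough illustration of how the full and incremental scopes are scheduled on a single task,
+here is a minimal sketch; the task name, folder path, and cron expressions are placeholder assumptions:
+
+{`var config = new PeriodicBackupConfiguration
+\{
+    Name = "ShardedBackupTask",                 // hypothetical task name
+    BackupType = BackupType.Backup,             // Logical - the only backup type supported for sharded databases
+    FullBackupFrequency = "0 2 * * 0",          // a full backup every Sunday at 02:00
+    IncrementalBackupFrequency = "0 2 * * 1-6", // incremental backups on the remaining days
+    LocalSettings = new LocalSettings
+    \{
+        FolderPath = @"E:/RavenBackups"
+    \}
+\};
+
+// The orchestrator re-defines this single task for each shard, as described above
+var result = await docStore.Maintenance.SendAsync(new UpdatePeriodicBackupOperation(config));
+`}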
+ +## Naming Convention + +* Backup files created for a sharded database generally follow the same naming + [convention](../../server/ongoing-tasks/backup-overview.mdx#backup-name-and-folder-structure) + as non-sharded database backups. + +* Each shard keeps its backup files in a folder whose name consists of: + * **Date and Time** (when the folder was created) + * **Database Name** + * `$` symbol + * **Shard Number** + + The backup folders for a 3-shard database named "Books", + for example, can be named: + `2023-02-05-16-17.Books$0` for shard 0 + `2023-02-05-16-17.Books$1` for shard 1 + `2023-02-05-16-17.Books$2` for shard 2 + +## Server-Wide Backup + +[Server-wide backup](../../client-api/operations/maintenance/backup/backup-overview.mdx#server-wide-backup) +backs up all the databases hosted by the cluster, by creating a backup +task for each database and executing all tasks at a scheduled time. + +* A server-wide backup will create backups for both non-sharded **and** + sharded databases. +* To create a backup for an entire sharded database, the operation will + define and execute a backup task for each shard, behaving as if it was + defined manually. + +## Example + +The backup task that we define here is similar to the task we +would define for a non-sharded database. As part of a sharded +database, however, this task will be re-defined automatically +by the orchestrator for each shard. + + +{`var config = new PeriodicBackupConfiguration +\{ + LocalSettings = new LocalSettings + \{ + FolderPath = @"E:/RavenBackups" + \}, + + //Azure Backup settings + AzureSettings = new AzureSettings + \{ + StorageContainer = "storageContainer", + RemoteFolderName = "remoteFolder", + AccountName = "JohnAccount", + AccountKey = "key" + \}, + + //Amazon S3 bucket settings + S3Settings = new S3Settings + \{ + AwsAccessKey = "your access key here", + AwsSecretKey = "your secret key here", + AwsRegionName = "OPTIONAL", + BucketName = "john-bucket" + \}, + + // Google Cloud bucket settings + GoogleCloudSettings = new GoogleCloudSettings + \{ + BucketName = "your bucket name here", + RemoteFolderName = "remoteFolder", + GoogleCredentialsJson = "your credentials here" + \} +\}; + +var operation = new UpdatePeriodicBackupOperation(config); +var result = await docStore.Maintenance.SendAsync(operation); +`} + + + + +In a sharded database, results are returned by Backup in a `ShardedBackupResult` type. +This type is specific to a sharded database, and casting it using a non-sharded type +[will fail](../../migration/client-api/client-breaking-changes.mdx#casting-smuggler-results). + + + + +## Backup Options Summary + +| Option | Available on a Sharded Database | Comment | +| -------------------- | --------------- | --------------------- | +| Store backup files created by shards in **local shard machine storage** | **Yes** | Shards can store the backups they create locally. | +| Store backup files of sharded databases [remotely](../../sharding/backup-and-restore/backup.mdx#backup-storage-local-and-remote) | **Yes** | Shards can store the backups they create on remote S3, Azure, or Google Cloud destinations. 
|
+| Create [Full](../../server/ongoing-tasks/backup-overview.mdx#full-backup) backups for sharded databases | **Yes** | |
+| Create [Incremental](../../server/ongoing-tasks/backup-overview.mdx#incremental-backup) backups for sharded databases | **Yes** | |
+| Create [Logical](../../server/ongoing-tasks/backup-overview.mdx#logical-backup) backups for sharded databases | **Yes** | |
+| Create [Snapshot](../../server/ongoing-tasks/backup-overview.mdx#snapshot) backups for sharded databases | **No** | Snapshot backups CANNOT be created for (nor restored to) sharded databases. |
+| Create **periodic backup tasks** for sharded databases | **Yes** | |
+| Run a manual [one-time](../../studio/database/tasks/backup-task.mdx#manually-creating-one-time-backups) backup operation on a sharded database | **Yes** | |
+| Include sharded databases in a [server-wide backup operation](../../sharding/backup-and-restore/backup.mdx#server-wide-backup) | **Yes** | A server-wide backup operation will create backups for all databases, including the sharded ones. |
+
diff --git a/versioned_docs/version-7.1/sharding/backup-and-restore/restore.mdx b/versioned_docs/version-7.1/sharding/backup-and-restore/restore.mdx
new file mode 100644
index 0000000000..66c9ec72d7
--- /dev/null
+++ b/versioned_docs/version-7.1/sharding/backup-and-restore/restore.mdx
@@ -0,0 +1,629 @@
+---
+title: "Sharding: Restore"
+hide_table_of_contents: true
+sidebar_label: Restore
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Sharding: Restore
+
+* A sharded database's backup is a set of backup files that were
+  created by the database's shards.
+* To restore a sharded database, we need to pass the restore
+  operation the paths to the backup files' locations so it
+  can retrieve and restore them.
+
+  Shards must be restored in their
+  [original order](../../sharding/backup-and-restore/restore.mdx#restore).
+  The backup of shard 1, for example, must be restored as shard 1.
+
+* Backup files can be restored from local shard node storage or from
+  a [remote location](../../sharding/backup-and-restore/backup.mdx#backup-storage-local-and-remote).
+* A backed-up sharded database can be restored in part or in full,
+  to a sharded or a non-sharded database.
+* Only [logical](../../server/ongoing-tasks/backup-overview.mdx#logical-backup)
+  backups are supported.
+  [Snapshot](../../server/ongoing-tasks/backup-overview.mdx#snapshot)
+  backups cannot be created or restored for sharded databases.
+* `.ravendbdump` files (exported from RavenDB databases) and
+  backup files can also be
+  [imported](../../sharding/import-and-export.mdx#import)
+  into a database (sharded or non-sharded).
+* A backup created for a non-sharded database **cannot** be restored
+  as a sharded database.
+ +* In this page: + * [Restore](../../sharding/backup-and-restore/restore.mdx#restore) + * [Set Paths to Backup Files Locations](../../sharding/backup-and-restore/restore.mdx#set-paths-to-backup-files-locations) + * [Define a Restore Configuration](../../sharding/backup-and-restore/restore.mdx#define-a-restore-configuration) + * [`RestoreBackupConfigurationBase`](../../sharding/backup-and-restore/restore.mdx#section) + * [Run `RestoreBackupOperation` with the Restore Configuration](../../sharding/backup-and-restore/restore.mdx#run--with-the-restore-configuration) + * [Examples](../../sharding/backup-and-restore/restore.mdx#examples) + * [Restoring to a Selected Restore Point](../../sharding/backup-and-restore/restore.mdx#restoring-to-a-selected-restore-point) + * [Restore Options Summary](../../sharding/backup-and-restore/restore.mdx#restore-options-summary) + + +## Restore + +To restore a sharded database, we need to: + +## Set Paths to Backup Files Locations +When a shard stores a backup file, it may store it locally (on the +shard node's storage) or remotely (supported platforms currently include +S3 Buckets, Azure Blobs, and Google cloud). + +To restore the backup files, we need to provide the restore process with +each shard's backup folder location. +The shards' backup folder locations, and additional data regarding the backups, +are provided in a dictionary of `SingleShardRestoreSetting` objects. + +`SingleShardRestoreSetting` + + +{`public class SingleShardRestoreSetting +\{ + // Shard number + public int ShardNumber \{ get; set; \} + // Node tag + public string NodeTag \{ get; set; \} + // Folder name + public string FolderName \{ get; set; \} + // Restore up to (including) this incremental backup file + public string LastFileNameToRestore \{ get; set; \} +\} +`} + + + +* Parameters: + + | Parameter | Value | Functionality | + | ------------- | ------------- | ----- | + | **ShardNumber** | `int` | The shard number that will be given to the restored shard.
It must match the original shard number. | + | **NodeTag** | `string` | The node to restore the shard on. | + | **FolderName** | `string` | The name of the folder that holds the backup file(s). | + | **LastFileNameToRestore** | `string` | Last incremental backup file to restore.
If omitted, all backup files in the folder are restored. | + + + When setting **ShardNumber**, please make sure that all shards are + given the same numbers they had when they were backed up. + Giving a restored shard a number different from its original number + will place buckets on the wrong shards and cause mapping errors. + + E.g., a backup of shard 1 must be restored as shard 1: `ShardNumber = 1` + + + + When restoring a local shard backup, make sure that the backup file + resides on the node that the shard's `NodeTag` property is set to, + so the restore process can find the file. + + E.g., if a backup file that's been produced by node `A` is now + restored to node `B` (`NodeTag = "B"`), place the backup file in + the backup folder of node `B` before initiating the restore operation. + + +## Define a Restore Configuration +To restore the database, we pass the +[Restore Operation](../../client-api/operations/maintenance/backup/restore.mdx#restoring-a-database:-configuration-and-execution) +a **configuration object**. + +* The configuration object inherits properties from the `RestoreBackupConfigurationBase` class + ([discussed below](../../sharding/backup-and-restore/restore.mdx#section)) + and defines additional sharding-specific settings. + +* The configuration object we pass to the restore operation depends on + the backup files' location. + The backup files may be located locally (on each shard machine's storage) + or remotely (in a cloud location). + + | Configuration Object | Backup Location | Additional Properties | + | -------------------- | --------------- | --------------------- | + | `RestoreBackupConfiguration` | Local shard storage | None (see [example](../../sharding/backup-and-restore/restore.mdx#examples)) | + | `RestoreFromS3Configuration` | AWS S3 Bucket | `S3Settings` (see S3 [example](../../sharding/backup-and-restore/restore.mdx#examples)) | + | `RestoreFromAzureConfiguration` | MS Azure Blob | `AzureSettings` (see Azure [example](../../sharding/backup-and-restore/restore.mdx#examples)) | + | `RestoreFromGoogleCloudConfiguration` | Google Cloud Bucket | `GoogleCloudSettings` (see Google Cloud [example](../../sharding/backup-and-restore/restore.mdx#examples)) | +### `RestoreBackupConfigurationBase` +`RestoreBackupConfigurationBase` is a parent class of all the configuration types +mentioned above, allowing you to set backup **encryption settings**, among other options. + + +{`public abstract class RestoreBackupConfigurationBase +\{ + public string DatabaseName \{ get; set; \} + + public string LastFileNameToRestore \{ get; set; \} + + public string DataDirectory \{ get; set; \} + + public string EncryptionKey \{ get; set; \} + + public bool DisableOngoingTasks \{ get; set; \} + + public bool SkipIndexes \{ get; set; \} + + public ShardedRestoreSettings ShardRestoreSettings \{ get; set; \} + + public BackupEncryptionSettings BackupEncryptionSettings \{ get; set; \} + +\} +`} + + + +* Parameters: + +| Property | Value | Functionality | +|-----------|-------|---------------| +| **DatabaseName** | `string` | Name for the new database. | +| **LastFileNameToRestore**
(Optional – omit for default) | `string` | [Last incremental backup file](../../server/ongoing-tasks/backup-overview.mdx#restoration-procedure) to restore.
Ignored when restoring a sharded database.
`SingleShardRestoreSetting.LastFileNameToRestore` is used instead, per shard. | +| **DataDirectory**
(Optional – omit for default) | `string` | The new database data directory.

**Default folder:**
Under the "Databases" folder
In a folder carrying the restored database name. | +| **EncryptionKey**
(Optional – omit for default) | `string` | A key for an encrypted database.

**Default behavior:**
Try to restore as if the database is unencrypted. | +| **DisableOngoingTasks**
(Optional – omit for default) | `boolean` | `true` – disable ongoing tasks when Restore is complete.
`false` – enable ongoing tasks when Restore is complete.

**Default: `false`
Ongoing tasks will run when Restore is complete.** | +| **SkipIndexes**
(Optional – omit for default) | `boolean` | `true` – skip importing indexes,
`false` – import indexes.

**Default: `false`
Restore all indexes.** | +| **ShardRestoreSettings** | `ShardedRestoreSettings` | A dictionary of `SingleShardRestoreSetting` instances defining paths to backup locations.

public class ShardedRestoreSettings {
public Dictionary<int, SingleShardRestoreSetting> Shards { get; set; }
}
| +| **BackupEncryptionSettings** | `BackupEncryptionSettings` | [Backup Encryption Settings](../../client-api/operations/maintenance/backup/encrypted-backup.mdx#choosing-encryption-mode--key) | + + + Verify that RavenDB has full access to the backup locations and database files. + Make sure your server has write permission to `DataDirectory`. + + +## Run `RestoreBackupOperation` with the Restore Configuration +Pass the configuration object you defined to the `RestoreBackupOperation` store operation +to restore the database. + + +{`public RestoreBackupOperation(RestoreBackupConfigurationBase restoreConfiguration) +`} + + + +* Instead of `RestoreBackupConfigurationBase`, use the configuration object + you [prepared](../../sharding/backup-and-restore/restore.mdx#define-a-restore-configuration): + * `RestoreBackupConfiguration` for locally-stored backups + * `RestoreFromS3Configuration` to restore from S3 + * `RestoreFromAzureConfiguration` to restore from Azure + * `RestoreFromGoogleCloudConfiguration` to restore from Google Cloud + +## Examples + +Here are examples for restoring a sharded database using +backup files stored locally and remotely. + + + + +{`// Create a dictionary with paths to shard backups +var restoreSettings = new ShardedRestoreSettings +{ + Shards = new Dictionary<int, SingleShardRestoreSetting>(), +}; + +// First shard +restoreSettings.Shards.Add(0, new SingleShardRestoreSetting +{ + // Shard Number - which shard to restore to. + // Please make sure that each shard is given + // the same number it had when it was backed up. + ShardNumber = 0, + // Node Tag - which node to restore to + NodeTag = "A", + // Backups Folder Name + FolderName = "E:/RavenBackups/2023-02-12-09-52-27.ravendb-Books$0-A-backup" +}); + +// Second shard +restoreSettings.Shards.Add(1, new SingleShardRestoreSetting +{ + ShardNumber = 1, + NodeTag = "B", + FolderName = "E:/RavenBackups/2023-02-12-09-52-27.ravendb-Books$1-B-backup" +}); + +// Third shard +restoreSettings.Shards.Add(2, new SingleShardRestoreSetting +{ + ShardNumber = 2, + NodeTag = "C", + FolderName = "E:/RavenBackups/2023-02-12-09-52-27.ravendb-Books$2-C-backup", +}); + +var restoreBackupOperation = new RestoreBackupOperation(new RestoreBackupConfiguration +{ + // Database Name + DatabaseName = "Books", + // Paths to backup files + ShardRestoreSettings = restoreSettings +}); + +var operation = await docStore.Maintenance.Server.SendAsync(restoreBackupOperation); +`} + + + + +{`// Create a dictionary with paths to shard backups +var restoreSettings = new ShardedRestoreSettings +{ + Shards = new Dictionary<int, SingleShardRestoreSetting>(), +}; + +// First shard +restoreSettings.Shards.Add(0, new SingleShardRestoreSetting +{ + // Shard Number - which shard to restore to. + // Please make sure that each shard is given + // the same number it had when it was backed up. 
+ ShardNumber = 0, + // Node Tag - which node to restore to + NodeTag = "A", + // Backups Folder Name + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$0-A-backup" +}); + +// Second shard +restoreSettings.Shards.Add(1, new SingleShardRestoreSetting +{ + ShardNumber = 1, + NodeTag = "B", + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$1-B-backup" +}); + +// Third shard +restoreSettings.Shards.Add(2, new SingleShardRestoreSetting +{ + ShardNumber = 2, + NodeTag = "C", + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$2-C-backup", +}); + +var restoreBackupOperation = new RestoreBackupOperation(new RestoreFromS3Configuration +{ + // Database Name + DatabaseName = "Books", + // Paths to backup files + ShardRestoreSettings = restoreSettings, + // S3 Bucket settings + Settings = new S3Settings + { + AwsRegionName = "us-east-1", // Optional + BucketName = "your bucket name here", + RemoteFolderName = "", // Replaced by restoreSettings.Shards.FolderName + AwsAccessKey = "your access key here", + AwsSecretKey = "your secret key here", + } +}); + +var operation = await docStore.Maintenance.Server.SendAsync(restoreBackupOperation); +`} + + + + +{`// Create a dictionary with paths to shard backups +var restoreSettings = new ShardedRestoreSettings +{ + Shards = new Dictionary<int, SingleShardRestoreSetting>(), +}; + +// First shard +restoreSettings.Shards.Add(0, new SingleShardRestoreSetting +{ + // Shard Number - which shard to restore to. + // Please make sure that each shard is given + // the same number it had when it was backed up. + ShardNumber = 0, + // Node Tag - which node to restore to + NodeTag = "A", + // Backups Folder Name + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$0-A-backup" +}); + +// Second shard +restoreSettings.Shards.Add(1, new SingleShardRestoreSetting +{ + ShardNumber = 1, + NodeTag = "B", + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$1-B-backup" +}); + +// Third shard +restoreSettings.Shards.Add(2, new SingleShardRestoreSetting +{ + ShardNumber = 2, + NodeTag = "C", + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$2-C-backup", +}); + +var restoreBackupOperation = new RestoreBackupOperation(new RestoreFromAzureConfiguration +{ + // Database Name + DatabaseName = "Books", + // Paths to backup files + ShardRestoreSettings = restoreSettings, + // Azure Blob settings + Settings = new AzureSettings + { + StorageContainer = "storageContainer", + RemoteFolderName = "", // Replaced by restoreSettings.Shards.FolderName + AccountName = "your account name here", + AccountKey = "your account key here", + } + }); + +var operation = await docStore.Maintenance.Server.SendAsync(restoreBackupOperation); +`} + + + + +{`// Create a dictionary with paths to shard backups +var restoreSettings = new ShardedRestoreSettings +{ + Shards = new Dictionary<int, SingleShardRestoreSetting>(), +}; + +// First shard +restoreSettings.Shards.Add(0, new SingleShardRestoreSetting +{ + // Shard Number - which shard to restore to. + // Please make sure that each shard is given + // the same number it had when it was backed up. 
+ ShardNumber = 0, + // Node Tag - which node to restore to + NodeTag = "A", + // Backups Folder Name + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$0-A-backup" +}); + +// Second shard +restoreSettings.Shards.Add(1, new SingleShardRestoreSetting +{ + ShardNumber = 1, + NodeTag = "B", + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$1-B-backup" +}); + +// Third shard +restoreSettings.Shards.Add(2, new SingleShardRestoreSetting +{ + ShardNumber = 2, + NodeTag = "C", + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$2-C-backup", +}); + +var restoreBackupOperation = new RestoreBackupOperation(new RestoreFromGoogleCloudConfiguration +{ + // Database Name + DatabaseName = "Books", + // Paths to backup files + ShardRestoreSettings = restoreSettings, + // Google Cloud settings + Settings = new GoogleCloudSettings + { + BucketName = "your bucket name here", + RemoteFolderName = "", // Replaced by restoreSettings.Shards.FolderName + GoogleCredentialsJson = "your credentials here" + } +}); + +var operation = await docStore.Maintenance.Server.SendAsync(restoreBackupOperation); +`} + + + + + + +## Restoring to a Selected Restore Point + +### The default procedure +When a **full backup** is created (for either a non-sharded database or +any shard of a sharded database), a backup folder is created to contain it. +When **incremental backups** are created, they are stored in the folder of +the last full backup. +To restore a backup, its folder name is provided. By default, the full +backup stored in this folder will be restored, as well as all the incremental +backups that were added to it over time. + +### Restoring a Non-sharded database to a selected restore point + +* Read [here](../../sharding/backup-and-restore/restore.mdx) about restoring a non-sharded database. +* In short, restoring a non-sharded database to a selected restore point + is done by filling: + * [RestoreBackupConfiguration.BackupLocation](../../client-api/operations/maintenance/backup/restore.mdx#section-1) + with the backup location. + * [RestoreBackupConfiguration.LastFileNameToRestore](../../client-api/operations/maintenance/backup/restore.mdx#section-2) + with the name of the last incremental backup to restore. + +### Restoring a Sharded database to a selected restore point + +* When a sharded database is restored, `RestoreBackupConfiguration.BackupLocation` + and `RestoreBackupConfiguration.LastFileNameToRestore` mentioned above are **overridden** + by per-shard settings. + +* To restore the database of a particular shard up to a selected restore point, + simply add `ShardRestoreSettings.SingleShardRestoreSetting.LastFileNameToRestore` + to the shard configuration and fill it with the name of the last incremental + backup to restore. 
+ +* Example: + + + +{`// Create a dictionary with paths to shard backups +var restoreSettings = new ShardedRestoreSettings +{ + Shards = new Dictionary<int, SingleShardRestoreSetting>(), +}; + +// First shard +restoreSettings.Shards.Add(0, new SingleShardRestoreSetting +{ + ShardNumber = 0, + NodeTag = "A", + // Backups Folder Name + FolderName = "E:/RavenBackups/2023-02-12-09-52-27.ravendb-Books$0-A-backup", + // Last incremental backup to restore + LastFileNameToRestore = "2023-02-12-10-30-00.ravendb-incremental-backup" +}); + +var restoreBackupOperation = new RestoreBackupOperation(new RestoreBackupConfiguration +{ + // Database Name + DatabaseName = "Books", + // Paths to backup files + ShardRestoreSettings = restoreSettings +}); + +var operation = await docStore.Maintenance.Server.SendAsync(restoreBackupOperation); +`} + + + + +{`// Create a dictionary with paths to shard backups +var restoreSettings = new ShardedRestoreSettings +{ + Shards = new Dictionary<int, SingleShardRestoreSetting>(), +}; + +// First shard +restoreSettings.Shards.Add(0, new SingleShardRestoreSetting +{ + ShardNumber = 0, + NodeTag = "A", + // Backups Folder Name + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$0-A-backup", + // Last incremental backup to restore + LastFileNameToRestore = "2023-02-12-10-30-00.ravendb-incremental-backup" +}); + +var restoreBackupOperation = new RestoreBackupOperation(new RestoreFromS3Configuration +{ + // Database Name + DatabaseName = "Books", + // Paths to backup files + ShardRestoreSettings = restoreSettings, + // S3 Bucket settings + Settings = new S3Settings + { + AwsRegionName = "us-east-1", // Optional + BucketName = "your bucket name here", + RemoteFolderName = "", // Replaced by restoreSettings.Shards.FolderName + AwsAccessKey = "your access key here", + AwsSecretKey = "your secret key here", + } +}); + +var operation = await docStore.Maintenance.Server.SendAsync(restoreBackupOperation); +`} + + + + +{`// Create a dictionary with paths to shard backups +var restoreSettings = new ShardedRestoreSettings +{ + Shards = new Dictionary<int, SingleShardRestoreSetting>(), +}; + +// First shard +restoreSettings.Shards.Add(0, new SingleShardRestoreSetting +{ + ShardNumber = 0, + NodeTag = "A", + // Backups Folder Name + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$0-A-backup", + // Last incremental backup to restore + LastFileNameToRestore = "2023-02-12-10-30-00.ravendb-incremental-backup" +}); + +var restoreBackupOperation = new RestoreBackupOperation(new RestoreFromAzureConfiguration +{ + // Database Name + DatabaseName = "Books", + // Paths to backup files + ShardRestoreSettings = restoreSettings, + // Azure Blob settings + Settings = new AzureSettings + { + StorageContainer = "storageContainer", + RemoteFolderName = "", // Replaced by restoreSettings.Shards.FolderName + AccountName = "your account name here", + AccountKey = "your account key here", + } +}); + +var operation = await docStore.Maintenance.Server.SendAsync(restoreBackupOperation); +`} + + + + +{`// Create a dictionary with paths to shard backups +var restoreSettings = new ShardedRestoreSettings +{ + Shards = new Dictionary<int, SingleShardRestoreSetting>(), +}; + +// First shard +restoreSettings.Shards.Add(0, new SingleShardRestoreSetting +{ + ShardNumber = 0, + NodeTag = "A", + // Backups Folder Name + FolderName = "RavenBackups/2023-02-12-09-52-27.ravendb-Books$0-A-backup", + // Last incremental backup to restore + LastFileNameToRestore = "2023-02-12-10-30-00.ravendb-incremental-backup" +}); + +var restoreBackupOperation = new RestoreBackupOperation(new RestoreFromGoogleCloudConfiguration +{ + // Database 
Name + DatabaseName = "Books", + // Paths to backup files + ShardRestoreSettings = restoreSettings, + // Google Cloud settings + Settings = new GoogleCloudSettings + { + BucketName = "your bucket name here", + RemoteFolderName = "", // Replaced by restoreSettings.Shards.FolderName + GoogleCredentialsJson = "your credentials here" + } +}); + +var operation = await docStore.Maintenance.Server.SendAsync(restoreBackupOperation); +`} + + + + + + +## Restore Options Summary + +| Option | Supported on a Sharded Database | Comment | +| -------------------- | --------------- | --------------------- | +| Restore from **local shard storage** | **Yes** | | +| Restore from a [remote location](../../sharding/backup-and-restore/backup.mdx#backup-storage-local-and-remote) | **Yes** | Define a [restore configuration](../../sharding/backup-and-restore/restore.mdx#define-a-restore-configuration) with S3, Azure, or Google Cloud settings. | +| Restore a **sharded database** backup
to a **sharded database** | **Yes** | | +| Restore a **sharded database** backup
to a **non-sharded database** | **Yes** | | +| Restore a **non-sharded database** backup
to a **sharded database** | **No** | A backup created for a non-sharded database CANNOT be restored to a sharded database. | +| Restore a **Full** database backup | **Yes** | | +| Restore a **Partial** database backup | **Yes** | | +| Restore a **Logical** database backup | **Yes** | | +| Restore a **Snapshot** database backup | **No** | A snapshot backup CANNOT be restored to a sharded database. | +| Restore backed-up shards in a different order than the original | **No** | Always restore the shards in their original order. | + + 
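+To close, here is a minimal sketch of waiting for a restore operation, sent as in the examples above, to complete before using the database (assuming the standard operation-completion API): + + +{`// Send the restore operation, as shown in the examples above +var operation = await docStore.Maintenance.Server.SendAsync(restoreBackupOperation); + +// Wait until the server reports that the restore has finished +// before directing traffic to the restored database +await operation.WaitForCompletionAsync(); +`} + 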
diff --git a/versioned_docs/version-7.1/sharding/document-extensions.mdx b/versioned_docs/version-7.1/sharding/document-extensions.mdx new file mode 100644 index 0000000000..db8c6ec53b --- /dev/null +++ b/versioned_docs/version-7.1/sharding/document-extensions.mdx @@ -0,0 +1,111 @@ +--- +title: "Sharding: Document Extensions" +hide_table_of_contents: true +sidebar_label: Document Extensions +sidebar_position: 6 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding: Document Extensions + + +* [Document extensions](../document-extensions/overview-extensions.mdx) are + data entities that are associated with documents. They currently include + [Counters](../document-extensions/counters/overview.mdx), + [Attachments](../document-extensions/attachments/what-are-attachments.mdx), + [Time Series](../document-extensions/timeseries/overview.mdx), + and [Revisions](../document-extensions/revisions/overview.mdx). + +* From a user's point of view, document extensions behave similarly + under sharded and non-sharded databases and are handled using the + same API commands and Studio views. + +* Document extensions are identified by the ID of their parent document, + and are always stored in [the same bucket](../sharding/overview.mdx#document-extensions-storage) + as the document. + + When a document is resharded, its document extensions are transferred + along with it to the new shard. + +* In this page: + * [Document Extensions and Resharding](../sharding/document-extensions.mdx#document-extensions-and-resharding) + * [Precautions and Recommendations](../sharding/document-extensions.mdx#precautions-and-recommendations) + * [Precautions](../sharding/document-extensions.mdx#precautions) + * [Recommendations](../sharding/document-extensions.mdx#recommendations) + + +## Document Extensions and Resharding + +When RavenDB runs resharding to balance the data load between shards, it copies +documents from one shard to another and then removes their original copy. + +If a change occurs in the original data after some of it was copied to its +new location (e.g. a time series has updated entries in the original bucket after +its parent document was copied), RavenDB will **not** remove the original +document and its extensions from their original location until the new/modified +data is relocated as well. + +In all other respects, RavenDB handles the document as if it had already moved: +reads and writes are directed only to the new copy, in its new bucket and shard. + + + +## Precautions and Recommendations + +## Precautions + +The main contribution of sharded databases is their ability to manage huge +volumes of data efficiently by serving it from multiple shards. +We should take extra care, then, to help the database maintain its ability +to divide the data between shards. +The following points relate to this issue. + +* **Time Series** + Some time series can get very large. As they reside in a single bucket + with their parent document, they cannot be spread between shards and may + become hard to manage and use. + We recommend keeping the number of time series added to each document + fairly small, and using practices such as [rollup and retention](../document-extensions/timeseries/rollup-and-retention.mdx). + +* **Revisions** + Revisions may accumulate in a large database, especially in an + environment of rapid document modification. We can, however, create + a [revisions configuration](../document-extensions/revisions/overview.mdx#revisions-configuration) + that takes this into account, limits the number and age of kept revisions, + and automatically removes those that are no longer needed. + +* **Attachments** + Remain aware of the number and size of attachments in your database as well, + and try to avoid adding many or oversized attachments to the same document, + especially as a recurring practice. + +* **Counters** + Counters are tiny entities that weigh much less on the system than + time series, revisions, or attachments. It is, however, recommended to + keep an eye on them as well, to make sure they are not used in quantities + that pose a problem. + +## Recommendations + +* While planning our data model, we should prefer **a larger number of smaller + documents** over **a smaller number of heavier documents** that are harder to + relocate and balance. + +* As explained above, we should limit the size and number of document extensions + and spread them among many documents. Where possible, we can use features + like time series rollup to summarize a large amount of data using a tiny amount + of space. + +* It takes longer to retrieve related documents (and their extensions) when + they are stored on different shards. To accelerate such operations, we can + store related documents [in the same bucket](../sharding/administration/anchoring-documents.mdx) + in advance, as sketched below. + 
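+To illustrate the last recommendation, here is a minimal sketch of anchoring two related +documents to the same bucket using the `$` document-ID suffix described in the +[anchoring documents](../sharding/administration/anchoring-documents.mdx) article +(the `User` and `Invoice` classes are hypothetical placeholders): + + +{`using (var session = store.OpenSession()) +{ + // Store the parent document + session.Store(new User { Name = "Lilly" }, "users/1"); + + // The ID part after '$' determines the bucket, so this invoice + // is stored in the same bucket (and shard) as users/1 + session.Store(new Invoice { UserId = "users/1" }, "invoices/1$users/1"); + + session.SaveChanges(); +} +`} + 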
diff --git a/versioned_docs/version-7.1/sharding/etl.mdx b/versioned_docs/version-7.1/sharding/etl.mdx new file mode 100644 index 0000000000..fa52db5b76 --- /dev/null +++ b/versioned_docs/version-7.1/sharding/etl.mdx @@ -0,0 +1,169 @@ +--- +title: "Sharding: ETL" +hide_table_of_contents: true +sidebar_label: ETL +sidebar_position: 10 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding: ETL + + +* From a user's point of view, ETL usage on a sharded RavenDB database + is similar to its usage on a non-sharded database, including unchanged + syntax and support for the same list of destinations. +* Most changes are meant to remain behind the scenes and allow the + transition to a sharded database to be seamless. +* A user defines an ETL task once. + As the shards detect that a task was defined, each shard defines + its own local ETL task, based on the user's settings. +* Resharding may result in the transfer of some documents more than + once to the ETL destination. It is the user's responsibility to + [detect and handle](../sharding/etl.mdx#etl-and-resharding) such duplicates. +* ETL tasks information is available [per database and per shard](../sharding/etl.mdx#retrieving-shard-specific-etl-task-info). + +* In this page: + * [ETL](../sharding/etl.mdx#etl) + * [Sharded and Non-Sharded ETL Tasks](../sharding/etl.mdx#sharded-and-non-sharded-etl-tasks) + * [Non-Sharded Database ETL Tasks](../sharding/etl.mdx#non-sharded-database-etl-tasks) + * [Sharded Database ETL Tasks](../sharding/etl.mdx#sharded-database-etl-tasks) + * [ETL and Resharding](../sharding/etl.mdx#etl-and-resharding) + * [ETL Queries](../sharding/etl.mdx#etl-queries) + * [Retrieving Shard-Specific ETL Task Info](../sharding/etl.mdx#retrieving-shard-specific-etl-task-info) + + +## ETL + +## Sharded and Non-Sharded ETL Tasks + +From a user's point of view, creating an ongoing ETL process is +done by defining and running **a single ETL task**, just like it +is done under a non-sharded database. + +Behind the scenes, though, each shard defines and uses its own +ETL task to send data from its database to the ETL destination +independently from other shards. + +Distributing the ETL responsibility between the shards allows +RavenDB to keep its ETL destination updated with data additions +and modifications no matter how large the overall database gets. + +### Non-Sharded Database ETL Tasks + +* A complete replica of the database is kept by each cluster node. +* Any node can therefore be made + [responsible](../server/clustering/distribution/highly-available-tasks.mdx#responsible-node) + for ETL by the cluster. +* The responsible node runs the ETL task periodically to update + the ETL destination with any data changes. + +### Sharded Database ETL Tasks + +* Each shard hosts a unique dataset, so no single node can + monitor the entire database. +* When a user defines an ETL task, either via Studio or + using API commands like `PutConnectionStringOperation` + and `AddEtlOperation`, the change made in the database + record triggers each shard to create an ETL task of + its own, based on the user-defined task. + This creation of multiple ETL tasks, one per shard, is + automatic and requires no additional actions from the user + (a minimal sketch of such a definition appears further below). +* Each shard appoints [one of its nodes](../sharding/overview.mdx#shard-replication) + as responsible for the execution of the shard's ETL task. +* The shards' ETL tasks behave just like an ETL task of + a non-sharded database would, **E**xtracting relevant + data from the shard's database, **T**ransforming it using + a user-defined script, and **L**oading it to the destination. +* If the responsible node fails, a failover scenario will start: + another shard node will be made responsible for the task, + and the transfer will continue from the point of failure. + + + +## ETL Queries + +Queries used by an ETL task's transform script on a sharded +database are basically no different from queries executed over +a non-sharded database. +However, as some querying features are +[not yet implemented](../sharding/querying.mdx#unsupported-querying-features) +on a sharded database (e.g. loading a document that resides +on a different shard will fail) and others behave a little +differently than their non-sharded equivalents (e.g. +[filter](../sharding/querying.mdx#filtering-results-in-a-sharded-database)), +it is recommended to read the pages dedicated to [indexing](../sharding/indexing.mdx) +and [querying](../sharding/querying.mdx) on a sharded database. + 
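+As noted above, the user defines the task only once, e.g. with `PutConnectionStringOperation` +and `AddEtlOperation`; a minimal sketch of such a definition follows (the connection string +name, target database, URL, and script are placeholders): + + +{`// Define a connection string to the ETL destination +store.Maintenance.Send(new PutConnectionStringOperation<RavenConnectionString>( + new RavenConnectionString + { + Name = "target-cs", // placeholder name + Database = "TargetDB", // placeholder target database + TopologyDiscoveryUrls = new[] { "http://target-server:8080" } + })); + +// Define the ETL task once; each shard derives its own local task from it +store.Maintenance.Send(new AddEtlOperation<RavenConnectionString>( + new RavenEtlConfiguration + { + ConnectionStringName = "target-cs", + Name = "Orders-ETL", + Transforms = + { + new Transformation + { + Name = "Script #1", + Collections = { "Orders" }, + Script = "loadToOrders(this);" // transform script + } + } + })); +`} + 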
+## ETL and Resharding + +It may happen that an ETL task would send the same data more than once. +One scenario that would make this happen is resharding: a document can +be sent from one shard by the shard's ETL task, resharded, and then +sent again to the ETL destination by its new shard's ETL task. + +Some ETL destinations will store an incoming duplicate in place +of its former copy. Others, like OLAP and [Queue ETL](../server/ongoing-tasks/etl/queue-etl/overview.mdx) +destinations, will **not** automatically recognize such duplicates. +It is the user's responsibility to verify that the loaded documents +are handled as expected when they arrive. + + +OLAP helps users detect duplications using `lastModified`; see a more +thorough discussion of this [here](../studio/database/tasks/ongoing-tasks/olap-etl-task.mdx#transform-scripts) +and relevant code samples [here](../server/ongoing-tasks/etl/olap.mdx#athena-examples). + + + + +## Retrieving Shard-Specific ETL Task Info + +* The [GetOngoingTaskInfoOperation](../client-api/operations/maintenance/ongoing-tasks/ongoing-task-operations.mdx) + store operation can be used on a non-sharded database to retrieve a task's information. + +* `GetOngoingTaskInfoOperation` can also be used on a sharded database. + * **Get Task Info Per Database** + Run `GetOngoingTaskInfoOperation` using `store.Maintenance.Send` + to retrieve information regarding the basic task defined by the user. + The information includes the task's name and ID. + * **Get Task Info Per Shard** + Run `GetOngoingTaskInfoOperation` using `store.Maintenance.ForShard(x).Send`, + where `x` is the shard number, to retrieve information about the selected + shard's task. + Much more information is available here, including details of the + responsible and mentor nodes. + + +{`// Get basic info regarding the user-defined task +var ongoingTask = store.Maintenance.Send( + new GetOngoingTaskInfoOperation(name, OngoingTaskType.RavenEtl)); + +if (ongoingTask != null) +\{ + var taskNames = new string[3]; + var nodeTags = new string[3]; + var mentorNodes = new string[3]; + var connectionStatuses = new OngoingTaskConnectionStatus[3]; + + // go through the shards and retrieve their info + for (int i = 0; i < 3; i++) + \{ + var singleShardInfo = store.Maintenance.ForShard(i).Send( + new GetOngoingTaskInfoOperation(name, OngoingTaskType.RavenEtl)); + + taskNames[i] = singleShardInfo.TaskName; + nodeTags[i] = singleShardInfo.ResponsibleNode.NodeTag; + mentorNodes[i] = singleShardInfo.MentorNode; + connectionStatuses[i] = singleShardInfo.TaskConnectionStatus; + \} +\} +`} + + + + + diff --git a/versioned_docs/version-7.1/sharding/external-replication.mdx b/versioned_docs/version-7.1/sharding/external-replication.mdx new file mode 100644 index 0000000000..e9f00e308c --- /dev/null +++ b/versioned_docs/version-7.1/sharding/external-replication.mdx @@ -0,0 +1,138 @@ +--- +title: "Sharding: External Replication" +hide_table_of_contents: true +sidebar_label: External Replication +sidebar_position: 9 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding: External Replication + + +* [External Replication](../server/ongoing-tasks/external-replication.mdx) is an + ongoing RavenDB task that you can create and run to maintain a live replica + of your database on another RavenDB server. 
+* Defining an external replication task via [Studio](../studio/database/tasks/ongoing-tasks/external-replication-task.mdx#definition) + or [API](../server/ongoing-tasks/external-replication.mdx#code-sample) under a + sharded database is similar to defining such tasks under a non-sharded database. +* Sharded and non-sharded databases can replicate data to each other, providing + their version is at least 6.0. + +* In this page: + * [Supported Versions](../sharding/external-replication.mdx#supported-versions) + * [External Replication Types](../sharding/external-replication.mdx#external-replication-types) + * [Non-Sharded Database to Sharded Database](../sharding/external-replication.mdx#non-sharded-database-to-sharded-database) + * [Sharded Database to Sharded Database](../sharding/external-replication.mdx#sharded-database-to-sharded-database) + * [Performance Considerations](../sharding/external-replication.mdx#performance-considerations) + + +## Supported Versions + +* A sharded database and a non-sharded database **can** replicate data to each + other, providing their versions are 6.0 or higher. +* Replicating data between a sharded database and a RavenDB version earlier + than 5.4 is **not supported**. +* Non-sharded databases **can** replicate data to each other regardless of + their version. E.g., a non-sharded 6.0 database can replicate data to a 5.2 + database and vice versa. + + + +## External Replication Types + + + +* **Internal replication** is applied automatically when the replication + factor is larger than 1, to make the shard database more available by + maintaining multiple accessible copies of it. + Learn more about shards internal replication in the [overview](../sharding/overview.mdx#shard-replication) + article and administration [Studio](../sharding/administration/studio-admin.mdx) + and [API](../sharding/administration/api-admin.mdx) articles. + +* **External replication** is applied when a dedicated task is defined for it. + Read more about it and follow a step-by-step guide + [here](../studio/database/tasks/ongoing-tasks/external-replication-task.mdx#step-by-step-guide). + + +All data replicated by or to a sharded database is mediated via +[orchestrators](../sharding/overview.mdx#client-server-communication). +The shards themselves are oblivious to their being shards: from +a shard's perspective, it is just a regular RavenDB database that +can, among its other ordinary RavenDB features, replicate data. + +External replication from and to non-sharded databases requires +no special syntax or preparations. It does, however, cost the server +some additional work, that, especially when the database is large +and every extra operation counts, should be taken into account by +the administrator. Here is how external replication works behind +the scenes. + +## Non-Sharded Database to Sharded Database + +The image below depicts a non-sharded database replicating data to a 5-shard database. + +![Non-Sharded Database to Sharded Database](./assets/external-replication_non-sharded-to-sharded.png) + +1. **Non-Sharded Database** +2. **Replication to Sharded Database** + The database is unaware that the destination database is sharded, + no special syntax or preparation is needed. +3. **Orchestrator** + The orchestrator receives and prepares the replicated data, + grouping documents and document extensions by document IDs so each + entity can be stored in the correct shard. +4. **Transfer to Shard** + The orchestrator transfers each destination shard its data. 
+ Optimization routines are applied to make the process as + effective as possible. +5. **Shard** + Documents and document extensions are assigned to buckets by document ID. + Shard replies to replicated data and replication attempts are similar + to replies made by non-sharded databases. + +## Sharded Database to Sharded Database + +* The image below depicts a 3-shard database replicating data to a 5-shard database. +* Each shard replicates its data as an autonomous database. + +![Sharded Database to Sharded Database](./assets/external-replication_sharded-to-sharded.png) + +1. **DB 1 Shard** + The shard is unaware that the destination database is sharded. +2. **Replication to DB 2** + The database is unaware that the destination database is sharded, + no special syntax or preparation is needed. +3. **DB 2 Orchestrator** + The orchestrator receives and prepares the replicated data, + grouping documents and document extensions by document IDs so each + entity can be stored in the correct shard. +4. **Transfer to Shard** + The orchestrator transfers its data to each destination shard. + Optimization routines are applied to make the process as + effective as possible. +5. **DB 2 Shard** + Documents and document extensions are assigned to buckets by document ID. + Shard replies to replicated data and replication attempts are similar + to replies made by non-sharded databases. + + + +## Performance Considerations + +When two databases each define an external replication task targeting the +other, every data item sent from one database to the other will eventually +be sent back to the original sender by the target. The original sender will +then recognize that the data item is already stored locally, ignore it, and +end the cycle. + +In this specific case of two databases replicating data to each other, +take this overhead into account in your performance planning and tests. + 
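+To close, a minimal sketch of defining an external replication task from code, +as mentioned in the introduction above (the connection string name, target +database, and URL are placeholders): + + +{`// Define a connection string to the target database +store.Maintenance.Send(new PutConnectionStringOperation<RavenConnectionString>( + new RavenConnectionString + { + Name = "target-cs", // placeholder name + Database = "TargetDB", // placeholder target database + TopologyDiscoveryUrls = new[] { "https://target-server:443" } + })); + +// Define and send the external replication task; +// the syntax is the same for sharded and non-sharded databases +store.Maintenance.Send(new UpdateExternalReplicationOperation( + new ExternalReplication("TargetDB", "target-cs") + { + Name = "MyExternalReplication" + })); +`} + 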
diff --git a/versioned_docs/version-7.1/sharding/import-and-export.mdx b/versioned_docs/version-7.1/sharding/import-and-export.mdx new file mode 100644 index 0000000000..5c2226a92d --- /dev/null +++ b/versioned_docs/version-7.1/sharding/import-and-export.mdx @@ -0,0 +1,146 @@ +--- +title: "Sharding: Import and Export" +hide_table_of_contents: true +sidebar_label: Import and Export +sidebar_position: 8 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding: Import and Export + + +* Smuggler is a RavenDB interface with which data can be + exported from a database into a dump file and imported + from a dump file into a database. + Learn to use Smuggler [here](../client-api/smuggler/what-is-smuggler.mdx). + +* Smuggler is operated using the same API when the database + is sharded and when it isn't, and the same set of features + is available in both cases. + E.g., a [transform script](../client-api/smuggler/what-is-smuggler.mdx#transformscript) + can be used to filter and structure the transferred data + when the database is sharded and when it is non-sharded. + +* **Studio** can also be used to + [export](../studio/database/tasks/export-database.mdx) + and [import](../studio/database/tasks/import-data/import-data-file.mdx) + data from and to a sharded database, the same way it is + done with a non-sharded database. + Behind the scenes, Studio uses Smuggler to perform these operations. + +* In this page: + * [Export](../sharding/import-and-export.mdx#export) + * [Import](../sharding/import-and-export.mdx#import) + * [Export and Import Options Summary](../sharding/import-and-export.mdx#export-and-import-options-summary) + + +## Export + +When Smuggler is called to +[export](../client-api/smuggler/what-is-smuggler.mdx#export) +a sharded database: + +* The client sends an export request to the orchestrator. +* The orchestrator forwards the request to all other shards. +* All shards stream their data to the client machine, where the data is gathered in a single `.ravendbdump` file. + + The amount of time it takes to complete the export process depends + upon factors like shard database sizes, network performance, and + the user-defined transform script. + + +See a code example [here](../client-api/smuggler/what-is-smuggler.mdx#example). + + +In a sharded database, results are returned by Smuggler, Import, and Export +in a `ShardedSmugglerResult` type. This type is specific to a sharded database, +and casting it using a non-sharded type [will fail](../migration/client-api/client-breaking-changes.mdx#casting-smuggler-results). + + + + +## Import + +Smuggler can be used to [import](../client-api/smuggler/what-is-smuggler.mdx#import) +data into a database from either a [.ravendbdump](../sharding/import-and-export.mdx#export) +file or from backup files (full or incremental). + +When data is imported into a sharded database, one of the shard nodes +is appointed **orchestrator**; the orchestrator retrieves items from +the `.ravendbdump` or backup files, gathers the imported items into +batches, and distributes them among the shards. + +## Importing data from a `.ravendbdump` file + +There are no preliminary requirements regarding the structure +or contents of the database the data is imported to. + +* The data can be imported into an existing database, + as well as into a newly created one. +* The data can be imported into both sharded and non-sharded databases. + In both cases, the data will be retrieved into the database from + a local `.ravendbdump` file. +* If the database is sharded, the imported data will be distributed among the shards. + If a shard has [several nodes](../sharding/overview.mdx#shard-replication), + its database will be replicated to all the nodes of that shard. + +See a code example [here](../client-api/smuggler/what-is-smuggler.mdx#example-1). + +In a sharded database, results are returned by Smuggler, Import, and Export +in a `ShardedSmugglerResult` type. This type is specific to a sharded database, +and casting it using a non-sharded type [will fail](../migration/client-api/client-breaking-changes.mdx#casting-smuggler-results). + + +## Importing data from backup files + +* Backup files are given an extension that reflects the backup type. + A full-backup file, for example, will be given a `.ravendb-full-backup` + extension. + Regardless of the extension, the internal structure of backup + files is similar to that of `.ravendbdump` files. + It **is** therefore possible to import backup files. 
+* Unlike the [restore](../client-api/operations/maintenance/backup/restore.mdx) operation, + which creates a new database from the backup files, importing + a backup file adds the imported data to data already stored in + the database. +* Import can be helpful in the following cases, among others: + * A backup created for a non-sharded database can be imported + into a sharded database. + This allows the migration of data from a non-sharded database + to a sharded one. + * When a sharded database is backed up, each shard creates a backup + of its own database. If you want to split the sharded database + into multiple databases, you can simply import the backup of each + shard into a newly created (sharded or non-sharded) database. + + +[Incremental backups](../server/ongoing-tasks/backup-overview.mdx#backup-scope:-full-or-incremental) +contain only the changes that have been made in the database since the last full backup. +Import the full backup first, and then the incremental backups that complement it. + + + + +## Export and Import Options Summary + +| Option | Available on a Sharded Database | Comment | +| -------------------- | --------------- | --------------------- | +| Export and Import [.ravendbdump](../sharding/import-and-export.mdx#export) files using [Smuggler](../client-api/smuggler/what-is-smuggler.mdx) | **Yes** | Smuggler behaves just like it does on non-sharded databases. | +| [Export](../studio/database/tasks/export-database.mdx) and [import](../studio/database/tasks/import-data/import-data-file.mdx) sharded database data using Studio | **Yes** | Behind the scenes, Studio uses Smuggler. | +| Export to **client machine** | **Yes** | | +| Export to **local shard node storage** | **No** | | +| Export to **remote locations** like S3, Azure, or Google Cloud | **No** | | +| Import from a `.ravendbdump` file | **Yes** | An orchestrator is appointed to distribute the data among the shards. | +| Import from **Backup files** | **Yes** | **Importing** data from a backup file does **not** create a new database like running the [restore process](../sharding/backup-and-restore/restore.mdx) over the backup file would, but **adds the data** to the existing database by distributing it among the shards. | +| Import from **Full backup files** | **Yes** | | +| Import from **Incremental backup files** | **Yes** | | + 
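+For reference, a minimal sketch of exporting a database and importing the resulting +file with Smuggler, as described in the sections above (the file path and database +names are placeholders): + + +{`// Export the database to a local .ravendbdump file +var exportOperation = await store.Smuggler.ExportAsync( + new DatabaseSmugglerExportOptions(), + "C:/dumps/books.ravendbdump"); // placeholder path +await exportOperation.WaitForCompletionAsync(); + +// Import the file into another database (sharded or non-sharded) +var importOperation = await store.Smuggler.ForDatabase("NewBooks") + .ImportAsync( + new DatabaseSmugglerImportOptions(), + "C:/dumps/books.ravendbdump"); +await importOperation.WaitForCompletionAsync(); +`} + 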
diff --git a/versioned_docs/version-7.1/sharding/indexing.mdx b/versioned_docs/version-7.1/sharding/indexing.mdx new file mode 100644 index 0000000000..07007d3a0f --- /dev/null +++ b/versioned_docs/version-7.1/sharding/indexing.mdx @@ -0,0 +1,94 @@ +--- +title: "Sharding: Indexing" +hide_table_of_contents: true +sidebar_label: Indexing +sidebar_position: 4 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding: Indexing + + +* Indexing a sharded database is performed locally, per shard. + There is no multi-shard indexing process. + +* Indexes use the same syntax in sharded and non-sharded databases. + +* Most indexing features supported by non-sharded databases + are also supported by sharded databases. Unsupported features are listed below. + +* In this page: + * [Indexing](../sharding/indexing.mdx#indexing) + * [Map-Reduce Indexes on a Sharded Database](../sharding/indexing.mdx#map-reduce-indexes-on-a-sharded-database) + * [Unsupported Indexing Features](../sharding/indexing.mdx#unsupported-indexing-features) + + +## Indexing + +Indexing each database shard is basically similar to indexing a non-sharded database. +As each shard holds and manages a unique dataset, indexing is performed +per shard and indexes are stored only on the shard that created and uses them. + +## Map-Reduce Indexes on a Sharded Database + +Map-reduce indexes on a sharded database reduce data twice: on each +shard during indexing, and on the orchestrator machine each time a query uses them. + +1. **Reduction by each shard during indexing** + Similarly to non-sharded databases, when shards index their data they reduce + the results by map-reduce indexes. +2. **Reduction by the orchestrator during queries** + When a query is executed over a map-reduce index, the orchestrator + distributes the query to the shards, collects and combines the results, + and then reduces them again. + + +Learn about **querying map-reduce indexes** in a sharded database [here](../sharding/querying.mdx#orderby-in-a-map-reduce-index). +A minimal map-reduce index sketch appears at the end of this page. + + +## Unsupported Indexing Features + +Unsupported or yet-unimplemented indexing features include: + +* **Rolling index deployment** + [Rolling index deployment](../indexes/rolling-index-deployment.mdx) + is not supported in a Sharded Database. +* **Loading documents from other shards** + Loading a document during indexing is possible only if the document + resides on the same shard. + Consider the index below, for example, which attempts to load a document. + If the requested document is stored on a different shard, the load operation + will be ignored. + + +{`Map = products => from product in products + select new Result + \{ + CategoryName = LoadDocument<Category>(product.Category).Name + \}; +`} + + + + You can make sure that documents share a bucket, and + can therefore locate and load each other, using the + [$ syntax](../sharding/administration/anchoring-documents.mdx). + +* **Map-Reduce Output Documents** + Using [OutputReduceToCollection](../indexes/map-reduce-indexes.mdx#map-reduce-output-documents) + to output the results of a map-reduce index to a collection + is not supported in a Sharded Database. +* [Custom Sorters](../indexes/querying/sorting.mdx#creating-a-custom-sorter) + are not supported in a Sharded Database. + 
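+To make the two-phase reduction described in the Map-Reduce section above concrete, +here is a minimal map-reduce index sketch (the `Order` class and its fields are +illustrative placeholders): + + +{`public class Orders_ByCompany : AbstractIndexCreationTask<Order, Orders_ByCompany.Result> +{ + public class Result + { + public string Company { get; set; } + public int Count { get; set; } + } + + public Orders_ByCompany() + { + // Runs on each shard, over that shard's documents only + Map = orders => from order in orders + select new Result + { + Company = order.Company, + Count = 1 + }; + + // Runs on each shard during indexing, and again on the + // orchestrator when a query combines the shards' results + Reduce = results => from result in results + group result by result.Company into g + select new Result + { + Company = g.Key, + Count = g.Sum(x => x.Count) + }; + } +} +`} + 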
diff --git a/versioned_docs/version-7.1/sharding/migration.mdx b/versioned_docs/version-7.1/sharding/migration.mdx new file mode 100644 index 0000000000..1246d9bf9c --- /dev/null +++ b/versioned_docs/version-7.1/sharding/migration.mdx @@ -0,0 +1,110 @@ +--- +title: "Sharding: Migration" +hide_table_of_contents: true +sidebar_label: Migration +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding: Migration + + +When [it's time](../sharding/overview.mdx#when-should-sharding-be-used) +to move on to a sharded database, data can be migrated from the existing +database to the new one in several ways. + +* The data can be [exported](../sharding/import-and-export.mdx#export) + from the original database into a RavenDB dump file and then + [imported](../sharding/import-and-export.mdx#import) from the file + into the new sharded database. +* **Import** can also be used with [backups](../sharding/backup-and-restore/backup.mdx) + prepared for non-sharded databases, to add their contents into + an existing sharded database. +* An [external replication task](../sharding/external-replication.mdx) + can maintain a live replica of a non-sharded database on a sharded + destination database, as a backup or as part of a gradual deployment + of the replica. + +* In this page: + * [Export & Import](../sharding/migration.mdx#export--import) + * [Backup](../sharding/migration.mdx#backup) + * [External Replication](../sharding/migration.mdx#external-replication) + + +## Export & Import + +Export is a **one-time operation** that dumps the current contents +of the database into a `.ravendbdump` file. +The exported dump file of either a sharded or a non-sharded database +can then be [imported](../sharding/import-and-export.mdx#import) into +a sharded database, which will distribute the data among its shards. + +* Exporting can be done via [code](../client-api/smuggler/what-is-smuggler.mdx#export) + or [Studio](../studio/database/tasks/export-database.mdx). +* Importing can be done via [code](../client-api/smuggler/what-is-smuggler.mdx#import) + or [Studio](../studio/database/tasks/import-data/import-data-file.mdx). +* Read about export and import in a sharded database [here](../sharding/import-and-export.mdx). + +* Data can also be + [imported from backup files](../sharding/import-and-export.mdx#importing-data-from-backup-files). + A backup of a non-sharded database **can** be imported into an existing sharded database. + The imported data will be added to the sharded database and distributed between the shards. + + Note that this differs from [restoring](../sharding/backup-and-restore/restore.mdx) + a non-sharded database, which can only create a new non-sharded database + but **not** create a sharded database or add its data to an existing database. + + + + +## Backup + +RavenDB backups are **periodic operations**, implemented by +[ongoing tasks](../server/ongoing-tasks/backup-overview.mdx) +that routinely save either a full copy of the database or +an incremental delta of data changes made since the last backup. + +To migrate data from a non-sharded database to a sharded one, +backup files made for a non-sharded database can be +[imported](../sharding/import-and-export.mdx#importing-data-from-backup-files) +into a new or an existing sharded database. + + +A backup created for a non-sharded database **cannot** be +[restored](../sharding/backup-and-restore/restore.mdx) as a sharded database. +The data can be migrated into a sharded database only by importing it. + + + + +## External Replication + +[External replication](../server/ongoing-tasks/external-replication.mdx) +is an **ongoing task** that keeps a live replica of its database's data +on another database. +The task keeps the two databases in sync by updating the destination +database with any deletion, addition, or modification made in the +origin database's data. + +An external replication task can run on a non-sharded database, +and [create a live replica of its data on a sharded database](../sharding/external-replication.mdx#non-sharded-database-to-sharded-database). +The destination (sharded) database will distribute incoming data +among its shards. 
+The sharded database can be kept and tested in a controlled environment +as long as necessary, as the original database keeps on serving its users. +When all is ready, the new database can be moved into production. + + +A live replica created using external replication includes all documents +and document extensions, but **not** server and cluster level features like +indexes and compare exchange entries. Find the full list [here](../server/ongoing-tasks/external-replication.mdx#general-information-about-external-replication-task). + + + + diff --git a/versioned_docs/version-7.1/sharding/overview.mdx b/versioned_docs/version-7.1/sharding/overview.mdx new file mode 100644 index 0000000000..e29b235fec --- /dev/null +++ b/versioned_docs/version-7.1/sharding/overview.mdx @@ -0,0 +1,251 @@ +--- +title: "Sharding Overview" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding Overview + + +* Sharding, supported by RavenDB from version 6.0 onward, is the distribution of a database's content among autonomous **Shards**, + where each Shard hosts and manages a **unique subset** of the database content. + +* Shards can be replicated across multiple nodes to enhance data accessibility. + Therefore, each RavenDB shard is hosted on at least one cluster node. + +* In most cases, sharding allows the efficient usage and management of exceptionally large databases + (e.g., a 10-terabyte DB). + +* In this page: + * [Sharding](../sharding/overview.mdx#sharding) + * [Licensing](../sharding/overview.mdx#licensing) + * [Client compatibility](../sharding/overview.mdx#client-compatibility) + * [Client-Server communication](../sharding/overview.mdx#client-server-communication) + * [When should sharding be used?](../sharding/overview.mdx#when-should-sharding-be-used) + * [Shards](../sharding/overview.mdx#shards) + * [Shard replication](../sharding/overview.mdx#shard-replication) + * [How documents are distributed among shards](../sharding/overview.mdx#how-documents-are-distributed-among-shards) + * [Buckets allocation](../sharding/overview.mdx#buckets-allocation) + * [Buckets population](../sharding/overview.mdx#buckets-population) + * [Document extensions storage](../sharding/overview.mdx#document-extensions-storage) + * [Resharding](../sharding/overview.mdx#resharding) + * [Paging](../sharding/overview.mdx#paging) + * [Using local IP addresses](../sharding/overview.mdx#using-local-ip-addresses) + * [Creating a sharded database](../sharding/overview.mdx#creating-a-sharded-database) + + +## Sharding + +As a database grows [very large](https://en.wikipedia.org/wiki/Very_large_database), storing and managing it may become too demanding for any single node. +System performance may suffer as resources like RAM, CPU, and storage are exhausted, routine chores like indexing and backup become massive tasks, +responsiveness to client requests and queries slows down, and the system's throughput spreads thin serving an ever-growing number of clients. + +With sharding, as the volume of stored data grows, the database can be scaled out by splitting it into [shards](../sharding/overview.mdx#shards). 
+This allows the database to be managed by multiple nodes and effectively removes most limits on its growth. +In this manner, the size of the overall database, comprising all shards, can reach dozens of terabytes and more, +while keeping the resources of each shard in check and maintaining high performance and throughput. +#### Licensing + + +Sharding is fully available with the **Enterprise** license. + + +* On a **Developer** license, the replication factor is restricted to 1. +* On **Community** and **Professional** licenses, all shards must be on the same node. +* Learn more about licensing [here](../start/licensing/licensing-overview.mdx). +#### Client compatibility + +Sharding is managed by the RavenDB server; +clients require no special adaptation when accessing a sharded database: + + * The client API remains unchanged when using a sharded database. + * Clients using RavenDB versions older than 6.0, which lack sharding support, + can seamlessly connect to a sharded database without any adaptations or even realizing it is sharded. + +Specific modifications to RavenDB features in a sharded environment are documented in detail +in feature-specific articles. +#### Client-Server communication + +When a client connects to a sharded database, a RavenDB server is appointed to function as an **orchestrator**, +mediating all the communication between the client and the database shards. +The client remains unaware of this process and uses the same API used by non-sharded databases to load documents, query, and perform other operations. + +Note that this additional communication between the client and the orchestrator, as well as between the orchestrator and the shards, +introduces some overhead compared to using a non-sharded database. +#### When should sharding be used? + +While sharding solves many issues related to the storage and management of high-volume databases, +the overhead it introduces can outweigh its benefits when the database size still poses no problem. + +You can postpone the transition to a sharded database when, for example, the database size is 100 GB, +the server is well-equipped and can comfortably handle a much larger volume, +and no dramatic increase in the number of potential users is expected any time soon. + +We recommend that you plan ahead for a transition to a sharded database when your database size +is in the vicinity of 250 GB, so the transition is already well established when it reaches 500 GB. + + + +* RavenDB 6.0 and above can **migrate** a non-sharded database to a sharded database via + [external replication](../server/ongoing-tasks/external-replication.mdx) or [export & import](../studio/database/tasks/export-database.mdx) operations. + +* To upgrade a non-sharded database from an earlier version of RavenDB to a sharded one, + you need to first upgrade the server to version 6.0 (or later), create a new sharded database, + and then replicate or export the data into it. + + + + +## Shards + +While each cluster node of a non-sharded database handles a full replica of the entire database, +each **shard** is assigned a **subset** of the entire database content. + + +For example: + +Take a 3-shard database, in which shard **1** is populated with documents `Users/1`..`Users/2000`, +shard **2** contains documents `Users/2001`..`Users/4000`, and shard **3** contains `Users/4001`..`Users/6000`. 
+ +A client that connects to this database to retrieve `Users/3000` and `Users/5000` would be served by an +automatically-appointed [orchestrator node](../sharding/overview.mdx#client-server-communication) +that would seamlessly retrieve `Users/3000` from shard **2** +and `Users/5000` from shard **3** and hand them to the client. + + +As far as clients are concerned, a sharded database is still a single entity: +clients are not required to detect whether the database is sharded or not, +and clients of RavenDB versions prior to 6.0, which had no sharding support, +can access a sharded database without any alterations. + +That said, shard-specific operations are also available: +a client can, for example, track the shard where a document is stored and query that shard. +Studio can be used to relocate ([reshard](../sharding/resharding.mdx)) documents from one shard to another. + +!["Studio Document View"](./assets/overview_document-view.png) +#### Shard replication + +Similar to non-sharded databases, shards can be **replicated** across cluster nodes to ensure the continuous availability +of all shards in case of a node failure, provide multiple access points, and load-balance the traffic between shard replicas. + +The number of nodes a shard is replicated to is determined by the **Shard Replication Factor**. + +!["Shard Replication"](./assets/overview_sharding-replication-factor.png) + +* In the image above, a 3-shard database is hosted by a 5-node cluster + (where two of the nodes, **D** and **E**, are not used by this database). + +* The Shard Replication Factor is set to 2, maintaining two replicas of each shard. + + + +## How documents are distributed among shards + +#### Buckets +Documents in a sharded database are stored within virtual containers called **buckets**. +The number of documents and the amount of data stored in each bucket may vary. +#### Buckets allocation + +Upon creating a sharded database, the cluster reserves **1,048,576** (1024 x 1024) buckets for the entire database. +Each shard is assigned a range of buckets from this overall set, where documents can be stored. +(Note: This default reservation method differs when using prefixed sharding. Learn more in [Bucket management](../sharding/administration/sharding-by-prefix.mdx#bucket-management)). + +!["Buckets Allocation"](./assets/overview_buckets-allocation.png) +#### Buckets population + +The cluster automatically populates the buckets with documents in the following way: + +A hashing algorithm is applied to each document ID, generating a number between **0** and **1,048,575**. +The resulting number determines the bucket number where the document is stored. +(Note: This default hashing method differs when using prefixed sharding. Learn more in [Bucket management](../sharding/administration/sharding-by-prefix.mdx#bucket-management).) + +Since the buckets are pre-assigned to the shards, +the bucket number assigned to a document also determines which shard the document will reside on. + +!["Buckets Population"](./assets/overview_buckets-population.png) + + + +* **Anchoring documents to a bucket**: + You can make documents share a bucket (and therefore a shard) based on their document ID suffix. + RavenDB uses this suffix to calculate the bucket number for the document. + Learn more in [Anchoring documents to a bucket](../sharding/administration/anchoring-documents.mdx). + +* **Anchoring documents to a shard**: + You can make documents reside on a specific shard based on their document ID prefix. 
+ RavenDB uses this prefix to calculate a bucket number that resides on the requested shard. + Learn more in [Sharding by prefix](../sharding/administration/sharding-by-prefix.mdx). + + +#### Document extensions storage + +Document extensions (i.e. Attachments, Time series, Counters, and Revisions) are stored in the same bucket as the document they belong to. +To achieve this, the bucket number (hash code) for these extensions is calculated using the ID of the document that owns them. + + + +## Resharding + +[Resharding](../sharding/resharding.mdx) is the relocation of data from one shard to another to maintain a balanced database, +where all shards handle approximately the same volume of data. + +The resharding process moves all data related to a certain bucket, including documents, document extensions, +tombstones, etc., to a different shard and then associates the bucket with the new shard. + + + +For example: + + 1. Bucket `100,000` was initially associated with shard **1**. + Therefore, all data added to this bucket has been stored in shard **1**. + 2. Relocating bucket `100,000` to shard **2** will: + * Move all the data that belongs to this bucket to shard **2**. + * Associate bucket `100,000` with shard **2**. + * From now on, any data added to this bucket will be stored in shard **2**. + + + + +## Paging + +From the client's perspective, [paging](../indexes/querying/paging.mdx) is conducted similarly in both sharded and non-sharded databases, +using the same API. + +However, paging in a sharded database is more costly because the orchestrator must load data **from each shard** +and sort the retrieved results before handing the selected page to the client. + +Read more about paging [here](../sharding/querying.mdx#paging). + + + +## Using local IP addresses + +The local IP address of a cluster node can be exposed, allowing other cluster nodes to prioritize it over the public IP address when accessing the node. +Using a node's local IP address for inter-cluster communications can speed up the service and offer substantial cost savings over time. + +Using this method can be particularly helpful in a sharded cluster, where each client request is handled by an orchestrator +that may need to communicate with all other shards to process the request and its results. + +Use [this configuration option](../server/configuration/core-configuration.mdx#serverurlcluster) to expose a node's local IP address to other nodes. + + + +## Creating a sharded database + +* When a database is created, the user can choose whether it will be sharded or not. + RavenDB (version 6.0 and later) provides this option by default, with no further steps required to enable the feature. + +* A sharded database can be created via the [Studio](../sharding/administration/studio-admin.mdx#creating-a-sharded-database) or the [Client API](../sharding/administration/api-admin.mdx). + +* A RavenDB cluster can run both sharded and non-sharded databases in parallel. 
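+
+For illustration, here is a minimal sketch of creating a sharded database through the Client API.
+It assumes a `DocumentStore` instance named `store`; the `ShardingConfiguration` class and its
+`Shards` property follow the 6.x client API and may vary slightly between client versions.
+
+
+{`// A sketch: create a database with 3 shards via the Client API.
+// ('ShardingConfiguration' / 'Shards' are assumed from the 6.x client.)
+var record = new DatabaseRecord("MyShardedDB")
+{
+    Sharding = new ShardingConfiguration
+    {
+        Shards = new Dictionary<int, DatabaseTopology>
+        {
+            { 0, new DatabaseTopology() },
+            { 1, new DatabaseTopology() },
+            { 2, new DatabaseTopology() }
+        }
+    }
+};
+
+store.Maintenance.Server.Send(new CreateDatabaseOperation(record));
+`}
+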
+
+
+
diff --git a/versioned_docs/version-7.1/sharding/querying.mdx b/versioned_docs/version-7.1/sharding/querying.mdx
new file mode 100644
index 0000000000..4e7f8d285a
--- /dev/null
+++ b/versioned_docs/version-7.1/sharding/querying.mdx
@@ -0,0 +1,600 @@
+---
+title: "Sharding: Querying"
+hide_table_of_contents: true
+sidebar_label: Querying
+sidebar_position: 5
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Sharding: Querying
+
+
+* Query syntax is similar in sharded and non-sharded databases.
+
+* A sharded database offers the same set of querying features that a non-sharded database offers,
+  so queries that were written for a non-sharded database can generally be kept as is.
+
+* Some querying features are yet to be implemented.
+  Others (like [filter](../sharding/querying.mdx#filtering-results-in-a-sharded-database)) behave a little differently in a sharded database.
+  These cases are discussed below.
+
+* In this page:
+  * [Querying a sharded database](../sharding/querying.mdx#querying-a-sharded-database)
+  * [Querying selected shards](../sharding/querying.mdx#querying-selected-shards)
+  * [Including items](../sharding/querying.mdx#including-items)
+  * [Paging results](../sharding/querying.mdx#paging-results)
+  * [Filtering results](../sharding/querying.mdx#filtering-results)
+    * [`where`](../sharding/querying.mdx#section)
+    * [`filter`](../sharding/querying.mdx#section-1)
+    * [`where` vs `filter` recommendations](../sharding/querying.mdx#vsrecommendations)
+  * [Querying Map-Reduce indexes](../sharding/querying.mdx#querying-map-reduce-indexes)
+    * [Loading document within a projection](../sharding/querying.mdx#loading-document-within-a-projection)
+    * [OrderBy in a Map-Reduce index query](../sharding/querying.mdx#orderby-in-a-map-reduce-index-query)
+  * [Timing queries](../sharding/querying.mdx#timing-queries)
+  * [Unsupported querying features](../sharding/querying.mdx#unsupported-querying-features)
+
+
+## Querying a sharded database
+
+From a user's point of view, querying a sharded RavenDB database is similar to querying a non-sharded database:
+query syntax is the same, and the same results can be expected to be returned in the same format.
+
+To make this possible, the database performs the following steps when a client sends a query to a sharded database:
+
+* The query is received by a RavenDB server that was appointed as an [orchestrator](../sharding/overview.mdx#client-server-communication).
+  The orchestrator mediates all the communication between the client and the database shards.
+* The orchestrator distributes the query to the shards.
+* Each shard runs the query over its own database, using its own indexes.
+  When the data is retrieved, the shard transfers it to the orchestrator.
+* The orchestrator combines the data it received from all shards into a single dataset, and may perform additional operations over it.
+  E.g., querying a [map-reduce index](../sharding/indexing.mdx#map-reduce-indexes-on-a-sharded-database) would retrieve from the shards data that has already been reduced by map-reduce indexes.
+  Once the orchestrator gets all the data, it reduces the full dataset once again.
+* Finally, the orchestrator returns the combined dataset to the client.
+* The client remains unaware that it has just communicated with a sharded database.
+  Note, however, that this process is costly in comparison with the simple data retrieval performed by non-sharded databases.
+  Sharding is therefore [recommended](../sharding/overview.mdx#when-should-sharding-be-used) only when the database has grown to substantial size and complexity.
+
+
+
+## Querying selected shards
+
+* A query is normally executed over all shards. However, it is also possible to query only selected shards.
+  Querying a specific shard directly avoids unnecessary trips to other shards by the orchestrator.
+
+* This approach can be useful, for example, when documents are intentionally stored on the same shard using [Anchoring documents](../sharding/administration/anchoring-documents.mdx).
+
+* To query specific shards using a pre-defined sharding prefix, see: [Querying selected shards by prefix](../sharding/administration/sharding-by-prefix.mdx#querying-selected-shards-by-prefix).
+* Use the `ShardContext` method together with `ByDocumentId` or `ByDocumentIds` to specify which shard(s) to query.
+
+* To identify which shard to query, RavenDB passes the document ID that you provide in the _ByDocumentId/s_ methods
+  to the [hashing algorithm](../sharding/overview.mdx#how-documents-are-distributed-among-shards), which determines the bucket ID and thus the shard.
+
+* The document ID parameter is not required to be one of the documents you are querying for;
+  it is just used to determine the target shard to query. See the following examples:
+
+
+
+**Query a selected shard**:
+
+Query only the shard containing document `companies/1`:
+
+
+
+
+{`// Query for 'User' documents from a specific shard:
+// =================================================
+var userDocuments = session.Query<User>()
+    // Call 'ShardContext' to select which shard to query
+    // RavenDB will query only the shard containing document "companies/1"
+    .Customize(x => x.ShardContext(s => s.ByDocumentId("companies/1")))
+    // The query predicate
+    .Where(x => x.Name == "Joe")
+    .ToList();
+
+// Variable 'userDocuments' will include all documents of type 'User'
+// that match the query predicate and reside on the shard containing document 'companies/1'.
+
+// Query for ALL documents from a specific shard:
+// ==============================================
+var allDocuments = session.Query<object>() // query with <object>
+    .Customize(x => x.ShardContext(s => s.ByDocumentId("companies/1")))
+    .ToList();
+
+// Variable 'allDocuments' will include ALL documents
+// that reside on the shard containing document 'companies/1'.
+`}
+
+
+
+{`// Query for 'User' documents from a specific shard:
+// =================================================
+var userDocuments = await asyncSession.Query<User>()
+    // Call 'ShardContext' to select which shard to query
+    .Customize(x => x.ShardContext(s => s.ByDocumentId("companies/1")))
+    // The query predicate
+    .Where(x => x.Name == "Joe")
+    .ToListAsync();
+
+// Query for ALL documents from a specific shard:
+// ==============================================
+var allDocuments = await asyncSession.Query<object>()
+    .Customize(x => x.ShardContext(s => s.ByDocumentId("companies/1")))
+    .ToListAsync();
+`}
+
+
+
+{`// Query for 'User' documents from a specific shard:
+// =================================================
+var userDocuments = session.Advanced.DocumentQuery<User>()
+    // Call 'ShardContext' to select which shard to query
+    .ShardContext(s => s.ByDocumentId("companies/1"))
+    // The query predicate
+    .WhereEquals(x => x.Name, "Joe")
+    .ToList();
+
+// Query for ALL documents from a specific shard:
+// ==============================================
+var allDocuments = session.Advanced.DocumentQuery<object>()
+    .ShardContext(s => s.ByDocumentId("companies/1"))
+    .ToList();
+`}
+
+
+
+{`// Query for 'User' documents from a specific shard:
+// =================================================
+var userDocuments = await asyncSession.Advanced.AsyncDocumentQuery<User>()
+    // Call 'ShardContext' to select which shard to query
+    .ShardContext(s => s.ByDocumentId("companies/1"))
+    // The query predicate
+    .WhereEquals(x => x.Name, "Joe")
+    .ToListAsync();
+
+// Query for ALL documents from a specific shard:
+// ==============================================
+var allDocuments = await asyncSession.Advanced.AsyncDocumentQuery<object>()
+    .ShardContext(s => s.ByDocumentId("companies/1"))
+    .ToListAsync();
+`}
+
+
+
+{`// Query for 'User' documents from a specific shard:
+// ================================================
+from "Users"
+where Name == "Joe"
+{ "__shardContext": "companies/1" }
+
+// Query for ALL documents from a specific shard:
+// ==============================================
+from @all_docs
+where Name == "Joe"
+{ "__shardContext": "companies/1" }
+`}
+
+
+
+
+
+
+**Query selected shards**:
+
+Query only the shards containing documents `companies/2` and `companies/3`:
+
+
+
+
+{`// Query for 'User' documents from the specified shards:
+// =====================================================
+var userDocuments = session.Query<User>()
+    // Call 'ShardContext' to select which shards to query
+    // RavenDB will query only the shards containing documents "companies/2" & "companies/3"
+    .Customize(x => x.ShardContext(s => s.ByDocumentIds(new[] { "companies/2", "companies/3" })))
+    // The query predicate
+    .Where(x => x.Name == "Joe")
+    .ToList();
+
+// Variable 'userDocuments' will include all documents of type 'User' that match the query predicate
+// and reside on either the shard containing document 'companies/2'
+// or the shard containing document 'companies/3'.
+
+// To get ALL documents from the designated shards instead of just 'User' documents,
+// query with \`session.Query<object>\`.
+`}
+
+
+
+{`// Query for 'User' documents from the specified shards:
+// =====================================================
+var userDocuments = await asyncSession.Query<User>()
+    // Call 'ShardContext' to select which shards to query
+    .Customize(x => x.ShardContext(s => s.ByDocumentIds(new[] { "companies/2", "companies/3" })))
+    // The query predicate
+    .Where(x => x.Name == "Joe")
+    .ToListAsync();
+`}
+
+
+
+{`// Query for 'User' documents from the specified shards:
+// =====================================================
+var userDocuments = session.Advanced.DocumentQuery<User>()
+    // Call 'ShardContext' to select which shards to query
+    .ShardContext(s => s.ByDocumentIds(new[] {"companies/2", "companies/3"}))
+    // The query predicate
+    .WhereEquals(x => x.Name, "Joe")
+    .ToList();
+`}
+
+
+
+{`// Query for 'User' documents from the specified shards:
+// =====================================================
+var userDocuments = await asyncSession.Advanced.AsyncDocumentQuery<User>()
+    // Call 'ShardContext' to select which shards to query
+    .ShardContext(s => s.ByDocumentIds(new[] {"companies/2", "companies/3"}))
+    // The query predicate
+    .WhereEquals(x => x.Name, "Joe")
+    .ToListAsync();
+`}
+
+
+
+{`// Query for 'User' documents from the specified shards:
+// =====================================================
+from "Users"
+where Name == "Joe"
+{ "__shardContext" : ["companies/2", "companies/3"] }
+
+// Query for ALL documents from the specified shards:
+// ==================================================
+from @all_docs
+where Name == "Joe"
+{ "__shardContext" : ["companies/2", "companies/3"] }
+`}
+
+
+
+
+
+
+## Including items
+
+* **Including** items by a query or an index **will** work even if the included item resides on another shard.
+  If the requested item is not located on this shard, the orchestrator will fetch it from the shard where it is located.
+
+* Note that this process will cost an extra trip to the shard that hosts the requested item.
+
+
+
+## Paging results
+
+From the client's point of view, [paging](../indexes/querying/paging.mdx) is conducted similarly in sharded and non-sharded databases,
+and the same API is used to define page size and retrieve selected pages.
+
+Under the hood, however, performing paging in a sharded database entails some overhead since the orchestrator is required to load
+the requested data **from each shard** and sort the retrieved results before handing the selected page to the client.
+
+For example, let's compare what happens when we load the 8th page (with a page size of 100) from a non-sharded and a sharded database:
+
+
+
+
+{`IList<Product> results = session
+    .Query<Product, Products_ByUnitsInStock>()
+    .Statistics(out QueryStatistics stats) // fill query statistics
+    .Where(x => x.UnitsInStock > 10)
+    .Skip(700) // skip the first 7 pages (700 results)
+    .Take(100) // get pages 701-800
+    .ToList();
+
+long totalResults = stats.TotalResults;
+`}
+
+
+
+{`IList<Product> results = session
+    .Advanced
+    .DocumentQuery<Product, Products_ByUnitsInStock>()
+    .Statistics(out QueryStatistics stats) // fill query statistics
+    .WhereGreaterThan(x => x.UnitsInStock, 10)
+    .Skip(700) // skip the first 7 pages (700 results)
+    .Take(100) // get pages 701-800
+    .ToList();
+
+long totalResults = stats.TotalResults;
+`}
+
+
+
+{`public class Products_ByUnitsInStock : AbstractIndexCreationTask<Product>
+{
+    public Products_ByUnitsInStock()
+    {
+        Map = products => from product in products
+                          select new
+                          {
+                              UnitsInStock = product.UnitsInStock
+                          };
+    }
+}
+`}
+
+
+
+* When the database is **not sharded**, the server would:
+  * Skip 7 pages.
+  * Hand page 8 to the client (results 701 to 800).
+
+* When the database is **sharded**, the orchestrator would:
+  * Load 8 pages (sorted by modification order) from each shard.
+  * Sort the retrieved results (in a 3-shard database, for example, the orchestrator would sort 2400 results).
+  * Skip 7 pages (of 24).
+  * Hand page 8 to the client (results 701 to 800).
+
+
+The shards sort the data by modification order before sending it to the orchestrator.
+For example, if a shard is required to send 800 results to the orchestrator,
+the first result will be the most recently modified document, while the last result will be the document modified first.
+
+
+
+
+## Filtering results
+
+* Data can be filtered using the [where](../indexes/querying/filtering.mdx#where)
+  and [filter](../indexes/querying/exploration-queries.mdx#filter) keywords on both non-sharded and sharded databases.
+
+* There **are**, however, differences in the behavior of these commands on sharded and non-sharded databases.
+  This section explains these differences.
+### `where`
+
+`where` is RavenDB's basic filtering command.
+It is used by the server to restrict data retrieval from the database to only those items that match given conditions.
+
+* **On a non-sharded database**
+  When a query that applies `where` is executed over a non-sharded database,
+  the filtering applies to the **entire** database.
+
+  To find only the most successful products, we can easily run a query such as:
+
+
+{`from index 'Products/Sales'
+where TotalSales >= 5000
+`}
+
+
+
+  This will retrieve only the documents of products that were sold at least 5000 times.
+
+* **On a sharded database**:
+  When a query that includes a `where` clause is sent to a sharded database,
+  filtering is applied **per shard**, over each shard's database.
+
+  This presents us with the following problem:
+  The filtering that runs on each shard takes into account only the data present on that shard.
+  If a certain product was sold 4000 times on each shard, the query demonstrated
+  above will filter this product out on each shard, even though its total sales far exceed 5000.
+
+  To solve this problem, the role of the `filter` command is [altered on sharded databases](../sharding/querying.mdx#section-1).
+
+
+  Using `where` raises no problem and is actually [recommended](../sharding/querying.mdx#vs--recommendations)
+  when the filtering is done [over a GroupBy field](../sharding/querying.mdx#orderby-in-a-map-reduce-index).
+
+### `filter`
+
+The `filter` command is used when we want to scan data that has already been retrieved from the database but is still on the server.
+
+* **On a non-sharded database**
+  When a query that includes a `filter` clause is sent to a non-sharded database, its main usage is as an [exploration query](../indexes/querying/exploration-queries.mdx):
+  an additional layer of filtering that scans the entire retrieved dataset without creating an index that would then have to be maintained.
+
+  We consider exploration queries one-time operations and use them cautiously because scanning the entire retrieved dataset may take a high toll on resources.
+
+* **On a sharded database**:
+  When a query that includes a `filter` clause is sent to a sharded database:
+  * The `filter` clause is omitted from the query that is distributed to the shards.
+    All data matching the rest of the query is retrieved from the shards to the orchestrator.
+  * The `filter` clause is executed on the orchestrator machine over the entire retrieved dataset.
+
+  **On the Cons side**,
+  a huge amount of data may be retrieved from the database and then scanned by the filtering condition.
+
+  **On the Pros side**,
+  this mechanism allows us to filter data using [computational fields](../sharding/querying.mdx#orderby-in-a-map-reduce-index) as we do over a non-sharded database.
+  The query below, for example, will indeed return all the products that were sold at least 5000 times,
+  no matter how their sales are divided between the shards.
+
+
+{`from index 'Products/Sales'
+filter TotalSales >= 5000
+`}
+
+
+
+  The results volume retrieved from the shards can be decreased (when it makes sense as part of the query)
+  by applying `where` [over a GroupBy field](../sharding/querying.mdx#orderby-in-a-map-reduce-index) before calling `filter`.
+
+### `where` vs `filter` recommendations
+
+As using `filter` may (unless `where` is also used) cause the retrieval and scanning of a substantial amount of data,
+it is recommended to use `filter` cautiously and restrict its operation wherever needed.
+
+* Prefer `where` over `filter` when the query is executed over a [GroupBy](../sharding/querying.mdx#orderby-in-a-map-reduce-index) field.
+* Prefer `filter` over `where` when the query is executed over a computed query field, like a [Total or Sum](../sharding/querying.mdx#orderby-in-a-map-reduce-index) field.
+* When using `filter`, set a [limit](../indexes/querying/exploration-queries.mdx#usage) if possible.
+* When `filter` is needed, use `where` first to minimize the dataset that needs to be transferred from the shards to the orchestrator and scanned by `filter` over the orchestrator machine.
+  E.g.:
+
+
+{`from index 'Products/Sales'
+where Category = 'categories/7-A'
+filter TotalSales >= 5000
+`}
+
+
+
+
+## Querying Map-Reduce indexes
+
+### Loading document within a projection
+
+[Loading a document within a Map-Reduce projection](../indexes/querying/projections.mdx#example-viii---projection-using-a-loaded-document)
+is **not supported** in a sharded database.
+
+When attempting to load a document from a Map-Reduce projection, the database will respond with a `NotSupportedInShardingException`,
+specifying that "Loading a document inside a projection from a Map-Reduce index isn't supported."
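+
+For contrast, a projection defined on a plain query **can** load a related document
+(provided the related document resides on the same shard).
+A minimal sketch, assuming illustrative `Order` and `Company` classes:
+
+
+{`// A query projection (no Map-Reduce index involved) may load a related
+// document; on a sharded database this works as long as the related
+// document resides on the same shard.
+var results = (from o in session.Query<Order>()
+               let company = RavenQuery.Load<Company>(o.Company)
+               select new
+               {
+                   CompanyName = company.Name,
+                   OrderedAt = o.OrderedAt
+               }).ToList();
+`}
+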
+
+Unlike Map-Reduce index projections, projections of queries that use no index and projections of Map indexes can load a document,
+[provided that the document is on this shard](../sharding/querying.mdx#unsupported-querying-features).
+
+| Projection | Can load Document | Condition |
+|-----------------------------|---------------------|-------------------------------|
+| Query projection | Yes | The document is on this shard |
+| Map index projection | Yes | The document is on this shard |
+| Map-Reduce index projection | No | |
+
+### OrderBy in a Map-Reduce index query
+
+Similar to its behavior under a non-sharded database, [OrderBy](../indexes/querying/sorting.mdx) is used in an index query or a dynamic query to sort the retrieved dataset by a given order.
+
+But under a sharded database, when `OrderBy` is used in a Map-Reduce index and a [limit](../indexes/querying/paging.mdx#example-ii---basic-paging)
+is applied to restrict the number of retrieved results, there are scenarios in which **all** the results will still be retrieved from all shards.
+To understand how this can happen, let's run a few queries over this Map-Reduce index:
+
+
+
+{`Reduce = results =>
+    from result in results
+    group result by result.Name
+    into g
+    select new Result
+    \{
+        // Group-by field (reduce key)
+        Name = g.Key,
+        // Computation field
+        Sum = g.Sum(x => x.Sum)
+    \};
+`}
+
+
+
+* The first query sorts the results using `OrderBy` without setting any limit.
+  This will load **all** matching results from all shards (just like this query would load all matching results from a non-sharded database).
+
+
+{`// 'MyMapReduceIndex' is a placeholder name for the index defined above
+var queryResult = session.Query<Result, MyMapReduceIndex>()
+    .OrderBy(x => x.Name)
+    .ToList();
+`}
+
+
+
+* The second query sorts the results by one of the `GroupBy` fields, `Name`, and sets a limit to restrict the retrieved dataset to 3 results.
+  This **will** restrict the retrieved dataset to the set limit.
+
+
+{`var queryResult = session.Query<Result, MyMapReduceIndex>()
+    .OrderBy(x => x.Name)
+    .Take(3) // this limit will apply while retrieving the items
+    .ToList();
+`}
+
+
+
+* The third query sorts the results **not** by a `GroupBy` field but by a field that computes a sum from retrieved values.
+  This will retrieve **all** the results from all shards regardless of the set limit, perform the computation over them all,
+  and only then sort them and provide us with just the number of results we requested.
+
+
+{`var queryResult = session.Query<Result, MyMapReduceIndex>()
+    .OrderBy(x => x.Sum)
+    .Take(3) // this limit will only apply after retrieving all items
+    .ToList();
+`}
+
+
+
+
+  Note that retrieving all the results from all shards, either by setting no limit or by setting a limit based on a computation as demonstrated above,
+  may cause the retrieval of a large amount of data and increase memory, CPU, and bandwidth usage.
+
+
+
+
+## Timing queries
+
+* The duration of queries and query parts (e.g. optimization or execution time) can be measured using the API or the Studio.
+
+* In a sharded database, the timings for each part will be provided __per shard__.
+
+* Timing is disabled by default, to avoid the measuring overhead.
+  It can be enabled per query by adding `include timings()` to an RQL query or calling [`Timings()`](../client-api/session/querying/debugging/query-timings.mdx#syntax)
+  in your query code, as explained in [include query timings](../client-api/session/querying/debugging/query-timings.mdx).
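+
+  For example, a minimal sketch of enabling timings from the client API
+  (reusing the `Product` class from the paging examples above):
+
+
+{`// Enable timings for this query; in a sharded database the returned
+// 'QueryTimings' include a per-shard breakdown of the query parts.
+QueryTimings timings = null;
+
+List<Product> results = session.Query<Product>()
+    .Customize(x => x.Timings(out timings))
+    .Where(x => x.UnitsInStock > 10)
+    .ToList();
+
+// 'timings.DurationInMs' holds the overall duration;
+// 'timings.Timings' holds the duration of each query part.
+`}
+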
+ +* To view the query timings in the Studio, open the [Query View](../studio/database/queries/query-view.mdx), + run an RQL query with `include timings()`, and open the __Timings tab__. + +!["Timing Shards Querying"](./assets/querying_timing.png) + +1. **Textual view** of query parts and their duration. + Point the mouse cursor at captions to display timing properties in the graphical view on the right. +2. **Per-shard Timings** +3. **Graphical View** + Point the mouse cursor at graph sections to display query parts duration: + **A**. Shard #0 overall query time + **B**. Shard #0 optimization period + **C**. Shard #0 query period + **D**. Shard #0 staleness period + + + +## Unsupported querying features + +Querying features that are not supported or not yet implemented on sharded databases include: + +* **Loading a document that resides on another shard** + An [index](../sharding/indexing.mdx#unsupported-indexing-features) or a query can only load a document if it resides on the same shard. + Loading a document that resides on a different shard will return _null_ instead of the loaded document. + +* **Loading a document within a map-reduce projection** + Read more about this topic [above](../sharding/querying.mdx#projection). + +* **Streaming Map-Reduce results** + [Streaming](../client-api/session/querying/how-to-stream-query-results.mdx#stream-an-index-query) + map-reduce results is not supported in a sharded database. + +* **Querying with a limit is not supported in patch/delete by query operations** + Attempting to set a [limit](../client-api/session/querying/what-is-rql.mdx#limit) when executing + [PatchByQueryOperation](../client-api/operations/patching/set-based.mdx#sending-a-patch-request) or [DeleteByQueryOperation](../client-api/operations/common/delete-by-query.mdx) + will throw a `NotSupportedInShardingException` exception. + +* **Querying for similar documents with _MoreLikeThis_** + Method [MoreLikeThis](../client-api/session/querying/how-to-use-morelikethis.mdx) is not supported in a sharded database. + + + diff --git a/versioned_docs/version-7.1/sharding/resharding.mdx b/versioned_docs/version-7.1/sharding/resharding.mdx new file mode 100644 index 0000000000..772f9d5853 --- /dev/null +++ b/versioned_docs/version-7.1/sharding/resharding.mdx @@ -0,0 +1,226 @@ +--- +title: "Sharding: Resharding" +hide_table_of_contents: true +sidebar_label: Resharding +sidebar_position: 12 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Sharding: Resharding + + +* **Resharding** is the relocation of data stored on one [shard](../sharding/overview.mdx#shards) + to another shard, to maintain an overall balanced database in which all + shards handle about the same volume of data. +* The resharding process moves all the data related to a certain [bucket](../sharding/overview.mdx#buckets) + (including documents, document extensions, tombstones, etc.), to a different + shard, and then associates the bucket with the new shard. +* An even distribution of data and workload between all shards maintains + a steadier overall usage of resources like disk space, memory, and bandwidth, + improves availability, and eases database management. +* Resharding can currently be initiated only manually, via Studio. 
+  A user can reshard a range of buckets as well as a single bucket.
+* When resharding is initiated, RavenDB implements it gradually, one bucket
+  at a time, to avoid resource overuse.
+
+* In this page:
+  * [Resharding](../sharding/resharding.mdx#resharding)
+  * [The Resharding Process](../sharding/resharding.mdx#the-resharding-process)
+  * [Following Resharding Progress](../sharding/resharding.mdx#following-resharding-progress)
+  * [Racing](../sharding/resharding.mdx#racing)
+  * [Change Vector on a Sharded Database](../sharding/resharding.mdx#change-vector-on-a-sharded-database)
+  * [Resharding and Other Features](../sharding/resharding.mdx#resharding-and-other-features)
+  * [Executing Resharding](../sharding/resharding.mdx#executing-resharding)
+  * [Bucket Ownership](../sharding/resharding.mdx#bucket-ownership)
+
+
+## Resharding
+
+Over time, data may be distributed unevenly between the database's shards,
+until some shards host and handle a much larger portion of the overall load than others.
+
+Resharding is the process of re-distributing stored data between the shards.
+
+Keeping about the same amount of data on all shards helps maintain an equal
+level of resource usage and improves the database's overall availability and
+querying speed.
+
+**Resharding is currently manual**.
+Resharding can currently be initiated only via Studio.
+RavenDB provides a convenient resharding interface and alerts
+its users when disk space and other resources run low, but it
+is up to you to start the resharding process when it is needed.
+
+**Resharding is carried out one bucket at a time**.
+The smallest unit that can be resharded is a single bucket with all its contents.
+It is also possible to reshard ranges of buckets, but resharding is always done
+one bucket at a time to keep the process light and avoid additional burden on
+shards that may already be under load.
+
+## The Resharding Process
+
+Let's follow the resharding of a range of buckets from shard #0 to shard #2,
+using a database like the one shown below:
+
+!["Sharded Database"](./assets/resharding_sharded-database.png)
+
+1. The client requests to reshard buckets from shard `#0` to shard `#2`.
+2. Shard `#0` connects to shard `#2` and transfers all the content of the first bucket to it.
+3. Shard `#0` remains the owner of the bucket until all data has been propagated to
+   all shard `#2` [replicas](../sharding/overview.mdx#shard-replication).
+4. Ownership is transferred, and the bucket is remapped to shard `#2`.
+5. Shard `#0` starts purging all the entities whose ownership is now held by shard `#2`.
+   If there are still buckets to shift, shard `#0` can start transferring content from the next bucket.
+
+## Following Resharding Progress
+
+You can follow the progress of the resharding process using:
+
+* **Studio Popup Messages**
+  When [Studio is used for resharding](../sharding/resharding.mdx#executing-resharding)
+  the user interface produces popup messages to keep users
+  informed of its progress.
+
+* **The [Database Record](../studio/database/settings/database-record.mdx)**
+  All sharding-related info is stored in the database record's `Sharding`
+  property, where this info can be accessed by all shards.
+  During resharding, the migrating buckets' details, like status, source shard,
+  and destination shard, are updated in the related `Sharding` sub-properties.
+  * Via [Studio](../studio/database/settings/database-record.mdx#the-database-record)
+    Open Studio's Database Record view and the Sharding property.
+    The details of currently-migrating buckets are recorded in
+    the `BucketMigration` property.
+    !["Database Record"](./assets/resharding_database-record.png)
+    **1**. Click to open the Settings
+    **2**. Click to view or edit the database record
+
+  * Via API
+    To get the database record via the API, pass `GetDatabaseRecordOperation` the
+    database name.
+    Open the database record's `BucketMigration` property to check the migrating buckets'
+    status and their source and destination shards.
+
+## Racing
+
+It may happen that an item (like a time series, due to the addition
+of a time series entry) finds its way into a bucket after the
+ownership of this bucket has already been shifted to another shard
+and before RavenDB managed to delete it.
+To handle such occurrences, a **periodic documents migrator** task
+routinely checks the system.
+Upon locating an item in a bucket that is already owned by another shard,
+the documents migrator task immediately initiates a new resharding process
+for the related bucket.
+
+
+
+## Change Vector on a Sharded Database
+
+On a non-sharded database, a document's [change vector](../server/clustering/replication/change-vector.mdx)
+indicates both the document's **version** (so we can tell which version of it
+to replicate, for example) and its **order** on the database (so we can tell,
+for example, whether to replicate or skip it).
+
+On a sharded database, the latter (order) property may become meaningless, because
+resharding may change the order of documents: an old document may be moved to a shard
+that contains newer documents, and get a change vector newer than theirs.
+
+To resolve this issue, resharded documents are given an altered change vector
+that explicitly defines both their version and their order, using this format:
+`<order>|<version>`
+
+* E.g. `Users/1 A:3|B:7`
+  In the example above `A:3|B:7` is `Users/1`'s change vector.
+  * `A:3` indicates the document's **order**.
+  * The `|` symbol separates the two parts.
+  * `B:7` indicates the document's **version**.
+
+
+The change vector is altered this way only for documents that have been resharded.
+
+
+
+
+## Resharding and Other Features
+
+### Resharding and External Replication
+
+During [external replication](../sharding/external-replication.mdx):
+
+* The **order** (left-hand) part of the document's change vector
+  is checked on the **source** side, to determine whether the document
+  should be replicated.
+* The **version** (right-hand) part of the document's change vector
+  is checked on the **destination** side, to determine whether this
+  version of the document already resides on it.
+### Resharding and ETL
+
+[ETL tasks](../sharding/etl.mdx) cannot determine whether a document that
+was resharded does or doesn't exist on their target.
+Therefore, ETL tasks consider **all** resharded documents (that match the
+transform script) as new or modified and transfer them all to the destination.
+### Resharding and Data Subscriptions
+
+Our promise to [data subscription](../sharding/subscriptions.mdx) workers is that we
+send all data **at least once**.
+As in a non-sharded database, we do our best not to send documents twice, but it
+is the responsibility of the worker to check whether a document is duplicated or not.
+### Resharding and Querying
+
+Since documents are stored in their buckets along with all the data
+related to them, including revisions, time series, attachments, and
+so on, resharding a large document's bucket may take a considerable
+amount of time.
+During this time, checking which shard the bucket belongs to
+[may show](../sharding/resharding.mdx#bucket-ownership)
+both the bucket's source and destination shards.
+However, if documents stored in this bucket are [queried](../sharding/querying.mdx)
+during this time, RavenDB will add them to the retrieved dataset
+**only once**, preventing result duplication due to resharding.
+
+
+
+## Executing Resharding
+
+Resharding can currently be initiated only via Studio.
+To reshard selected buckets, open the **Stats** view.
+
+!["Stats View"](./assets/resharding_stats.png)
+Open the **Buckets Report** view.
+
+!["Buckets Report"](./assets/resharding_buckets-report.png)
+Selecting a range of buckets displays the selected range.
+You can reshard a whole range of buckets, or keep increasing the
+resolution until a single bucket and the documents it contains are displayed.
+
+!["Range of Buckets"](./assets/resharding_diving-into-bucket-01.png)
+
+!["Single Bucket"](./assets/resharding_diving-into-bucket-05.png)
+Select the shard you want to transfer the bucket/s to and confirm the transfer.
+
+!["Reshard"](./assets/resharding_diving-into-bucket-06.png)
+
+!["Confirm Resharding"](./assets/resharding_confirm-resharding.png)
+Studio will indicate the progress of resharding the requested
+bucket range using popup messages until the process ends.
+
+!["Finished Resharding"](./assets/resharding_finished-resharding.png)
+### Bucket Ownership
+
+For a while, as long as there are still entities to transfer to the new
+shard's replicas or delete from the old shard, transferred buckets are
+presented as if they reside on both their old and new shards.
+
+!["Document On Two Shards"](./assets/resharding_over-two-shards.png)
+Eventually, the bucket/s reside on their new shard.
+
+!["Post Resharding"](./assets/resharding_post-resharding.png)
+
+
+
diff --git a/versioned_docs/version-7.1/sharding/subscriptions.mdx b/versioned_docs/version-7.1/sharding/subscriptions.mdx
new file mode 100644
index 0000000000..a56b035a6b
--- /dev/null
+++ b/versioned_docs/version-7.1/sharding/subscriptions.mdx
@@ -0,0 +1,111 @@
+---
+title: "Sharding: Data Subscriptions"
+hide_table_of_contents: true
+sidebar_label: Data Subscriptions
+sidebar_position: 11
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Sharding: Data Subscriptions
+
+
+* From a user's point of view, [Data Subscriptions](../client-api/data-subscriptions/what-are-data-subscriptions.mdx)
+  are created and [consumed](../client-api/data-subscriptions/consumption/how-to-consume-data-subscription.mdx)
+  in the exact same way whether the database is sharded or not.
+
+* Old clients are supported and can continue communicating with RavenDB
+  without knowing whether it is sharded or not.
+  Subscription workers of a non-sharded database are not required to change
+  anything to connect to a sharded database.
+
+* Data subscriptions in a sharded database are managed by orchestrators
+  that serve the workers, and shards that serve the orchestrators.
+
+* In this page:
+  * [Data Subscriptions in a Sharded Database](../sharding/subscriptions.mdx#data-subscriptions-in-a-sharded-database)
+  * [Unsupported Features](../sharding/subscriptions.mdx#unsupported-features)
+
+
+## Data Subscriptions in a Sharded Database
+
+To allow data subscriptions in a sharded database:
+
+* From a user's point of view, creating a data subscription is
+  done once, [just like it is done](../client-api/data-subscriptions/creation/how-to-create-data-subscription.mdx)
+  under a non-sharded database.
+* Behind the scenes, though, the [orchestrator](../sharding/overview.mdx#client-server-communication)
+  that was appointed to handle this client and received its
+  subscription request uses the subscription's settings and
+  creates a data subscription with each [shard](../sharding/overview.mdx#shards).
+* Each shard independently organizes available data in batches
+  and sends the batches to the orchestrator in response to data
+  requests.
+* The orchestrator unifies the data sent by the shards, handles
+  documents (e.g. by adding included documents even if the
+  original document arrives from one shard and the included
+  document from another), and arranges them in new consumable
+  batches.
+* The orchestrator keeps track of the worker's progress.
+  When the worker consumes its current delivery and requests
+  another, the orchestrator sends it the next available batch.
+
+Distributing the subscriptions between all shards this way
+allows RavenDB to serve its workers efficiently no matter
+how large the overall database gets.
+![Subscription](./assets/subscriptions.png)
+
+1. **Data Subscription Worker**
+2. **Worker Subscription and Data Requests**
+   * The worker subscribes with the orchestrator.
+     The worker is unaware that the destination database is sharded;
+     no special syntax or preparation is needed.
+   * The worker informs the orchestrator when all data has been consumed.
+3. **Orchestrator**
+   * A subscription is created with the orchestrator.
+   * The orchestrator registers which data has been consumed by the worker
+     and which is still to be delivered.
+4. **Orchestrator Subscription and Data Requests**
+   * The orchestrator subscribes with all shards.
+   * The orchestrator informs all shards when all data has been consumed.
+5. **Shard**
+   * A subscription is created with the shard.
+   * Relevant data is organized in consumable batches.
+     The shard registers which data has been consumed and which is still
+     to be delivered.
+6. **Data Delivery to Orchestrator**
+   When the orchestrator informs the shard that all data has been consumed
+   and the shard has an available batch, the batch is delivered to the orchestrator.
+7. **Data Delivery to Worker**
+   When the worker informs the orchestrator that all data has been consumed
+   and the orchestrator has an available batch, the batch is delivered to the worker.
+
+
+
+## Unsupported Features
+
+Data subscription features that are not yet supported under a sharded database:
+
+* [Concurrent Subscriptions](../client-api/data-subscriptions/concurrent-subscriptions.mdx)
+  Allowing multiple workers to connect to a common subscription simultaneously.
+* [Data Subscriptions Revisions Support](../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx)
+  Subscribing to document revisions.
+* [SubscriptionCreationOptions.ChangeVector](../client-api/data-subscriptions/creation/api-overview.mdx#subscriptioncreationoptionst)
+  Providing an arbitrary change vector from which the subscription would start processing
+  is currently not supported.
+
+  Setting `ChangeVector` to one of the following special values **is** supported:
+
+  * `"LastDocument"` (the latest change vector on the database)
+  * `"BeginningOfTime"` (the earliest change vector on the database)
+  * `"DoNotChange"` (keep the current subscription change vector)
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/sharding/unsupported.mdx b/versioned_docs/version-7.1/sharding/unsupported.mdx
new file mode 100644
index 0000000000..1756e0352e
--- /dev/null
+++ b/versioned_docs/version-7.1/sharding/unsupported.mdx
@@ -0,0 +1,129 @@
+---
+title: "Sharding: Unsupported Features"
+hide_table_of_contents: true
+sidebar_label: Unsupported Features
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Sharding: Unsupported Features
+
+
+* A sharded RavenDB database generally provides the same services that
+  a non-sharded database offers, so clients of older versions and of non-sharded
+  databases are supported, and existing queries, subscriptions, patches,
+  and so on require no modification.
+* Find below a list of not-yet-implemented features that are supported
+  by non-sharded RavenDB databases but not by sharded ones.
+
+* In this page:
+  * [Unsupported Features](../sharding/unsupported.mdx#unsupported-features)
+  * [Unsupported Indexing Features](../sharding/unsupported.mdx#unsupported-indexing-features)
+  * [Unsupported Querying Features](../sharding/unsupported.mdx#unsupported-querying-features)
+  * [Unsupported Document Extensions Features](../sharding/unsupported.mdx#unsupported-document-extensions-features)
+  * [Unsupported Backup Features](../sharding/unsupported.mdx#unsupported-backup-features)
+  * [Unsupported Import & Export Features](../sharding/unsupported.mdx#unsupported-import--export-features)
+  * [Unsupported Migration Features](../sharding/unsupported.mdx#unsupported-migration-features)
+  * [Unsupported Data Subscription Features](../sharding/unsupported.mdx#unsupported-data-subscription-features)
+  * [Unsupported Integrations Features](../sharding/unsupported.mdx#unsupported-integrations-features)
+  * [Unsupported Patching Features](../sharding/unsupported.mdx#unsupported-patching-features)
+  * [Unsupported Replication Features](../sharding/unsupported.mdx#unsupported-replication-features)
+
+
+## Unsupported Features
+
+## Unsupported Indexing Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| [Rolling index deployment](../indexes/rolling-index-deployment.mdx) | |
+| [Load Document from another shard](../sharding/indexing.mdx#unsupported-indexing-features) | Loading a document during indexing is possible only if the document resides on the shard. |
+| **Map-Reduce Output Documents** | Using [OutputReduceToCollection](../indexes/map-reduce-indexes.mdx#map-reduce-output-documents) to output the results of a map-reduce index to a collection is not supported in a sharded database. |
+| [Custom Sorters](../indexes/querying/sorting.mdx#creating-a-custom-sorter) | |
+
+## Unsupported Querying Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| [Load Document from another shard](../sharding/indexing.mdx#unsupported-indexing-features) | An index or a query can only load a document if it resides on the same shard. |
+| [Load Document within a map-reduce projection](../sharding/querying.mdx#projection) | |
+| **Stream Map-Reduce results** | [Streaming](../client-api/session/querying/how-to-stream-query-results.mdx#stream-an-index-query) map-reduce results is not supported in a sharded database. |
+| **Stream Includes and Loads** | [Streaming](../client-api/session/querying/how-to-stream-query-results.mdx#stream-an-index-query) Includes and Loads is not supported in a sharded database. |
+| Use `limit` with [PatchByQueryOperation](../client-api/operations/patching/set-based.mdx#patchbyqueryoperation) or [DeleteByQueryOperation](../client-api/operations/common/delete-by-query.mdx) | [Unsupported Querying Features](../sharding/querying.mdx#unsupported-querying-features) |
+| [MoreLikeThis](../client-api/session/querying/how-to-use-morelikethis.mdx) | |
+| [OrderByScore](../indexes/querying/sorting.mdx#ordering-by-score) | |
+| [OrderByDistance](../client-api/session/querying/how-to-make-a-spatial-query.mdx#spatial-sorting) | Not supported in spatial map-reduce indexes |
+| [Highlighting](../indexes/querying/highlighting.mdx) | |
+| [Intersection](../indexes/querying/intersection.mdx) | |
+
+
+## Unsupported Document Extensions Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| **Move Attachments** | E.g. `session.Advanced.Attachments.Move("users/1","foo","users/2","bar");` is not supported. |
+| **Copy Attachments** | E.g. `session.Advanced.Attachments.Copy("users/1","foo","users/2","bar");` is not supported. |
+| **Get multiple Attachments** | E.g. `session.Advanced.Attachments.Get(attachmentNames)` is not supported. |
+| **Copy Time Series** | E.g. `session.Advanced.Defer(new CopyTimeSeriesCommandData(id, "Count", id2, "Count"));` is not supported. |
+
+## Unsupported Backup Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| [Create a Snapshot Backup](../sharding/backup-and-restore/backup.mdx#backup-type) | |
+| [Restore from a Snapshot Backup](../sharding/backup-and-restore/restore.mdx#sharding-restore) | |
+
+## Unsupported Import & Export Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| [Import from a CSV file](../studio/database/tasks/import-data/import-from-csv.mdx) | |
+| **Import from an S3 Bucket** | Using GET, Studio, smuggler, import s3 dir |
+| [Import from SQL](../studio/database/tasks/import-data/import-from-sql.mdx) | |
+| [Import from Other Databases](../studio/database/tasks/import-data/import-from-other.mdx) | Importing from databases like MongoDB and CosmosDB is not supported |
+
+## Unsupported Migration Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| [Migrate from RavenDB](../studio/database/tasks/import-data/import-from-ravendb.mdx) | By POST, Studio, smuggler |
+| [Migrate from SQL DB](../studio/database/tasks/import-data/import-from-sql.mdx) | |
+
+## Unsupported Data Subscription Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| [Concurrent Subscriptions](../client-api/data-subscriptions/concurrent-subscriptions.mdx) | |
+| [Data Subscriptions Revisions Support](../client-api/data-subscriptions/advanced-topics/subscription-with-revisioning.mdx) | Subscribing to document revisions |
+| [SubscriptionCreationOptions.ChangeVector](../sharding/subscriptions.mdx#unsupported-features) | Providing a change vector to start the processing from is not supported, except for these special cases: `"LastDocument"`, `"BeginningOfTime"`, `"DoNotChange"` |
+
+## Unsupported Integrations Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| [PostgreSQL](../integrations/postgresql-protocol/overview.mdx) | |
+| [Queue ETL](../server/ongoing-tasks/etl/queue-etl/overview.mdx) | [Kafka ETL](../server/ongoing-tasks/etl/queue-etl/kafka.mdx), [RabbitMQ ETL](../server/ongoing-tasks/etl/queue-etl/rabbit-mq.mdx) |
+| [Queue Sink](../server/ongoing-tasks/queue-sink/overview.mdx) | [Kafka Queue Sink](../server/ongoing-tasks/queue-sink/kafka-queue-sink.mdx), [RabbitMQ Queue Sink](../server/ongoing-tasks/queue-sink/rabbit-mq-queue-sink.mdx) |
+
+## Unsupported Patching Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| [JSON patch](../client-api/operations/patching/json-patch-syntax.mdx) | |
+
+## Unsupported Replication Features
+
+| Unsupported Feature | Comment |
+| ------------- | ------------- |
+| [Filtered Replication](../studio/database/tasks/ongoing-tasks/hub-sink-replication/overview.mdx#filtered-replication) | |
+| [Hub/Sink Replication](../studio/database/tasks/ongoing-tasks/hub-sink-replication/overview.mdx) | |
+| **Legacy replication** | From RavenDB 3.x instances |
+
+
+
diff --git a/versioned_docs/version-7.1/start/_category_.json b/versioned_docs/version-7.1/start/_category_.json
new file mode 100644
index 0000000000..202cf83040
--- /dev/null
+++ b/versioned_docs/version-7.1/start/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 0,
+  "label": "Getting Started"
+}
diff --git a/versioned_docs/version-7.1/start/_test-driver-csharp.mdx b/versioned_docs/version-7.1/start/_test-driver-csharp.mdx
new file mode 100644
index 0000000000..995b173a57
--- /dev/null
+++ b/versioned_docs/version-7.1/start/_test-driver-csharp.mdx
@@ -0,0 +1,409 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In this section, we explain how to use [RavenDB.TestDriver](https://www.nuget.org/packages/RavenDB.TestDriver/)
+  to write RavenDB unit tests.
+
+* TestDriver uses an [Embedded Server](../server/embedded.mdx) package with the same set of
+  [prerequisites](../server/embedded.mdx#prerequisite) as embedded servers to run the tests.
+
+* In this page:
+  - [`RavenTestDriver`](../start/test-driver.mdx#raventestdriver)
+  - [Pre-initializing the store: `PreInitialize`](../start/test-driver.mdx#pre-initializing-the-store:-preinitialize)
+  - [Configure the server: `ConfigureServer`](../start/test-driver.mdx#configure-the-server:-configureserver)
+  - [Unit test](../start/test-driver.mdx#unit-test)
+  - [Complete example](../start/test-driver.mdx#complete-example)
+  - [Continuous Integration (CI) Servers](../start/test-driver.mdx#continuous-integration-(ci)-servers)
+  - [Licensing](../start/test-driver.mdx#licensing)
+
+
+
+## `RavenTestDriver`
+
+Start by creating a class that derives from `RavenTestDriver`.
+Find below a list of test driver methods, followed by [samples](../start/test-driver.mdx#complete-example).
+
+### TestDriver Methods
+
+#### `DatabaseDumpFilePath`
+Override the path to the database dump file that is loaded when calling ImportDatabase.
+
+
+{`protected virtual string DatabaseDumpFilePath => null;
+`}
+
+
+
+#### `DatabaseDumpFileStream`
+Allow overriding the stream containing the database dump loaded when calling ImportDatabase.
+
+
+{`protected virtual Stream DatabaseDumpFileStream => null;
+`}
+
+
+
+#### `GetDocumentStore`
+Get an IDocumentStore instance for the requested database.
+
+
+{`public IDocumentStore GetDocumentStore([CallerMemberName] string database = null,
+    TimeSpan? waitForIndexingTimeout = null)
+`}
+
+
+
+#### `PreInitialize`
+Pre-initialize the IDocumentStore.
+
+
+{`protected virtual void PreInitialize(IDocumentStore documentStore)
+`}
+
+
+
+#### `PreConfigureDatabase`
+Pre-configure the database record before creating it.
+
+
+{`protected virtual void PreConfigureDatabase(DatabaseRecord databaseRecord)
+`}
+
+
+
+#### `SetupDatabase`
+Initialize the database.
+
+
+{`protected virtual void SetupDatabase(IDocumentStore documentStore)
+`}
+
+
+
+#### `DriverDisposed`
+An event raised when the test driver is disposed of.
+
+
+{`protected event EventHandler DriverDisposed;
+`}
+
+
+
+#### `ConfigureServer`
+Configure the server before running it.
+
+
+{`public static void ConfigureServer(TestServerOptions options)
+`}
+
+
+
+#### `WaitForIndexing`
+Wait for indexes to become non-stale.
+
+
+{`public void WaitForIndexing(IDocumentStore store, string database = null,
+    TimeSpan? timeout = null)
+`}
+
+
+
+#### `WaitForUserToContinueTheTest`
+Pause the test and launch the Studio to examine the database state.
+
+
+{`public void WaitForUserToContinueTheTest(IDocumentStore store)
+`}
+
+
+
+#### `OpenBrowser`
+Open a browser.
+
+
+{`protected virtual void OpenBrowser(string url)
+`}
+
+
+
+#### `Dispose`
+Dispose of the server.
+
+
+{`public virtual void Dispose()
+`}
+
+
+
+
+
+## Pre-initializing the store: `PreInitialize`
+
+Pre-initializing the IDocumentStore allows you to mutate the conventions used by the document store.
+
+### Example
+
+
+
+{`//This allows us to modify the conventions of the store we get from 'GetDocumentStore'
+protected override void PreInitialize(IDocumentStore documentStore)
+\{
+    documentStore.Conventions.MaxNumberOfRequestsPerSession = 50;
+\}
+`}
+
+
+
+
+
+## Configure the server: `ConfigureServer`
+
+The `ConfigureServer` method gives you more control over your server.
+You can use it with `TestServerOptions` to change the path to the RavenDB server binaries, specify the data storage path, adjust .NET framework versions, etc.
+
+* `ConfigureServer` can only be set once per test run.
+  It needs to be set before `GetDocumentStore` is called.
+  See an [example](../start/test-driver.mdx#complete-example) below.
+
+* If it is called twice, or within the `DocumentStore` scope, you will get the following error message:
+  `System.InvalidOperationException : Cannot configure server after it was started. Please call 'ConfigureServer' method before any 'GetDocumentStore' is called.`
+
+
+
+Defining TestServerOptions gives you more control over how the embedded server runs, with just a minor [definition change](../start/test-driver.mdx#example-2).
+
+* For the complete list of `TestServerOptions`, which inherits from the embedded server's options, see embedded [ServerOptions](../server/embedded.mdx#serveroptions).
+* It's important to make sure that the correct [.NET FrameworkVersion](../server/embedded.mdx#net-frameworkversion) is set.
+
+
+
+#### Example
+
+
+
+{`var testServerOptions = new TestServerOptions
+\{
+    // Looks for the newest version on your machine including 3.1.15 and any newer patches
+    // but not major new releases (default is .NET version at time of server release).
+    FrameworkVersion = "3.1.15+",
+
+    // Specifies where ravendb server binaries are located (Optional)
+    ServerDirectory = "PATH_TO_RAVENDB_SERVER",
+
+    // Specifies where ravendb data will be placed/located (Optional)
+    DataDirectory = "PATH_TO_RAVENDB_DATADIR",
+\};
+
+ConfigureServer(testServerOptions);
+`}
+
+
+
+
+
+## Unit test
+
+We use [xunit](https://www.nuget.org/packages/xunit/) as the test framework in the example below.
+
+
+Note that the test itself is meant to show different capabilities of the test driver and is not meant to be the most efficient.
+
+
+The example below depends on the `TestDocumentByName` index and `TestDocument` class that can be seen in the [full example](../start/test-driver.mdx#complete-example).
+
+#### Example
+
+In the example, we get an `IDocumentStore` object to our test database, deploy an index, and insert two documents into the document store.
+
+We then use `WaitForUserToContinueTheTest(store)`, which launches the Studio so we can verify that the documents
+and index are deployed (we can remove this line after the test succeeds).
+
+Finally, we use `session.Query` to query for "TestDocument" where the name contains the word 'hello',
+and we assert that we have only one such document.
+
+
+
+{`[Fact]
+public void MyFirstTest()
+\{
+    using (var store = GetDocumentStore())
+    \{
+        store.ExecuteIndex(new TestDocumentByName());
+        using (var session = store.OpenSession())
+        \{
+            session.Store(new TestDocument \{ Name = "Hello world!" \});
+            session.Store(new TestDocument \{ Name = "Goodbye..." \});
+            session.SaveChanges();
+        \}
+        // If we want to query documents, sometimes we need to wait for the indexes to catch up
+        // to prevent using stale indexes.
+        WaitForIndexing(store);
+
+        // Sometimes we want to debug the test itself. This method redirects us to the studio
+        // so that we can see if the code worked as expected (in this case, created two documents).
+        WaitForUserToContinueTheTest(store);
+
+        using (var session = store.OpenSession())
+        \{
+            var query = session.Query<TestDocument, TestDocumentByName>().Where(x => x.Name == "hello").ToList();
+            Assert.Single(query);
+        \}
+    \}
+\}
+`}
+
+
+
+
+
+## Complete example
+
+This is a full unit test using [Xunit](https://www.nuget.org/packages/xunit/).
+
+In the test, we get an `IDocumentStore` object to our test database, deploy an index, and insert two documents into the document store.
+
+We then use `WaitForUserToContinueTheTest(store)`, which launches the Studio so we can verify that the documents
+and index are deployed (we can remove this line after the test succeeds).
+
+Finally, we use `session.Query` to query for "TestDocument" where the name contains the word 'hello',
+and we assert that we have only one such document.
+
+
+
+{`using Raven.Client.Documents;
+using Raven.TestDriver;
+using Xunit;
+using System.Linq;
+using Raven.Client.Documents.Indexes;
+
+namespace RavenDBTestDriverFullExample
+\{
+
+    public class RavenDBTestDriver : RavenTestDriver
+    \{
+        static RavenDBTestDriver()
+        \{
+            // ConfigureServer() must be set before calling GetDocumentStore()
+            // and can only be set once per test run.
+            ConfigureServer(new TestServerOptions
+            \{
+                DataDirectory = "C:\\\\RavenDBTestDir"
+            \});
+        \}
+        // This allows us to modify the conventions of the store we get from 'GetDocumentStore'
+        protected override void PreInitialize(IDocumentStore documentStore)
+        \{
+            documentStore.Conventions.MaxNumberOfRequestsPerSession = 50;
+        \}
+
+        [Fact]
+        public void MyFirstTest()
+        \{
+            // GetDocumentStore() creates the Document Store, which establishes and manages communication
+            // between your client application and a RavenDB cluster via HTTP requests.
+            using (var store = GetDocumentStore())
+            \{
+                store.ExecuteIndex(new TestDocumentByName());
+                using (var session = store.OpenSession())
+                \{
+                    session.Store(new TestDocument \{ Name = "Hello world!" \});
+                    session.Store(new TestDocument \{ Name = "Goodbye..." \});
+                    session.SaveChanges();
+                \}
+                // If we want to query documents, sometimes we need to wait for the indexes to catch up
+                // to prevent using stale indexes.
+                WaitForIndexing(store);
+
+                // Sometimes we want to debug the test itself. This method redirects us to the studio
+                // so that we can see if the code worked as expected (in this case, created two documents).
+                WaitForUserToContinueTheTest(store);
+
+                // Queries are defined in the session scope.
+                // If there is no relevant index to quickly answer the query, RavenDB creates an auto-index
+                // based on the query parameters.
+                // This query will use the static index defined below and filter the results by name.
+                using (var session = store.OpenSession())
+                \{
+                    var query = session.Query<TestDocument, TestDocumentByName>()
+                        .Where(x => x.Name == "hello").ToList();
+                    Assert.Single(query);
+                \}
+            \}
+        \}
+    \}
+    // AbstractIndexCreationTask allows you to create and manually define a static index.
+    public class TestDocumentByName : AbstractIndexCreationTask<TestDocument>
+    \{
+        public TestDocumentByName()
+        \{
+            Map = docs => from doc in docs select new \{ doc.Name \};
+            Indexes.Add(x => x.Name, FieldIndexing.Search);
+        \}
+    \}
+
+    public class TestDocument
+    \{
+        public string Name \{ get; set; \}
+    \}
+\}
+`}
+
+
+
+
+
+## Continuous Integration (CI) Servers
+
+Best practice is to use a CI/CD server to help automate the testing and deployment of your new code.
+Popular CI/CD products include [AppVeyor](https://www.appveyor.com/) and [Visual Studio Team Services (aka. VSTS)](https://visualstudio.microsoft.com/team-services/).
+
+
+
+## Licensing
+
+The embedded server that TestDriver uses while running your tests can only apply the
+features and access the resources defined by its [license](https://ravendb.net/buy).
+An unlicensed server, for example, can use no more than 3 CPU cores, while
+a server licensed with a [free developer license](https://ravendb.net/buy#developer)
+can use up to 9 cores and run considerably faster.
+
+* When the server is started, its license is validated.
+  * If the validation succeeds, the server will run, applying the capabilities defined
+    by its license.
+  * If the validation fails, the server may still run - but its capabilities will be
+    limited to those defined by the basic [AGPL](https://ravendb.net/legal/ravendb/commercial-license-eula)
+    license (e.g., using up to 3 CPU cores).
+
+    If the validation fails because the license expired, and the expiration date precedes
+    the server build date, the server will not run.
+
+
+* A `TestServerOptions.Licensing.ThrowOnInvalidOrMissingLicense` configuration option
+  has been available since RavenDB `5.4`. It determines whether to throw a `LicenseExpiredException`
+  if TestDriver uses an unlicensed embedded server.
+  * If `ThrowOnInvalidOrMissingLicense` is set to **`true`** and the validation fails,
+    a `LicenseExpiredException` is thrown to **warn TestDriver users** that
+    without a valid license their server's capabilities are limited, and they
+    may therefore miss out on much of their system's potential.
+  * If the configuration option is set to **`false`**, **no exception is thrown**
+    even if a license cannot be validated.
+  * Since RavenDB version `6.2`, `TestServerOptions.Licensing.ThrowOnInvalidOrMissingLicense`
+    defaults to **`true`**, so a `LicenseExpiredException` **is** thrown
+    if the embedded server used by TestDriver fails to validate its license.
+    Users who prefer that no exception be thrown when an unlicensed embedded server
+    is used can explicitly set the configuration option to **`false`**.
+
+* Additional `TestServerOptions.Licensing` configuration options are available as well;
+  you can read about them [here](../server/embedded.mdx#licensing-options).
+
+
+
+
+
+
diff --git a/versioned_docs/version-7.1/start/_test-driver-java.mdx b/versioned_docs/version-7.1/start/_test-driver-java.mdx
new file mode 100644
index 0000000000..c5a15f4742
--- /dev/null
+++ b/versioned_docs/version-7.1/start/_test-driver-java.mdx
@@ -0,0 +1,244 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* In this section, we explain how to use [ravendb-test-driver](https://central.sonatype.com/search?q=g:net.ravendb%20%20a:ravendb-test-driver&core=gav&smo=true)
+  to write RavenDB unit tests.
+
+* TestDriver uses an [Embedded Server](../server/embedded.mdx) package with the same set of
+  [prerequisites](../server/embedded.mdx#prerequisites) as embedded servers to run the tests.
+
+* In this page:
+  - [RavenTestDriver](../start/test-driver.mdx#raventestdriver)
+  - [Pre-initializing the store](../start/test-driver.mdx#preinitialize)
+  - [ConfigureServer](../start/test-driver.mdx#configureserver)
+  - [Unit test](../start/test-driver.mdx#unittest)
+  - [Complete example](../start/test-driver.mdx#complete-example)
+
+
+
+## RavenTestDriver
+
+First, please make sure that the Raven TestDriver is added to your project dependencies.
+
+```
+<dependency>
+  <groupId>net.ravendb</groupId>
+  <artifactId>ravendb-test-driver</artifactId>
+  <version>4.1.3</version>
+  <scope>test</scope>
+</dependency>
+```
+
+Then, define a class that derives from the Raven TestDriver.
+
+We can start by reviewing TestDriver methods, and then get into the implementation
+(find a complete Raven TestDriver code sample [below](../start/test-driver.mdx#complete-example)).
+### Methods
+
+| Signature | Description |
+| ----------| ----- |
+| **protected String getDatabaseDumpFilePath()** | Allows you to override the path to the database dump file that will be loaded when calling importDatabase. |
+| **protected InputStream getDatabaseDumpFileStream()** | Allows you to override the stream containing the database dump that will be loaded when calling importDatabase. |
+| **public IDocumentStore getDocumentStore()** | Gets you an IDocumentStore instance. |
+| **public IDocumentStore getDocumentStore(String database)** | Gets you an IDocumentStore instance for the requested database. |
+| **public IDocumentStore getDocumentStore(GetDocumentStoreOptions options)** | Gets you an IDocumentStore instance. |
+| **public IDocumentStore getDocumentStore(GetDocumentStoreOptions options, String database)** | Gets you an IDocumentStore instance for the requested database. |
+| **protected void preInitialize(IDocumentStore documentStore)** | Allows you to pre-initialize the IDocumentStore. |
+| **protected void setupDatabase(IDocumentStore documentStore)** | Allows you to initialize the database. |
+| **protected Consumer<RavenTestDriver> onDriverClosed** | An event that is raised when the test driver has been closed. |
+| **public static void configureServer(ServerOptions options)** | Allows you to configure your server before running it. |
+| **public static void waitForIndexing(IDocumentStore store)** | Allows you to wait for indexes to become non-stale. |
+| **public static void waitForIndexing(IDocumentStore store, String database)** | Allows you to wait for indexes to become non-stale. |
+| **public static void waitForIndexing(IDocumentStore store, String database, Duration timeout)** | Allows you to wait for indexes to become non-stale. |
+| **protected void waitForUserToContinueTheTest(IDocumentStore store)** | Allows you to break the test and launch the Studio to examine the state of the database. |
+| **protected void openBrowser(String url)** | Allows you to open the browser. |
+| **public void close()** | Allows you to dispose of the server. |
+
+
+
+## PreInitialize
+
+Pre-initializing the IDocumentStore allows you to mutate the conventions used by the document store.
+
+### Example
+
+
+
+{`// This allows us to modify the conventions of the store we get from 'getDocumentStore'
+@Override
+protected void preInitialize(IDocumentStore documentStore) \{
+    documentStore.getConventions().setMaxNumberOfRequestsPerSession(50);
+\}
+`}
+
+
+
+
+
+## UnitTest
+
+We'll be using [JUnit](https://junit.org/) as the test framework in the example below.
+Note that the test itself is meant to show different capabilities of the test driver and is not meant to be the most efficient.
+The example below depends on the `TestDocumentByName` index and `TestDocument` class that can be seen in the [full example](../start/test-driver.mdx#complete-example).
+
+### Example
+
+
+
+{`@Test
+public void myFirstTest() \{
+    try (IDocumentStore store = getDocumentStore()) \{
+        store.executeIndex(new TestDocumentByName());
+
+        try (IDocumentSession session = store.openSession()) \{
+            TestDocument testDocument1 = new TestDocument();
+            testDocument1.setName("Hello world!");
+            session.store(testDocument1);
+
+            TestDocument testDocument2 = new TestDocument();
+            testDocument2.setName("Goodbye...");
+            session.store(testDocument2);
+
+            session.saveChanges();
+        \}
+
+        waitForIndexing(store); // If we want to query documents, sometimes we need to wait for the indexes to catch up
+        waitForUserToContinueTheTest(store); // Sometimes we want to debug the test itself; this redirects us to the Studio
+
+        try (IDocumentSession session = store.openSession()) \{
+            List<TestDocument> query = session.query(TestDocument.class, TestDocumentByName.class)
+                .whereEquals("name", "hello")
+                .toList();
+
+            Assert.assertEquals(1, query.size());
+        \}
+    \}
+\}
+`}
+
+
+
+In the test, we get an IDocumentStore to our test database, deploy an index, and insert two documents into it.
+We then wait for the indexing to complete, and launch the Studio so we can verify that the documents and index
+are deployed (we can remove this line once the test is working).
+Finally, we query for a TestDocument whose name contains the word 'hello' and assert that we have only one
+such document.
+
+
+
+## ConfigureServer
+
+Before the RavenDB server can be started, TestDriver extracts the server binaries to `targetServerLocation` (default: `.`).
+Optionally, the target directory can be cleaned up beforehand (when the `cleanTargetServerLocation` option is turned on; default: false).
+
+The `configureServer` method gives you more control over your server.
+You can use it with `ServerOptions` to change the target path where the Raven server binaries are extracted, to specify where your RavenDB data is stored, to configure security, etc.
+
+
+
+`ServerOptions` gives you control over how the embedded server is going to run
+with just a minor change. Here you can change your targetServerLocation.
+
+| Name | Type | Description |
+| ------------- | ------------- | ----- |
+| **targetServerLocation** | string | The temporary path used by TestDriver to extract server binary files (.dll) |
+| **logsPath** | string | Path to server log files |
+| **dataDirectory** | string | Path where the server stores data files |
+| **cleanTargetServerLocation** | boolean | Should we remove all files from targetServerLocation before extracting server binaries? |
+
+
+
+### Example
+
+
+
+{`ServerOptions testServerOptions = new ServerOptions();
+
+// Specify where the ravendb server should be extracted (optional)
+testServerOptions.setTargetServerLocation("PATH_TO_TEMPORARY_SERVER_LOCATION");
+
+// Specify where ravendb data will be placed/located (optional)
+testServerOptions.setDataDirectory("PATH_TO_RAVENDB_DATADIR");
+`}
+
+
+
+
+
+## Complete Example
+
+
+
+{`public class RavenDBTestDriverFull extends RavenTestDriver \{
+
+    // This allows us to modify the conventions of the store we get from 'getDocumentStore'
+    @Override
+    protected void preInitialize(IDocumentStore documentStore) \{
+        documentStore.getConventions().setMaxNumberOfRequestsPerSession(50);
+    \}
+
+    @Test
+    public void myFirstTest() \{
+        ServerOptions serverOptions = new ServerOptions();
+        serverOptions.setDataDirectory("C:\\\\RavenDBTestDir");
+        configureServer(serverOptions);
+
+        try (IDocumentStore store = getDocumentStore()) \{
+            store.executeIndex(new TestDocumentByName());
+
+            try (IDocumentSession session = store.openSession()) \{
+                TestDocument testDocument1 = new TestDocument();
+                testDocument1.setName("Hello world!");
+                session.store(testDocument1);
+
+                TestDocument testDocument2 = new TestDocument();
+                testDocument2.setName("Goodbye...");
+                session.store(testDocument2);
+
+                session.saveChanges();
+            \}
+
+            waitForIndexing(store); // If we want to query documents, sometimes we need to wait for the indexes to catch up
+            waitForUserToContinueTheTest(store); // Sometimes we want to debug the test itself; this redirects us to the Studio
+
+            try (IDocumentSession session = store.openSession()) \{
+                List<TestDocument> query = session.query(TestDocument.class, TestDocumentByName.class)
+                    .whereEquals("name", "hello")
+                    .toList();
+
+                Assert.assertEquals(1, query.size());
+            \}
+        \}
+    \}
+
+    public static class TestDocumentByName extends AbstractIndexCreationTask \{
+        public TestDocumentByName() \{
+            map = "from doc in docs select new \{ doc.name \}";
+            index("name", FieldIndexing.SEARCH);
+        \}
+    \}
+
+    public static class TestDocument \{
+        private String name;
+
+        public String getName() \{
+            return name;
+        \}
+
+        public void setName(String name) \{
+            this.name = name;
+        \}
+    \}
+
+\}
+`}
+
+
+
+
+
+
diff --git
a/versioned_docs/version-7.1/start/about-examples.mdx b/versioned_docs/version-7.1/start/about-examples.mdx new file mode 100644 index 0000000000..b6c258ef79 --- /dev/null +++ b/versioned_docs/version-7.1/start/about-examples.mdx @@ -0,0 +1,153 @@ +--- +title: "Getting Started: A Few Words About Examples" +hide_table_of_contents: true +sidebar_label: About Examples +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Getting Started: A Few Words About Examples + +Most examples in this documentation use the `Northwind` sample database (with minor adjustments) which is fairly popular in .NET, and can be deployed easily using your RavenDB Studio (more information on how to deploy it can be found [here](../studio/database/tasks/create-sample-data.mdx)). + +Not everything can be shown in an easy and understandable manner when `Northwind` is used, so there are some articles with custom examples to better illustrate and describe more sophisticated features. + +## Northwind Classes + + + +{`public class Company +\{ + public string Id \{ get; set; \} + public string ExternalId \{ get; set; \} + public string Name \{ get; set; \} + public Contact Contact \{ get; set; \} + public Address Address \{ get; set; \} + public string Phone \{ get; set; \} + public string Fax \{ get; set; \} +\} + +public class Address +\{ + public string Line1 \{ get; set; \} + public string Line2 \{ get; set; \} + public string City \{ get; set; \} + public string Region \{ get; set; \} + public string PostalCode \{ get; set; \} + public string Country \{ get; set; \} + public Location Location \{ get; set; \} +\} + +public class Location +\{ + public double Latitude \{ get; set; \} + public double Longitude \{ get; set; \} +\} + +public class Contact +\{ + public string Name \{ get; set; \} + public string Title \{ get; set; \} +\} + +public class Category +\{ + public string Id \{ get; set; \} + public string Name \{ get; set; \} + public string Description \{ get; set; \} +\} + +public class Order +\{ + public string Id \{ get; set; \} + public string Company \{ get; set; \} + public string Employee \{ get; set; \} + public DateTime OrderedAt \{ get; set; \} + public DateTime RequireAt \{ get; set; \} + public DateTime? 
ShippedAt \{ get; set; \}
+    public Address ShipTo \{ get; set; \}
+    public string ShipVia \{ get; set; \}
+    public decimal Freight \{ get; set; \}
+    public List<OrderLine> Lines \{ get; set; \}
+\}
+
+public class OrderLine
+\{
+    public string Product \{ get; set; \}
+    public string ProductName \{ get; set; \}
+    public decimal PricePerUnit \{ get; set; \}
+    public int Quantity \{ get; set; \}
+    public decimal Discount \{ get; set; \}
+\}
+
+public class Product
+\{
+    public string Id \{ get; set; \}
+    public string Name \{ get; set; \}
+    public string Supplier \{ get; set; \}
+    public string Category \{ get; set; \}
+    public string QuantityPerUnit \{ get; set; \}
+    public decimal PricePerUnit \{ get; set; \}
+    public int UnitsInStock \{ get; set; \}
+    public int UnitsOnOrder \{ get; set; \}
+    public bool Discontinued \{ get; set; \}
+    public int ReorderLevel \{ get; set; \}
+\}
+
+public class Supplier
+\{
+    public string Id \{ get; set; \}
+    public Contact Contact \{ get; set; \}
+    public string Name \{ get; set; \}
+    public Address Address \{ get; set; \}
+    public string Phone \{ get; set; \}
+    public string Fax \{ get; set; \}
+    public string HomePage \{ get; set; \}
+\}
+
+public class Employee
+\{
+    public string Id \{ get; set; \}
+    public string LastName \{ get; set; \}
+    public string FirstName \{ get; set; \}
+    public string Title \{ get; set; \}
+    public Address Address \{ get; set; \}
+    public DateTime HiredAt \{ get; set; \}
+    public DateTime Birthday \{ get; set; \}
+    public string HomePhone \{ get; set; \}
+    public string Extension \{ get; set; \}
+    public string ReportsTo \{ get; set; \}
+    public List<string> Notes \{ get; set; \}
+    public List<string> Territories \{ get; set; \}
+\}
+
+public class Region
+\{
+    public string Id \{ get; set; \}
+    public string Name \{ get; set; \}
+    public List<Territory> Territories \{ get; set; \}
+\}
+
+public class Territory
+\{
+    public string Code \{ get; set; \}
+    public string Name \{ get; set; \}
+    public string Area \{ get; set; \}
+\}
+
+public class Shipper
+\{
+    public string Id \{ get; set; \}
+    public string Name \{ get; set; \}
+    public string Phone \{ get; set; \}
+\}
+`}
+
+
diff --git a/versioned_docs/version-7.1/start/assets/Cluster-Parent-Nodes-Folders.png b/versioned_docs/version-7.1/start/assets/Cluster-Parent-Nodes-Folders.png
new file mode 100644
index 0000000000..0b7c2d1bd5
Binary files /dev/null and b/versioned_docs/version-7.1/start/assets/Cluster-Parent-Nodes-Folders.png differ
diff --git a/versioned_docs/version-7.1/start/assets/help-us-improve.png b/versioned_docs/version-7.1/start/assets/help-us-improve.png
new file mode 100644
index 0000000000..4b3da3fb27
Binary files /dev/null and b/versioned_docs/version-7.1/start/assets/help-us-improve.png differ
diff --git a/versioned_docs/version-7.1/start/assets/run-ps1-with-PowerShell.png b/versioned_docs/version-7.1/start/assets/run-ps1-with-PowerShell.png
new file mode 100644
index 0000000000..84cb9fe24f
Binary files /dev/null and b/versioned_docs/version-7.1/start/assets/run-ps1-with-PowerShell.png differ
diff --git a/versioned_docs/version-7.1/start/containers/_category_.json b/versioned_docs/version-7.1/start/containers/_category_.json
new file mode 100644
index 0000000000..d0579bb7ae
--- /dev/null
+++ b/versioned_docs/version-7.1/start/containers/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 7,
+  "label": "Containers"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/start/containers/deployment-guides.mdx b/versioned_docs/version-7.1/start/containers/deployment-guides.mdx
new file mode 100644
index 0000000000..5b29b1b885
--- /dev/null
+++ b/versioned_docs/version-7.1/start/containers/deployment-guides.mdx
@@ -0,0 +1,24 @@
+---
+title: "Deployment Guides"
+hide_table_of_contents: true
+sidebar_label: Deployment Guides
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Deployment Guides
+
+We're continuously writing articles that guide you through various RavenDB deployment scenarios.
+
+This documentation page is an open list containing links to all of them.
+- #### Docker Compose - Production Cluster (and more)
+[https://ravendb.net/articles/ravendb-deployment-guide-docker-compose-cluster](https://ravendb.net/articles/ravendb-deployment-guide-docker-compose-cluster)
+
+- #### EKS (Amazon Elastic Kubernetes Service)
+[https://ravendb.net/articles/setting-up-ravendb-cluster-on-aws-eks](https://ravendb.net/articles/setting-up-ravendb-cluster-on-aws-eks)
diff --git a/versioned_docs/version-7.1/start/containers/dockerfile/_category_.json b/versioned_docs/version-7.1/start/containers/dockerfile/_category_.json
new file mode 100644
index 0000000000..5d992f5812
--- /dev/null
+++ b/versioned_docs/version-7.1/start/containers/dockerfile/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 3,
+  "label": "Dockerfile"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/start/containers/dockerfile/dockerfile-overview.mdx b/versioned_docs/version-7.1/start/containers/dockerfile/dockerfile-overview.mdx
new file mode 100644
index 0000000000..8348db9fa4
--- /dev/null
+++ b/versioned_docs/version-7.1/start/containers/dockerfile/dockerfile-overview.mdx
@@ -0,0 +1,179 @@
+---
+title: "Dockerfile overview"
+hide_table_of_contents: true
+sidebar_label: Overview
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Dockerfile overview
+
+Let's familiarize ourselves with the [RavenDB Dockerfile](https://github.com/ravendb/ravendb/blob/v6.2/docker/ravendb-ubuntu/Dockerfile.x64).
+
+
+##### Setting the Base Image
+
+
+{`FROM mcr.microsoft.com/dotnet/runtime-deps:8.0-jammy
+`}
+
+
+
+The `FROM` instruction sets the base image for the Dockerfile.
+In this case, it uses Microsoft's lightweight .NET runtime dependencies image, specifically tailored for Ubuntu.
+This provides a minimal foundation for running RavenDB in Docker, keeping the image size smaller and focusing only on required dependencies.
+
+
+##### Defining Build Arguments
+
+
+{`ARG PATH_TO_DEB RAVEN_USER_ID RAVEN_GROUP_ID
+`}
+
+
+
+Here, three build arguments (`ARG`) are defined:
+
+- `PATH_TO_DEB`: Specifies the path to the `.deb` package for installing RavenDB.
+- `RAVEN_USER_ID` and `RAVEN_GROUP_ID`: Used to configure the user and group IDs for the `ravendb` user, ensuring proper file permissions and container security.
+
+These arguments are placeholders to be replaced at build time with actual values.
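+
+For example, a local build might pass them like this (a sketch; the `.deb` path, user/group IDs, and image tag are illustrative values, not project defaults):
+
+
+{`docker build -f Dockerfile.x64 \\
+    --build-arg PATH_TO_DEB=./dist/ravendb.deb \\
+    --build-arg RAVEN_USER_ID=999 \\
+    --build-arg RAVEN_GROUP_ID=999 \\
+    -t my-ravendb:local .
+`}
+
+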
+
+
+##### Installing Required Dependencies
+
+
+{`RUN apt-get update \\
+    && apt-get install --no-install-recommends openssl jq curl -y
+`}
+
+
+
+The `RUN` instruction executes commands to prepare the container environment:
+
+- `apt-get update`: Updates the OS package list.
+- `apt-get install`: Installs system tools responsible for:
+  - `openssl`: managing TLS certificates
+  - `jq`: a swiss army knife for handling JSON data
+  - `curl`: performing HTTP(S) requests
+
+The `--no-install-recommends` flag minimizes the installation of optional dependencies, keeping the image lean.
+
+
+##### Setting Environment Variables
+
+This section declares default values for the environment variables that configure the image at runtime. The settings file in the RavenDB Docker image is empty, and most of the configuration is usually passed through environment variables.
+
+
+
+{`ENV RAVEN_ARGS='' \\
+    RAVEN_SETTINGS='' \\
+    RAVEN_IN_DOCKER='true' \\
+    RAVEN_Setup_Mode='Initial' \\
+    ....
+`}
+
+
+
+
+###### Key Environment Variables (Brief Overview)
+
+- **`RAVEN_IN_DOCKER`**: Flags that RavenDB is running in a Dockerized environment so it can adjust its behavior accordingly.
+- **`RAVEN_Setup_Mode`**: `'Initial'` enables the setup wizard on the first run.
+- **`RAVEN_DataDir`**: Defines where database files are stored (default `/var/lib/ravendb/data`), typically mounted as a volume for persistence.
+- **`RAVEN_Security_MasterKey_Path`**: Path to the encryption master key for securing sensitive data.
+- **`RAVEN_Security_UnsecuredAccessAllowed`**: `'PrivateNetwork'` permits unsecured access only within private networks, enhancing security if you run unsecured.
+- **`RAVEN_ServerUrl_Tcp`**: Specifies the TCP port (`38888`) used for cluster communication between nodes.
+
+These variables enable flexible configuration for development, testing, or production environments.
+
+##### Exposing Ports
+
+
+{`EXPOSE 8080 38888 161
+`}
+
+
+
+The `EXPOSE` instruction declares the ports that RavenDB uses:
+
+- `8080`: The primary HTTP port for web-based interactions.
+- `38888`: TCP port for cluster communication.
+- `161`: SNMP (Simple Network Management Protocol) port.
+
+This doesn't bind the ports but informs users about which ones are available.
+
+##### Adding the RavenDB Package
+
+
+{`COPY "$\{PATH_TO_DEB\}" /opt/ravendb.deb
+
+RUN apt install /opt/ravendb.deb -y \\
+    && apt-get autoremove -y \\
+    && rm -rf /var/lib/apt/lists/*
+`}
+
+
+
+This section installs RavenDB:
+
+1. The `COPY` command transfers the `.deb` package (RavenDB's installation file) into the container at `/opt/ravendb.deb`.
+2. The `RUN` command installs the package using `apt`, then reduces the image size by removing the cached OS package lists (`/var/lib/apt/lists/*`).
+##### Adding Scripts and Configuration
+
+
+{`COPY server-utils.sh cert-utils.sh run-raven.sh healthcheck.sh link-legacy-datadir.sh /usr/lib/ravendb/scripts/
+COPY --chown=root:$\{RAVEN_USER_ID\} --chmod=660 settings.json /etc/ravendb
+`}
+
+
+
+- Several utility scripts (`server-utils.sh`, `cert-utils.sh`, etc.) are copied into the container. These scripts manage server initialization, health checks, and certificates.
+- `settings.json`, a configuration file, is copied into `/etc/ravendb` with specific ownership (`root:${RAVEN_USER_ID}`) and permissions (`660`) for security.
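+
+Since the baked-in `settings.json` is empty, a common pattern is to mount your own copy over it at runtime instead of rebuilding the image (a sketch; the host path is illustrative):
+
+
+{`docker run -v /my/config/settings.json:/etc/ravendb/settings.json ravendb/ravendb
+`}
+
+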
+##### Setting the User and Health Check + + +{`USER ravendb:ravendb + +HEALTHCHECK --start-period=60s CMD /usr/lib/ravendb/scripts/healthcheck.sh +`} + + + +1. `USER`: Switches the container to run as the `ravendb` user instead of `root`, improving security. +2. `HEALTHCHECK`: Defines a command to verify RavenDB's health. The script (`healthcheck.sh`) is run after a 60-second delay to allow the server to initialize. +##### Declaring Volumes + + +{`VOLUME /var/lib/ravendb/data /etc/ravendb +`} + + + +`VOLUME` specifies directories to be mounted as volumes. These are: + +- `/var/lib/ravendb/data`: For database files. +- `/etc/ravendb`: For configuration and security-related files. + +Ensures that data persists outside the container lifecycle, using volumes. +##### Setting the Working Directory and Entry Command + + +{`WORKDIR /usr/lib/ravendb + +CMD [ "/bin/bash", "/usr/lib/ravendb/scripts/run-raven.sh" ] +`} + + + +- `WORKDIR`: Sets the default working directory inside the container, simplifying subsequent commands. +- `CMD`: Specifies the command to run when the container starts. Here, it launches a Bash shell to execute the `run-raven.sh` script, which handles server initialization and starts RavenDB. diff --git a/versioned_docs/version-7.1/start/containers/dockerfile/extending.mdx b/versioned_docs/version-7.1/start/containers/dockerfile/extending.mdx new file mode 100644 index 0000000000..93d4189aec --- /dev/null +++ b/versioned_docs/version-7.1/start/containers/dockerfile/extending.mdx @@ -0,0 +1,195 @@ +--- +title: "Extending & Modifying RavenDB Dockerfile" +hide_table_of_contents: true +sidebar_label: Extending & Tuning +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Extending & Modifying RavenDB Dockerfile + +Some custom setups solutions may lead you to a necessity of building over our Dockerfile, or even customizing it. +This article explains in detail, how it works. + + +For detailed Dockerfile guide, visit [Containers > Dockerfile > Overview](./dockerfile-overview) + +## Extending the Existing `ravendb/ravendb` Image + + +This approach involves using `FROM ravendb/ravendb` to build upon the official image, adding custom commands, scripts, or configurations. + + +##### Why Should I Extend the RavenDB Dockerfile? + +- **Simple customization:** Ideal for small, incremental changes that do not require rebuilding the entire image from scratch. +- **Keeps RavenDB's default behavior:** Build on top of the existing official `ravendb/ravendb` image while preserving core functionality. +- **Customization of startup scripts:** Provides flexibility to replace the `run-raven.sh` startup script with a custom, more focused version tailored to your deployment needs. + +##### **Dockerfile Example for Extension** + + + +{`# Use the official RavenDB image as the base +FROM ravendb/ravendb:7.0-ubuntu-latest + +# Add your custom script or commands +COPY my-script.sh /usr/lib/ravendb/scripts/my-script.sh +RUN chmod +x /usr/lib/ravendb/scripts/my-script.sh + +# Add an environment variable +ENV MY_CUSTOM_VAR="MyValue" + +# Customize the HEALTHCHECK +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \\ + CMD curl -f http://localhost:8080 || exit 1 + +# Replace the CMD (optional!) 
+COPY my-run-raven.sh /usr/lib/ravendb/scripts/run-raven.sh +CMD ["/bin/bash", "/usr/lib/ravendb/scripts/run-raven.sh"] +`} + + + +##### Building for Multiple Platforms + +Use BuildX if you need to build for multiple architectures (e.g., x64, arm64): + + + +{`docker buildx build --platform linux/amd64,linux/arm64 -t my-ravendb . +`} + + + +##### Tips +You can: + +- Add scripts that configure or extend RavenDB's behavior (e.g., preloading data or setting specific configurations). Ensure scripts are executable (chmod +x). +- Pass custom values or configurations via ENV directives or at runtime using docker run -e. +- Customize HEALTHCHECK to match your deployment’s requirements, ensuring RavenDB is responding as expected. +## Customizing the RavenDB Dockerfile + +If your use case requires more extensive customization, you can modify the Dockerfile directly to create a tailored image. + +#### Possibilities of modifications + +###### Change the Base Image +Replace `FROM mcr.microsoft.com/dotnet/runtime-deps:8.0-jammy` with another OS, such as `debian:buster` or `alpine:latest`. + +However, make sure to select one that includes .NET runtime dependencies or install them on your own. + +###### Add Custom Dependencies +Use `RUN` commands to install additional dependencies specific to your environment. + +###### Customize Environment Variables +Adjust `ENV` declarations to preconfigure RavenDB settings, such as data directories or logs. +You can do the same thing in Docker using `-e` argument without tinkering with the Dockerfiles, but may want to have this fixed in the image. + +###### Replace the Entry Script +Copy and use a custom `run-raven.sh` script: + + +{`COPY my-run-raven.sh /usr/lib/ravendb/scripts/run-raven.sh +CMD ["/bin/bash", "/usr/lib/ravendb/scripts/run-raven.sh"] +`} + + + + +## Entry Script ([run-raven.sh](https://github.com/ravendb/ravendb/blob/v6.2/docker/ravendb-ubuntu/run-raven.sh)) + +This script initializes and runs the RavenDB server. +It's designed to work for anyone, so in some scenarios you may want to chisel it down a bit, modify, or completely replace, which we'll cover next. +Key responsibilities include: + +###### Legacy Data Migration + + +{`/usr/lib/ravendb/scripts/link-legacy-datadir.sh +`} + + + +This script was created to handle legacy data volumes, that were working with RavenDB before 6.0. +The data had different directory structure inside Linux back then, so we needed to migrate them properly after update to 6.0+. + +###### Command Construction +Constructs the RavenDB server start command using arguments and environment variables: + + +{`COMMAND="/usr/lib/ravendb/server/Raven.Server" +`} + + + +###### Certificate Checks +Ensures proper certificate configuration for secure HTTPS connections. + +###### Startup Environment Setup + +Configures the `RAVEN_ServerUrl` environment variable if not already set. + +###### Graceful Shutdown + +Handles termination signals to cleanly stop the server. + +###### Database Auto-Creation +Calls a utility script to create the database if the `RAVEN_DATABASE` environment variable is set. + +## Replacing run-raven.sh + +Replacing allows full control of the startup process for both extending and modifying approaches. The only requirement is that **the script must ultimately run the RavenDB server**. Everything else is optional and can be customized to fit your specific needs. Some ideas include: + +###### Certificates management + +Retrieve and configure SSL certificates, such as from a secure vault or web server. 
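+
+For example, a replacement script could pull the certificate before starting the server (a minimal sketch; the vault URL, token variable, and target path are hypothetical, not RavenDB defaults):
+
+
+{`# Hypothetical example: fetch a PFX certificate from a vault before startup
+curl -sf -H "Authorization: Bearer $VAULT_TOKEN" \\
+    https://vault.example.com/v1/certs/ravendb.pfx \\
+    -o /etc/ravendb/security/server.pfx
+`}
+
+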
+ +###### Exports Environment Variables + +Define and set variables like `RAVEN_ServerUrl` to match your deployment requirements. + +###### Handles Shutdown Gracefully + +Use signal trapping for smooth termination of the server: + + +{`trap 'kill -TERM "$COMMANDPID"' TERM INT +`} + + + +###### Custom Operations +Add database initialization or additional setup scripts as needed. +##### **Example Custom Script** + + + +{`# Custom startup script for RavenDB + +echo "Starting custom RavenDB setup..." + +# Get and configure certificates +get_certificates() \{ + # Implement logic to retrieve certificates, e.g., from a web server or secured vault +\} + +check_for_certificates + +# Start RavenDB +COMMAND="/usr/lib/ravendb/server/Raven.Server -c /etc/ravendb/settings.json" +$COMMAND & +COMMANDPID=$! + +# Handle shutdown gracefully +trap 'kill -TERM "$COMMANDPID"' TERM INT +wait $COMMANDPID +`} + + diff --git a/versioned_docs/version-7.1/start/containers/dockerfile/guide.mdx b/versioned_docs/version-7.1/start/containers/dockerfile/guide.mdx new file mode 100644 index 0000000000..1ab74558ad --- /dev/null +++ b/versioned_docs/version-7.1/start/containers/dockerfile/guide.mdx @@ -0,0 +1,31 @@ +--- +title: "Working with RavenDB's Dockerfile" +hide_table_of_contents: true +sidebar_label: Start +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Working with RavenDB's Dockerfile + +RavenDB’s image and Dockerfile provide a universal and flexible base. +If you need to, you can either extend the existing `ravendb/ravendb` image with your modifications or build a new image entirely by tweaking its Dockerfile. +Additionally, for complex scenarios, you can replace the default entry script (`run-raven.sh`) in both cases to fully customize the startup process. + + +- To learn about Dockerfile, visit this page: [Containers > Dockerfile > Overview](./dockerfile-overview) + + +- If you want to modify the image, or use it as a base, go here: [Containers > Dockerfile > Extending & Tuning](./extending) + + +- To go deep with running RavenDB on prod in containers, visit our requirements knowledge base: [Containers > Requirements](../requirements/compute.mdx), or read some of our [articles](https://ravendb.net/articles) - look for deployment guides. + + +- If you have encountered a unique significant problem, contact our [support](https://ravendb.net/support) for help. diff --git a/versioned_docs/version-7.1/start/containers/general-guide.mdx b/versioned_docs/version-7.1/start/containers/general-guide.mdx new file mode 100644 index 0000000000..cc418213bd --- /dev/null +++ b/versioned_docs/version-7.1/start/containers/general-guide.mdx @@ -0,0 +1,129 @@ +--- +title: "General Guide - Containers & RavenDB" +hide_table_of_contents: true +sidebar_label: General Guide +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# General Guide - Containers & RavenDB + +## Introduction + +RavenDB is a NoSQL database built for performance, simplicity, and ease of use. 
It integrates seamlessly with containerized environments, enabling stable and efficient deployments. While running our database in a containerized environment offers numerous benefits, it also comes with challenges, such as managing data persistence and ensuring secure networking. + +This guide provides a comprehensive overview of running RavenDB in containers. It summarizes key concepts, outlines requirements, and highlights the advantages of containerized setups. Additionally, it serves as a hub of knowledge, linking to detailed documentation, guides, and articles to help you navigate every aspect of deploying RavenDB in containerized environments. + +## Contents + +1. [Core Concepts & Difficulties](../../start/containers/general-guide.mdx#core-concepts) +2. [What We Offer](../../start/containers/general-guide.mdx#what-we-offer) +3. [What We Require](../../start/containers/general-guide.mdx#what-we-require) +4. [Benefits](../../start/containers/general-guide.mdx#benefits) +## 1. Core concepts + +#### Containers +Containers encapsulate RavenDB and its dependencies for consistent behavior across environments. A containerized setup bundles the runtime, libraries, and configurations necessary for RavenDB operations into a single isolated unit. This isolation ensures that RavenDB functions reliably regardless of variations in the host operating system or underlying hardware. + +While virtual machines also provide isolation, they achieve it by replicating entire operating systems, which introduces significant overhead. Containers, by contrast, leverage the shared kernel architecture, using the host OS kernel to create isolated environments without duplicating the operating system. This approach makes containers inherently lightweight, efficient, and scalable. Other secondary concepts, like container storage and networking, enable this primary design, enhancing practicality while maintaining performance advantages over traditional VMs. + +#### Orchestration +Most systems depend on multiple applications and technologies that must work together effectively to serve the end user. Container technology simplifies this by allowing multiple applications to run in isolated environments (containers), but manual deployment and management of all applications separately can become a challenge. + +The orchestration simplifies the deployment and maintenance of systems built using multiple containers - it combines all containerized apps into a preconfigured and hardened definition. This definition describes the ideal state of the system, including application configurations, storage, networking, security measures, and scalability. This way is called the declarative approach. The developer describes the system (usually by writing a .yaml file) and supplies the orchestrator with it. Orchestrator acknowledges the definition and starts working - it deploys the described system and manages it to keep it working exactly like that. + +Orchestrators also simplify cluster scaling by design and enable self-healing by automatically recovering application containers across available nodes (instances or machines). + + +## Difficulties + +Hosting a database in containerized environment brings many difficulties and challenges, that developers need to face. + +#### Statefulness in a Stateless World +Containers are inherently stateless and designed to be ephemeral, but RavenDB, as a database, requires durable storage for its data. 
+ +This dichotomy introduces challenges like data persistence—storage backends, such as AWS EBS, Azure Disk, or on-premise NFS, which must be properly configured or integrated with the orchestration platform. + +#### Security & Networking +Proper network setup is necessary for secure and reliable communication between RavenDB nodes since RavenDB defines a Cluster differently. +Each Node is a fully independent entity rather than just a "replica." +This design involves a couple of quirks that need addressing. + +This independence enhances resiliency but requires solid configuration to maintain consistent and secure communication across the cluster. + +#### Orchestrator Complexity +Orchestration platforms simplify container management but can complicate troubleshooting. +The network setup can obscure communication paths, making identifying issues like latency or misconfigurations difficult. +Containerized RavenDB instances may be challenging to analyze without direct access due to security limitations on Docker images. + +This security detail restricts traditional debugging tools and complicates problem resolution. +It sometimes requires the usage of container host tooling, which can be not sufficient or even available in serverless regime. +Effective management of RavenDB in such environments requires a solid understanding of the database and the orchestration platform. + + + +## 2. What We Offer + +In the matter of deploying containers, aside from Server features, we explicitly offer + +#### Official Docker Images +Official RavenDB images for: + +- Ubuntu & Windows Nanoserver - [Dockerhub](https://hub.docker.com/r/ravendb/ravendb/) +- Security Hardened RedHat UBI - [IronBank](https://repo1.dso.mil/dsop/opensource/ravendb/ravendb) + +#### Helm Chart of Secured RavenDB Cluster +Automatic RavenDB cluster deployment in Kubernetes. + +- [ArtifactHub](https://artifacthub.io/packages/helm/ravendb-cluster/ravendb-cluster) +- [GitHub](https://github.com/ravendb/helm-charts) + +#### Deployment Articles & Guides +Step-by-step guides for containerized and orchestrated setups - [View Deployment Guides](./deployment-guides) or [Visit Articles Page](https://ravendb.net/articles) + +#### Containers Knowledge Base +Detailed documentation of hosting RavenDB in container environments + +#### Technical Support +Professional & community support scoped at deploying RavenDB in containers - [Support Page](https://ravendb.net/support) +## 3. What We Require + +#### Container Runtime +Docker, Podman, containerd or an equivalent. + +#### Compute +Sufficient memory & CPU. Either on-premise or cloud solutions. See [Containers > Requirements > Compute](./requirements/compute). + +#### Networking Configuration +Proper communication between nodes and ingress management. See article [Containers > Requirements > Networking](./requirements/networking). + +#### Persistent Storage +Configure volumes to retain database data across container restarts. See the article [Containers > Requirements > Storage](./requirements/storage). + +#### Security +Depending on your solution, you'll need SSL/TLS certificates, role-based access control (RBAC), or other methods for secure deployment. See the article [Containers > Requirements > Security](./requirements/security). + +## 4. Benefits +### a. Containers +#### Consistency +Containers ensure a uniform environment across development, staging, and production, eliminating the "it works on my machine" problem. + +#### Isolation +Containers provide an isolated environment for the application. 
Thus, there's no need to plan a strategy for sharing environments between applications.
+
+#### Lightweight
+Containers share the host's kernel, reducing overhead, improving resource efficiency, and keeping processes separate.
+
+
+### b. Orchestration
+#### Declarative Management
+Tools like Kubernetes enable you to define the desired system state through YAML files, covering application configuration, nodes, resource allocation, security, and networking. The orchestrator ensures the system maintains this state automatically.
+
+#### High Availability
+Orchestration platforms distribute RavenDB nodes across multiple machines or regions, ensuring resilience against hardware failures. Automatic failover mechanisms keep your database accessible even when a node goes offline.
diff --git a/versioned_docs/version-7.1/start/containers/image-usage.mdx b/versioned_docs/version-7.1/start/containers/image-usage.mdx
new file mode 100644
index 0000000000..fd38fc7553
--- /dev/null
+++ b/versioned_docs/version-7.1/start/containers/image-usage.mdx
@@ -0,0 +1,186 @@
+---
+title: "RavenDB Images"
+hide_table_of_contents: true
+sidebar_label: Docker Images
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# RavenDB Images
+
+## Usage
+RavenDB offers official images based on Ubuntu and Windows NanoServer.
+First, let's describe basic development usage of our images.
+We'll **focus on basics**, not going *too deep* into security, networking, and storage.
+For detailed instructions on how to spin up your production environment, you can read our guides for specific container/orchestration platforms - search through our [articles](https://ravendb.net/articles), or go *deep* with our knowledge base, which also addresses containerized RavenDB [production requirements](./requirements/compute).
+
+To quickly try out RavenDB, you can run the following command:
+
+
+
+{`docker run -it --rm -p 8080:8080 ravendb/ravendb
+`}
+
+
+
+This will:
+
+1. Download the appropriate image from DockerHub (if not already cached).
+2. Run RavenDB, exposing the web interface on port `8080`.
+3. Attach your terminal to the container's console.
+4. Stop and remove the container when you exit.
+
+
+#### **Available Tags**
+
+RavenDB images are available in the following flavors:
+
+- `latest`/`latest-lts`: The latest stable or latest long-term support (LTS) version of RavenDB
+- `ubuntu-latest / ubuntu-latest-lts`: Ubuntu floating tags
+- `windows-latest`, `windows-latest-lts`: Windows Nanoserver floating tags
+- **Fixed** tags like `6.2.2-ubuntu.22.04-arm32v7`, `6.0.108-ubuntu.22.04-x64`, `6.2.2-windows-1809`, and more. Check the full [tags list](https://hub.docker.com/r/ravendb/ravendb/tags) for details.
+
+#### **Runtime customization**
+
+While running your container with RavenDB inside, you may need to use some options that either modify the container behavior or edit the Raven configuration.
+Here are some examples:
+
+| **Option** | **Description** |
+|-------------------------------------------------------------------|-------------------------------------------------------------------------|
+| `-p 8080:8080` | Maps the RavenDB web interface to port `8080` on the host machine. |
+| `-v /my/config:/etc/ravendb` | Mounts a custom configuration directory. |
+| `-v /my/data:/var/lib/ravendb/data` | Mounts a custom data directory for persistence. |
+| `-e RAVEN_Setup_Mode=Initial` | Configures the setup mode (e.g., `None`, `Initial`, `LetsEncrypt`). |
+| `-e RAVEN_ServerUrl=http://0.0.0.0:46290` | Runs RavenDB on a custom HTTP port. |
+| `-e RAVEN_Logs_Mode=Operations` (in 7.0+: `RAVEN_Logs_Min_Level`) | Sets the logging level. |
+| `-e RAVEN_Security_UnsecuredAccessAllowed=PublicNetwork` | Allows unsecured access for development purposes. |
+| `--restart unless-stopped` | Ensures the container restarts automatically unless explicitly stopped. |
+
+#### **Using Environment Variables to Configure RavenDB**
+
+RavenDB's behavior can be configured through environment variables. These variables allow you to:
+
+- Disable the setup wizard: `RAVEN_Setup_Mode=None`
+- Set the RavenDB license: `RAVEN_License`
+- Set the public server URL: `RAVEN_PublicServerUrl`
+- Configure logging: `RAVEN_Logs_Mode=Operations` (in 7.0+: `RAVEN_Logs_Min_Level=3`)
+
+Example:
+
+
+
+{`docker run -p 8080:8080 \\
+    -e RAVEN_Setup_Mode=None \\
+    -e RAVEN_Security_UnsecuredAccessAllowed=PrivateNetwork \\
+    -e RAVEN_Logs_Min_Level=3 \\
+    ravendb/ravendb:ubuntu-latest
+`}
+
+
+
+For more options, visit this page: [Server Configuration](../../server/configuration.mdx)
+
+#### **Storing data**
+
+For development purposes, you may want to persist your data.
+RavenDB uses the following volumes for persistence and configuration:
+
+- **Configuration Volume**: `/etc/ravendb` (e.g., settings.json)
+- **Data Volume**: `/var/lib/ravendb/data`
+
+To mount these volumes, use:
+
+
+
+{`docker run -v /path/to/config:/etc/ravendb \\
+    -v /path/to/data:/var/lib/ravendb/data \\
+    ravendb/ravendb:ubuntu-latest
+`}
+
+
+
+To learn about statefulness and storing RavenDB data in containers, or if you run into trouble, visit [Containers > Requirements > Storage](./requirements/storage).
+
+#### **Advanced Networking**
+To read more about RavenDB networking in a containerized environment, go to [Containers > Requirements > Networking](./requirements/networking).
+
+## FAQ
+
+**Q: I use Docker Compose or automated installation. How do I disable the setup wizard?**
+A: Set the `Setup.Mode` configuration option to `None` like so:
+
+
+
+{`RAVEN_ARGS='--Setup.Mode=None'
+`}
+
+
+
+**Q: How can I try RavenDB on my local development machine in unsecured mode?**
+A: Set the environment variables:
+
+
+
+{`RAVEN_ARGS='--Setup.Mode=None'
+RAVEN_Security_UnsecuredAccessAllowed='PrivateNetwork'
+`}
+
+
+
+**Q: How can I pass command-line arguments through environment variables?**
+A: By modifying `RAVEN_ARGS`, which passes the arguments on to the RavenDB server:
+
+
+
+{`RAVEN_ARGS='--log-to-console'
+`}
+
+
+
+**Q: Can I view RavenDB logs using the `docker logs` command?**
+A: Yes, but you need to enable console logging by setting the following environment variable:
+
+
+
+{`RAVEN_ARGS='--log-to-console'
+`}
+
+
+
+Additionally, use `RAVEN_Logs_Min_Level` (7.0+) to set more specific desired logging levels.
+
+Note that enabling logging to the console may impact performance.
+
+**Q: How do I use a custom configuration file?**
+A: Mount the configuration file as a Docker volume and use the `--config-path` argument:
+
+
+
+{`docker run -v /path/to/settings.json:/etc/ravendb/settings.json \\
+-e RAVEN_ARGS='--config-path /etc/ravendb/settings.json' \\
+ravendb/ravendb
+`}
+
+
+
+Alternatively, pass the custom `settings.json` content via the `RAVEN_SETTINGS` environment variable.
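+
+For example (a sketch; it assumes `jq` is available on the host to flatten the file into a single line):
+
+
+{`docker run -e RAVEN_SETTINGS="$(jq -c . settings.json)" ravendb/ravendb
+`}
+
+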
+
+**Q: How can I manage a server running in a container using the CLI?**
+A: Besides using the RavenDB Studio, you can connect to the RavenDB administration console using the `rvn` utility:
+
+
+
+{`docker exec -it CONTAINER_ID /usr/lib/ravendb/server/rvn admin-channel
+`}
+
+
+
+This will connect you to the RavenDB admin console, where you can manage the server interactively.
+
+
diff --git a/versioned_docs/version-7.1/start/containers/requirements/_category_.json b/versioned_docs/version-7.1/start/containers/requirements/_category_.json
new file mode 100644
index 0000000000..20254051ae
--- /dev/null
+++ b/versioned_docs/version-7.1/start/containers/requirements/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 4,
+  "label": "Requirements"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/start/containers/requirements/compute.mdx b/versioned_docs/version-7.1/start/containers/requirements/compute.mdx
new file mode 100644
index 0000000000..f232e2a4fa
--- /dev/null
+++ b/versioned_docs/version-7.1/start/containers/requirements/compute.mdx
@@ -0,0 +1,70 @@
+---
+title: "Computing Requirements for RavenDB in Containers"
+hide_table_of_contents: true
+sidebar_label: Compute
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Computing Requirements for RavenDB in Containers
+
+Whether you're deploying on-premises, in virtual machines, or in managed Kubernetes environments, understanding the computing requirements is key to ensuring smooth operation.
+
+## **Container Runtime**
+
+RavenDB requires a container runtime capable of running the Docker image. This includes common runtimes such as:
+
+- Docker
+- Podman
+- containerd
+
+Ensure your runtime supports the architecture and platform for your deployment.
+
+## **Machine Requirements**
+
+To achieve optimal performance, allocate resources according to your workload:
+
+- **CPU:**
+  A minimum of 2 cores is required for basic setups. For medium workloads, allocate at least 4 cores.
+- **Memory:**
+  At least 1 GB of RAM is essential for minimal setups. If additional memory is needed, consider using `swap` as an alternative for super-minimal setups.
+- **Storage:**
+  SSDs are recommended for low-latency I/O operations. Ensure sufficient capacity to accommodate your database size, along with extra space for indexing and backups.
+
+
+## **Deployment Options**
+
+#### On-Premise Machines
+Deploy RavenDB containers on **your physical servers**.
+This gives you full control over your hardware and networking,
+and suits environments with existing infrastructure.
+
+All you need is one of the container runtimes, and a kernel.
+
+#### Virtual Machines (VMs)
+**Cloud-based or self-hosted VMs** are scalable and flexible while maintaining control over resources,
+e.g., AWS EC2, Azure Virtual Machines, or private data centers.
+
+#### Kubernetes
+Run RavenDB in managed Kubernetes clusters to simplify container orchestration and scalability.
+This option supports dynamic workloads with features like autoscaling and node group management.
+
+You should be able to deploy a **node group** to match your computing needs. Many providers offer such a service - EKS, AKS, GKE, etc.
+Kubernetes always adds significant cost to a solution, but the power it offers is often unmatched. + + +## **ARM Architecture Support** + +RavenDB supports **ARM64**, allowing deployments on cost-efficient ARM-based platforms such as: + +- AWS Graviton instances. +- Azure Ampere-based virtual machines. +- Google Cloud Tau T2A instances. + +The official RavenDB Docker image is compatible with both **x64** and **ARM64**, ensuring broad support across modern hardware. diff --git a/versioned_docs/version-7.1/start/containers/requirements/licensing.mdx b/versioned_docs/version-7.1/start/containers/requirements/licensing.mdx new file mode 100644 index 0000000000..092c930016 --- /dev/null +++ b/versioned_docs/version-7.1/start/containers/requirements/licensing.mdx @@ -0,0 +1,35 @@ +--- +title: "Licensing Requirements for RavenDB in Containers" +hide_table_of_contents: true +sidebar_label: Licensing +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Licensing Requirements for RavenDB in Containers + +When running your database in a containerized or orchestrated environment, ensuring **RavenDB can access its license** is a necessary but straightforward step. Compared to more complex matters like managing persistent storage, securing networking, or optimizing system performance, licensing is one of the simplest tasks to configure. + +For developers and DevOps teams, this involves making the license accessible to the database using methods such as configuration settings, environment variables, or secure solutions like secret vaults. The key is to provide the license to the server while maintaining its security. +Common problems involve passing the JSON license through to the container intact - **escaping, quotes, and slashes** can all mangle the value, so make sure the license arrives unaltered. + +##### Simplify License Formatting with jq +To ensure the license is properly formatted, use the following command: +``` +jq -c '.' license.json > formatted-license.json +``` + +This command reads the `license.json` file, formats it into a single-line JSON string suitable for environment variables or configuration files, and saves it to `formatted-license.json`. + +------ + +In most cases, using an environment variable is sufficient, especially in isolated or development environments. For production or sensitive setups, a secret vault can further enhance security. Regardless of the method, the container must have consistent access to the license. + +For **detailed guidance** on licensing RavenDB in containerized environments, including examples, **refer to our documentation**: [Licensing RavenDB in Docker](../../licensing/license-under-docker.mdx).
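+ +As a quick illustration, a minimal sketch of passing the formatted license through the `RAVEN_License` environment variable (the variable is described in the Docker overview; the file name follows the jq example above): + + +{`docker run -e RAVEN_License="$(cat formatted-license.json)" \\ + ravendb/ravendb:ubuntu-latest +`}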
diff --git a/versioned_docs/version-7.1/start/containers/requirements/networking.mdx b/versioned_docs/version-7.1/start/containers/requirements/networking.mdx new file mode 100644 index 0000000000..c14837e0a5 --- /dev/null +++ b/versioned_docs/version-7.1/start/containers/requirements/networking.mdx @@ -0,0 +1,86 @@ +--- +title: "Networking Requirements for Running RavenDB in Containers" +hide_table_of_contents: true +sidebar_label: Networking +sidebar_position: 2 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Networking Requirements for Running RavenDB in Containers + +## **Overview** + +Networking is a critical aspect of running RavenDB in containers. +Because RavenDB relies on both HTTP and TCP protocols for communication, +proper network configuration is essential to ensure healthy cluster node-to-node communication and client connectivity. +This article outlines the specific networking requirements and configurations for RavenDB in containerized environments. + + +## **Key Networking Concepts for RavenDB** + +#### Ports Used by RavenDB + +- **HTTP Port:** Used for client communication, management, and the RavenDB Studio. Default is `8080`. In production, RavenDB should use a secured, certificate-backed connection (HTTPS). +- **TCP Port:** Used for cluster communication between nodes. Default is `38888`. + +Both ports must be properly exposed and accessible for RavenDB to function correctly, especially in a cluster setup. +Additionally, RavenDB must be able to **reach itself**, e.g., for cluster health checks. + + +#### Cluster Communication + +- RavenDB nodes within a cluster use both HTTP and TCP ports to perform clustering and replication. See more here: [Clustering Overview](../../../server/clustering/overview.mdx) +- It is crucial that traffic between these ports can flow freely across all nodes in the cluster. + +#### PublicServerUrl + +- In containerized environments, `RAVEN_ServerUrl` should point at the network interface bound to the container itself. While this is sufficient for internal communication, external clients and nodes cannot access this address. +- The `RAVEN_PublicServerUrl` must be set to a **DNS name**, **public-facing IP**, or **routing mechanism** (e.g., load balancer or proxy) that directs traffic to the RavenDB container. This ensures external traffic can properly reach the RavenDB instance. + +**Common Issue**: Setting `RAVEN_PublicServerUrl` to `127.0.0.1` can cause confusion and connectivity problems. +In containerized environments, **`127.0.0.1` refers to the loopback address inside the container, not the host machine**. +This mismatch often leads to connectivity issues for external clients or nodes. + + +## **Networking Configuration for Containers** + +#### Expose the server ports + +Ensure both server ports are exposed and accessible internally. +Check firewall rules, security groups, port-forwarding configuration, network policies - anything that can block the traffic, depending on your setup. +Depending on your container runtime (e.g., Docker, Kubernetes), configure the ports to be published or mapped to the host machine or external network.
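+ +For example, a minimal sketch of publishing both default ports with plain Docker (the host-side ports are illustrative): + + +{`docker run -p 8080:8080 \\ + -p 38888:38888 \\ + ravendb/ravendb:ubuntu-latest +`}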
 + +#### Configure PublicServerUrl + +For external traffic, set `RAVEN_PublicServerUrl` to match the network visibility you require: + +- For **LAN** access: Use the machine’s LAN IP (e.g., 192.168.x.x) and ensure it is reachable by other devices on the local network. +- For **external internet** access: Use a public-facing DNS name or IP address, often routed via a load balancer or reverse proxy. +- For localhost **testing** only: Use the loopback address (127.0.0.1), but note that this will restrict access to clients inside the container itself. + +**Important Note**: The address set in `RAVEN_PublicServerUrl` determines how other systems (e.g., clients, other nodes, or users) perceive and reach the server. Ensure it aligns with your intended deployment network. + +#### Ensure node-to-node communication + +Nodes in a RavenDB cluster must communicate over both HTTP and TCP ports. +Networking configurations should allow unrestricted traffic flow between nodes to maintain cluster health. + +#### Ensure client communication + +Make sure your client traffic (including RavenDB Studio) reaches the server correctly. + +On local networks, ensure the DNS or IP address provided in `RAVEN_PublicServerUrl` resolves correctly within the LAN. + +On public networks, use an internet-facing IP, DNS name, load balancer, reverse proxy, or other mechanism to direct traffic to the container and ensure consistent routing. + +#### Secure Communication +Use encryption (TLS/SSL) to secure traffic, especially when exposing RavenDB to public networks. +See more in [Requirements > Security](./security). + + diff --git a/versioned_docs/version-7.1/start/containers/requirements/security.mdx b/versioned_docs/version-7.1/start/containers/requirements/security.mdx new file mode 100644 index 0000000000..318981664e --- /dev/null +++ b/versioned_docs/version-7.1/start/containers/requirements/security.mdx @@ -0,0 +1,86 @@ +--- +title: "Certificates & Renewal Requirements for Running RavenDB in Containers" +hide_table_of_contents: true +sidebar_label: Security +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Certificates & Renewal Requirements for Running RavenDB in Containers + + +## **Overview** + +Once networking works properly ([Containers > Requirements > Networking](./networking)), let's focus on the requirements for reliable and secure communication. +RavenDB uses X.509 certificate-based authentication for secured connections ([Security Overview](../../../server/security/overview.mdx)). + + +This page describes the crucial aspects of using certificates when running RavenDB in containers. + +## Getting the certificate +#### Setup Wizard +You can use the `Setup.Mode: Initial` configuration option to run the Setup Wizard if you need it. It can generate a Let's Encrypt certificate for you, create a setup package for your other nodes, and more. +Check what it's fully capable of here: [Setup Wizard Docs](../../installation/setup-wizard.mdx) + +The Setup Wizard can be a no-go in some containerized or orchestrated setups because of the quirks of the stateless, declarative container world.
+Fortunately, RavenDB is aware of that and provides many different ways to handle it, like certificate management script injection, to cover your case. + + +#### Generate it on your own +If the Setup Wizard is not an option, you'll need to generate the `.pfx` certificate yourself (e.g., using OpenSSL or tools like certbot that talk to Let's Encrypt). +After obtaining your certificate, configure the server to use it. + +## Providing a certificate +RavenDB needs to get its server certificate. You can configure its retrieval from one of these origins: + +- Path - A `.pfx` certificate stored under a path reachable from the container. + The **Security.Certificate.Path** configuration option defines the path. [Security Configuration - Security.Certificate.Path](../../../server/configuration/security-configuration.mdx#securitycertificatepath) + +- Script - A script that returns your certificate by any means. + It can obtain the certificate from container environment variables, a secured vault, a secret, etc. + The **Security.Certificate.Load.Exec** configuration option defines the script path. [Security Configuration - Security.Certificate.Load.Exec](../../../server/configuration/security-configuration.mdx#securitycertificateloadexec) + +These configuration options can be passed to RavenDB via `settings.json`, environment variables, or command-line arguments. +See more here: [Configuration Options](../../../server/configuration/configuration-options.mdx) + +This way, RavenDB is able to get its certificate. + +## Certificate expiration +Certificates must be renewed and replaced before they expire. + +#### Let's Encrypt - Setup Wizard +Let's Encrypt certificate management automation allows RavenDB to refresh your Let's Encrypt certificate automatically. +To enable it, set the `Setup.Mode` configuration option to `LetsEncrypt`. +Be aware that this automation works **only when using the Setup Wizard to obtain the Let's Encrypt certificate**, as RavenDB neither controls your domain nor can access the backend used in your certificate load script. +To learn more about this, visit this site: [RavenDB Let's Encrypt Certificates Docs](../../../server/security/authentication/lets-encrypt-certificates.mdx) + +You can learn about the different `Setup.Mode` values here: [Core Configuration - Setup.Mode](../../../server/configuration/core-configuration.mdx#setupmode) +You also need to provide the email address to be used for Let's Encrypt, via `Security.Certificate.LetsEncrypt.Email`. + +#### Manual +To configure manual certificate replacement and updates, write scripts and supply them via the RavenDB configuration options: + +- [Security.Certificate.Renew.Exec](../../../server/configuration/security-configuration.mdx#securitycertificaterenewexec) +- [Security.Certificate.Change.Exec](../../../server/configuration/security-configuration.mdx#securitycertificatechangeexec) + +This allows RavenDB to execute your routines, which should: + +- check whether the certificate is due for renewal, renew it if needed, and return it (**Renew**) +- replace the old certificate (**Change**) + +To learn about manual certificate replacement, read this article: [Certificate Renewal And Rotation](../../../server/security/authentication/certificate-renewal-and-rotation.mdx) + + +### Conclusion +Some of these approaches may not apply in your containerized case, especially when storing your certificates in a secured vault. +Be careful and pick the way of handling certificates that suits your case best.
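+ +For example, a path-based setup from the options above might look like this (a minimal sketch - the mount and file paths are illustrative, and the environment variable follows the usual `RAVEN_` mapping of configuration keys): + + +{`docker run -v /path/to/certs:/etc/ravendb/certs \\ + -e RAVEN_Security_Certificate_Path=/etc/ravendb/certs/server.pfx \\ + ravendb/ravendb:ubuntu-latest +`}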
+A good rule of thumb is to pick the least complex solution that works for you. +The easiest is to rely on the automatic renewal provided by RavenDB. +Writing your own scripts for getting, updating, and replacing certificates may require more effort but may also suit you better. + diff --git a/versioned_docs/version-7.1/start/containers/requirements/storage.mdx b/versioned_docs/version-7.1/start/containers/requirements/storage.mdx new file mode 100644 index 0000000000..b30785ea18 --- /dev/null +++ b/versioned_docs/version-7.1/start/containers/requirements/storage.mdx @@ -0,0 +1,55 @@ +--- +title: "Storage Requirements for Running RavenDB in Containers" +hide_table_of_contents: true +sidebar_label: Storage +sidebar_position: 4 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Storage Requirements for Running RavenDB in Containers + +## **Overview** + +RavenDB is a *database* and requires reliable and durable storage for its data. +This article focuses on describing the storage needs for **containerized environments**, +addressing the unique challenges and requirements of running RavenDB in containers. +Containers, being stateless, require robust storage configurations to persist RavenDB data across container restarts, upgrades, or failures. + +If you are looking for a broader understanding of RavenDB's storage mechanisms, please refer to the following articles: + +- [Storage Engine](../../../server/storage/storage-engine.mdx) +- [Directory Structure](../../../server/storage/directory-structure.mdx) + + +## **Why Storage Matters for RavenDB Containers** + +Containers encapsulate applications for consistency and portability but lack built-in mechanisms for persisting data. +As a database, RavenDB must store its data in a **persistent volume** or an equivalent storage solution to survive the ephemeral nature of containers. +Without proper configuration: + +- Data will be lost if the container is restarted or replaced. +- Performance may degrade due to suboptimal storage setups. +- Inconsistent behavior can arise during scaling, updates, or failover operations. + + +## **Requirements** + +1. **Volume Configuration** + - RavenDB requires a volume (or equivalent storage backend) to store its data files, journals, and indexes. + - The volume must be explicitly mounted into the container and made accessible to the `ravendb` process. + +2. **Permissions** + - The container runs with the `ravendb` user (`UID:GID 999:999`). + - Ensure the mounted storage has the correct read/write permissions for the `ravendb` user. + - If the volume is a slice of an already-existing file system, that file system must already have the correct permissions set for `ravendb` (`999:999`). + +3.
**Storage Backend Options for Containers** + - Host-mounted volumes + - Managed storage services like AWS EBS, Azure Disk, or Google Persistent Disks + diff --git a/versioned_docs/version-7.1/start/getting-started.mdx b/versioned_docs/version-7.1/start/getting-started.mdx new file mode 100644 index 0000000000..67ca7b775a --- /dev/null +++ b/versioned_docs/version-7.1/start/getting-started.mdx @@ -0,0 +1,563 @@ +--- +title: "Getting Started" +hide_table_of_contents: true +sidebar_label: Getting Started +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Getting Started + + + +Welcome to RavenDB! + +* This article will get you started with RavenDB, covering the essentials for a simple setup and basic usage. + +* The [Server section](../start/getting-started.mdx#server) covers installation, setup, and configuration of the RavenDB server: + * [Prerequisites](../start/getting-started.mdx#prerequisites) + * [Installation & Setup](../start/getting-started.mdx#installation--setup) + * [Configuration](../start/getting-started.mdx#configuration) + * [Studio](../start/getting-started.mdx#studio) + * [Security Concerns](../start/getting-started.mdx#security-concerns) + +* The [Client section](../start/getting-started.mdx#client) shows how to connect your application to the RavenDB server + and begin working with documents using a client library: + * [DocumentStore](../start/getting-started.mdx#documentstore) + * [Session](../start/getting-started.mdx#session) + + + +## Server + +Let's start by installing and configuring the server. To do that, first we need to download the server package +from the [downloads](https://ravendb.net/download) page. + +RavenDB is cross-platform with support for the following operating systems: + +- Windows x64 / x86 +- Linux x64 +- Docker +- macOS +- Raspberry Pi + +### Prerequisites + +RavenDB is written in `.NET`, so it requires the same set of prerequisites as `.NET`. + + + +Please install [Visual C++ 2015 Redistributable Package](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads) +(or newer) before launching the RavenDB server. +This package should be the sole requirement for Windows platforms. +If you're experiencing difficulties, please check the +prerequisites for .NET on Windows in this [Microsoft article](https://learn.microsoft.com/en-us/dotnet/core/install/windows?tabs=net70#dependencies). + + + + + +We highly recommend **updating** your **Linux OS** prior to launching the RavenDB server. +Also, please check the prerequisites for .NET on Linux in this [Microsoft article](https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#dependencies). + + + + + +We highly recommend **updating** your **macOS** and checking the prerequisites for .NET on macOS +in this [Microsoft article](https://learn.microsoft.com/en-us/dotnet/core/install/macos) before +launching the RavenDB server. + + + +### Installation & Setup + + + +We recommend setting up your cluster nodes on separate machines so that if one goes down, the others can keep the cluster active. + + + +1. Set up a server folder on each machine that will host the nodes in your cluster. + You may want to include the node designation (nodes A, B, C...)
in the name of each server folder, to prevent future confusion. + +2. Extract the server package into permanent server folders on each machine. + Each folder that contains an extracted server package will become a functional node in your cluster. + If you've set up on separate machines, go to step 3 below. + + +If you move this folder after installation, the server will not run. +You'll receive a 'System.InvalidOperationException: Unable to start the server.' error, because the server looks for the file paths that were set +during installation. If you must move your folder at a later time, you can [reconfigure the certificate file path](../server/security/authentication/certificate-configuration.mdx#standard-manual-setup-with-certificate-stored-locally) +in the `settings.json` file. + + +If you choose to use only one machine (although this will increase the chances of your cluster going down), you'll need to: + +1. Set up a parent folder in a permanent location for your installation package and server settings for the next steps. +2. Set up separate folders in the parent folder for each node and keep them in a safe place for future use. + ![Cluster Parent/Nodes Folder](./assets/Cluster-Parent-Nodes-Folders.png) + +3. Extract the [downloaded](https://ravendb.net/download) `RavenDB...zip` server package into each node folder. +4. If you want to install the cluster **as a service** (it will improve availability because it will automatically run in the background every time your +machine restarts), this simple step is done after the initial secure installation, whether via the Setup Wizard or manually. Read [Running as a Service](installation/running-as-service). +5. Start the [Setup Wizard](../start/installation/setup-wizard.mdx) by running `run.ps1` (or `run.sh` in Linux) in PowerShell, or [disable the 'Setup Wizard' and configure the server manually](../start/installation/manual.mdx). +![Running the Setup Wizard](./assets/run-ps1-with-PowerShell.png) + + + +If you are interested in hosting the server in a Docker container, please read our [dedicated knowledge base](../start/containers/dockerfile/dockerfile-overview.mdx). + + + + + +If you are interested in hosting the server on a VM, please refer to + +- [AWS Windows VM](../start/installation/setup-examples/aws-windows-vm.mdx) +- [AWS Linux VM](../start/installation/setup-examples/aws-linux-vm.mdx) + + + + + +If you want to test RavenDB without manual setup, try [RavenDB Cloud](https://cloud.ravendb.net). +We offer one free instance per customer. For more information, please read our [dedicated article](/cloud/cloud-overview). + + + + +When you first launch RavenDB, you will see this prompt asking if you'd be willing to +anonymously share some Studio usage data with us in order to help us improve RavenDB: + +![NoSQL Database Share Studio Usage](./assets/help-us-improve.png) + +Once you respond to this prompt, it should not appear again. However, in some scenarios, +such as running RavenDB embedded, or working without browser cookies, the prompt may +appear again. + +If necessary, you can add this flag to the Studio URL to prevent the prompt from +appearing: + +`#dashboard?disableAnalytics=true` + + + +### Configuration + +The RavenDB server uses a [settings.json](../server/configuration/configuration-options.mdx#settingsjson) file in each node `Server` folder to store the server-wide configuration options. +When starting a server, RavenDB will look for the `settings.json` file in the node `Server` folder, so it must be located there.
+The [Setup Wizard](../start/installation/setup-wizard.mdx) places it correctly automatically. + +After making changes to this file, a server restart is required for them to be applied. + +You can read more about the available configuration options in our [dedicated article](../server/configuration/configuration-options.mdx). + + + +The configuration file included in each RavenDB server distribution package is as follows: + + + +{`\{ + "ServerUrl": "http://127.0.0.1:0", + "Setup.Mode": "Initial", + "DataDir": "RavenData" +\} +`} + + + +This means that the server will run: + +- On `localhost` with a `random port` +- In `Setup Wizard` mode +- And store the data in the `RavenData` directory. + + + + + +In some cases, the port might be in use. This will prevent the server from starting, producing an "address in use" error (`EADDRINUSE`). + +The port can be changed by editing the `ServerUrl` value in the `settings.json` file. +For a list of IPs and ports already in use, run `netstat -a` in the command line. + + + + + +RavenDB requires write permissions to the following locations: + +- The folder where the RavenDB server is running (so that [settings.json](../server/configuration/configuration-options.mdx#settingsjson) can be updated by the [Setup Wizard](../start/installation/setup-wizard.mdx)) +- The data folder ([`DataDir`](../server/configuration/core-configuration.mdx#datadir) setting) +- The logs folder ([`Logs.Path`](../server/configuration/logs-configuration.mdx#logspath) setting) + +If you intend to run as a service, the write permissions should be granted to the user running the service (e.g. "Local Service"). + + + +### Studio + + + +Our GUI, the RavenDB Management Studio, comes **free** with **every license type**: + +- Community +- Professional +- Enterprise + + + +After installation and setup, the Studio can be accessed via the browser using the `ServerUrl` or the `PublicServerUrl` value, e.g. `http://localhost:8080`. +Whenever you run the server folder script `run.ps1`, the Studio opens automatically in your browser. + +### Security Concerns + +**We recommend using the 'Setup Wizard' to easily install RavenDB securely from the very start** to prevent potential future vulnerability. +[The process](../start/getting-started.mdx#installation--setup) in RavenDB only takes a few minutes and is free. + +To let a developer start coding an application quickly, RavenDB will run with the following default security mode: + + + +As long as the database is used inside the local machine and no outside connections are allowed, you can ignore security concerns, and no authentication is required. Once you set RavenDB to listen to connections outside your local machine, +your database will immediately block this now-vulnerable configuration and require the administrator to properly set up security and +access control to prevent unauthorized access to your data, or to explicitly allow the unsecured configuration. + + + +Read more about security and how to [enable authentication here](../server/security/overview.mdx). + +--- + +## Client + +Once the server is up and running, you can start building your application using one of RavenDB’s official client libraries. +These libraries manage the connection to the server, handle document storage and retrieval, and give you full access to RavenDB features in your programming language.
 + +Client libraries are available for the following languages: + +* .NET - `RavenDB.Client` on [NuGet](https://www.nuget.org/packages/RavenDB.Client/) +* Java - `ravendb` on [Maven Central](https://search.maven.org/#search%7Cga%7C1%7Cg%3A%22net.ravendb%22%20AND%20a%3A%22ravendb%22) +* Node.js - `ravendb` on [NPM](https://www.npmjs.com/package/ravendb) +* Python - `ravendb` on [PyPI](https://pypi.org/project/ravendb/) +* PHP - `ravendb-php-client` on [GitHub](https://github.com/ravendb/ravendb-php-client) +* Ruby - `ravendb-ruby-client` on [GitHub](https://github.com/ravendb/ravendb-ruby-client) +* Go - `ravendb-go-client` on [GitHub](https://github.com/ravendb/ravendb-go-client) + +### DocumentStore + +In order to start, you need to create an instance of the `DocumentStore` - the main entry point for your application, which is responsible for establishing and managing connections between a RavenDB server (or cluster) and your application. + + + +Before proceeding to the examples, we would like to point out that most of the articles use the `Northwind` database. You can read more about it and how to deploy it [here](../studio/database/tasks/create-sample-data.mdx). + + + + + + +{`using (IDocumentStore store = new DocumentStore +{ + Urls = new[] // URL to the Server, + { // or list of URLs + "http://live-test.ravendb.net" // to all Cluster Servers (Nodes) + }, + Database = "Northwind", // Default database that DocumentStore will interact with + Conventions = { } // DocumentStore customizations +}) +{ + store.Initialize(); // Each DocumentStore needs to be initialized before use. + // This process establishes the connection with the Server + // and downloads various configurations + // e.g. cluster topology or client configuration +} +`} + + + + +{`try (IDocumentStore store = new DocumentStore( + new String[]{ "http://live-test.ravendb.net" }, // URL to the Server, + // or list of URLs + // to all Cluster Servers (Nodes) + "Northwind") // Default database that DocumentStore will interact with +) { + + DocumentConventions conventions = store.getConventions(); // DocumentStore customizations + + store.initialize(); // Each DocumentStore needs to be initialized before use. + // This process establishes the connection with the Server + // and downloads various configurations + // e.g. cluster topology or client configuration +} +`} + + + + +{`import { DocumentStore } from "ravendb"; + +const store = new DocumentStore( + ["http://live-test.ravendb.net"], // URL to the Server + // or list of URLs + // to all Cluster Servers (Nodes) + + "Northwind"); // Default database that DocumentStore will interact with + +const conventions = store.conventions; // DocumentStore customizations + +store.initialize(); // Each DocumentStore needs to be initialized before use. + // This process establishes the connection with the Server + // and downloads various configurations + // e.g. cluster topology or client configuration + +store.dispose(); // Dispose the resources claimed by the DocumentStore +`} + + + + + + +The `DocumentStore` is capable of working with multiple databases. +For proper operation, we **recommend** having only one `DocumentStore` instance per application.
 + + +The following articles can extend your knowledge about the `DocumentStore` and its configuration: + +- [What is a Document Store?](../client-api/what-is-a-document-store.mdx) +- [How to Create a Document Store?](../client-api/creating-document-store.mdx) +- [How to Setup a Default Database?](../client-api/setting-up-default-database.mdx) +- [How to configure the Document Store using Conventions?](../client-api/configuration/conventions.mdx) + +### Session + +The `Session` is used to manipulate the data. It implements the `Unit of Work` pattern and is capable of batching the requests to save expensive remote calls. In contrast to the `DocumentStore`, it is a lightweight object and can be created more frequently. For example, in web applications, a common (and recommended) pattern is to create a session per request. + +### Example I - Storing + +RavenDB is a Document Database. All stored objects are called `documents`. Each document contains a **unique ID** that identifies it, as well as **data** and adjacent **metadata**, both stored in JSON format. The metadata contains information describing the document, e.g. the last modification date (`@last-modified` property) or the [collection](../client-api/faq/what-is-a-collection.mdx) (`@collection` property) assignment. + + + + +{`using (IDocumentSession session = store.OpenSession()) // Open a session for a default 'Database' +{ + Category category = new Category + { + Name = "Database Category" + }; + + session.Store(category); // Assign an 'Id' and collection (Categories) + // and start tracking an entity + + Product product = new Product + { + Name = "RavenDB Database", + Category = category.Id, + UnitsInStock = 10 + }; + + session.Store(product); // Assign an 'Id' and collection (Products) + // and start tracking an entity + + session.SaveChanges(); // Send to the Server + // one request processed in one transaction +} +`} + + + + +{`try (IDocumentSession session = store.openSession()) { // Open a session for a default 'Database' + Category category = new Category(); + category.setName("Database Category"); + + session.store(category); // Assign an 'Id' and collection (Categories) + // and start tracking an entity + + Product product = new Product(); + product.setName("RavenDB Database"); + product.setCategory(category.getId()); + product.setUnitsInStock(10); + + session.store(product); // Assign an 'Id' and collection (Products) + // and start tracking an entity + + session.saveChanges(); // Send to the Server + // one request processed in one transaction +} +`} + + + + +{`const session = store.openSession(); // Open a session for a default 'Database' + +const category = new Category("Database Category"); + +await session.store(category); // Assign an 'Id' and collection (Categories) + // and start tracking an entity + +const product = new Product( + "RavenDB Database", + category.Id, + 10); + +await session.store(product); // Assign an 'Id' and collection (Products) + // and start tracking an entity + +await session.saveChanges(); // Send to the Server + // one request processed in one transaction +`} + + + + +### Example II - Loading + +The `Session` was designed to help the user write efficient code easily. For example, when a document is being loaded (`.Load`) from the server, there is an option [to retrieve additional documents in the same request](../client-api/session/loading-entities.mdx#load-with-includes) (using `.Include`), minimizing the number of expensive calls.
 + +Besides that, the session implements the `Unit of Work` pattern, meaning that all **changes** to loaded entities are **automatically tracked**. The `SaveChanges` call will synchronize with the server **only the documents that have changed within the session**. All of those changes are **sent in one request (saving network calls)** and **processed in one transaction** (you can read why RavenDB is an [ACID database here](../client-api/faq/transaction-support.mdx)). + + + + +{`using (IDocumentSession session = store.OpenSession()) // Open a session for a default 'Database' +{ + Product product = session + .Include<Product>(x => x.Category) // Include Category + .Load(productId); // Load the Product and start tracking + + Category category = session + .Load<Category>(product.Category); // No remote calls, + // Session contains this entity from .Include + + product.Name = "RavenDB"; // Apply changes + category.Name = "Database"; + + session.SaveChanges(); // Synchronize with the Server + // one request processed in one transaction +} +`} + + + + +{`try (IDocumentSession session = store.openSession()) { // Open a session for a default 'Database' + Product product = session + .include("Category") // Include Category + .load(Product.class, productId); // Load the Product and start tracking + + Category category = session + .load(Category.class, // No remote calls, + product.getCategory()); // Session contains this entity from .include + + product.setName("RavenDB"); // Apply changes + category.setName("Database"); + + + session.saveChanges(); // Synchronize with the Server + // one request processed in one transaction +} +`} + + + + +{`const session = store.openSession(); // Open a session for a default 'Database' + +const product = await session + .include("Category") // Include Category + .load(productId); // Load the Product and start tracking + +const category = await session + .load(product.Category); // No remote calls, + // Session contains this entity from .include + +product.Name = "RavenDB"; // Apply changes +category.Name = "Database"; + +await session.saveChanges(); // Synchronize with the Server + // one request processed in one transaction +`} + + + + +### Example III - Querying + +To satisfy queries, [indexes](../indexes/what-are-indexes.mdx) are used. From the querying perspective, an index defines which document fields can be used to find a document. The whole indexing process is done asynchronously, which gives very quick querying response times, even when large amounts of data have been changed. However, an implication of this approach is that the index might be [stale](../indexes/stale-indexes.mdx). + +When no index is specified in the query (like in the query below), RavenDB will use its [intelligent auto-indexes](../indexes/creating-and-deploying.mdx#auto-indexes) feature, which will either use an already-existing index or create a new one if no match is found. + +The other option is to write the index yourself and deploy it to the server. Those indexes are called [Static Indexes](../indexes/creating-and-deploying.mdx#static-indexes). + +Behind the scenes, queries are translated to the Raven Query Language (RQL) syntax. Read more about RQL [here](../client-api/session/querying/what-is-rql.mdx).
 + + + +{`using (IDocumentSession session = store.OpenSession()) // Open a session for a default 'Database' +{ + List<string> productNames = session + .Query<Product>() // Query for Products + .Where(x => x.UnitsInStock > 5) // Filter + .Skip(0).Take(10) // Page + .Select(x => x.Name) // Project + .ToList(); // Materialize query +} +`} + + + + +{`try (IDocumentSession session = store.openSession()) { // Open a session for a default 'Database' + List<String> productNames = session + .query(Product.class) // Query for Products + .whereGreaterThan("UnitsInStock", 5) // Filter + .skip(0).take(10) // Page + .selectFields(String.class, "Name") // Project + .toList(); // Materialize query +} +`} + + + + +{`const session = store.openSession(); // Open a session for a default 'Database' + +const productNames = await session + .query({ collection: "Products" }) // Query for Products + .whereGreaterThan("UnitsInStock", 5) // Filter + .skip(0).take(10) // Page + .selectFields("Name") // Project + .all(); // Materialize query +`} + + + + +{`from Products +where UnitsInStock > 5 +select Name +`} + + + + + + diff --git a/versioned_docs/version-7.1/start/guides/_category_.json b/versioned_docs/version-7.1/start/guides/_category_.json new file mode 100644 index 0000000000..613ea27a37 --- /dev/null +++ b/versioned_docs/version-7.1/start/guides/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 9, + "label": "Guides" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/_category_.json b/versioned_docs/version-7.1/start/guides/aws-lambda/_category_.json new file mode 100644 index 0000000000..9a09256e94 --- /dev/null +++ b/versioned_docs/version-7.1/start/guides/aws-lambda/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 0, + "label": "AWS Lambda" +} \ No newline at end of file diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-iam-access-keys.jpg b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-iam-access-keys.jpg new file mode 100644 index 0000000000..d84895b738 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-iam-access-keys.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-iam-permissions.jpg b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-iam-permissions.jpg new file mode 100644 index 0000000000..aae10f3cb8 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-iam-permissions.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-iam-users.jpg b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-iam-users.jpg new file mode 100644 index 0000000000..24546fdecc Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-iam-users.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-lambda-env-vars-pem.jpg b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-lambda-env-vars-pem.jpg new file mode 100644 index 0000000000..bca2716032 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-lambda-env-vars-pem.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-lambda-env-vars.jpg b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-lambda-env-vars.jpg new file mode 100644 index 0000000000..bf3ea9fccc Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/aws-lambda-env-vars.jpg differ diff --git
a/versioned_docs/version-7.1/start/guides/aws-lambda/assets/dotnet-lambda-success.jpg b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/dotnet-lambda-success.jpg new file mode 100644 index 0000000000..122c25dcd2 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/dotnet-lambda-success.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/assets/dotnet-run.jpg b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/dotnet-run.jpg new file mode 100644 index 0000000000..01d6208a38 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/dotnet-run.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/assets/overview-gh-secrets.png b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/overview-gh-secrets.png new file mode 100644 index 0000000000..359181141d Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/overview-gh-secrets.png differ diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/assets/overview-gh-variables.png b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/overview-gh-variables.png new file mode 100644 index 0000000000..6d6b2ffc30 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/aws-lambda/assets/overview-gh-variables.png differ diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/deployment.mdx b/versioned_docs/version-7.1/start/guides/aws-lambda/deployment.mdx new file mode 100644 index 0000000000..5d310fc56b --- /dev/null +++ b/versioned_docs/version-7.1/start/guides/aws-lambda/deployment.mdx @@ -0,0 +1,48 @@ +--- +title: "Deployment Considerations" +hide_table_of_contents: true +sidebar_label: Deployment Considerations +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Deployment Considerations + +## Environment Variable Limitations + +AWS limits total environment variable size to 5KB and individual values to 4KB. +This presents an issue when dealing with certificates, as a full public/private +key pair typically exceeds 5KB. + +As a workaround, the template and guidance in the documentation suggest providing only +the PEM-encoded private key through an environment variable (around 3KB). + +For a full production implementation, it's recommended to use +[AWS Secrets Manager](../../../start/guides/aws-lambda/secrets-manager.mdx). + +## Document Store Lifetime and Cold Starts + +One of the benefits of serverless is that you can potentially lower costs for +less-used services and pay per invocation. As a trade-off, these functions incur +a startup cost known as a "cold start" before they can serve requests. + +The Document Store is meant to be instantiated once for the lifetime of an application. +However, cold vs. warm starts in serverless environments have some implications for this. + +In AWS Lambda, the document store will be shared across invocations of a function as long +as it remains warmed up. The time varies based on the runtime and is not controlled by the developer; +however, the document store will remain initialized, and you should not see an impact on latency. + +If a Lambda function is wound down, the next invocation will incur a cold start cost.
+The vast majority of cold start time is due to the Lambda runtime. Document store initialization +will not have a major impact on latency, as establishing the TCP & TLS connection is still quite fast. + +To reduce cold starts, learn more about [optimizing AWS Lambda functions][aws-lambda-optimization]. + +[aws-lambda-optimization]: https://aws.amazon.com/blogs/compute/operating-lambda-performance-optimization-part-1/ diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/existing-project.mdx b/versioned_docs/version-7.1/start/guides/aws-lambda/existing-project.mdx new file mode 100644 index 0000000000..5dc948fe5a --- /dev/null +++ b/versioned_docs/version-7.1/start/guides/aws-lambda/existing-project.mdx @@ -0,0 +1,436 @@ +--- +title: "Add RavenDB to an Existing AWS Lambda Project (.NET C#)" +hide_table_of_contents: true +sidebar_label: Adding to Existing Project +sidebar_position: 1 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Add RavenDB to an Existing AWS Lambda Project (.NET C#) + + +* **AWS Lambda** is a serverless platform that supports multiple languages and frameworks + that let you deploy workloads that scale without managing any infrastructure. + +* Learn more about [how AWS Lambda works][aws-lambda]. + +* In this guide, you will learn how to connect to RavenDB from your existing .NET AWS Lambda functions. + We assume you are familiar with .NET development techniques and the basics of AWS Lambda functions. + +* In this page: + * [Before We Get Started](#before-we-get-started) + * [Installing the RavenDB Client SDK](#installing-the-ravendb-client-sdk) + * [Initializing the Document Store](#initializing-the-document-store) + * [Adding Support for App Settings](#adding-support-for-app-settings) + * [Configuring Support for Certificates](#configuring-support-for-certificates) + * [Configuring AWS](#configuring-aws) + + + +## Before We Get Started + +You will need the following before continuing: + +- A [RavenDB Cloud][cloud-signup] account or self-hosted client certificate +- A local [AWS .NET development environment][aws-dotnet] set up + - _Recommended_: [AWS Toolkit for VS Code][aws-vs-code] + - _Recommended_: [AWS Toolkit for Visual Studio][aws-vs] +- [Amazon Lambda Tools package for .NET CLI][aws-dotnet-lambda] +- [.NET 6.x][ms-download-dotnet] + + +For a brand new AWS Lambda function, we recommend using the [RavenDB AWS Lambda .NET template](../../../start/guides/aws-lambda/overview.mdx) +which is set up with dependency injection, X.509/PEM certificate support, and AWS Secrets Manager integration. +You can also reference the template to see how the integration is set up. + + + + +## Installing the RavenDB Client SDK + +Get started by installing the [RavenDB.Client][nuget-ravendb-client] NuGet package in your solution +or project, which provides the .NET client SDK. + +Using the .NET CLI: + + + +{`dotnet add package RavenDB.Client +`} + + + + + +## Initializing the Document Store + +Import the `DocumentStore` from the `Raven.Client.Documents` namespace to create a new instance with the +required configuration and initialize your connection to RavenDB by calling the `Initialize` method.
 + + +{`using Raven.Client.Documents; + +var documentStore = new DocumentStore() \{ + Urls = new [] \{ "https://a.free.mycompany.ravendb.cloud" \}, + Database = "demo", + // Other options +\}; +documentStore.Initialize(); +`} + + + +For more on what options are available, see [Creating a Document Store][docs-creating-document-store]. + +### Set up dependency injection + +For AWS Lambda functions, it's recommended to configure the document store and document sessions with .NET dependency injection. +The easiest way is to use the community NuGet package [RavenDB.DependencyInjection][nuget-ravendb-di]: + + + +{`dotnet add package RavenDB.DependencyInjection +`} + + + +The pattern to set up dependency injection to inject an `IAsyncDocumentSession` only works reliably with +an [ASP.NET Core Lambda function][aws-dotnet-aspnetcore]. If you are not using the AWS Lambda ASP.NET Core +Hosting for .NET, you can still use a more traditional singleton `DocumentStoreHolder` pattern. + +In your `Program.cs`, add a using statement for `Raven.DependencyInjection`, which exposes two extension methods: + +- `IServiceCollection.AddRavenDbDocStore` +- `IServiceCollection.AddRavenDbAsyncSession` + +The resulting service configuration will look like this: + + + +{`// Requires a using statement +using Raven.DependencyInjection; + +var builder = WebApplication.CreateBuilder(args); + +// Configure injection for IDocumentStore +builder.Services.AddRavenDbDocStore(); + +// Configure injection for IAsyncDocumentSession +builder.Services.AddRavenDbAsyncSession(); + +builder.Services.AddControllers(); +builder.Services.AddEndpointsApiExplorer(); +builder.Services.AddSwaggerGen(); + +// Register Lambda to replace Kestrel as the web server for the ASP.NET Core application. +// If the application is not running in Lambda then this method will do nothing. +builder.Services.AddAWSLambdaHosting(LambdaEventSource.HttpApi); + +var app = builder.Build(); + +// Configure the HTTP request pipeline. +if (app.Environment.IsDevelopment()) +\{ + app.UseSwagger(); + app.UseSwaggerUI(); +\} + +app.UseHttpsRedirection(); + +app.UseAuthorization(); + +app.MapControllers(); + +app.Run(); +`} + + + +You can customize the options before they get passed down to the underlying `DocumentStore` with an overload: + + + +{`builder.Services.AddRavenDbDocStore(options => \{ + // ... + // Customize \`options\` + // ... + + options.Conventions.UseOptimisticConcurrency = true; +\}); +`} + + + + +In AWS Lambda, the instance will be shared across function invocations if the Lambda is warmed up; +otherwise, it will be constructed each time the function warms up. For more, see [Deployment Considerations][deployment-considerations]. + + +You can set options manually, but it's more likely you'll want to configure support for app settings. + + + +## Adding Support for App Settings + +You will need a way to pass options to the `DocumentStore` on your local machine and when deployed to AWS Lambda. + +The RavenDB.DependencyInjection package supports reading settings from `appsettings.json` for ASP.NET applications. +The default ASP.NET Core hosting also supports environment variable configuration. + +For more on the different configuration providers supported, see [Configuration in ASP.NET Core][ms-docs-aspnet-configuration].
 + +### Using JSON settings + +An example `appsettings.json` file that connects to the RavenDB live test cluster might look like: + + + +{`\{ + "Logging": \{ + "LogLevel": \{ + "Default": "Information", + "Microsoft.AspNetCore": "Warning" + \} + \}, + "AllowedHosts": "*", + "RavenSettings": \{ + "Urls": ["http://live-test.ravendb.net"], + "DatabaseName": "(not set)" + \} +\} +`} + + + +### Using environment variables + +Environment variables follow the .NET conventions, with `__` as the configuration section separator (e.g. `RavenSettings__DatabaseName` for `RavenSettings:DatabaseName`). + +You can pass environment variables in your terminal profile, OS settings, Docker `env`, on the command-line, or within AWS. + + + +## Configuring Support for Certificates + +RavenDB uses client certificate authentication (mutual TLS) to secure your database connection. +The .NET Client SDK supports `X509Certificate2`, which is passed to the `DocumentStore.Certificate` option. +There are multiple ways to load a certificate: + +- Load from .pfx files +- Load from PEM-encoded certificate +- Load from AWS Secrets Manager + +### Load from .pfx Files + +You can load PFX files with or without a password by providing the certificate path using `RavenSettings:CertFilePath`: + + + +{`\{ + "RavenSettings": \{ + "Urls": ["https://a.free.company.ravendb.cloud"], + "DatabaseName": "demo", + "CertFilePath": "..\\\\shared\\\\certs\\\\company.client.certificate.pfx" + \} +\} +`} + + + +The dependency injection logic will automatically load the certificate from this path without extra code. + +If the `.pfx` file requires a password, provide it using the .NET secrets tool by setting `RavenSettings:CertPassword`: + + + +{`dotnet user-secrets init +dotnet user-secrets set "RavenSettings:CertPassword" "" +`} + + + +However, keep in mind that using an absolute physical file path or a user secret requires manual configuration steps +for every developer working on the project. + + +PFX files can be compromised, especially if they are not password-protected. Using physical files also makes +certificates hard to manage and rotate when they expire. They are only recommended for ease-of-use on your local machine. +For production, it is better to use the PEM certificate method or AWS Secrets Manager. + + + +### Load from PEM-encoded certificate + +For AWS Lambda, it's recommended to use a PEM-encoded certificate that can be provided through an environment +variable without deploying any files. + +Unlike a `.pfx` file, a PEM-encoded certificate is plain-text encoded: + + + +{`-----BEGIN CERTIFICATE----- +MIIFCzCCAvO... +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAK... +-----END RSA PRIVATE KEY----- +`} + + + +AWS limits the size of an environment variable to 4KB with a 5KB limit for all variables. +To pass a PEM-encoded certificate, you will need to store the public key (`.crt` file) alongside your app +files and pass the private key contents through an environment variable like `RavenSettings__CertPrivateKey`. +The private key will be about 3KB, leaving about 2KB for other environment variables. + +On the client, you will have to assemble the certificate using the static `X509Certificate2.CreateFromPem(publicKey, privateKey)` method.
 + +Here is an example `Program.cs` that adds support for assembling a PEM certificate by adding +`RavenSettings:CertPublicKeyFilePath` and `RavenSettings:CertPrivateKey` configuration options: + + + +{`using System.Security.Cryptography.X509Certificates; +using Raven.DependencyInjection; + +var builder = WebApplication.CreateBuilder(args); + +builder.Services.AddRavenDbAsyncSession(); +builder.Services.AddRavenDbDocStore(options => + \{ + var certPrivateKey = builder.Configuration["RavenSettings:CertPrivateKey"]; + var certPublicKeyFilePath = builder.Configuration["RavenSettings:CertPublicKeyFilePath"]; + var usePemCert = !string.IsNullOrEmpty(certPrivateKey) && !string.IsNullOrEmpty(certPublicKeyFilePath); + + if (usePemCert) + \{ + var certPem = File.ReadAllText(certPublicKeyFilePath); + // Workaround ephemeral keys in Windows + // See: https://github.com/dotnet/runtime/issues/66283 + var intermediateCert = X509Certificate2.CreateFromPem(certPem, certPrivateKey); + var cert = new X509Certificate2(intermediateCert.Export(X509ContentType.Pfx)); + intermediateCert.Dispose(); + + options.Certificate = cert; + \} + \}); + +builder.Services.AddControllers(); +builder.Services.AddAWSLambdaHosting(LambdaEventSource.HttpApi); + +var app = builder.Build(); + +app.UseHttpsRedirection(); +app.UseAuthorization(); +app.MapControllers(); +app.Run(); +`} + + + +This supports using `.pfx` files or a PEM-encoded certificate, if provided. +It works around a [known issue](https://github.com/dotnet/runtime/issues/66283) in Windows with ephemeral keys. + +For a full reference implementation, view the code on the [template repository][gh-ravendb-template]. + +### Load from AWS Secrets Manager + +If you want to load your .NET configuration from AWS Secrets Manager, you can use the community package +[Kralizek.Extensions.Configuration.AWSSecretsManager][kralizek] to support securely loading certificates +instead of relying on production environment variables. + +[Learn more about configuring AWS Secrets Manager](secrets-manager) + + + +## Configuring AWS + +You will need to configure certificate authentication in AWS Lambda. Depending on the method you choose above, the steps vary. + +### Using Environment Variables + +Under your Lambda function, go to **Configuration > Environment** to edit your environment variables. + +![AWS environment variable settings](./assets/aws-lambda-env-vars.jpg) + +#### Specifying Path to Certificate Files + +If you are deploying a physical `.pfx` file, you can specify the `RavenSettings__CertFilePath` +and `RavenSettings__CertPassword` environment variables. + +If you are using a PEM-encoded certificate with the example code above, you would pass +a `RavenSettings__CertPublicKeyFilePath` environment variable (if it differs from your `appsettings.json` value). + +#### Specifying the PEM-encoded Private Key + +The `RavenSettings__CertPrivateKey` environment variable should be set to the contents of the `.key` file +from the RavenDB client certificate package. + +**Example value:** + + + +{`RavenSettings__CertPrivateKey=----- BEGIN RSA PRIVATE KEY ----- MIIJKA... +`} + + + +It will look like this in the AWS console: + +![AWS environment variable settings for PEM certificate private key](./assets/aws-lambda-env-vars-pem.jpg) + +When pasting into the AWS Console, line breaks will automatically be removed, and this should +still be parsed successfully with `X509Certificate2.CreateFromPem` without extra handling. + +These values will override `appsettings.json` once saved.
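+ +If you script your deployments, the same variables can also be set with the AWS CLI instead of the console - a minimal sketch (the function name and value are illustrative; note that `update-function-configuration` replaces the function's entire environment, so include every variable you need in one call): + + +{`aws lambda update-function-configuration \\ + --function-name my-ravendb-function \\ + --environment "Variables={RavenSettings__DatabaseName=demo}" +`}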
+ + + +## Next Steps + +- Learn more about [how to use the RavenDB .NET client SDK][docs-dotnet] +- Reference the [.NET AWS Lambda starter template][gh-ravendb-template] to see the code +- [Troubleshoot][troubleshooting] issues with RavenDB and AWS Lambda +- [Deployment Considerations][deployment-considerations] for RavenDB and AWS Lambda + + + +[troubleshooting]: ../../../start/guides/aws-lambda/troubleshooting +[deployment-considerations]: ../../../start/guides/aws-lambda/deployment +[cloud-signup]: https://cloud.ravendb.net?utm_source=ravendb_docs&utm_medium=web&utm_campaign=howto_template_lambda_dotnet_existing&utm_content=cloud_signup +[docs-dotnet]: ../../../client-api/session/what-is-a-session-and-how-does-it-work +[docs-creating-document-store]: ../../../client-api/creating-document-store +[gh-ravendb-template]: https://github.com/ravendb/templates/tree/main/aws-lambda/csharp-http +[aws-lambda]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html +[aws-dotnet]: https://aws.amazon.com/sdk-for-net/ +[aws-dotnet-lambda]: https://docs.aws.amazon.com/lambda/latest/dg/csharp-package-cli.html +[aws-dotnet-aspnetcore]: https://github.com/aws/aws-lambda-dotnet/tree/master/Libraries/src/Amazon.Lambda.AspNetCoreServer.Hosting +[aws-vs-code]: https://aws.amazon.com/visualstudiocode/ +[aws-vs]: https://aws.amazon.com/visualstudio/ +[ms-download-dotnet]: https://dotnet.microsoft.com/en-us/download/dotnet/6.0 +[ms-docs-aspnet-configuration]: https://learn.microsoft.com/en-us/aspnet/core/fundamentals/configuration/#configuration-providers +[nuget-ravendb-client]: https://www.nuget.org/packages/RavenDB.Client +[nuget-ravendb-di]: https://www.nuget.org/packages/RavenDB.DependencyInjection +[kralizek]: https://github.com/Kralizek/AWSSecretsManagerConfigurationExtensions + +### AWS +- [Lambda](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html) +- [dotnet Lambda](https://docs.aws.amazon.com/lambda/latest/dg/csharp-package-cli.html) +- [AWS VS Code](https://aws.amazon.com/visualstudiocode/) +- [VS](https://aws.amazon.com/visualstudio/) +- [Lambda Deploy](https://docs.aws.amazon.com/sdk-for-net/v3/developer-guide/deploying-lambda.html) +- [Template](https://github.com/ravendb/templates/tree/main/aws-lambda/csharp-http) + +### RavenDB +- [AWS Lambda: Overview](../../../start/guides/aws-lambda/overview.mdx) +- [Cloud Signup](https://cloud.ravendb.net?utm_source=ravendb_docs&utm_medium=web&utm_campaign=howto_template_lambda_csharp&utm_content=cloud_signup) +- [Get Started](../../../start/getting-started.mdx) +- [Client Certificates](../../../client-api/setting-up-authentication-and-authorization.mdx) +- [Session](../../../client-api/session/what-is-a-session-and-how-does-it-work.mdx) diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/overview.mdx b/versioned_docs/version-7.1/start/guides/aws-lambda/overview.mdx new file mode 100644 index 0000000000..bb63b03bcd --- /dev/null +++ b/versioned_docs/version-7.1/start/guides/aws-lambda/overview.mdx @@ -0,0 +1,601 @@ +--- +title: "Guides: AWS Lambda (.NET C#)" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Guides: AWS Lambda (.NET C#) + + +* **AWS Lambda** is a serverless platform that supports multiple 
languages and frameworks + that let you deploy workloads that scale without managing any infrastructure. + Learn more about [working with AWS Lambda][aws-lambda]. + +* In this guide, you will learn how to deploy a .NET C# Lambda Handler Function using the + [RavenDB AWS Lambda C# template][template] that is connected to your RavenDB database. + +* Watch our tutorial video [below](../../../start/guides/aws-lambda/overview.mdx#tutorial-video) + or [on YouTube](https://www.youtube.com/watch?v=T2r9sqrTrYE). + +* In this page: + * [Before We Get Started](../../../start/guides/aws-lambda/overview.mdx#before-we-get-started) + * [Create a Local Lambda Function](../../../start/guides/aws-lambda/overview.mdx#create-a-local-lambda-function) + * [Configuring Local Connection to RavenDB](../../../start/guides/aws-lambda/overview.mdx#configuring-local-connection-to-ravendb) + * [Deploying to AWS](../../../start/guides/aws-lambda/overview.mdx#deploying-to-aws) + * [Configuring Production Connection to RavenDB](../../../start/guides/aws-lambda/overview.mdx#configuring-production-connection-to-ravendb) + * [Verify the Connection Works](../../../start/guides/aws-lambda/overview.mdx#verify-the-connection-works) + * [Using RavenDB in the Lambda Function](../../../start/guides/aws-lambda/overview.mdx#using-ravendb-in-the-lambda-function) + * [Next Steps](../../../start/guides/aws-lambda/overview.mdx#next-steps) + * [Tutorial Video](../../../start/guides/aws-lambda/overview.mdx#tutorial-video) + + + +## Before We Get Started + +You will need the following before continuing: + +- A [RavenDB Cloud][cloud-signup] account or self-hosted client certificate +- A local [AWS .NET development environment][aws-dotnet] set up + - _Recommended_: [AWS Toolkit for VS Code][aws-vs-code] + - _Recommended_: [AWS Toolkit for Visual Studio][aws-vs] +- [Amazon Lambda Tools package for .NET CLI][aws-dotnet-lambda] +- [Git](https://git-scm.org) +- [.NET 6.x][download-dotnet] + +If you are new to AWS Lambda local development, see the [AWS Lambda Developer Guide][aws-lambda] +for how to get up and running with your toolchain of choice. + + + +## Create a Local Lambda Function + +The [RavenDB AWS Lambda C# template][template] is a template repository on GitHub which means +you can either create a new repository derived from the template or clone and push it to a new repository. + +This will set up a local Lambda C# function that we will deploy to your AWS account at the end of the guide. + +### Creating a New Repository from the Template + +Depending on your environment, there are several ways to clone the template and initialize a new Git repository. +The template repository lists each clone method you can copy & paste directly. + +**Using `npx` and the [degit][tool-degit] tool if you have Node.js installed:** + + + +{`npx degit ravendb/templates/aws-lambda/csharp-http my-project +cd my-project +git init +`} + + + +**Using Bash or PowerShell:** + + + +{`git clone https://github.com/ravendb/templates my-project +cd my-project +git filter-branch --subdirectory-filter aws-lambda/csharp-http +rm -rf .git # Bash +rm -r -force .git # PowerShell +git init +`} + + + +### Install Dependencies + +After cloning the repository locally, restore .NET dependencies with `dotnet`: + + + +{`dotnet restore +`} + + + +By default, the template is configured to connect to the Live Test instance of RavenDB. +Since this is only for testing purposes, next you will configure the app to connect to +your existing RavenDB database. 
+
+### Starting the Function
+
+You can start the Lambda function locally using:
+
+
+
+{`dotnet run
+`}
+
+
+
+If you are using Visual Studio Code, you can also debug the function with F5 debugging.
+
+You will see the welcome screen if the template is set up correctly:
+
+![.NET template welcome screen](./assets/dotnet-run.jpg)
+
+### Install AWS .NET tools
+
+You will need the .NET Global Tools for Lambda installed to perform the deployment steps later.
+
+Install the `Amazon.Lambda.Tools` package:
+
+
+
+{`dotnet tool install -g Amazon.Lambda.Tools
+`}
+
+
+
+Or make sure it's updated if you already have it:
+
+
+
+{`dotnet tool update -g Amazon.Lambda.Tools
+`}
+
+
+
+### Set Up Your Environment
+
+AWS libraries, SDKs, and this template rely on a couple of environment settings to work:
+your AWS credentials, stored in `~/.aws/credentials`, and the default AWS region to use.
+
+**Using the defaults file:** You can use the template's `aws-lambda-tools-defaults.json` to set your function's region:
+
+
+
+{`\{
+  ...
+  "region": "us-east-1",
+  ...
+\}
+`}
+
+
+
+**Using an environment variable:** Set the `AWS_REGION` environment variable in your terminal session or profile.
+
+Learn more about [setting up AWS credentials or the default AWS region][aws-dotnet-project-setup].
+
+
+
+
+## Configuring Local Connection to RavenDB
+
+To configure the local version of your AWS Lambda function to connect to RavenDB,
+you will need to update the `appsettings.json` file with the `RavenSettings.Urls`
+value and `RavenSettings.DatabaseName` value.
+
+An example `appsettings.json` connecting to the RavenDB live test cluster might look like:
+
+
+
+{`\{
+  "Logging": \{
+    "LogLevel": \{
+      "Default": "Information",
+      "Microsoft.AspNetCore": "Warning"
+    \}
+  \},
+  "AllowedHosts": "*",
+  "RavenSettings": \{
+    "Urls": ["http://live-test.ravendb.net"],
+    "DatabaseName": "demo",
+    "CertFilePath": "",
+    "CertPassword": ""
+  \}
+\}
+`}
+
+
+
+If using an authenticated RavenDB URL, you will need a local client certificate.
+Learn more about [configuring client authentication for RavenDB][docs-client-certs].
+
+### Using a PFX Certificate File
+
+To configure the local Lambda function to load a certificate from outside the project
+directory, specify the `RavenSettings.CertFilePath` and, optionally, the `RavenSettings.CertPassword` settings:
+
+
+
+{`\{
+  "Logging": \{
+    "LogLevel": \{
+      "Default": "Information",
+      "Microsoft.AspNetCore": "Warning"
+    \}
+  \},
+  "AllowedHosts": "*",
+  "RavenSettings": \{
+    "Urls": ["https://a.MYCOMPANY.ravendb.cloud"],
+    "DatabaseName": "MyDB",
+    "CertFilePath": "../certs/free.MYCOMPANY.client.certificate.without.password.pfx"
+  \}
+\}
+`}
+
+
+
+This will connect to the `a.MYCOMPANY.ravendb.cloud` RavenDB Cloud cluster using the local certificate file.
+The file path can be relative to the `.csproj` file or absolute.
+
+
+It is recommended to only use the PFX file locally, e.g. `free.MYCOMPANY.client.certificate.without.password.pfx`,
+and keep it outside your project directory. The template is configured by default to ignore it in Git and to never
+copy PFX files to the `bin` and `publish` folders.
+
+
+#### Using the password-protected PFX file
+
+If you prefer to use the password-protected PFX file, you can store the `CertPassword` using the
+[.NET User Secrets Tool][dotnet-user-secrets].
+However, keep in mind that your team will need this secret configured locally to use the PFX file.
+
+
+
+{`dotnet user-secrets init
+dotnet user-secrets set "RavenSettings:CertPassword" ""
+`}
+
+
+
+### Loading Configuration from AWS Secrets Manager
+
+The template uses [Kralizek.Extensions.Configuration.AWSSecretsManager][kralizek] to automatically
+load .NET configuration from AWS Secrets Manager to support securely loading certificates instead
+of relying on production environment variables. This has an added cost, but it may scale better for
+a large team and help you better manage the lifecycle of your certificates.
+
+The configuration will be loaded from AWS Secrets Manager if it exists; otherwise `appsettings.json` will be used.
+
+
+
+## Deploying to AWS
+
+At this point, the local Lambda app is ready to be deployed.
+There are four main ways to deploy your new AWS Lambda function:
+GitHub Actions, the .NET CLI, the AWS SDK CLI, or the AWS Toolkit extensions.
+
+The template has already been set up to use continuous deployment using GitHub Actions.
+For the other methods, see [Deploying AWS Lambda Functions][aws-lambda-deploy].
+
+However, we need to do a deployment manually for the first-time setup, such as setting
+the Function role and policy. Once it is set up, GitHub Actions will automatically deploy
+on new commits.
+
+Start by deploying your function manually using the .NET CLI:
+
+
+
+{`dotnet lambda deploy-function
+`}
+
+
+
+
+The function name should match the name of the `.csproj` file.
+
+
+The tool will walk you through the first-time deployment:
+
+- **Function IAM Role:** This can be the name of your function plus "Role", e.g. `RavenDBTemplateRole`
+- **Function Policy:** Choose `AWSLambdaBasicExecutionRole` to allow for basic AWS Lambda execution permissions
+
+### Create a deployment AWS Access Key
+
+If you do not have a code deployment user, create a new IAM user to be used by your GitHub automation (e.g. `gh_actions`).
+
+![AWS IAM users for deployment](./assets/aws-iam-users.jpg)
+
+You will need the following security policies (assigned to a group or user):
+
+- `AWSLambda_FullAccess`
+- `IAMReadOnlyAccess`
+
+![AWS IAM permissions for Lambda deployment](./assets/aws-iam-permissions.jpg)
+
+Once you have the user created, create and obtain an AWS access key specific to your GitHub Actions deployment workflow.
+
+![AWS access keys for Lambda deployment](./assets/aws-iam-access-keys.jpg)
+
+
+It is recommended to use a dedicated deployment IAM user with specific access policies for automated deployment
+through GitHub Actions.
+Ensure you don't store your AWS keys in plain text on your machine or elsewhere. They are password-equivalents.
+GitHub Secrets are encrypted and cannot be retrieved after being stored.
+
+
+### Configure GitHub Secrets and Variables
+
+The GitHub deployment workflow relies on having some specific secrets and variables set.
+
+#### Setting up Secrets
+
+1. Obtain or create an Access Key for the user
+1. Go to your [GitHub repository's secrets settings][gh-secrets]
+1. Add a new secret
+   - Name: `AWS_ACCESS_KEY_ID`
+   - Value: The access key ID
+1. Add a new secret
+   - Name: `AWS_SECRET_ACCESS_KEY`
+   - Value: The secret access key
+
+![Example of GitHub repository secrets set](./assets/overview-gh-secrets.png)
+
+#### Setting up Variables
+
+1. Go to your [GitHub repository's variables settings][gh-variables]
+1. Add or modify a variable
+   - Name: `AWS_LAMBDA_FUNCTION_NAME`
+   - Value: The name of your function used in the deploy command
+1. Add or modify a variable
+   - Name: `AWS_LAMBDA_FUNCTION_ROLE`
+   - Value: The IAM role name of the Function set when you first deployed
+1. If you are **not** using the `aws-lambda-tools-defaults.json` file to set the region, add or modify a variable:
+   - Name: `AWS_REGION`
+   - Value: The default region your Function will deploy to
+
+![Example of GitHub repository variables set](./assets/overview-gh-variables.png)
+
+### Trigger a Deployment
+
+Your repository and GitHub Action are now set up. To test the deployment, you can push a commit to the repository.
+
+If you have already committed and pushed, it is likely that the Action failed, and you can re-run the job using
+the new secret variable.
+Once deployed, using the default settings, the Function will connect to the Live Test database.
+
+### Changing Application Configuration for Production
+
+By default, configuration will be loaded from `appsettings.json`, but you will likely need
+different configuration once the Lambda function is deployed.
+
+
+
+## Configuring Production Connection to RavenDB
+
+To configure the production version of your AWS Lambda function to connect to RavenDB, you will
+need to override your app settings through environment variables or, optionally, using AWS Secrets Manager.
+
+![AWS environment variable settings](./assets/aws-lambda-env-vars.jpg)
+
+### Environment Variable Configuration
+
+The convention to override .NET app settings would look like:
+
+- `RavenSettings__Urls__0` -- Specify database URLs in array format (zero-indexed)
+- `RavenSettings__DatabaseName` -- Specify database name
+
+You only need to provide the environment variables you want to override in the `appsettings.json`.
+
+#### Using a PEM Certificate
+
+You will need to configure the client certificate to connect to an authenticated RavenDB cluster.
+If you are not using AWS Secrets Manager, you will need to use environment variables.
+There is a 5KB limit on the size of variables, which poses an issue for using certificate auth.
+To accommodate this, you will need to set `RavenSettings__CertPublicKeyFilePath` and `RavenSettings__CertPrivateKey`.
+
+First, copy your PEM-encoded `.crt` public key certificate to your project. It is safe to commit
+and deploy since it does not contain your private key. The template is configured to automatically
+copy `*.crt` files to your `bin` and `publish` directories.
+
+Specify the path to the file relative to your `.csproj`:
+
+
+
+{`\{
+  "RavenSettings": \{
+    "Urls": ["https://a.MYCOMPANY.ravendb.cloud"],
+    "DatabaseName": "MyDB",
+    "CertFilePath": "../certs/free.MYCOMPANY.client.certificate.without.password.pfx",
+    "CertPublicKeyFilePath": "free.MYCOMPANY.client.certificate.crt"
+  \}
+\}
+`}
+
+
+
+You can choose whether to set this in production through the `RavenSettings__CertPublicKeyFilePath`
+or in your `appsettings.json` file.
+
+The `RavenSettings__CertPrivateKey` environment variable should be set to the contents of the `.key`
+file from the RavenDB client certificate package.
+
+**Example value:**
+
+
+
+{`RavenSettings__CertPrivateKey=----- BEGIN RSA PRIVATE KEY ----- MIIJKA...
+`}
+
+
+
+It will look like this in the AWS console:
+
+![AWS environment variable settings for PEM certificate private key](./assets/aws-lambda-env-vars-pem.jpg)
+
+The template will automatically decode the value and construct a PEM certificate from
+these two settings using the .NET [X509Certificate2.CreateFromPem][dotnet-createfrompem] API.
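+
+If you ever need to assemble the certificate yourself, outside the template's dependency injection setup,
+the construction boils down to a single `CreateFromPem` call. Below is a minimal sketch assuming the two
+settings above are exposed as environment variables (an illustration, not the template's exact code):
+
+
+
+{`using System;
+using System.IO;
+using System.Security.Cryptography.X509Certificates;
+
+// Public key: a .crt file deployed alongside the app
+var certPem = File.ReadAllText(
+    Environment.GetEnvironmentVariable("RavenSettings__CertPublicKeyFilePath"));
+
+// Private key: passed through an environment variable
+var keyPem = Environment.GetEnvironmentVariable("RavenSettings__CertPrivateKey");
+
+// Combine both halves into a client certificate
+using var pemCert = X509Certificate2.CreateFromPem(certPem, keyPem);
+
+// Re-export as PFX to avoid ephemeral-key issues on Windows
+var clientCert = new X509Certificate2(pemCert.Export(X509ContentType.Pfx));
+`}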
+
+### AWS Secrets Manager Configuration (optional)
+
+The template uses [Kralizek.Extensions.Configuration.AWSSecretsManager][kralizek] to automatically
+load .NET configuration from AWS Secrets Manager to support securely loading certificates instead of
+relying on production environment variables.
+
+[Learn more about configuring AWS Secrets Manager](secrets-manager "Learn more about configuring AWS Secrets Manager")
+
+
+
+## Verify the Connection Works
+
+Once the environment variables are set up correctly, your Lambda function should authenticate
+successfully to your cluster.
+
+You should see a welcome screen like this with the connection information:
+
+![AWS Lambda welcome connected screen](./assets/dotnet-lambda-success.jpg)
+
+This means your Lambda function is correctly configured and ready to work with RavenDB.
+
+
+
+## Using RavenDB in the Lambda Function
+
+The template sets up a singleton `DocumentStore` and dependency injection for an
+`IAsyncDocumentSession` per handler invocation, which you can inject into Function classes.
+
+### Example: Injecting `IAsyncDocumentSession`
+
+Pass the `IAsyncDocumentSession` in the handler function using `[FromServices]`, which is
+available from the `Amazon.Lambda.Annotations` package:
+
+
+
+{`using System.Threading.Tasks;
+using Amazon.Lambda.Annotations;
+using Amazon.Lambda.Annotations.APIGateway;
+using Amazon.Lambda.Core;
+using Raven.Client.Documents.Session;
+
+[assembly: LambdaSerializer(typeof(
+  Amazon.Lambda.Serialization.SystemTextJson.DefaultLambdaJsonSerializer))]
+
+namespace RavenDBLambda;
+
+public class Functions
+\{
+  [LambdaFunction]
+  [HttpApi(LambdaHttpMethod.Get, "/")]
+  public async Task<string> FunctionHandler([FromServices]
+    IAsyncDocumentSession session, ILambdaContext context)
+  \{
+    var node = await session.Advanced.GetCurrentSessionNode();
+
+    return $"Successfully connected to RavenDB - Node \{node.ClusterTag\}";
+  \}
+\}
+`}
+
+
+
+### Example: Injecting `IDocumentStore`
+
+You can also inject an `IDocumentStore` to get a reference to the current store instance.
+For singleton references, inject using a public class constructor:
+
+
+
+{`using System.Threading.Tasks;
+using Amazon.Lambda.Annotations;
+using Amazon.Lambda.Annotations.APIGateway;
+using Amazon.Lambda.Core;
+using Raven.Client.Documents;
+
+[assembly: LambdaSerializer(typeof(
+  Amazon.Lambda.Serialization.SystemTextJson.DefaultLambdaJsonSerializer))]
+
+namespace RavenDBLambda;
+
+public class Functions
+\{
+
+  private readonly IDocumentStore _store;
+
+  public Functions(IDocumentStore store) \{
+    _store = store;
+  \}
+
+  [LambdaFunction]
+  [HttpApi(LambdaHttpMethod.Get, "/")]
+  public async Task<string> FunctionHandler(ILambdaContext context)
+  \{
+    // Access _store DocumentStore methods,
+    // e.g. return the configured database name
+    return _store.Database;
+  \}
+\}
+`}
+
+
+
+### Example: Loading a user
+
+
+
+{`using System.Threading.Tasks;
+using Amazon.Lambda.Annotations;
+using Amazon.Lambda.Annotations.APIGateway;
+using Amazon.Lambda.Core;
+using Raven.Client.Documents.Session;
+
+[assembly: LambdaSerializer(typeof(
+  Amazon.Lambda.Serialization.SystemTextJson.DefaultLambdaJsonSerializer))]
+
+namespace RavenDBLambda;
+
+public class Functions
+\{
+  [LambdaFunction]
+  [HttpApi(LambdaHttpMethod.Get, "/users/\{id\}")]
+  public async Task<User> FunctionHandler([FromServices]
+    IAsyncDocumentSession session, string id, ILambdaContext context)
+  \{
+    var user = await session.LoadAsync<User>("users/" + id);
+
+    return user;
+  \}
+\}
+`}
+
+
+
+
+
+## Next Steps
+
+* For more robust certificate handling, [configure AWS Secrets Manager support][docs-lambda-secrets]
+* Learn more about [deployment considerations](deployment) for RavenDB and AWS Lambda
+* Learn more about [using the RavenDB .NET client SDK][ravendb-dotnet]
+
+
+
+## Tutorial Video
+
+Watch our _How To Use AWS Lambda with RavenDB .NET_ tutorial:
+<iframe width="560" height="315" src="https://www.youtube.com/embed/T2r9sqrTrYE?si=mLHnncOBhcSbcJh5" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
+
+
+
+[download-dotnet]: https://dotnet.microsoft.com/en-us/download/dotnet/6.0
+[aws-lambda]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html
+[aws-dotnet]: https://aws.amazon.com/sdk-for-net/
+[aws-dotnet-project-setup]: https://docs.aws.amazon.com/sdk-for-net/v3/developer-guide/net-dg-config.html
+[aws-dotnet-lambda]: https://docs.aws.amazon.com/lambda/latest/dg/csharp-package-cli.html
+[aws-vs-code]: https://aws.amazon.com/visualstudiocode/
+[aws-vs]: https://aws.amazon.com/visualstudio/
+[aws-lambda-deploy]: https://docs.aws.amazon.com/sdk-for-net/v3/developer-guide/deploying-lambda.html
+[dotnet-user-secrets]: https://learn.microsoft.com/en-us/aspnet/core/security/app-secrets
+[dotnet-createfrompem]: https://learn.microsoft.com/en-us/dotnet/api/system.security.cryptography.x509certificates.x509certificate2.createfrompem?view=net-7.0
+[template]: https://github.com/ravendb/templates/tree/main/aws-lambda/csharp-http
+[gh-secrets]: https://docs.github.com/en/actions/security-guides/encrypted-secrets
+[gh-variables]: https://docs.github.com/en/actions/learn-github-actions/variables
+[cloud-signup]: https://cloud.ravendb.net?utm_source=ravendb_docs&utm_medium=web&utm_campaign=howto_template_lambda_csharp&utm_content=cloud_signup
+[docs-lambda-secrets]: ../../../start/guides/aws-lambda/secrets-manager
+[docs-get-started]: ../../../start/getting-started
+[docs-client-certs]: ../../../client-api/setting-up-authentication-and-authorization
+[ravendb-dotnet]: ../../../client-api/session/what-is-a-session-and-how-does-it-work
+[kralizek]: https://github.com/Kralizek/AWSSecretsManagerConfigurationExtensions
+[tool-base64]: https://www.base64encode.org/
+[tool-degit]: https://npmjs.com/package/degit
+
diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/secrets-manager.mdx b/versioned_docs/version-7.1/start/guides/aws-lambda/secrets-manager.mdx
new file mode 100644
index 0000000000..9c693d7d6e
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/aws-lambda/secrets-manager.mdx
@@ -0,0 +1,152 @@
+---
+title: "Using the AWS Secrets Manager"
+hide_table_of_contents: true
+sidebar_label: Using AWS Secrets Manager
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Using the AWS Secrets Manager
+
+The template supports using AWS Secrets Manager to store app configuration,
+including the X.509 certificate contents.
+
+
+While AWS Secrets Manager is the most secure way to load a client certificate,
+it does incur a cost. Learn more about [how much it will cost to store secrets][aws-secrets-pricing]
+for your application.
+
+If you do not wish to use this method, you can still use the PEM certificate
+option using environment variables through the `RavenSettings.CertPublicKeyFilePath`
+and `RavenSettings.CertPrivateKey` settings.
+
+
+Before continuing, make sure you have:
+
+- The [AWS CLI][aws-cli] installed
+- A configured AWS local environment
+- Your RavenDB client certificate with password (`.pfx` file) or PEM-encoded `.crt` and `.key` files
+- Your IAM role name used by your AWS Lambda function(s)
+- Your AWS account ID number
+
+## Storing RavenDB Secrets
+
+For `RavenSettings` values, you can use the **Key/Value** JSON storage
+using a secret named `RavenSettings` that the Lambda function will load.
+
+Learn more about [adding secrets to Secrets Manager][aws-secrets-mgr-add].
+
+There are two ways to specify certificates using `RavenSettings`:
+
+### CertPem: Store PEM Certificate in Secrets Manager
+
+The AWS template for RavenDB can load certificates through the
+`RavenSettings:CertPublicKeyFilePath` and `RavenSettings:CertPrivateKey`
+JSON configuration, supported through [X509Certificate2.CreateFromPem][dotnet-createfrompem].
+
+The `CertPublicKeyFilePath` JSON key should be set to the path of the `.crt`
+public key certificate, relative to the `.csproj` file. This file should be copied to the
+output and publish directories automatically.
+
+The `CertPrivateKey` JSON key should be set to a value containing the [base64-encoded][tool-base64]
+contents of the `.key` file from the RavenDB client certificate package.
+
+**Example `RavenSettings` key configuration value:**
+
+
+
+{`\{
+  // ... other settings
+  "CertPublicKeyFilePath": "free.mycompany.client.certificate.crt",
+  "CertPrivateKey": "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJLi4uCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0t"
+\}
+`}
+
+
+
+Settings will be _merged_ with `appsettings.json` configuration,
+so you only need to specify settings you wish to overwrite.
+
+### CertBytes: Store PFX Certificate in Secrets Manager
+
+The AWS template for RavenDB can also load certificates through the `CertBytes`
+secret setting.
+This means the client certificate needs to be stored in binary
+in AWS Secrets Manager. In the Secrets Manager console, you can add JSON and
+plaintext secrets. Binary secrets must be uploaded through the AWS CLI.
+
+
+When you enter commands into your terminal, the command history is at risk of being accessed.
+Learn more about [mitigating the risks of using the AWS CLI to store secrets][aws-secrets-mgr-cli].
+
+
+
+
+{`aws secretsmanager create-secret \\
+    --name RavenSettings.CertBytes \\
+    --description "RavenDB Client Certificate file" \\
+    --secret-binary file://free.mycompany.client.certificate.with.password.pfx
+`}
+
+
+
+We then need to grant access to the IAM role used by the Lambda function (created above).
+
+#### Apply a Resource Policy
+
+First, create a file `certpolicy.json` with the following AWS policy:
+
+
+
+{`\{
+  "Version": "2012-10-17",
+  "Statement": [
+    \{
+      "Effect": "Allow",
+      "Principal": \{
+        "AWS": "arn:aws:iam::<account-id>:role/<role-name>"
+      \},
+      "Action": "secretsmanager:GetSecretValue",
+      "Resource": "*"
+    \}
+  ]
+\}
+`}
+
+
+
+Replace `<account-id>` with your AWS account ID and `<role-name>` with the
+above-created role assigned to the Lambda function.
+
+Next, use the `aws secretsmanager put-resource-policy` command to set the resource policy
+while also verifying the secret is not broadly accessible:
+
+
+
+{`aws secretsmanager put-resource-policy \\
+    --secret-id RavenSettings.CertBytes \\
+    --resource-policy file://certpolicy.json \\
+    --block-public-policy
+`}
+
+
+
+The certificate file contents are now stored and will be accessed by the Lambda function on startup.
+
+### Verifying the Secret is Loaded
+
+Test invoking the Lambda function again, which should access AWS Secrets Manager successfully
+and load the X.509 certificate to use with RavenDB.
+
+
+
+[aws-cli]: https://aws.amazon.com/cli/
+[aws-secrets-pricing]: https://aws.amazon.com/secrets-manager/pricing/
+[aws-secrets-mgr-add]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/hardcoded.html
+[aws-secrets-mgr-cli]: https://docs.aws.amazon.com/secretsmanager/latest/userguide/security_cli-exposure-risks.html
+[dotnet-createfrompem]: https://learn.microsoft.com/en-us/dotnet/api/system.security.cryptography.x509certificates.x509certificate2.createfrompem?view=net-7.0
+[tool-base64]: https://www.base64encode.org/
+
diff --git a/versioned_docs/version-7.1/start/guides/aws-lambda/troubleshooting.mdx b/versioned_docs/version-7.1/start/guides/aws-lambda/troubleshooting.mdx
new file mode 100644
index 0000000000..b15f11b814
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/aws-lambda/troubleshooting.mdx
@@ -0,0 +1,71 @@
+---
+title: "Troubleshooting"
+hide_table_of_contents: true
+sidebar_label: Troubleshooting
+sidebar_position: 4
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Troubleshooting
+
+## Cannot find `AWSRegion` or `ServiceUrl`
+
+Your local AWS development environment is not completely set up.
+
+1. Check that you have [configured AWS Credentials][aws-credentials] (e.g. `~/.aws/credentials`)
+   - The AWS Toolkits for Visual Studio or Visual Studio Code will help you set these up
+1. Check that you have a default AWS Region specified in your environment
+   - Example: `$env:AWS_REGION = "us-east-1"` or `export AWS_REGION='us-east-1'`
+
+## Not authorized to perform: `<action>`
+
+The user profile used in your AWS credentials is missing an IAM policy.
+The default RavenDB Lambda template requires the following policies:
+
+
+If you have multiple AWS profiles, you can change the AWS profile used
+by setting the `$env:AWS_PROFILE` environment variable.
+
+
+Learn more about [configuring IAM user policies][aws-iam-policies].
+
+### Runtime policies
+
+* `SecretsManagerReadWrite` for accessing AWS Secrets Manager configuration to load a client certificate
+
+### Deployment policies
+
+To deploy your AWS Lambda functions, it's recommended to set up a dedicated deployment IAM user.
+
+This user will need the following policies set:
+
+* `AWSLambda_FullAccess` for local deployment
+* `IAMFullAccess` for local deployment and `IAMReadOnlyAccess` for hosted deployment
+
+## Environment Variables Exceed AWS Limits
+
+AWS limits the size of individual environment variables to 4KB, with a 5KB limit overall.
+This does not leave much room for passing public/private keypair values to your Lambda function.
+
+The template is built to support loading the PEM-encoded public key from the
+file system (`.crt` file), to be deployed alongside your app through the
+`RavenSettings:CertPublicKeyFilePath` app setting.
+The private key can be provided in plain text through the `RavenSettings:CertPrivateKey`
+app setting, which, using the .NET conventions, is the `RavenSettings__CertPrivateKey`
+environment variable.
+
+The private key should be around 3.1KB, which is under the 4KB limit,
+but you may still exceed the 5KB limit overall when combined with your
+other environment variables.
+
+If this is the case, we recommend [using the AWS Secrets Manager](secrets-manager)
+for storing your certificate instead, since it is not subject to the same
+limitations (and it is more secure and robust for production-scale usage).
+
+[aws-credentials]: https://docs.aws.amazon.com/sdk-for-net/v3/developer-guide/net-dg-config-creds.html
+[aws-iam-policies]: https://docs.aws.amazon.com/sdk-for-net/v3/developer-guide/net-dg-users-roles.html
+
diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/_category_.json b/versioned_docs/version-7.1/start/guides/azure-functions/_category_.json
new file mode 100644
index 0000000000..05452655e1
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/azure-functions/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 1,
+  "label": "Azure Functions"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/_existing-project-csharp.mdx b/versioned_docs/version-7.1/start/guides/azure-functions/_existing-project-csharp.mdx
new file mode 100644
index 0000000000..bf7cae21dc
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/azure-functions/_existing-project-csharp.mdx
@@ -0,0 +1,476 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Microsoft **Azure Functions** is a serverless platform that supports multiple
+  languages and frameworks that let you deploy workloads that scale without managing
+  any infrastructure.
+
+* Learn more about [how Microsoft Azure Functions work][az-funcs].
+
+* In this guide, you will learn how to connect to RavenDB from your existing C# Azure Functions.
+  We assume you are familiar with .NET development techniques and the basics of Azure Function apps.
+
+* In this page:
+  * [Before We Get Started](../../../start/guides/azure-functions/existing-project.mdx#before-we-get-started)
+  * [Installing the RavenDB Client SDK](../../../start/guides/azure-functions/existing-project.mdx#installing-the-ravendb-client-sdk)
+  * [Initializing the Document Store](../../../start/guides/azure-functions/existing-project.mdx#initializing-the-document-store)
+  * [Adding Support for App Settings](../../../start/guides/azure-functions/existing-project.mdx#adding-support-for-app-settings)
+  * [Configuring Support for Certificates](../../../start/guides/azure-functions/existing-project.mdx#configuring-support-for-certificates)
+  * [Configuring Azure](../../../start/guides/azure-functions/existing-project.mdx#configuring-azure)
+
+
+
+## Before We Get Started
+
+You will need the following before continuing:
+
+- A [RavenDB Cloud][cloud-signup] account or self-hosted client certificate
+- [Azure Function Core Tools][az-core-tools] 4.x+
+- [.NET 6.x][ms-download-dotnet]
+
+
+For a brand new Azure Functions app, we recommend using the [RavenDB Azure Functions .NET template](../../../start/guides/azure-functions/overview.mdx),
+which is set up with dependency injection and X.509 certificate support.
+You can also reference the template to see how the integration is set up.
+
+
+
+
+## Installing the RavenDB Client SDK
+
+Get started by installing the [RavenDB.Client][nuget-ravendb-client] Nuget package in your solution or project,
+which provides the .NET client SDK.
+
+Using the .NET CLI:
+
+
+
+{`dotnet add package RavenDB.Client
+`}
+
+
+
+
+
+## Initializing the Document Store
+
+Import the `DocumentStore` from the `Raven.Client.Documents` namespace to create a new instance with
+the required configuration, and initialize your connection to RavenDB by calling the `Initialize` method.
+
+
+
+{`using Raven.Client.Documents;
+
+var documentStore = new DocumentStore() \{
+    Urls = new [] \{ "https://a.free.mycompany.ravendb.cloud" \},
+    Database = "demo",
+    // Other options
+\};
+documentStore.Initialize();
+`}
+
+
+
+For more on what options are available, see [Creating a Document Store][docs-creating-document-store].
+
+### Set up dependency injection
+
+For Azure Function methods, it's recommended to configure the document store and document
+sessions with .NET dependency injection.
The easiest way is to use the community Nuget package +[RavenDB.DependencyInjection][nuget-ravendb-di]: + + + +{`dotnet add package RavenDB.DependencyInjection +`} + + + +The pattern to set up dependency injection to inject an `IAsyncDocumentSession` with Azure +Functions differs depending on whether your C# functions are running: + +- Follow the [in-process DI guide][az-func-di-ip] for C# class library functions +- Follow the [out-of-process DI guide][az-func-di-oop] for .NET isolated functions + +Once set up with the appropriate configuration, add a using statement for `Raven.DependencyInjection` +which exposes two extension methods: + +- `IServiceCollection.AddRavenDbDocStore` +- `IServiceCollection.AddRavenDbAsyncSession` + +The resulting service configuration will look like this: + + + +{`// Requires a using statement +using Raven.DependencyInjection; + +// Configure injection for IDocumentStore +services.AddRavenDbDocStore(); + +// Configure injection for IAsyncDocumentSession +services.AddRavenDbAsyncSession(); +`} + + + +You can customize the options before they get passed down to the underlying `DocumentStore` with an overload: + + + +{`services.AddRavenDbDocStore(options => \{ + // ... + // Customize \`options\` + // ... + + options.Conventions.UseOptimisticConcurrency = true; +\}); +`} + + + + +In Azure Functions, the instance will be shared across function invocations if the Function is warmed up, +otherwise it will be constructed each time the function warms up. For more, see [Deployment Considerations][deployment-considerations]. + + +You can set options manually but it's more likely you'll want to configure support for app settings. + + + +## Adding Support for App Settings + +You will need a way to pass options to the `DocumentStore` on your local machine and when deployed to Azure. + +The RavenDB.DependencyInjection package supports reading settings from `appsettings.json` for ASP.NET +applications but Azure Function apps require some manual setup. To support Azure app settings, you will +also need to add support to override those settings through environment variables by using `Microsoft.Extensions.Configuration`. + +Within your `FunctionsStartup` class, override the `ConfigureAppConfiguration` method to customize how the configuration is read. 
+
+Here's an example startup file for an in-process C# Azure Function app:
+
+
+
+{`using System;
+using System.IO;
+using Microsoft.Azure.Functions.Extensions.DependencyInjection;
+using Microsoft.Extensions.Configuration;
+using Raven.DependencyInjection;
+
+[assembly: FunctionsStartup(typeof(Company.FunctionApp.Startup))]
+
+namespace Company.FunctionApp;
+
+public class Startup : FunctionsStartup
+\{
+  public override void Configure(IFunctionsHostBuilder builder)
+  \{
+    builder.Services.AddRavenDbDocStore();
+    builder.Services.AddRavenDbAsyncSession();
+  \}
+
+  public override void ConfigureAppConfiguration(IFunctionsConfigurationBuilder builder)
+  \{
+    FunctionsHostBuilderContext context = builder.GetContext();
+
+    builder.ConfigurationBuilder
+      // Add support for appsettings.json
+      .AddJsonFile(Path.Combine(context.ApplicationRootPath, "appsettings.json"), optional: true, reloadOnChange: false)
+      // Add support for appsettings.ENV.json
+      .AddJsonFile(Path.Combine(context.ApplicationRootPath, $"appsettings.\{context.EnvironmentName\}.json"), optional: true, reloadOnChange: false)
+      // Allow environment variables to override
+      .AddEnvironmentVariables();
+  \}
+\}
+`}
+
+
+
+For more on the different configuration providers supported, see [Configuration in ASP.NET Core][ms-docs-aspnet-configuration].
+
+### Using JSON settings
+
+An example `appsettings.json` file may look like this:
+
+
+
+{`\{
+  "RavenSettings": \{
+    "Urls": ["https://a.free.company.ravendb.cloud"],
+    "DatabaseName": "demo"
+  \}
+\}
+`}
+
+
+
+### Using environment variables
+
+Environment variables follow the .NET conventions with `__` being the dot-notation separator (e.g. `RavenSettings__DatabaseName`).
+
+You can pass environment variables in your terminal profile, OS settings, Docker `env`, on the command-line, or within Azure.
+
+
+
+## Configuring Support for Certificates
+
+RavenDB uses client certificate authentication (mutual TLS) to secure your database connection. The .NET Client SDK
+supports `X509Certificate2`, which is passed to the `DocumentStore.Certificate` option. There are multiple ways to load a certificate:
+
+- Load from .pfx files
+- Load from Certificate Store by thumbprint
+- Load from Azure Key Vault
+
+### Load from .pfx Files
+
+You can load PFX files with or without a password by providing the certificate path using `RavenSettings:CertFilePath`:
+
+
+
+{`\{
+  "RavenSettings": \{
+    "Urls": ["https://a.free.company.ravendb.cloud"],
+    "DatabaseName": "demo",
+    "CertFilePath": "..\\\\shared\\\\certs\\\\company.client.certificate.pfx"
+  \}
+\}
+`}
+
+
+
+The dependency injection logic will automatically load the certificate from this path without extra code.
+
+If the `.pfx` file requires a password, provide it using the .NET secrets tool by setting `RavenSettings:CertPassword`:
+
+
+
+{`dotnet user-secrets init
+dotnet user-secrets set "RavenSettings:CertPassword" ""
+`}
+
+
+
+However, keep in mind that an absolute physical file path or a user secret requires manual configuration
+by every developer working on the project.
+
+
+PFX files can be compromised, especially if they are not password-protected. Using a physical file also makes
+certificates hard to manage and rotate when they expire. They are only recommended for ease-of-use on your local machine.
+For production, it is better to use the Certificate Store method or Azure Key Vault.
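+
+For reference, the PFX loading that the dependency injection logic performs for you boils down to a single
+constructor call. A minimal sketch, assuming the path and `<cert-password>` placeholders from the settings
+above (an illustration, not the package's actual implementation):
+
+
+
+{`using System.Security.Cryptography.X509Certificates;
+
+// Load a client certificate from a .pfx file; the password argument
+// can be omitted for a password-less PFX file.
+var certificate = new X509Certificate2(
+    "..\\\\shared\\\\certs\\\\company.client.certificate.pfx",
+    "<cert-password>");
+
+// When configuring the store manually, assign it to the options:
+// options.Certificate = certificate;
+`}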
+ + +### Load from Certificate Store by Thumbprint + +For .NET-based Azure Functions, it's recommended to use the Windows Certificate Store since you can upload +a password-protected .pfx file to the Azure Portal and load it programmatically without deploying any files. + +On your local machine, you can import a certificate on Windows by right-clicking the `.pfx` file and adding +it to your Current User store (`CurrentUser\My`): + +![Windows certificate import wizard](./assets/dotnet-certificate-install.jpg) + +The certificate thumbprint is displayed in the details when viewing the certificate information: + +![Windows certificate thumbprint](./assets/dotnet-certificate-thumbprint.jpg) + +You can also install and view certificates using PowerShell through the +[Import-PfxCertificate][ms-powershell-import-pfxcert] and [Get-Certificate][ms-powershell-get-cert] cmdlets. + +To specify the thumbprint you can add a new `RavenSettings:CertThumbprint` setting: + + + +{`\{ + "RavenSettings": \{ + "Urls": ["https://a.free.mycompany.ravendb.cloud"], + "DatabaseName": "company_db", + "CertThumbprint": "" + \} +\} +`} + + + +Update your `DocumentStore` initialization to load the certificate by its thumbprint using the +`IConfiguration.GetSection` method to retrieve it when building options. +The [X509Store][ms-docs-x509store] can be used to find certificates by thumbprint. +In Azure, certificates will be stored in the `CurrentUser\My` cert store. + +Here is how the starter template adds support for loading certificates by thumbprint: + + + +{`using System; +using System.IO; +using System.Security.Cryptography.X509Certificates; +using Microsoft.Azure.Functions.Extensions.DependencyInjection; +using Microsoft.Extensions.Configuration; +using Raven.DependencyInjection; + +[assembly: FunctionsStartup(typeof(Company.FunctionApp.Startup))] + +namespace Company.FunctionApp; + +public class Startup : FunctionsStartup +\{ + public override void Configure(IFunctionsHostBuilder builder) + \{ + var context = builder.GetContext(); + + builder.Services.AddRavenDbDocStore(options => + \{ + var certThumbprint = context.Configuration.GetSection("RavenSettings:CertThumbprint").Value; + + if (!string.IsNullOrWhiteSpace(certThumbprint)) + \{ + var cert = GetRavendbCertificate(certThumbprint); + + options.Certificate = cert; + \} + \}); + + builder.Services.AddRavenDbAsyncSession(); + \} + + private static X509Certificate2 GetRavendbCertificate(string certThumb) + \{ + X509Store certStore = new X509Store(StoreName.My, StoreLocation.CurrentUser); + certStore.Open(OpenFlags.ReadOnly); + + X509Certificate2Collection certCollection = certStore.Certificates + .Find(X509FindType.FindByThumbprint, certThumb, false); + + // Get the first cert with the thumbprint + if (certCollection.Count > 0) + \{ + X509Certificate2 cert = certCollection[0]; + return cert; + \} + + certStore.Close(); + + throw new Exception($"Certificate \{certThumb\} not found."); + \} +\} +`} + + + +This will only load by thumbprint if it is specified, meaning that you can still load by a physical +`.pfx` path locally if you choose. On Azure, follow the steps below to upload a certificate. + +### Load from Azure Key Vault + +[Azure Key Vault][ms-az-key-vault] is a paid service that allows you to store, retrieve, and rotate +encrypted secrets including X.509 Certificates. This is recommended for more robust certificate handling. + +Using the [Azure Key Vault configuration provider][ms-az-key-vault-configuration], you can load `RavenSettings` +from Key Vault. 
+However, you will need to use the [CertificateClient][ms-az-key-vault-cert-client] to retrieve a certificate from the vault. + +For more, see the [sample code for using CertificateClient][ms-az-key-vault-sample-get]. + + + +## Configuring Azure + +You will need to configure certificate authentication in Azure. Depending on the method you choose above, the steps vary. + +### Specifying Path to Certificate + +If you are deploying a physical `.pfx` file, you can specify the `RavenSettings__CertFilePath` +and `RavenSettings__CertPassword` app settings. + +### Upload Your Client Certificate (.pfx) + +If you are loading a certificate by its thumbprint from the Certificate Store, follow the +steps below to make your uploaded `.pfx` certificate available to your Azure Functions: + +![.NET upload certificate to Azure](./assets/dotnet-azure-upload-cert.jpg) + +1. Go to your Azure Functions dashboard in the Portal +1. Click "Certificates" +1. Click the "Bring Your Own Certificate" tab +1. Click "+ Add Certificate" button +1. Upload the RavenDB client certificate (PFX) file +1. Enter the certificate password +1. Once uploaded, click the certificate to view details +1. Copy the "Thumbprint" for the next step + + +The Azure portal will only use the certificate password once on upload. You will not need the password +in your Functions App, only the public thumbprint. You can safely delete the password from your device +once the certificate is uploaded in the Portal so as not to risk it being discovered. + + +### Configure Application Settings + +![.NET update Azure app settings](./assets/dotnet-azure-app-settings.jpg) + +1. Go to your Azure Functions dashboard in the Portal +1. Click the Application Settings menu +1. Modify or add app setting for `WEBSITE_LOAD_CERTIFICATES` to the certificate thumbprint you copied + + ![.NET WEBSITE_LOAD_CERTIFICATES example](./assets/dotnet-azure-website-load-certificates.jpg) + +1. Modify or add app setting for `RavenSettings__CertThumbprint` with the certificate thumbprint you copied + + ![.NET WEBSITE_LOAD_CERTIFICATES example](./assets/dotnet-azure-ravensettings__certthumbprint.jpg) + +1. Modify or add app setting for `RavenSettings__Urls` with the comma-separated list of RavenDB node URLs to connect to +1. Modify or add an app setting for `RavenSettings__DatabaseName` with the database name to connect to + +These values will override `appsettings.json` once deployed on Azure. + + +`WEBSITE_LOAD_CERTIFICATES` makes any specified certificates available in the Windows Certificate Store +under the `CurrentUser\My` location. +You can use the wildcard value `*` for `WEBSITE_LOAD_CERTIFICATES` to load ALL uploaded certificates for +your Function App. However, it's recommended to be specific and use comma-separated thumbprints so that only +allowed certificates are made available. +This avoids accidentally exposing a certificate to the application that isn't explicitly used. + + + +The `WEBSITE_LOAD_CERTIFICATES` setting is not supported yet for Linux-based consumption plans. +To use this method, you will need to use a Windows-based plan. 
+ + + + +## Next Steps + +- Learn more about [how to use the RavenDB .NET client SDK][docs-dotnet] +- Reference the [.NET Azure Function starter template][gh-ravendb-template] to see the code +- [Troubleshoot][troubleshooting] issues with RavenDB and Azure Functions +- [Deployment Considerations][deployment-considerations] for RavenDB and Azure Functions + + + +[troubleshooting]: ../../../start/guides/azure-functions/troubleshooting +[deployment-considerations]: ../../../start/guides/azure-functions/deployment +[cloud-signup]: https://cloud.ravendb.net?utm_source=ravendb_docs&utm_medium=web&utm_campaign=howto_template_azurefns_dotnet_existing&utm_content=cloud_signup +[docs-dotnet]: ../../../client-api/session/what-is-a-session-and-how-does-it-work +[docs-creating-document-store]: ../../../client-api/creating-document-store +[gh-ravendb-template]: https://github.com/ravendb/templates/tree/main/azure-functions/csharp-http +[az-funcs]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-get-started +[az-core-tools]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-run-local +[az-func-di-ip]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-dotnet-dependency-injection +[az-func-di-oop]: https://learn.microsoft.com/en-us/azure/azure-functions/dotnet-isolated-process-guide#dependency-injection +[ms-download-dotnet]: https://dotnet.microsoft.com/en-us/download/dotnet/6.0 +[ms-az-key-vault]: https://learn.microsoft.com/en-us/azure/key-vault/ +[ms-az-key-vault-configuration]: https://learn.microsoft.com/en-us/aspnet/core/security/key-vault-configuration +[ms-az-key-vault-cert-client]: https://learn.microsoft.com/en-us/dotnet/api/overview/azure/security.keyvault.certificates-readme +[ms-az-key-vault-sample-get]: https://github.com/Azure/azure-sdk-for-net/blob/0e2399f030aa365c13fcd06f891a57ee9154fc60/sdk/keyvault/Azure.Security.KeyVault.Certificates/samples/Sample1_HelloWorld.md +[ms-powershell-get-cert]: https://learn.microsoft.com/en-us/powershell/module/pki/get-certificate +[ms-powershell-import-pfxcert]: https://learn.microsoft.com/en-us/powershell/module/pki/import-certificate +[ms-docs-aspnet-configuration]: https://learn.microsoft.com/en-us/aspnet/core/fundamentals/configuration/#configuration-providers +[ms-docs-x509store]: https://learn.microsoft.com/en-us/dotnet/api/system.security.cryptography.x509certificates.x509store +[nuget-ravendb-client]: https://www.nuget.org/packages/RavenDB.Client +[nuget-ravendb-di]: https://www.nuget.org/packages/RavenDB.DependencyInjection + + diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/_existing-project-nodejs.mdx b/versioned_docs/version-7.1/start/guides/azure-functions/_existing-project-nodejs.mdx new file mode 100644 index 0000000000..39a581b8b6 --- /dev/null +++ b/versioned_docs/version-7.1/start/guides/azure-functions/_existing-project-nodejs.mdx @@ -0,0 +1,416 @@ +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + + + +* Microsoft **Azure Functions** is a serverless platform that supports multiple + languages and frameworks that let you deploy workloads that scale without managing + any infrastructure. + +* Learn more about [how Microsoft Azure Functions work][az-funcs]. + +* In this guide, you will learn how to connect to RavenDB from your existing Node.js Azure Functions. 
+  We assume you are familiar with Node.js development techniques and the basics of Azure Function apps.
+
+* In this page:
+  * [Before We Get Started](../../../start/guides/azure-functions/existing-project.mdx#before-we-get-started)
+  * [Installing the RavenDB Client SDK](../../../start/guides/azure-functions/existing-project.mdx#installing-the-ravendb-client-sdk)
+  * [Initializing the Document Store](../../../start/guides/azure-functions/existing-project.mdx#initializing-the-document-store)
+  * [Adding Support for App Settings](../../../start/guides/azure-functions/existing-project.mdx#adding-support-for-app-settings)
+  * [Configuring Support for Certificates](../../../start/guides/azure-functions/existing-project.mdx#configuring-support-for-certificates)
+  * [Configuring Azure](../../../start/guides/azure-functions/existing-project.mdx#configuring-azure)
+
+
+
+## Before We Get Started
+
+You will need the following before continuing:
+
+- A [RavenDB Cloud][cloud-signup] account or self-hosted client certificate
+- [Azure Function Core Tools][az-core-tools] 4.x+
+- [Node.js][nodejs] 18+
+
+
+For a brand new Azure Functions app, we recommend using the [RavenDB Azure Functions Node.js template](../../../start/guides/azure-functions/overview.mdx),
+which is set up with PEM certificate support.
+You can also reference the template to see how the integration is set up.
+
+
+
+
+## Installing the RavenDB Client SDK
+
+Get started by installing the [ravendb][npm-ravendb-client] npm package in your project, which provides the Node.js client SDK.
+
+Using npm:
+
+
+
+{`npm install ravendb
+`}
+
+
+
+
+
+## Initializing the Document Store
+
+Import the `DocumentStore` from the `ravendb` package to create a new instance with the required
+configuration and initialize your connection to RavenDB by calling the `initialize` method.
+You can then export a function to initialize a document session to use in your Azure functions.
+
+Example `db.js` Node module:
+
+
+
+{`import \{ DocumentStore \} from "ravendb";
+
+const documentStore = new DocumentStore(
+  ["https://a.free.mycompany.ravendb.cloud"],
+  "demo",
+  // authOptions
+);
+
+var initialized = false;
+
+function initialize() \{
+  if (initialized) return;
+  documentStore.initialize();
+  initialized = true;
+\}
+
+export function openAsyncSession() \{
+  if (!initialized) \{
+    initialize();
+  \}
+
+  return documentStore.openSession();
+\}
+`}
+
+
+
+For more on what options are available, see [Creating a Document Store][docs-creating-document-store].
+
+
+In Azure Functions, the instance will be shared across function invocations if the Function is warmed up;
+otherwise it will be constructed each time the function warms up. For more, see [Deployment Considerations][deployment-considerations].
+
+
+You can set options manually, but it's more likely you'll want to configure support for app settings.
+
+
+
+## Adding Support for App Settings
+
+You will need a way to pass options to the `DocumentStore` on your local machine and when deployed to Azure.
+
+Node.js Azure Functions support a `local.settings.json` file which you can use to add additional settings locally.
+For example:
+
+
+
+{`\{
+  "IsEncrypted": false,
+  "Values": \{
+    "AzureWebJobsStorage": "",
+    "FUNCTIONS_WORKER_RUNTIME": "node",
+    "DB_URLS": "https://a.free.company.ravendb.cloud",
+    "DB_NAME": "demo",
+    "DB_CERT_PATH": "../certs/company.client.certificate.pfx"
+  \}
+\}
+`}
+
+
+
+You can then load environment variables through `process.env`:
+
+
+
+{`import \{ readFileSync \} from "fs";
+import \{ DocumentStore \} from "ravendb";
+
+var documentStore;
+var initialized = false;
+
+function initialize() \{
+  if (initialized) return;
+
+  const authOptions = \{
+    type: "pfx",
+    // Read .pfx file using fs.readFileSync
+    certificate: readFileSync(process.env.DB_CERT_PATH)
+  \};
+
+  documentStore = new DocumentStore(
+    process.env.DB_URLS.split(","), // Split by "," separator
+    process.env.DB_NAME,
+    authOptions
+  );
+  documentStore.initialize();
+
+  initialized = true;
+\}
+
+export function openAsyncSession() \{
+  if (!initialized) \{
+    initialize();
+  \}
+
+  return documentStore.openSession();
+\}
+`}
+
+
+
+
+
+## Configuring Support for Certificates
+
+RavenDB uses client certificate authentication (mutual TLS) to secure your database connection.
+The Node.js client SDK supports `.pfx` files or `.pem` files, which are passed to the `authOptions.certificate` option.
+There are multiple ways to load a certificate:
+
+- Load from .pfx files
+- Load from PEM-encoded certificate
+- Load from Azure Key Vault
+
+### Load from .pfx Files
+
+You can load PFX files with or without a password by providing the certificate buffer using `authOptions.certificate`:
+
+
+
+{`const authOptions = \{
+  type: "pfx",
+  // Read .pfx file using fs.readFileSync
+  certificate: readFileSync("../cert/company.client.certificate.pfx"),
+  // Optionally provide the password
+  password: ""
+\};
+
+documentStore = new DocumentStore(
+  ["https://a.free.company.ravendb.cloud"],
+  "demo",
+  authOptions
+);
+documentStore.initialize();
+`}
+
+
+
+If the `.pfx` file requires a password, provide it using the `password` option.
+However, keep in mind that using an absolute physical file path or a password
+requires manual configuration by every developer working on the project.
+
+
+PFX files can be compromised, especially if they are not password-protected.
+Using a physical file also makes certificates hard to manage and rotate when they expire.
+They are only recommended for ease-of-use on your local machine.
+For production, it is better to use the PEM certificate method or Azure Key Vault.
+
+
+### Load from PEM-encoded certificate
+
+For Node.js-based Azure Functions, it's recommended to use a PEM-encoded certificate that
+can be provided through Azure app settings without deploying any files.
+
+Unlike a `.pfx` file, a PEM-encoded certificate is plain-text encoded:
+
+
+
+{`-----BEGIN CERTIFICATE-----
+MIIFCzCCAvO...
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAK...
+-----END RSA PRIVATE KEY-----
+`}
+
+
+
+To pass a PEM-encoded certificate, you can read an environment variable like `DB_CERT_PEM` and
+set `authOptions` using the `pem` certificate type:
+
+
+
+{`const authOptions = \{
+  type: "pem",
+  certificate: process.env.DB_CERT_PEM
+\};
+
+documentStore = new DocumentStore(
+  ["https://a.free.company.ravendb.cloud"],
+  "demo",
+  authOptions
+);
+documentStore.initialize();
+`}
+
+
+
+
+Be aware that the Azure portal removes line endings and you will need to manually normalize
+the value for PEM parsing to succeed.
If you are setting the value in the `local.settings.json`
+file, you will need to format the value for JSON using [a stringify tool][tool-stringify].
+
+
+Here is how the starter template adds support for loading certificates using a `DB_CERT_PEM` environment variable:
+
+
+
+{`import \{ EOL \} from "os";
+import \{ readFile \} from "fs/promises";
+import \{ DocumentStore \} from "ravendb";
+
+let store;
+let initialized = false;
+
+export async function initializeDb(\{
+  urls,
+  databaseName,
+  dbCertPassword,
+  dbCertPath,
+  dbCertPem,
+  customize,
+\}) \{
+  if (initialized) return;
+
+  let authOptions = undefined;
+
+  if (dbCertPath) \{
+    authOptions = await getAuthOptionsFromCertificatePath(
+      dbCertPath,
+      dbCertPassword
+    );
+  \} else if (dbCertPem) \{
+    authOptions = getAuthOptionsFromCertPem(dbCertPem);
+  \}
+
+  store = new DocumentStore(urls, databaseName, authOptions);
+
+  if (customize) \{
+    customize(store.conventions);
+  \}
+
+  store.initialize();
+
+  initialized = true;
+
+  return store;
+\}
+
+async function getAuthOptionsFromCertificatePath(
+  dbCertPath,
+  dbCertPassword
+) \{
+  return \{
+    certificate: await readFile(dbCertPath),
+    password: dbCertPassword,
+    type: "pfx",
+  \};
+\}
+
+function getAuthOptionsFromCertPem(dbCertPem) \{
+  let certificate = dbCertPem;
+  const isMissingLineEndings = !dbCertPem.includes(EOL);
+
+  if (isMissingLineEndings) \{
+    // Typically when pasting values into Azure env vars
+    certificate = normalizePEM(certificate);
+  \}
+
+  return \{
+    certificate,
+    type: "pem",
+  \};
+\}
+
+function normalizePEM(pem) \{
+  return pem.replace(PEM_REGEX, (match, certSection, certSectionBody) => \{
+    const normalizedCertSectionBody = certSectionBody.replace(/\\s/g, EOL);
+    return \`-----BEGIN $\{certSection\}-----$\{EOL\}$\{normalizedCertSectionBody.trim()\}$\{EOL\}-----END $\{certSection\}-----$\{EOL\}\`;
+  \});
+\}
+
+const PEM_REGEX =
+  /-----BEGIN ([A-Z\\s]+)-----(\\s?[A-Za-z0-9+\\/=\\s]+?\\s?)-----END \\1-----/gm;
+
+export function openDbSession(opts) \{
+  if (!initialized)
+    throw new Error(
+      "DocumentStore is not initialized yet. Must \`initializeDb()\` before calling \`openDbSession()\`."
+    );
+  return store.openSession(opts);
+\}
+`}
+
+
+
+This supports using `.pfx` files or a PEM-encoded certificate, if provided.
+It normalizes the PEM value if it does not contain line endings.
+
+### Load from Azure Key Vault
+
+[Azure Key Vault][ms-az-key-vault] is a paid service that allows you to store, retrieve, and rotate
+encrypted secrets, including X.509 certificates. This is recommended for more robust certificate handling.
+
+Using the [SecretClient][ms-az-key-vault-secrets-client], you can load secrets from Key Vault.
+However, you will need to use the [CertificateClient][ms-az-key-vault-cert-client] to retrieve a certificate from the vault.
+
+For more, see the [sample code for using CertificateClient][ms-az-key-vault-sample-get].
+
+
+
+## Configuring Azure
+
+You will need to configure certificate authentication in Azure. The steps vary depending on the method you chose above.
+
+### Specifying Path to Certificate
+
+If you are deploying a physical `.pfx` file, you can specify the `DB_CERT_PATH` and `DB_PASSWORD` app settings.
+
+### Specifying PEM Certificate
+
+If you are loading a PEM-encoded certificate, follow the steps below to make your `.pem` certificate available to your Azure Functions:
+
+![JS update Azure app settings](./assets/js-azure-app-settings.jpg)
+
+1. 
Find the `.pem` certificate provided by the RavenDB client certificate package
+1. Copy its full contents
+1. Go to your Azure Functions dashboard in the Portal
+1. Click the Application Settings menu
+1. Modify or add the app setting for `DB_CERT_PEM` and paste the contents of your `.pem` file
+
+These values will override `local.settings.json` once deployed on Azure.
+
+
+
+## Next Steps
+
+- Learn more about [how to use the RavenDB Node.js client SDK][docs-nodejs]
+- Reference the [Node.js Azure Function starter template][gh-ravendb-template] to see the code
+- [Troubleshoot][troubleshooting] issues with RavenDB and Azure Functions
+- [Deployment Considerations][deployment-considerations] for RavenDB and Azure Functions
+
+
+
+[troubleshooting]: ../../../start/guides/azure-functions/troubleshooting
+[deployment-considerations]: ../../../start/guides/azure-functions/deployment
+[cloud-signup]: https://cloud.ravendb.net?utm_source=ravendb_docs&utm_medium=web&utm_campaign=howto_template_azurefns_nodejs_existing&utm_content=cloud_signup
+[nodejs]: https://nodejs.org
+[npm-ravendb-client]: https://npmjs.com/package/ravendb
+[docs-nodejs]: ../../../client-api/session/what-is-a-session-and-how-does-it-work
+[docs-creating-document-store]: ../../../client-api/creating-document-store
+[gh-ravendb-template]: https://github.com/ravendb/templates/tree/main/azure-functions/node-http
+[az-funcs]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-get-started
+[az-core-tools]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-run-local
+[ms-az-key-vault]: https://learn.microsoft.com/en-us/azure/key-vault/
+[ms-az-key-vault-secrets-client]: https://learn.microsoft.com/en-us/javascript/api/overview/azure/keyvault-secrets-readme?view=azure-node-latest
+[ms-az-key-vault-cert-client]: https://learn.microsoft.com/en-us/javascript/api/overview/azure/keyvault-certificates-readme?view=azure-node-latest
+[ms-az-key-vault-sample-get]: https://github.com/Azure/azure-sdk-for-js/blob/30c703fa2179831d330201bdb0fff5ac6c0a8b57/sdk/keyvault/keyvault-certificates/samples/v4/javascript/helloWorld.js
+[tool-stringify]: https://onlinestringtools.com/json-stringify-string
+
+
diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/_overview-csharp.mdx b/versioned_docs/version-7.1/start/guides/azure-functions/_overview-csharp.mdx
new file mode 100644
index 0000000000..c168184ca0
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/azure-functions/_overview-csharp.mdx
@@ -0,0 +1,393 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Microsoft **Azure Functions** is a serverless platform that supports multiple
+  languages and frameworks, letting you deploy workloads that scale without managing
+  any infrastructure.
+  Learn more about operating Microsoft Azure Functions [here][az-funcs].
+
+* In this guide, you will learn how to deploy a C# Azure Function that is connected
+  to your RavenDB database, using the [RavenDB Azure Functions C# template][template].
+
+  This guide assumes you are familiar with .NET development techniques and the
+  basics of Azure Function apps.
+
+
+* Watch our tutorial video [below](../../../start/guides/azure-functions/overview.mdx#tutorial-video)
+  or [on YouTube](https://www.youtube.com/watch?v=1vnpfsD3bSE&).
+
+* In this page:
+  * [Before We Get Started](../../../start/guides/azure-functions/overview.mdx#before-we-get-started)
+  * [Create a Local Azure Function App](../../../start/guides/azure-functions/overview.mdx#create-a-local-azure-function-app)
+  * [Configuring Local Connection to RavenDB](../../../start/guides/azure-functions/overview.mdx#configuring-local-connection-to-ravendb)
+  * [Creating Function App Resources in Azure](../../../start/guides/azure-functions/overview.mdx#creating-function-app-resources-in-azure)
+  * [Deploying to Azure](../../../start/guides/azure-functions/overview.mdx#deploying-to-azure)
+  * [Verify the Connection Works](../../../start/guides/azure-functions/overview.mdx#verify-the-connection-works)
+  * [Using RavenDB in the Azure Functions App](../../../start/guides/azure-functions/overview.mdx#using-ravendb-in-the-azure-functions-app)
+  * [Tutorial Video](../../../start/guides/azure-functions/overview.mdx#tutorial-video)
+
+
+
+## Before We Get Started
+
+You will need the following before continuing:
+
+- A [RavenDB Cloud][cloud-signup] account or self-hosted client certificate
+- [Azure Function Core Tools][az-core-tools] 4.x+
+- [Git](https://git-scm.org)
+- [.NET 6.x][download-dotnet]
+
+If you are new to Azure Function local development, see the [Getting started guide][az-funcs]
+for how to get up and running with your toolchain of choice.
+
+
+
+## Create a Local Azure Function App
+
+The [RavenDB Azure Function template][template] is a template repository on GitHub, which means
+you can either create a new repository derived from the template or clone it and push it to a new repository.
+
+This will set up a local Azure Function app that we will deploy to your Azure account at the end of the guide.
+
+### Creating a New Repository from the Template
+
+Depending on your environment, there are several ways to clone the template and initialize a new Git repository.
+The template repository lists each clone method, which you can copy and paste directly.
+
+**Using `npx` and the [degit][tool-degit] tool if you have Node.js installed:**
+
+
+
+{`npx degit ravendb/templates/azure-functions/csharp-http my-project
+cd my-project
+git init
+`}
+
+
+
+**Using Bash or PowerShell:**
+
+
+
+{`git clone https://github.com/ravendb/templates my-project
+cd my-project
+git filter-branch --subdirectory-filter azure-functions/csharp-http
+rm -rf .git # Bash
+rm -r -force .git # PowerShell
+git init
+`}
+
+
+
+### Install Dependencies
+
+After cloning the repository locally, restore the .NET dependencies with `dotnet`:
+
+
+
+{`dotnet restore
+`}
+
+
+
+By default, the template is configured to connect to the Live Test instance of RavenDB.
+Since this is only for testing purposes, next you will configure the app to connect to your existing RavenDB database.
+
+### Starting the Function
+
+You can start the Azure Function locally using:
+
+
+
+{`func start
+`}
+
+
+
+If you are using Visual Studio Code, you can also debug the function with F5 debugging.
+
+You will see the welcome screen if the template is set up correctly:
+
+![.NET template welcome screen](./assets/dotnet-func-start.jpg)
+
+
+
+
+## Configuring Local Connection to RavenDB
+
+To configure the local version of your Azure Functions app to connect to RavenDB, you will need to update
+the `appsettings.json` file with the `RavenSettings:Urls` and `RavenSettings:DatabaseName` values.
+The default is:
+
+
+
+{`\{
+  "RavenSettings": \{
+    "Urls": ["http://live-test.ravendb.net"],
+    "DatabaseName": "Northwind"
+  \}
+\}
+`}
+
+
+
+If using an authenticated RavenDB URL, you will need a local client certificate installed.
+Learn more about configuring client authentication for RavenDB [here][docs-client-certs].
+
+### Certificate Path and Password (Windows and Linux)
+
+To use a `.pfx` file, specify a relative or absolute file path with `RavenSettings:CertFilePath`.
+
+To specify a PFX password, use the .NET User Secrets tool to add a secret locally:
+
+
+
+{`dotnet user-secrets init
+dotnet user-secrets set RavenSettings:CertPassword "<password>"
+`}
+
+
+
+Replace `<password>` with your PFX password.
+
+Example `appsettings.json`:
+
+
+
+{`\{
+  "RavenSettings": \{
+    "Urls": ["https://a.free.mycompany.ravendb.cloud"],
+    "DatabaseName": "company_db",
+    "CertFilePath": "a.free.mycompany.ravendb.cloud.with.password.pfx"
+  \}
+\}
+`}
+
+
+
+### Certificate Thumbprint (Windows Only)
+
+You can also specify a certificate to use from the `CurrentUser\My` Windows certificate store by setting
+`RavenSettings:CertThumbprint`.
+
+Example `appsettings.json`:
+
+
+
+{`\{
+  "RavenSettings": \{
+    "Urls": ["https://a.free.mycompany.ravendb.cloud"],
+    "DatabaseName": "company_db",
+    "CertThumbprint": "<thumbprint>"
+  \}
+\}
+`}
+
+
+
+
+
+## Creating Function App Resources in Azure
+
+At this point, the local Function app is ready to be deployed. Before you can do that,
+you need to set up the Function App resources in Azure.
+
+The template includes an ARM deployment option using the **Deploy to Azure** button.
+This will open the Azure Portal and walk you through creating a default Function App with
+the required resources and app settings.
+
+Follow the guide of your choice in the Microsoft docs. Once the app is created, come
+back here to finish configuring your database connection.
+
+### Upload Your Client Certificate (.pfx)
+
+Once the app is created in the portal, follow these steps to upload the client certificate and make
+it accessible to your Function.
+
+![.NET upload certificate to Azure](./assets/dotnet-azure-upload-cert.jpg)
+
+1. Go to your Azure Functions dashboard in the Portal
+1. Click "Certificates"
+1. Click the "Bring Your Own Certificate" tab
+1. Click the "+ Add Certificate" button
+1. Upload the RavenDB client certificate (PFX) file
+1. Enter the certificate password
+1. Once uploaded, click the certificate to view details
+1. Copy the "Thumbprint" for the next step
+
+
+The Azure portal only uses the certificate password once, on upload. You will not need the password
+in your Functions App, only the public thumbprint.
+You can safely delete the password from your device once the certificate is uploaded to the Portal so
+as not to risk it being discovered.
+
+
+### Configure Application Settings
+
+![.NET update Azure app settings](./assets/dotnet-azure-app-settings.jpg)
+
+1. Go to your Azure Functions dashboard in the Portal
+1. Click the Application Settings menu
+1. Modify or add an app setting for `WEBSITE_LOAD_CERTIFICATES` with the certificate thumbprint you copied
+
+   ![.NET WEBSITE_LOAD_CERTIFICATES example](./assets/dotnet-azure-website-load-certificates.jpg)
+
+1. Modify or add an app setting for `RavenSettings__CertThumbprint` with the certificate thumbprint you copied
+
+   ![.NET RavenSettings__CertThumbprint example](./assets/dotnet-azure-ravensettings__certthumbprint.jpg)
+
+1. 
Modify or add an app setting for `RavenSettings__Urls` with the comma-separated list of RavenDB node URLs to connect to
+1. Modify or add an app setting for `RavenSettings__DatabaseName` with the database name to connect to
+
+These values will override `appsettings.json` once deployed on Azure.
+
+
+`WEBSITE_LOAD_CERTIFICATES` makes any specified certificates available in the Windows
+Certificate Store under the `CurrentUser\My` location. You can use the wildcard value
+`*` for `WEBSITE_LOAD_CERTIFICATES` to load ALL uploaded certificates for your Function App.
+However, it's recommended to be specific and use comma-separated thumbprints so that only
+allowed certificates are made available. This avoids accidentally exposing a certificate
+to the application that isn't explicitly used.
+
+
+
+## Deploying to Azure
+
+Once the Azure app is set up in the portal, you are ready to deploy your app.
+There are three main ways to deploy your new Azure Function app: GitHub Actions, the command line, and an extension.
+
+The template has already been set up for continuous deployment using GitHub Actions.
+For the other methods, see [Deploying Azure Function apps][az-deploy].
+
+### Configure GitHub Secrets
+
+The GitHub Actions workflow relies on having a secret environment variable, `AZURE_FUNCTIONAPP_PUBLISH_PROFILE`,
+in your repository secrets.
+
+1. Go to your Azure Functions dashboard in the Azure Portal
+1. Click "Get Publish Profile"
+
+   ![download Azure publish profile](./assets/azure-download-publish-profile.jpg)
+
+1. Download the publish profile
+1. Open it and copy the full XML
+1. Go to your [GitHub repository's secrets settings][gh-secrets]
+
+   ![add GitHub secret for publish profile](./assets/github-publish-profile-secret.jpg)
+
+1. Add a new secret: `AZURE_FUNCTIONAPP_PUBLISH_PROFILE`
+1. Paste in the value of the publish profile
+
+### Trigger a Deployment
+
+Your repository and GitHub Action are now set up. To test the deployment, you can push a commit to the repository.
+
+If you have already committed and pushed, it is likely that the Action failed, and you can re-run the job using
+the new secret variable.
+
+
+
+## Verify the Connection Works
+
+If the deployment succeeds, the `HttpTrigger` endpoint should now be available at your Function URL.
+
+Once you open the URL in the browser, you should see a welcome screen like this with the connection information:
+
+![.NET Azure Function welcome connected screen](./assets/dotnet-azure-func-success.jpg)
+
+This means your Azure Functions app is correctly configured and ready to work with RavenDB.
+
+
+
+## Using RavenDB in the Azure Functions App
+
+The template sets up a singleton `DocumentStore` and dependency injection for an `IAsyncDocumentSession` per function
+invocation, which you can inject into Function classes.
+
+### Example: Injecting `IAsyncDocumentSession`
+
+Pass the `IAsyncDocumentSession` in a function class constructor to make it available to trigger functions:
+
+
+
+{`private readonly IAsyncDocumentSession session;
+
+public HttpTrigger_1(IAsyncDocumentSession session)
+\{
+  this.session = session;
+\}
+
+[FunctionName("HttpTrigger_1")]
+public async Task<IActionResult> Run(
+  [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequest req,
+  ILogger log)
+\{
+  // Access \`session\` within the body of the function
+
+  var user = await session.LoadAsync<User>("users/100");
+
+  return new OkObjectResult(user);
+\}
+`}
+
+
+
+You can also inject an `IDocumentStore` to get a reference to the current store instance.
+
+### Example: Loading a user
+
+
+
+{`private readonly IAsyncDocumentSession session;
+
+public HttpTrigger_2(IAsyncDocumentSession session)
+\{
+  this.session = session;
+\}
+
+[FunctionName("HttpTrigger_2")]
+public async Task<IActionResult> Run(
+  [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = "\{id:int\}")] HttpRequest req,
+  int id,
+  ILogger log)
+\{
+  log.LogInformation("C# HTTP trigger function processed a request.");
+
+  var user = await session.LoadAsync<User>("users/" + id);
+
+  return new OkObjectResult(user);
+\}
+`}
+
+
+
+Learn more about using the RavenDB .NET client SDK [here][ravendb-dotnet].
+
+
+
+## Tutorial Video
+
+Watch our _Using Azure Functions with RavenDB .NET_ tutorial:
+<iframe width="560" height="315" src="https://www.youtube.com/embed/1vnpfsD3bSE?si=X6hyNiwfzEH5wR8w" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
+
+
+
+[download-dotnet]: https://dotnet.microsoft.com/en-us/download/dotnet/6.0
+[az-funcs]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-get-started
+[az-core-tools]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-run-local
+[az-deploy]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-deployment-technologies
+[template]: https://github.com/ravendb/templates/tree/main/azure-functions/csharp-http
+[gh-secrets]: https://docs.github.com/en/actions/security-guides/encrypted-secrets
+[cloud-signup]: https://cloud.ravendb.net?utm_source=ravendb_docs&utm_medium=web&utm_campaign=howto_template_azurefns_dotnet&utm_content=cloud_signup
+[docs-get-started]: ../../../start/getting-started
+[docs-client-certs]: ../../../client-api/setting-up-authentication-and-authorization
+[ravendb-dotnet]: ../../../client-api/session/what-is-a-session-and-how-does-it-work
+[tool-degit]: https://npmjs.com/package/degit
+
+
diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/_overview-nodejs.mdx b/versioned_docs/version-7.1/start/guides/azure-functions/_overview-nodejs.mdx
new file mode 100644
index 0000000000..d2f9cd8940
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/azure-functions/_overview-nodejs.mdx
@@ -0,0 +1,332 @@
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+
+
+
+* Microsoft **Azure Functions** is a serverless platform that supports multiple
+  languages and frameworks, letting you deploy workloads that scale without managing
+  any infrastructure.
+  Learn more about operating Microsoft Azure Functions [here][az-funcs].
+
+* In this guide, you will learn how to deploy a Node.js Azure Function that is connected
+  to your RavenDB database, using the [RavenDB Azure Functions Node.js template][template].
+
+  This guide assumes you are familiar with Node.js development techniques
+  and the basics of Azure Function apps.
+
+
+* Watch our tutorial video [below](../../../start/guides/azure-functions/overview.mdx#tutorial-video)
+  or [on YouTube](https://www.youtube.com/watch?v=TJdJ3TJK-Sg).
+
+* In this page:
+  * [Before We Get Started](../../../start/guides/azure-functions/overview.mdx#before-we-get-started)
+  * [Create a Local Azure Function App](../../../start/guides/azure-functions/overview.mdx#create-a-local-azure-function-app)
+  * [Connecting to RavenDB](../../../start/guides/azure-functions/overview.mdx#connecting-to-ravendb)
+  * [Creating a Function App in Azure](../../../start/guides/azure-functions/overview.mdx#creating-a-function-app-in-azure)
+  * [Deploying to Azure](../../../start/guides/azure-functions/overview.mdx#deploying-to-azure)
+  * [Verify the Connection Works](../../../start/guides/azure-functions/overview.mdx#verify-the-connection-works)
+  * [Using RavenDB in the Azure Functions App](../../../start/guides/azure-functions/overview.mdx#using-ravendb-in-the-azure-functions-app)
+  * [Tutorial Video](../../../start/guides/azure-functions/overview.mdx#tutorial-video)
+
+
+
+## Before We Get Started
+
+You will need the following before continuing:
+
+- A [RavenDB Cloud][cloud-signup] account or self-hosted client certificate
+- [Azure Function Core Tools][az-core-tools] 4.x+
+- [Git](https://git-scm.org)
+- [Node.js][nodejs] 18+
+
+If you are new to Azure Function local development, see the [Getting started guide][az-funcs]
+for how to get up and running with your toolchain of choice.
+
+
+
+## Create a Local Azure Function App
+
+The [RavenDB Azure Function template][template] is a template repository on GitHub, which means
+you can either create a new repository derived from the template or clone it and push it to a new repository.
+
+This will set up a local Azure Function app that we will deploy to your Azure account at the end of the guide.
+
+### Creating a New Repository from the Template
+
+Depending on your environment, there are several ways to clone the template and initialize
+a new Git repository.
+The template repository lists each clone method, which you can copy and paste directly, but the fastest
+way is using [degit][tool-degit].
+
+
+
+{`npx degit ravendb/templates/azure-functions/node-http my-project
+cd my-project
+git init
+`}
+
+
+
+### Install Dependencies
+
+After cloning the repository locally, install the Node.js dependencies with `npm`:
+
+
+
+{`npm install
+`}
+
+
+
+By default, the template is configured to connect to the Live Test instance of RavenDB.
+Since this is only for testing purposes, next you will configure the app to connect to your existing
+RavenDB database.
+
+### Starting the Function
+
+You can start the Azure Function locally using:
+
+`npm start`
+
+If you are using Visual Studio Code, you can also debug the function with F5 debugging.
+
+You will see the welcome screen if the template is set up correctly:
+
+![Node.js template welcome screen](./assets/js-func-start.jpg "Node.js template welcome screen")
+
+
+
+## Connecting to RavenDB
+
+To configure the local version of your Azure Functions app to connect to RavenDB,
+you will need to update the `local.settings.json` file with the `DB_URLS` and `DB_NAME` values.
+The default is:
+
+
+
+{`\{
+  "IsEncrypted": false,
+  "Values": \{
+    "AzureWebJobsStorage": "",
+    "FUNCTIONS_WORKER_RUNTIME": "node",
+    "DB_URLS": "",
+    "DB_NAME": ""
+  \}
+\}
+`}
+
+
+
+### Configure Local Database Certificate
+
+RavenDB is secured using client-certificate authentication (also known as mutual TLS).
+
+The template supports loading a certificate locally through physical `.pfx` files (X.509 certificates).
+
+Specify the following app settings:
+
+- `DB_CERT_PATH`: the absolute path or relative path from the project root to your `.pfx` file, e.g. `../certs/db.pfx`
+- `DB_PASSWORD`: the password protecting your PFX file
+
+
+You are not required to use a password-protected PFX file locally.
+If you do intend to use a password-protected PFX file, you will
+need to set `DB_PASSWORD` as an environment variable in your terminal
+session (e.g. `export DB_PASSWORD=abc`) or through your terminal
+profile (e.g. `.bashrc`).
+Do not commit `.pfx` files to source control.
+
+
+
+
+## Creating a Function App in Azure
+
+At this point, the local Function app is ready to be deployed. There are multiple ways to create
+and deploy Function apps, using tools like Visual Studio Code or the portal itself.
+
+Follow the guide of your choice in the Microsoft docs. Once the app is created, come back here
+to finish configuring your database connection.
+
+### Configuring Application Settings
+
+1. Go to your Azure Functions dashboard in the Portal
+1. Click the Application Settings menu
+1. Add an app setting for `DB_URLS` with the comma-separated list of RavenDB node URLs to connect to
+1. Add an app setting for `DB_NAME` with the database name to connect to
+
+![JS update Azure app settings](./assets/js-azure-app-settings.jpg)
+
+These values will override `local.settings.json` once deployed on Azure.
+
+### Configuring PEM Certificate in Azure
+
+Azure Functions supports client certificates on both the Consumption and App Service plans.
+
+Specify the `DB_CERT_PEM` app setting:
+
+![JS add DB_CERT_PEM Azure app setting](./assets/js-azure-db-cert-pem.jpg)
+
+The value should be the contents of the PEM-encoded certificate (`.pem` file) downloaded from RavenDB.
+
+You can safely copy/paste the contents of the file into the environment variable in the Azure Portal
+without preserving newlines. If you are setting the value in the `local.settings.json` file, you will
+need to format the value for JSON using [a stringify tool][tool-stringify].
+
+
+
+Azure allows you to upload PFX certificates to the portal and load them using the
+`WEBSITE_LOAD_CERTIFICATES` app setting. However, this is much more difficult to use
+for Node.js functions. That method is better suited for .NET or Java functions.
+**Regardless, this is not yet supported on Linux Consumption-based plans.** For
+a discussion on this, reference [this issue on the Azure Functions repository][ms-issue-linux-certs-unsupported].
+
+The template is configured to use the PEM certificate method for ease of use across plan types and platforms.
+
+
+
+
+
+## Deploying to Azure
+
+Once the Azure app is set up in the portal, you are ready to deploy your app. There are three main
+ways to deploy your new Azure Function app: GitHub Actions, the command line, and an extension.
+
+The template has already been set up for continuous deployment using GitHub Actions.
+For the other methods, see [Deploying Azure Function apps][az-deploy].
+
+### Configure GitHub Secrets
+
+The GitHub Actions workflow relies on having a secret environment variable, `AZURE_FUNCTIONAPP_PUBLISH_PROFILE`,
+in your repository secrets.
+
+1. Go to your Azure Functions dashboard in the Azure Portal
+1. Click "Get Publish Profile"
+
+   ![download Azure publish profile](./assets/azure-download-publish-profile.jpg)
+
+1. Download the publish profile
+1. Open it and copy the full XML
+1. 
Go to your [GitHub repository's secrets settings][gh-secrets]
+
+   ![add GitHub secret for publish profile](./assets/github-publish-profile-secret.jpg)
+
+1. Add a new secret: `AZURE_FUNCTIONAPP_PUBLISH_PROFILE`
+1. Paste in the value of the publish profile
+
+### Trigger a Deployment
+
+Your repository and GitHub Action are now set up. To test the deployment, you can push
+a commit to the repository.
+
+If you have already committed and pushed, it is likely that the Action failed, and you
+can re-run the job using the new secret variable.
+
+
+
+## Verify the Connection Works
+
+If the deployment succeeds, the `HttpTrigger` endpoint should now be available at your Function URL.
+
+Once you open the URL in the browser, you should see a welcome screen like this with the connection information:
+
+![JS Azure func welcome screen](./assets/js-azure-func-success.jpg)
+
+This means your Azure Functions app is correctly configured and ready to work with RavenDB.
+
+
+
+## Using RavenDB in the Azure Functions App
+
+The template uses the [@senacor/azure-function-middleware][npm-middleware] npm package to provide
+a `middleware` helper function that can wrap Azure function handlers. The template includes a database
+middleware that opens a new session per request and ensures the document store is initialized once.
+
+### Exporting an Azure Function trigger with middleware
+
+By default, Azure Function handlers are exported like `export default httpTrigger;`.
+
+You will need to change this to export with the `middleware` helper function for any new triggers
+being added. Import the `createDbMiddleware` function and pass it as the second parameter to `middleware`, like this:
+
+`export default middleware(httpTrigger, [createDbMiddleware]);`
+
+### Example: Passing the database middleware to an Azure function handler
+
+
+
+{`import \{ Context, HttpRequest \} from "@azure/functions";
+
+// Import the middleware helpers
+import \{ middleware \} from "@senacor/azure-function-middleware";
+import \{ createDbMiddleware \} from "../db/middleware";
+
+const httpTrigger = async function (
+  context: Context,
+  req: HttpRequest
+): Promise<void> \{
+  context.log("HTTP trigger function processed a request.");
+
+  context.res = \{
+    // status: 200, /* Defaults to 200 */
+    body: 'success'
+  \};
+\};
+
+// Export default trigger wrapped with middleware
+export default middleware(httpTrigger, [createDbMiddleware]);
+`}
+
+
+
+The middleware injects a `db` property of type `IDocumentSession` on the `context` object.
+You can access the document session using `context.db` in the function handler.
+
+### Example: Loading a user
+
+
+
+{`const httpTrigger = async function (
+  context: Context,
+  req: HttpRequest
+): Promise<void> \{
+  context.log("HTTP trigger function processed a request.");
+
+  const user = await context.db.load("users/" + req.params.id);
+
+  context.res = \{
+    body: JSON.stringify(\{ user \})
+  \};
+\};
+`}
+
+
+
+Learn more about using the RavenDB Node.js client SDK [here][ravendb-nodejs].
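+
+### Example: Storing a new document
+
+As one more illustration, here is a sketch of a handler that writes a document through the same
+middleware-injected session. The `User` shape and the document ID prefix are illustrative assumptions,
+not part of the template:
+
+{`import \{ Context, HttpRequest \} from "@azure/functions";
+import \{ middleware \} from "@senacor/azure-function-middleware";
+import \{ createDbMiddleware \} from "../db/middleware";
+
+const httpTrigger = async function (
+  context: Context,
+  req: HttpRequest
+): Promise<void> \{
+  // The session tracks the new entity until saveChanges() flushes it
+  const user = \{ name: req.body?.name ?? "anonymous" \};
+  await context.db.store(user, "users/");
+
+  // Persist all pending changes in a single round trip
+  await context.db.saveChanges();
+
+  context.res = \{
+    body: JSON.stringify(\{ user \})
+  \};
+\};
+
+export default middleware(httpTrigger, [createDbMiddleware]);
+`}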
+
+
+
+## Tutorial Video
+
+Watch our _Using Azure Functions with RavenDB Node.js_ tutorial:
+<iframe width="560" height="315" src="https://www.youtube.com/embed/TJdJ3TJK-Sg?si=m1cSrn2k7EF6z6bW" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
+
+
+
+[az-funcs]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-get-started
+[az-core-tools]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-run-local
+[az-deploy]: https://learn.microsoft.com/en-us/azure/azure-functions/functions-deployment-technologies
+[nodejs]: https://nodejs.org
+[template]: https://github.com/ravendb/templates/tree/main/azure-functions/node-http
+[gh-secrets]: https://docs.github.com/en/actions/security-guides/encrypted-secrets
+[cloud-signup]: https://cloud.ravendb.net?utm_source=ravendb_docs&utm_medium=web&utm_campaign=howto_template_azurefns_nodejs&utm_content=cloud_signup
+[docs-get-started]: ../../../start/getting-started
+[ravendb-nodejs]: ../../../client-api/session/what-is-a-session-and-how-does-it-work
+[npm-middleware]: https://npmjs.com/package/@senacor/azure-function-middleware
+[tool-stringify]: https://onlinestringtools.com/json-stringify-string
+[tool-degit]: https://npmjs.com/package/degit
+[ms-issue-linux-certs-unsupported]: https://github.com/Azure/Azure-Functions/issues/1644
+
+
diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/azure-download-publish-profile.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/azure-download-publish-profile.jpg
new file mode 100644
index 0000000000..339157897d
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/azure-download-publish-profile.jpg differ
diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-app-settings.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-app-settings.jpg
new file mode 100644
index 0000000000..55a3bb627a
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-app-settings.jpg differ
diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-func-success.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-func-success.jpg
new file mode 100644
index 0000000000..c8b2bc0f07
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-func-success.jpg differ
diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-ravensettings__certthumbprint.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-ravensettings__certthumbprint.jpg
new file mode 100644
index 0000000000..1e42d6630b
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-ravensettings__certthumbprint.jpg differ
diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-upload-cert.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-upload-cert.jpg
new file mode 100644
index 0000000000..6cb34b4bc8
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-upload-cert.jpg differ
diff --git 
a/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-website-load-certificates.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-website-load-certificates.jpg new file mode 100644 index 0000000000..602aea1f40 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-azure-website-load-certificates.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-certificate-install.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-certificate-install.jpg new file mode 100644 index 0000000000..480fde704b Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-certificate-install.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-certificate-thumbprint.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-certificate-thumbprint.jpg new file mode 100644 index 0000000000..83ff616261 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-certificate-thumbprint.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-func-start.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-func-start.jpg new file mode 100644 index 0000000000..d47786a34f Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/dotnet-func-start.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/github-publish-profile-secret.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/github-publish-profile-secret.jpg new file mode 100644 index 0000000000..1c12230858 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/github-publish-profile-secret.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-azure-app-settings.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-azure-app-settings.jpg new file mode 100644 index 0000000000..4adfabc00a Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-azure-app-settings.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-azure-db-cert-pem.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-azure-db-cert-pem.jpg new file mode 100644 index 0000000000..53e8331dc4 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-azure-db-cert-pem.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-azure-func-success.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-azure-func-success.jpg new file mode 100644 index 0000000000..ef89bff844 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-azure-func-success.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-func-start.jpg b/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-func-start.jpg new file mode 100644 index 0000000000..43fe268306 Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/azure-functions/assets/js-func-start.jpg differ diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/deployment.mdx b/versioned_docs/version-7.1/start/guides/azure-functions/deployment.mdx new file mode 100644 
index 0000000000..8c4104f460
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/azure-functions/deployment.mdx
@@ -0,0 +1,70 @@
+---
+title: "Azure Functions: Deployment Considerations"
+hide_table_of_contents: true
+sidebar_label: Deployment Considerations
+sidebar_position: 2
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Azure Functions: Deployment Considerations
+
+
+This document describes what to consider when deploying RavenDB
+serverless workloads to the Microsoft Azure Functions platform.
+
+* In this page:
+  * [Linux or Windows?](../../../start/guides/azure-functions/deployment.mdx#linux-or-windows)
+  * [Document Store Lifetime and Cold Starts](../../../start/guides/azure-functions/deployment.mdx#document-store-lifetime-and-cold-starts)
+
+
+
+## Linux or Windows?
+
+Azure Functions allows you to deploy Function Apps on Linux or Windows. For RavenDB, this mainly
+affects the way certificates are loaded and handled.
+
+**Using a Windows deployment plus `.NET` Azure Functions will provide the most secure and performant option.**
+This will allow you to upload client certificates through the Portal and make them accessible to your `.NET`
+functions with the `WEBSITE_LOAD_CERTIFICATES` application setting.
+Furthermore, `.NET` Core functions have lower cold start times than Node.js.
+
+**If you prefer to use Node.js,** you can choose Linux or Windows.
+The template is configured to support a PEM-encoded certificate loaded through an environment
+variable due to a limitation with cross-platform support for `WEBSITE_LOAD_CERTIFICATES` (see
+[this issue][ms-issue-linux-certs-unsupported] on the Azure Functions repo).
+
+
+
+## Document Store Lifetime and Cold Starts
+
+One of the benefits of serverless is that you can potentially lower costs for less-used services
+and pay per invocation.
+As a trade-off, these functions incur a startup cost known as a "cold start" before they can serve requests.
+
+The Document Store is meant to be instantiated once for the lifetime of an application.
+However, cold vs. warm starts in serverless environments have some implications for this.
+
+In Azure Functions, the document store will be shared across invocations of a Function as
+long as it remains warmed up.
+For Functions invoked more than once every 60 seconds, the document store will remain
+initialized and you should not see an impact on latency.
+
+If an Azure Function is wound down, it will incur a cold start cost the next time it is invoked.
+The vast majority of cold start time is due to the Azure Function runtime. Document store initialization
+will not have a major impact on latency, as establishing the TCP & TLS connection is still quite fast.
+
+To reduce cold starts, consider switching from the Consumption (Pay-as-you-Go) plan to a Premium App
+Service Plan, which will allow you to keep Functions warm for longer periods of time.
+
+Learn more about [how Azure Functions deals with cold vs. warm start times][az-func-cold-warm].
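+
+As a minimal sketch of the pattern the guides above rely on (names and URLs are illustrative),
+a lazily initialized module-scope store is what keeps warm invocations cheap:
+
+{`import \{ DocumentStore \} from "ravendb";
+
+// Module scope survives across invocations while the Function stays warm,
+// so the store and its connections are reused rather than rebuilt.
+let store: DocumentStore | undefined;
+
+export function getStore(): DocumentStore \{
+  if (!store) \{
+    // Only a cold start pays this initialization cost
+    store = new DocumentStore(["https://a.free.company.ravendb.cloud"], "demo");
+    store.initialize();
+  \}
+  return store;
+\}
+`}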
+ + + +[az-func-cold-warm]: https://azure.microsoft.com/en-us/blog/understanding-serverless-cold-start/ +[ms-issue-linux-certs-unsupported]: https://github.com/Azure/Azure-Functions/issues/1644 diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/existing-project.mdx b/versioned_docs/version-7.1/start/guides/azure-functions/existing-project.mdx new file mode 100644 index 0000000000..72530bd842 --- /dev/null +++ b/versioned_docs/version-7.1/start/guides/azure-functions/existing-project.mdx @@ -0,0 +1,41 @@ +--- +title: "Guides: Add RavenDB to an Existing Azure Functions Project (.NET C#)" +hide_table_of_contents: true +sidebar_label: Add to Existing Project +sidebar_position: 1 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import ExistingProjectCsharp from './_existing-project-csharp.mdx'; +import ExistingProjectNodejs from './_existing-project-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/overview.mdx b/versioned_docs/version-7.1/start/guides/azure-functions/overview.mdx new file mode 100644 index 0000000000..d0faa47624 --- /dev/null +++ b/versioned_docs/version-7.1/start/guides/azure-functions/overview.mdx @@ -0,0 +1,45 @@ +--- +title: "Guides: Azure Functions (.NET C#)" +hide_table_of_contents: true +sidebar_label: Overview +sidebar_position: 0 +--- + +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +import OverviewCsharp from './_overview-csharp.mdx'; +import OverviewNodejs from './_overview-nodejs.mdx'; + +export const supportedLanguages = ["csharp", "nodejs"]; + + + + + + + + + + + + + \ No newline at end of file diff --git a/versioned_docs/version-7.1/start/guides/azure-functions/troubleshooting.mdx b/versioned_docs/version-7.1/start/guides/azure-functions/troubleshooting.mdx new file mode 100644 index 0000000000..ffb2157e2d --- /dev/null +++ b/versioned_docs/version-7.1/start/guides/azure-functions/troubleshooting.mdx @@ -0,0 +1,30 @@ +--- +title: "Troubleshooting" +hide_table_of_contents: true +sidebar_label: Troubleshooting +sidebar_position: 3 +--- + +import Admonition from '@theme/Admonition'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import LanguageSwitcher from "@site/src/components/LanguageSwitcher"; +import LanguageContent from "@site/src/components/LanguageContent"; + +# Troubleshooting +### `DatabaseDoesNotExist` error + +The instance you're connecting to doesn't have a database yet (specified through `DB_NAME`). + +Follow the instructions to [create a new database][docs-create-db] in the Studio. +### `InvalidAuth` certificate error + +If RavenDB returns an `InvalidAuth` response with a message like: + +`This server requires client certificate for authentication, but none was provided by the client.` + +The provided certificate may not be the right one, may have the wrong password, or may lack +permissions. Double-check that the certificate works locally. 
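+
+One way to rule out Azure-specific configuration is to verify the certificate from a local script first.
+A hedged sketch (the URL, database name, certificate path, and document ID are placeholders):
+
+{`import \{ readFileSync \} from "fs";
+import \{ DocumentStore \} from "ravendb";
+
+const store = new DocumentStore(
+  ["https://a.free.company.ravendb.cloud"],
+  "demo",
+  \{ type: "pfx", certificate: readFileSync("../certs/client.pfx") \}
+);
+store.initialize();
+
+// Any round trip fails fast with an authentication error if the certificate is rejected
+const session = store.openSession();
+session.load("users/1")
+  .then(() => console.log("Certificate accepted by the server"))
+  .catch((err) => console.error("Authentication failed:", err.message))
+  .finally(() => store.dispose());
+`}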
+
+[docs-create-db]: ../../../studio/database/create-new-database/general-flow
diff --git a/versioned_docs/version-7.1/start/guides/cloudflare-workers/_category_.json b/versioned_docs/version-7.1/start/guides/cloudflare-workers/_category_.json
new file mode 100644
index 0000000000..20716ebf55
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/cloudflare-workers/_category_.json
@@ -0,0 +1,4 @@
+{
+  "position": 2,
+  "label": "Cloudflare Workers"
+}
\ No newline at end of file
diff --git a/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/cf-env-vars.jpg b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/cf-env-vars.jpg
new file mode 100644
index 0000000000..5fbf4d2d13
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/cf-env-vars.jpg differ
diff --git a/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/cloudflare-worker-preview.jpg b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/cloudflare-worker-preview.jpg
new file mode 100644
index 0000000000..8b598906b7
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/cloudflare-worker-preview.jpg differ
diff --git a/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/template-deploy-cloudflare.jpg b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/template-deploy-cloudflare.jpg
new file mode 100644
index 0000000000..e1b7611e85
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/template-deploy-cloudflare.jpg differ
diff --git a/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/template-welcome-authenticated.jpg b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/template-welcome-authenticated.jpg
new file mode 100644
index 0000000000..0faf669314
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/template-welcome-authenticated.jpg differ
diff --git a/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/template-welcome-unauthenticated.jpg b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/template-welcome-unauthenticated.jpg
new file mode 100644
index 0000000000..f38bffa422
Binary files /dev/null and b/versioned_docs/version-7.1/start/guides/cloudflare-workers/assets/template-welcome-unauthenticated.jpg differ
diff --git a/versioned_docs/version-7.1/start/guides/cloudflare-workers/existing-project.mdx b/versioned_docs/version-7.1/start/guides/cloudflare-workers/existing-project.mdx
new file mode 100644
index 0000000000..9f080e2065
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/cloudflare-workers/existing-project.mdx
@@ -0,0 +1,432 @@
+---
+title: "Add RavenDB to an Existing Cloudflare Worker (TypeScript)"
+hide_table_of_contents: true
+sidebar_label: Add to Existing Project
+sidebar_position: 1
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Add RavenDB to an Existing Cloudflare Worker (TypeScript)
+
+
+* [Cloudflare Workers](https://developers.cloudflare.com/workers/) is a serverless platform that allows
+  you to deploy workloads closer to where your users are, with 200+ metro data centers in its CDN network.
+
+* Learn more about [how Workers works](https://developers.cloudflare.com/workers/learning/how-workers-works/).
+
+* Cloudflare Workers run on the V8 Runtime.
+  The RavenDB Node.js client SDK provides support to query RavenDB resources in RavenDB Cloud or in your
+  own cloud infrastructure.
+
+* In this guide, you will learn how to connect to RavenDB from an existing Cloudflare Worker.
+  We assume you are familiar with Node.js development techniques and the basics of Cloudflare Workers.
+
+* In this page:
+  * [Before We Get Started](../../../start/guides/cloudflare-workers/existing-project.mdx#before-we-get-started)
+  * [Installing the RavenDB Client SDK](../../../start/guides/cloudflare-workers/existing-project.mdx#installing-the-ravendb-client-sdk)
+  * [Initializing the Document Store](../../../start/guides/cloudflare-workers/existing-project.mdx#initializing-the-document-store)
+  * [Updating Database Connection Settings](../../../start/guides/cloudflare-workers/existing-project.mdx#updating-database-connection-settings)
+  * [Configuring Support for Certificates](../../../start/guides/cloudflare-workers/existing-project.mdx#configuring-support-for-certificates)
+  * [Configuring Cloudflare](../../../start/guides/cloudflare-workers/existing-project.mdx#configuring-cloudflare)
+
+
+
+## Before We Get Started
+
+You will need the following before continuing:
+
+- A [RavenDB Cloud][cloud-signup] account or self-hosted client certificate
+- A free or paid [Cloudflare account](https://cloudflare.com)
+- [Node.js](https://nodejs.com) 16+ with npm
+
+
+
+## Installing the RavenDB Client SDK
+
+Get started by installing the [ravendb][npm-ravendb-client] npm package, which provides the Node.js client SDK, in your project.
+
+Using npm:
+
+
+
+{`npm install ravendb
+`}
+
+
+
+
+Support for Cloudflare Workers was added in `5.2.8+`.
+
+
+
+
+## Initializing the Document Store
+
+Import `DocumentStore` from the `ravendb` package, create a new instance with the required configuration,
+and initialize your connection to RavenDB by calling the `initialize` method.
+You can then export a function that opens a document session to use in your Cloudflare Worker.
+
+Example `db.ts` TypeScript module:
+
+
+
+{`import \{ DocumentStore \} from "ravendb";
+
+const documentStore = new DocumentStore(
+  ["https://a.free.mycompany.ravendb.cloud"],
+  "demo",
+  // authOptions
+);
+
+let initialized = false;
+
+function initialize() \{
+  if (initialized) return;
+  documentStore.initialize();
+  initialized = true;
+\}
+
+export function openAsyncSession() \{
+  if (!initialized) \{
+    initialize();
+  \}
+
+  return documentStore.openSession();
+\}
+`}
+
+
+
+For more on what options are available, see [Creating a Document Store][docs-what-is-document-store].
+
+You can set options manually, but more likely you'll want to set config variables in Wrangler
+or in Cloudflare to customize the document store initialization.
+
+
+
+## Updating Database Connection Settings
+
+### Enable Node Compatibility
+
+Update your `wrangler.toml` file to enable Node.js compatibility by setting `node_compat` to `true`:
+
+
+
+{`name = "ravendb-worker"
+main = "./src/index.ts"
+node_compat = true
+compatibility_date = "2022-05-03"
+`}
+
+
+
+This setting is required for the RavenDB Node.js SDK to operate correctly. If this is `false` or missing,
+you will experience runtime exceptions.
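+
+If you want to confirm the flag is picked up before wiring real settings, a throwaway handler like the
+following (a sketch; the test URL and database are the public live-test defaults) should respond without
+throwing once `node_compat` is enabled:
+
+{`// src/index.ts -- temporary smoke test for Node.js compatibility
+import \{ DocumentStore \} from "ravendb";
+
+export default \{
+  async fetch(): Promise<Response> \{
+    // Importing and constructing the SDK exercises the Node.js shims it relies on
+    const store = new DocumentStore(["http://live-test.ravendb.net"], "Northwind");
+    return new Response("RavenDB SDK loaded for database: " + store.database);
+  \},
+\};
+`}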
+
+### Define environment variables
+
+You will also want to set environment variables to pass database information such as the URLs and database name:
+
+
+
+{`# Define top-level environment variables
+# under the \`[vars]\` block using
+# the \`key = "value"\` format
+[vars]
+DB_URLS = "https://a.free.company.ravendb.cloud"
+DB_NAME = "dev"
+
+# Override values for \`--env production\` usage
+[env.production.vars]
+DB_URLS = "https://a.free.company.ravendb.cloud,https://b.free.company.ravendb.cloud"
+DB_NAME = "prod"
+`}
+
+
+
+There are two variables defined above:
+
+- `DB_URLS` -- These are the node URLs for your RavenDB instance (Cloud or self-hosted). The values are comma-separated.
+- `DB_NAME` -- This is the default database to connect to.
+
+The defaults are under `[vars]` and overridden in `[env.production.vars]`.
+
+
+You can also define settings within the Cloudflare Worker dashboard. The values in the `wrangler.toml` will
+overwrite those values *on new deployment*. Keep this in mind when deciding where to define the variables!
+
+
+Variables defined here will be exposed on the `env` variable passed to the root `fetch` function of the Worker.
+
+For example, a barebones `index.ts` ES module could look like:
+
+
+
+{`import \{ DocumentStore \} from "ravendb";
+
+let documentStore: DocumentStore;
+let initialized = false;
+
+function initialize(\{ urls, databaseName \}) \{
+  if (initialized) return;
+  documentStore = new DocumentStore(
+    urls,
+    databaseName
+  );
+  documentStore.initialize();
+  initialized = true;
+\}
+
+function openAsyncSession() \{
+  if (!initialized) \{
+    throw new Error("DocumentStore has not been initialized");
+  \}
+
+  return documentStore.openSession();
+\}
+
+export default \{
+  fetch(req, env, ctx) \{
+    const \{ DB_URLS, DB_NAME \} = env;
+
+    initialize(\{
+      urls: DB_URLS.split(","),
+      databaseName: DB_NAME
+    \});
+
+    ctx.db = openAsyncSession();
+
+    // Handle request
+    // ...
+    return handleRequest(req, env, ctx);
+  \}
+\}
+`}
+
+
+
+This creates a session per request and avoids re-initializing the document store.
+The request handler can then pass the document session to middleware or route handlers as needed.
+The [starter template][gh-ravendb-template] uses the [itty-router][npm-itty-router] package to make this easier.
+
+
+Cloudflare Worker invocations do not incur a cold start cost like other serverless platforms.
+However, requests are isolated and modules are not shared between requests. This means that
+document store initialization is incurred on every request; however, the overhead is minimal.
+
+For document caching during a session or across requests, using Cloudflare's KV natively is not yet
+supported by the RavenDB Node.js client SDK, but it could be implemented manually through application logic.
+
+
+
+
+## Configuring Support for Certificates
+
+Client certificate authentication is handled through [Cloudflare mTLS authentication for Workers][cf-mtls-worker].
+You will need to upload your certificate to your Cloudflare account so that it can be accessed and bound to your Worker.
+
+### Obtain RavenDB certificate
+
+First, download the RavenDB client certificate package you will use to authenticate.
+Follow the guides for either [Cloud certificates][docs-cloud-certs] or for [self-hosted certificates][docs-on-prem-certs].
+It is recommended to [generate a new client certificate][docs-generate-client-certificate]
+with limited access rights instead of a `ClusterAdmin`-level certificate.
+This also ensures the Worker is using a dedicated certificate that can be managed separately.
+
+Once extracted to a folder, you'll need the paths to the `.crt` and `.key` files for the next step.
+
+
+
+For Cloudflare Workers, you do not store your certificate files in your project directory.
+**Certificates are password-equivalents.** Take care not to accidentally commit them to source control.
+Keep them outside the project directory for this next step.
+
+
+
+### Upload certificate using wrangler
+
+You will use Cloudflare's `wrangler` CLI to upload your `.crt` and `.key` files as an mTLS certificate.
+You only need to do this once (and each time the certificate needs to be renewed).
+
+
+
+This guide will use `npx` to execute wrangler to ensure the commands work across platforms.
+You can also choose to install `wrangler` globally using `npm i wrangler -g` to use it without `npx`,
+but you will need to keep it updated. Read more about [Installing and updating Wrangler][cf-wrangler].
+
+
+
+In the project directory, run:
+
+
+
+{`npx wrangler mtls-certificate upload --cert path/to/db.crt --key path/to/db.key --name ravendb_cert
+`}
+
+
+
+This will display output like:
+
+
+
+{`Uploading mTLS Certificate ravendb_cert...
+Success! Uploaded mTLS Certificate ravendb_cert
+ID: <CERTIFICATE_ID>
+Issuer: CN=...
+Expires on ...
+`}
+
+
+
+Copy the `<CERTIFICATE_ID>` in the output for the next step.
+
+### Set up mTLS binding in wrangler
+
+You will need to add an mTLS "binding" so that the certificate is made available and used by the Worker at runtime.
+
+Edit your `wrangler.toml` file to update the following:
+
+
+
+{`mtls_certificates = [
+  \{ binding = "DB_CERT", certificate_id = "<CERTIFICATE_ID>" \}
+]
+`}
+
+
+
+Replace `<CERTIFICATE_ID>` with the Certificate ID you copied from the previous step.
+
+Be sure to also update the `DB_URLS` and `DB_NAME` variables for your cluster.
+
+For a deeper dive on what this is doing, you can [read more][cf-mtls-worker] about how mTLS bindings work in Cloudflare Workers.
+
+### Set custom fetcher for DocumentStore
+
+Once the certificate binding is added, Cloudflare will create a `DB_CERT` object exposed through `env`.
+You can then bind the provided `env.DB_CERT.fetch` custom fetch function to the `DocumentStore` using
+the `DocumentConventions.customFetch` option.
+
+An updated example `index.ts` ES module:
+
+
+
+{`import \{ DocumentStore \} from "ravendb";
+
+let documentStore: DocumentStore;
+let initialized = false;
+
+function initialize(\{ urls, databaseName, mtlsBinding \}) \{
+  if (initialized) return;
+  documentStore = new DocumentStore(
+    urls,
+    databaseName
+  );
+
+  // Bind custom mTLS binding \`fetch()\` function and ensure \`this\` remains bound to
+  // original context
+  documentStore.conventions.customFetch = mtlsBinding.fetch.bind(mtlsBinding);
+
+  documentStore.initialize();
+  initialized = true;
+\}
+
+function openAsyncSession() \{
+  if (!initialized) \{
+    throw new Error("DocumentStore has not been initialized");
+  \}
+
+  return documentStore.openSession();
+\}
+
+export default \{
+  fetch(req, env, ctx) \{
+    const \{ DB_URLS, DB_NAME, DB_CERT \} = env;
+
+    initialize(\{
+      urls: DB_URLS.split(","),
+      databaseName: DB_NAME,
+      mtlsBinding: DB_CERT
+    \});
+
+    ctx.db = openAsyncSession();
+
+    // Handle request
+    // ...
+    return handleRequest(req, env, ctx);
+  \}
+\}
+`}
+
+
+
+The `DB_CERT` variable is exposed on `env` and has a single `fetch` function that is automatically
+bound to the client certificate at runtime.
+
+## Configuring Cloudflare
+
+There is no extra configuration necessary if you are providing all the connection information in your `wrangler.toml` file.
+However, you may want to override variables set in `[env.production.vars]` through the Cloudflare Worker dashboard.
+
+Navigate to your Worker's **Settings > Variables > Environment Variables** and add the variables to override, like
+`DB_NAME` and `DB_URLS`. You will also see the `DB_CERT` binding listed if the mTLS certificate was successfully uploaded.
+
+![Cloudflare Worker environment variables and settings](./assets/cf-env-vars.jpg)
+
+## Next Steps
+
+- Learn more about [how to use the RavenDB Node.js client SDK][docs-nodejs]
+- Reference the [Cloudflare Worker starter template][gh-ravendb-template] to see the code
+- [Troubleshoot][troubleshooting] issues with RavenDB and Cloudflare Workers
+
+[troubleshooting]: ../../../start/guides/cloudflare-workers/troubleshooting
+[cloud-signup]: https://cloud.ravendb.net?utm_source=ravendb_docs&utm_medium=web&utm_campaign=howto_template_cloudflare_worker&utm_content=cloud_signup
+[gh-ravendb-template]: https://github.com/ravendb/template-cloudflare-worker
+[deploy-with-workers]: https://deploy.workers.cloudflare.com/?url=https://github.com/ravendb/template-cloudflare-worker
+[live-test]: http://live-test.ravendb.net
+[cf-mtls-worker]: https://developers.cloudflare.com/workers/runtime-apis/mtls
+[cf-wrangler]: https://developers.cloudflare.com/workers/wrangler/install-and-update/
+[docs-nodejs]: ../../../client-api/session/what-is-a-session-and-how-does-it-work
+[docs-what-is-document-store]: ../../../client-api/what-is-a-document-store
+[docs-create-db]: ../../../studio/database/create-new-database/general-flow
+[docs-cloud-certs]: ../../../cloud/cloud-security
+[docs-on-prem-certs]: ../../../studio/overview
+[docs-generate-client-certificate]: ../../../server/security/authentication/certificate-management#generate-client-certificate
+[npm-ravendb-client]: https://npmjs.com/package/ravendb
+[npm-itty-router]: https://npmjs.com/package/itty-router
+
diff --git a/versioned_docs/version-7.1/start/guides/cloudflare-workers/overview.mdx b/versioned_docs/version-7.1/start/guides/cloudflare-workers/overview.mdx
new file mode 100644
index 0000000000..03c61e7767
--- /dev/null
+++ b/versioned_docs/version-7.1/start/guides/cloudflare-workers/overview.mdx
@@ -0,0 +1,363 @@
+---
+title: "Guides: Cloudflare Workers"
+hide_table_of_contents: true
+sidebar_label: Overview
+sidebar_position: 0
+---
+
+import Admonition from '@theme/Admonition';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import CodeBlock from '@theme/CodeBlock';
+import LanguageSwitcher from "@site/src/components/LanguageSwitcher";
+import LanguageContent from "@site/src/components/LanguageContent";
+
+# Guides: Cloudflare Workers
+
+* [Cloudflare Workers](https://developers.cloudflare.com/workers/) is a serverless platform that allows
+  you to deploy workloads closer to where your users are, with 200+ metro data centers in its CDN network.
+  Learn more about how **Workers** operate [here](https://developers.cloudflare.com/workers/learning/how-workers-works/).
+* Cloudflare Workers run on the V8 runtime. The RavenDB Node.js client SDK supports querying RavenDB
+  resources hosted in RavenDB Cloud or in your own cloud infrastructure.
+* In this guide, you will learn how to deploy a Cloudflare Worker using the [RavenDB Cloudflare Worker template][template]
+  that is connected to your RavenDB database.
+
+  This guide assumes you are familiar with Node.js development techniques
+  and the basics of Cloudflare Workers.
+
+* Watch our tutorial video [below](../../../start/guides/cloudflare-workers/overview.mdx#tutorial-video)
+  or [on YouTube](https://www.youtube.com/watch?v=qMJfgQicjwk&t=1s).
+
+* In this page:
+  * [Before We Get Started](../../../start/guides/cloudflare-workers/overview.mdx#before-we-get-started)
+  * [Create a Cloudflare RavenDB Worker project](../../../start/guides/cloudflare-workers/overview.mdx#create-a-cloudflare-ravendb-worker-project)
+  * [Updating Database Connection Settings](../../../start/guides/cloudflare-workers/overview.mdx#updating-database-connection-settings)
+  * [Connecting to an Authenticated RavenDB Cluster](../../../start/guides/cloudflare-workers/overview.mdx#connecting-to-an-authenticated-ravendb-cluster)
+  * [Deploying to Production](../../../start/guides/cloudflare-workers/overview.mdx#deploying-to-production)
+  * [Using RavenDB in the Worker](../../../start/guides/cloudflare-workers/overview.mdx#using-ravendb-in-the-worker)
+  * [Tutorial Video](../../../start/guides/cloudflare-workers/overview.mdx#tutorial-video)
+
+## Before We Get Started
+
+You will need the following before continuing:
+
+- A [RavenDB Cloud][cloud-signup] account or a self-hosted client certificate
+- A free or paid [Cloudflare account](https://cloudflare.com)
+- [Git](https://git-scm.com)
+- [Node.js](https://nodejs.org) version 16 or above, with npm
+
+## Create a Cloudflare RavenDB Worker project
+
+There are two primary ways to get started:
+
+- Using the Cloudflare Deploy with Workers wizard
+- Using npm to initialize an empty template
+
+### Using Deploy with Workers wizard
+
+Using the [Deploy with Workers][deploy-with-workers] step-by-step wizard is the simplest method,
+but it requires a GitHub account and authorized access, which may not be applicable in all situations.
+For example, this method will not work with GitLab or GitHub Enterprise.
+
+![Screenshot of Deploy with Cloudflare Wizard](./assets/template-deploy-cloudflare.jpg "Screenshot of Deploy with Cloudflare Wizard")
+
+The wizard will guide you through deploying a Worker and hooking up a new repository with continuous
+deployment through GitHub Actions. It will also automatically set up your repository secrets.
+
+<Admonition type="note">
+The deployment wizard will fork the GitHub repository into your GitHub user account (not an organization).
+You will want to manually rename the repository and [unmark it as a "Template"][gh-template-repo] in the
+repository settings before cloning it.
+</Admonition>
+
+### Using npm to initialize an empty template
+
+If you do not want to use the wizard, you can use npm and Cloudflare's `create-cloudflare` package
+to create a new Worker using [the RavenDB template][template]:
+
+`npm init cloudflare my-project https://github.com/ravendb/template-cloudflare-worker`
+
+This will generate all the required code and run `npm install` for you to set up a new project on your computer.
+You can then push the project to GitHub or any other source-control provider from there.
+
+### Test the template locally
+
+By default, the template is set up to connect to the [RavenDB Live Test cluster][live-test].
+
+You can run the template locally to test the connection:
+
+`npm run dev`
+
+<Admonition type="note">
+If this is the first time you are connecting using the Wrangler CLI, it will open a browser window
+for you to authenticate using your Cloudflare account.
+After you sign in, you can return to the terminal.
+</Admonition>
+
+Open the browser by pressing the "B" key (e.g. `http://localhost:7071`) and you should see a screen like this:
+
+![Successfully connected to RavenDB welcome screen from Cloudflare](./assets/template-welcome-unauthenticated.jpg "Successfully connected to RavenDB welcome screen from Cloudflare (unauthenticated)")
+
+This means you have successfully connected to your RavenDB database.
+
+## Updating Database Connection Settings
+
+The `wrangler.toml` file contains the configuration for the Worker. Here's an example:
+
+<CodeBlock language="toml">
+{`name = "ravendb-worker"
+main = "./src/index.ts"
+node_compat = true
+compatibility_date = "2022-05-03"
+
+# mtls_certificates = [
+#   \{ binding = "DB_CERT", certificate_id = "<CERTIFICATE_ID>" \}
+# ]
+
+# Define top-level environment variables
+# under the \`[vars]\` block using
+# the \`key = "value"\` format
+[vars]
+DB_URLS = ""
+DB_NAME = ""
+
+# Override values for \`--env production\` usage
+[env.production]
+name = "ravendb-worker-production"
+[env.production.vars]
+DB_URLS = ""
+DB_NAME = ""
+`}
+</CodeBlock>
+
+The `node_compat = true` setting is required for the RavenDB Node.js SDK to operate correctly.
+If this is `false` or missing, you will experience runtime exceptions.
+
+There are two variables defined that are used by the template:
+
+- `DB_URLS` -- The node URLs for your RavenDB instance (Cloud or self-hosted).
+  The values are comma-separated.
+- `DB_NAME` -- The default database to connect to.
+
+When left blank, the Live Test connection settings are used. The defaults are under `[vars]`
+and overridden in `[env.production.vars]`.
+
+<Admonition type="note">
+You can also define settings within the Cloudflare Worker dashboard. The values in the `wrangler.toml`
+will overwrite those values *on new deployment*. Keep this in mind when deciding where to define the variables!
+</Admonition>
+
+<Admonition type="warning">
+For brand-new projects, the database you connect to may not exist yet.
+Follow the [create database procedure][docs-create-db] to create a new database; otherwise you will
+encounter a `DatabaseDoesNotExist` exception on startup.
+</Admonition>
+
+Variables defined here, including the `DB_CERT` mTLS binding, will be exposed as `process.env` variables
+you can access in the Worker at runtime. You'll use the mTLS binding when connecting to an authenticated
+cluster using your client certificate.
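+
+For example, with `node_compat` enabled, a handler could read the connection settings like this
+(a minimal sketch, assuming the variable names above):
+
+<CodeBlock language="typescript">
+{`// Reading the template's connection settings at runtime (sketch).
+// With node_compat = true, vars defined in wrangler.toml surface on process.env.
+const urls = (process.env.DB_URLS || "").split(",");
+const databaseName = process.env.DB_NAME;
+`}
+</CodeBlock>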
+
+## Connecting to an Authenticated RavenDB Cluster
+
+Client certificate authentication is handled through [Cloudflare mTLS authentication for Workers][cf-mtls-worker].
+You will need to upload your certificate to your Cloudflare account so that it can be accessed and bound to your Worker.
+
+### Obtain RavenDB certificate
+
+First, download the RavenDB client certificate package you will use to authenticate.
+Follow the guides for either [Cloud certificates][docs-cloud-certs] or [self-hosted certificates][docs-on-prem-certs].
+It is recommended to [generate a new client certificate][docs-generate-client-certificate]
+with limited access rights instead of using a `ClusterAdmin`-level certificate.
+This also ensures the Worker is using a dedicated certificate that can be managed separately.
+
+Once the package is extracted to a folder, you'll need the paths to the `.crt` and `.key` files for the next step.
+
+<Admonition type="warning">
+For Cloudflare Workers, you do not store your certificate files in your project directory.
+**Certificates are password-equivalents.** Take care not to accidentally commit them to source control.
+Keep them outside the project directory for this next step.
+</Admonition>
+
+### Upload certificate using wrangler
+
+You will use Cloudflare's `wrangler` CLI to upload your `.crt` and `.key` files as an mTLS certificate.
+You only need to do this once (and each time the certificate needs to be renewed).
+
+<Admonition type="note">
+This guide uses `npx` to execute wrangler to ensure the commands work across platforms.
+You can also choose to install `wrangler` globally using `npm i wrangler -g` to use it without `npx`,
+but you will need to keep it updated. Read more about [Installing and updating Wrangler][cf-wrangler].
+</Admonition>
+
+In the project directory, run:
+
+`npx wrangler mtls-certificate upload --cert path/to/db.crt --key path/to/db.key --name ravendb_cert`
+
+This will display output like:
+
+<CodeBlock language="text">
+{`Uploading mTLS Certificate ravendb_cert...
+Success! Uploaded mTLS Certificate ravendb_cert
+ID: <CERTIFICATE_ID>
+Issuer: CN=...
+Expires on ...
+`}
+</CodeBlock>
+
+Copy the `<CERTIFICATE_ID>` value from the output for the next step.
+
+### Set up mTLS binding in wrangler
+
+Cloudflare Workers use the `wrangler.toml` file for configuration. You will need to add a "binding"
+so that the certificate is made available to the Worker and used at runtime.
+
+Edit your `wrangler.toml` file to update the following:
+
+<CodeBlock language="toml">
+{`mtls_certificates = [
+  \{ binding = "DB_CERT", certificate_id = "<CERTIFICATE_ID>" \}
+]
+`}
+</CodeBlock>
+
+It is important to keep the `DB_CERT` binding name here, as it is expected by the template.
+Replace `<CERTIFICATE_ID>` with the Certificate ID you copied from the previous step.
+
+Be sure to also update the `DB_URLS` and `DB_NAME` variables for your cluster.
+
+For a deeper dive on what this is doing, you can [read more][cf-mtls-worker] about how mTLS
+bindings work in Cloudflare Workers.
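+
+Under the hood, the template wires this binding to the RavenDB client, presumably along the lines of the
+`DocumentConventions.customFetch` approach shown earlier in this documentation. A minimal sketch, assuming
+the `DB_URLS`/`DB_NAME` vars and the `DB_CERT` binding above:
+
+<CodeBlock language="typescript">
+{`import \{ DocumentStore \} from "ravendb";
+
+// Sketch of binding the mTLS \`fetch()\` to the DocumentStore before initialization
+function createStore(env) \{
+  const store = new DocumentStore(env.DB_URLS.split(","), env.DB_NAME);
+
+  // Keep \`this\` bound to the binding object, otherwise the call fails at runtime
+  store.conventions.customFetch = env.DB_CERT.fetch.bind(env.DB_CERT);
+
+  store.initialize();
+  return store;
+\}
+`}
+</CodeBlock>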
+
+### Testing Certificate Authentication Locally
+
+Once the certificate binding is added, you will be able to start the Worker locally and test the certificate authentication.
+
+`npm run dev`
+
+This will launch `wrangler` in development mode. It may require you to sign in to your Cloudflare account before continuing.
+
+<Admonition type="warning">
+The `env.DB_CERT` binding will not be available in local mode (`--local`); this is a known issue with Wrangler.
+The template is configured to start Wrangler in non-local mode.
+</Admonition>
+
+You should see the following message in the console:
+
+> A bound cert was found and will be used for RavenDB requests.
+
+Once started, the Worker will be running on a localhost address.
+
+Open the browser by pressing the "B" key (e.g. `http://localhost:7071`) and you should see a screen like this:
+
+![Successfully connected to RavenDB welcome screen from Cloudflare](./assets/template-welcome-authenticated.jpg "Successfully connected to RavenDB welcome screen from Cloudflare (authenticated)")
+
+If you see a green check and the correct connection details, you have successfully connected to your RavenDB database.
+
+## Deploying to Production
+
+### Automated Deployment
+
+If you have used the Deploy with Workers wizard, your GitHub repository is already set up for continuous
+integration and deployment to Cloudflare.
+
+If you have manually initialized the template, you can [enable GitHub Actions workflows][gh-workflows] once the code is pushed to GitHub.
+
+You will also need to [add two repository secrets][gh-repo-secrets]:
+
+- `CF_ACCOUNT_ID` -- Your Cloudflare global account ID
+- `CF_API_TOKEN` -- An API token with the "Edit Workers" permission
+
+Once these secrets are added, [trigger a workflow manually][gh-workflows-manual] or push a commit to trigger a deployment.
+
+### Manual Deployment
+
+You can also deploy a Worker manually using:
+
+`npm run deploy`
+
+If your Worker account is not yet set up, Wrangler will walk you through the steps.
+
+### Verifying Production Worker
+
+In your Cloudflare dashboard, the Worker should now be deployed.
+You can find your Worker URL in the dashboard under "Preview URL" and open it to test that the connection is working.
+
+![Preview URL shown in the Cloudflare Worker dashboard](./assets/cloudflare-worker-preview.jpg "Preview URL shown in the Cloudflare Worker dashboard")
+
+If it is not working, verify that the Wrangler settings are being applied.
+
+## Using RavenDB in the Worker
+
+The RavenDB Cloudflare template uses the [itty-router package][npm-itty-router] to provide basic routing and middleware support.
+
+Each route handler is passed `request` and `env` arguments.
+A document session is opened per request and is accessible through `env.db`.
+
+### Example: Load user on route
+
+<CodeBlock language="typescript">
+{`router.get("/users/:id", async (request: IRequest, env: Env) => \{
+  // Load the user document through the per-request session (env.db)
+  const user = await env.db.load("users/" + request.params.id);
+
+  return new Response(JSON.stringify(\{ user \}), \{ status: 200 \});
+\});
+`}
+</CodeBlock>
+
+## Tutorial Video
+
+Watch our _Using Cloudflare Workers with RavenDB_ tutorial: